/*
 * Copyright (c) 2005-2007 Chelsio, Inc. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
32 #ifndef __CHELSIO_COMMON_H
33 #define __CHELSIO_COMMON_H
35 #include <linux/kernel.h>
36 #include <linux/types.h>
37 #include <linux/ctype.h>
38 #include <linux/delay.h>
39 #include <linux/init.h>
40 #include <linux/netdevice.h>
41 #include <linux/ethtool.h>
42 #include <linux/mii.h>
45 #define CH_ERR(adap, fmt, ...) dev_err(&adap->pdev->dev, fmt, ## __VA_ARGS__)
46 #define CH_WARN(adap, fmt, ...) dev_warn(&adap->pdev->dev, fmt, ## __VA_ARGS__)
47 #define CH_ALERT(adap, fmt, ...) \
48 dev_printk(KERN_ALERT, &adap->pdev->dev, fmt, ## __VA_ARGS__)
/*
 * More powerful macro that selectively prints messages based on msg_enable.
 * For info and debugging messages.
 */
#define CH_MSG(adapter, level, category, fmt, ...) do { \
	if ((adapter)->msg_enable & NETIF_MSG_##category) \
		dev_printk(KERN_##level, &adapter->pdev->dev, fmt, \
			   ## __VA_ARGS__); \
} while (0)

#ifdef DEBUG
/* Debug builds: route CH_DBG through CH_MSG at KERN_DEBUG level */
# define CH_DBG(adapter, category, fmt, ...) \
	CH_MSG(adapter, DEBUG, category, fmt, ## __VA_ARGS__)
#else
/* Non-debug builds: CH_DBG compiles to nothing */
# define CH_DBG(adapter, category, fmt, ...)
#endif
67 /* Additional NETIF_MSG_* categories */
68 #define NETIF_MSG_MMIO 0x8000000
/*
 * Iterator state for walking a net device's Rx multicast address list.
 * NOTE(review): struct header and the idx member were lost in extraction;
 * reconstructed from their uses in t3_get_next_mcaddr() below — confirm
 * against the original header.
 */
struct t3_rx_mode {
	struct net_device *dev;
	struct dev_mc_list *mclist;	/* current position in dev's mc list */
	unsigned int idx;		/* # of addresses consumed so far */
};

/*
 * Initialize a t3_rx_mode iterator over @dev's multicast list starting
 * at @mclist.
 */
static inline void init_rx_mode(struct t3_rx_mode *p, struct net_device *dev,
				struct dev_mc_list *mclist)
{
	p->dev = dev;
	p->mclist = mclist;
	p->idx = 0;
}
84 static inline u8
*t3_get_next_mcaddr(struct t3_rx_mode
*rm
)
88 if (rm
->mclist
&& rm
->idx
< rm
->dev
->mc_count
) {
89 addr
= rm
->mclist
->dmi_addr
;
90 rm
->mclist
= rm
->mclist
->next
;
97 MAX_NPORTS
= 2, /* max # of ports */
98 MAX_FRAME_SIZE
= 10240, /* max MAC frame size, including header + FCS */
99 EEPROMSIZE
= 8192, /* Serial EEPROM size */
100 RSS_TABLE_SIZE
= 64, /* size of RSS lookup and mapping tables */
101 TCB_SIZE
= 128, /* TCB size */
102 NMTUS
= 16, /* size of MTU table */
103 NCCTRL_WIN
= 32, /* # of congestion control windows */
106 #define MAX_RX_COALESCING_LEN 16224U
111 PAUSE_AUTONEG
= 1 << 2
115 SUPPORTED_OFFLOAD
= 1 << 24,
116 SUPPORTED_IRQ
= 1 << 25
119 enum { /* adapter interrupt-maintained statistics */
120 STAT_ULP_CH0_PBL_OOB
,
121 STAT_ULP_CH1_PBL_OOB
,
124 IRQ_NUM_STATS
/* keep last */
128 SGE_QSETS
= 8, /* # of SGE Tx/Rx/RspQ sets */
129 SGE_RXQ_PER_SET
= 2, /* # of Rx queues per set */
130 SGE_TXQ_PER_SET
= 3 /* # of Tx queues per set */
133 enum sge_context_type
{ /* SGE egress context types */
141 AN_PKT_SIZE
= 32, /* async notification packet size */
142 IMMED_PKT_SIZE
= 48 /* packet size for immediate data */
145 struct sg_ent
{ /* SGE scatter/gather entry */
150 #ifndef SGE_NUM_GENBITS
152 # define SGE_NUM_GENBITS 2
155 #define TX_DESC_FLITS 16U
156 #define WR_FLITS (TX_DESC_FLITS + 1 - SGE_NUM_GENBITS)
162 int (*read
)(struct adapter
*adapter
, int phy_addr
, int mmd_addr
,
163 int reg_addr
, unsigned int *val
);
164 int (*write
)(struct adapter
*adapter
, int phy_addr
, int mmd_addr
,
165 int reg_addr
, unsigned int val
);
168 struct adapter_info
{
169 unsigned char nports
; /* # of ports */
170 unsigned char phy_base_addr
; /* MDIO PHY base address */
172 unsigned char mdiinv
;
173 unsigned int gpio_out
; /* GPIO output settings */
174 unsigned int gpio_intr
; /* GPIO IRQ enable mask */
175 unsigned long caps
; /* adapter capabilities */
176 const struct mdio_ops
*mdio_ops
; /* MDIO operations */
177 const char *desc
; /* product description */
180 struct port_type_info
{
181 void (*phy_prep
)(struct cphy
*phy
, struct adapter
*adapter
,
182 int phy_addr
, const struct mdio_ops
*ops
);
188 unsigned long parity_err
;
189 unsigned long active_rgn_full
;
190 unsigned long nfa_srch_err
;
191 unsigned long unknown_cmd
;
192 unsigned long reqq_parity_err
;
193 unsigned long dispq_parity_err
;
194 unsigned long del_act_empty
;
198 unsigned long corr_err
;
199 unsigned long uncorr_err
;
200 unsigned long parity_err
;
201 unsigned long addr_err
;
205 u64 tx_octets
; /* total # of octets in good frames */
206 u64 tx_octets_bad
; /* total # of octets in error frames */
207 u64 tx_frames
; /* all good frames */
208 u64 tx_mcast_frames
; /* good multicast frames */
209 u64 tx_bcast_frames
; /* good broadcast frames */
210 u64 tx_pause
; /* # of transmitted pause frames */
211 u64 tx_deferred
; /* frames with deferred transmissions */
212 u64 tx_late_collisions
; /* # of late collisions */
213 u64 tx_total_collisions
; /* # of total collisions */
214 u64 tx_excess_collisions
; /* frame errors from excessive collissions */
215 u64 tx_underrun
; /* # of Tx FIFO underruns */
216 u64 tx_len_errs
; /* # of Tx length errors */
217 u64 tx_mac_internal_errs
; /* # of internal MAC errors on Tx */
218 u64 tx_excess_deferral
; /* # of frames with excessive deferral */
219 u64 tx_fcs_errs
; /* # of frames with bad FCS */
221 u64 tx_frames_64
; /* # of Tx frames in a particular range */
222 u64 tx_frames_65_127
;
223 u64 tx_frames_128_255
;
224 u64 tx_frames_256_511
;
225 u64 tx_frames_512_1023
;
226 u64 tx_frames_1024_1518
;
227 u64 tx_frames_1519_max
;
229 u64 rx_octets
; /* total # of octets in good frames */
230 u64 rx_octets_bad
; /* total # of octets in error frames */
231 u64 rx_frames
; /* all good frames */
232 u64 rx_mcast_frames
; /* good multicast frames */
233 u64 rx_bcast_frames
; /* good broadcast frames */
234 u64 rx_pause
; /* # of received pause frames */
235 u64 rx_fcs_errs
; /* # of received frames with bad FCS */
236 u64 rx_align_errs
; /* alignment errors */
237 u64 rx_symbol_errs
; /* symbol errors */
238 u64 rx_data_errs
; /* data errors */
239 u64 rx_sequence_errs
; /* sequence errors */
240 u64 rx_runt
; /* # of runt frames */
241 u64 rx_jabber
; /* # of jabber frames */
242 u64 rx_short
; /* # of short frames */
243 u64 rx_too_long
; /* # of oversized frames */
244 u64 rx_mac_internal_errs
; /* # of internal MAC errors on Rx */
246 u64 rx_frames_64
; /* # of Rx frames in a particular range */
247 u64 rx_frames_65_127
;
248 u64 rx_frames_128_255
;
249 u64 rx_frames_256_511
;
250 u64 rx_frames_512_1023
;
251 u64 rx_frames_1024_1518
;
252 u64 rx_frames_1519_max
;
254 u64 rx_cong_drops
; /* # of Rx drops due to SGE congestion */
256 unsigned long tx_fifo_parity_err
;
257 unsigned long rx_fifo_parity_err
;
258 unsigned long tx_fifo_urun
;
259 unsigned long rx_fifo_ovfl
;
260 unsigned long serdes_signal_loss
;
261 unsigned long xaui_pcs_ctc_err
;
262 unsigned long xaui_pcs_align_change
;
265 struct tp_mib_stats
{
268 u32 ipInHdrErrors_hi
;
269 u32 ipInHdrErrors_lo
;
270 u32 ipInAddrErrors_hi
;
271 u32 ipInAddrErrors_lo
;
272 u32 ipInUnknownProtos_hi
;
273 u32 ipInUnknownProtos_lo
;
278 u32 ipOutRequests_hi
;
279 u32 ipOutRequests_lo
;
280 u32 ipOutDiscards_hi
;
281 u32 ipOutDiscards_lo
;
282 u32 ipOutNoRoutes_hi
;
283 u32 ipOutNoRoutes_lo
;
301 u32 tcpRetransSeg_hi
;
302 u32 tcpRetransSeg_lo
;
310 unsigned int nchan
; /* # of channels */
311 unsigned int pmrx_size
; /* total PMRX capacity */
312 unsigned int pmtx_size
; /* total PMTX capacity */
313 unsigned int cm_size
; /* total CM capacity */
314 unsigned int chan_rx_size
; /* per channel Rx size */
315 unsigned int chan_tx_size
; /* per channel Tx size */
316 unsigned int rx_pg_size
; /* Rx page size */
317 unsigned int tx_pg_size
; /* Tx page size */
318 unsigned int rx_num_pgs
; /* # of Rx pages */
319 unsigned int tx_num_pgs
; /* # of Tx pages */
320 unsigned int ntimer_qs
; /* # of timer queues */
323 struct qset_params
{ /* SGE queue set parameters */
324 unsigned int polling
; /* polling/interrupt service for rspq */
325 unsigned int coalesce_usecs
; /* irq coalescing timer */
326 unsigned int rspq_size
; /* # of entries in response queue */
327 unsigned int fl_size
; /* # of entries in regular free list */
328 unsigned int jumbo_size
; /* # of entries in jumbo free list */
329 unsigned int txq_size
[SGE_TXQ_PER_SET
]; /* Tx queue sizes */
330 unsigned int cong_thres
; /* FL congestion threshold */
334 unsigned int max_pkt_size
; /* max offload pkt size */
335 struct qset_params qset
[SGE_QSETS
];
339 unsigned int mode
; /* selects MC5 width */
340 unsigned int nservers
; /* size of server region */
341 unsigned int nfilters
; /* size of filter region */
342 unsigned int nroutes
; /* size of routing region */
345 /* Default MC5 region sizes */
347 DEFAULT_NSERVERS
= 512,
348 DEFAULT_NFILTERS
= 128
351 /* MC5 modes, these must be non-0 */
353 MC5_MODE_144_BIT
= 1,
362 unsigned int mem_timing
;
364 u8 port_type
[MAX_NPORTS
];
365 unsigned short xauicfg
[2];
369 unsigned int vpd_cap_addr
;
370 unsigned int pcie_cap_addr
;
371 unsigned short speed
;
373 unsigned char variant
;
378 PCI_VARIANT_PCIX_MODE1_PARITY
,
379 PCI_VARIANT_PCIX_MODE1_ECC
,
380 PCI_VARIANT_PCIX_266_MODE2
,
384 struct adapter_params
{
385 struct sge_params sge
;
386 struct mc5_params mc5
;
388 struct vpd_params vpd
;
389 struct pci_params pci
;
391 const struct adapter_info
*info
;
393 unsigned short mtus
[NMTUS
];
394 unsigned short a_wnd
[NCCTRL_WIN
];
395 unsigned short b_wnd
[NCCTRL_WIN
];
397 unsigned int nports
; /* # of ethernet ports */
398 unsigned int stats_update_period
; /* MAC stats accumulation period */
399 unsigned int linkpoll_period
; /* link poll period in 0.1s */
400 unsigned int rev
; /* chip revision */
403 struct trace_params
{
421 unsigned int supported
; /* link capabilities */
422 unsigned int advertising
; /* advertised capabilities */
423 unsigned short requested_speed
; /* speed user has requested */
424 unsigned short speed
; /* actual link speed */
425 unsigned char requested_duplex
; /* duplex user has requested */
426 unsigned char duplex
; /* actual link duplex */
427 unsigned char requested_fc
; /* flow control user has requested */
428 unsigned char fc
; /* actual link flow control */
429 unsigned char autoneg
; /* autonegotiating? */
430 unsigned int link_ok
; /* link up? */
433 #define SPEED_INVALID 0xffff
434 #define DUPLEX_INVALID 0xff
437 struct adapter
*adapter
;
438 unsigned int tcam_size
;
439 unsigned char part_type
;
440 unsigned char parity_enabled
;
442 struct mc5_stats stats
;
445 static inline unsigned int t3_mc5_size(const struct mc5
*p
)
451 struct adapter
*adapter
; /* backpointer to adapter */
452 unsigned int size
; /* memory size in bytes */
453 unsigned int width
; /* MC7 interface width */
454 unsigned int offset
; /* register address offset for MC7 instance */
455 const char *name
; /* name of MC7 instance */
456 struct mc7_stats stats
; /* MC7 statistics */
459 static inline unsigned int t3_mc7_size(const struct mc7
*p
)
465 struct adapter
*adapter
;
467 unsigned int nucast
; /* # of address filters for unicast MACs */
468 struct mac_stats stats
;
472 MAC_DIRECTION_RX
= 1,
473 MAC_DIRECTION_TX
= 2,
474 MAC_RXFIFO_SIZE
= 32768
477 /* IEEE 802.3ae specified MDIO devices */
479 MDIO_DEV_PMA_PMD
= 1,
485 /* PHY loopback direction */
491 /* PHY interrupt types */
493 cphy_cause_link_change
= 1,
494 cphy_cause_fifo_error
= 2
499 void (*destroy
)(struct cphy
*phy
);
500 int (*reset
)(struct cphy
*phy
, int wait
);
502 int (*intr_enable
)(struct cphy
*phy
);
503 int (*intr_disable
)(struct cphy
*phy
);
504 int (*intr_clear
)(struct cphy
*phy
);
505 int (*intr_handler
)(struct cphy
*phy
);
507 int (*autoneg_enable
)(struct cphy
*phy
);
508 int (*autoneg_restart
)(struct cphy
*phy
);
510 int (*advertise
)(struct cphy
*phy
, unsigned int advertise_map
);
511 int (*set_loopback
)(struct cphy
*phy
, int mmd
, int dir
, int enable
);
512 int (*set_speed_duplex
)(struct cphy
*phy
, int speed
, int duplex
);
513 int (*get_link_status
)(struct cphy
*phy
, int *link_ok
, int *speed
,
514 int *duplex
, int *fc
);
515 int (*power_down
)(struct cphy
*phy
, int enable
);
520 int addr
; /* PHY address */
521 struct adapter
*adapter
; /* associated adapter */
522 unsigned long fifo_errors
; /* FIFO over/under-flows */
523 const struct cphy_ops
*ops
; /* PHY operations */
524 int (*mdio_read
)(struct adapter
*adapter
, int phy_addr
, int mmd_addr
,
525 int reg_addr
, unsigned int *val
);
526 int (*mdio_write
)(struct adapter
*adapter
, int phy_addr
, int mmd_addr
,
527 int reg_addr
, unsigned int val
);
530 /* Convenience MDIO read/write wrappers */
531 static inline int mdio_read(struct cphy
*phy
, int mmd
, int reg
,
534 return phy
->mdio_read(phy
->adapter
, phy
->addr
, mmd
, reg
, valp
);
537 static inline int mdio_write(struct cphy
*phy
, int mmd
, int reg
,
540 return phy
->mdio_write(phy
->adapter
, phy
->addr
, mmd
, reg
, val
);
543 /* Convenience initializer */
544 static inline void cphy_init(struct cphy
*phy
, struct adapter
*adapter
,
545 int phy_addr
, struct cphy_ops
*phy_ops
,
546 const struct mdio_ops
*mdio_ops
)
548 phy
->adapter
= adapter
;
549 phy
->addr
= phy_addr
;
552 phy
->mdio_read
= mdio_ops
->read
;
553 phy
->mdio_write
= mdio_ops
->write
;
557 /* Accumulate MAC statistics every 180 seconds. For 1G we multiply by 10. */
558 #define MAC_STATS_ACCUM_SECS 180
560 #define XGM_REG(reg_addr, idx) \
561 ((reg_addr) + (idx) * (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR))
563 struct addr_val_pair
{
564 unsigned int reg_addr
;
570 #ifndef PCI_VENDOR_ID_CHELSIO
571 # define PCI_VENDOR_ID_CHELSIO 0x1425
574 #define for_each_port(adapter, iter) \
575 for (iter = 0; iter < (adapter)->params.nports; ++iter)
577 #define adapter_info(adap) ((adap)->params.info)
579 static inline int uses_xaui(const struct adapter
*adap
)
581 return adapter_info(adap
)->caps
& SUPPORTED_AUI
;
584 static inline int is_10G(const struct adapter
*adap
)
586 return adapter_info(adap
)->caps
& SUPPORTED_10000baseT_Full
;
589 static inline int is_offload(const struct adapter
*adap
)
591 return adapter_info(adap
)->caps
& SUPPORTED_OFFLOAD
;
594 static inline unsigned int core_ticks_per_usec(const struct adapter
*adap
)
596 return adap
->params
.vpd
.cclk
/ 1000;
599 static inline unsigned int is_pcie(const struct adapter
*adap
)
601 return adap
->params
.pci
.variant
== PCI_VARIANT_PCIE
;
604 void t3_set_reg_field(struct adapter
*adap
, unsigned int addr
, u32 mask
,
606 void t3_write_regs(struct adapter
*adapter
, const struct addr_val_pair
*p
,
607 int n
, unsigned int offset
);
608 int t3_wait_op_done_val(struct adapter
*adapter
, int reg
, u32 mask
,
609 int polarity
, int attempts
, int delay
, u32
*valp
);
610 static inline int t3_wait_op_done(struct adapter
*adapter
, int reg
, u32 mask
,
611 int polarity
, int attempts
, int delay
)
613 return t3_wait_op_done_val(adapter
, reg
, mask
, polarity
, attempts
,
616 int t3_mdio_change_bits(struct cphy
*phy
, int mmd
, int reg
, unsigned int clear
,
618 int t3_phy_reset(struct cphy
*phy
, int mmd
, int wait
);
619 int t3_phy_advertise(struct cphy
*phy
, unsigned int advert
);
620 int t3_set_phy_speed_duplex(struct cphy
*phy
, int speed
, int duplex
);
622 void t3_intr_enable(struct adapter
*adapter
);
623 void t3_intr_disable(struct adapter
*adapter
);
624 void t3_intr_clear(struct adapter
*adapter
);
625 void t3_port_intr_enable(struct adapter
*adapter
, int idx
);
626 void t3_port_intr_disable(struct adapter
*adapter
, int idx
);
627 void t3_port_intr_clear(struct adapter
*adapter
, int idx
);
628 int t3_slow_intr_handler(struct adapter
*adapter
);
629 int t3_phy_intr_handler(struct adapter
*adapter
);
631 void t3_link_changed(struct adapter
*adapter
, int port_id
);
632 int t3_link_start(struct cphy
*phy
, struct cmac
*mac
, struct link_config
*lc
);
633 const struct adapter_info
*t3_get_adapter_info(unsigned int board_id
);
634 int t3_seeprom_read(struct adapter
*adapter
, u32 addr
, u32
*data
);
635 int t3_seeprom_write(struct adapter
*adapter
, u32 addr
, u32 data
);
636 int t3_seeprom_wp(struct adapter
*adapter
, int enable
);
637 int t3_read_flash(struct adapter
*adapter
, unsigned int addr
,
638 unsigned int nwords
, u32
*data
, int byte_oriented
);
639 int t3_load_fw(struct adapter
*adapter
, const u8
* fw_data
, unsigned int size
);
640 int t3_get_fw_version(struct adapter
*adapter
, u32
*vers
);
641 int t3_check_fw_version(struct adapter
*adapter
);
642 int t3_init_hw(struct adapter
*adapter
, u32 fw_params
);
643 void mac_prep(struct cmac
*mac
, struct adapter
*adapter
, int index
);
644 void early_hw_init(struct adapter
*adapter
, const struct adapter_info
*ai
);
645 int t3_prep_adapter(struct adapter
*adapter
, const struct adapter_info
*ai
,
647 void t3_led_ready(struct adapter
*adapter
);
648 void t3_fatal_err(struct adapter
*adapter
);
649 void t3_set_vlan_accel(struct adapter
*adapter
, unsigned int ports
, int on
);
650 void t3_config_rss(struct adapter
*adapter
, unsigned int rss_config
,
651 const u8
* cpus
, const u16
*rspq
);
652 int t3_read_rss(struct adapter
*adapter
, u8
* lkup
, u16
*map
);
653 int t3_mps_set_active_ports(struct adapter
*adap
, unsigned int port_mask
);
654 int t3_cim_ctl_blk_read(struct adapter
*adap
, unsigned int addr
,
655 unsigned int n
, unsigned int *valp
);
656 int t3_mc7_bd_read(struct mc7
*mc7
, unsigned int start
, unsigned int n
,
659 int t3_mac_reset(struct cmac
*mac
);
660 void t3b_pcs_reset(struct cmac
*mac
);
661 int t3_mac_enable(struct cmac
*mac
, int which
);
662 int t3_mac_disable(struct cmac
*mac
, int which
);
663 int t3_mac_set_mtu(struct cmac
*mac
, unsigned int mtu
);
664 int t3_mac_set_rx_mode(struct cmac
*mac
, struct t3_rx_mode
*rm
);
665 int t3_mac_set_address(struct cmac
*mac
, unsigned int idx
, u8 addr
[6]);
666 int t3_mac_set_num_ucast(struct cmac
*mac
, int n
);
667 const struct mac_stats
*t3_mac_update_stats(struct cmac
*mac
);
668 int t3_mac_set_speed_duplex_fc(struct cmac
*mac
, int speed
, int duplex
, int fc
);
670 void t3_mc5_prep(struct adapter
*adapter
, struct mc5
*mc5
, int mode
);
671 int t3_mc5_init(struct mc5
*mc5
, unsigned int nservers
, unsigned int nfilters
,
672 unsigned int nroutes
);
673 void t3_mc5_intr_handler(struct mc5
*mc5
);
674 int t3_read_mc5_range(const struct mc5
*mc5
, unsigned int start
, unsigned int n
,
677 int t3_tp_set_coalescing_size(struct adapter
*adap
, unsigned int size
, int psh
);
678 void t3_tp_set_max_rxsize(struct adapter
*adap
, unsigned int size
);
679 void t3_tp_set_offload_mode(struct adapter
*adap
, int enable
);
680 void t3_tp_get_mib_stats(struct adapter
*adap
, struct tp_mib_stats
*tps
);
681 void t3_load_mtus(struct adapter
*adap
, unsigned short mtus
[NMTUS
],
682 unsigned short alpha
[NCCTRL_WIN
],
683 unsigned short beta
[NCCTRL_WIN
], unsigned short mtu_cap
);
684 void t3_read_hw_mtus(struct adapter
*adap
, unsigned short mtus
[NMTUS
]);
685 void t3_get_cong_cntl_tab(struct adapter
*adap
,
686 unsigned short incr
[NMTUS
][NCCTRL_WIN
]);
687 void t3_config_trace_filter(struct adapter
*adapter
,
688 const struct trace_params
*tp
, int filter_index
,
689 int invert
, int enable
);
690 int t3_config_sched(struct adapter
*adap
, unsigned int kbps
, int sched
);
692 void t3_sge_prep(struct adapter
*adap
, struct sge_params
*p
);
693 void t3_sge_init(struct adapter
*adap
, struct sge_params
*p
);
694 int t3_sge_init_ecntxt(struct adapter
*adapter
, unsigned int id
, int gts_enable
,
695 enum sge_context_type type
, int respq
, u64 base_addr
,
696 unsigned int size
, unsigned int token
, int gen
,
698 int t3_sge_init_flcntxt(struct adapter
*adapter
, unsigned int id
,
699 int gts_enable
, u64 base_addr
, unsigned int size
,
700 unsigned int esize
, unsigned int cong_thres
, int gen
,
702 int t3_sge_init_rspcntxt(struct adapter
*adapter
, unsigned int id
,
703 int irq_vec_idx
, u64 base_addr
, unsigned int size
,
704 unsigned int fl_thres
, int gen
, unsigned int cidx
);
705 int t3_sge_init_cqcntxt(struct adapter
*adapter
, unsigned int id
, u64 base_addr
,
706 unsigned int size
, int rspq
, int ovfl_mode
,
707 unsigned int credits
, unsigned int credit_thres
);
708 int t3_sge_enable_ecntxt(struct adapter
*adapter
, unsigned int id
, int enable
);
709 int t3_sge_disable_fl(struct adapter
*adapter
, unsigned int id
);
710 int t3_sge_disable_rspcntxt(struct adapter
*adapter
, unsigned int id
);
711 int t3_sge_disable_cqcntxt(struct adapter
*adapter
, unsigned int id
);
712 int t3_sge_read_ecntxt(struct adapter
*adapter
, unsigned int id
, u32 data
[4]);
713 int t3_sge_read_fl(struct adapter
*adapter
, unsigned int id
, u32 data
[4]);
714 int t3_sge_read_cq(struct adapter
*adapter
, unsigned int id
, u32 data
[4]);
715 int t3_sge_read_rspq(struct adapter
*adapter
, unsigned int id
, u32 data
[4]);
716 int t3_sge_cqcntxt_op(struct adapter
*adapter
, unsigned int id
, unsigned int op
,
717 unsigned int credits
);
719 void t3_vsc8211_phy_prep(struct cphy
*phy
, struct adapter
*adapter
,
720 int phy_addr
, const struct mdio_ops
*mdio_ops
);
721 void t3_ael1002_phy_prep(struct cphy
*phy
, struct adapter
*adapter
,
722 int phy_addr
, const struct mdio_ops
*mdio_ops
);
723 void t3_ael1006_phy_prep(struct cphy
*phy
, struct adapter
*adapter
,
724 int phy_addr
, const struct mdio_ops
*mdio_ops
);
725 void t3_qt2045_phy_prep(struct cphy
*phy
, struct adapter
*adapter
, int phy_addr
,
726 const struct mdio_ops
*mdio_ops
);
727 void t3_xaui_direct_phy_prep(struct cphy
*phy
, struct adapter
*adapter
,
728 int phy_addr
, const struct mdio_ops
*mdio_ops
);
729 #endif /* __CHELSIO_COMMON_H */