/*
 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "common.h"
#include "regs.h"
#include "sge_defs.h"
#include "firmware_exports.h"
/**
 * t3_wait_op_done_val - wait until an operation is completed
 * @adapter: the adapter performing the operation
 * @reg: the register to check for completion
 * @mask: a single-bit field within @reg that indicates completion
 * @polarity: the value of the field when the operation is completed
 * @attempts: number of check iterations
 * @delay: delay in usecs between iterations
 * @valp: where to store the value of the register at completion time
 *
 * Wait until an operation is completed by checking a bit in a register
 * up to @attempts times.  If @valp is not NULL the value of the register
 * at the time it indicated completion is stored there.  Returns 0 if the
 * operation completes and -EAGAIN otherwise.
 */
int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
                        int polarity, int attempts, int delay, u32 *valp)
{
        while (1) {
                u32 val = t3_read_reg(adapter, reg);

                if (!!(val & mask) == polarity) {
                        if (valp)
                                *valp = val;
                        return 0;
                }
                if (--attempts == 0)
                        return -EAGAIN;
                if (delay)
                        udelay(delay);
        }
}
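/*
 * Example usage (illustrative sketch, not part of the driver): poll a BUSY
 * bit until it clears, capturing the final register value.  The register,
 * attempt count, and delay below are placeholders:
 *
 *	u32 val;
 *
 *	if (t3_wait_op_done_val(adapter, A_SF_OP, F_BUSY, 0, 5, 10, &val))
 *		CH_ERR(adapter, "serial flash op timed out\n");
 */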
/**
 * t3_write_regs - write a bunch of registers
 * @adapter: the adapter to program
 * @p: an array of register address/register value pairs
 * @n: the number of address/value pairs
 * @offset: register address offset
 *
 * Takes an array of register address/register value pairs and writes each
 * value to the corresponding register.  Register addresses are adjusted
 * by the supplied offset.
 */
void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p,
                   int n, unsigned int offset)
{
        while (n--) {
                t3_write_reg(adapter, p->reg_addr + offset, p->val);
                p++;
        }
}

/**
 * t3_set_reg_field - set a register field to a value
 * @adapter: the adapter to program
 * @addr: the register address
 * @mask: specifies the portion of the register to modify
 * @val: the new value for the register field
 *
 * Sets a register field specified by the supplied mask to the
 * given value.
 */
void t3_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
                      u32 val)
{
        u32 v = t3_read_reg(adapter, addr) & ~mask;

        t3_write_reg(adapter, addr, v | val);
        t3_read_reg(adapter, addr);     /* flush */
}
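/*
 * Example usage (illustrative): t3_set_reg_field() is a read-modify-write.
 * To touch only the VLAN-extraction bits of A_TP_OUT_CONFIG, pass the field
 * mask and the new value for that field, exactly as t3_set_vlan_accel()
 * does later in this file:
 *
 *	t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
 *			 ports << S_VLANEXTRACTIONENABLE,
 *			 on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
 */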
/**
 * t3_read_indirect - read indirectly addressed registers
 * @adap: the adapter
 * @addr_reg: register holding the indirect address
 * @data_reg: register holding the value of the indirect register
 * @vals: where the read register values are stored
 * @start_idx: index of first indirect register to read
 * @nregs: how many indirect registers to read
 *
 * Reads registers that are accessed indirectly through an address/data
 * register pair.
 */
void t3_read_indirect(struct adapter *adap, unsigned int addr_reg,
                      unsigned int data_reg, u32 *vals, unsigned int nregs,
                      unsigned int start_idx)
{
        while (nregs--) {
                t3_write_reg(adap, addr_reg, start_idx);
                *vals++ = t3_read_reg(adap, data_reg);
                start_idx++;
        }
}

/**
 * t3_mc7_bd_read - read from MC7 through backdoor accesses
 * @mc7: identifies MC7 to read from
 * @start: index of first 64-bit word to read
 * @n: number of 64-bit words to read
 * @buf: where to store the read result
 *
 * Read n 64-bit words from MC7 starting at word start, using backdoor
 * accesses.
 */
int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
                   u64 *buf)
{
        static const int shift[] = { 0, 0, 16, 24 };
        static const int step[] = { 0, 32, 16, 8 };

        unsigned int size64 = mc7->size / 8;    /* # of 64-bit words */
        struct adapter *adap = mc7->adapter;

        if (start >= size64 || start + n > size64)
                return -EINVAL;

        start *= (8 << mc7->width);
        while (n--) {
                int i;
                u64 val64 = 0;

                for (i = (1 << mc7->width) - 1; i >= 0; --i) {
                        int attempts = 10;
                        u32 val;

                        t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR, start);
                        t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0);
                        val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP);
                        while ((val & F_BUSY) && attempts--)
                                val = t3_read_reg(adap,
                                                  mc7->offset + A_MC7_BD_OP);
                        if (val & F_BUSY)
                                return -EIO;

                        val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1);
                        if (mc7->width == 0) {
                                val64 = t3_read_reg(adap,
                                                    mc7->offset +
                                                    A_MC7_BD_DATA0);
                                val64 |= (u64) val << 32;
                        } else {
                                if (mc7->width > 1)
                                        val >>= shift[mc7->width];
                                val64 |= (u64) val << (step[mc7->width] * i);
                        }
                        start += 8;
                }
                *buf++ = val64;
        }
        return 0;
}
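/*
 * Example usage (illustrative sketch): dump the first two 64-bit words of
 * the CM memory through the backdoor interface.  &adapter->cm is the MC7
 * instance that the slow-path interrupt handler below also uses.
 *
 *	u64 buf[2];
 *
 *	if (t3_mc7_bd_read(&adapter->cm, 0, 2, buf) == 0)
 *		CH_WARN(adapter, "CM[0] = %llx\n",
 *			(unsigned long long)buf[0]);
 */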
/*
 * Initialize MI1.
 */
static void mi1_init(struct adapter *adap, const struct adapter_info *ai)
{
        u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
        u32 val = F_PREEN | V_MDIINV(ai->mdiinv) | V_MDIEN(ai->mdien) |
                  V_CLKDIV(clkdiv);

        if (!(ai->caps & SUPPORTED_10000baseT_Full))
                val |= V_ST(1);
        t3_write_reg(adap, A_MI1_CFG, val);
}

#define MDIO_ATTEMPTS 10

/*
 * MI1 read/write operations for direct-addressed PHYs.
 */
static int mi1_read(struct adapter *adapter, int phy_addr, int mmd_addr,
                    int reg_addr, unsigned int *valp)
{
        int ret;
        u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);

        if (mmd_addr)
                return -EINVAL;

        mutex_lock(&adapter->mdio_lock);
        t3_write_reg(adapter, A_MI1_ADDR, addr);
        t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));
        ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
        if (!ret)
                *valp = t3_read_reg(adapter, A_MI1_DATA);
        mutex_unlock(&adapter->mdio_lock);
        return ret;
}

static int mi1_write(struct adapter *adapter, int phy_addr, int mmd_addr,
                     int reg_addr, unsigned int val)
{
        int ret;
        u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);

        if (mmd_addr)
                return -EINVAL;

        mutex_lock(&adapter->mdio_lock);
        t3_write_reg(adapter, A_MI1_ADDR, addr);
        t3_write_reg(adapter, A_MI1_DATA, val);
        t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
        ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
        mutex_unlock(&adapter->mdio_lock);
        return ret;
}

static const struct mdio_ops mi1_mdio_ops = {
        mi1_read,
        mi1_write
};

/*
 * MI1 read/write operations for indirect-addressed PHYs.
 */
static int mi1_ext_read(struct adapter *adapter, int phy_addr, int mmd_addr,
                        int reg_addr, unsigned int *valp)
{
        int ret;
        u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);

        mutex_lock(&adapter->mdio_lock);
        t3_write_reg(adapter, A_MI1_ADDR, addr);
        t3_write_reg(adapter, A_MI1_DATA, reg_addr);
        t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
        ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
        if (!ret) {
                t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));
                ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
                                      MDIO_ATTEMPTS, 20);
                if (!ret)
                        *valp = t3_read_reg(adapter, A_MI1_DATA);
        }
        mutex_unlock(&adapter->mdio_lock);
        return ret;
}

static int mi1_ext_write(struct adapter *adapter, int phy_addr, int mmd_addr,
                         int reg_addr, unsigned int val)
{
        int ret;
        u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);

        mutex_lock(&adapter->mdio_lock);
        t3_write_reg(adapter, A_MI1_ADDR, addr);
        t3_write_reg(adapter, A_MI1_DATA, reg_addr);
        t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
        ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
        if (!ret) {
                t3_write_reg(adapter, A_MI1_DATA, val);
                t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
                ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
                                      MDIO_ATTEMPTS, 20);
        }
        mutex_unlock(&adapter->mdio_lock);
        return ret;
}

static const struct mdio_ops mi1_mdio_ext_ops = {
        mi1_ext_read,
        mi1_ext_write
};
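/*
 * Note on the two flavors above: direct-addressed PHYs (mi1_mdio_ops) take
 * the register number directly in A_MI1_ADDR, while indirect-addressed PHYs
 * (mi1_mdio_ext_ops) first load the register number through A_MI1_DATA with
 * an address-cycle operation (V_MDI_OP(0)) and only then issue the data
 * transfer, e.g. a read is the pair V_MDI_OP(0) followed by V_MDI_OP(3).
 */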
/**
 * t3_mdio_change_bits - modify the value of a PHY register
 * @phy: the PHY to operate on
 * @mmd: the device address
 * @reg: the register address
 * @clear: what part of the register value to mask off
 * @set: what part of the register value to set
 *
 * Changes the value of a PHY register by applying a mask to its current
 * value and ORing the result with a new value.
 */
int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
                        unsigned int set)
{
        int ret;
        unsigned int val;

        ret = mdio_read(phy, mmd, reg, &val);
        if (!ret) {
                val &= ~clear;
                ret = mdio_write(phy, mmd, reg, val | set);
        }
        return ret;
}

/**
 * t3_phy_reset - reset a PHY block
 * @phy: the PHY to operate on
 * @mmd: the device address of the PHY block to reset
 * @wait: how long to wait for the reset to complete in 1ms increments
 *
 * Resets a PHY block and optionally waits for the reset to complete.
 * @mmd should be 0 for 10/100/1000 PHYs and the device address to reset
 * for 10G PHYs.
 */
int t3_phy_reset(struct cphy *phy, int mmd, int wait)
{
        int err;
        unsigned int ctl;

        err = t3_mdio_change_bits(phy, mmd, MII_BMCR, BMCR_PDOWN, BMCR_RESET);
        if (err || !wait)
                return err;

        do {
                err = mdio_read(phy, mmd, MII_BMCR, &ctl);
                if (err)
                        return err;
                ctl &= BMCR_RESET;
                if (ctl)
                        msleep(1);
        } while (ctl && --wait);

        return ctl ? -1 : 0;
}

/**
 * t3_phy_advertise - set the PHY advertisement registers for autoneg
 * @phy: the PHY to operate on
 * @advert: bitmap of capabilities the PHY should advertise
 *
 * Sets a 10/100/1000 PHY's advertisement registers to advertise the
 * requested capabilities.
 */
int t3_phy_advertise(struct cphy *phy, unsigned int advert)
{
        int err;
        unsigned int val = 0;

        err = mdio_read(phy, 0, MII_CTRL1000, &val);
        if (err)
                return err;

        val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
        if (advert & ADVERTISED_1000baseT_Half)
                val |= ADVERTISE_1000HALF;
        if (advert & ADVERTISED_1000baseT_Full)
                val |= ADVERTISE_1000FULL;

        err = mdio_write(phy, 0, MII_CTRL1000, val);
        if (err)
                return err;

        val = 1;
        if (advert & ADVERTISED_10baseT_Half)
                val |= ADVERTISE_10HALF;
        if (advert & ADVERTISED_10baseT_Full)
                val |= ADVERTISE_10FULL;
        if (advert & ADVERTISED_100baseT_Half)
                val |= ADVERTISE_100HALF;
        if (advert & ADVERTISED_100baseT_Full)
                val |= ADVERTISE_100FULL;
        if (advert & ADVERTISED_Pause)
                val |= ADVERTISE_PAUSE_CAP;
        if (advert & ADVERTISED_Asym_Pause)
                val |= ADVERTISE_PAUSE_ASYM;
        return mdio_write(phy, 0, MII_ADVERTISE, val);
}
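/*
 * Example usage (illustrative): advertise 100 and 1000 full duplex plus
 * symmetric pause on a 10/100/1000 PHY.
 *
 *	t3_phy_advertise(phy, ADVERTISED_100baseT_Full |
 *			 ADVERTISED_1000baseT_Full | ADVERTISED_Pause);
 */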
/**
 * t3_set_phy_speed_duplex - force PHY speed and duplex
 * @phy: the PHY to operate on
 * @speed: requested PHY speed
 * @duplex: requested PHY duplex
 *
 * Force a 10/100/1000 PHY's speed and duplex.  This also disables
 * auto-negotiation except for GigE, where auto-negotiation is mandatory.
 */
int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
{
        int err;
        unsigned int ctl;

        err = mdio_read(phy, 0, MII_BMCR, &ctl);
        if (err)
                return err;

        if (speed >= 0) {
                ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
                if (speed == SPEED_100)
                        ctl |= BMCR_SPEED100;
                else if (speed == SPEED_1000)
                        ctl |= BMCR_SPEED1000;
        }
        if (duplex >= 0) {
                ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
                if (duplex == DUPLEX_FULL)
                        ctl |= BMCR_FULLDPLX;
        }
        if (ctl & BMCR_SPEED1000)  /* auto-negotiation required for GigE */
                ctl |= BMCR_ANENABLE;
        return mdio_write(phy, 0, MII_BMCR, ctl);
}
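/*
 * Example usage (illustrative): force 100 Mb/s full duplex.  Passing -1 for
 * either argument leaves that setting unchanged, as the sign checks above
 * show.
 *
 *	t3_set_phy_speed_duplex(phy, SPEED_100, DUPLEX_FULL);
 */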
static const struct adapter_info t3_adap_info[] = {
        {2, 0, 0, 0,
         F_GPIO2_OEN | F_GPIO4_OEN |
         F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, F_GPIO3 | F_GPIO5,
         0,
         &mi1_mdio_ops, "Chelsio PE9000"},
        {2, 0, 0, 0,
         F_GPIO2_OEN | F_GPIO4_OEN |
         F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, F_GPIO3 | F_GPIO5,
         0,
         &mi1_mdio_ops, "Chelsio T302"},
        {1, 0, 0, 0,
         F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
         F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, 0,
         SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
         &mi1_mdio_ext_ops, "Chelsio T310"},
        {2, 0, 0, 0,
         F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
         F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
         F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, 0,
         SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
         &mi1_mdio_ext_ops, "Chelsio T320"},
};

/*
 * Return the adapter_info structure with a given index.  Out-of-range
 * indices return NULL.
 */
const struct adapter_info *t3_get_adapter_info(unsigned int id)
{
        return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
}

#define CAPS_1G (SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Full | \
                 SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_MII)
#define CAPS_10G (SUPPORTED_10000baseT_Full | SUPPORTED_AUI)

static const struct port_type_info port_types[] = {
        {NULL},
        {t3_ael1002_phy_prep, CAPS_10G | SUPPORTED_FIBRE,
         "10GBASE-XR"},
        {t3_vsc8211_phy_prep, CAPS_1G | SUPPORTED_TP | SUPPORTED_IRQ,
         "10/100/1000BASE-T"},
        {NULL, CAPS_1G | SUPPORTED_TP | SUPPORTED_IRQ,
         "10/100/1000BASE-T"},
        {t3_xaui_direct_phy_prep, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4"},
        {NULL, CAPS_10G, "10GBASE-KX4"},
        {t3_qt2045_phy_prep, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4"},
        {t3_ael1006_phy_prep, CAPS_10G | SUPPORTED_FIBRE,
         "10GBASE-SR"},
        {NULL, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4"},
};

#undef CAPS_1G
#undef CAPS_10G

#define VPD_ENTRY(name, len) \
        u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]

/*
 * Partial EEPROM Vital Product Data structure.  Includes only the ID and
 * VPD-R sections.
 */
struct t3_vpd {
        u8 id_tag;
        u8 id_len[2];
        u8 id_data[16];
        u8 vpdr_tag;
        u8 vpdr_len[2];
        VPD_ENTRY(pn, 16);      /* part number */
        VPD_ENTRY(ec, 16);      /* EC level */
        VPD_ENTRY(sn, 16);      /* serial number */
        VPD_ENTRY(na, 12);      /* MAC address base */
        VPD_ENTRY(cclk, 6);     /* core clock */
        VPD_ENTRY(mclk, 6);     /* mem clock */
        VPD_ENTRY(uclk, 6);     /* uP clk */
        VPD_ENTRY(mdc, 6);      /* MDIO clk */
        VPD_ENTRY(mt, 2);       /* mem timing */
        VPD_ENTRY(xaui0cfg, 6); /* XAUI0 config */
        VPD_ENTRY(xaui1cfg, 6); /* XAUI1 config */
        VPD_ENTRY(port0, 2);    /* PHY0 complex */
        VPD_ENTRY(port1, 2);    /* PHY1 complex */
        VPD_ENTRY(port2, 2);    /* PHY2 complex */
        VPD_ENTRY(port3, 2);    /* PHY3 complex */
        VPD_ENTRY(rv, 1);       /* csum */
        u32 pad;                /* for multiple-of-4 sizing and alignment */
};

#define EEPROM_MAX_POLL   4
#define EEPROM_STAT_ADDR  0x4000
#define VPD_BASE          0xc00
/**
 * t3_seeprom_read - read a VPD EEPROM location
 * @adapter: adapter to read
 * @addr: EEPROM address
 * @data: where to store the read data
 *
 * Read a 32-bit word from a location in VPD EEPROM using the card's PCI
 * VPD ROM capability.  A zero is written to the flag bit when the
 * address is written to the control register.  The hardware device will
 * set the flag to 1 when 4 bytes have been read into the data register.
 */
int t3_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
{
        u16 val;
        int attempts = EEPROM_MAX_POLL;
        unsigned int base = adapter->params.pci.vpd_cap_addr;

        if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
                return -EINVAL;

        pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR, addr);
        do {
                udelay(10);
                pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
        } while (!(val & PCI_VPD_ADDR_F) && --attempts);

        if (!(val & PCI_VPD_ADDR_F)) {
                CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
                return -EIO;
        }
        pci_read_config_dword(adapter->pdev, base + PCI_VPD_DATA, data);
        *data = le32_to_cpu(*data);
        return 0;
}
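/*
 * Example usage (illustrative): fetch the 32-bit word at the start of the
 * VPD structure; get_vpd_params() below uses exactly this pattern.
 *
 *	u32 word;
 *
 *	if (t3_seeprom_read(adapter, VPD_BASE, &word))
 *		return -EIO;
 */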
/**
 * t3_seeprom_write - write a VPD EEPROM location
 * @adapter: adapter to write
 * @addr: EEPROM address
 * @data: value to write
 *
 * Write a 32-bit word to a location in VPD EEPROM using the card's PCI
 * VPD ROM capability.
 */
int t3_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
{
        u16 val;
        int attempts = EEPROM_MAX_POLL;
        unsigned int base = adapter->params.pci.vpd_cap_addr;

        if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
                return -EINVAL;

        pci_write_config_dword(adapter->pdev, base + PCI_VPD_DATA,
                               cpu_to_le32(data));
        pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR,
                              addr | PCI_VPD_ADDR_F);
        do {
                msleep(1);
                pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
        } while ((val & PCI_VPD_ADDR_F) && --attempts);

        if (val & PCI_VPD_ADDR_F) {
                CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
                return -EIO;
        }
        return 0;
}

/**
 * t3_seeprom_wp - enable/disable EEPROM write protection
 * @adapter: the adapter
 * @enable: 1 to enable write protection, 0 to disable it
 *
 * Enables or disables write protection on the serial EEPROM.
 */
int t3_seeprom_wp(struct adapter *adapter, int enable)
{
        return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
}

/*
 * Convert a character holding a hex digit to a number.
 */
static unsigned int hex2int(unsigned char c)
{
        return isdigit(c) ? c - '0' : toupper(c) - 'A' + 10;
}
/**
 * get_vpd_params - read VPD parameters from VPD EEPROM
 * @adapter: adapter to read
 * @p: where to store the parameters
 *
 * Reads card parameters stored in VPD EEPROM.
 */
static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
        int i, addr, ret;
        struct t3_vpd vpd;

        /*
         * Card information is normally at VPD_BASE but some early cards had
         * it at 0.
         */
        ret = t3_seeprom_read(adapter, VPD_BASE, (u32 *)&vpd);
        if (ret)
                return ret;
        addr = vpd.id_tag == 0x82 ? VPD_BASE : 0;

        for (i = 0; i < sizeof(vpd); i += 4) {
                ret = t3_seeprom_read(adapter, addr + i,
                                      (u32 *)((u8 *)&vpd + i));
                if (ret)
                        return ret;
        }

        p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
        p->mclk = simple_strtoul(vpd.mclk_data, NULL, 10);
        p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10);
        p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10);
        p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10);

        /* Old eeproms didn't have port information */
        if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
                p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
                p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
        } else {
                p->port_type[0] = hex2int(vpd.port0_data[0]);
                p->port_type[1] = hex2int(vpd.port1_data[0]);
                p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16);
                p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16);
        }

        for (i = 0; i < 6; i++)
                p->eth_base[i] = hex2int(vpd.na_data[2 * i]) * 16 +
                                 hex2int(vpd.na_data[2 * i + 1]);
        return 0;
}
/* serial flash and firmware constants */
enum {
        SF_ATTEMPTS = 5,                /* max retries for SF1 operations */
        SF_SEC_SIZE = 64 * 1024,        /* serial flash sector size */
        SF_SIZE = SF_SEC_SIZE * 8,      /* serial flash size */

        /* flash command opcodes */
        SF_PROG_PAGE = 2,               /* program page */
        SF_WR_DISABLE = 4,              /* disable writes */
        SF_RD_STATUS = 5,               /* read status register */
        SF_WR_ENABLE = 6,               /* enable writes */
        SF_RD_DATA_FAST = 0xb,          /* read flash */
        SF_ERASE_SECTOR = 0xd8,         /* erase sector */

        FW_FLASH_BOOT_ADDR = 0x70000,   /* start address of FW in flash */
        FW_VERS_ADDR = 0x77ffc,         /* flash address holding FW version */
        FW_MIN_SIZE = 8                 /* at least version and csum */
};
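/*
 * Layout implied by the constants above: with SF_SEC_SIZE = 64KB the
 * firmware sector index is FW_FLASH_BOOT_ADDR >> 16 = 7, i.e. the last of
 * the flash's 8 sectors, and FW_VERS_ADDR sits inside that same sector, so
 * the version word and trailing checksum are erased and rewritten together
 * with the image (see t3_load_fw() below).
 */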
/**
 * sf1_read - read data from the serial flash
 * @adapter: the adapter
 * @byte_cnt: number of bytes to read
 * @cont: whether another operation will be chained
 * @valp: where to store the read data
 *
 * Reads up to 4 bytes of data from the serial flash.  The location of
 * the read needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
                    u32 *valp)
{
        int ret;

        if (!byte_cnt || byte_cnt > 4)
                return -EINVAL;
        if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
                return -EBUSY;
        t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
        ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
        if (!ret)
                *valp = t3_read_reg(adapter, A_SF_DATA);
        return ret;
}

/**
 * sf1_write - write data to the serial flash
 * @adapter: the adapter
 * @byte_cnt: number of bytes to write
 * @cont: whether another operation will be chained
 * @val: value to write
 *
 * Writes up to 4 bytes of data to the serial flash.  The location of
 * the write needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
                     u32 val)
{
        if (!byte_cnt || byte_cnt > 4)
                return -EINVAL;
        if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
                return -EBUSY;
        t3_write_reg(adapter, A_SF_DATA, val);
        t3_write_reg(adapter, A_SF_OP,
                     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
        return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
}

/**
 * flash_wait_op - wait for a flash operation to complete
 * @adapter: the adapter
 * @attempts: max number of polls of the status register
 * @delay: delay between polls in ms
 *
 * Wait for a flash operation to complete by polling the status register.
 */
static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
{
        int ret;
        u32 status;

        while (1) {
                if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 ||
                    (ret = sf1_read(adapter, 1, 0, &status)) != 0)
                        return ret;
                if (!(status & 1))
                        return 0;
                if (--attempts == 0)
                        return -EAGAIN;
                if (delay)
                        msleep(delay);
        }
}

/**
 * t3_read_flash - read words from serial flash
 * @adapter: the adapter
 * @addr: the start address for the read
 * @nwords: how many 32-bit words to read
 * @data: where to store the read data
 * @byte_oriented: whether to store data as bytes or as words
 *
 * Read the specified number of 32-bit words from the serial flash.
 * If @byte_oriented is set the read data is stored as a byte array
 * (i.e., big-endian), otherwise as 32-bit words in the platform's
 * natural endianness.
 */
int t3_read_flash(struct adapter *adapter, unsigned int addr,
                  unsigned int nwords, u32 *data, int byte_oriented)
{
        int ret;

        if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
                return -EINVAL;

        addr = swab32(addr) | SF_RD_DATA_FAST;

        if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 ||
            (ret = sf1_read(adapter, 1, 1, data)) != 0)
                return ret;

        for (; nwords; nwords--, data++) {
                ret = sf1_read(adapter, 4, nwords > 1, data);
                if (ret)
                        return ret;
                if (byte_oriented)
                        *data = htonl(*data);
        }
        return 0;
}
/**
 * t3_write_flash - write up to a page of data to the serial flash
 * @adapter: the adapter
 * @addr: the start address to write
 * @n: length of data to write
 * @data: the data to write
 *
 * Writes up to a page of data (256 bytes) to the serial flash starting
 * at the given address.
 */
static int t3_write_flash(struct adapter *adapter, unsigned int addr,
                          unsigned int n, const u8 *data)
{
        int ret;
        u32 buf[64];
        unsigned int i, c, left, val, offset = addr & 0xff;

        if (addr + n > SF_SIZE || offset + n > 256)
                return -EINVAL;

        val = swab32(addr) | SF_PROG_PAGE;

        if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
            (ret = sf1_write(adapter, 4, 1, val)) != 0)
                return ret;

        for (left = n; left; left -= c) {
                c = min(left, 4U);
                for (val = 0, i = 0; i < c; ++i)
                        val = (val << 8) + *data++;

                ret = sf1_write(adapter, c, c != left, val);
                if (ret)
                        return ret;
        }
        if ((ret = flash_wait_op(adapter, 5, 1)) != 0)
                return ret;

        /* Read the page to verify the write succeeded */
        ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
        if (ret)
                return ret;

        if (memcmp(data - n, (u8 *) buf + offset, n))
                return -EIO;
        return 0;
}

enum fw_version_type {
        FW_VERSION_N3,
        FW_VERSION_T3
};
/**
 * t3_get_fw_version - read the firmware version
 * @adapter: the adapter
 * @vers: where to place the version
 *
 * Reads the FW version from flash.
 */
int t3_get_fw_version(struct adapter *adapter, u32 *vers)
{
        return t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);
}

/**
 * t3_check_fw_version - check if the FW is compatible with this driver
 * @adapter: the adapter
 *
 * Checks if an adapter's FW is compatible with the driver.  Returns 0
 * if the versions are compatible, a negative error otherwise.
 */
int t3_check_fw_version(struct adapter *adapter)
{
        int ret;
        u32 vers;
        unsigned int type, major, minor;

        ret = t3_get_fw_version(adapter, &vers);
        if (ret)
                return ret;

        type = G_FW_VERSION_TYPE(vers);
        major = G_FW_VERSION_MAJOR(vers);
        minor = G_FW_VERSION_MINOR(vers);

        if (type == FW_VERSION_T3 && major == FW_VERSION_MAJOR &&
            minor == FW_VERSION_MINOR)
                return 0;

        CH_ERR(adapter, "found wrong FW version (%u.%u), "
               "driver needs version %u.%u\n", major, minor,
               FW_VERSION_MAJOR, FW_VERSION_MINOR);
        return -EINVAL;
}
/**
 * t3_flash_erase_sectors - erase a range of flash sectors
 * @adapter: the adapter
 * @start: the first sector to erase
 * @end: the last sector to erase
 *
 * Erases the sectors in the given range.
 */
static int t3_flash_erase_sectors(struct adapter *adapter, int start, int end)
{
        while (start <= end) {
                int ret;

                if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
                    (ret = sf1_write(adapter, 4, 0,
                                     SF_ERASE_SECTOR | (start << 8))) != 0 ||
                    (ret = flash_wait_op(adapter, 5, 500)) != 0)
                        return ret;
                start++;
        }
        return 0;
}

/**
 * t3_load_fw - download firmware
 * @adapter: the adapter
 * @fw_data: the firmware image to write
 * @size: image size
 *
 * Write the supplied firmware image to the card's serial flash.
 * The FW image has the following sections: @size - 8 bytes of code and
 * data, followed by 4 bytes of FW version, followed by the 32-bit
 * 1's complement checksum of the whole image.
 */
int t3_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size)
{
        u32 csum;
        unsigned int i;
        const u32 *p = (const u32 *)fw_data;
        int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;

        if ((size & 3) || size < FW_MIN_SIZE)
                return -EINVAL;
        if (size > FW_VERS_ADDR + 8 - FW_FLASH_BOOT_ADDR)
                return -EFBIG;

        for (csum = 0, i = 0; i < size / sizeof(csum); i++)
                csum += ntohl(p[i]);
        if (csum != 0xffffffff) {
                CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
                       csum);
                return -EINVAL;
        }

        ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);
        if (ret)
                goto out;

        size -= 8;              /* trim off version and checksum */
        for (addr = FW_FLASH_BOOT_ADDR; size;) {
                unsigned int chunk_size = min(size, 256U);

                ret = t3_write_flash(adapter, addr, chunk_size, fw_data);
                if (ret)
                        goto out;

                addr += chunk_size;
                fw_data += chunk_size;
                size -= chunk_size;
        }

        ret = t3_write_flash(adapter, FW_VERS_ADDR, 4, fw_data);
out:
        if (ret)
                CH_ERR(adapter, "firmware download failed, error %d\n", ret);
        return ret;
}
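/*
 * Example usage (illustrative sketch): a caller would typically obtain the
 * image via request_firmware() and hand the raw bytes to t3_load_fw().  The
 * firmware file name below is a placeholder.
 *
 *	const struct firmware *fw;
 *
 *	if (request_firmware(&fw, "t3fw.bin", &adapter->pdev->dev) == 0) {
 *		t3_load_fw(adapter, fw->data, fw->size);
 *		release_firmware(fw);
 *	}
 */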
#define CIM_CTL_BASE 0x2000

/**
 * t3_cim_ctl_blk_read - read a block from CIM control region
 * @adap: the adapter
 * @addr: the start address within the CIM control region
 * @n: number of words to read
 * @valp: where to store the result
 *
 * Reads a block of 4-byte words from the CIM control region.
 */
int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr,
                        unsigned int n, unsigned int *valp)
{
        int ret = 0;

        if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
                return -EBUSY;

        for ( ; !ret && n--; addr += 4) {
                t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr);
                ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
                                      0, 5, 2);
                if (!ret)
                        *valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA);
        }
        return ret;
}
/**
 * t3_link_changed - handle interface link changes
 * @adapter: the adapter
 * @port_id: the port index that changed link state
 *
 * Called when a port's link settings change to propagate the new values
 * to the associated PHY and MAC.  After performing the common tasks it
 * invokes an OS-specific handler.
 */
void t3_link_changed(struct adapter *adapter, int port_id)
{
        int link_ok, speed, duplex, fc;
        struct port_info *pi = adap2pinfo(adapter, port_id);
        struct cphy *phy = &pi->phy;
        struct cmac *mac = &pi->mac;
        struct link_config *lc = &pi->link_config;

        phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);

        if (link_ok != lc->link_ok && adapter->params.rev > 0 &&
            uses_xaui(adapter)) {
                if (link_ok)
                        t3b_pcs_reset(mac);
                t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
                             link_ok ? F_TXACTENABLE | F_RXEN : 0);
        }
        lc->link_ok = link_ok;
        lc->speed = speed < 0 ? SPEED_INVALID : speed;
        lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
        if (lc->requested_fc & PAUSE_AUTONEG)
                fc &= lc->requested_fc;
        else
                fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);

        if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
                /* Set MAC speed, duplex, and flow control to match PHY. */
                t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc);
                lc->fc = fc;
        }

        t3_os_link_changed(adapter, port_id, link_ok, speed, duplex, fc);
}
/**
 * t3_link_start - apply link configuration to MAC/PHY
 * @phy: the PHY to setup
 * @mac: the MAC to setup
 * @lc: the requested link configuration
 *
 * Set up a port's MAC and PHY according to a desired link configuration.
 * - If the PHY can auto-negotiate first decide what to advertise, then
 *   enable/disable auto-negotiation as desired, and reset.
 * - If the PHY does not auto-negotiate just reset it.
 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
 *   otherwise do it later based on the outcome of auto-negotiation.
 */
int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
{
        unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);

        lc->link_ok = 0;
        if (lc->supported & SUPPORTED_Autoneg) {
                lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);
                if (fc) {
                        lc->advertising |= ADVERTISED_Asym_Pause;
                        if (fc & PAUSE_RX)
                                lc->advertising |= ADVERTISED_Pause;
                }
                phy->ops->advertise(phy, lc->advertising);

                if (lc->autoneg == AUTONEG_DISABLE) {
                        lc->speed = lc->requested_speed;
                        lc->duplex = lc->requested_duplex;
                        lc->fc = (unsigned char)fc;
                        t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex,
                                                   fc);
                        /* Also disables autoneg */
                        phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
                        phy->ops->reset(phy, 0);
                } else
                        phy->ops->autoneg_enable(phy);
        } else {
                t3_mac_set_speed_duplex_fc(mac, -1, -1, fc);
                lc->fc = (unsigned char)fc;
                phy->ops->reset(phy, 0);
        }
        return 0;
}

/**
 * t3_set_vlan_accel - control HW VLAN extraction
 * @adapter: the adapter
 * @ports: bitmap of adapter ports to operate on
 * @on: enable (1) or disable (0) HW VLAN extraction
 *
 * Enables or disables HW extraction of VLAN tags for the ports specified
 * in the @ports bitmap.
 */
void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on)
{
        t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
                         ports << S_VLANEXTRACTIONENABLE,
                         on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
}
struct intr_info {
        unsigned int mask;      /* bits to check in interrupt status */
        const char *msg;        /* message to print or NULL */
        short stat_idx;         /* stat counter to increment or -1 */
        unsigned short fatal:1; /* whether the condition reported is fatal */
};

/**
 * t3_handle_intr_status - table driven interrupt handler
 * @adapter: the adapter that generated the interrupt
 * @reg: the interrupt status register to process
 * @mask: a mask to apply to the interrupt status
 * @acts: table of interrupt actions
 * @stats: statistics counters tracking interrupt occurrences
 *
 * A table driven interrupt handler that applies a set of masks to an
 * interrupt status word and performs the corresponding actions if the
 * interrupts described by the mask have occurred.  The actions include
 * optionally printing a warning or alert message, and optionally
 * incrementing a stat counter.  The table is terminated by an entry
 * specifying mask 0.  Returns the number of fatal interrupt conditions.
 */
static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg,
                                 unsigned int mask,
                                 const struct intr_info *acts,
                                 unsigned long *stats)
{
        int fatal = 0;
        unsigned int status = t3_read_reg(adapter, reg) & mask;

        for (; acts->mask; ++acts) {
                if (!(status & acts->mask))
                        continue;
                if (acts->fatal) {
                        fatal++;
                        CH_ALERT(adapter, "%s (0x%x)\n",
                                 acts->msg, status & acts->mask);
                } else if (acts->msg)
                        CH_WARN(adapter, "%s (0x%x)\n",
                                acts->msg, status & acts->mask);
                if (acts->stat_idx >= 0)
                        stats[acts->stat_idx]++;
        }
        if (status)             /* clear processed interrupts */
                t3_write_reg(adapter, reg, status);
        return fatal;
}
#define SGE_INTR_MASK (F_RSPQDISABLED)
#define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
                       F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
                       F_NFASRCHFAIL)
#define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
#define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
                       V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
                       F_TXFIFO_UNDERRUN | F_RXFIFO_OVERFLOW)
#define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
                        F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
                        F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
                        F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \
                        V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \
                        V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */)
#define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\
                        F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
                        /* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \
                        V_BISTERR(M_BISTERR) | F_PEXERR)
#define ULPRX_INTR_MASK F_PARERR
#define ULPTX_INTR_MASK 0
#define CPLSW_INTR_MASK (F_TP_FRAMING_ERROR | \
                         F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
                         F_ZERO_SWITCH_ERROR)
#define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
                       F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
                       F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
                       F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT)
#define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
                        V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
                        V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
#define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \
                        V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \
                        V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR))
#define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \
                       V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
                       V_RXTPPARERRENB(M_RXTPPARERRENB) | \
                       V_MCAPARERRENB(M_MCAPARERRENB))
#define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
                      F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
                      F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
                      F_MPS0 | F_CPL_SWITCH)
/*
 * Interrupt handler for the PCIX1 module.
 */
static void pci_intr_handler(struct adapter *adapter)
{
        static const struct intr_info pcix1_intr_info[] = {
                {F_MSTDETPARERR, "PCI master detected parity error", -1, 1},
                {F_SIGTARABT, "PCI signaled target abort", -1, 1},
                {F_RCVTARABT, "PCI received target abort", -1, 1},
                {F_RCVMSTABT, "PCI received master abort", -1, 1},
                {F_SIGSYSERR, "PCI signaled system error", -1, 1},
                {F_DETPARERR, "PCI detected parity error", -1, 1},
                {F_SPLCMPDIS, "PCI split completion discarded", -1, 1},
                {F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1},
                {F_RCVSPLCMPERR, "PCI received split completion error", -1,
                 1},
                {F_DETCORECCERR, "PCI correctable ECC error",
                 STAT_PCI_CORR_ECC, 0},
                {F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1},
                {F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
                {V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1,
                 1},
                {V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1,
                 1},
                {V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1,
                 1},
                {V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
                 "error", -1, 1},
                {0}
        };

        if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
                                  pcix1_intr_info, adapter->irq_stats))
                t3_fatal_err(adapter);
}

/*
 * Interrupt handler for the PCIE module.
 */
static void pcie_intr_handler(struct adapter *adapter)
{
        static const struct intr_info pcie_intr_info[] = {
                {F_PEXERR, "PCI PEX error", -1, 1},
                {F_UNXSPLCPLERRR,
                 "PCI unexpected split completion DMA read error", -1, 1},
                {F_UNXSPLCPLERRC,
                 "PCI unexpected split completion DMA command error", -1, 1},
                {F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
                {F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1},
                {F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1},
                {F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1},
                {V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
                 "PCI MSI-X table/PBA parity error", -1, 1},
                {V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1},
                {0}
        };

        if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
                                  pcie_intr_info, adapter->irq_stats))
                t3_fatal_err(adapter);
}

/*
 * TP interrupt handler.
 */
static void tp_intr_handler(struct adapter *adapter)
{
        static const struct intr_info tp_intr_info[] = {
                {0xffffff, "TP parity error", -1, 1},
                {0x1000000, "TP out of Rx pages", -1, 1},
                {0x2000000, "TP out of Tx pages", -1, 1},
                {0}
        };

        if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
                                  tp_intr_info, NULL))
                t3_fatal_err(adapter);
}
/*
 * CIM interrupt handler.
 */
static void cim_intr_handler(struct adapter *adapter)
{
        static const struct intr_info cim_intr_info[] = {
                {F_RSVDSPACEINT, "CIM reserved space write", -1, 1},
                {F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1},
                {F_FLASHRANGEINT, "CIM flash address out of range", -1, 1},
                {F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1},
                {F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1},
                {F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1},
                {F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1},
                {F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1},
                {F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1},
                {F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1},
                {F_BLKRDPLINT, "CIM block read from PL space", -1, 1},
                {F_BLKWRPLINT, "CIM block write to PL space", -1, 1},
                {0}
        };

        if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, 0xffffffff,
                                  cim_intr_info, NULL))
                t3_fatal_err(adapter);
}

/*
 * ULP RX interrupt handler.
 */
static void ulprx_intr_handler(struct adapter *adapter)
{
        static const struct intr_info ulprx_intr_info[] = {
                {F_PARERR, "ULP RX parity error", -1, 1},
                {0}
        };

        if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
                                  ulprx_intr_info, NULL))
                t3_fatal_err(adapter);
}

/*
 * ULP TX interrupt handler.
 */
static void ulptx_intr_handler(struct adapter *adapter)
{
        static const struct intr_info ulptx_intr_info[] = {
                {F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
                 STAT_ULP_CH0_PBL_OOB, 0},
                {F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
                 STAT_ULP_CH1_PBL_OOB, 0},
                {0}
        };

        if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff,
                                  ulptx_intr_info, adapter->irq_stats))
                t3_fatal_err(adapter);
}

#define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \
        F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \
        F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \
        F_ICSPI1_TX_FRAMING_ERROR)
#define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \
        F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \
        F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \
        F_OESPI1_OFIFO2X_TX_FRAMING_ERROR)

/*
 * PM TX interrupt handler.
 */
static void pmtx_intr_handler(struct adapter *adapter)
{
        static const struct intr_info pmtx_intr_info[] = {
                {F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1},
                {ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1},
                {OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1},
                {V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
                 "PMTX ispi parity error", -1, 1},
                {V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
                 "PMTX ospi parity error", -1, 1},
                {0}
        };

        if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff,
                                  pmtx_intr_info, NULL))
                t3_fatal_err(adapter);
}

#define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \
        F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \
        F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \
        F_IESPI1_TX_FRAMING_ERROR)
#define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \
        F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \
        F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \
        F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)

/*
 * PM RX interrupt handler.
 */
static void pmrx_intr_handler(struct adapter *adapter)
{
        static const struct intr_info pmrx_intr_info[] = {
                {F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1},
                {IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1},
                {OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1},
                {V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
                 "PMRX ispi parity error", -1, 1},
                {V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
                 "PMRX ospi parity error", -1, 1},
                {0}
        };

        if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff,
                                  pmrx_intr_info, NULL))
                t3_fatal_err(adapter);
}

/*
 * CPL switch interrupt handler.
 */
static void cplsw_intr_handler(struct adapter *adapter)
{
        static const struct intr_info cplsw_intr_info[] = {
                /* { F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1 }, */
                {F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1},
                {F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1},
                {F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1},
                {F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1},
                {0}
        };

        if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff,
                                  cplsw_intr_info, NULL))
                t3_fatal_err(adapter);
}

/*
 * MPS interrupt handler.
 */
static void mps_intr_handler(struct adapter *adapter)
{
        static const struct intr_info mps_intr_info[] = {
                {0x1ff, "MPS parity error", -1, 1},
                {0}
        };

        if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
                                  mps_intr_info, NULL))
                t3_fatal_err(adapter);
}
#define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE)

/*
 * MC7 interrupt handler.
 */
static void mc7_intr_handler(struct mc7 *mc7)
{
        struct adapter *adapter = mc7->adapter;
        u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE);

        if (cause & F_CE) {
                mc7->stats.corr_err++;
                CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, "
                        "data 0x%x 0x%x 0x%x\n", mc7->name,
                        t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR),
                        t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0),
                        t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1),
                        t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2));
        }

        if (cause & F_UE) {
                mc7->stats.uncorr_err++;
                CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, "
                         "data 0x%x 0x%x 0x%x\n", mc7->name,
                         t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR),
                         t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0),
                         t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1),
                         t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2));
        }

        if (G_PE(cause)) {
                mc7->stats.parity_err++;
                CH_ALERT(adapter, "%s MC7 parity error 0x%x\n",
                         mc7->name, G_PE(cause));
        }

        if (cause & F_AE) {
                u32 addr = 0;

                if (adapter->params.rev > 0)
                        addr = t3_read_reg(adapter,
                                           mc7->offset + A_MC7_ERR_ADDR);
                mc7->stats.addr_err++;
                CH_ALERT(adapter, "%s MC7 address error: 0x%x\n",
                         mc7->name, addr);
        }

        if (cause & MC7_INTR_FATAL)
                t3_fatal_err(adapter);

        t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause);
}

#define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
                        V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR))

/*
 * XGMAC interrupt handler.
 */
static int mac_intr_handler(struct adapter *adap, unsigned int idx)
{
        struct cmac *mac = &adap2pinfo(adap, idx)->mac;
        u32 cause = t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset);

        if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
                mac->stats.tx_fifo_parity_err++;
                CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx);
        }
        if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) {
                mac->stats.rx_fifo_parity_err++;
                CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx);
        }
        if (cause & F_TXFIFO_UNDERRUN)
                mac->stats.tx_fifo_urun++;
        if (cause & F_RXFIFO_OVERFLOW)
                mac->stats.rx_fifo_ovfl++;
        if (cause & V_SERDES_LOS(M_SERDES_LOS))
                mac->stats.serdes_signal_loss++;
        if (cause & F_XAUIPCSCTCERR)
                mac->stats.xaui_pcs_ctc_err++;
        if (cause & F_XAUIPCSALIGNCHANGE)
                mac->stats.xaui_pcs_align_change++;

        t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
        if (cause & XGM_INTR_FATAL)
                t3_fatal_err(adap);
        return cause != 0;
}
/*
 * Interrupt handler for PHY events.
 */
int t3_phy_intr_handler(struct adapter *adapter)
{
        u32 mask, gpi = adapter_info(adapter)->gpio_intr;
        u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);

        for_each_port(adapter, i) {
                struct port_info *p = adap2pinfo(adapter, i);

                mask = gpi - (gpi & (gpi - 1));
                gpi -= mask;

                if (!(p->port_type->caps & SUPPORTED_IRQ))
                        continue;

                if (cause & mask) {
                        int phy_cause = p->phy.ops->intr_handler(&p->phy);

                        if (phy_cause & cphy_cause_link_change)
                                t3_link_changed(adapter, i);
                        if (phy_cause & cphy_cause_fifo_error)
                                p->phy.fifo_errors++;
                }
        }

        t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause);
        return 0;
}

/*
 * T3 slow path (non-data) interrupt handler.
 */
int t3_slow_intr_handler(struct adapter *adapter)
{
        u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0);

        cause &= adapter->slow_intr_mask;
        if (!cause)
                return 0;
        if (cause & F_PCIM0) {
                if (is_pcie(adapter))
                        pcie_intr_handler(adapter);
                else
                        pci_intr_handler(adapter);
        }
        if (cause & F_SGE3)
                t3_sge_err_intr_handler(adapter);
        if (cause & F_MC7_PMRX)
                mc7_intr_handler(&adapter->pmrx);
        if (cause & F_MC7_PMTX)
                mc7_intr_handler(&adapter->pmtx);
        if (cause & F_MC7_CM)
                mc7_intr_handler(&adapter->cm);
        if (cause & F_CIM)
                cim_intr_handler(adapter);
        if (cause & F_TP1)
                tp_intr_handler(adapter);
        if (cause & F_ULP2_RX)
                ulprx_intr_handler(adapter);
        if (cause & F_ULP2_TX)
                ulptx_intr_handler(adapter);
        if (cause & F_PM1_RX)
                pmrx_intr_handler(adapter);
        if (cause & F_PM1_TX)
                pmtx_intr_handler(adapter);
        if (cause & F_CPL_SWITCH)
                cplsw_intr_handler(adapter);
        if (cause & F_MPS0)
                mps_intr_handler(adapter);
        if (cause & F_MC5A)
                t3_mc5_intr_handler(&adapter->mc5);
        if (cause & F_XGMAC0_0)
                mac_intr_handler(adapter, 0);
        if (cause & F_XGMAC0_1)
                mac_intr_handler(adapter, 1);
        if (cause & F_T3DBG)
                t3_os_ext_intr_handler(adapter);

        /* Clear the interrupts just processed. */
        t3_write_reg(adapter, A_PL_INT_CAUSE0, cause);
        t3_read_reg(adapter, A_PL_INT_CAUSE0);  /* flush */
        return 1;
}
/**
 * t3_intr_enable - enable interrupts
 * @adapter: the adapter whose interrupts should be enabled
 *
 * Enable interrupts by setting the interrupt enable registers of the
 * various HW modules and then enabling the top-level interrupt
 * concentrator.
 */
void t3_intr_enable(struct adapter *adapter)
{
        static const struct addr_val_pair intr_en_avp[] = {
                {A_SG_INT_ENABLE, SGE_INTR_MASK},
                {A_MC7_INT_ENABLE, MC7_INTR_MASK},
                {A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
                 MC7_INTR_MASK},
                {A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
                 MC7_INTR_MASK},
                {A_MC5_DB_INT_ENABLE, MC5_INTR_MASK},
                {A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK},
                {A_TP_INT_ENABLE, 0x3bfffff},
                {A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK},
                {A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK},
                {A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK},
                {A_MPS_INT_ENABLE, MPS_INTR_MASK},
        };

        adapter->slow_intr_mask = PL_INTR_MASK;

        t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0);

        if (adapter->params.rev > 0) {
                t3_write_reg(adapter, A_CPL_INTR_ENABLE,
                             CPLSW_INTR_MASK | F_CIM_OVFL_ERROR);
                t3_write_reg(adapter, A_ULPTX_INT_ENABLE,
                             ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 |
                             F_PBL_BOUND_ERR_CH1);
        } else {
                t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK);
                t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
        }

        t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW,
                     adapter_info(adapter)->gpio_intr);
        t3_write_reg(adapter, A_T3DBG_INT_ENABLE,
                     adapter_info(adapter)->gpio_intr);
        if (is_pcie(adapter))
                t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
        else
                t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK);
        t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask);
        t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
}

/**
 * t3_intr_disable - disable a card's interrupts
 * @adapter: the adapter whose interrupts should be disabled
 *
 * Disable interrupts.  We only disable the top-level interrupt
 * concentrator and the SGE data interrupts.
 */
void t3_intr_disable(struct adapter *adapter)
{
        t3_write_reg(adapter, A_PL_INT_ENABLE0, 0);
        t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
        adapter->slow_intr_mask = 0;
}

/**
 * t3_intr_clear - clear all interrupts
 * @adapter: the adapter whose interrupts should be cleared
 *
 * Clears all interrupts.
 */
void t3_intr_clear(struct adapter *adapter)
{
        static const unsigned int cause_reg_addr[] = {
                A_SG_INT_CAUSE,
                A_SG_RSPQ_FL_STATUS,
                A_PCIX_INT_CAUSE,
                A_MC7_INT_CAUSE,
                A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
                A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
                A_CIM_HOST_INT_CAUSE,
                A_TP_INT_CAUSE,
                A_MC5_DB_INT_CAUSE,
                A_ULPRX_INT_CAUSE,
                A_ULPTX_INT_CAUSE,
                A_CPL_INTR_CAUSE,
                A_PM1_TX_INT_CAUSE,
                A_PM1_RX_INT_CAUSE,
                A_MPS_INT_CAUSE,
                A_T3DBG_INT_CAUSE,
        };
        unsigned int i;

        /* Clear PHY and MAC interrupts for each port. */
        for_each_port(adapter, i)
                t3_port_intr_clear(adapter, i);

        for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
                t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);

        t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
        t3_read_reg(adapter, A_PL_INT_CAUSE0);  /* flush */
}
/**
 * t3_port_intr_enable - enable port-specific interrupts
 * @adapter: associated adapter
 * @idx: index of port whose interrupts should be enabled
 *
 * Enable port-specific (i.e., MAC and PHY) interrupts for the given
 * adapter port.
 */
void t3_port_intr_enable(struct adapter *adapter, int idx)
{
        struct cphy *phy = &adap2pinfo(adapter, idx)->phy;

        t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), XGM_INTR_MASK);
        t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
        phy->ops->intr_enable(phy);
}

/**
 * t3_port_intr_disable - disable port-specific interrupts
 * @adapter: associated adapter
 * @idx: index of port whose interrupts should be disabled
 *
 * Disable port-specific (i.e., MAC and PHY) interrupts for the given
 * adapter port.
 */
void t3_port_intr_disable(struct adapter *adapter, int idx)
{
        struct cphy *phy = &adap2pinfo(adapter, idx)->phy;

        t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), 0);
        t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
        phy->ops->intr_disable(phy);
}

/**
 * t3_port_intr_clear - clear port-specific interrupts
 * @adapter: associated adapter
 * @idx: index of port whose interrupts to clear
 *
 * Clear port-specific (i.e., MAC and PHY) interrupts for the given
 * adapter port.
 */
void t3_port_intr_clear(struct adapter *adapter, int idx)
{
        struct cphy *phy = &adap2pinfo(adapter, idx)->phy;

        t3_write_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx), 0xffffffff);
        t3_read_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx)); /* flush */
        phy->ops->intr_clear(phy);
}
/**
 * t3_sge_write_context - write an SGE context
 * @adapter: the adapter
 * @id: the context id
 * @type: the context type
 *
 * Program an SGE context with the values already loaded in the
 * CONTEXT_DATA? registers.
 */
static int t3_sge_write_context(struct adapter *adapter, unsigned int id,
                                unsigned int type)
{
        t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
        t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
        t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
        t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
        t3_write_reg(adapter, A_SG_CONTEXT_CMD,
                     V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
        return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
                               0, 5, 1);
}

/**
 * t3_sge_init_ecntxt - initialize an SGE egress context
 * @adapter: the adapter to configure
 * @id: the context id
 * @gts_enable: whether to enable GTS for the context
 * @type: the egress context type
 * @respq: associated response queue
 * @base_addr: base address of queue
 * @size: number of queue entries
 * @token: uP token
 * @gen: initial generation value for the context
 * @cidx: consumer pointer
 *
 * Initialize an SGE egress context and make it ready for use.  If the
 * platform allows concurrent context operations, the caller is
 * responsible for appropriate locking.
 */
int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable,
                       enum sge_context_type type, int respq, u64 base_addr,
                       unsigned int size, unsigned int token, int gen,
                       unsigned int cidx)
{
        unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM;

        if (base_addr & 0xfff)  /* must be 4K aligned */
                return -EINVAL;
        if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
                return -EBUSY;

        base_addr >>= 12;
        t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) |
                     V_EC_CREDITS(credits) | V_EC_GTS(gts_enable));
        t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) |
                     V_EC_BASE_LO(base_addr & 0xffff));
        base_addr >>= 16;
        t3_write_reg(adapter, A_SG_CONTEXT_DATA2, base_addr);
        base_addr >>= 32;
        t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
                     V_EC_BASE_HI(base_addr & 0xf) | V_EC_RESPQ(respq) |
                     V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) |
                     F_EC_VALID);
        return t3_sge_write_context(adapter, id, F_EGRESS);
}
1831 * t3_sge_init_flcntxt - initialize an SGE free-buffer list context
1832 * @adapter: the adapter to configure
1833 * @id: the context id
1834 * @gts_enable: whether to enable GTS for the context
1835 * @base_addr: base address of queue
1836 * @size: number of queue entries
1837 * @bsize: size of each buffer for this queue
1838 * @cong_thres: threshold to signal congestion to upstream producers
1839 * @gen: initial generation value for the context
1840 * @cidx: consumer pointer
1842 * Initialize an SGE free list context and make it ready for use. The
1843 * caller is responsible for ensuring only one context operation occurs
1844 * at a time.
1846 int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id,
1847 int gts_enable, u64 base_addr, unsigned int size,
1848 unsigned int bsize, unsigned int cong_thres, int gen,
1849 unsigned int cidx)
1851 if (base_addr & 0xfff) /* must be 4K aligned */
1852 return -EINVAL;
1853 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1854 return -EBUSY;
1856 base_addr >>= 12;
1857 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, base_addr);
1858 base_addr >>= 32;
1859 t3_write_reg(adapter, A_SG_CONTEXT_DATA1,
1860 V_FL_BASE_HI((u32) base_addr) |
1861 V_FL_INDEX_LO(cidx & M_FL_INDEX_LO));
1862 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) |
1863 V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) |
1864 V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO));
1865 t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
1866 V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) |
1867 V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable));
1868 return t3_sge_write_context(adapter, id, F_FREELIST);
1872 * t3_sge_init_rspcntxt - initialize an SGE response queue context
1873 * @adapter: the adapter to configure
1874 * @id: the context id
1875 * @irq_vec_idx: MSI-X interrupt vector index, 0 if no MSI-X, -1 if no IRQ
1876 * @base_addr: base address of queue
1877 * @size: number of queue entries
1878 * @fl_thres: threshold for selecting the normal or jumbo free list
1879 * @gen: initial generation value for the context
1880 * @cidx: consumer pointer
1882 * Initialize an SGE response queue context and make it ready for use.
1883 * The caller is responsible for ensuring only one context operation
1884 * occurs at a time.
1886 int t3_sge_init_rspcntxt(struct adapter *adapter, unsigned int id,
1887 int irq_vec_idx, u64 base_addr, unsigned int size,
1888 unsigned int fl_thres, int gen, unsigned int cidx)
1890 unsigned int intr = 0;
1892 if (base_addr & 0xfff) /* must be 4K aligned */
1893 return -EINVAL;
1894 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1895 return -EBUSY;
1897 base_addr >>= 12;
1898 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) |
1899 V_CQ_INDEX(cidx));
1900 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
1901 base_addr >>= 32;
1902 if (irq_vec_idx >= 0)
1903 intr = V_RQ_MSI_VEC(irq_vec_idx) | F_RQ_INTR_EN;
1904 t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
1905 V_CQ_BASE_HI((u32) base_addr) | intr | V_RQ_GEN(gen));
1906 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres);
1907 return t3_sge_write_context(adapter, id, F_RESPONSEQ);
1911 * t3_sge_init_cqcntxt - initialize an SGE completion queue context
1912 * @adapter: the adapter to configure
1913 * @id: the context id
1914 * @base_addr: base address of queue
1915 * @size: number of queue entries
1916 * @rspq: response queue for async notifications
1917 * @ovfl_mode: CQ overflow mode
1918 * @credits: completion queue credits
1919 * @credit_thres: the credit threshold
1921 * Initialize an SGE completion queue context and make it ready for use.
1922 * The caller is responsible for ensuring only one context operation
1923 * occurs at a time.
1925 int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr,
1926 unsigned int size, int rspq, int ovfl_mode,
1927 unsigned int credits, unsigned int credit_thres)
1929 if (base_addr & 0xfff) /* must be 4K aligned */
1930 return -EINVAL;
1931 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1932 return -EBUSY;
1934 base_addr >>= 12;
1935 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size));
1936 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
1937 base_addr >>= 32;
1938 t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
1939 V_CQ_BASE_HI((u32) base_addr) | V_CQ_RSPQ(rspq) |
1940 V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode));
1941 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
1942 V_CQ_CREDIT_THRES(credit_thres));
1943 return t3_sge_write_context(adapter, id, F_CQ);
1947 * t3_sge_enable_ecntxt - enable/disable an SGE egress context
1948 * @adapter: the adapter
1949 * @id: the egress context id
1950 * @enable: enable (1) or disable (0) the context
1952 * Enable or disable an SGE egress context. The caller is responsible for
1953 * ensuring only one context operation occurs at a time.
1955 int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable)
1957 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1958 return -EBUSY;
1960 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
1961 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
1962 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
1963 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID);
1964 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable));
1965 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
1966 V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
1967 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
1968 0, 5, 1);
1972 * t3_sge_disable_fl - disable an SGE free-buffer list
1973 * @adapter: the adapter
1974 * @id: the free list context id
1976 * Disable an SGE free-buffer list. The caller is responsible for
1977 * ensuring only one context operation occurs at a time.
1979 int t3_sge_disable_fl(struct adapter *adapter, unsigned int id)
1981 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1982 return -EBUSY;
1984 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
1985 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
1986 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE));
1987 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
1988 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0);
1989 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
1990 V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id));
1991 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
1992 0, 5, 1);
1996 * t3_sge_disable_rspcntxt - disable an SGE response queue
1997 * @adapter: the adapter
1998 * @id: the response queue context id
2000 * Disable an SGE response queue. The caller is responsible for
2001 * ensuring only one context operation occurs at a time.
2003 int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id)
2005 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2006 return -EBUSY;
2008 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2009 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2010 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2011 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2012 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2013 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2014 V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id));
2015 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2016 0, 5, 1);
2020 * t3_sge_disable_cqcntxt - disable an SGE completion queue
2021 * @adapter: the adapter
2022 * @id: the completion queue context id
2024 * Disable an SGE completion queue. The caller is responsible for
2025 * ensuring only one context operation occurs at a time.
2027 int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id)
2029 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2030 return -EBUSY;
2032 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2033 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2034 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2035 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2036 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2037 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2038 V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
2039 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2040 0, 5, 1);
2044 * t3_sge_cqcntxt_op - perform an operation on a completion queue context
2045 * @adapter: the adapter
2046 * @id: the context id
2047 * @op: the operation to perform
2049 * Perform the selected operation on an SGE completion queue context.
2050 * The caller is responsible for ensuring only one context operation
2051 * occurs at a time.
2053 int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
2054 unsigned int credits)
2056 u32 val;
2058 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2059 return -EBUSY;
2061 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16);
2062 t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) |
2063 V_CONTEXT(id) | F_CQ);
2064 if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2065 0, 5, 1, &val))
2066 return -EIO;
2068 if (op >= 2 && op < 7) {
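/* Opcodes 2-6 report the CQ index: rev > 0 silicon returns it directly
 * in the command register, while older parts need an explicit context
 * read (opcode 0) to fetch it.
 */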
2069 if (adapter->params.rev > 0)
2070 return G_CQ_INDEX(val);
2072 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2073 V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id));
2074 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD,
2075 F_CONTEXT_CMD_BUSY, 0, 5, 1))
2076 return -EIO;
2077 return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0));
2079 return 0;
2083 * t3_sge_read_context - read an SGE context
2084 * @type: the context type
2085 * @adapter: the adapter
2086 * @id: the context id
2087 * @data: holds the retrieved context
2089 * Read an SGE context of the given @type. The caller is responsible for
2090 * only one context operation occurs at a time.
2092 static int t3_sge_read_context(unsigned int type, struct adapter *adapter,
2093 unsigned int id, u32 data[4])
2095 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2096 return -EBUSY;
2098 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2099 V_CONTEXT_CMD_OPCODE(0) | type | V_CONTEXT(id));
2100 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 0,
2101 5, 1))
2102 return -EIO;
2103 data[0] = t3_read_reg(adapter, A_SG_CONTEXT_DATA0);
2104 data[1] = t3_read_reg(adapter, A_SG_CONTEXT_DATA1);
2105 data[2] = t3_read_reg(adapter, A_SG_CONTEXT_DATA2);
2106 data[3] = t3_read_reg(adapter, A_SG_CONTEXT_DATA3);
2107 return 0;
2111 * t3_sge_read_ecntxt - read an SGE egress context
2112 * @adapter: the adapter
2113 * @id: the context id
2114 * @data: holds the retrieved context
2116 * Read an SGE egress context. The caller is responsible for ensuring
2117 * only one context operation occurs at a time.
2119 int t3_sge_read_ecntxt(struct adapter *adapter, unsigned int id, u32 data[4])
2121 if (id >= 65536)
2122 return -EINVAL;
2123 return t3_sge_read_context(F_EGRESS, adapter, id, data);
2127 * t3_sge_read_cq - read an SGE CQ context
2128 * @adapter: the adapter
2129 * @id: the context id
2130 * @data: holds the retrieved context
2132 * Read an SGE CQ context. The caller is responsible for ensuring
2133 * only one context operation occurs at a time.
2135 int t3_sge_read_cq(struct adapter *adapter, unsigned int id, u32 data[4])
2137 if (id >= 65536)
2138 return -EINVAL;
2139 return t3_sge_read_context(F_CQ, adapter, id, data);
2143 * t3_sge_read_fl - read an SGE free-list context
2144 * @adapter: the adapter
2145 * @id: the context id
2146 * @data: holds the retrieved context
2148 * Read an SGE free-list context. The caller is responsible for ensuring
2149 * only one context operation occurs at a time.
2151 int t3_sge_read_fl(struct adapter *adapter, unsigned int id, u32 data[4])
2153 if (id >= SGE_QSETS * 2)
2154 return -EINVAL;
2155 return t3_sge_read_context(F_FREELIST, adapter, id, data);
2159 * t3_sge_read_rspq - read an SGE response queue context
2160 * @adapter: the adapter
2161 * @id: the context id
2162 * @data: holds the retrieved context
2164 * Read an SGE response queue context. The caller is responsible for
2165 * ensuring only one context operation occurs at a time.
2167 int t3_sge_read_rspq(struct adapter *adapter, unsigned int id, u32 data[4])
2169 if (id >= SGE_QSETS)
2170 return -EINVAL;
2171 return t3_sge_read_context(F_RESPONSEQ, adapter, id, data);
2175 * t3_config_rss - configure Rx packet steering
2176 * @adapter: the adapter
2177 * @rss_config: RSS settings (written to TP_RSS_CONFIG)
2178 * @cpus: values for the CPU lookup table (0xff terminated)
2179 * @rspq: values for the response queue lookup table (0xffff terminated)
2181 * Programs the receive packet steering logic. @cpus and @rspq provide
2182 * the values for the CPU and response queue lookup tables. If they
2183 * provide fewer values than the size of the tables the supplied values
2184 * are used repeatedly until the tables are fully populated.
2186 void t3_config_rss(struct adapter *adapter, unsigned int rss_config,
2187 const u8 * cpus, const u16 *rspq)
2189 int i, j, cpu_idx = 0, q_idx = 0;
2191 if (cpus)
2192 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2193 u32 val = i << 16;
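/* Each write loads one lookup-table entry: the entry index goes in the
 * upper half-word and two 6-bit CPU values occupy the low two bytes.
 */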
2195 for (j = 0; j < 2; ++j) {
2196 val |= (cpus[cpu_idx++] & 0x3f) << (8 * j);
2197 if (cpus[cpu_idx] == 0xff)
2198 cpu_idx = 0;
2200 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val);
2203 if (rspq)
2204 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2205 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2206 (i << 16) | rspq[q_idx++]);
2207 if (rspq[q_idx] == 0xffff)
2208 q_idx = 0;
2211 t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config);
2215 * t3_read_rss - read the contents of the RSS tables
2216 * @adapter: the adapter
2217 * @lkup: holds the contents of the RSS lookup table
2218 * @map: holds the contents of the RSS map table
2220 * Reads the contents of the receive packet steering tables.
2222 int t3_read_rss(struct adapter *adapter, u8 * lkup, u16 *map)
2224 int i;
2225 u32 val;
2227 if (lkup)
2228 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2229 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE,
2230 0xffff0000 | i);
2231 val = t3_read_reg(adapter, A_TP_RSS_LKP_TABLE);
2232 if (!(val & 0x80000000))
2233 return -EAGAIN;
2234 *lkup++ = val;
2235 *lkup++ = (val >> 8);
2238 if (map)
2239 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2240 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2241 0xffff0000 | i);
2242 val = t3_read_reg(adapter, A_TP_RSS_MAP_TABLE);
2243 if (!(val & 0x80000000))
2244 return -EAGAIN;
2245 *map++ = val;
2247 return 0;
2251 * t3_tp_set_offload_mode - put TP in NIC/offload mode
2252 * @adap: the adapter
2253 * @enable: 1 to select offload mode, 0 for regular NIC
2255 * Switches TP to NIC/offload mode.
2257 void t3_tp_set_offload_mode(struct adapter *adap, int enable)
2259 if (is_offload(adap) || !enable)
2260 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE,
2261 V_NICMODE(!enable));
2265 * pm_num_pages - calculate the number of pages of the payload memory
2266 * @mem_size: the size of the payload memory
2267 * @pg_size: the size of each payload memory page
2269 * Calculate the number of pages, each of the given size, that fit in a
2270 * memory of the specified size, respecting the HW requirement that the
2271 * number of pages must be a multiple of 24.
2273 static inline unsigned int pm_num_pages(unsigned int mem_size,
2274 unsigned int pg_size)
2276 unsigned int n = mem_size / pg_size;
2278 return n - n % 24;
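/* For example, 100 MB of payload memory with 64 KB pages gives n = 1600,
 * which is rounded down to 1584 (66 * 24).
 */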
2281 #define mem_region(adap, start, size, reg) \
2282 t3_write_reg((adap), A_ ## reg, (start)); \
2283 start += size
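/* Note that mem_region() both programs the base register and advances
 * @start, so the order of invocations in partition_mem() lays out the
 * memory map.
 */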
2286 * partition_mem - partition memory and configure TP memory settings
2287 * @adap: the adapter
2288 * @p: the TP parameters
2290 * Partitions context and payload memory and configures TP's memory
2291 * registers.
2293 static void partition_mem(struct adapter *adap, const struct tp_params *p)
2295 unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5);
2296 unsigned int timers = 0, timers_shift = 22;
2298 if (adap->params.rev > 0) {
2299 if (tids <= 16 * 1024) {
2300 timers = 1;
2301 timers_shift = 16;
2302 } else if (tids <= 64 * 1024) {
2303 timers = 2;
2304 timers_shift = 18;
2305 } else if (tids <= 256 * 1024) {
2306 timers = 3;
2307 timers_shift = 20;
2311 t3_write_reg(adap, A_TP_PMM_SIZE,
2312 p->chan_rx_size | (p->chan_tx_size >> 16));
2314 t3_write_reg(adap, A_TP_PMM_TX_BASE, 0);
2315 t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size);
2316 t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs);
2317 t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX),
2318 V_TXDATAACKIDX(fls(p->tx_pg_size) - 12));
2320 t3_write_reg(adap, A_TP_PMM_RX_BASE, 0);
2321 t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size);
2322 t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs);
2324 pstructs = p->rx_num_pgs + p->tx_num_pgs;
2325 /* Add a bit of headroom and make multiple of 24 */
2326 pstructs += 48;
2327 pstructs -= pstructs % 24;
2328 t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs);
2330 m = tids * TCB_SIZE;
2331 mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR);
2332 mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR);
2333 t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m);
2334 m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22);
2335 mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE);
2336 mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE);
2337 mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE);
2338 mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE);
2340 m = (m + 4095) & ~0xfff;
2341 t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m);
2342 t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m);
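/* Size the remaining CM memory (minus a 3 MB reserve) in 3 KB
 * per-connection units; if it backs fewer connections than the TCAM is
 * configured for, the surplus TCAM entries appear to be folded into the
 * server table below.
 */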
2344 tids = (p->cm_size - m - (3 << 20)) / 3072 - 32;
2345 m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
2346 adap->params.mc5.nfilters - adap->params.mc5.nroutes;
2347 if (tids < m)
2348 adap->params.mc5.nservers += m - tids;
2351 static inline void tp_wr_indirect(struct adapter *adap, unsigned int addr,
2352 u32 val)
2354 t3_write_reg(adap, A_TP_PIO_ADDR, addr);
2355 t3_write_reg(adap, A_TP_PIO_DATA, val);
2358 static void tp_config(struct adapter *adap, const struct tp_params *p)
2360 t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU |
2361 F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD |
2362 F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
2363 t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
2364 F_MTUENABLE | V_WINDOWSCALEMODE(1) |
2365 V_TIMESTAMPSMODE(1) | V_SACKMODE(1) | V_SACKRX(1));
2366 t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
2367 V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
2368 V_BYTETHRESHOLD(16384) | V_MSSTHRESHOLD(2) |
2369 F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
2370 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_IPV6ENABLE | F_NICMODE,
2371 F_IPV6ENABLE | F_NICMODE);
2372 t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
2373 t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
2374 t3_set_reg_field(adap, A_TP_PARA_REG6,
2375 adap->params.rev > 0 ? F_ENABLEESND : F_T3A_ENABLEESND,
2376 0);
2378 t3_set_reg_field(adap, A_TP_PC_CONFIG,
2379 F_ENABLEEPCMDAFULL | F_ENABLEOCSPIFULL,
2380 F_TXDEFERENABLE | F_HEARBEATDACK | F_TXCONGESTIONMODE |
2381 F_RXCONGESTIONMODE);
2382 t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL, 0);
2384 if (adap->params.rev > 0) {
2385 tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
2386 t3_set_reg_field(adap, A_TP_PARA_REG3, F_TXPACEAUTO,
2387 F_TXPACEAUTO);
2388 t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID);
2389 t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEAUTOSTRICT);
2390 } else
2391 t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);
2393 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0x12121212);
2394 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0x12121212);
2395 t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0x1212);
2398 /* Desired TP timer resolution in usec */
2399 #define TP_TMR_RES 50
2401 /* TCP timer values in ms */
2402 #define TP_DACK_TIMER 50
2403 #define TP_RTO_MIN 250
2406 * tp_set_timers - set TP timing parameters
2407 * @adap: the adapter to set
2408 * @core_clk: the core clock frequency in Hz
2410 * Set TP's timing parameters, such as the various timer resolutions and
2411 * the TCP timer values.
2413 static void tp_set_timers(struct adapter *adap, unsigned int core_clk)
2415 unsigned int tre = fls(core_clk / (1000000 / TP_TMR_RES)) - 1;
2416 unsigned int dack_re = fls(core_clk / 5000) - 1; /* 200us */
2417 unsigned int tstamp_re = fls(core_clk / 1000); /* 1ms, at least */
2418 unsigned int tps = core_clk >> tre;
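/* tre is picked so that one core timer tick is roughly TP_TMR_RES (50)
 * usec; tps is the resulting tick rate in ticks per second and scales
 * all of the timer loads below.
 */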
2420 t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) |
2421 V_DELAYEDACKRESOLUTION(dack_re) |
2422 V_TIMESTAMPRESOLUTION(tstamp_re));
2423 t3_write_reg(adap, A_TP_DACK_TIMER,
2424 (core_clk >> dack_re) / (1000 / TP_DACK_TIMER));
2425 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100);
2426 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504);
2427 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908);
2428 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c);
2429 t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) |
2430 V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) |
2431 V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
2432 V_KEEPALIVEMAX(9));
2434 #define SECONDS * tps
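/* With tps in ticks per second, "n SECONDS" expands to "n * tps",
 * letting the timer loads below read naturally.
 */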
2436 t3_write_reg(adap, A_TP_MSL, adap->params.rev > 0 ? 0 : 2 SECONDS);
2437 t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN));
2438 t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS);
2439 t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS);
2440 t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS);
2441 t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS);
2442 t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS);
2443 t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS);
2444 t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS);
2446 #undef SECONDS
2450 * t3_tp_set_coalescing_size - set receive coalescing size
2451 * @adap: the adapter
2452 * @size: the receive coalescing size
2453 * @psh: whether a set PSH bit should deliver coalesced data
2455 * Set the receive coalescing size and PSH bit handling.
2457 int t3_tp_set_coalescing_size(struct adapter *adap, unsigned int size, int psh)
2459 u32 val;
2461 if (size > MAX_RX_COALESCING_LEN)
2462 return -EINVAL;
2464 val = t3_read_reg(adap, A_TP_PARA_REG3);
2465 val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);
2467 if (size) {
2468 val |= F_RXCOALESCEENABLE;
2469 if (psh)
2470 val |= F_RXCOALESCEPSHEN;
2471 t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
2472 V_MAXRXDATA(MAX_RX_COALESCING_LEN));
2474 t3_write_reg(adap, A_TP_PARA_REG3, val);
2475 return 0;
2479 * t3_tp_set_max_rxsize - set the max receive size
2480 * @adap: the adapter
2481 * @size: the max receive size
2483 * Set TP's max receive size. This is the limit that applies when
2484 * receive coalescing is disabled.
2486 void t3_tp_set_max_rxsize(struct adapter *adap, unsigned int size)
2488 t3_write_reg(adap, A_TP_PARA_REG7,
2489 V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
2492 static void __devinit init_mtus(unsigned short mtus[])
2495 * See draft-mathis-plpmtud-00.txt for the values. The min is 88 so
2496 * it can accommodate max size TCP/IP headers when SACK and timestamps
2497 * are enabled and still have at least 8 bytes of payload.
2499 mtus[0] = 88;
2500 mtus[1] = 256;
2501 mtus[2] = 512;
2502 mtus[3] = 576;
2503 mtus[4] = 808;
2504 mtus[5] = 1024;
2505 mtus[6] = 1280;
2506 mtus[7] = 1492;
2507 mtus[8] = 1500;
2508 mtus[9] = 2002;
2509 mtus[10] = 2048;
2510 mtus[11] = 4096;
2511 mtus[12] = 4352;
2512 mtus[13] = 8192;
2513 mtus[14] = 9000;
2514 mtus[15] = 9600;
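/* The entries track common path MTUs, e.g. 576 (classic dial-up
 * minimum), 1492 (PPPoE), 1500 (Ethernet) and 9000/9600 (jumbo frames).
 */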
2518 * Initial congestion control parameters.
2520 static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
2522 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
2523 a[9] = 2;
2524 a[10] = 3;
2525 a[11] = 4;
2526 a[12] = 5;
2527 a[13] = 6;
2528 a[14] = 7;
2529 a[15] = 8;
2530 a[16] = 9;
2531 a[17] = 10;
2532 a[18] = 14;
2533 a[19] = 17;
2534 a[20] = 21;
2535 a[21] = 25;
2536 a[22] = 30;
2537 a[23] = 35;
2538 a[24] = 45;
2539 a[25] = 60;
2540 a[26] = 80;
2541 a[27] = 100;
2542 a[28] = 200;
2543 a[29] = 300;
2544 a[30] = 400;
2545 a[31] = 500;
2547 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
2548 b[9] = b[10] = 1;
2549 b[11] = b[12] = 2;
2550 b[13] = b[14] = b[15] = b[16] = 3;
2551 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
2552 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
2553 b[28] = b[29] = 6;
2554 b[30] = b[31] = 7;
2557 /* The minimum additive increment value for the congestion control table */
2558 #define CC_MIN_INCR 2U
2561 * t3_load_mtus - write the MTU and congestion control HW tables
2562 * @adap: the adapter
2563 * @mtus: the unrestricted values for the MTU table
2564 * @alpha: the values for the congestion control alpha parameter
2565 * @beta: the values for the congestion control beta parameter
2566 * @mtu_cap: the maximum permitted effective MTU
2568 * Write the MTU table with the supplied MTUs, capping each at @mtu_cap.
2569 * Update the high-speed congestion control table with the supplied alpha,
2570 * beta, and MTUs.
2572 void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS],
2573 unsigned short alpha[NCCTRL_WIN],
2574 unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap)
2576 static const unsigned int avg_pkts[NCCTRL_WIN] = {
2577 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
2578 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
2579 28672, 40960, 57344, 81920, 114688, 163840, 229376
2582 unsigned int i, w;
2584 for (i = 0; i < NMTUS; ++i) {
2585 unsigned int mtu = min(mtus[i], mtu_cap);
2586 unsigned int log2 = fls(mtu);
2588 if (!(mtu & ((1 << log2) >> 2))) /* round */
2589 log2--;
2590 t3_write_reg(adap, A_TP_MTU_TABLE,
2591 (i << 24) | (log2 << 16) | mtu);
2593 for (w = 0; w < NCCTRL_WIN; ++w) {
2594 unsigned int inc;
2596 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
2597 CC_MIN_INCR);
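/* E.g., mtu 1500, alpha 2 and a 20-average-packet window (w == 4) give
 * inc = (1460 * 2) / 20 = 146 bytes of additive increase.
 */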
2599 t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
2600 (w << 16) | (beta[w] << 13) | inc);
2606 * t3_read_hw_mtus - returns the values in the HW MTU table
2607 * @adap: the adapter
2608 * @mtus: where to store the HW MTU values
2610 * Reads the HW MTU table.
2612 void t3_read_hw_mtus(struct adapter *adap, unsigned short mtus[NMTUS])
2614 int i;
2616 for (i = 0; i < NMTUS; ++i) {
2617 unsigned int val;
2619 t3_write_reg(adap, A_TP_MTU_TABLE, 0xff000000 | i);
2620 val = t3_read_reg(adap, A_TP_MTU_TABLE);
2621 mtus[i] = val & 0x3fff;
2626 * t3_get_cong_cntl_tab - reads the congestion control table
2627 * @adap: the adapter
2628 * @incr: where to store the additive increments
2630 * Reads the additive increments programmed into the HW congestion
2631 * control table.
2633 void t3_get_cong_cntl_tab(struct adapter *adap,
2634 unsigned short incr[NMTUS][NCCTRL_WIN])
2636 unsigned int mtu, w;
2638 for (mtu = 0; mtu < NMTUS; ++mtu)
2639 for (w = 0; w < NCCTRL_WIN; ++w) {
2640 t3_write_reg(adap, A_TP_CCTRL_TABLE,
2641 0xffff0000 | (mtu << 5) | w);
2642 incr[mtu][w] = t3_read_reg(adap, A_TP_CCTRL_TABLE) &
2643 0x1fff;
2648 * t3_tp_get_mib_stats - read TP's MIB counters
2649 * @adap: the adapter
2650 * @tps: holds the returned counter values
2652 * Returns the values of TP's MIB counters.
2654 void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps)
2656 t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *) tps,
2657 sizeof(*tps) / sizeof(u32), 0);
2660 #define ulp_region(adap, name, start, len) \
2661 t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \
2662 t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \
2663 (start) + (len) - 1); \
2664 start += len
2666 #define ulptx_region(adap, name, start, len) \
2667 t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \
2668 t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \
2669 (start) + (len) - 1)
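/* ulp_region() advances @start but ulptx_region() does not, so each ULP
 * TX window below shares its address range with the ULP RX window
 * programmed immediately after it (TPT/STAG and the two PBL regions).
 */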
2671 static void ulp_config(struct adapter *adap, const struct tp_params *p)
2673 unsigned int m = p->chan_rx_size;
2675 ulp_region(adap, ISCSI, m, p->chan_rx_size / 8);
2676 ulp_region(adap, TDDP, m, p->chan_rx_size / 8);
2677 ulptx_region(adap, TPT, m, p->chan_rx_size / 4);
2678 ulp_region(adap, STAG, m, p->chan_rx_size / 4);
2679 ulp_region(adap, RQ, m, p->chan_rx_size / 4);
2680 ulptx_region(adap, PBL, m, p->chan_rx_size / 4);
2681 ulp_region(adap, PBL, m, p->chan_rx_size / 4);
2682 t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
2685 void t3_config_trace_filter(struct adapter *adapter,
2686 const struct trace_params *tp, int filter_index,
2687 int invert, int enable)
2689 u32 addr, key[4], mask[4];
2691 key[0] = tp->sport | (tp->sip << 16);
2692 key[1] = (tp->sip >> 16) | (tp->dport << 16);
2693 key[2] = tp->dip;
2694 key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20);
2696 mask[0] = tp->sport_mask | (tp->sip_mask << 16);
2697 mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16);
2698 mask[2] = tp->dip_mask;
2699 mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20);
2701 if (invert)
2702 key[3] |= (1 << 29);
2703 if (enable)
2704 key[3] |= (1 << 28);
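/* The four key/mask words mirror the TRC_KEY0..3 register layout; bits
 * 28 and 29 of the last word carry the enable and invert flags set above.
 */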
2706 addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
2707 tp_wr_indirect(adapter, addr++, key[0]);
2708 tp_wr_indirect(adapter, addr++, mask[0]);
2709 tp_wr_indirect(adapter, addr++, key[1]);
2710 tp_wr_indirect(adapter, addr++, mask[1]);
2711 tp_wr_indirect(adapter, addr++, key[2]);
2712 tp_wr_indirect(adapter, addr++, mask[2]);
2713 tp_wr_indirect(adapter, addr++, key[3]);
2714 tp_wr_indirect(adapter, addr, mask[3]);
2715 t3_read_reg(adapter, A_TP_PIO_DATA);
2719 * t3_config_sched - configure a HW traffic scheduler
2720 * @adap: the adapter
2721 * @kbps: target rate in Kbps
2722 * @sched: the scheduler index
2724 * Configure a HW scheduler for the target rate.
2726 int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched)
2728 unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
2729 unsigned int clk = adap->params.vpd.cclk * 1000;
2730 unsigned int selected_cpt = 0, selected_bpt = 0;
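/* Exhaustively search the 8-bit clocks-per-tick (cpt) and bytes-per-tick
 * (bpt) pairs for the product closest to the requested byte rate.
 */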
2732 if (kbps > 0) {
2733 kbps *= 125; /* -> bytes */
2734 for (cpt = 1; cpt <= 255; cpt++) {
2735 tps = clk / cpt;
2736 bpt = (kbps + tps / 2) / tps;
2737 if (bpt > 0 && bpt <= 255) {
2738 v = bpt * tps;
2739 delta = v >= kbps ? v - kbps : kbps - v;
2740 if (delta <= mindelta) {
2741 mindelta = delta;
2742 selected_cpt = cpt;
2743 selected_bpt = bpt;
2745 } else if (selected_cpt)
2746 break;
2748 if (!selected_cpt)
2749 return -EINVAL;
2751 t3_write_reg(adap, A_TP_TM_PIO_ADDR,
2752 A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
2753 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
2754 if (sched & 1)
2755 v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
2756 else
2757 v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
2758 t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
2759 return 0;
2762 static int tp_init(struct adapter *adap, const struct tp_params *p)
2764 int busy = 0;
2766 tp_config(adap, p);
2767 t3_set_vlan_accel(adap, 3, 0);
2769 if (is_offload(adap)) {
2770 tp_set_timers(adap, adap->params.vpd.cclk * 1000);
2771 t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE);
2772 busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE,
2773 0, 1000, 5);
2774 if (busy)
2775 CH_ERR(adap, "TP initialization timed out\n");
2778 if (!busy)
2779 t3_write_reg(adap, A_TP_RESET, F_TPRESET);
2780 return busy;
2783 int t3_mps_set_active_ports(struct adapter *adap, unsigned int port_mask)
2785 if (port_mask & ~((1 << adap->params.nports) - 1))
2786 return -EINVAL;
2787 t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE | F_PORT0ACTIVE,
2788 port_mask << S_PORT0ACTIVE);
2789 return 0;
2793 * Perform the bits of HW initialization that are dependent on the number
2794 * of available ports.
2796 static void init_hw_for_avail_ports(struct adapter *adap, int nports)
2798 int i;
2800 if (nports == 1) {
2801 t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
2802 t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
2803 t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_TPTXPORT0EN |
2804 F_PORT0ACTIVE | F_ENFORCEPKT);
2805 t3_write_reg(adap, A_PM1_TX_CFG, 0xc000c000);
2806 } else {
2807 t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
2808 t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
2809 t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
2810 V_D1_WEIGHT(16) | V_D0_WEIGHT(16));
2811 t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN |
2812 F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE |
2813 F_ENFORCEPKT);
2814 t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000);
2815 t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE);
2816 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
2817 V_TX_MOD_QUEUE_REQ_MAP(0xaa));
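/* 0xaa (0b10101010) appears to alternate the eight TX modulation queues
 * between the two channels.
 */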
2818 for (i = 0; i < 16; i++)
2819 t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE,
2820 (i << 16) | 0x1010);
2824 static int calibrate_xgm(struct adapter *adapter)
2826 if (uses_xaui(adapter)) {
2827 unsigned int v, i;
2829 for (i = 0; i < 5; ++i) {
2830 t3_write_reg(adapter, A_XGM_XAUI_IMP, 0);
2831 t3_read_reg(adapter, A_XGM_XAUI_IMP);
2832 msleep(1);
2833 v = t3_read_reg(adapter, A_XGM_XAUI_IMP);
2834 if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) {
2835 t3_write_reg(adapter, A_XGM_XAUI_IMP,
2836 V_XAUIIMP(G_CALIMP(v) >> 2));
2837 return 0;
2840 CH_ERR(adapter, "MAC calibration failed\n");
2841 return -1;
2842 } else {
2843 t3_write_reg(adapter, A_XGM_RGMII_IMP,
2844 V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
2845 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
2846 F_XGM_IMPSETUPDATE);
2848 return 0;
2851 static void calibrate_xgm_t3b(struct adapter *adapter)
2853 if (!uses_xaui(adapter)) {
2854 t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET |
2855 F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
2856 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0);
2857 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0,
2858 F_XGM_IMPSETUPDATE);
2859 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
2860 0);
2861 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0);
2862 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE);
2866 struct mc7_timing_params {
2867 unsigned char ActToPreDly;
2868 unsigned char ActToRdWrDly;
2869 unsigned char PreCyc;
2870 unsigned char RefCyc[5];
2871 unsigned char BkCyc;
2872 unsigned char WrToRdDly;
2873 unsigned char RdToWrDly;
2877 * Write a value to a register and check that the write completed. These
2878 * writes normally complete in a cycle or two, so one read should suffice.
2879 * The very first read exists to flush the posted write to the device.
2881 static int wrreg_wait(struct adapter *adapter, unsigned int addr, u32 val)
2883 t3_write_reg(adapter, addr, val);
2884 t3_read_reg(adapter, addr); /* flush */
2885 if (!(t3_read_reg(adapter, addr) & F_BUSY))
2886 return 0;
2887 CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr);
2888 return -EIO;
2891 static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
2893 static const unsigned int mc7_mode[] = {
2894 0x632, 0x642, 0x652, 0x432, 0x442
2896 static const struct mc7_timing_params mc7_timings[] = {
2897 {12, 3, 4, {20, 28, 34, 52, 0}, 15, 6, 4},
2898 {12, 4, 5, {20, 28, 34, 52, 0}, 16, 7, 4},
2899 {12, 5, 6, {20, 28, 34, 52, 0}, 17, 8, 4},
2900 {9, 3, 4, {15, 21, 26, 39, 0}, 12, 6, 4},
2901 {9, 4, 5, {15, 21, 26, 39, 0}, 13, 7, 4}
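/* One row per memory type; RefCyc is indexed further by the part's
 * density as read back from the MC7 configuration register.
 */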
2904 u32 val;
2905 unsigned int width, density, slow, attempts;
2906 struct adapter *adapter = mc7->adapter;
2907 const struct mc7_timing_params *p = &mc7_timings[mem_type];
2909 if (!mc7->size)
2910 return 0;
2912 val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
2913 slow = val & F_SLOW;
2914 width = G_WIDTH(val);
2915 density = G_DEN(val);
2917 t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN);
2918 val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
2919 msleep(1);
2921 if (!slow) {
2922 t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN);
2923 t3_read_reg(adapter, mc7->offset + A_MC7_CAL);
2924 msleep(1);
2925 if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) &
2926 (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) {
2927 CH_ERR(adapter, "%s MC7 calibration timed out\n",
2928 mc7->name);
2929 goto out_fail;
2933 t3_write_reg(adapter, mc7->offset + A_MC7_PARM,
2934 V_ACTTOPREDLY(p->ActToPreDly) |
2935 V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) |
2936 V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) |
2937 V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly));
2939 t3_write_reg(adapter, mc7->offset + A_MC7_CFG,
2940 val | F_CLKEN | F_TERM150);
2941 t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
2943 if (!slow)
2944 t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB,
2945 F_DLLENB);
2946 udelay(1);
2948 val = slow ? 3 : 6;
2949 if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
2950 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) ||
2951 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) ||
2952 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
2953 goto out_fail;
2955 if (!slow) {
2956 t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100);
2957 t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLRST, 0);
2958 udelay(5);
2961 if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
2962 wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
2963 wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
2964 wrreg_wait(adapter, mc7->offset + A_MC7_MODE,
2965 mc7_mode[mem_type]) ||
2966 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) ||
2967 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
2968 goto out_fail;
2970 /* clock value is in KHz */
2971 mc7_clock = mc7_clock * 7812 + mc7_clock / 2; /* ns */
2972 mc7_clock /= 1000000; /* KHz->MHz, ns->us */
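/* Net effect: mc7_clock * 7.8125e-3, i.e. the number of memory clock
 * cycles in what matches the standard 7.8125 us DDR refresh interval.
 */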
2974 t3_write_reg(adapter, mc7->offset + A_MC7_REF,
2975 F_PERREFEN | V_PREREFDIV(mc7_clock));
2976 t3_read_reg(adapter, mc7->offset + A_MC7_REF); /* flush */
2978 t3_write_reg(adapter, mc7->offset + A_MC7_ECC, F_ECCGENEN | F_ECCCHKEN);
2979 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0);
2980 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0);
2981 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END,
2982 (mc7->size << width) - 1);
2983 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1));
2984 t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP); /* flush */
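/* BIST now sweeps the entire part; poll for completion, allowing up to
 * 50 * 250 ms = 12.5 seconds.
 */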
2986 attempts = 50;
2987 do {
2988 msleep(250);
2989 val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);
2990 } while ((val & F_BUSY) && --attempts);
2991 if (val & F_BUSY) {
2992 CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name);
2993 goto out_fail;
2996 /* Enable normal memory accesses. */
2997 t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY);
2998 return 0;
3000 out_fail:
3001 return -1;
3004 static void config_pcie(struct adapter *adap)
3006 static const u16 ack_lat[4][6] = {
3007 {237, 416, 559, 1071, 2095, 4143},
3008 {128, 217, 289, 545, 1057, 2081},
3009 {73, 118, 154, 282, 538, 1050},
3010 {67, 107, 86, 150, 278, 534}
3012 static const u16 rpl_tmr[4][6] = {
3013 {711, 1248, 1677, 3213, 6285, 12429},
3014 {384, 651, 867, 1635, 3171, 6243},
3015 {219, 354, 462, 846, 1614, 3150},
3016 {201, 321, 258, 450, 834, 1602}
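/* Both tables are indexed [log2(link width)][max payload size code]; the
 * values presumably follow the PCIe ack-latency and replay-timer
 * recommendations for those combinations.
 */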
3019 u16 val;
3020 unsigned int log2_width, pldsize;
3021 unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;
3023 pci_read_config_word(adap->pdev,
3024 adap->params.pci.pcie_cap_addr + PCI_EXP_DEVCTL,
3025 &val);
3026 pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
3027 pci_read_config_word(adap->pdev,
3028 adap->params.pci.pcie_cap_addr + PCI_EXP_LNKCTL,
3029 &val);
3031 fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
3032 fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
3033 G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
3034 log2_width = fls(adap->params.pci.width) - 1;
3035 acklat = ack_lat[log2_width][pldsize];
3036 if (val & 1) /* check LOsEnable */
3037 acklat += fst_trn_tx * 4;
3038 rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;
3040 if (adap->params.rev == 0)
3041 t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
3042 V_T3A_ACKLAT(M_T3A_ACKLAT),
3043 V_T3A_ACKLAT(acklat));
3044 else
3045 t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
3046 V_ACKLAT(acklat));
3048 t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
3049 V_REPLAYLMT(rpllmt));
3051 t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
3052 t3_set_reg_field(adap, A_PCIE_CFG, F_PCIE_CLIDECEN, F_PCIE_CLIDECEN);
3056 * Initialize and configure T3 HW modules. This performs the
3057 * initialization steps that need to be done once after a card is reset.
3058 * MAC and PHY initialization is handled separately whenever a port is enabled.
3060 * fw_params is passed to the FW and its value is platform dependent. Only the
3061 * top 8 bits are available for use, the rest must be 0.
3063 int t3_init_hw(struct adapter *adapter, u32 fw_params)
3065 int err = -EIO, attempts = 100;
3066 const struct vpd_params *vpd = &adapter->params.vpd;
3068 if (adapter->params.rev > 0)
3069 calibrate_xgm_t3b(adapter);
3070 else if (calibrate_xgm(adapter))
3071 goto out_err;
3073 if (vpd->mclk) {
3074 partition_mem(adapter, &adapter->params.tp);
3076 if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
3077 mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
3078 mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
3079 t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
3080 adapter->params.mc5.nfilters,
3081 adapter->params.mc5.nroutes))
3082 goto out_err;
3085 if (tp_init(adapter, &adapter->params.tp))
3086 goto out_err;
3088 t3_tp_set_coalescing_size(adapter,
3089 min(adapter->params.sge.max_pkt_size,
3090 MAX_RX_COALESCING_LEN), 1);
3091 t3_tp_set_max_rxsize(adapter,
3092 min(adapter->params.sge.max_pkt_size, 16384U));
3093 ulp_config(adapter, &adapter->params.tp);
3095 if (is_pcie(adapter))
3096 config_pcie(adapter);
3097 else
3098 t3_set_reg_field(adapter, A_PCIX_CFG, 0, F_CLIDECEN);
3100 t3_write_reg(adapter, A_PM1_RX_CFG, 0xf000f000);
3101 init_hw_for_avail_ports(adapter, adapter->params.nports);
3102 t3_sge_init(adapter, &adapter->params.sge);
3104 t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
3105 t3_write_reg(adapter, A_CIM_BOOT_CFG,
3106 V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
3107 t3_read_reg(adapter, A_CIM_BOOT_CFG); /* flush */
3109 do { /* wait for uP to initialize */
3110 msleep(20);
3111 } while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
3112 if (!attempts) {
3113 CH_ERR(adapter, "uP initialization timed out\n");
3114 goto out_err;
3117 err = 0;
3118 out_err:
3119 return err;
3123 * get_pci_mode - determine a card's PCI mode
3124 * @adapter: the adapter
3125 * @p: where to store the PCI settings
3127 * Determines a card's PCI mode and associated parameters, such as speed
3128 * and width.
3130 static void __devinit get_pci_mode(struct adapter *adapter,
3131 struct pci_params *p)
3133 static unsigned short speed_map[] = { 33, 66, 100, 133 };
3134 u32 pci_mode, pcie_cap;
3136 pcie_cap = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
3137 if (pcie_cap) {
3138 u16 val;
3140 p->variant = PCI_VARIANT_PCIE;
3141 p->pcie_cap_addr = pcie_cap;
3142 pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA,
3143 &val);
3144 p->width = (val >> 4) & 0x3f;
3145 return;
3148 pci_mode = t3_read_reg(adapter, A_PCIX_MODE);
3149 p->speed = speed_map[G_PCLKRANGE(pci_mode)];
3150 p->width = (pci_mode & F_64BIT) ? 64 : 32;
3151 pci_mode = G_PCIXINITPAT(pci_mode);
3152 if (pci_mode == 0)
3153 p->variant = PCI_VARIANT_PCI;
3154 else if (pci_mode < 4)
3155 p->variant = PCI_VARIANT_PCIX_MODE1_PARITY;
3156 else if (pci_mode < 8)
3157 p->variant = PCI_VARIANT_PCIX_MODE1_ECC;
3158 else
3159 p->variant = PCI_VARIANT_PCIX_266_MODE2;
3163 * init_link_config - initialize a link's SW state
3164 * @lc: structure holding the link state
3165 * @ai: information about the current card
3167 * Initializes the SW state maintained for each link, including the link's
3168 * capabilities and default speed/duplex/flow-control/autonegotiation
3169 * settings.
3171 static void __devinit init_link_config(struct link_config *lc,
3172 unsigned int caps)
3174 lc->supported = caps;
3175 lc->requested_speed = lc->speed = SPEED_INVALID;
3176 lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
3177 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
3178 if (lc->supported & SUPPORTED_Autoneg) {
3179 lc->advertising = lc->supported;
3180 lc->autoneg = AUTONEG_ENABLE;
3181 lc->requested_fc |= PAUSE_AUTONEG;
3182 } else {
3183 lc->advertising = 0;
3184 lc->autoneg = AUTONEG_DISABLE;
3189 * mc7_calc_size - calculate MC7 memory size
3190 * @cfg: the MC7 configuration
3192 * Calculates the size of an MC7 memory in bytes from the value of its
3193 * configuration register.
3195 static unsigned int __devinit mc7_calc_size(u32 cfg)
3197 unsigned int width = G_WIDTH(cfg);
3198 unsigned int banks = !!(cfg & F_BKS) + 1;
3199 unsigned int org = !!(cfg & F_ORG) + 1;
3200 unsigned int density = G_DEN(cfg);
3201 unsigned int MBs = ((256 << density) * banks) / (org << width);
3203 return MBs << 20;
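/* E.g., density code 0, one bank, org 1 and width code 0 give
 * (256 * 1) / 1 = 256 MB.
 */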
3206 static void __devinit mc7_prep(struct adapter *adapter, struct mc7 *mc7,
3207 unsigned int base_addr, const char *name)
3209 u32 cfg;
3211 mc7->adapter = adapter;
3212 mc7->name = name;
3213 mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
3214 cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
3215 mc7->size = G_DEN(cfg) == M_DEN ? 0 : mc7_calc_size(cfg);
3216 mc7->width = G_WIDTH(cfg);
3219 void mac_prep(struct cmac *mac, struct adapter *adapter, int index)
3221 mac->adapter = adapter;
3222 mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index;
3223 mac->nucast = 1;
3225 if (adapter->params.rev == 0 && uses_xaui(adapter)) {
3226 t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset,
3227 is_10G(adapter) ? 0x2901c04 : 0x2301c04);
3228 t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset,
3229 F_ENRGMII, 0);
3233 void early_hw_init(struct adapter *adapter, const struct adapter_info *ai)
3235 u32 val = V_PORTSPEED(is_10G(adapter) ? 3 : 2);
3237 mi1_init(adapter, ai);
3238 t3_write_reg(adapter, A_I2C_CFG, /* set for 80KHz */
3239 V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
3240 t3_write_reg(adapter, A_T3DBG_GPIO_EN,
3241 ai->gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
3242 t3_write_reg(adapter, A_MC5_DB_SERVER_INDEX, 0);
3244 if (adapter->params.rev == 0 || !uses_xaui(adapter))
3245 val |= F_ENRGMII;
3247 /* Enable MAC clocks so we can access the registers */
3248 t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3249 t3_read_reg(adapter, A_XGM_PORT_CFG);
3251 val |= F_CLKDIVRESET_;
3252 t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3253 t3_read_reg(adapter, A_XGM_PORT_CFG);
3254 t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val);
3255 t3_read_reg(adapter, A_XGM_PORT_CFG);
3259 * Reset the adapter.
3260 * Older PCIe cards lose their config space during reset; PCI-X
3261 * ones don't.
3263 int t3_reset_adapter(struct adapter *adapter)
3265 int i, save_and_restore_pcie =
3266 adapter->params.rev < T3_REV_B2 && is_pcie(adapter);
3267 uint16_t devid = 0;
3269 if (save_and_restore_pcie)
3270 pci_save_state(adapter->pdev);
3271 t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);
3274 * Delay: give the device some time to reset fully.
3275 * XXX The delay time should be tuned.
3277 for (i = 0; i < 10; i++) {
3278 msleep(50);
3279 pci_read_config_word(adapter->pdev, 0x00, &devid);
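/* 0x1425 is the Chelsio PCI vendor ID; the device is back once
 * config-space reads return it.
 */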
3280 if (devid == 0x1425)
3281 break;
3284 if (devid != 0x1425)
3285 return -1;
3287 if (save_and_restore_pcie)
3288 pci_restore_state(adapter->pdev);
3289 return 0;
3293 * Initialize adapter SW state for the various HW modules, set initial values
3294 * for some adapter tunables, take PHYs out of reset, and initialize the MDIO
3295 * interface.
3297 int __devinit t3_prep_adapter(struct adapter *adapter,
3298 const struct adapter_info *ai, int reset)
3300 int ret;
3301 unsigned int i, j = 0;
3303 get_pci_mode(adapter, &adapter->params.pci);
3305 adapter->params.info = ai;
3306 adapter->params.nports = ai->nports;
3307 adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
3308 adapter->params.linkpoll_period = 0;
3309 adapter->params.stats_update_period = is_10G(adapter) ?
3310 MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
3311 adapter->params.pci.vpd_cap_addr =
3312 pci_find_capability(adapter->pdev, PCI_CAP_ID_VPD);
3313 ret = get_vpd_params(adapter, &adapter->params.vpd);
3314 if (ret < 0)
3315 return ret;
3317 if (reset && t3_reset_adapter(adapter))
3318 return -1;
3320 t3_sge_prep(adapter, &adapter->params.sge);
3322 if (adapter->params.vpd.mclk) {
3323 struct tp_params *p = &adapter->params.tp;
3325 mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX");
3326 mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
3327 mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");
3329 p->nchan = ai->nports;
3330 p->pmrx_size = t3_mc7_size(&adapter->pmrx);
3331 p->pmtx_size = t3_mc7_size(&adapter->pmtx);
3332 p->cm_size = t3_mc7_size(&adapter->cm);
3333 p->chan_rx_size = p->pmrx_size / 2; /* only 1 Rx channel */
3334 p->chan_tx_size = p->pmtx_size / p->nchan;
3335 p->rx_pg_size = 64 * 1024;
3336 p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
3337 p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
3338 p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
3339 p->ntimer_qs = p->cm_size >= (128 << 20) ||
3340 adapter->params.rev > 0 ? 12 : 6;
3343 adapter->params.offload = t3_mc7_size(&adapter->pmrx) &&
3344 t3_mc7_size(&adapter->pmtx) &&
3345 t3_mc7_size(&adapter->cm);
3347 if (is_offload(adapter)) {
3348 adapter->params.mc5.nservers = DEFAULT_NSERVERS;
3349 adapter->params.mc5.nfilters = adapter->params.rev > 0 ?
3350 DEFAULT_NFILTERS : 0;
3351 adapter->params.mc5.nroutes = 0;
3352 t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT);
3354 init_mtus(adapter->params.mtus);
3355 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
3358 early_hw_init(adapter, ai);
3360 for_each_port(adapter, i) {
3361 u8 hw_addr[6];
3362 struct port_info *p = adap2pinfo(adapter, i);
3364 while (!adapter->params.vpd.port_type[j])
3365 ++j;
3367 p->port_type = &port_types[adapter->params.vpd.port_type[j]];
3368 p->port_type->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
3369 ai->mdio_ops);
3370 mac_prep(&p->mac, adapter, j);
3371 ++j;
3374 * The VPD EEPROM stores the base Ethernet address for the
3375 * card. A port's address is derived from the base by adding
3376 * the port's index to the base's low octet.
3378 memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
3379 hw_addr[5] = adapter->params.vpd.eth_base[5] + i;
3381 memcpy(adapter->port[i]->dev_addr, hw_addr,
3382 ETH_ALEN);
3383 memcpy(adapter->port[i]->perm_addr, hw_addr,
3384 ETH_ALEN);
3385 init_link_config(&p->link_config, p->port_type->caps);
3386 p->phy.ops->power_down(&p->phy, 1);
3387 if (!(p->port_type->caps & SUPPORTED_IRQ))
3388 adapter->params.linkpoll_period = 10;
3391 return 0;
3394 void t3_led_ready(struct adapter *adapter)
3396 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
3397 F_GPIO0_OUT_VAL);