/*
 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "common.h"
#include "regs.h"
#include "sge_defs.h"
#include "firmware_exports.h"

/**
 * t3_wait_op_done_val - wait until an operation is completed
 * @adapter: the adapter performing the operation
 * @reg: the register to check for completion
 * @mask: a single-bit field within @reg that indicates completion
 * @polarity: the value of the field when the operation is completed
 * @attempts: number of check iterations
 * @delay: delay in usecs between iterations
 * @valp: where to store the value of the register at completion time
 *
 * Wait until an operation is completed by checking a bit in a register
 * up to @attempts times.  If @valp is not NULL the value of the register
 * at the time it indicated completion is stored there.  Returns 0 if the
 * operation completes and -EAGAIN otherwise.
 */
int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
			int polarity, int attempts, int delay, u32 *valp)
{
	while (1) {
		u32 val = t3_read_reg(adapter, reg);

		if (!!(val & mask) == polarity) {
			if (valp)
				*valp = val;
			return 0;
		}
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			udelay(delay);
	}
}
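/*
 * Usage sketch (illustrative only, not part of the driver): poll the serial
 * flash BUSY bit with the same parameters sf1_read() uses further below,
 * capturing the final register value for diagnostics.
 *
 *	u32 val;
 *
 *	if (t3_wait_op_done_val(adapter, A_SF_OP, F_BUSY, 0, 5, 10, &val))
 *		CH_ERR(adapter, "flash op timed out, SF_OP 0x%x\n", val);
 */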
/**
 * t3_write_regs - write a bunch of registers
 * @adapter: the adapter to program
 * @p: an array of register address/register value pairs
 * @n: the number of address/value pairs
 * @offset: register address offset
 *
 * Takes an array of register address/register value pairs and writes each
 * value to the corresponding register.  Register addresses are adjusted
 * by the supplied offset.
 */
void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p,
		   int n, unsigned int offset)
{
	while (n--) {
		t3_write_reg(adapter, p->reg_addr + offset, p->val);
		p++;
	}
}

/**
 * t3_set_reg_field - set a register field to a value
 * @adapter: the adapter to program
 * @addr: the register address
 * @mask: specifies the portion of the register to modify
 * @val: the new value for the register field
 *
 * Sets a register field specified by the supplied mask to the
 * given value.
 */
void t3_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
		      u32 val)
{
	u32 v = t3_read_reg(adapter, addr) & ~mask;

	t3_write_reg(adapter, addr, v | val);
	t3_read_reg(adapter, addr);	/* flush */
}
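/*
 * Usage sketch (illustrative only): a read-modify-write of the MI1 ST field,
 * as the MDIO routines below do; the mask selects the field and the value
 * must already be shifted into position.
 *
 *	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
 */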
/**
 * t3_read_indirect - read indirectly addressed registers
 * @adap: the adapter
 * @addr_reg: register holding the indirect address
 * @data_reg: register holding the value of the indirect register
 * @vals: where the read register values are stored
 * @start_idx: index of first indirect register to read
 * @nregs: how many indirect registers to read
 *
 * Reads registers that are accessed indirectly through an address/data
 * register pair.
 */
static void t3_read_indirect(struct adapter *adap, unsigned int addr_reg,
			     unsigned int data_reg, u32 *vals,
			     unsigned int nregs, unsigned int start_idx)
{
	while (nregs--) {
		t3_write_reg(adap, addr_reg, start_idx);
		*vals++ = t3_read_reg(adap, data_reg);
		start_idx++;
	}
}

/**
 * t3_mc7_bd_read - read from MC7 through backdoor accesses
 * @mc7: identifies MC7 to read from
 * @start: index of first 64-bit word to read
 * @n: number of 64-bit words to read
 * @buf: where to store the read result
 *
 * Read n 64-bit words from MC7 starting at word start, using backdoor
 * accesses.
 */
int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
		   u64 *buf)
{
	static const int shift[] = { 0, 0, 16, 24 };
	static const int step[] = { 0, 32, 16, 8 };

	unsigned int size64 = mc7->size / 8;	/* # of 64-bit words */
	struct adapter *adap = mc7->adapter;

	if (start >= size64 || start + n > size64)
		return -EINVAL;

	start *= (8 << mc7->width);
	while (n--) {
		int i;
		u64 val64 = 0;

		for (i = (1 << mc7->width) - 1; i >= 0; --i) {
			int attempts = 10;
			u32 val;

			t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR, start);
			t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0);
			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP);
			while ((val & F_BUSY) && attempts--)
				val = t3_read_reg(adap,
						  mc7->offset + A_MC7_BD_OP);
			if (val & F_BUSY)
				return -EIO;

			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1);
			if (mc7->width == 0) {
				val64 = t3_read_reg(adap,
						    mc7->offset +
						    A_MC7_BD_DATA0);
				val64 |= (u64) val << 32;
			} else {
				if (mc7->width > 1)
					val >>= shift[mc7->width];
				val64 |= (u64) val << (step[mc7->width] * i);
			}
			start += 8;
		}
		*buf++ = val64;
	}
	return 0;
}
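/*
 * Usage sketch (illustrative only): read the first four 64-bit words of the
 * CM memory controller through the backdoor interface.
 *
 *	u64 buf[4];
 *
 *	if (t3_mc7_bd_read(&adapter->cm, 0, 4, buf))
 *		CH_ERR(adapter, "CM backdoor read failed\n");
 */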
/*
 * Initialize MI1.
 */
static void mi1_init(struct adapter *adap, const struct adapter_info *ai)
{
	u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
	u32 val = F_PREEN | V_CLKDIV(clkdiv);

	t3_write_reg(adap, A_MI1_CFG, val);
}

#define MDIO_ATTEMPTS 20

/*
 * MI1 read/write operations for clause 22 PHYs.
 */
static int t3_mi1_read(struct net_device *dev, int phy_addr, int mmd_addr,
		       u16 reg_addr)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int ret;
	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);

	mutex_lock(&adapter->mdio_lock);
	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
	if (!ret)
		ret = t3_read_reg(adapter, A_MI1_DATA);
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}

static int t3_mi1_write(struct net_device *dev, int phy_addr, int mmd_addr,
			u16 reg_addr, u16 val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int ret;
	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);

	mutex_lock(&adapter->mdio_lock);
	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, val);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}

static const struct mdio_ops mi1_mdio_ops = {
	.read = t3_mi1_read,
	.write = t3_mi1_write,
	.mode_support = MDIO_SUPPORTS_C22
};
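/*
 * Usage sketch (illustrative only): a clause 22 register read through the
 * ops table; mmd_addr is ignored for clause 22 accesses.  MII_BMSR is
 * assumed to come from linux/mii.h (only MII_BMCR, MII_CTRL1000 and
 * MII_ADVERTISE are used elsewhere in this file).
 *
 *	int bmsr = mi1_mdio_ops.read(dev, phy_addr, 0, MII_BMSR);
 */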
/*
 * Performs the address cycle for clause 45 PHYs.
 * Must be called with the MDIO_LOCK held.
 */
static int mi1_wr_addr(struct adapter *adapter, int phy_addr, int mmd_addr,
		       int reg_addr)
{
	u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);

	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), 0);
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, reg_addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
	return t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
			       MDIO_ATTEMPTS, 10);
}

/*
 * MI1 read/write operations for indirect-addressed PHYs.
 */
static int mi1_ext_read(struct net_device *dev, int phy_addr, int mmd_addr,
			u16 reg_addr)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int ret;

	mutex_lock(&adapter->mdio_lock);
	ret = mi1_wr_addr(adapter, phy_addr, mmd_addr, reg_addr);
	if (!ret) {
		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));
		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
				      MDIO_ATTEMPTS, 10);
		if (!ret)
			ret = t3_read_reg(adapter, A_MI1_DATA);
	}
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}

static int mi1_ext_write(struct net_device *dev, int phy_addr, int mmd_addr,
			 u16 reg_addr, u16 val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int ret;

	mutex_lock(&adapter->mdio_lock);
	ret = mi1_wr_addr(adapter, phy_addr, mmd_addr, reg_addr);
	if (!ret) {
		t3_write_reg(adapter, A_MI1_DATA, val);
		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
				      MDIO_ATTEMPTS, 10);
	}
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}

static const struct mdio_ops mi1_mdio_ext_ops = {
	.read = mi1_ext_read,
	.write = mi1_ext_write,
	.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22
};

/**
 * t3_mdio_change_bits - modify the value of a PHY register
 * @phy: the PHY to operate on
 * @mmd: the device address
 * @reg: the register address
 * @clear: what part of the register value to mask off
 * @set: what part of the register value to set
 *
 * Changes the value of a PHY register by applying a mask to its current
 * value and ORing the result with a new value.
 */
int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
			unsigned int set)
{
	int ret;
	unsigned int val;

	ret = t3_mdio_read(phy, mmd, reg, &val);
	if (!ret) {
		val &= ~clear;
		ret = t3_mdio_write(phy, mmd, reg, val | set);
	}
	return ret;
}
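/*
 * Usage sketch (illustrative only): clear the low-power bit of the control
 * register without setting anything else (mmd 0 for a 10/100/1000 PHY, as
 * noted in the t3_phy_reset() documentation below).
 *
 *	err = t3_mdio_change_bits(phy, 0, MDIO_CTRL1, MDIO_CTRL1_LPOWER, 0);
 */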
/**
 * t3_phy_reset - reset a PHY block
 * @phy: the PHY to operate on
 * @mmd: the device address of the PHY block to reset
 * @wait: how long to wait for the reset to complete in 1ms increments
 *
 * Resets a PHY block and optionally waits for the reset to complete.
 * @mmd should be 0 for 10/100/1000 PHYs and the device address to reset
 * for 10G PHYs.
 */
int t3_phy_reset(struct cphy *phy, int mmd, int wait)
{
	int err;
	unsigned int ctl;

	err = t3_mdio_change_bits(phy, mmd, MDIO_CTRL1, MDIO_CTRL1_LPOWER,
				  MDIO_CTRL1_RESET);
	if (err || !wait)
		return err;

	do {
		err = t3_mdio_read(phy, mmd, MDIO_CTRL1, &ctl);
		if (err)
			return err;
		ctl &= MDIO_CTRL1_RESET;
		if (ctl)
			msleep(1);
	} while (ctl && --wait);

	return ctl ? -1 : 0;
}

/**
 * t3_phy_advertise - set the PHY advertisement registers for autoneg
 * @phy: the PHY to operate on
 * @advert: bitmap of capabilities the PHY should advertise
 *
 * Sets a 10/100/1000 PHY's advertisement registers to advertise the
 * requested capabilities.
 */
int t3_phy_advertise(struct cphy *phy, unsigned int advert)
{
	int err;
	unsigned int val = 0;

	err = t3_mdio_read(phy, MDIO_DEVAD_NONE, MII_CTRL1000, &val);
	if (err)
		return err;

	val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
	if (advert & ADVERTISED_1000baseT_Half)
		val |= ADVERTISE_1000HALF;
	if (advert & ADVERTISED_1000baseT_Full)
		val |= ADVERTISE_1000FULL;

	err = t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_CTRL1000, val);
	if (err)
		return err;

	val = 1;
	if (advert & ADVERTISED_10baseT_Half)
		val |= ADVERTISE_10HALF;
	if (advert & ADVERTISED_10baseT_Full)
		val |= ADVERTISE_10FULL;
	if (advert & ADVERTISED_100baseT_Half)
		val |= ADVERTISE_100HALF;
	if (advert & ADVERTISED_100baseT_Full)
		val |= ADVERTISE_100FULL;
	if (advert & ADVERTISED_Pause)
		val |= ADVERTISE_PAUSE_CAP;
	if (advert & ADVERTISED_Asym_Pause)
		val |= ADVERTISE_PAUSE_ASYM;
	return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_ADVERTISE, val);
}
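/*
 * Usage sketch (illustrative only): advertise 100 and 1000 Mb/s full duplex
 * plus symmetric pause.
 *
 *	err = t3_phy_advertise(phy, ADVERTISED_100baseT_Full |
 *			       ADVERTISED_1000baseT_Full | ADVERTISED_Pause);
 */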
/**
 * t3_phy_advertise_fiber - set fiber PHY advertisement register
 * @phy: the PHY to operate on
 * @advert: bitmap of capabilities the PHY should advertise
 *
 * Sets a fiber PHY's advertisement register to advertise the
 * requested capabilities.
 */
int t3_phy_advertise_fiber(struct cphy *phy, unsigned int advert)
{
	unsigned int val = 0;

	if (advert & ADVERTISED_1000baseT_Half)
		val |= ADVERTISE_1000XHALF;
	if (advert & ADVERTISED_1000baseT_Full)
		val |= ADVERTISE_1000XFULL;
	if (advert & ADVERTISED_Pause)
		val |= ADVERTISE_1000XPAUSE;
	if (advert & ADVERTISED_Asym_Pause)
		val |= ADVERTISE_1000XPSE_ASYM;
	return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_ADVERTISE, val);
}

/**
 * t3_set_phy_speed_duplex - force PHY speed and duplex
 * @phy: the PHY to operate on
 * @speed: requested PHY speed
 * @duplex: requested PHY duplex
 *
 * Force a 10/100/1000 PHY's speed and duplex.  This also disables
 * auto-negotiation except for GigE, where auto-negotiation is mandatory.
 */
int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
{
	int err;
	unsigned int ctl;

	err = t3_mdio_read(phy, MDIO_DEVAD_NONE, MII_BMCR, &ctl);
	if (err)
		return err;

	if (speed >= 0) {
		ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
		if (speed == SPEED_100)
			ctl |= BMCR_SPEED100;
		else if (speed == SPEED_1000)
			ctl |= BMCR_SPEED1000;
	}
	if (duplex >= 0) {
		ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
		if (duplex == DUPLEX_FULL)
			ctl |= BMCR_FULLDPLX;
	}
	if (ctl & BMCR_SPEED1000)	/* auto-negotiation required for GigE */
		ctl |= BMCR_ANENABLE;
	return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_BMCR, ctl);
}
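/*
 * Usage sketch (illustrative only): force 100 Mb/s full duplex; because the
 * speed is not SPEED_1000 this also turns auto-negotiation off.
 *
 *	err = t3_set_phy_speed_duplex(phy, SPEED_100, DUPLEX_FULL);
 */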
int t3_phy_lasi_intr_enable(struct cphy *phy)
{
	return t3_mdio_write(phy, MDIO_MMD_PMAPMD, LASI_CTRL, 1);
}

int t3_phy_lasi_intr_disable(struct cphy *phy)
{
	return t3_mdio_write(phy, MDIO_MMD_PMAPMD, LASI_CTRL, 0);
}

int t3_phy_lasi_intr_clear(struct cphy *phy)
{
	u32 val;

	return t3_mdio_read(phy, MDIO_MMD_PMAPMD, LASI_STAT, &val);
}

int t3_phy_lasi_intr_handler(struct cphy *phy)
{
	unsigned int status;
	int err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, LASI_STAT, &status);

	if (err)
		return err;
	return (status & 1) ? cphy_cause_link_change : 0;
}
static const struct adapter_info t3_adap_info[] = {
	{1, 1, 0,
	 F_GPIO2_OEN | F_GPIO4_OEN |
	 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
	 &mi1_mdio_ops, "Chelsio PE9000"},
	{1, 1, 0,
	 F_GPIO2_OEN | F_GPIO4_OEN |
	 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
	 &mi1_mdio_ops, "Chelsio T302"},
	{1, 0, 0,
	 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
	 F_GPIO11_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
	 { 0 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	 &mi1_mdio_ext_ops, "Chelsio T310"},
	{1, 1, 0,
	 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
	 F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
	 F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
	 { S_GPIO9, S_GPIO3 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	 &mi1_mdio_ext_ops, "Chelsio T320"},
	{},
	{},
	{1, 0, 0,
	 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO6_OEN | F_GPIO7_OEN |
	 F_GPIO10_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
	 { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	 &mi1_mdio_ext_ops, "Chelsio T310" },
};

/*
 * Return the adapter_info structure with a given index.  Out-of-range indices
 * return NULL.
 */
const struct adapter_info *t3_get_adapter_info(unsigned int id)
{
	return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
}
struct port_type_info {
	int (*phy_prep)(struct cphy *phy, struct adapter *adapter,
			int phy_addr, const struct mdio_ops *ops);
};

static const struct port_type_info port_types[] = {
	{ NULL },
	{ t3_ael1002_phy_prep },
	{ t3_vsc8211_phy_prep },
	{ NULL },
	{ t3_xaui_direct_phy_prep },
	{ t3_ael2005_phy_prep },
	{ t3_qt2045_phy_prep },
	{ t3_ael1006_phy_prep },
	{ NULL },
};

#define VPD_ENTRY(name, len) \
	u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]

/*
 * Partial EEPROM Vital Product Data structure.  Includes only the ID and
 * VPD-R sections.
 */
struct t3_vpd {
	u8 id_tag;
	u8 id_len[2];
	u8 id_data[16];
	u8 vpdr_tag;
	u8 vpdr_len[2];
	VPD_ENTRY(pn, 16);	/* part number */
	VPD_ENTRY(ec, 16);	/* EC level */
	VPD_ENTRY(sn, SERNUM_LEN); /* serial number */
	VPD_ENTRY(na, 12);	/* MAC address base */
	VPD_ENTRY(cclk, 6);	/* core clock */
	VPD_ENTRY(mclk, 6);	/* mem clock */
	VPD_ENTRY(uclk, 6);	/* uP clk */
	VPD_ENTRY(mdc, 6);	/* MDIO clk */
	VPD_ENTRY(mt, 2);	/* mem timing */
	VPD_ENTRY(xaui0cfg, 6);	/* XAUI0 config */
	VPD_ENTRY(xaui1cfg, 6);	/* XAUI1 config */
	VPD_ENTRY(port0, 2);	/* PHY0 complex */
	VPD_ENTRY(port1, 2);	/* PHY1 complex */
	VPD_ENTRY(port2, 2);	/* PHY2 complex */
	VPD_ENTRY(port3, 2);	/* PHY3 complex */
	VPD_ENTRY(rv, 1);	/* csum */
	u32 pad;		/* for multiple-of-4 sizing and alignment */
};

#define EEPROM_MAX_POLL   40
#define EEPROM_STAT_ADDR  0x4000
#define VPD_BASE          0xc00
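/*
 * For reference, VPD_ENTRY(sn, SERNUM_LEN) in the structure above expands to
 * the three fields of a VPD keyword entry:
 *
 *	u8 sn_kword[2]; u8 sn_len; u8 sn_data[SERNUM_LEN];
 */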
/**
 * t3_seeprom_read - read a VPD EEPROM location
 * @adapter: adapter to read
 * @addr: EEPROM address
 * @data: where to store the read data
 *
 * Read a 32-bit word from a location in VPD EEPROM using the card's PCI
 * VPD ROM capability.  A zero is written to the flag bit when the
 * address is written to the control register.  The hardware device will
 * set the flag to 1 when 4 bytes have been read into the data register.
 */
int t3_seeprom_read(struct adapter *adapter, u32 addr, __le32 *data)
{
	u16 val;
	int attempts = EEPROM_MAX_POLL;
	u32 v;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
		return -EINVAL;

	pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR, addr);
	do {
		udelay(10);
		pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
	} while (!(val & PCI_VPD_ADDR_F) && --attempts);

	if (!(val & PCI_VPD_ADDR_F)) {
		CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	pci_read_config_dword(adapter->pdev, base + PCI_VPD_DATA, &v);
	*data = cpu_to_le32(v);
	return 0;
}
/**
 * t3_seeprom_write - write a VPD EEPROM location
 * @adapter: adapter to write
 * @addr: EEPROM address
 * @data: value to write
 *
 * Write a 32-bit word to a location in VPD EEPROM using the card's PCI
 * VPD ROM capability.
 */
int t3_seeprom_write(struct adapter *adapter, u32 addr, __le32 data)
{
	u16 val;
	int attempts = EEPROM_MAX_POLL;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
		return -EINVAL;

	pci_write_config_dword(adapter->pdev, base + PCI_VPD_DATA,
			       le32_to_cpu(data));
	pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR,
			      addr | PCI_VPD_ADDR_F);
	do {
		msleep(1);
		pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
	} while ((val & PCI_VPD_ADDR_F) && --attempts);

	if (val & PCI_VPD_ADDR_F) {
		CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	return 0;
}

/**
 * t3_seeprom_wp - enable/disable EEPROM write protection
 * @adapter: the adapter
 * @enable: 1 to enable write protection, 0 to disable it
 *
 * Enables or disables write protection on the serial EEPROM.
 */
int t3_seeprom_wp(struct adapter *adapter, int enable)
{
	return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
}
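/*
 * Usage sketch (illustrative only): read one 32-bit word of VPD; addresses
 * must be 4-byte aligned, as enforced by t3_seeprom_read().
 *
 *	__le32 word;
 *
 *	err = t3_seeprom_read(adapter, VPD_BASE, &word);
 */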
/*
 * Convert a character holding a hex digit to a number.
 */
static unsigned int hex2int(unsigned char c)
{
	return isdigit(c) ? c - '0' : toupper(c) - 'A' + 10;
}

/**
 * get_vpd_params - read VPD parameters from VPD EEPROM
 * @adapter: adapter to read
 * @p: where to store the parameters
 *
 * Reads card parameters stored in VPD EEPROM.
 */
static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
	int i, addr, ret;
	struct t3_vpd vpd;

	/*
	 * Card information is normally at VPD_BASE but some early cards had
	 * it at 0.
	 */
	ret = t3_seeprom_read(adapter, VPD_BASE, (__le32 *)&vpd);
	if (ret)
		return ret;
	addr = vpd.id_tag == 0x82 ? VPD_BASE : 0;

	for (i = 0; i < sizeof(vpd); i += 4) {
		ret = t3_seeprom_read(adapter, addr + i,
				      (__le32 *)((u8 *)&vpd + i));
		if (ret)
			return ret;
	}

	p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
	p->mclk = simple_strtoul(vpd.mclk_data, NULL, 10);
	p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10);
	p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10);
	p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10);
	memcpy(p->sn, vpd.sn_data, SERNUM_LEN);

	/* Old eeproms didn't have port information */
	if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
		p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
		p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
	} else {
		p->port_type[0] = hex2int(vpd.port0_data[0]);
		p->port_type[1] = hex2int(vpd.port1_data[0]);
		p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16);
		p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16);
	}

	for (i = 0; i < 6; i++)
		p->eth_base[i] = hex2int(vpd.na_data[2 * i]) * 16 +
				 hex2int(vpd.na_data[2 * i + 1]);
	return 0;
}
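/*
 * Note on the MAC address decoding in get_vpd_params(): the "na" VPD field
 * stores the address as 12 ASCII hex digits, so, e.g., an na_data of
 * "00074312aabb" (a made-up value) decodes to the bytes 00:07:43:12:aa:bb
 * in p->eth_base.
 */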
/* serial flash and firmware constants */
enum {
	SF_ATTEMPTS = 5,	/* max retries for SF1 operations */
	SF_SEC_SIZE = 64 * 1024,	/* serial flash sector size */
	SF_SIZE = SF_SEC_SIZE * 8,	/* serial flash size */

	/* flash command opcodes */
	SF_PROG_PAGE = 2,	/* program page */
	SF_WR_DISABLE = 4,	/* disable writes */
	SF_RD_STATUS = 5,	/* read status register */
	SF_WR_ENABLE = 6,	/* enable writes */
	SF_RD_DATA_FAST = 0xb,	/* read flash */
	SF_ERASE_SECTOR = 0xd8,	/* erase sector */

	FW_FLASH_BOOT_ADDR = 0x70000,	/* start address of FW in flash */
	FW_VERS_ADDR = 0x7fffc,	/* flash address holding FW version */
	FW_MIN_SIZE = 8		/* at least version and csum */
};

/**
 * sf1_read - read data from the serial flash
 * @adapter: the adapter
 * @byte_cnt: number of bytes to read
 * @cont: whether another operation will be chained
 * @valp: where to store the read data
 *
 * Reads up to 4 bytes of data from the serial flash.  The location of
 * the read needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
		    u32 *valp)
{
	int ret;

	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
	ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
	if (!ret)
		*valp = t3_read_reg(adapter, A_SF_DATA);
	return ret;
}

/**
 * sf1_write - write data to the serial flash
 * @adapter: the adapter
 * @byte_cnt: number of bytes to write
 * @cont: whether another operation will be chained
 * @val: value to write
 *
 * Writes up to 4 bytes of data to the serial flash.  The location of
 * the write needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
		     u32 val)
{
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t3_write_reg(adapter, A_SF_DATA, val);
	t3_write_reg(adapter, A_SF_OP,
		     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
	return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
}
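/*
 * Usage sketch (illustrative only): chain a status-register read, exactly as
 * flash_wait_op() does below; @cont on the first operation keeps the chip
 * selected so the following read returns the status byte.
 *
 *	u32 status;
 *
 *	ret = sf1_write(adapter, 1, 1, SF_RD_STATUS);
 *	if (!ret)
 *		ret = sf1_read(adapter, 1, 0, &status);
 */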
/**
 * flash_wait_op - wait for a flash operation to complete
 * @adapter: the adapter
 * @attempts: max number of polls of the status register
 * @delay: delay between polls in ms
 *
 * Wait for a flash operation to complete by polling the status register.
 */
static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
{
	int ret;
	u32 status;

	while (1) {
		if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 ||
		    (ret = sf1_read(adapter, 1, 0, &status)) != 0)
			return ret;
		if (!(status & 1))
			return 0;
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			msleep(delay);
	}
}
/**
 * t3_read_flash - read words from serial flash
 * @adapter: the adapter
 * @addr: the start address for the read
 * @nwords: how many 32-bit words to read
 * @data: where to store the read data
 * @byte_oriented: whether to store data as bytes or as words
 *
 * Read the specified number of 32-bit words from the serial flash.
 * If @byte_oriented is set the read data is stored as a byte array
 * (i.e., big-endian), otherwise as 32-bit words in the platform's
 * natural endianness.
 */
int t3_read_flash(struct adapter *adapter, unsigned int addr,
		  unsigned int nwords, u32 *data, int byte_oriented)
{
	int ret;

	if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
		return -EINVAL;

	addr = swab32(addr) | SF_RD_DATA_FAST;

	if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, data)) != 0)
		return ret;

	for (; nwords; nwords--, data++) {
		ret = sf1_read(adapter, 4, nwords > 1, data);
		if (ret)
			return ret;
		if (byte_oriented)
			*data = htonl(*data);
	}
	return 0;
}
/**
 * t3_write_flash - write up to a page of data to the serial flash
 * @adapter: the adapter
 * @addr: the start address to write
 * @n: length of data to write
 * @data: the data to write
 *
 * Writes up to a page of data (256 bytes) to the serial flash starting
 * at the given address.
 */
static int t3_write_flash(struct adapter *adapter, unsigned int addr,
			  unsigned int n, const u8 *data)
{
	int ret;
	u32 buf[64];
	unsigned int i, c, left, val, offset = addr & 0xff;

	if (addr + n > SF_SIZE || offset + n > 256)
		return -EINVAL;

	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, val)) != 0)
		return ret;

	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		ret = sf1_write(adapter, c, c != left, val);
		if (ret)
			return ret;
	}
	if ((ret = flash_wait_op(adapter, 5, 1)) != 0)
		return ret;

	/* Read the page to verify the write succeeded */
	ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
	if (ret)
		return ret;

	if (memcmp(data - n, (u8 *) buf + offset, n))
		return -EIO;
	return 0;
}
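/*
 * Usage sketch (illustrative only): writes may not cross a 256-byte page
 * boundary, so callers such as t3_load_fw() below split larger buffers into
 * page-sized chunks.
 *
 *	ret = t3_write_flash(adapter, FW_FLASH_BOOT_ADDR, 256, fw_data);
 */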
/**
 * t3_get_tp_version - read the tp sram version
 * @adapter: the adapter
 * @vers: where to place the version
 *
 * Reads the protocol sram version from sram.
 */
int t3_get_tp_version(struct adapter *adapter, u32 *vers)
{
	int ret;

	/* Get version loaded in SRAM */
	t3_write_reg(adapter, A_TP_EMBED_OP_FIELD0, 0);
	ret = t3_wait_op_done(adapter, A_TP_EMBED_OP_FIELD0,
			      1, 1, 5, 1);
	if (ret)
		return ret;

	*vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);

	return 0;
}

/**
 * t3_check_tpsram_version - read the tp sram version
 * @adapter: the adapter
 *
 * Reads the protocol sram version from flash.
 */
int t3_check_tpsram_version(struct adapter *adapter)
{
	int ret;
	u32 vers;
	unsigned int major, minor;

	if (adapter->params.rev == T3_REV_A)
		return 0;

	ret = t3_get_tp_version(adapter, &vers);
	if (ret)
		return ret;

	major = G_TP_VERSION_MAJOR(vers);
	minor = G_TP_VERSION_MINOR(vers);

	if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
		return 0;
	else {
		CH_ERR(adapter, "found wrong TP version (%u.%u), "
		       "driver compiled for version %d.%d\n", major, minor,
		       TP_VERSION_MAJOR, TP_VERSION_MINOR);
	}
	return -EINVAL;
}

/**
 * t3_check_tpsram - check if provided protocol SRAM is compatible with this driver
 * @adapter: the adapter
 * @tp_sram: the firmware image to write
 * @size: image size
 *
 * Checks if an adapter's tp sram is compatible with the driver.
 * Returns 0 if the versions are compatible, a negative error otherwise.
 */
int t3_check_tpsram(struct adapter *adapter, const u8 *tp_sram,
		    unsigned int size)
{
	u32 csum;
	unsigned int i;
	const __be32 *p = (const __be32 *)tp_sram;

	/* Verify checksum */
	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);
	if (csum != 0xffffffff) {
		CH_ERR(adapter, "corrupted protocol SRAM image, checksum %u\n",
		       csum);
		return -EINVAL;
	}

	return 0;
}
enum fw_version_type {
	FW_VERSION_N3,
	FW_VERSION_T3
};

/**
 * t3_get_fw_version - read the firmware version
 * @adapter: the adapter
 * @vers: where to place the version
 *
 * Reads the FW version from flash.
 */
int t3_get_fw_version(struct adapter *adapter, u32 *vers)
{
	return t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);
}

/**
 * t3_check_fw_version - check if the FW is compatible with this driver
 * @adapter: the adapter
 *
 * Checks if an adapter's FW is compatible with the driver.  Returns 0
 * if the versions are compatible, a negative error otherwise.
 */
int t3_check_fw_version(struct adapter *adapter)
{
	int ret;
	u32 vers;
	unsigned int type, major, minor;

	ret = t3_get_fw_version(adapter, &vers);
	if (ret)
		return ret;

	type = G_FW_VERSION_TYPE(vers);
	major = G_FW_VERSION_MAJOR(vers);
	minor = G_FW_VERSION_MINOR(vers);

	if (type == FW_VERSION_T3 && major == FW_VERSION_MAJOR &&
	    minor == FW_VERSION_MINOR)
		return 0;
	else if (major != FW_VERSION_MAJOR || minor < FW_VERSION_MINOR)
		CH_WARN(adapter, "found old FW minor version (%u.%u), "
			"driver compiled for version %u.%u\n", major, minor,
			FW_VERSION_MAJOR, FW_VERSION_MINOR);
	else {
		CH_WARN(adapter, "found newer FW version (%u.%u), "
			"driver compiled for version %u.%u\n", major, minor,
			FW_VERSION_MAJOR, FW_VERSION_MINOR);
		return 0;
	}
	return -EINVAL;
}
/**
 * t3_flash_erase_sectors - erase a range of flash sectors
 * @adapter: the adapter
 * @start: the first sector to erase
 * @end: the last sector to erase
 *
 * Erases the sectors in the given range.
 */
static int t3_flash_erase_sectors(struct adapter *adapter, int start, int end)
{
	while (start <= end) {
		int ret;

		if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
		    (ret = sf1_write(adapter, 4, 0,
				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
		    (ret = flash_wait_op(adapter, 5, 500)) != 0)
			return ret;
		start++;
	}
	return 0;
}
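/*
 * Usage sketch (illustrative only): erase the single 64 KB sector holding
 * the firmware image, as t3_load_fw() does below; shifting the byte address
 * right by 16 converts it to a sector number.
 *
 *	int fw_sector = FW_FLASH_BOOT_ADDR >> 16;
 *
 *	ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);
 */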
/**
 * t3_load_fw - download firmware
 * @adapter: the adapter
 * @fw_data: the firmware image to write
 * @size: image size
 *
 * Write the supplied firmware image to the card's serial flash.
 * The FW image has the following sections: @size - 8 bytes of code and
 * data, followed by 4 bytes of FW version, followed by the 32-bit
 * 1's complement checksum of the whole image.
 */
int t3_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size)
{
	u32 csum;
	unsigned int i;
	const __be32 *p = (const __be32 *)fw_data;
	int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;

	if ((size & 3) || size < FW_MIN_SIZE)
		return -EINVAL;
	if (size > FW_VERS_ADDR + 8 - FW_FLASH_BOOT_ADDR)
		return -EFBIG;

	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);
	if (csum != 0xffffffff) {
		CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
		       csum);
		return -EINVAL;
	}

	ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);
	if (ret)
		goto out;

	size -= 8;		/* trim off version and checksum */
	for (addr = FW_FLASH_BOOT_ADDR; size;) {
		unsigned int chunk_size = min(size, 256U);

		ret = t3_write_flash(adapter, addr, chunk_size, fw_data);
		if (ret)
			goto out;

		addr += chunk_size;
		fw_data += chunk_size;
		size -= chunk_size;
	}

	ret = t3_write_flash(adapter, FW_VERS_ADDR, 4, fw_data);
out:
	if (ret)
		CH_ERR(adapter, "firmware download failed, error %d\n", ret);
	return ret;
}
#define CIM_CTL_BASE 0x2000

/**
 * t3_cim_ctl_blk_read - read a block from CIM control region
 * @adap: the adapter
 * @addr: the start address within the CIM control region
 * @n: number of words to read
 * @valp: where to store the result
 *
 * Reads a block of 4-byte words from the CIM control region.
 */
int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr,
			unsigned int n, unsigned int *valp)
{
	int ret = 0;

	if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
		return -EBUSY;

	for ( ; !ret && n--; addr += 4) {
		t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr);
		ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
				      0, 5, 2);
		if (!ret)
			*valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA);
	}
	return ret;
}

static void t3_gate_rx_traffic(struct cmac *mac, u32 *rx_cfg,
			       u32 *rx_hash_high, u32 *rx_hash_low)
{
	/* stop Rx unicast traffic */
	t3_mac_disable_exact_filters(mac);

	/* stop broadcast, multicast, promiscuous mode traffic */
	*rx_cfg = t3_read_reg(mac->adapter, A_XGM_RX_CFG);
	t3_set_reg_field(mac->adapter, A_XGM_RX_CFG,
			 F_ENHASHMCAST | F_DISBCAST | F_COPYALLFRAMES,
			 F_DISBCAST);

	*rx_hash_high = t3_read_reg(mac->adapter, A_XGM_RX_HASH_HIGH);
	t3_write_reg(mac->adapter, A_XGM_RX_HASH_HIGH, 0);

	*rx_hash_low = t3_read_reg(mac->adapter, A_XGM_RX_HASH_LOW);
	t3_write_reg(mac->adapter, A_XGM_RX_HASH_LOW, 0);

	/* Leave time to drain max RX fifo */
	msleep(1);
}

static void t3_open_rx_traffic(struct cmac *mac, u32 rx_cfg,
			       u32 rx_hash_high, u32 rx_hash_low)
{
	t3_mac_enable_exact_filters(mac);
	t3_set_reg_field(mac->adapter, A_XGM_RX_CFG,
			 F_ENHASHMCAST | F_DISBCAST | F_COPYALLFRAMES,
			 rx_cfg);
	t3_write_reg(mac->adapter, A_XGM_RX_HASH_HIGH, rx_hash_high);
	t3_write_reg(mac->adapter, A_XGM_RX_HASH_LOW, rx_hash_low);
}
/**
 * t3_link_changed - handle interface link changes
 * @adapter: the adapter
 * @port_id: the port index that changed link state
 *
 * Called when a port's link settings change to propagate the new values
 * to the associated PHY and MAC.  After performing the common tasks it
 * invokes an OS-specific handler.
 */
void t3_link_changed(struct adapter *adapter, int port_id)
{
	int link_ok, speed, duplex, fc;
	struct port_info *pi = adap2pinfo(adapter, port_id);
	struct cphy *phy = &pi->phy;
	struct cmac *mac = &pi->mac;
	struct link_config *lc = &pi->link_config;

	phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);

	if (!lc->link_ok && link_ok) {
		u32 rx_cfg, rx_hash_high, rx_hash_low;
		u32 status;

		t3_xgm_intr_enable(adapter, port_id);
		t3_gate_rx_traffic(mac, &rx_cfg, &rx_hash_high, &rx_hash_low);
		t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0);
		t3_mac_enable(mac, MAC_DIRECTION_RX);

		status = t3_read_reg(adapter, A_XGM_INT_STATUS + mac->offset);
		if (status & F_LINKFAULTCHANGE) {
			mac->stats.link_faults++;
			pi->link_fault = 1;
		}
		t3_open_rx_traffic(mac, rx_cfg, rx_hash_high, rx_hash_low);
	}

	if (lc->requested_fc & PAUSE_AUTONEG)
		fc &= lc->requested_fc;
	else
		fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);

	if (link_ok == lc->link_ok && speed == lc->speed &&
	    duplex == lc->duplex && fc == lc->fc)
		return;	/* nothing changed */

	if (link_ok != lc->link_ok && adapter->params.rev > 0 &&
	    uses_xaui(adapter)) {
		if (link_ok)
			t3b_pcs_reset(mac);
		t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
			     link_ok ? F_TXACTENABLE | F_RXEN : 0);
	}
	lc->link_ok = link_ok;
	lc->speed = speed < 0 ? SPEED_INVALID : speed;
	lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;

	if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
		/* Set MAC speed, duplex, and flow control to match PHY. */
		t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc);
		lc->fc = fc;
	}

	t3_os_link_changed(adapter, port_id, link_ok, speed, duplex, fc);
}
void t3_link_fault(struct adapter *adapter, int port_id)
{
	struct port_info *pi = adap2pinfo(adapter, port_id);
	struct cmac *mac = &pi->mac;
	struct cphy *phy = &pi->phy;
	struct link_config *lc = &pi->link_config;
	int link_ok, speed, duplex, fc, link_fault;
	u32 rx_cfg, rx_hash_high, rx_hash_low;

	t3_gate_rx_traffic(mac, &rx_cfg, &rx_hash_high, &rx_hash_low);

	if (adapter->params.rev > 0 && uses_xaui(adapter))
		t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset, 0);

	t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0);
	t3_mac_enable(mac, MAC_DIRECTION_RX);

	t3_open_rx_traffic(mac, rx_cfg, rx_hash_high, rx_hash_low);

	link_fault = t3_read_reg(adapter,
				 A_XGM_INT_STATUS + mac->offset);
	link_fault &= F_LINKFAULTCHANGE;

	phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);

	if (link_fault) {
		lc->link_ok = 0;
		lc->speed = SPEED_INVALID;
		lc->duplex = DUPLEX_INVALID;

		t3_os_link_fault(adapter, port_id, 0);

		/* Account link faults only when the phy reports a link up */
		if (link_ok)
			mac->stats.link_faults++;
	} else {
		if (link_ok)
			t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
				     F_TXACTENABLE | F_RXEN);

		pi->link_fault = 0;
		lc->link_ok = (unsigned char)link_ok;
		lc->speed = speed < 0 ? SPEED_INVALID : speed;
		lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
		t3_os_link_fault(adapter, port_id, link_ok);
	}
}
/**
 * t3_link_start - apply link configuration to MAC/PHY
 * @phy: the PHY to setup
 * @mac: the MAC to setup
 * @lc: the requested link configuration
 *
 * Set up a port's MAC and PHY according to a desired link configuration.
 * - If the PHY can auto-negotiate first decide what to advertise, then
 *   enable/disable auto-negotiation as desired, and reset.
 * - If the PHY does not auto-negotiate just reset it.
 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
 *   otherwise do it later based on the outcome of auto-negotiation.
 */
int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
{
	unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);

	lc->link_ok = 0;
	if (lc->supported & SUPPORTED_Autoneg) {
		lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);
		if (fc) {
			lc->advertising |= ADVERTISED_Asym_Pause;
			if (fc & PAUSE_RX)
				lc->advertising |= ADVERTISED_Pause;
		}
		phy->ops->advertise(phy, lc->advertising);

		if (lc->autoneg == AUTONEG_DISABLE) {
			lc->speed = lc->requested_speed;
			lc->duplex = lc->requested_duplex;
			lc->fc = (unsigned char)fc;
			t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex,
						   fc);
			/* Also disables autoneg */
			phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
		} else
			phy->ops->autoneg_enable(phy);
	} else {
		t3_mac_set_speed_duplex_fc(mac, -1, -1, fc);
		lc->fc = (unsigned char)fc;
		phy->ops->reset(phy, 0);
	}
	return 0;
}
/**
 * t3_set_vlan_accel - control HW VLAN extraction
 * @adapter: the adapter
 * @ports: bitmap of adapter ports to operate on
 * @on: enable (1) or disable (0) HW VLAN extraction
 *
 * Enables or disables HW extraction of VLAN tags for the given ports.
 */
void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on)
{
	t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
			 ports << S_VLANEXTRACTIONENABLE,
			 on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
}
struct intr_info {
	unsigned int mask;	/* bits to check in interrupt status */
	const char *msg;	/* message to print or NULL */
	short stat_idx;		/* stat counter to increment or -1 */
	unsigned short fatal;	/* whether the condition reported is fatal */
};

/**
 * t3_handle_intr_status - table driven interrupt handler
 * @adapter: the adapter that generated the interrupt
 * @reg: the interrupt status register to process
 * @mask: a mask to apply to the interrupt status
 * @acts: table of interrupt actions
 * @stats: statistics counters tracking interrupt occurrences
 *
 * A table driven interrupt handler that applies a set of masks to an
 * interrupt status word and performs the corresponding actions if the
 * interrupts described by the mask have occurred.  The actions include
 * optionally printing a warning or alert message, and optionally
 * incrementing a stat counter.  The table is terminated by an entry
 * specifying mask 0.  Returns the number of fatal interrupt conditions.
 */
static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg,
				 unsigned int mask,
				 const struct intr_info *acts,
				 unsigned long *stats)
{
	int fatal = 0;
	unsigned int status = t3_read_reg(adapter, reg) & mask;

	for (; acts->mask; ++acts) {
		if (!(status & acts->mask))
			continue;
		if (acts->fatal) {
			fatal++;
			CH_ALERT(adapter, "%s (0x%x)\n",
				 acts->msg, status & acts->mask);
		} else if (acts->msg)
			CH_WARN(adapter, "%s (0x%x)\n",
				acts->msg, status & acts->mask);
		if (acts->stat_idx >= 0)
			stats[acts->stat_idx]++;
	}
	if (status)	/* clear processed interrupts */
		t3_write_reg(adapter, reg, status);
	return fatal;
}
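/*
 * Usage sketch (illustrative only): a minimal action table; an entry with
 * mask 0 terminates the table.  This mirrors mps_intr_handler() further
 * below.
 *
 *	static const struct intr_info example_intr_info[] = {
 *		{0x1ff, "MPS parity error", -1, 1},
 *		{0}
 *	};
 *
 *	if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
 *				  example_intr_info, NULL))
 *		t3_fatal_err(adapter);
 */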
#define SGE_INTR_MASK (F_RSPQDISABLED | \
		       F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR | \
		       F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
		       F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
		       V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
		       F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
		       F_HIRCQPARITYERROR)
#define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
		       F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
		       F_NFASRCHFAIL)
#define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
#define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
		       V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
		       F_TXFIFO_UNDERRUN)
#define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
			F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
			F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
			F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \
			V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \
			V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */)
#define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\
			F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
			/* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \
			F_RETRYBUFPARERR | F_RETRYLUTPARERR | F_RXPARERR | \
			F_TXPARERR | V_BISTERR(M_BISTERR))
#define ULPRX_INTR_MASK (F_PARERRDATA | F_PARERRPCMD | F_ARBPF1PERR | \
			 F_ARBPF0PERR | F_ARBFPERR | F_PCMDMUXPERR | \
			 F_DATASELFRAMEERR1 | F_DATASELFRAMEERR0)
#define ULPTX_INTR_MASK 0xfc
#define CPLSW_INTR_MASK (F_CIM_OP_MAP_PERR | F_TP_FRAMING_ERROR | \
			 F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
			 F_ZERO_SWITCH_ERROR)
#define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
		       F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
		       F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
		       F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT | \
		       F_DRAMPARERR | F_ICACHEPARERR | F_DCACHEPARERR | \
		       F_OBQSGEPARERR | F_OBQULPHIPARERR | F_OBQULPLOPARERR | \
		       F_IBQSGELOPARERR | F_IBQSGEHIPARERR | F_IBQULPPARERR | \
		       F_IBQTPPARERR | F_ITAGPARERR | F_DTAGPARERR)
#define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
			V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
			V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
#define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \
			V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \
			V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR))
#define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \
		       V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
		       V_RXTPPARERRENB(M_RXTPPARERRENB) | \
		       V_MCAPARERRENB(M_MCAPARERRENB))
#define XGM_EXTRA_INTR_MASK (F_LINKFAULTCHANGE)
#define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
		      F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
		      F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
		      F_MPS0 | F_CPL_SWITCH)
/*
 * Interrupt handler for the PCIX1 module.
 */
static void pci_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pcix1_intr_info[] = {
		{F_MSTDETPARERR, "PCI master detected parity error", -1, 1},
		{F_SIGTARABT, "PCI signaled target abort", -1, 1},
		{F_RCVTARABT, "PCI received target abort", -1, 1},
		{F_RCVMSTABT, "PCI received master abort", -1, 1},
		{F_SIGSYSERR, "PCI signaled system error", -1, 1},
		{F_DETPARERR, "PCI detected parity error", -1, 1},
		{F_SPLCMPDIS, "PCI split completion discarded", -1, 1},
		{F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1},
		{F_RCVSPLCMPERR, "PCI received split completion error", -1,
		 1},
		{F_DETCORECCERR, "PCI correctable ECC error",
		 STAT_PCI_CORR_ECC, 0},
		{F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1},
		{F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
		{V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1,
		 1},
		{V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1,
		 1},
		{V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1,
		 1},
		{V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
		 "error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
				  pcix1_intr_info, adapter->irq_stats))
		t3_fatal_err(adapter);
}
/*
 * Interrupt handler for the PCIE module.
 */
static void pcie_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pcie_intr_info[] = {
		{F_PEXERR, "PCI PEX error", -1, 1},
		{F_UNXSPLCPLERRR,
		 "PCI unexpected split completion DMA read error", -1, 1},
		{F_UNXSPLCPLERRC,
		 "PCI unexpected split completion DMA command error", -1, 1},
		{F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
		{F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1},
		{F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1},
		{F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1},
		{V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
		 "PCI MSI-X table/PBA parity error", -1, 1},
		{F_RETRYBUFPARERR, "PCI retry buffer parity error", -1, 1},
		{F_RETRYLUTPARERR, "PCI retry LUT parity error", -1, 1},
		{F_RXPARERR, "PCI Rx parity error", -1, 1},
		{F_TXPARERR, "PCI Tx parity error", -1, 1},
		{V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1},
		{0}
	};

	if (t3_read_reg(adapter, A_PCIE_INT_CAUSE) & F_PEXERR)
		CH_ALERT(adapter, "PEX error code 0x%x\n",
			 t3_read_reg(adapter, A_PCIE_PEX_ERR));

	if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
				  pcie_intr_info, adapter->irq_stats))
		t3_fatal_err(adapter);
}
/*
 * TP interrupt handler.
 */
static void tp_intr_handler(struct adapter *adapter)
{
	static const struct intr_info tp_intr_info[] = {
		{0xffffff, "TP parity error", -1, 1},
		{0x1000000, "TP out of Rx pages", -1, 1},
		{0x2000000, "TP out of Tx pages", -1, 1},
		{0}
	};

	static struct intr_info tp_intr_info_t3c[] = {
		{0x1fffffff, "TP parity error", -1, 1},
		{F_FLMRXFLSTEMPTY, "TP out of Rx pages", -1, 1},
		{F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
				  adapter->params.rev < T3_REV_C ?
				  tp_intr_info : tp_intr_info_t3c, NULL))
		t3_fatal_err(adapter);
}
/*
 * CIM interrupt handler.
 */
static void cim_intr_handler(struct adapter *adapter)
{
	static const struct intr_info cim_intr_info[] = {
		{F_RSVDSPACEINT, "CIM reserved space write", -1, 1},
		{F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1},
		{F_FLASHRANGEINT, "CIM flash address out of range", -1, 1},
		{F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1},
		{F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1},
		{F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1},
		{F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1},
		{F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1},
		{F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1},
		{F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1},
		{F_BLKRDPLINT, "CIM block read from PL space", -1, 1},
		{F_BLKWRPLINT, "CIM block write to PL space", -1, 1},
		{F_DRAMPARERR, "CIM DRAM parity error", -1, 1},
		{F_ICACHEPARERR, "CIM icache parity error", -1, 1},
		{F_DCACHEPARERR, "CIM dcache parity error", -1, 1},
		{F_OBQSGEPARERR, "CIM OBQ SGE parity error", -1, 1},
		{F_OBQULPHIPARERR, "CIM OBQ ULPHI parity error", -1, 1},
		{F_OBQULPLOPARERR, "CIM OBQ ULPLO parity error", -1, 1},
		{F_IBQSGELOPARERR, "CIM IBQ SGELO parity error", -1, 1},
		{F_IBQSGEHIPARERR, "CIM IBQ SGEHI parity error", -1, 1},
		{F_IBQULPPARERR, "CIM IBQ ULP parity error", -1, 1},
		{F_IBQTPPARERR, "CIM IBQ TP parity error", -1, 1},
		{F_ITAGPARERR, "CIM itag parity error", -1, 1},
		{F_DTAGPARERR, "CIM dtag parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, 0xffffffff,
				  cim_intr_info, NULL))
		t3_fatal_err(adapter);
}
/*
 * ULP RX interrupt handler.
 */
static void ulprx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info ulprx_intr_info[] = {
		{F_PARERRDATA, "ULP RX data parity error", -1, 1},
		{F_PARERRPCMD, "ULP RX command parity error", -1, 1},
		{F_ARBPF1PERR, "ULP RX ArbPF1 parity error", -1, 1},
		{F_ARBPF0PERR, "ULP RX ArbPF0 parity error", -1, 1},
		{F_ARBFPERR, "ULP RX ArbF parity error", -1, 1},
		{F_PCMDMUXPERR, "ULP RX PCMDMUX parity error", -1, 1},
		{F_DATASELFRAMEERR1, "ULP RX frame error", -1, 1},
		{F_DATASELFRAMEERR0, "ULP RX frame error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
				  ulprx_intr_info, NULL))
		t3_fatal_err(adapter);
}
/*
 * ULP TX interrupt handler.
 */
static void ulptx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info ulptx_intr_info[] = {
		{F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
		 STAT_ULP_CH0_PBL_OOB, 0},
		{F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
		 STAT_ULP_CH1_PBL_OOB, 0},
		{0xfc, "ULP TX parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff,
				  ulptx_intr_info, adapter->irq_stats))
		t3_fatal_err(adapter);
}
#define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \
	F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \
	F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \
	F_ICSPI1_TX_FRAMING_ERROR)
#define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \
	F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \
	F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \
	F_OESPI1_OFIFO2X_TX_FRAMING_ERROR)

/*
 * PM TX interrupt handler.
 */
static void pmtx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pmtx_intr_info[] = {
		{F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1},
		{ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1},
		{OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1},
		{V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
		 "PMTX ispi parity error", -1, 1},
		{V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
		 "PMTX ospi parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff,
				  pmtx_intr_info, NULL))
		t3_fatal_err(adapter);
}
#define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \
	F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \
	F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \
	F_IESPI1_TX_FRAMING_ERROR)
#define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \
	F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \
	F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \
	F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)

/*
 * PM RX interrupt handler.
 */
static void pmrx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pmrx_intr_info[] = {
		{F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1},
		{IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1},
		{OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1},
		{V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
		 "PMRX ispi parity error", -1, 1},
		{V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
		 "PMRX ospi parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff,
				  pmrx_intr_info, NULL))
		t3_fatal_err(adapter);
}
/*
 * CPL switch interrupt handler.
 */
static void cplsw_intr_handler(struct adapter *adapter)
{
	static const struct intr_info cplsw_intr_info[] = {
		{F_CIM_OP_MAP_PERR, "CPL switch CIM parity error", -1, 1},
		{F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1},
		{F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1},
		{F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1},
		{F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1},
		{F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff,
				  cplsw_intr_info, NULL))
		t3_fatal_err(adapter);
}

/*
 * MPS interrupt handler.
 */
static void mps_intr_handler(struct adapter *adapter)
{
	static const struct intr_info mps_intr_info[] = {
		{0x1ff, "MPS parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
				  mps_intr_info, NULL))
		t3_fatal_err(adapter);
}
#define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE)

/*
 * MC7 interrupt handler.
 */
static void mc7_intr_handler(struct mc7 *mc7)
{
	struct adapter *adapter = mc7->adapter;
	u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE);

	if (cause & F_CE) {
		mc7->stats.corr_err++;
		CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, "
			"data 0x%x 0x%x 0x%x\n", mc7->name,
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR),
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0),
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1),
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2));
	}

	if (cause & F_UE) {
		mc7->stats.uncorr_err++;
		CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, "
			 "data 0x%x 0x%x 0x%x\n", mc7->name,
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR),
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0),
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1),
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2));
	}

	if (G_PE(cause)) {
		mc7->stats.parity_err++;
		CH_ALERT(adapter, "%s MC7 parity error 0x%x\n",
			 mc7->name, G_PE(cause));
	}

	if (cause & F_AE) {
		u32 addr = 0;

		if (adapter->params.rev > 0)
			addr = t3_read_reg(adapter,
					   mc7->offset + A_MC7_ERR_ADDR);
		mc7->stats.addr_err++;
		CH_ALERT(adapter, "%s MC7 address error: 0x%x\n",
			 mc7->name, addr);
	}

	if (cause & MC7_INTR_FATAL)
		t3_fatal_err(adapter);

	t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause);
}
#define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
			V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR))

/*
 * XGMAC interrupt handler.
 */
static int mac_intr_handler(struct adapter *adap, unsigned int idx)
{
	struct cmac *mac = &adap2pinfo(adap, idx)->mac;
	/*
	 * We mask out interrupt causes for which we're not taking interrupts.
	 * This allows us to use polling logic to monitor some of the other
	 * conditions when taking interrupts would impose too much load on the
	 * system.
	 */
	u32 cause = t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset) &
		    ~F_RXFIFO_OVERFLOW;

	if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
		mac->stats.tx_fifo_parity_err++;
		CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx);
	}
	if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) {
		mac->stats.rx_fifo_parity_err++;
		CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx);
	}
	if (cause & F_TXFIFO_UNDERRUN)
		mac->stats.tx_fifo_urun++;
	if (cause & F_RXFIFO_OVERFLOW)
		mac->stats.rx_fifo_ovfl++;
	if (cause & V_SERDES_LOS(M_SERDES_LOS))
		mac->stats.serdes_signal_loss++;
	if (cause & F_XAUIPCSCTCERR)
		mac->stats.xaui_pcs_ctc_err++;
	if (cause & F_XAUIPCSALIGNCHANGE)
		mac->stats.xaui_pcs_align_change++;
	if (cause & F_XGM_INT) {
		t3_set_reg_field(adap,
				 A_XGM_INT_ENABLE + mac->offset,
				 F_XGM_INT, 0);
		mac->stats.link_faults++;

		t3_os_link_fault_handler(adap, idx);
	}

	t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);

	if (cause & XGM_INTR_FATAL)
		t3_fatal_err(adap);

	return cause != 0;
}
1845 * Interrupt handler for PHY events.
1847 int t3_phy_intr_handler(struct adapter *adapter)
1849 u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);
1851 for_each_port(adapter, i) {
1852 struct port_info *p = adap2pinfo(adapter, i);
1854 if (!(p->phy.caps & SUPPORTED_IRQ))
1855 continue;
1857 if (cause & (1 << adapter_info(adapter)->gpio_intr[i])) {
1858 int phy_cause = p->phy.ops->intr_handler(&p->phy);
1860 if (phy_cause & cphy_cause_link_change)
1861 t3_link_changed(adapter, i);
1862 if (phy_cause & cphy_cause_fifo_error)
1863 p->phy.fifo_errors++;
1864 if (phy_cause & cphy_cause_module_change)
1865 t3_os_phymod_changed(adapter, i);
1869 t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause);
1870 return 0;

/*
 * T3 slow path (non-data) interrupt handler.
 */
int t3_slow_intr_handler(struct adapter *adapter)
{
	u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0);

	cause &= adapter->slow_intr_mask;
	if (!cause)
		return 0;
	if (cause & F_PCIM0) {
		if (is_pcie(adapter))
			pcie_intr_handler(adapter);
		else
			pci_intr_handler(adapter);
	}
	if (cause & F_SGE3)
		t3_sge_err_intr_handler(adapter);
	if (cause & F_MC7_PMRX)
		mc7_intr_handler(&adapter->pmrx);
	if (cause & F_MC7_PMTX)
		mc7_intr_handler(&adapter->pmtx);
	if (cause & F_MC7_CM)
		mc7_intr_handler(&adapter->cm);
	if (cause & F_CIM)
		cim_intr_handler(adapter);
	if (cause & F_TP1)
		tp_intr_handler(adapter);
	if (cause & F_ULP2_RX)
		ulprx_intr_handler(adapter);
	if (cause & F_ULP2_TX)
		ulptx_intr_handler(adapter);
	if (cause & F_PM1_RX)
		pmrx_intr_handler(adapter);
	if (cause & F_PM1_TX)
		pmtx_intr_handler(adapter);
	if (cause & F_CPL_SWITCH)
		cplsw_intr_handler(adapter);
	if (cause & F_MPS0)
		mps_intr_handler(adapter);
	if (cause & F_MC5A)
		t3_mc5_intr_handler(&adapter->mc5);
	if (cause & F_XGMAC0_0)
		mac_intr_handler(adapter, 0);
	if (cause & F_XGMAC0_1)
		mac_intr_handler(adapter, 1);
	if (cause & F_T3DBG)
		t3_os_ext_intr_handler(adapter);

	/* Clear the interrupts just processed. */
	t3_write_reg(adapter, A_PL_INT_CAUSE0, cause);
	t3_read_reg(adapter, A_PL_INT_CAUSE0);	/* flush */
	return 1;
}
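
/*
 * Illustrative sketch (not part of the driver): a top-level legacy-interrupt
 * service routine would typically call t3_slow_intr_handler() alongside the
 * data-path handler and combine the results.  cxgb3_example_isr() and
 * handle_rsp_queues() are hypothetical names; only t3_slow_intr_handler()
 * above is real.
 */
#if 0
static irqreturn_t cxgb3_example_isr(int irq, void *cookie)
{
	struct adapter *adapter = cookie;
	/* returns 1 if a slow-path cause was found and cleared */
	int handled = t3_slow_intr_handler(adapter);

	handled |= handle_rsp_queues(adapter);	/* hypothetical fast path */
	return IRQ_RETVAL(handled);
}
#endif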

static unsigned int calc_gpio_intr(struct adapter *adap)
{
	unsigned int i, gpi_intr = 0;

	for_each_port(adap, i)
		if ((adap2pinfo(adap, i)->phy.caps & SUPPORTED_IRQ) &&
		    adapter_info(adap)->gpio_intr[i])
			gpi_intr |= 1 << adapter_info(adap)->gpio_intr[i];
	return gpi_intr;
}

/**
 *	t3_intr_enable - enable interrupts
 *	@adapter: the adapter whose interrupts should be enabled
 *
 *	Enable interrupts by setting the interrupt enable registers of the
 *	various HW modules and then enabling the top-level interrupt
 *	concentrator.
 */
void t3_intr_enable(struct adapter *adapter)
{
	static const struct addr_val_pair intr_en_avp[] = {
		{A_SG_INT_ENABLE, SGE_INTR_MASK},
		{A_MC7_INT_ENABLE, MC7_INTR_MASK},
		{A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
		 MC7_INTR_MASK},
		{A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
		 MC7_INTR_MASK},
		{A_MC5_DB_INT_ENABLE, MC5_INTR_MASK},
		{A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK},
		{A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK},
		{A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK},
		{A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK},
		{A_MPS_INT_ENABLE, MPS_INTR_MASK},
	};

	adapter->slow_intr_mask = PL_INTR_MASK;

	t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0);
	t3_write_reg(adapter, A_TP_INT_ENABLE,
		     adapter->params.rev >= T3_REV_C ? 0x2bfffff : 0x3bfffff);

	if (adapter->params.rev > 0) {
		t3_write_reg(adapter, A_CPL_INTR_ENABLE,
			     CPLSW_INTR_MASK | F_CIM_OVFL_ERROR);
		t3_write_reg(adapter, A_ULPTX_INT_ENABLE,
			     ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 |
			     F_PBL_BOUND_ERR_CH1);
	} else {
		t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK);
		t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
	}

	t3_write_reg(adapter, A_T3DBG_INT_ENABLE, calc_gpio_intr(adapter));

	if (is_pcie(adapter))
		t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
	else
		t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK);
	t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask);
	t3_read_reg(adapter, A_PL_INT_ENABLE0);	/* flush */
}

/**
 *	t3_intr_disable - disable a card's interrupts
 *	@adapter: the adapter whose interrupts should be disabled
 *
 *	Disable interrupts.  We only disable the top-level interrupt
 *	concentrator and the SGE data interrupts.
 */
void t3_intr_disable(struct adapter *adapter)
{
	t3_write_reg(adapter, A_PL_INT_ENABLE0, 0);
	t3_read_reg(adapter, A_PL_INT_ENABLE0);	/* flush */
	adapter->slow_intr_mask = 0;
}

/**
 *	t3_intr_clear - clear all interrupts
 *	@adapter: the adapter whose interrupts should be cleared
 *
 *	Clears all interrupts.
 */
void t3_intr_clear(struct adapter *adapter)
{
	static const unsigned int cause_reg_addr[] = {
		A_SG_INT_CAUSE,
		A_SG_RSPQ_FL_STATUS,
		A_PCIX_INT_CAUSE,
		A_MC7_INT_CAUSE,
		A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
		A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
		A_CIM_HOST_INT_CAUSE,
		A_TP_INT_CAUSE,
		A_MC5_DB_INT_CAUSE,
		A_ULPRX_INT_CAUSE,
		A_ULPTX_INT_CAUSE,
		A_CPL_INTR_CAUSE,
		A_PM1_TX_INT_CAUSE,
		A_PM1_RX_INT_CAUSE,
		A_MPS_INT_CAUSE,
		A_T3DBG_INT_CAUSE,
	};
	unsigned int i;

	/* Clear PHY and MAC interrupts for each port. */
	for_each_port(adapter, i)
		t3_port_intr_clear(adapter, i);

	for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
		t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);

	if (is_pcie(adapter))
		t3_write_reg(adapter, A_PCIE_PEX_ERR, 0xffffffff);
	t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
	t3_read_reg(adapter, A_PL_INT_CAUSE0);	/* flush */
}

void t3_xgm_intr_enable(struct adapter *adapter, int idx)
{
	struct port_info *pi = adap2pinfo(adapter, idx);

	t3_write_reg(adapter, A_XGM_XGM_INT_ENABLE + pi->mac.offset,
		     XGM_EXTRA_INTR_MASK);
}

void t3_xgm_intr_disable(struct adapter *adapter, int idx)
{
	struct port_info *pi = adap2pinfo(adapter, idx);

	t3_write_reg(adapter, A_XGM_XGM_INT_DISABLE + pi->mac.offset,
		     0x7ff);
}

/**
 *	t3_port_intr_enable - enable port-specific interrupts
 *	@adapter: associated adapter
 *	@idx: index of port whose interrupts should be enabled
 *
 *	Enable port-specific (i.e., MAC and PHY) interrupts for the given
 *	adapter port.
 */
void t3_port_intr_enable(struct adapter *adapter, int idx)
{
	struct cphy *phy = &adap2pinfo(adapter, idx)->phy;

	t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), XGM_INTR_MASK);
	t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
	phy->ops->intr_enable(phy);
}

/**
 *	t3_port_intr_disable - disable port-specific interrupts
 *	@adapter: associated adapter
 *	@idx: index of port whose interrupts should be disabled
 *
 *	Disable port-specific (i.e., MAC and PHY) interrupts for the given
 *	adapter port.
 */
void t3_port_intr_disable(struct adapter *adapter, int idx)
{
	struct cphy *phy = &adap2pinfo(adapter, idx)->phy;

	t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), 0);
	t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
	phy->ops->intr_disable(phy);
}

/**
 *	t3_port_intr_clear - clear port-specific interrupts
 *	@adapter: associated adapter
 *	@idx: index of port whose interrupts to clear
 *
 *	Clear port-specific (i.e., MAC and PHY) interrupts for the given
 *	adapter port.
 */
void t3_port_intr_clear(struct adapter *adapter, int idx)
{
	struct cphy *phy = &adap2pinfo(adapter, idx)->phy;

	t3_write_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx), 0xffffffff);
	t3_read_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx)); /* flush */
	phy->ops->intr_clear(phy);
}

#define SG_CONTEXT_CMD_ATTEMPTS 100

/**
 *	t3_sge_write_context - write an SGE context
 *	@adapter: the adapter
 *	@id: the context id
 *	@type: the context type
 *
 *	Program an SGE context with the values already loaded in the
 *	CONTEXT_DATA? registers.
 */
static int t3_sge_write_context(struct adapter *adapter, unsigned int id,
				unsigned int type)
{
	if (type == F_RESPONSEQ) {
		/*
		 * Can't write the Response Queue Context bits for
		 * Interrupt Armed or the Reserve bits after the chip
		 * has been initialized out of reset.  Writing to these
		 * bits can confuse the hardware.
		 */
		t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
		t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
		t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0x17ffffff);
		t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
	} else {
		t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
		t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
		t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
		t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
	}
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
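
/*
 * Illustrative sketch (not part of the driver): the CONTEXT_MASK registers
 * select which bits of the staged CONTEXT_DATA values the opcode-1 command
 * actually writes, so a single field can be updated without disturbing the
 * rest of the context.  example_set_cq_credits() is a hypothetical helper
 * and assumes the usual M_CQ_CREDITS field-mask macro from regs.h.
 */
#if 0
static int example_set_cq_credits(struct adapter *adapter, unsigned int id,
				  unsigned int credits)
{
	/* Stage the new value and unmask only the CQ credits field. */
	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, V_CQ_CREDITS(M_CQ_CREDITS));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits));
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
#endif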

/**
 *	clear_sge_ctxt - completely clear an SGE context
 *	@adap: the adapter
 *	@id: the context id
 *	@type: the context type
 *
 *	Completely clear an SGE context.  Used predominantly at post-reset
 *	initialization.  Note in particular that we don't skip writing to any
 *	"sensitive bits" in the contexts the way that t3_sge_write_context()
 *	does ...
 */
static int clear_sge_ctxt(struct adapter *adap, unsigned int id,
			  unsigned int type)
{
	t3_write_reg(adap, A_SG_CONTEXT_DATA0, 0);
	t3_write_reg(adap, A_SG_CONTEXT_DATA1, 0);
	t3_write_reg(adap, A_SG_CONTEXT_DATA2, 0);
	t3_write_reg(adap, A_SG_CONTEXT_DATA3, 0);
	t3_write_reg(adap, A_SG_CONTEXT_MASK0, 0xffffffff);
	t3_write_reg(adap, A_SG_CONTEXT_MASK1, 0xffffffff);
	t3_write_reg(adap, A_SG_CONTEXT_MASK2, 0xffffffff);
	t3_write_reg(adap, A_SG_CONTEXT_MASK3, 0xffffffff);
	t3_write_reg(adap, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
	return t3_wait_op_done(adap, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}

/**
 *	t3_sge_init_ecntxt - initialize an SGE egress context
 *	@adapter: the adapter to configure
 *	@id: the context id
 *	@gts_enable: whether to enable GTS for the context
 *	@type: the egress context type
 *	@respq: associated response queue
 *	@base_addr: base address of queue
 *	@size: number of queue entries
 *	@token: uP token
 *	@gen: initial generation value for the context
 *	@cidx: consumer pointer
 *
 *	Initialize an SGE egress context and make it ready for use.  If the
 *	platform allows concurrent context operations, the caller is
 *	responsible for appropriate locking.
 */
int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable,
		       enum sge_context_type type, int respq, u64 base_addr,
		       unsigned int size, unsigned int token, int gen,
		       unsigned int cidx)
{
	unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM;

	if (base_addr & 0xfff)	/* must be 4K aligned */
		return -EINVAL;
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	base_addr >>= 12;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) |
		     V_EC_CREDITS(credits) | V_EC_GTS(gts_enable));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) |
		     V_EC_BASE_LO(base_addr & 0xffff));
	base_addr >>= 16;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, base_addr);
	base_addr >>= 32;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
		     V_EC_BASE_HI(base_addr & 0xf) | V_EC_RESPQ(respq) |
		     V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) |
		     F_EC_VALID);
	return t3_sge_write_context(adapter, id, F_EGRESS);
}
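
/*
 * Illustrative sketch (not part of the driver): programming an egress
 * context for a newly allocated Ethernet TX queue.  example_setup_txq() and
 * its argument values are hypothetical; note that the queue's DMA address
 * must be 4KB aligned or t3_sge_init_ecntxt() fails with -EINVAL.
 */
#if 0
static int example_setup_txq(struct adapter *adapter, unsigned int qid,
			     dma_addr_t phys, unsigned int nentries)
{
	/* gts_enable=1, respq=0, token=qid, gen=1, cidx=0 are
	 * representative initial values for a fresh queue */
	return t3_sge_init_ecntxt(adapter, qid, 1, SGE_CNTXT_ETH, 0,
				  phys, nentries, qid, 1, 0);
}
#endif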

/**
 *	t3_sge_init_flcntxt - initialize an SGE free-buffer list context
 *	@adapter: the adapter to configure
 *	@id: the context id
 *	@gts_enable: whether to enable GTS for the context
 *	@base_addr: base address of queue
 *	@size: number of queue entries
 *	@bsize: size of each buffer for this queue
 *	@cong_thres: threshold to signal congestion to upstream producers
 *	@gen: initial generation value for the context
 *	@cidx: consumer pointer
 *
 *	Initialize an SGE free list context and make it ready for use.  The
 *	caller is responsible for ensuring only one context operation occurs
 *	at a time.
 */
int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id,
			int gts_enable, u64 base_addr, unsigned int size,
			unsigned int bsize, unsigned int cong_thres, int gen,
			unsigned int cidx)
{
	if (base_addr & 0xfff)	/* must be 4K aligned */
		return -EINVAL;
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	base_addr >>= 12;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, base_addr);
	base_addr >>= 32;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA1,
		     V_FL_BASE_HI((u32) base_addr) |
		     V_FL_INDEX_LO(cidx & M_FL_INDEX_LO));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) |
		     V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) |
		     V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
		     V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) |
		     V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable));
	return t3_sge_write_context(adapter, id, F_FREELIST);
}

/**
 *	t3_sge_init_rspcntxt - initialize an SGE response queue context
 *	@adapter: the adapter to configure
 *	@id: the context id
 *	@irq_vec_idx: MSI-X interrupt vector index, 0 if no MSI-X, -1 if no IRQ
 *	@base_addr: base address of queue
 *	@size: number of queue entries
 *	@fl_thres: threshold for selecting the normal or jumbo free list
 *	@gen: initial generation value for the context
 *	@cidx: consumer pointer
 *
 *	Initialize an SGE response queue context and make it ready for use.
 *	The caller is responsible for ensuring only one context operation
 *	occurs at a time.
 */
int t3_sge_init_rspcntxt(struct adapter *adapter, unsigned int id,
			 int irq_vec_idx, u64 base_addr, unsigned int size,
			 unsigned int fl_thres, int gen, unsigned int cidx)
{
	unsigned int intr = 0;

	if (base_addr & 0xfff)	/* must be 4K aligned */
		return -EINVAL;
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	base_addr >>= 12;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) |
		     V_CQ_INDEX(cidx));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
	base_addr >>= 32;
	if (irq_vec_idx >= 0)
		intr = V_RQ_MSI_VEC(irq_vec_idx) | F_RQ_INTR_EN;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
		     V_CQ_BASE_HI((u32) base_addr) | intr | V_RQ_GEN(gen));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres);
	return t3_sge_write_context(adapter, id, F_RESPONSEQ);
}

/**
 *	t3_sge_init_cqcntxt - initialize an SGE completion queue context
 *	@adapter: the adapter to configure
 *	@id: the context id
 *	@base_addr: base address of queue
 *	@size: number of queue entries
 *	@rspq: response queue for async notifications
 *	@ovfl_mode: CQ overflow mode
 *	@credits: completion queue credits
 *	@credit_thres: the credit threshold
 *
 *	Initialize an SGE completion queue context and make it ready for use.
 *	The caller is responsible for ensuring only one context operation
 *	occurs at a time.
 */
int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr,
			unsigned int size, int rspq, int ovfl_mode,
			unsigned int credits, unsigned int credit_thres)
{
	if (base_addr & 0xfff)	/* must be 4K aligned */
		return -EINVAL;
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	base_addr >>= 12;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
	base_addr >>= 32;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
		     V_CQ_BASE_HI((u32) base_addr) | V_CQ_RSPQ(rspq) |
		     V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode) |
		     V_CQ_ERR(ovfl_mode));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
		     V_CQ_CREDIT_THRES(credit_thres));
	return t3_sge_write_context(adapter, id, F_CQ);
}

/**
 *	t3_sge_enable_ecntxt - enable/disable an SGE egress context
 *	@adapter: the adapter
 *	@id: the egress context id
 *	@enable: enable (1) or disable (0) the context
 *
 *	Enable or disable an SGE egress context.  The caller is responsible for
 *	ensuring only one context operation occurs at a time.
 */
int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable)
{
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID);
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable));
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}

/**
 *	t3_sge_disable_fl - disable an SGE free-buffer list
 *	@adapter: the adapter
 *	@id: the free list context id
 *
 *	Disable an SGE free-buffer list.  The caller is responsible for
 *	ensuring only one context operation occurs at a time.
 */
int t3_sge_disable_fl(struct adapter *adapter, unsigned int id)
{
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE));
	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id));
	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}

/**
 *	t3_sge_disable_rspcntxt - disable an SGE response queue
 *	@adapter: the adapter
 *	@id: the response queue context id
 *
 *	Disable an SGE response queue.  The caller is responsible for
 *	ensuring only one context operation occurs at a time.
 */
int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id)
{
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id));
	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}

/**
 *	t3_sge_disable_cqcntxt - disable an SGE completion queue
 *	@adapter: the adapter
 *	@id: the completion queue context id
 *
 *	Disable an SGE completion queue.  The caller is responsible for
 *	ensuring only one context operation occurs at a time.
 */
int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id)
{
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}

/**
 *	t3_sge_cqcntxt_op - perform an operation on a completion queue context
 *	@adapter: the adapter
 *	@id: the context id
 *	@op: the operation to perform
 *
 *	Perform the selected operation on an SGE completion queue context.
 *	The caller is responsible for ensuring only one context operation
 *	occurs at a time.
 */
int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
		      unsigned int credits)
{
	u32 val;

	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16);
	t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) |
		     V_CONTEXT(id) | F_CQ);
	if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
				0, SG_CONTEXT_CMD_ATTEMPTS, 1, &val))
		return -EIO;

	if (op >= 2 && op < 7) {
		if (adapter->params.rev > 0)
			return G_CQ_INDEX(val);

		t3_write_reg(adapter, A_SG_CONTEXT_CMD,
			     V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id));
		if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD,
				    F_CONTEXT_CMD_BUSY, 0,
				    SG_CONTEXT_CMD_ATTEMPTS, 1))
			return -EIO;
		return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0));
	}
	return 0;
}

/**
 *	t3_sge_read_context - read an SGE context
 *	@type: the context type
 *	@adapter: the adapter
 *	@id: the context id
 *	@data: holds the retrieved context
 *
 *	Read an SGE context.  The caller is responsible for ensuring
 *	only one context operation occurs at a time.
 */
static int t3_sge_read_context(unsigned int type, struct adapter *adapter,
			       unsigned int id, u32 data[4])
{
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(0) | type | V_CONTEXT(id));
	if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 0,
			    SG_CONTEXT_CMD_ATTEMPTS, 1))
		return -EIO;
	data[0] = t3_read_reg(adapter, A_SG_CONTEXT_DATA0);
	data[1] = t3_read_reg(adapter, A_SG_CONTEXT_DATA1);
	data[2] = t3_read_reg(adapter, A_SG_CONTEXT_DATA2);
	data[3] = t3_read_reg(adapter, A_SG_CONTEXT_DATA3);
	return 0;
}

/**
 *	t3_sge_read_ecntxt - read an SGE egress context
 *	@adapter: the adapter
 *	@id: the context id
 *	@data: holds the retrieved context
 *
 *	Read an SGE egress context.  The caller is responsible for ensuring
 *	only one context operation occurs at a time.
 */
int t3_sge_read_ecntxt(struct adapter *adapter, unsigned int id, u32 data[4])
{
	if (id >= 65536)
		return -EINVAL;
	return t3_sge_read_context(F_EGRESS, adapter, id, data);
}

/**
 *	t3_sge_read_cq - read an SGE CQ context
 *	@adapter: the adapter
 *	@id: the context id
 *	@data: holds the retrieved context
 *
 *	Read an SGE CQ context.  The caller is responsible for ensuring
 *	only one context operation occurs at a time.
 */
int t3_sge_read_cq(struct adapter *adapter, unsigned int id, u32 data[4])
{
	if (id >= 65536)
		return -EINVAL;
	return t3_sge_read_context(F_CQ, adapter, id, data);
}

/**
 *	t3_sge_read_fl - read an SGE free-list context
 *	@adapter: the adapter
 *	@id: the context id
 *	@data: holds the retrieved context
 *
 *	Read an SGE free-list context.  The caller is responsible for ensuring
 *	only one context operation occurs at a time.
 */
int t3_sge_read_fl(struct adapter *adapter, unsigned int id, u32 data[4])
{
	if (id >= SGE_QSETS * 2)
		return -EINVAL;
	return t3_sge_read_context(F_FREELIST, adapter, id, data);
}

/**
 *	t3_sge_read_rspq - read an SGE response queue context
 *	@adapter: the adapter
 *	@id: the context id
 *	@data: holds the retrieved context
 *
 *	Read an SGE response queue context.  The caller is responsible for
 *	ensuring only one context operation occurs at a time.
 */
int t3_sge_read_rspq(struct adapter *adapter, unsigned int id, u32 data[4])
{
	if (id >= SGE_QSETS)
		return -EINVAL;
	return t3_sge_read_context(F_RESPONSEQ, adapter, id, data);
}

/**
 *	t3_config_rss - configure Rx packet steering
 *	@adapter: the adapter
 *	@rss_config: RSS settings (written to TP_RSS_CONFIG)
 *	@cpus: values for the CPU lookup table (0xff terminated)
 *	@rspq: values for the response queue lookup table (0xffff terminated)
 *
 *	Programs the receive packet steering logic.  @cpus and @rspq provide
 *	the values for the CPU and response queue lookup tables.  If they
 *	provide fewer values than the size of the tables the supplied values
 *	are used repeatedly until the tables are fully populated.
 */
void t3_config_rss(struct adapter *adapter, unsigned int rss_config,
		   const u8 * cpus, const u16 *rspq)
{
	int i, j, cpu_idx = 0, q_idx = 0;

	if (cpus)
		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
			u32 val = i << 16;

			for (j = 0; j < 2; ++j) {
				val |= (cpus[cpu_idx++] & 0x3f) << (8 * j);
				if (cpus[cpu_idx] == 0xff)
					cpu_idx = 0;
			}
			t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val);
		}

	if (rspq)
		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
			t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
				     (i << 16) | rspq[q_idx++]);
			if (rspq[q_idx] == 0xffff)
				q_idx = 0;
		}

	t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config);
}
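
/*
 * Illustrative sketch (not part of the driver): spreading the RSS buckets
 * over two CPUs/response queues.  Because t3_config_rss() cycles through
 * the arrays until it sees the 0xff/0xffff terminators, these short arrays
 * populate all RSS_TABLE_SIZE entries.  The rss_config flags shown are
 * representative, not required.
 */
#if 0
static void example_two_queue_rss(struct adapter *adapter)
{
	static const u8 cpus[] = { 0, 1, 0xff };
	static const u16 rspq[] = { 0, 1, 0xffff };

	t3_config_rss(adapter, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN,
		      cpus, rspq);
}
#endif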

/**
 *	t3_read_rss - read the contents of the RSS tables
 *	@adapter: the adapter
 *	@lkup: holds the contents of the RSS lookup table
 *	@map: holds the contents of the RSS map table
 *
 *	Reads the contents of the receive packet steering tables.
 */
int t3_read_rss(struct adapter *adapter, u8 * lkup, u16 *map)
{
	int i;
	u32 val;

	if (lkup)
		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
			t3_write_reg(adapter, A_TP_RSS_LKP_TABLE,
				     0xffff0000 | i);
			val = t3_read_reg(adapter, A_TP_RSS_LKP_TABLE);
			if (!(val & 0x80000000))
				return -EAGAIN;
			*lkup++ = val;
			*lkup++ = (val >> 8);
		}

	if (map)
		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
			t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
				     0xffff0000 | i);
			val = t3_read_reg(adapter, A_TP_RSS_MAP_TABLE);
			if (!(val & 0x80000000))
				return -EAGAIN;
			*map++ = val;
		}
	return 0;
}

/**
 *	t3_tp_set_offload_mode - put TP in NIC/offload mode
 *	@adap: the adapter
 *	@enable: 1 to select offload mode, 0 for regular NIC
 *
 *	Switches TP to NIC/offload mode.
 */
void t3_tp_set_offload_mode(struct adapter *adap, int enable)
{
	if (is_offload(adap) || !enable)
		t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE,
				 V_NICMODE(!enable));
}

/**
 *	pm_num_pages - calculate the number of pages of the payload memory
 *	@mem_size: the size of the payload memory
 *	@pg_size: the size of each payload memory page
 *
 *	Calculate the number of pages, each of the given size, that fit in a
 *	memory of the specified size, respecting the HW requirement that the
 *	number of pages must be a multiple of 24.
 */
static inline unsigned int pm_num_pages(unsigned int mem_size,
					unsigned int pg_size)
{
	unsigned int n = mem_size / pg_size;

	return n - n % 24;
}
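
/*
 * Example: with 64MB of payload memory and 16KB pages, mem_size / pg_size
 * is 4096 and 4096 % 24 == 16, so pm_num_pages() returns 4080; the last 16
 * pages' worth of memory is sacrificed to meet the multiple-of-24 rule.
 */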

#define mem_region(adap, start, size, reg) \
	t3_write_reg((adap), A_ ## reg, (start)); \
	start += size
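
/*
 * Note that mem_region() expands to two statements: it programs the
 * region's base register and then advances the running allocation cursor.
 * It is only safe where both statements execute unconditionally, as in
 * partition_mem() below; an unbraced if around a use would silently split
 * the pair.
 */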

/**
 *	partition_mem - partition memory and configure TP memory settings
 *	@adap: the adapter
 *	@p: the TP parameters
 *
 *	Partitions context and payload memory and configures TP's memory
 *	registers.
 */
static void partition_mem(struct adapter *adap, const struct tp_params *p)
{
	unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5);
	unsigned int timers = 0, timers_shift = 22;

	if (adap->params.rev > 0) {
		if (tids <= 16 * 1024) {
			timers = 1;
			timers_shift = 16;
		} else if (tids <= 64 * 1024) {
			timers = 2;
			timers_shift = 18;
		} else if (tids <= 256 * 1024) {
			timers = 3;
			timers_shift = 20;
		}
	}

	t3_write_reg(adap, A_TP_PMM_SIZE,
		     p->chan_rx_size | (p->chan_tx_size >> 16));

	t3_write_reg(adap, A_TP_PMM_TX_BASE, 0);
	t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size);
	t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs);
	t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX),
			 V_TXDATAACKIDX(fls(p->tx_pg_size) - 12));

	t3_write_reg(adap, A_TP_PMM_RX_BASE, 0);
	t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size);
	t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs);

	pstructs = p->rx_num_pgs + p->tx_num_pgs;
	/* Add a bit of headroom and make multiple of 24 */
	pstructs += 48;
	pstructs -= pstructs % 24;
	t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs);

	m = tids * TCB_SIZE;
	mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR);
	mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR);
	t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m);
	m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22);
	mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE);
	mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE);
	mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE);
	mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE);

	m = (m + 4095) & ~0xfff;
	t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m);
	t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m);

	tids = (p->cm_size - m - (3 << 20)) / 3072 - 32;
	m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
	    adap->params.mc5.nfilters - adap->params.mc5.nroutes;
	if (tids < m)
		adap->params.mc5.nservers += m - tids;
}

static inline void tp_wr_indirect(struct adapter *adap, unsigned int addr,
				  u32 val)
{
	t3_write_reg(adap, A_TP_PIO_ADDR, addr);
	t3_write_reg(adap, A_TP_PIO_DATA, val);
}

static void tp_config(struct adapter *adap, const struct tp_params *p)
{
	t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU |
		     F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD |
		     F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
	t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
		     F_MTUENABLE | V_WINDOWSCALEMODE(1) |
		     V_TIMESTAMPSMODE(1) | V_SACKMODE(1) | V_SACKRX(1));
	t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
		     V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
		     V_BYTETHRESHOLD(26880) | V_MSSTHRESHOLD(2) |
		     F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
	t3_set_reg_field(adap, A_TP_IN_CONFIG, F_RXFBARBPRIO | F_TXFBARBPRIO,
			 F_IPV6ENABLE | F_NICMODE);
	t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
	t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
	t3_set_reg_field(adap, A_TP_PARA_REG6, 0,
			 adap->params.rev > 0 ? F_ENABLEESND :
			 F_T3A_ENABLEESND);

	t3_set_reg_field(adap, A_TP_PC_CONFIG,
			 F_ENABLEEPCMDAFULL,
			 F_ENABLEOCSPIFULL | F_TXDEFERENABLE | F_HEARBEATDACK |
			 F_TXCONGESTIONMODE | F_RXCONGESTIONMODE);
	t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL,
			 F_ENABLEIPV6RSS | F_ENABLENONOFDTNLSYN |
			 F_ENABLEARPMISS | F_DISBLEDAPARBIT0);
	t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1080);
	t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1000);

	if (adap->params.rev > 0) {
		tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
		t3_set_reg_field(adap, A_TP_PARA_REG3, F_TXPACEAUTO,
				 F_TXPACEAUTO);
		t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID);
		t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEAUTOSTRICT);
	} else
		t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);

	if (adap->params.rev == T3_REV_C)
		t3_set_reg_field(adap, A_TP_PC_CONFIG,
				 V_TABLELATENCYDELTA(M_TABLELATENCYDELTA),
				 V_TABLELATENCYDELTA(4));

	t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0);
	t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0);
	t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0);
	t3_write_reg(adap, A_TP_MOD_RATE_LIMIT, 0xf2200000);
}

/* Desired TP timer resolution in usec */
#define TP_TMR_RES 50

/* TCP timer values in ms */
#define TP_DACK_TIMER 50
#define TP_RTO_MIN    250

/**
 *	tp_set_timers - set TP timing parameters
 *	@adap: the adapter to set
 *	@core_clk: the core clock frequency in Hz
 *
 *	Set TP's timing parameters, such as the various timer resolutions and
 *	the TCP timer values.
 */
static void tp_set_timers(struct adapter *adap, unsigned int core_clk)
{
	unsigned int tre = fls(core_clk / (1000000 / TP_TMR_RES)) - 1;
	unsigned int dack_re = fls(core_clk / 5000) - 1;	/* 200us */
	unsigned int tstamp_re = fls(core_clk / 1000);	/* 1ms, at least */
	unsigned int tps = core_clk >> tre;

	t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) |
		     V_DELAYEDACKRESOLUTION(dack_re) |
		     V_TIMESTAMPRESOLUTION(tstamp_re));
	t3_write_reg(adap, A_TP_DACK_TIMER,
		     (core_clk >> dack_re) / (1000 / TP_DACK_TIMER));
	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100);
	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504);
	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908);
	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c);
	t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) |
		     V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) |
		     V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
		     V_KEEPALIVEMAX(9));

#define SECONDS * tps

	t3_write_reg(adap, A_TP_MSL, adap->params.rev > 0 ? 0 : 2 SECONDS);
	t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN));
	t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS);
	t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS);
	t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS);
	t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS);
	t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS);
	t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS);
	t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS);

#undef SECONDS
}
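
/*
 * Example: with a 200MHz core clock, core_clk / (1000000 / TP_TMR_RES) is
 * 10000, so tre = fls(10000) - 1 = 13 and tps = core_clk >> 13 = 24414
 * ticks per second, i.e. one tick is about 41us, the coarsest power-of-two
 * resolution that does not exceed the desired 50us.  The SECONDS macro
 * above then turns "7200 SECONDS" into 7200 * tps timer ticks.
 */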

/**
 *	t3_tp_set_coalescing_size - set receive coalescing size
 *	@adap: the adapter
 *	@size: the receive coalescing size
 *	@psh: whether a set PSH bit should deliver coalesced data
 *
 *	Set the receive coalescing size and PSH bit handling.
 */
int t3_tp_set_coalescing_size(struct adapter *adap, unsigned int size, int psh)
{
	u32 val;

	if (size > MAX_RX_COALESCING_LEN)
		return -EINVAL;

	val = t3_read_reg(adap, A_TP_PARA_REG3);
	val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);

	if (size) {
		val |= F_RXCOALESCEENABLE;
		if (psh)
			val |= F_RXCOALESCEPSHEN;
		size = min(MAX_RX_COALESCING_LEN, size);
		t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
			     V_MAXRXDATA(MAX_RX_COALESCING_LEN));
	}
	t3_write_reg(adap, A_TP_PARA_REG3, val);
	return 0;
}

/**
 *	t3_tp_set_max_rxsize - set the max receive size
 *	@adap: the adapter
 *	@size: the max receive size
 *
 *	Set TP's max receive size.  This is the limit that applies when
 *	receive coalescing is disabled.
 */
void t3_tp_set_max_rxsize(struct adapter *adap, unsigned int size)
{
	t3_write_reg(adap, A_TP_PARA_REG7,
		     V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
}

static void init_mtus(unsigned short mtus[])
{
	/*
	 * See draft-mathis-plpmtud-00.txt for the values.  The min is 88 so
	 * it can accommodate max size TCP/IP headers when SACK and timestamps
	 * are enabled and still have at least 8 bytes of payload.
	 */
	mtus[0] = 88;
	mtus[1] = 88;
	mtus[2] = 256;
	mtus[3] = 512;
	mtus[4] = 576;
	mtus[5] = 1024;
	mtus[6] = 1280;
	mtus[7] = 1492;
	mtus[8] = 1500;
	mtus[9] = 2002;
	mtus[10] = 2048;
	mtus[11] = 4096;
	mtus[12] = 4352;
	mtus[13] = 8192;
	mtus[14] = 9000;
	mtus[15] = 9600;
}

/*
 * Initial congestion control parameters.
 */
static void init_cong_ctrl(unsigned short *a, unsigned short *b)
{
	a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
	a[9] = 2;
	a[10] = 3;
	a[11] = 4;
	a[12] = 5;
	a[13] = 6;
	a[14] = 7;
	a[15] = 8;
	a[16] = 9;
	a[17] = 10;
	a[18] = 14;
	a[19] = 17;
	a[20] = 21;
	a[21] = 25;
	a[22] = 30;
	a[23] = 35;
	a[24] = 45;
	a[25] = 60;
	a[26] = 80;
	a[27] = 100;
	a[28] = 200;
	a[29] = 300;
	a[30] = 400;
	a[31] = 500;

	b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
	b[9] = b[10] = 1;
	b[11] = b[12] = 2;
	b[13] = b[14] = b[15] = b[16] = 3;
	b[17] = b[18] = b[19] = b[20] = b[21] = 4;
	b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
	b[28] = b[29] = 6;
	b[30] = b[31] = 7;
}

/* The minimum additive increment value for the congestion control table */
#define CC_MIN_INCR 2U

/**
 *	t3_load_mtus - write the MTU and congestion control HW tables
 *	@adap: the adapter
 *	@mtus: the unrestricted values for the MTU table
 *	@alpha: the values for the congestion control alpha parameter
 *	@beta: the values for the congestion control beta parameter
 *	@mtu_cap: the maximum permitted effective MTU
 *
 *	Write the MTU table with the supplied MTUs capping each at @mtu_cap.
 *	Update the high-speed congestion control table with the supplied alpha,
 *	beta, and MTUs.
 */
void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS],
		  unsigned short alpha[NCCTRL_WIN],
		  unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap)
{
	static const unsigned int avg_pkts[NCCTRL_WIN] = {
		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
		28672, 40960, 57344, 81920, 114688, 163840, 229376
	};

	unsigned int i, w;

	for (i = 0; i < NMTUS; ++i) {
		unsigned int mtu = min(mtus[i], mtu_cap);
		unsigned int log2 = fls(mtu);

		if (!(mtu & ((1 << log2) >> 2)))	/* round */
			log2--;
		t3_write_reg(adap, A_TP_MTU_TABLE,
			     (i << 24) | (log2 << 16) | mtu);

		for (w = 0; w < NCCTRL_WIN; ++w) {
			unsigned int inc;

			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
				  CC_MIN_INCR);

			t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
				     (w << 16) | (beta[w] << 13) | inc);
		}
	}
}
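
/*
 * Example: for mtu = 1500 in congestion window 10, the default alpha from
 * init_cong_ctrl() is 3 and avg_pkts[10] is 160, so the additive increment
 * programmed is (1500 - 40) * 3 / 160 = 27 bytes; any result that falls
 * below CC_MIN_INCR is clamped to 2 so every window keeps making progress.
 */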

/**
 *	t3_read_hw_mtus - returns the values in the HW MTU table
 *	@adap: the adapter
 *	@mtus: where to store the HW MTU values
 *
 *	Reads the HW MTU table.
 */
void t3_read_hw_mtus(struct adapter *adap, unsigned short mtus[NMTUS])
{
	int i;

	for (i = 0; i < NMTUS; ++i) {
		unsigned int val;

		t3_write_reg(adap, A_TP_MTU_TABLE, 0xff000000 | i);
		val = t3_read_reg(adap, A_TP_MTU_TABLE);
		mtus[i] = val & 0x3fff;
	}
}

/**
 *	t3_get_cong_cntl_tab - reads the congestion control table
 *	@adap: the adapter
 *	@incr: where to store the additive increments
 *
 *	Reads the additive increments programmed into the HW congestion
 *	control table.
 */
void t3_get_cong_cntl_tab(struct adapter *adap,
			  unsigned short incr[NMTUS][NCCTRL_WIN])
{
	unsigned int mtu, w;

	for (mtu = 0; mtu < NMTUS; ++mtu)
		for (w = 0; w < NCCTRL_WIN; ++w) {
			t3_write_reg(adap, A_TP_CCTRL_TABLE,
				     0xffff0000 | (mtu << 5) | w);
			incr[mtu][w] = t3_read_reg(adap, A_TP_CCTRL_TABLE) &
				       0x1fff;
		}
}

/**
 *	t3_tp_get_mib_stats - read TP's MIB counters
 *	@adap: the adapter
 *	@tps: holds the returned counter values
 *
 *	Returns the values of TP's MIB counters.
 */
void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps)
{
	t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *) tps,
			 sizeof(*tps) / sizeof(u32), 0);
}

#define ulp_region(adap, name, start, len) \
	t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \
	t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \
		     (start) + (len) - 1); \
	start += len

#define ulptx_region(adap, name, start, len) \
	t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \
	t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \
		     (start) + (len) - 1)

static void ulp_config(struct adapter *adap, const struct tp_params *p)
{
	unsigned int m = p->chan_rx_size;

	ulp_region(adap, ISCSI, m, p->chan_rx_size / 8);
	ulp_region(adap, TDDP, m, p->chan_rx_size / 8);
	ulptx_region(adap, TPT, m, p->chan_rx_size / 4);
	ulp_region(adap, STAG, m, p->chan_rx_size / 4);
	ulp_region(adap, RQ, m, p->chan_rx_size / 4);
	ulptx_region(adap, PBL, m, p->chan_rx_size / 4);
	ulp_region(adap, PBL, m, p->chan_rx_size / 4);
	t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
}
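
/*
 * Example layout: ulp_region() advances the cursor m while ulptx_region()
 * does not, so with chan_rx_size = 64MB the ISCSI and TDDP regions take
 * 8MB each, the ULP-TX TPT window then covers the same 16MB range as the
 * ULP-RX STAG region that follows it, and the two PBL limits likewise
 * coincide; the RX and TX ULP engines share those windows.
 */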

/**
 *	t3_set_proto_sram - set the contents of the protocol sram
 *	@adap: the adapter
 *	@data: the protocol image
 *
 *	Write the contents of the protocol SRAM.
 */
int t3_set_proto_sram(struct adapter *adap, const u8 *data)
{
	int i;
	const __be32 *buf = (const __be32 *)data;

	for (i = 0; i < PROTO_SRAM_LINES; i++) {
		t3_write_reg(adap, A_TP_EMBED_OP_FIELD5, be32_to_cpu(*buf++));
		t3_write_reg(adap, A_TP_EMBED_OP_FIELD4, be32_to_cpu(*buf++));
		t3_write_reg(adap, A_TP_EMBED_OP_FIELD3, be32_to_cpu(*buf++));
		t3_write_reg(adap, A_TP_EMBED_OP_FIELD2, be32_to_cpu(*buf++));
		t3_write_reg(adap, A_TP_EMBED_OP_FIELD1, be32_to_cpu(*buf++));

		t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, i << 1 | 1 << 31);
		if (t3_wait_op_done(adap, A_TP_EMBED_OP_FIELD0, 1, 1, 5, 1))
			return -EIO;
	}
	t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, 0);

	return 0;
}

void t3_config_trace_filter(struct adapter *adapter,
			    const struct trace_params *tp, int filter_index,
			    int invert, int enable)
{
	u32 addr, key[4], mask[4];

	key[0] = tp->sport | (tp->sip << 16);
	key[1] = (tp->sip >> 16) | (tp->dport << 16);
	key[2] = tp->dip;
	key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20);

	mask[0] = tp->sport_mask | (tp->sip_mask << 16);
	mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16);
	mask[2] = tp->dip_mask;
	mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20);

	if (invert)
		key[3] |= (1 << 29);
	if (enable)
		key[3] |= (1 << 28);

	addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
	tp_wr_indirect(adapter, addr++, key[0]);
	tp_wr_indirect(adapter, addr++, mask[0]);
	tp_wr_indirect(adapter, addr++, key[1]);
	tp_wr_indirect(adapter, addr++, mask[1]);
	tp_wr_indirect(adapter, addr++, key[2]);
	tp_wr_indirect(adapter, addr++, mask[2]);
	tp_wr_indirect(adapter, addr++, key[3]);
	tp_wr_indirect(adapter, addr, mask[3]);
	t3_read_reg(adapter, A_TP_PIO_DATA);
}

/**
 *	t3_config_sched - configure a HW traffic scheduler
 *	@adap: the adapter
 *	@kbps: target rate in Kbps
 *	@sched: the scheduler index
 *
 *	Configure a HW scheduler for the target rate.
 */
int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched)
{
	unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
	unsigned int clk = adap->params.vpd.cclk * 1000;
	unsigned int selected_cpt = 0, selected_bpt = 0;

	if (kbps > 0) {
		kbps *= 125;	/* -> bytes */
		for (cpt = 1; cpt <= 255; cpt++) {
			tps = clk / cpt;
			bpt = (kbps + tps / 2) / tps;
			if (bpt > 0 && bpt <= 255) {
				v = bpt * tps;
				delta = v >= kbps ? v - kbps : kbps - v;
				if (delta <= mindelta) {
					mindelta = delta;
					selected_cpt = cpt;
					selected_bpt = bpt;
				}
			} else if (selected_cpt)
				break;
		}
		if (!selected_cpt)
			return -EINVAL;
	}
	t3_write_reg(adap, A_TP_TM_PIO_ADDR,
		     A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
	if (sched & 1)
		v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
	else
		v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
	t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
	return 0;
}
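
/*
 * Example: for a 1Gbps target (kbps = 1000000, i.e. 125000000 bytes/sec)
 * on a 200MHz core clock, cpt = 8 gives tps = 25000000 ticks/sec and
 * bpt = 5 bytes/tick, an exact 125000000 bytes/sec (delta = 0).  Because
 * the comparison uses "delta <= mindelta", later ties such as cpt = 16
 * with bpt = 10 replace earlier ones, so the scan retains the coarsest
 * tick that still matches the requested rate.
 */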

static int tp_init(struct adapter *adap, const struct tp_params *p)
{
	int busy = 0;

	tp_config(adap, p);
	t3_set_vlan_accel(adap, 3, 0);

	if (is_offload(adap)) {
		tp_set_timers(adap, adap->params.vpd.cclk * 1000);
		t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE);
		busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE,
				       0, 1000, 5);
		if (busy)
			CH_ERR(adap, "TP initialization timed out\n");
	}

	if (!busy)
		t3_write_reg(adap, A_TP_RESET, F_TPRESET);
	return busy;
}

int t3_mps_set_active_ports(struct adapter *adap, unsigned int port_mask)
{
	if (port_mask & ~((1 << adap->params.nports) - 1))
		return -EINVAL;
	t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE | F_PORT0ACTIVE,
			 port_mask << S_PORT0ACTIVE);
	return 0;
}

/*
 * Perform the bits of HW initialization that are dependent on the Tx
 * channels being used.
 */
static void chan_init_hw(struct adapter *adap, unsigned int chan_map)
{
	int i;

	if (chan_map != 3) {				/* one channel */
		t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
		t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
		t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_ENFORCEPKT |
			     (chan_map == 1 ? F_TPTXPORT0EN | F_PORT0ACTIVE :
					      F_TPTXPORT1EN | F_PORT1ACTIVE));
		t3_write_reg(adap, A_PM1_TX_CFG,
			     chan_map == 1 ? 0xffffffff : 0);
	} else {					/* two channels */
		t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
		t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
		t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
			     V_D1_WEIGHT(16) | V_D0_WEIGHT(16));
		t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN |
			     F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE |
			     F_ENFORCEPKT);
		t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000);
		t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE);
		t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
			     V_TX_MOD_QUEUE_REQ_MAP(0xaa));
		for (i = 0; i < 16; i++)
			t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE,
				     (i << 16) | 0x1010);
	}
}

static int calibrate_xgm(struct adapter *adapter)
{
	if (uses_xaui(adapter)) {
		unsigned int v, i;

		for (i = 0; i < 5; ++i) {
			t3_write_reg(adapter, A_XGM_XAUI_IMP, 0);
			t3_read_reg(adapter, A_XGM_XAUI_IMP);
			msleep(1);
			v = t3_read_reg(adapter, A_XGM_XAUI_IMP);
			if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) {
				t3_write_reg(adapter, A_XGM_XAUI_IMP,
					     V_XAUIIMP(G_CALIMP(v) >> 2));
				return 0;
			}
		}
		CH_ERR(adapter, "MAC calibration failed\n");
		return -1;
	} else {
		t3_write_reg(adapter, A_XGM_RGMII_IMP,
			     V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
				 F_XGM_IMPSETUPDATE);
	}
	return 0;
}

static void calibrate_xgm_t3b(struct adapter *adapter)
{
	if (!uses_xaui(adapter)) {
		t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET |
			     F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0);
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0,
				 F_XGM_IMPSETUPDATE);
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
				 0);
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0);
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE);
	}
}

struct mc7_timing_params {
	unsigned char ActToPreDly;
	unsigned char ActToRdWrDly;
	unsigned char PreCyc;
	unsigned char RefCyc[5];
	unsigned char BkCyc;
	unsigned char WrToRdDly;
	unsigned char RdToWrDly;
};

/*
 * Write a value to a register and check that the write completed.  These
 * writes normally complete in a cycle or two, so one read should suffice.
 * The very first read exists to flush the posted write to the device.
 */
static int wrreg_wait(struct adapter *adapter, unsigned int addr, u32 val)
{
	t3_write_reg(adapter, addr, val);
	t3_read_reg(adapter, addr);	/* flush */
	if (!(t3_read_reg(adapter, addr) & F_BUSY))
		return 0;
	CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr);
	return -EIO;
}

static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
{
	static const unsigned int mc7_mode[] = {
		0x632, 0x642, 0x652, 0x432, 0x442
	};
	static const struct mc7_timing_params mc7_timings[] = {
		{12, 3, 4, {20, 28, 34, 52, 0}, 15, 6, 4},
		{12, 4, 5, {20, 28, 34, 52, 0}, 16, 7, 4},
		{12, 5, 6, {20, 28, 34, 52, 0}, 17, 8, 4},
		{9, 3, 4, {15, 21, 26, 39, 0}, 12, 6, 4},
		{9, 4, 5, {15, 21, 26, 39, 0}, 13, 7, 4}
	};

	u32 val;
	unsigned int width, density, slow, attempts;
	struct adapter *adapter = mc7->adapter;
	const struct mc7_timing_params *p = &mc7_timings[mem_type];

	if (!mc7->size)
		return 0;

	val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
	slow = val & F_SLOW;
	width = G_WIDTH(val);
	density = G_DEN(val);

	t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN);
	val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);	/* flush */
	msleep(1);

	if (!slow) {
		t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN);
		t3_read_reg(adapter, mc7->offset + A_MC7_CAL);
		msleep(1);
		if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) &
		    (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) {
			CH_ERR(adapter, "%s MC7 calibration timed out\n",
			       mc7->name);
			goto out_fail;
		}
	}

	t3_write_reg(adapter, mc7->offset + A_MC7_PARM,
		     V_ACTTOPREDLY(p->ActToPreDly) |
		     V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) |
		     V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) |
		     V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly));

	t3_write_reg(adapter, mc7->offset + A_MC7_CFG,
		     val | F_CLKEN | F_TERM150);
	t3_read_reg(adapter, mc7->offset + A_MC7_CFG);	/* flush */

	if (!slow)
		t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB,
				 F_DLLENB);
	udelay(1);

	val = slow ? 3 : 6;
	if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
		goto out_fail;

	if (!slow) {
		t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100);
		t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLRST, 0);
		udelay(5);
	}

	if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_MODE,
		       mc7_mode[mem_type]) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
		goto out_fail;

	/* clock value is in KHz */
	mc7_clock = mc7_clock * 7812 + mc7_clock / 2;	/* ns */
	mc7_clock /= 1000000;	/* KHz->MHz, ns->us */

	t3_write_reg(adapter, mc7->offset + A_MC7_REF,
		     F_PERREFEN | V_PREREFDIV(mc7_clock));
	t3_read_reg(adapter, mc7->offset + A_MC7_REF);	/* flush */

	t3_write_reg(adapter, mc7->offset + A_MC7_ECC, F_ECCGENEN | F_ECCCHKEN);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END,
		     (mc7->size << width) - 1);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1));
	t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);	/* flush */

	attempts = 50;
	do {
		msleep(250);
		val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);
	} while ((val & F_BUSY) && --attempts);
	if (val & F_BUSY) {
		CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name);
		goto out_fail;
	}

	/* Enable normal memory accesses. */
	t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY);
	return 0;

out_fail:
	return -1;
}

static void config_pcie(struct adapter *adap)
{
	static const u16 ack_lat[4][6] = {
		{237, 416, 559, 1071, 2095, 4143},
		{128, 217, 289, 545, 1057, 2081},
		{73, 118, 154, 282, 538, 1050},
		{67, 107, 86, 150, 278, 534}
	};
	static const u16 rpl_tmr[4][6] = {
		{711, 1248, 1677, 3213, 6285, 12429},
		{384, 651, 867, 1635, 3171, 6243},
		{219, 354, 462, 846, 1614, 3150},
		{201, 321, 258, 450, 834, 1602}
	};

	u16 val;
	unsigned int log2_width, pldsize;
	unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;

	pci_read_config_word(adap->pdev,
			     adap->params.pci.pcie_cap_addr + PCI_EXP_DEVCTL,
			     &val);
	pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
	pci_read_config_word(adap->pdev,
			     adap->params.pci.pcie_cap_addr + PCI_EXP_LNKCTL,
			     &val);

	fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
	fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
		     G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
	log2_width = fls(adap->params.pci.width) - 1;
	acklat = ack_lat[log2_width][pldsize];
	if (val & 1)		/* check LOsEnable */
		acklat += fst_trn_tx * 4;
	rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;

	if (adap->params.rev == 0)
		t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
				 V_T3A_ACKLAT(M_T3A_ACKLAT),
				 V_T3A_ACKLAT(acklat));
	else
		t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
				 V_ACKLAT(acklat));

	t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
			 V_REPLAYLMT(rpllmt));

	t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
	t3_set_reg_field(adap, A_PCIE_CFG, 0,
			 F_ENABLELINKDWNDRST | F_ENABLELINKDOWNRST |
			 F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
}
3493 * Initialize and configure T3 HW modules. This performs the
3494 * initialization steps that need to be done once after a card is reset.
3495 * MAC and PHY initialization is handled separarely whenever a port is enabled.
3497 * fw_params are passed to FW and their value is platform dependent. Only the
3498 * top 8 bits are available for use, the rest must be 0.
3500 int t3_init_hw(struct adapter *adapter, u32 fw_params)
3502 int err = -EIO, attempts, i;
3503 const struct vpd_params *vpd = &adapter->params.vpd;
3505 if (adapter->params.rev > 0)
3506 calibrate_xgm_t3b(adapter);
3507 else if (calibrate_xgm(adapter))
3508 goto out_err;
3510 if (vpd->mclk) {
3511 partition_mem(adapter, &adapter->params.tp);
3513 if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
3514 mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
3515 mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
3516 t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
3517 adapter->params.mc5.nfilters,
3518 adapter->params.mc5.nroutes))
3519 goto out_err;
3521 for (i = 0; i < 32; i++)
3522 if (clear_sge_ctxt(adapter, i, F_CQ))
3523 goto out_err;
3526 if (tp_init(adapter, &adapter->params.tp))
3527 goto out_err;
3529 t3_tp_set_coalescing_size(adapter,
3530 min(adapter->params.sge.max_pkt_size,
3531 MAX_RX_COALESCING_LEN), 1);
3532 t3_tp_set_max_rxsize(adapter,
3533 min(adapter->params.sge.max_pkt_size, 16384U));
3534 ulp_config(adapter, &adapter->params.tp);
3536 if (is_pcie(adapter))
3537 config_pcie(adapter);
3538 else
3539 t3_set_reg_field(adapter, A_PCIX_CFG, 0,
3540 F_DMASTOPEN | F_CLIDECEN);
3542 if (adapter->params.rev == T3_REV_C)
3543 t3_set_reg_field(adapter, A_ULPTX_CONFIG, 0,
3544 F_CFG_CQE_SOP_MASK);
3546 t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff);
3547 t3_write_reg(adapter, A_PM1_RX_MODE, 0);
3548 t3_write_reg(adapter, A_PM1_TX_MODE, 0);
3549 chan_init_hw(adapter, adapter->params.chan_map);
3550 t3_sge_init(adapter, &adapter->params.sge);
3552 t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW, calc_gpio_intr(adapter));
3554 t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
3555 t3_write_reg(adapter, A_CIM_BOOT_CFG,
3556 V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
3557 t3_read_reg(adapter, A_CIM_BOOT_CFG); /* flush */
3559 attempts = 100;
3560 do { /* wait for uP to initialize */
3561 msleep(20);
3562 } while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
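/*
 * Editor's note: the loop above allows up to 100 iterations of 20 ms each,
 * i.e. roughly a 2-second budget for the embedded uP to clear
 * A_CIM_HOST_ACC_DATA once the firmware has booted.
 */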
3563 if (!attempts) {
3564 CH_ERR(adapter, "uP initialization timed out\n");
3565 goto out_err;
3566 }
3568 err = 0;
3569 out_err:
3570 return err;
3571 }
3573 /**
3574 * get_pci_mode - determine a card's PCI mode
3575 * @adapter: the adapter
3576 * @p: where to store the PCI settings
3578 * Determines a card's PCI mode and associated parameters, such as speed
3579 * and width.
3580 */
3581 static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
3582 {
3583 static unsigned short speed_map[] = { 33, 66, 100, 133 };
3584 u32 pci_mode, pcie_cap;
3586 pcie_cap = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
3587 if (pcie_cap) {
3588 u16 val;
3590 p->variant = PCI_VARIANT_PCIE;
3591 p->pcie_cap_addr = pcie_cap;
3592 pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA,
3593 &val);
3594 p->width = (val >> 4) & 0x3f;
3595 return;
3596 }
3598 pci_mode = t3_read_reg(adapter, A_PCIX_MODE);
3599 p->speed = speed_map[G_PCLKRANGE(pci_mode)];
3600 p->width = (pci_mode & F_64BIT) ? 64 : 32;
3601 pci_mode = G_PCIXINITPAT(pci_mode);
3602 if (pci_mode == 0)
3603 p->variant = PCI_VARIANT_PCI;
3604 else if (pci_mode < 4)
3605 p->variant = PCI_VARIANT_PCIX_MODE1_PARITY;
3606 else if (pci_mode < 8)
3607 p->variant = PCI_VARIANT_PCIX_MODE1_ECC;
3608 else
3609 p->variant = PCI_VARIANT_PCIX_266_MODE2;
3610 }
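/*
 * Editor's note: for PCIe devices only the link width is recorded above;
 * the speed_map table (33/66/100/133 MHz) applies only to the PCI/PCI-X
 * path, where G_PCLKRANGE encodes the bus clock and G_PCIXINITPAT selects
 * the variant (0 = plain PCI, 1-3 = PCI-X Mode 1 parity, 4-7 = PCI-X
 * Mode 1 ECC, 8+ = PCI-X 266 Mode 2).
 */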
3612 /**
3613 * init_link_config - initialize a link's SW state
3614 * @lc: structure holding the link state
3615 * @caps: bitmap of the link's capabilities (SUPPORTED_* values)
3617 * Initializes the SW state maintained for each link, including the link's
3618 * capabilities and default speed/duplex/flow-control/autonegotiation
3619 * settings.
3620 */
3621 static void init_link_config(struct link_config *lc, unsigned int caps)
3622 {
3623 lc->supported = caps;
3624 lc->requested_speed = lc->speed = SPEED_INVALID;
3625 lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
3626 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
3627 if (lc->supported & SUPPORTED_Autoneg) {
3628 lc->advertising = lc->supported;
3629 lc->autoneg = AUTONEG_ENABLE;
3630 lc->requested_fc |= PAUSE_AUTONEG;
3631 } else {
3632 lc->advertising = 0;
3633 lc->autoneg = AUTONEG_DISABLE;
3634 }
3635 }
3637 /**
3638 * mc7_calc_size - calculate MC7 memory size
3639 * @cfg: the MC7 configuration
3641 * Calculates the size of an MC7 memory in bytes from the value of its
3642 * configuration register.
3643 */
3644 static unsigned int mc7_calc_size(u32 cfg)
3645 {
3646 unsigned int width = G_WIDTH(cfg);
3647 unsigned int banks = !!(cfg & F_BKS) + 1;
3648 unsigned int org = !!(cfg & F_ORG) + 1;
3649 unsigned int density = G_DEN(cfg);
3650 unsigned int MBs = ((256 << density) * banks) / (org << width);
3652 return MBs << 20;
3653 }
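/*
 * Editor's note, a worked example of the formula above (values are
 * illustrative only): with DEN = 1, F_BKS set (banks = 2), F_ORG clear
 * (org = 1) and WIDTH = 2, MBs = ((256 << 1) * 2) / (1 << 2) = 256, so
 * the function returns 256 << 20, i.e. 256 MB.
 */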
3655 static void mc7_prep(struct adapter *adapter, struct mc7 *mc7,
3656 unsigned int base_addr, const char *name)
3657 {
3658 u32 cfg;
3660 mc7->adapter = adapter;
3661 mc7->name = name;
3662 mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
3663 cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
3664 mc7->size = G_DEN(cfg) == M_DEN ? 0 : mc7_calc_size(cfg);
3665 mc7->width = G_WIDTH(cfg);
3666 }
3668 void mac_prep(struct cmac *mac, struct adapter *adapter, int index)
3669 {
3670 mac->adapter = adapter;
3671 mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index;
3672 mac->nucast = 1;
3674 if (adapter->params.rev == 0 && uses_xaui(adapter)) {
3675 t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset,
3676 is_10G(adapter) ? 0x2901c04 : 0x2301c04);
3677 t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset,
3678 F_ENRGMII, 0);
3679 }
3680 }
3682 void early_hw_init(struct adapter *adapter, const struct adapter_info *ai)
3683 {
3684 u32 val = V_PORTSPEED(is_10G(adapter) ? 3 : 2);
3686 mi1_init(adapter, ai);
3687 t3_write_reg(adapter, A_I2C_CFG, /* set for 80KHz */
3688 V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
3689 t3_write_reg(adapter, A_T3DBG_GPIO_EN,
3690 ai->gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
3691 t3_write_reg(adapter, A_MC5_DB_SERVER_INDEX, 0);
3692 t3_write_reg(adapter, A_SG_OCO_BASE, V_BASE1(0xfff));
3694 if (adapter->params.rev == 0 || !uses_xaui(adapter))
3695 val |= F_ENRGMII;
3697 /* Enable MAC clocks so we can access the registers */
3698 t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3699 t3_read_reg(adapter, A_XGM_PORT_CFG);
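/*
 * Editor's note: the reads of A_XGM_PORT_CFG here and below serve as
 * posted-write flushes, ensuring each configuration write has reached the
 * device before the next step, as with the explicit "flush" reads
 * elsewhere in this file.
 */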
3701 val |= F_CLKDIVRESET_;
3702 t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3703 t3_read_reg(adapter, A_XGM_PORT_CFG);
3704 t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val);
3705 t3_read_reg(adapter, A_XGM_PORT_CFG);
3706 }
3708 /*
3709 * Reset the adapter.
3710 * Older PCIe cards lose their config space during reset, PCI-X
3711 * ones don't.
3712 */
3713 int t3_reset_adapter(struct adapter *adapter)
3714 {
3715 int i, save_and_restore_pcie =
3716 adapter->params.rev < T3_REV_B2 && is_pcie(adapter);
3717 uint16_t devid = 0;
3719 if (save_and_restore_pcie)
3720 pci_save_state(adapter->pdev);
3721 t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);
3723 /*
3724 * Delay. Give the device some time to reset fully.
3725 * XXX The delay time should be tuned.
3726 */
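/*
 * Editor's note: despite the variable name, config-space offset 0x00 is
 * the PCI vendor ID, and 0x1425 is Chelsio's vendor ID; the loop below
 * polls until the device's config space is readable again after the reset.
 */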
3727 for (i = 0; i < 10; i++) {
3728 msleep(50);
3729 pci_read_config_word(adapter->pdev, 0x00, &devid);
3730 if (devid == 0x1425)
3731 break;
3732 }
3734 if (devid != 0x1425)
3735 return -1;
3737 if (save_and_restore_pcie)
3738 pci_restore_state(adapter->pdev);
3739 return 0;
3740 }
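/*
 * Editor's note (descriptive, not from the original source): init_parity
 * below writes known data to the SGE egress/response contexts and to the
 * CIM IBQs via the debug interface, so that these parity-protected
 * memories start out with valid parity after reset.
 */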
3742 static int init_parity(struct adapter *adap)
3743 {
3744 int i, err, addr;
3746 if (t3_read_reg(adap, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
3747 return -EBUSY;
3749 for (err = i = 0; !err && i < 16; i++)
3750 err = clear_sge_ctxt(adap, i, F_EGRESS);
3751 for (i = 0xfff0; !err && i <= 0xffff; i++)
3752 err = clear_sge_ctxt(adap, i, F_EGRESS);
3753 for (i = 0; !err && i < SGE_QSETS; i++)
3754 err = clear_sge_ctxt(adap, i, F_RESPONSEQ);
3755 if (err)
3756 return err;
3758 t3_write_reg(adap, A_CIM_IBQ_DBG_DATA, 0);
3759 for (i = 0; i < 4; i++)
3760 for (addr = 0; addr <= M_IBQDBGADDR; addr++) {
3761 t3_write_reg(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGEN |
3762 F_IBQDBGWR | V_IBQDBGQID(i) |
3763 V_IBQDBGADDR(addr));
3764 err = t3_wait_op_done(adap, A_CIM_IBQ_DBG_CFG,
3765 F_IBQDBGBUSY, 0, 2, 1);
3766 if (err)
3767 return err;
3768 }
3769 return 0;
3770 }
3772 /*
3773 * Initialize adapter SW state for the various HW modules, set initial values
3774 * for some adapter tunables, take PHYs out of reset, and initialize the MDIO
3775 * interface.
3776 */
3777 int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
3778 int reset)
3779 {
3780 int ret;
3781 unsigned int i, j = -1;
3783 get_pci_mode(adapter, &adapter->params.pci);
3785 adapter->params.info = ai;
3786 adapter->params.nports = ai->nports0 + ai->nports1;
3787 adapter->params.chan_map = (!!ai->nports0) | (!!ai->nports1 << 1);
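/*
 * Editor's note, illustrative values: chan_map gets bit 0 for channel 0
 * and bit 1 for channel 1, so nports0 = 1, nports1 = 1 yields chan_map = 3
 * (both channels) while nports0 = 2, nports1 = 0 yields chan_map = 1.
 */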
3788 adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
3789 /*
3790 * We used to only run the "adapter check task" once a second if
3791 * we had PHYs which didn't support interrupts (we would check
3792 * their link status once a second). Now we check other conditions
3793 * in that routine which could potentially impose a very high
3794 * interrupt load on the system. As such, we now always scan the
3795 * adapter state once a second ...
3796 */
3797 adapter->params.linkpoll_period = 10;
3798 adapter->params.stats_update_period = is_10G(adapter) ?
3799 MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
3800 adapter->params.pci.vpd_cap_addr =
3801 pci_find_capability(adapter->pdev, PCI_CAP_ID_VPD);
3802 ret = get_vpd_params(adapter, &adapter->params.vpd);
3803 if (ret < 0)
3804 return ret;
3806 if (reset && t3_reset_adapter(adapter))
3807 return -1;
3809 t3_sge_prep(adapter, &adapter->params.sge);
3811 if (adapter->params.vpd.mclk) {
3812 struct tp_params *p = &adapter->params.tp;
3814 mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX");
3815 mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
3816 mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");
3818 p->nchan = adapter->params.chan_map == 3 ? 2 : 1;
3819 p->pmrx_size = t3_mc7_size(&adapter->pmrx);
3820 p->pmtx_size = t3_mc7_size(&adapter->pmtx);
3821 p->cm_size = t3_mc7_size(&adapter->cm);
3822 p->chan_rx_size = p->pmrx_size / 2; /* only 1 Rx channel */
3823 p->chan_tx_size = p->pmtx_size / p->nchan;
3824 p->rx_pg_size = 64 * 1024;
3825 p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
3826 p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
3827 p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
3828 p->ntimer_qs = p->cm_size >= (128 << 20) ||
3829 adapter->params.rev > 0 ? 12 : 6;
3830 }
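/*
 * Editor's note, an illustrative sizing for the block above: with a 64 MB
 * PMRX, chan_rx_size = 32 MB, carved into pm_num_pages(32 MB, 64 KB)
 * Rx pages; ntimer_qs falls back to 6 only on rev-0 parts with less than
 * 128 MB of CM memory.
 */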
3832 adapter->params.offload = t3_mc7_size(&adapter->pmrx) &&
3833 t3_mc7_size(&adapter->pmtx) &&
3834 t3_mc7_size(&adapter->cm);
3836 if (is_offload(adapter)) {
3837 adapter->params.mc5.nservers = DEFAULT_NSERVERS;
3838 adapter->params.mc5.nfilters = adapter->params.rev > 0 ?
3839 DEFAULT_NFILTERS : 0;
3840 adapter->params.mc5.nroutes = 0;
3841 t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT);
3842 }
3843 init_mtus(adapter->params.mtus);
3844 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
3847 early_hw_init(adapter, ai);
3848 ret = init_parity(adapter);
3849 if (ret)
3850 return ret;
3852 for_each_port(adapter, i) {
3853 u8 hw_addr[6];
3854 const struct port_type_info *pti;
3855 struct port_info *p = adap2pinfo(adapter, i);
3857 while (!adapter->params.vpd.port_type[++j])
3858 ;
3860 pti = &port_types[adapter->params.vpd.port_type[j]];
3861 if (!pti->phy_prep) {
3862 CH_ALERT(adapter, "Invalid port type index %d\n",
3863 adapter->params.vpd.port_type[j]);
3864 return -EINVAL;
3865 }
3867 ret = pti->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
3868 ai->mdio_ops);
3869 if (ret)
3870 return ret;
3871 p->phy.mdio.dev = adapter->port[i];
3872 mac_prep(&p->mac, adapter, j);
3874 /*
3875 * The VPD EEPROM stores the base Ethernet address for the
3876 * card. A port's address is derived from the base by adding
3877 * the port's index to the base's low octet.
3878 */
3879 memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
3880 hw_addr[5] = adapter->params.vpd.eth_base[5] + i;
3882 memcpy(adapter->port[i]->dev_addr, hw_addr,
3883 ETH_ALEN);
3884 memcpy(adapter->port[i]->perm_addr, hw_addr,
3885 ETH_ALEN);
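/*
 * Editor's note, an illustrative assignment: if the VPD base address were
 * 00:07:43:12:34:56, port 0 would use ...:56 and port 1 ...:57, since
 * only the low octet is incremented per port.
 */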
3886 init_link_config(&p->link_config, p->phy.caps);
3887 p->phy.ops->power_down(&p->phy, 1);
3889 /*
3890 * If the PHY doesn't support interrupts for link status
3891 * changes, schedule a scan of the adapter links at least
3892 * once a second.
3893 */
3894 if (!(p->phy.caps & SUPPORTED_IRQ) &&
3895 adapter->params.linkpoll_period > 10)
3896 adapter->params.linkpoll_period = 10;
3897 }
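/*
 * Editor's note (an inference from the surrounding comments):
 * linkpoll_period appears to be expressed in tenths of a second, so the
 * value 10 assigned above matches the once-a-second scan described.
 */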
3899 return 0;
3900 }
3902 void t3_led_ready(struct adapter *adapter)
3903 {
3904 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
3905 F_GPIO0_OUT_VAL);
3906 }
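/*
 * Editor's note (descriptive, not from the original source):
 * t3_replay_prep_adapter below re-runs the early HW init and per-port PHY
 * preparation for an adapter whose SW state already exists, e.g. on
 * error-recovery paths where t3_prep_adapter has run once before.
 */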
3908 int t3_replay_prep_adapter(struct adapter *adapter)
3909 {
3910 const struct adapter_info *ai = adapter->params.info;
3911 unsigned int i, j = -1;
3912 int ret;
3914 early_hw_init(adapter, ai);
3915 ret = init_parity(adapter);
3916 if (ret)
3917 return ret;
3919 for_each_port(adapter, i) {
3920 const struct port_type_info *pti;
3921 struct port_info *p = adap2pinfo(adapter, i);
3923 while (!adapter->params.vpd.port_type[++j])
3924 ;
3926 pti = &port_types[adapter->params.vpd.port_type[j]];
3927 ret = pti->phy_prep(&p->phy, adapter, p->phy.mdio.prtad, NULL);
3928 if (ret)
3929 return ret;
3930 p->phy.ops->power_down(&p->phy, 1);
3931 }
3933 return 0;
3934 }