/*
 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "common.h"
#include "regs.h"
#include "sge_defs.h"
#include "firmware_exports.h"
/**
 *	t3_wait_op_done_val - wait until an operation is completed
 *	@adapter: the adapter performing the operation
 *	@reg: the register to check for completion
 *	@mask: a single-bit field within @reg that indicates completion
 *	@polarity: the value of the field when the operation is completed
 *	@attempts: number of check iterations
 *	@delay: delay in usecs between iterations
 *	@valp: where to store the value of the register at completion time
 *
 *	Wait until an operation is completed by checking a bit in a register
 *	up to @attempts times.  If @valp is not NULL the value of the register
 *	at the time it indicated completion is stored there.  Returns 0 if the
 *	operation completes and -EAGAIN otherwise.
 */
int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
			int polarity, int attempts, int delay, u32 *valp)
{
	while (1) {
		u32 val = t3_read_reg(adapter, reg);

		if (!!(val & mask) == polarity) {
			if (valp)
				*valp = val;
			return 0;
		}
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			udelay(delay);
	}
}
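/*
 * Usage sketch (illustrative, not part of the original file): poll a busy
 * bit until the hardware clears it, capturing the final register value.
 * A_SF_OP/F_BUSY are just an example register/bit pair used elsewhere in
 * this file:
 *
 *	u32 val;
 *
 *	if (t3_wait_op_done_val(adapter, A_SF_OP, F_BUSY, 0, 5, 10, &val))
 *		return -EAGAIN;
 */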
/**
 *	t3_write_regs - write a bunch of registers
 *	@adapter: the adapter to program
 *	@p: an array of register address/register value pairs
 *	@n: the number of address/value pairs
 *	@offset: register address offset
 *
 *	Takes an array of register address/register value pairs and writes each
 *	value to the corresponding register.  Register addresses are adjusted
 *	by the supplied offset.
 */
void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p,
		   int n, unsigned int offset)
{
	while (n--) {
		t3_write_reg(adapter, p->reg_addr + offset, p->val);
		p++;
	}
}
/**
 *	t3_set_reg_field - set a register field to a value
 *	@adapter: the adapter to program
 *	@addr: the register address
 *	@mask: specifies the portion of the register to modify
 *	@val: the new value for the register field
 *
 *	Sets a register field specified by the supplied mask to the
 *	given value.
 */
void t3_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
		      u32 val)
{
	u32 v = t3_read_reg(adapter, addr) & ~mask;

	t3_write_reg(adapter, addr, v | val);
	t3_read_reg(adapter, addr);	/* flush */
}
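/*
 * Usage sketch (illustrative): update a single field with a read-modify-
 * write, leaving the other bits of the register intact.  Here the V_ST
 * field of A_MI1_CFG is set to 1, exactly as the MI1 code below does:
 *
 *	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
 */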
/**
 *	t3_read_indirect - read indirectly addressed registers
 *	@adap: the adapter
 *	@addr_reg: register holding the indirect address
 *	@data_reg: register holding the value of the indirect register
 *	@vals: where the read register values are stored
 *	@start_idx: index of first indirect register to read
 *	@nregs: how many indirect registers to read
 *
 *	Reads registers that are accessed indirectly through an address/data
 *	register pair.
 */
static void t3_read_indirect(struct adapter *adap, unsigned int addr_reg,
			     unsigned int data_reg, u32 *vals,
			     unsigned int nregs, unsigned int start_idx)
{
	while (nregs--) {
		t3_write_reg(adap, addr_reg, start_idx);
		*vals++ = t3_read_reg(adap, data_reg);
		start_idx++;
	}
}
/**
 *	t3_mc7_bd_read - read from MC7 through backdoor accesses
 *	@mc7: identifies MC7 to read from
 *	@start: index of first 64-bit word to read
 *	@n: number of 64-bit words to read
 *	@buf: where to store the read result
 *
 *	Read n 64-bit words from MC7 starting at word start, using backdoor
 *	accesses.
 */
int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
		   u64 *buf)
{
	static const int shift[] = { 0, 0, 16, 24 };
	static const int step[] = { 0, 32, 16, 8 };

	unsigned int size64 = mc7->size / 8;	/* # of 64-bit words */
	struct adapter *adap = mc7->adapter;

	if (start >= size64 || start + n > size64)
		return -EINVAL;

	start *= (8 << mc7->width);
	while (n--) {
		int i;
		u64 val64 = 0;

		for (i = (1 << mc7->width) - 1; i >= 0; --i) {
			int attempts = 10;
			u32 val;

			t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR, start);
			t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0);
			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP);
			while ((val & F_BUSY) && attempts--)
				val = t3_read_reg(adap,
						  mc7->offset + A_MC7_BD_OP);
			if (val & F_BUSY)
				return -EIO;

			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1);
			if (mc7->width == 0) {
				val64 = t3_read_reg(adap,
						    mc7->offset +
						    A_MC7_BD_DATA0);
				val64 |= (u64) val << 32;
			} else {
				if (mc7->width > 1)
					val >>= shift[mc7->width];
				val64 |= (u64) val << (step[mc7->width] * i);
			}
			start += 8;
		}
		*buf++ = val64;
	}
	return 0;
}
/*
 * Initialize MI1.
 */
static void mi1_init(struct adapter *adap, const struct adapter_info *ai)
{
	u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
	u32 val = F_PREEN | V_CLKDIV(clkdiv);

	t3_write_reg(adap, A_MI1_CFG, val);
}
#define MDIO_ATTEMPTS 20

/*
 * MI1 read/write operations for clause 22 PHYs.
 */
static int t3_mi1_read(struct adapter *adapter, int phy_addr, int mmd_addr,
		       int reg_addr, unsigned int *valp)
{
	int ret;
	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);

	if (mmd_addr)
		return -EINVAL;

	mutex_lock(&adapter->mdio_lock);
	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
	if (!ret)
		*valp = t3_read_reg(adapter, A_MI1_DATA);
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}

static int t3_mi1_write(struct adapter *adapter, int phy_addr, int mmd_addr,
			int reg_addr, unsigned int val)
{
	int ret;
	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);

	if (mmd_addr)
		return -EINVAL;

	mutex_lock(&adapter->mdio_lock);
	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, val);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}

static const struct mdio_ops mi1_mdio_ops = {
	t3_mi1_read,
	t3_mi1_write
};
/*
 * Performs the address cycle for clause 45 PHYs.
 * Must be called with the MDIO_LOCK held.
 */
static int mi1_wr_addr(struct adapter *adapter, int phy_addr, int mmd_addr,
		       int reg_addr)
{
	u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);

	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), 0);
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, reg_addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
	return t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
			       MDIO_ATTEMPTS, 10);
}
/*
 * MI1 read/write operations for indirect-addressed PHYs.
 */
static int mi1_ext_read(struct adapter *adapter, int phy_addr, int mmd_addr,
			int reg_addr, unsigned int *valp)
{
	int ret;

	mutex_lock(&adapter->mdio_lock);
	ret = mi1_wr_addr(adapter, phy_addr, mmd_addr, reg_addr);
	if (!ret) {
		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));
		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
				      MDIO_ATTEMPTS, 10);
		if (!ret)
			*valp = t3_read_reg(adapter, A_MI1_DATA);
	}
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}

static int mi1_ext_write(struct adapter *adapter, int phy_addr, int mmd_addr,
			 int reg_addr, unsigned int val)
{
	int ret;

	mutex_lock(&adapter->mdio_lock);
	ret = mi1_wr_addr(adapter, phy_addr, mmd_addr, reg_addr);
	if (!ret) {
		t3_write_reg(adapter, A_MI1_DATA, val);
		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
				      MDIO_ATTEMPTS, 10);
	}
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}

static const struct mdio_ops mi1_mdio_ext_ops = {
	mi1_ext_read,
	mi1_ext_write
};
/**
 *	t3_mdio_change_bits - modify the value of a PHY register
 *	@phy: the PHY to operate on
 *	@mmd: the device address
 *	@reg: the register address
 *	@clear: what part of the register value to mask off
 *	@set: what part of the register value to set
 *
 *	Changes the value of a PHY register by applying a mask to its current
 *	value and ORing the result with a new value.
 */
int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
			unsigned int set)
{
	int ret;
	unsigned int val;

	ret = mdio_read(phy, mmd, reg, &val);
	if (!ret) {
		val &= ~clear;
		ret = mdio_write(phy, mmd, reg, val | set);
	}
	return ret;
}
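/*
 * Usage sketch (illustrative): t3_phy_reset() below uses this helper to
 * clear BMCR_PDOWN and set BMCR_RESET in a single read-modify-write:
 *
 *	t3_mdio_change_bits(phy, mmd, MII_BMCR, BMCR_PDOWN, BMCR_RESET);
 */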
/**
 *	t3_phy_reset - reset a PHY block
 *	@phy: the PHY to operate on
 *	@mmd: the device address of the PHY block to reset
 *	@wait: how long to wait for the reset to complete in 1ms increments
 *
 *	Resets a PHY block and optionally waits for the reset to complete.
 *	@mmd should be 0 for 10/100/1000 PHYs and the device address to reset
 *	for 10G PHYs.
 */
int t3_phy_reset(struct cphy *phy, int mmd, int wait)
{
	int err;
	unsigned int ctl;

	err = t3_mdio_change_bits(phy, mmd, MII_BMCR, BMCR_PDOWN, BMCR_RESET);
	if (err || !wait)
		return err;

	do {
		err = mdio_read(phy, mmd, MII_BMCR, &ctl);
		if (err)
			return err;
		ctl &= BMCR_RESET;
		if (ctl)
			msleep(1);
	} while (ctl && --wait);

	return ctl ? -1 : 0;
}
/**
 *	t3_phy_advertise - set the PHY advertisement registers for autoneg
 *	@phy: the PHY to operate on
 *	@advert: bitmap of capabilities the PHY should advertise
 *
 *	Sets a 10/100/1000 PHY's advertisement registers to advertise the
 *	requested capabilities.
 */
int t3_phy_advertise(struct cphy *phy, unsigned int advert)
{
	int err;
	unsigned int val = 0;

	err = mdio_read(phy, 0, MII_CTRL1000, &val);
	if (err)
		return err;

	val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
	if (advert & ADVERTISED_1000baseT_Half)
		val |= ADVERTISE_1000HALF;
	if (advert & ADVERTISED_1000baseT_Full)
		val |= ADVERTISE_1000FULL;

	err = mdio_write(phy, 0, MII_CTRL1000, val);
	if (err)
		return err;

	val = 1;
	if (advert & ADVERTISED_10baseT_Half)
		val |= ADVERTISE_10HALF;
	if (advert & ADVERTISED_10baseT_Full)
		val |= ADVERTISE_10FULL;
	if (advert & ADVERTISED_100baseT_Half)
		val |= ADVERTISE_100HALF;
	if (advert & ADVERTISED_100baseT_Full)
		val |= ADVERTISE_100FULL;
	if (advert & ADVERTISED_Pause)
		val |= ADVERTISE_PAUSE_CAP;
	if (advert & ADVERTISED_Asym_Pause)
		val |= ADVERTISE_PAUSE_ASYM;
	return mdio_write(phy, 0, MII_ADVERTISE, val);
}
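/*
 * Usage sketch (illustrative): advertise 10/100/1000 full duplex plus
 * symmetric pause:
 *
 *	t3_phy_advertise(phy, ADVERTISED_10baseT_Full |
 *			 ADVERTISED_100baseT_Full |
 *			 ADVERTISED_1000baseT_Full | ADVERTISED_Pause);
 */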
/**
 *	t3_set_phy_speed_duplex - force PHY speed and duplex
 *	@phy: the PHY to operate on
 *	@speed: requested PHY speed
 *	@duplex: requested PHY duplex
 *
 *	Force a 10/100/1000 PHY's speed and duplex.  This also disables
 *	auto-negotiation except for GigE, where auto-negotiation is mandatory.
 */
int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
{
	int err;
	unsigned int ctl;

	err = mdio_read(phy, 0, MII_BMCR, &ctl);
	if (err)
		return err;

	if (speed >= 0) {
		ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
		if (speed == SPEED_100)
			ctl |= BMCR_SPEED100;
		else if (speed == SPEED_1000)
			ctl |= BMCR_SPEED1000;
	}
	if (duplex >= 0) {
		ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
		if (duplex == DUPLEX_FULL)
			ctl |= BMCR_FULLDPLX;
	}
	if (ctl & BMCR_SPEED1000) /* auto-negotiation required for GigE */
		ctl |= BMCR_ANENABLE;
	return mdio_write(phy, 0, MII_BMCR, ctl);
}
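/*
 * Usage sketch (illustrative): force 100 Mb/s full duplex.  Because the
 * speed is below 1G this also clears BMCR_ANENABLE, turning off
 * auto-negotiation:
 *
 *	t3_set_phy_speed_duplex(phy, SPEED_100, DUPLEX_FULL);
 */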
static const struct adapter_info t3_adap_info[] = {
	{2, 0,
	 F_GPIO2_OEN | F_GPIO4_OEN |
	 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
	 &mi1_mdio_ops, "Chelsio PE9000"},
	{2, 0,
	 F_GPIO2_OEN | F_GPIO4_OEN |
	 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
	 &mi1_mdio_ops, "Chelsio T302"},
	{1, 0,
	 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
	 F_GPIO11_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
	 { 0 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	 &mi1_mdio_ext_ops, "Chelsio T310"},
	{2, 0,
	 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
	 F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
	 F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
	 { S_GPIO9, S_GPIO3 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	 &mi1_mdio_ext_ops, "Chelsio T320"},
};
/*
 * Return the adapter_info structure with a given index.  Out-of-range indices
 * return NULL.
 */
const struct adapter_info *t3_get_adapter_info(unsigned int id)
{
	return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
}
struct port_type_info {
	int (*phy_prep)(struct cphy *phy, struct adapter *adapter,
			int phy_addr, const struct mdio_ops *ops);
};

static const struct port_type_info port_types[] = {
	{ NULL },
	{ t3_ael1002_phy_prep },
	{ t3_vsc8211_phy_prep },
	{ NULL },
	{ t3_xaui_direct_phy_prep },
	{ NULL },
	{ t3_qt2045_phy_prep },
	{ t3_ael1006_phy_prep },
	{ NULL },
};
#define VPD_ENTRY(name, len) \
	u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]

/*
 * Partial EEPROM Vital Product Data structure.  Includes only the ID and
 * VPD-R sections.
 */
struct t3_vpd {
	u8 id_tag;
	u8 id_len[2];
	u8 id_data[16];
	u8 vpdr_tag;
	u8 vpdr_len[2];
	VPD_ENTRY(pn, 16);	/* part number */
	VPD_ENTRY(ec, 16);	/* EC level */
	VPD_ENTRY(sn, SERNUM_LEN); /* serial number */
	VPD_ENTRY(na, 12);	/* MAC address base */
	VPD_ENTRY(cclk, 6);	/* core clock */
	VPD_ENTRY(mclk, 6);	/* mem clock */
	VPD_ENTRY(uclk, 6);	/* uP clk */
	VPD_ENTRY(mdc, 6);	/* MDIO clk */
	VPD_ENTRY(mt, 2);	/* mem timing */
	VPD_ENTRY(xaui0cfg, 6);	/* XAUI0 config */
	VPD_ENTRY(xaui1cfg, 6);	/* XAUI1 config */
	VPD_ENTRY(port0, 2);	/* PHY0 complex */
	VPD_ENTRY(port1, 2);	/* PHY1 complex */
	VPD_ENTRY(port2, 2);	/* PHY2 complex */
	VPD_ENTRY(port3, 2);	/* PHY3 complex */
	VPD_ENTRY(rv, 1);	/* csum */
	u32 pad;		/* for multiple-of-4 sizing and alignment */
};
#define EEPROM_MAX_POLL   4
#define EEPROM_STAT_ADDR  0x4000
#define VPD_BASE          0xc00
/**
 *	t3_seeprom_read - read a VPD EEPROM location
 *	@adapter: adapter to read
 *	@addr: EEPROM address
 *	@data: where to store the read data
 *
 *	Read a 32-bit word from a location in VPD EEPROM using the card's PCI
 *	VPD ROM capability.  A zero is written to the flag bit when the
 *	address is written to the control register.  The hardware device will
 *	set the flag to 1 when 4 bytes have been read into the data register.
 */
int t3_seeprom_read(struct adapter *adapter, u32 addr, __le32 *data)
{
	u16 val;
	int attempts = EEPROM_MAX_POLL;
	u32 v;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
		return -EINVAL;

	pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR, addr);
	do {
		udelay(10);
		pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
	} while (!(val & PCI_VPD_ADDR_F) && --attempts);

	if (!(val & PCI_VPD_ADDR_F)) {
		CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	pci_read_config_dword(adapter->pdev, base + PCI_VPD_DATA, &v);
	*data = cpu_to_le32(v);
	return 0;
}
/**
 *	t3_seeprom_write - write a VPD EEPROM location
 *	@adapter: adapter to write
 *	@addr: EEPROM address
 *	@data: value to write
 *
 *	Write a 32-bit word to a location in VPD EEPROM using the card's PCI
 *	VPD ROM capability.
 */
int t3_seeprom_write(struct adapter *adapter, u32 addr, __le32 data)
{
	u16 val;
	int attempts = EEPROM_MAX_POLL;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
		return -EINVAL;

	pci_write_config_dword(adapter->pdev, base + PCI_VPD_DATA,
			       le32_to_cpu(data));
	pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR,
			      addr | PCI_VPD_ADDR_F);
	do {
		msleep(1);
		pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
	} while ((val & PCI_VPD_ADDR_F) && --attempts);

	if (val & PCI_VPD_ADDR_F) {
		CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	return 0;
}
/**
 *	t3_seeprom_wp - enable/disable EEPROM write protection
 *	@adapter: the adapter
 *	@enable: 1 to enable write protection, 0 to disable it
 *
 *	Enables or disables write protection on the serial EEPROM.
 */
int t3_seeprom_wp(struct adapter *adapter, int enable)
{
	return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
}
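/*
 * Usage sketch (illustrative): a VPD update would typically be bracketed
 * by write-protect toggles:
 *
 *	t3_seeprom_wp(adapter, 0);
 *	err = t3_seeprom_write(adapter, addr, data);
 *	t3_seeprom_wp(adapter, 1);
 */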
/*
 * Convert a character holding a hex digit to a number.
 */
static unsigned int hex2int(unsigned char c)
{
	return isdigit(c) ? c - '0' : toupper(c) - 'A' + 10;
}
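/*
 * Examples (illustrative): hex2int('7') == 7, hex2int('a') == 10,
 * hex2int('A') == 10.  The input is assumed to be a valid hex digit.
 */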
/**
 *	get_vpd_params - read VPD parameters from VPD EEPROM
 *	@adapter: adapter to read
 *	@p: where to store the parameters
 *
 *	Reads card parameters stored in VPD EEPROM.
 */
static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
	int i, addr, ret;
	struct t3_vpd vpd;

	/*
	 * Card information is normally at VPD_BASE but some early cards had
	 * it at 0.
	 */
	ret = t3_seeprom_read(adapter, VPD_BASE, (__le32 *)&vpd);
	if (ret)
		return ret;
	addr = vpd.id_tag == 0x82 ? VPD_BASE : 0;

	for (i = 0; i < sizeof(vpd); i += 4) {
		ret = t3_seeprom_read(adapter, addr + i,
				      (__le32 *)((u8 *)&vpd + i));
		if (ret)
			return ret;
	}

	p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
	p->mclk = simple_strtoul(vpd.mclk_data, NULL, 10);
	p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10);
	p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10);
	p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10);
	memcpy(p->sn, vpd.sn_data, SERNUM_LEN);

	/* Old eeproms didn't have port information */
	if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
		p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
		p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
	} else {
		p->port_type[0] = hex2int(vpd.port0_data[0]);
		p->port_type[1] = hex2int(vpd.port1_data[0]);
		p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16);
		p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16);
	}

	for (i = 0; i < 6; i++)
		p->eth_base[i] = hex2int(vpd.na_data[2 * i]) * 16 +
				 hex2int(vpd.na_data[2 * i + 1]);
	return 0;
}
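/*
 * Example (illustrative): a hypothetical VPD "na" entry of "00074363C8F0"
 * would yield eth_base = { 0x00, 0x07, 0x43, 0x63, 0xc8, 0xf0 }, each byte
 * assembled from two hex digits via hex2int() in the loop above.
 */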
/* serial flash and firmware constants */
enum {
	SF_ATTEMPTS = 5,	/* max retries for SF1 operations */
	SF_SEC_SIZE = 64 * 1024,	/* serial flash sector size */
	SF_SIZE = SF_SEC_SIZE * 8,	/* serial flash size */

	/* flash command opcodes */
	SF_PROG_PAGE = 2,	/* program page */
	SF_WR_DISABLE = 4,	/* disable writes */
	SF_RD_STATUS = 5,	/* read status register */
	SF_WR_ENABLE = 6,	/* enable writes */
	SF_RD_DATA_FAST = 0xb,	/* read flash */
	SF_ERASE_SECTOR = 0xd8,	/* erase sector */

	FW_FLASH_BOOT_ADDR = 0x70000,	/* start address of FW in flash */
	FW_VERS_ADDR = 0x7fffc,	/* flash address holding FW version */
	FW_MIN_SIZE = 8		/* at least version and csum */
};
/**
 *	sf1_read - read data from the serial flash
 *	@adapter: the adapter
 *	@byte_cnt: number of bytes to read
 *	@cont: whether another operation will be chained
 *	@valp: where to store the read data
 *
 *	Reads up to 4 bytes of data from the serial flash.  The location of
 *	the read needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
		    u32 *valp)
{
	int ret;

	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
	ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
	if (!ret)
		*valp = t3_read_reg(adapter, A_SF_DATA);
	return ret;
}
/**
 *	sf1_write - write data to the serial flash
 *	@adapter: the adapter
 *	@byte_cnt: number of bytes to write
 *	@cont: whether another operation will be chained
 *	@val: value to write
 *
 *	Writes up to 4 bytes of data to the serial flash.  The location of
 *	the write needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
		     u32 val)
{
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t3_write_reg(adapter, A_SF_DATA, val);
	t3_write_reg(adapter, A_SF_OP,
		     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
	return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
}
/**
 *	flash_wait_op - wait for a flash operation to complete
 *	@adapter: the adapter
 *	@attempts: max number of polls of the status register
 *	@delay: delay between polls in ms
 *
 *	Wait for a flash operation to complete by polling the status register.
 */
static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
{
	int ret;
	u32 status;

	while (1) {
		if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 ||
		    (ret = sf1_read(adapter, 1, 0, &status)) != 0)
			return ret;
		if (!(status & 1))
			return 0;
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			msleep(delay);
	}
}
/**
 *	t3_read_flash - read words from serial flash
 *	@adapter: the adapter
 *	@addr: the start address for the read
 *	@nwords: how many 32-bit words to read
 *	@data: where to store the read data
 *	@byte_oriented: whether to store data as bytes or as words
 *
 *	Read the specified number of 32-bit words from the serial flash.
 *	If @byte_oriented is set the read data is stored as a byte array
 *	(i.e., big-endian), otherwise as 32-bit words in the platform's
 *	natural endianness.
 */
int t3_read_flash(struct adapter *adapter, unsigned int addr,
		  unsigned int nwords, u32 *data, int byte_oriented)
{
	int ret;

	if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
		return -EINVAL;

	addr = swab32(addr) | SF_RD_DATA_FAST;

	if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, data)) != 0)
		return ret;

	for (; nwords; nwords--, data++) {
		ret = sf1_read(adapter, 4, nwords > 1, data);
		if (ret)
			return ret;
		if (byte_oriented)
			*data = htonl(*data);
	}
	return 0;
}
/**
 *	t3_write_flash - write up to a page of data to the serial flash
 *	@adapter: the adapter
 *	@addr: the start address to write
 *	@n: length of data to write
 *	@data: the data to write
 *
 *	Writes up to a page of data (256 bytes) to the serial flash starting
 *	at the given address.
 */
static int t3_write_flash(struct adapter *adapter, unsigned int addr,
			  unsigned int n, const u8 *data)
{
	int ret;
	u32 buf[64];
	unsigned int i, c, left, val, offset = addr & 0xff;

	if (addr + n > SF_SIZE || offset + n > 256)
		return -EINVAL;

	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, val)) != 0)
		return ret;

	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		ret = sf1_write(adapter, c, c != left, val);
		if (ret)
			return ret;
	}
	if ((ret = flash_wait_op(adapter, 5, 1)) != 0)
		return ret;

	/* Read the page to verify the write succeeded */
	ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
	if (ret)
		return ret;

	if (memcmp(data - n, (u8 *)buf + offset, n))
		return -EIO;
	return 0;
}
/**
 *	t3_get_tp_version - read the TP SRAM version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the protocol SRAM version from SRAM.
 */
int t3_get_tp_version(struct adapter *adapter, u32 *vers)
{
	int ret;

	/* Get version loaded in SRAM */
	t3_write_reg(adapter, A_TP_EMBED_OP_FIELD0, 0);
	ret = t3_wait_op_done(adapter, A_TP_EMBED_OP_FIELD0,
			      1, 1, 5, 1);
	if (ret)
		return ret;

	*vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);

	return 0;
}
/**
 *	t3_check_tpsram_version - read the TP SRAM version
 *	@adapter: the adapter
 *	@must_load: set to 1 if loading a new microcode image is required
 *
 *	Reads the protocol SRAM version and checks it against the version
 *	the driver was compiled for.
 */
int t3_check_tpsram_version(struct adapter *adapter, int *must_load)
{
	int ret;
	u32 vers;
	unsigned int major, minor;

	if (adapter->params.rev == T3_REV_A)
		return 0;

	*must_load = 1;

	ret = t3_get_tp_version(adapter, &vers);
	if (ret)
		return ret;

	major = G_TP_VERSION_MAJOR(vers);
	minor = G_TP_VERSION_MINOR(vers);

	if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
		return 0;

	if (major != TP_VERSION_MAJOR)
		CH_ERR(adapter, "found wrong TP version (%u.%u), "
		       "driver needs version %d.%d\n", major, minor,
		       TP_VERSION_MAJOR, TP_VERSION_MINOR);
	else {
		*must_load = 0;
		CH_ERR(adapter, "found wrong TP version (%u.%u), "
		       "driver compiled for version %d.%d\n", major, minor,
		       TP_VERSION_MAJOR, TP_VERSION_MINOR);
	}
	return -EINVAL;
}
/**
 *	t3_check_tpsram - check if provided protocol SRAM
 *	is compatible with this driver
 *	@adapter: the adapter
 *	@tp_sram: the firmware image to write
 *	@size: image size
 *
 *	Checks if an adapter's TP SRAM image is compatible with the driver.
 *	Returns 0 if the versions are compatible, a negative error otherwise.
 */
int t3_check_tpsram(struct adapter *adapter, const u8 *tp_sram,
		    unsigned int size)
{
	u32 csum;
	unsigned int i;
	const __be32 *p = (const __be32 *)tp_sram;

	/* Verify checksum */
	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);
	if (csum != 0xffffffff) {
		CH_ERR(adapter, "corrupted protocol SRAM image, checksum %u\n",
		       csum);
		return -EINVAL;
	}

	return 0;
}
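/*
 * Note on the checksum (illustrative): images are built so that all
 * big-endian 32-bit words, including the trailing checksum word, sum to
 * 0xffffffff.  For a hypothetical two-word payload w0, w1 the stored
 * checksum word would be ~(w0 + w1), since x + ~x == 0xffffffff for any
 * 32-bit x.
 */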
enum fw_version_type {
	FW_VERSION_N3,
	FW_VERSION_T3
};
/**
 *	t3_get_fw_version - read the firmware version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the FW version from flash.
 */
int t3_get_fw_version(struct adapter *adapter, u32 *vers)
{
	return t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);
}
/**
 *	t3_check_fw_version - check if the FW is compatible with this driver
 *	@adapter: the adapter
 *	@must_load: set to 1 if loading a new FW image is required
 *
 *	Checks if an adapter's FW is compatible with the driver.  Returns 0
 *	if the versions are compatible, a negative error otherwise.
 */
int t3_check_fw_version(struct adapter *adapter, int *must_load)
{
	int ret;
	u32 vers;
	unsigned int type, major, minor;

	*must_load = 1;
	ret = t3_get_fw_version(adapter, &vers);
	if (ret)
		return ret;

	type = G_FW_VERSION_TYPE(vers);
	major = G_FW_VERSION_MAJOR(vers);
	minor = G_FW_VERSION_MINOR(vers);

	if (type == FW_VERSION_T3 && major == FW_VERSION_MAJOR &&
	    minor == FW_VERSION_MINOR)
		return 0;

	if (major != FW_VERSION_MAJOR)
		CH_ERR(adapter, "found wrong FW version(%u.%u), "
		       "driver needs version %u.%u\n", major, minor,
		       FW_VERSION_MAJOR, FW_VERSION_MINOR);
	else if (minor < FW_VERSION_MINOR) {
		*must_load = 0;
		CH_WARN(adapter, "found old FW minor version(%u.%u), "
			"driver compiled for version %u.%u\n", major, minor,
			FW_VERSION_MAJOR, FW_VERSION_MINOR);
	} else {
		CH_WARN(adapter, "found newer FW version(%u.%u), "
			"driver compiled for version %u.%u\n", major, minor,
			FW_VERSION_MAJOR, FW_VERSION_MINOR);
		return 0;
	}
	return -EINVAL;
}
/**
 *	t3_flash_erase_sectors - erase a range of flash sectors
 *	@adapter: the adapter
 *	@start: the first sector to erase
 *	@end: the last sector to erase
 *
 *	Erases the sectors in the given range.
 */
static int t3_flash_erase_sectors(struct adapter *adapter, int start, int end)
{
	while (start <= end) {
		int ret;

		if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
		    (ret = sf1_write(adapter, 4, 0,
				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
		    (ret = flash_wait_op(adapter, 5, 500)) != 0)
			return ret;
		start++;
	}
	return 0;
}
/**
 *	t3_load_fw - download firmware
 *	@adapter: the adapter
 *	@fw_data: the firmware image to write
 *	@size: image size
 *
 *	Write the supplied firmware image to the card's serial flash.
 *	The FW image has the following sections: @size - 8 bytes of code and
 *	data, followed by 4 bytes of FW version, followed by the 32-bit
 *	1's complement checksum of the whole image.
 */
int t3_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size)
{
	u32 csum;
	unsigned int i;
	const __be32 *p = (const __be32 *)fw_data;
	int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;

	if ((size & 3) || size < FW_MIN_SIZE)
		return -EINVAL;
	if (size > FW_VERS_ADDR + 8 - FW_FLASH_BOOT_ADDR)
		return -EFBIG;

	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);
	if (csum != 0xffffffff) {
		CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
		       csum);
		return -EINVAL;
	}

	ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);
	if (ret)
		goto out;

	size -= 8;		/* trim off version and checksum */
	for (addr = FW_FLASH_BOOT_ADDR; size;) {
		unsigned int chunk_size = min(size, 256U);

		ret = t3_write_flash(adapter, addr, chunk_size, fw_data);
		if (ret)
			goto out;

		addr += chunk_size;
		fw_data += chunk_size;
		size -= chunk_size;
	}

	ret = t3_write_flash(adapter, FW_VERS_ADDR, 4, fw_data);
out:
	if (ret)
		CH_ERR(adapter, "firmware download failed, error %d\n", ret);
	return ret;
}
#define CIM_CTL_BASE 0x2000

/**
 *	t3_cim_ctl_blk_read - read a block from CIM control region
 *	@adap: the adapter
 *	@addr: the start address within the CIM control region
 *	@n: number of words to read
 *	@valp: where to store the result
 *
 *	Reads a block of 4-byte words from the CIM control region.
 */
int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr,
			unsigned int n, unsigned int *valp)
{
	int ret = 0;

	if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
		return -EBUSY;

	for ( ; !ret && n--; addr += 4) {
		t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr);
		ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
				      0, 5, 2);
		if (!ret)
			*valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA);
	}
	return ret;
}
/**
 *	t3_link_changed - handle interface link changes
 *	@adapter: the adapter
 *	@port_id: the port index that changed link state
 *
 *	Called when a port's link settings change to propagate the new values
 *	to the associated PHY and MAC.  After performing the common tasks it
 *	invokes an OS-specific handler.
 */
void t3_link_changed(struct adapter *adapter, int port_id)
{
	int link_ok, speed, duplex, fc;
	struct port_info *pi = adap2pinfo(adapter, port_id);
	struct cphy *phy = &pi->phy;
	struct cmac *mac = &pi->mac;
	struct link_config *lc = &pi->link_config;

	phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);

	if (link_ok != lc->link_ok && adapter->params.rev > 0 &&
	    uses_xaui(adapter)) {
		if (link_ok)
			t3b_pcs_reset(mac);
		t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
			     link_ok ? F_TXACTENABLE | F_RXEN : 0);
	}
	lc->link_ok = link_ok;
	lc->speed = speed < 0 ? SPEED_INVALID : speed;
	lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
	if (lc->requested_fc & PAUSE_AUTONEG)
		fc &= lc->requested_fc;
	else
		fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);

	if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
		/* Set MAC speed, duplex, and flow control to match PHY. */
		t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc);
		lc->fc = fc;
	}

	t3_os_link_changed(adapter, port_id, link_ok, speed, duplex, fc);
}
/**
 *	t3_link_start - apply link configuration to MAC/PHY
 *	@phy: the PHY to setup
 *	@mac: the MAC to setup
 *	@lc: the requested link configuration
 *
 *	Set up a port's MAC and PHY according to a desired link configuration.
 *	- If the PHY can auto-negotiate first decide what to advertise, then
 *	  enable/disable auto-negotiation as desired, and reset.
 *	- If the PHY does not auto-negotiate just reset it.
 *	- If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
 *	  otherwise do it later based on the outcome of auto-negotiation.
 */
int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
{
	unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);

	lc->link_ok = 0;
	if (lc->supported & SUPPORTED_Autoneg) {
		lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);
		if (fc) {
			lc->advertising |= ADVERTISED_Asym_Pause;
			if (fc & PAUSE_RX)
				lc->advertising |= ADVERTISED_Pause;
		}
		phy->ops->advertise(phy, lc->advertising);

		if (lc->autoneg == AUTONEG_DISABLE) {
			lc->speed = lc->requested_speed;
			lc->duplex = lc->requested_duplex;
			lc->fc = (unsigned char)fc;
			t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex,
						   fc);
			/* Also disables autoneg */
			phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
			phy->ops->reset(phy, 0);
		} else
			phy->ops->autoneg_enable(phy);
	} else {
		t3_mac_set_speed_duplex_fc(mac, -1, -1, fc);
		lc->fc = (unsigned char)fc;
		phy->ops->reset(phy, 0);
	}
	return 0;
}
/**
 *	t3_set_vlan_accel - control HW VLAN extraction
 *	@adapter: the adapter
 *	@ports: bitmap of adapter ports to operate on
 *	@on: enable (1) or disable (0) HW VLAN extraction
 *
 *	Enables or disables HW extraction of VLAN tags for the given ports.
 */
void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on)
{
	t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
			 ports << S_VLANEXTRACTIONENABLE,
			 on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
}
struct intr_info {
	unsigned int mask;	/* bits to check in interrupt status */
	const char *msg;	/* message to print or NULL */
	short stat_idx;		/* stat counter to increment or -1 */
	unsigned short fatal;	/* whether the condition reported is fatal */
};
/**
 *	t3_handle_intr_status - table driven interrupt handler
 *	@adapter: the adapter that generated the interrupt
 *	@reg: the interrupt status register to process
 *	@mask: a mask to apply to the interrupt status
 *	@acts: table of interrupt actions
 *	@stats: statistics counters tracking interrupt occurrences
 *
 *	A table driven interrupt handler that applies a set of masks to an
 *	interrupt status word and performs the corresponding actions if the
 *	interrupts described by the mask have occurred.  The actions include
 *	optionally printing a warning or alert message, and optionally
 *	incrementing a stat counter.  The table is terminated by an entry
 *	specifying mask 0.  Returns the number of fatal interrupt conditions.
 */
static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg,
				 unsigned int mask,
				 const struct intr_info *acts,
				 unsigned long *stats)
{
	int fatal = 0;
	unsigned int status = t3_read_reg(adapter, reg) & mask;

	for (; acts->mask; ++acts) {
		if (!(status & acts->mask))
			continue;
		if (acts->fatal) {
			fatal++;
			CH_ALERT(adapter, "%s (0x%x)\n",
				 acts->msg, status & acts->mask);
		} else if (acts->msg)
			CH_WARN(adapter, "%s (0x%x)\n",
				acts->msg, status & acts->mask);
		if (acts->stat_idx >= 0)
			stats[acts->stat_idx]++;
	}
	if (status)		/* clear processed interrupts */
		t3_write_reg(adapter, reg, status);
	return fatal;
}
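/*
 * Sketch of a handler built on t3_handle_intr_status() (illustrative only;
 * the real tables follow below).  F_SOME_ERR, F_SOME_EVENT and STAT_IDX are
 * hypothetical names:
 *
 *	static const struct intr_info example_intr_info[] = {
 *		{F_SOME_ERR, "example fatal error", -1, 1},
 *		{F_SOME_EVENT, "example counted event", STAT_IDX, 0},
 *		{0}
 *	};
 *
 * A mask-0 entry terminates the table, and a return value > 0 means at
 * least one fatal condition was seen.
 */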
#define SGE_INTR_MASK (F_RSPQDISABLED | \
		       F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR | \
		       F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
		       F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
		       V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
		       F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
		       F_HIRCQPARITYERROR)
#define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
		       F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
		       F_NFASRCHFAIL)
#define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
#define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
		       V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
		       F_TXFIFO_UNDERRUN | F_RXFIFO_OVERFLOW)
#define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
			F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
			F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
			F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \
			V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \
			V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */)
#define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\
			F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
			/* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \
			F_RETRYBUFPARERR | F_RETRYLUTPARERR | F_RXPARERR | \
			F_TXPARERR | V_BISTERR(M_BISTERR))
#define ULPRX_INTR_MASK (F_PARERRDATA | F_PARERRPCMD | F_ARBPF1PERR | \
			 F_ARBPF0PERR | F_ARBFPERR | F_PCMDMUXPERR | \
			 F_DATASELFRAMEERR1 | F_DATASELFRAMEERR0)
#define ULPTX_INTR_MASK 0xfc
#define CPLSW_INTR_MASK (F_CIM_OP_MAP_PERR | F_TP_FRAMING_ERROR | \
			 F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
			 F_ZERO_SWITCH_ERROR)
#define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
		       F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
		       F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
		       F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT | \
		       F_DRAMPARERR | F_ICACHEPARERR | F_DCACHEPARERR | \
		       F_OBQSGEPARERR | F_OBQULPHIPARERR | F_OBQULPLOPARERR | \
		       F_IBQSGELOPARERR | F_IBQSGEHIPARERR | F_IBQULPPARERR | \
		       F_IBQTPPARERR | F_ITAGPARERR | F_DTAGPARERR)
#define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
			V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
			V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
#define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \
			V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \
			V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR))
#define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \
		       V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
		       V_RXTPPARERRENB(M_RXTPPARERRENB) | \
		       V_MCAPARERRENB(M_MCAPARERRENB))
#define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
		      F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
		      F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
		      F_MPS0 | F_CPL_SWITCH)
/*
 * Interrupt handler for the PCIX1 module.
 */
static void pci_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pcix1_intr_info[] = {
		{F_MSTDETPARERR, "PCI master detected parity error", -1, 1},
		{F_SIGTARABT, "PCI signaled target abort", -1, 1},
		{F_RCVTARABT, "PCI received target abort", -1, 1},
		{F_RCVMSTABT, "PCI received master abort", -1, 1},
		{F_SIGSYSERR, "PCI signaled system error", -1, 1},
		{F_DETPARERR, "PCI detected parity error", -1, 1},
		{F_SPLCMPDIS, "PCI split completion discarded", -1, 1},
		{F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1},
		{F_RCVSPLCMPERR, "PCI received split completion error", -1,
		 1},
		{F_DETCORECCERR, "PCI correctable ECC error",
		 STAT_PCI_CORR_ECC, 0},
		{F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1},
		{F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
		{V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1,
		 1},
		{V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1,
		 1},
		{V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1,
		 1},
		{V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
		 "error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
				  pcix1_intr_info, adapter->irq_stats))
		t3_fatal_err(adapter);
}
/*
 * Interrupt handler for the PCIE module.
 */
static void pcie_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pcie_intr_info[] = {
		{F_PEXERR, "PCI PEX error", -1, 1},
		{F_UNXSPLCPLERRR,
		 "PCI unexpected split completion DMA read error", -1, 1},
		{F_UNXSPLCPLERRC,
		 "PCI unexpected split completion DMA command error", -1, 1},
		{F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
		{F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1},
		{F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1},
		{F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1},
		{V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
		 "PCI MSI-X table/PBA parity error", -1, 1},
		{F_RETRYBUFPARERR, "PCI retry buffer parity error", -1, 1},
		{F_RETRYLUTPARERR, "PCI retry LUT parity error", -1, 1},
		{F_RXPARERR, "PCI Rx parity error", -1, 1},
		{F_TXPARERR, "PCI Tx parity error", -1, 1},
		{V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1},
		{0}
	};

	if (t3_read_reg(adapter, A_PCIE_INT_CAUSE) & F_PEXERR)
		CH_ALERT(adapter, "PEX error code 0x%x\n",
			 t3_read_reg(adapter, A_PCIE_PEX_ERR));

	if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
				  pcie_intr_info, adapter->irq_stats))
		t3_fatal_err(adapter);
}
/*
 * TP interrupt handler.
 */
static void tp_intr_handler(struct adapter *adapter)
{
	static const struct intr_info tp_intr_info[] = {
		{0xffffff, "TP parity error", -1, 1},
		{0x1000000, "TP out of Rx pages", -1, 1},
		{0x2000000, "TP out of Tx pages", -1, 1},
		{0}
	};

	static struct intr_info tp_intr_info_t3c[] = {
		{0x1fffffff, "TP parity error", -1, 1},
		{F_FLMRXFLSTEMPTY, "TP out of Rx pages", -1, 1},
		{F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
				  adapter->params.rev < T3_REV_C ?
				  tp_intr_info : tp_intr_info_t3c, NULL))
		t3_fatal_err(adapter);
}
/*
 * CIM interrupt handler.
 */
static void cim_intr_handler(struct adapter *adapter)
{
	static const struct intr_info cim_intr_info[] = {
		{F_RSVDSPACEINT, "CIM reserved space write", -1, 1},
		{F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1},
		{F_FLASHRANGEINT, "CIM flash address out of range", -1, 1},
		{F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1},
		{F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1},
		{F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1},
		{F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1},
		{F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1},
		{F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1},
		{F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1},
		{F_BLKRDPLINT, "CIM block read from PL space", -1, 1},
		{F_BLKWRPLINT, "CIM block write to PL space", -1, 1},
		{F_DRAMPARERR, "CIM DRAM parity error", -1, 1},
		{F_ICACHEPARERR, "CIM icache parity error", -1, 1},
		{F_DCACHEPARERR, "CIM dcache parity error", -1, 1},
		{F_OBQSGEPARERR, "CIM OBQ SGE parity error", -1, 1},
		{F_OBQULPHIPARERR, "CIM OBQ ULPHI parity error", -1, 1},
		{F_OBQULPLOPARERR, "CIM OBQ ULPLO parity error", -1, 1},
		{F_IBQSGELOPARERR, "CIM IBQ SGELO parity error", -1, 1},
		{F_IBQSGEHIPARERR, "CIM IBQ SGEHI parity error", -1, 1},
		{F_IBQULPPARERR, "CIM IBQ ULP parity error", -1, 1},
		{F_IBQTPPARERR, "CIM IBQ TP parity error", -1, 1},
		{F_ITAGPARERR, "CIM itag parity error", -1, 1},
		{F_DTAGPARERR, "CIM dtag parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, 0xffffffff,
				  cim_intr_info, NULL))
		t3_fatal_err(adapter);
}
/*
 * ULP RX interrupt handler.
 */
static void ulprx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info ulprx_intr_info[] = {
		{F_PARERRDATA, "ULP RX data parity error", -1, 1},
		{F_PARERRPCMD, "ULP RX command parity error", -1, 1},
		{F_ARBPF1PERR, "ULP RX ArbPF1 parity error", -1, 1},
		{F_ARBPF0PERR, "ULP RX ArbPF0 parity error", -1, 1},
		{F_ARBFPERR, "ULP RX ArbF parity error", -1, 1},
		{F_PCMDMUXPERR, "ULP RX PCMDMUX parity error", -1, 1},
		{F_DATASELFRAMEERR1, "ULP RX frame error", -1, 1},
		{F_DATASELFRAMEERR0, "ULP RX frame error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
				  ulprx_intr_info, NULL))
		t3_fatal_err(adapter);
}
/*
 * ULP TX interrupt handler.
 */
static void ulptx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info ulptx_intr_info[] = {
		{F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
		 STAT_ULP_CH0_PBL_OOB, 0},
		{F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
		 STAT_ULP_CH1_PBL_OOB, 0},
		{0xfc, "ULP TX parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff,
				  ulptx_intr_info, adapter->irq_stats))
		t3_fatal_err(adapter);
}
#define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \
	F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \
	F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \
	F_ICSPI1_TX_FRAMING_ERROR)
#define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \
	F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \
	F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \
	F_OESPI1_OFIFO2X_TX_FRAMING_ERROR)
/*
 * PM TX interrupt handler.
 */
static void pmtx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pmtx_intr_info[] = {
		{F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1},
		{ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1},
		{OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1},
		{V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
		 "PMTX ispi parity error", -1, 1},
		{V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
		 "PMTX ospi parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff,
				  pmtx_intr_info, NULL))
		t3_fatal_err(adapter);
}
#define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \
	F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \
	F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \
	F_IESPI1_TX_FRAMING_ERROR)
#define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \
	F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \
	F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \
	F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)
/*
 * PM RX interrupt handler.
 */
static void pmrx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pmrx_intr_info[] = {
		{F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1},
		{IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1},
		{OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1},
		{V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
		 "PMRX ispi parity error", -1, 1},
		{V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
		 "PMRX ospi parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff,
				  pmrx_intr_info, NULL))
		t3_fatal_err(adapter);
}
/*
 * CPL switch interrupt handler.
 */
static void cplsw_intr_handler(struct adapter *adapter)
{
	static const struct intr_info cplsw_intr_info[] = {
		{F_CIM_OP_MAP_PERR, "CPL switch CIM parity error", -1, 1},
		{F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1},
		{F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1},
		{F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1},
		{F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1},
		{F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff,
				  cplsw_intr_info, NULL))
		t3_fatal_err(adapter);
}
/*
 * MPS interrupt handler.
 */
static void mps_intr_handler(struct adapter *adapter)
{
	static const struct intr_info mps_intr_info[] = {
		{0x1ff, "MPS parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
				  mps_intr_info, NULL))
		t3_fatal_err(adapter);
}
#define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE)

/*
 * MC7 interrupt handler.
 */
static void mc7_intr_handler(struct mc7 *mc7)
{
	struct adapter *adapter = mc7->adapter;
	u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE);

	if (cause & F_CE) {
		mc7->stats.corr_err++;
		CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, "
			"data 0x%x 0x%x 0x%x\n", mc7->name,
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR),
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0),
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1),
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2));
	}

	if (cause & F_UE) {
		mc7->stats.uncorr_err++;
		CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, "
			 "data 0x%x 0x%x 0x%x\n", mc7->name,
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR),
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0),
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1),
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2));
	}

	if (G_PE(cause)) {
		mc7->stats.parity_err++;
		CH_ALERT(adapter, "%s MC7 parity error 0x%x\n",
			 mc7->name, G_PE(cause));
	}

	if (cause & F_AE) {
		u32 addr = 0;

		if (adapter->params.rev > 0)
			addr = t3_read_reg(adapter,
					   mc7->offset + A_MC7_ERR_ADDR);
		mc7->stats.addr_err++;
		CH_ALERT(adapter, "%s MC7 address error: 0x%x\n",
			 mc7->name, addr);
	}

	if (cause & MC7_INTR_FATAL)
		t3_fatal_err(adapter);

	t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause);
}
#define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
			V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR))

/*
 * XGMAC interrupt handler.
 */
static int mac_intr_handler(struct adapter *adap, unsigned int idx)
{
	struct cmac *mac = &adap2pinfo(adap, idx)->mac;
	u32 cause = t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset);

	if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
		mac->stats.tx_fifo_parity_err++;
		CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx);
	}
	if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) {
		mac->stats.rx_fifo_parity_err++;
		CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx);
	}
	if (cause & F_TXFIFO_UNDERRUN)
		mac->stats.tx_fifo_urun++;
	if (cause & F_RXFIFO_OVERFLOW)
		mac->stats.rx_fifo_ovfl++;
	if (cause & V_SERDES_LOS(M_SERDES_LOS))
		mac->stats.serdes_signal_loss++;
	if (cause & F_XAUIPCSCTCERR)
		mac->stats.xaui_pcs_ctc_err++;
	if (cause & F_XAUIPCSALIGNCHANGE)
		mac->stats.xaui_pcs_align_change++;

	t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
	if (cause & XGM_INTR_FATAL)
		t3_fatal_err(adap);
	return cause != 0;
}
/*
 * Interrupt handler for PHY events.
 */
int t3_phy_intr_handler(struct adapter *adapter)
{
	u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);

	for_each_port(adapter, i) {
		struct port_info *p = adap2pinfo(adapter, i);

		if (!(p->phy.caps & SUPPORTED_IRQ))
			continue;

		if (cause & (1 << adapter_info(adapter)->gpio_intr[i])) {
			int phy_cause = p->phy.ops->intr_handler(&p->phy);

			if (phy_cause & cphy_cause_link_change)
				t3_link_changed(adapter, i);
			if (phy_cause & cphy_cause_fifo_error)
				p->phy.fifo_errors++;
		}
	}

	t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause);
	return 0;
}
/*
 * T3 slow path (non-data) interrupt handler.
 */
int t3_slow_intr_handler(struct adapter *adapter)
{
	u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0);

	cause &= adapter->slow_intr_mask;
	if (!cause)
		return 0;
	if (cause & F_PCIM0) {
		if (is_pcie(adapter))
			pcie_intr_handler(adapter);
		else
			pci_intr_handler(adapter);
	}
	if (cause & F_SGE3)
		t3_sge_err_intr_handler(adapter);
	if (cause & F_MC7_PMRX)
		mc7_intr_handler(&adapter->pmrx);
	if (cause & F_MC7_PMTX)
		mc7_intr_handler(&adapter->pmtx);
	if (cause & F_MC7_CM)
		mc7_intr_handler(&adapter->cm);
	if (cause & F_CIM)
		cim_intr_handler(adapter);
	if (cause & F_TP1)
		tp_intr_handler(adapter);
	if (cause & F_ULP2_RX)
		ulprx_intr_handler(adapter);
	if (cause & F_ULP2_TX)
		ulptx_intr_handler(adapter);
	if (cause & F_PM1_RX)
		pmrx_intr_handler(adapter);
	if (cause & F_PM1_TX)
		pmtx_intr_handler(adapter);
	if (cause & F_CPL_SWITCH)
		cplsw_intr_handler(adapter);
	if (cause & F_MPS0)
		mps_intr_handler(adapter);
	if (cause & F_MC5A)
		t3_mc5_intr_handler(&adapter->mc5);
	if (cause & F_XGMAC0_0)
		mac_intr_handler(adapter, 0);
	if (cause & F_XGMAC0_1)
		mac_intr_handler(adapter, 1);
	if (cause & F_T3DBG)
		t3_os_ext_intr_handler(adapter);

	/* Clear the interrupts just processed. */
	t3_write_reg(adapter, A_PL_INT_CAUSE0, cause);
	t3_read_reg(adapter, A_PL_INT_CAUSE0);	/* flush */
	return 1;
}
static unsigned int calc_gpio_intr(struct adapter *adap)
{
	unsigned int i, gpi_intr = 0;

	for_each_port(adap, i)
		if ((adap2pinfo(adap, i)->phy.caps & SUPPORTED_IRQ) &&
		    adapter_info(adap)->gpio_intr[i])
			gpi_intr |= 1 << adapter_info(adap)->gpio_intr[i];
	return gpi_intr;
}
1774 * t3_intr_enable - enable interrupts
1775 * @adapter: the adapter whose interrupts should be enabled
1777 * Enable interrupts by setting the interrupt enable registers of the
1778 * various HW modules and then enabling the top-level interrupt
1779 * concentrator.
1781 void t3_intr_enable(struct adapter *adapter)
1783 static const struct addr_val_pair intr_en_avp[] = {
1784 {A_SG_INT_ENABLE, SGE_INTR_MASK},
1785 {A_MC7_INT_ENABLE, MC7_INTR_MASK},
1786 {A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
1787 MC7_INTR_MASK},
1788 {A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
1789 MC7_INTR_MASK},
1790 {A_MC5_DB_INT_ENABLE, MC5_INTR_MASK},
1791 {A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK},
1792 {A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK},
1793 {A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK},
1794 {A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK},
1795 {A_MPS_INT_ENABLE, MPS_INTR_MASK},
1798 adapter->slow_intr_mask = PL_INTR_MASK;
1800 t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0);
1801 t3_write_reg(adapter, A_TP_INT_ENABLE,
1802 adapter->params.rev >= T3_REV_C ? 0x2bfffff : 0x3bfffff);
1804 if (adapter->params.rev > 0) {
1805 t3_write_reg(adapter, A_CPL_INTR_ENABLE,
1806 CPLSW_INTR_MASK | F_CIM_OVFL_ERROR);
1807 t3_write_reg(adapter, A_ULPTX_INT_ENABLE,
1808 ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 |
1809 F_PBL_BOUND_ERR_CH1);
1810 } else {
1811 t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK);
1812 t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
1815 t3_write_reg(adapter, A_T3DBG_INT_ENABLE, calc_gpio_intr(adapter));
1817 if (is_pcie(adapter))
1818 t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
1819 else
1820 t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK);
1821 t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask);
1822 t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
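/*
 * Illustrative bring-up ordering (a sketch, not driver code): clear any
 * stale causes before unmasking, then enable the per-port MAC/PHY
 * interrupts:
 *
 *	t3_intr_clear(adapter);
 *	t3_intr_enable(adapter);
 *	for_each_port(adapter, i)
 *		t3_port_intr_enable(adapter, i);
 */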
1826 * t3_intr_disable - disable a card's interrupts
1827 * @adapter: the adapter whose interrupts should be disabled
1829 * Disable interrupts. We only disable the top-level interrupt
1830 * concentrator and the SGE data interrupts.
1832 void t3_intr_disable(struct adapter *adapter)
1834 t3_write_reg(adapter, A_PL_INT_ENABLE0, 0);
1835 t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
1836 adapter->slow_intr_mask = 0;
1840 * t3_intr_clear - clear all interrupts
1841 * @adapter: the adapter whose interrupts should be cleared
1843 * Clears all interrupts.
1845 void t3_intr_clear(struct adapter *adapter)
1847 static const unsigned int cause_reg_addr[] = {
1848 A_SG_INT_CAUSE,
1849 A_SG_RSPQ_FL_STATUS,
1850 A_PCIX_INT_CAUSE,
1851 A_MC7_INT_CAUSE,
1852 A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
1853 A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
1854 A_CIM_HOST_INT_CAUSE,
1855 A_TP_INT_CAUSE,
1856 A_MC5_DB_INT_CAUSE,
1857 A_ULPRX_INT_CAUSE,
1858 A_ULPTX_INT_CAUSE,
1859 A_CPL_INTR_CAUSE,
1860 A_PM1_TX_INT_CAUSE,
1861 A_PM1_RX_INT_CAUSE,
1862 A_MPS_INT_CAUSE,
1863 A_T3DBG_INT_CAUSE,
1865 unsigned int i;
1867 /* Clear PHY and MAC interrupts for each port. */
1868 for_each_port(adapter, i)
1869 t3_port_intr_clear(adapter, i);
1871 for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
1872 t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);
1874 if (is_pcie(adapter))
1875 t3_write_reg(adapter, A_PCIE_PEX_ERR, 0xffffffff);
1876 t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
1877 t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
1881 * t3_port_intr_enable - enable port-specific interrupts
1882 * @adapter: associated adapter
1883 * @idx: index of port whose interrupts should be enabled
1885 * Enable port-specific (i.e., MAC and PHY) interrupts for the given
1886 * adapter port.
1888 void t3_port_intr_enable(struct adapter *adapter, int idx)
1890 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
1892 t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), XGM_INTR_MASK);
1893 t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
1894 phy->ops->intr_enable(phy);
1898 * t3_port_intr_disable - disable port-specific interrupts
1899 * @adapter: associated adapter
1900 * @idx: index of port whose interrupts should be disabled
1902 * Disable port-specific (i.e., MAC and PHY) interrupts for the given
1903 * adapter port.
1905 void t3_port_intr_disable(struct adapter *adapter, int idx)
1907 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
1909 t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), 0);
1910 t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
1911 phy->ops->intr_disable(phy);
1915 * t3_port_intr_clear - clear port-specific interrupts
1916 * @adapter: associated adapter
1917 * @idx: index of port whose interrupts to clear
1919 * Clear port-specific (i.e., MAC and PHY) interrupts for the given
1920 * adapter port.
1922 void t3_port_intr_clear(struct adapter *adapter, int idx)
1924 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
1926 t3_write_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx), 0xffffffff);
1927 t3_read_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx)); /* flush */
1928 phy->ops->intr_clear(phy);
1931 #define SG_CONTEXT_CMD_ATTEMPTS 100
1934 * t3_sge_write_context - write an SGE context
1935 * @adapter: the adapter
1936 * @id: the context id
1937 * @type: the context type
1939 * Program an SGE context with the values already loaded in the
1940 * CONTEXT_DATA? registers.
1942 static int t3_sge_write_context(struct adapter *adapter, unsigned int id,
1943 unsigned int type)
1945 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
1946 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
1947 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
1948 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
1949 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
1950 V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
1951 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
1952 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
1955 static int clear_sge_ctxt(struct adapter *adap, unsigned int id,
1956 unsigned int type)
1958 t3_write_reg(adap, A_SG_CONTEXT_DATA0, 0);
1959 t3_write_reg(adap, A_SG_CONTEXT_DATA1, 0);
1960 t3_write_reg(adap, A_SG_CONTEXT_DATA2, 0);
1961 t3_write_reg(adap, A_SG_CONTEXT_DATA3, 0);
1962 return t3_sge_write_context(adap, id, type);
1966 * t3_sge_init_ecntxt - initialize an SGE egress context
1967 * @adapter: the adapter to configure
1968 * @id: the context id
1969 * @gts_enable: whether to enable GTS for the context
1970 * @type: the egress context type
1971 * @respq: associated response queue
1972 * @base_addr: base address of queue
1973 * @size: number of queue entries
1974 * @token: uP token
1975 * @gen: initial generation value for the context
1976 * @cidx: consumer pointer
1978 * Initialize an SGE egress context and make it ready for use. If the
1979 * platform allows concurrent context operations, the caller is
1980 * responsible for appropriate locking.
1982 int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable,
1983 enum sge_context_type type, int respq, u64 base_addr,
1984 unsigned int size, unsigned int token, int gen,
1985 unsigned int cidx)
1987 unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM;
1989 if (base_addr & 0xfff) /* must be 4K aligned */
1990 return -EINVAL;
1991 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1992 return -EBUSY;
1994 base_addr >>= 12;
1995 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) |
1996 V_EC_CREDITS(credits) | V_EC_GTS(gts_enable));
1997 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) |
1998 V_EC_BASE_LO(base_addr & 0xffff));
1999 base_addr >>= 16;
2000 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, base_addr);
2001 base_addr >>= 32;
2002 t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
2003 V_EC_BASE_HI(base_addr & 0xf) | V_EC_RESPQ(respq) |
2004 V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) |
2005 F_EC_VALID);
2006 return t3_sge_write_context(adapter, id, F_EGRESS);
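/*
 * Example use (a sketch with hypothetical values): program egress
 * context 0 for an Ethernet TX queue of 1024 entries whose descriptor
 * ring starts at the 4K-aligned bus address ring_dma.
 *
 *	ret = t3_sge_init_ecntxt(adapter, 0, 1, SGE_CNTXT_ETH, 0,
 *				 ring_dma, 1024, 0, 1, 0);
 *	if (ret)	(-EINVAL on misalignment, -EBUSY if the unit is busy)
 *		goto err;
 */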
2010 * t3_sge_init_flcntxt - initialize an SGE free-buffer list context
2011 * @adapter: the adapter to configure
2012 * @id: the context id
2013 * @gts_enable: whether to enable GTS for the context
2014 * @base_addr: base address of queue
2015 * @size: number of queue entries
2016 * @bsize: size of each buffer for this queue
2017 * @cong_thres: threshold to signal congestion to upstream producers
2018 * @gen: initial generation value for the context
2019 * @cidx: consumer pointer
2021 * Initialize an SGE free list context and make it ready for use. The
2022 * caller is responsible for ensuring only one context operation occurs
2023 * at a time.
2025 int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id,
2026 int gts_enable, u64 base_addr, unsigned int size,
2027 unsigned int bsize, unsigned int cong_thres, int gen,
2028 unsigned int cidx)
2030 if (base_addr & 0xfff) /* must be 4K aligned */
2031 return -EINVAL;
2032 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2033 return -EBUSY;
2035 base_addr >>= 12;
2036 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, base_addr);
2037 base_addr >>= 32;
2038 t3_write_reg(adapter, A_SG_CONTEXT_DATA1,
2039 V_FL_BASE_HI((u32) base_addr) |
2040 V_FL_INDEX_LO(cidx & M_FL_INDEX_LO));
2041 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) |
2042 V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) |
2043 V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO));
2044 t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
2045 V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) |
2046 V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable));
2047 return t3_sge_write_context(adapter, id, F_FREELIST);
2051 * t3_sge_init_rspcntxt - initialize an SGE response queue context
2052 * @adapter: the adapter to configure
2053 * @id: the context id
2054 * @irq_vec_idx: MSI-X interrupt vector index, 0 if no MSI-X, -1 if no IRQ
2055 * @base_addr: base address of queue
2056 * @size: number of queue entries
2057 * @fl_thres: threshold for selecting the normal or jumbo free list
2058 * @gen: initial generation value for the context
2059 * @cidx: consumer pointer
2061 * Initialize an SGE response queue context and make it ready for use.
2062 * The caller is responsible for ensuring only one context operation
2063 * occurs at a time.
2065 int t3_sge_init_rspcntxt(struct adapter *adapter, unsigned int id,
2066 int irq_vec_idx, u64 base_addr, unsigned int size,
2067 unsigned int fl_thres, int gen, unsigned int cidx)
2069 unsigned int intr = 0;
2071 if (base_addr & 0xfff) /* must be 4K aligned */
2072 return -EINVAL;
2073 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2074 return -EBUSY;
2076 base_addr >>= 12;
2077 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) |
2078 V_CQ_INDEX(cidx));
2079 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
2080 base_addr >>= 32;
2081 if (irq_vec_idx >= 0)
2082 intr = V_RQ_MSI_VEC(irq_vec_idx) | F_RQ_INTR_EN;
2083 t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
2084 V_CQ_BASE_HI((u32) base_addr) | intr | V_RQ_GEN(gen));
2085 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres);
2086 return t3_sge_write_context(adapter, id, F_RESPONSEQ);
2090 * t3_sge_init_cqcntxt - initialize an SGE completion queue context
2091 * @adapter: the adapter to configure
2092 * @id: the context id
2093 * @base_addr: base address of queue
2094 * @size: number of queue entries
2095 * @rspq: response queue for async notifications
2096 * @ovfl_mode: CQ overflow mode
2097 * @credits: completion queue credits
2098 * @credit_thres: the credit threshold
2100 * Initialize an SGE completion queue context and make it ready for use.
2101 * The caller is responsible for ensuring only one context operation
2102 * occurs at a time.
2104 int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr,
2105 unsigned int size, int rspq, int ovfl_mode,
2106 unsigned int credits, unsigned int credit_thres)
2108 if (base_addr & 0xfff) /* must be 4K aligned */
2109 return -EINVAL;
2110 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2111 return -EBUSY;
2113 base_addr >>= 12;
2114 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size));
2115 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
2116 base_addr >>= 32;
2117 t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
2118 V_CQ_BASE_HI((u32) base_addr) | V_CQ_RSPQ(rspq) |
2119 V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode) |
2120 V_CQ_ERR(ovfl_mode));
2121 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
2122 V_CQ_CREDIT_THRES(credit_thres));
2123 return t3_sge_write_context(adapter, id, F_CQ);
2127 * t3_sge_enable_ecntxt - enable/disable an SGE egress context
2128 * @adapter: the adapter
2129 * @id: the egress context id
2130 * @enable: enable (1) or disable (0) the context
2132 * Enable or disable an SGE egress context. The caller is responsible for
2133 * ensuring only one context operation occurs at a time.
2135 int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable)
2137 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2138 return -EBUSY;
2140 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2141 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2142 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2143 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID);
2144 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable));
2145 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2146 V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
2147 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2148 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2152 * t3_sge_disable_fl - disable an SGE free-buffer list
2153 * @adapter: the adapter
2154 * @id: the free list context id
2156 * Disable an SGE free-buffer list. The caller is responsible for
2157 * ensuring only one context operation occurs at a time.
2159 int t3_sge_disable_fl(struct adapter *adapter, unsigned int id)
2161 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2162 return -EBUSY;
2164 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2165 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2166 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE));
2167 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2168 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0);
2169 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2170 V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id));
2171 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2172 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2176 * t3_sge_disable_rspcntxt - disable an SGE response queue
2177 * @adapter: the adapter
2178 * @id: the response queue context id
2180 * Disable an SGE response queue. The caller is responsible for
2181 * ensuring only one context operation occurs at a time.
2183 int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id)
2185 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2186 return -EBUSY;
2188 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2189 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2190 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2191 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2192 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2193 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2194 V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id));
2195 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2196 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2200 * t3_sge_disable_cqcntxt - disable an SGE completion queue
2201 * @adapter: the adapter
2202 * @id: the completion queue context id
2204 * Disable an SGE completion queue. The caller is responsible for
2205 * ensuring only one context operation occurs at a time.
2207 int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id)
2209 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2210 return -EBUSY;
2212 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2213 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2214 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2215 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2216 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2217 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2218 V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
2219 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2220 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2224 * t3_sge_cqcntxt_op - perform an operation on a completion queue context
2225 * @adapter: the adapter
2226 * @id: the context id
2227 * @op: the operation to perform
* @credits: credits to return to the CQ, for credit-return operations
2229 * Perform the selected operation on an SGE completion queue context.
2230 * The caller is responsible for ensuring only one context operation
2231 * occurs at a time.
2233 int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
2234 unsigned int credits)
2236 u32 val;
2238 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2239 return -EBUSY;
2241 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16);
2242 t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) |
2243 V_CONTEXT(id) | F_CQ);
2244 if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2245 0, SG_CONTEXT_CMD_ATTEMPTS, 1, &val))
2246 return -EIO;
2248 if (op >= 2 && op < 7) {
2249 if (adapter->params.rev > 0)
2250 return G_CQ_INDEX(val);
2252 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2253 V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id));
2254 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD,
2255 F_CONTEXT_CMD_BUSY, 0,
2256 SG_CONTEXT_CMD_ATTEMPTS, 1))
2257 return -EIO;
2258 return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0));
2260 return 0;
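/*
 * Example (illustrative; the opcode and ids below are placeholders):
 * return 64 credits to completion queue 5.  Note that for ops 2-6 a
 * non-negative return value is the current CQ index rather than 0.
 *
 *	ret = t3_sge_cqcntxt_op(adapter, 5, op, 64);
 *	if (ret < 0)
 *		goto err;
 */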
2264 * t3_sge_read_context - read an SGE context
2265 * @type: the context type
2266 * @adapter: the adapter
2267 * @id: the context id
2268 * @data: holds the retrieved context
2270 * Read an SGE context of the given type. The caller is responsible for ensuring
2271 * only one context operation occurs at a time.
2273 static int t3_sge_read_context(unsigned int type, struct adapter *adapter,
2274 unsigned int id, u32 data[4])
2276 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2277 return -EBUSY;
2279 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2280 V_CONTEXT_CMD_OPCODE(0) | type | V_CONTEXT(id));
2281 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 0,
2282 SG_CONTEXT_CMD_ATTEMPTS, 1))
2283 return -EIO;
2284 data[0] = t3_read_reg(adapter, A_SG_CONTEXT_DATA0);
2285 data[1] = t3_read_reg(adapter, A_SG_CONTEXT_DATA1);
2286 data[2] = t3_read_reg(adapter, A_SG_CONTEXT_DATA2);
2287 data[3] = t3_read_reg(adapter, A_SG_CONTEXT_DATA3);
2288 return 0;
2292 * t3_sge_read_ecntxt - read an SGE egress context
2293 * @adapter: the adapter
2294 * @id: the context id
2295 * @data: holds the retrieved context
2297 * Read an SGE egress context. The caller is responsible for ensuring
2298 * only one context operation occurs at a time.
2300 int t3_sge_read_ecntxt(struct adapter *adapter, unsigned int id, u32 data[4])
2302 if (id >= 65536)
2303 return -EINVAL;
2304 return t3_sge_read_context(F_EGRESS, adapter, id, data);
2308 * t3_sge_read_cq - read an SGE CQ context
2309 * @adapter: the adapter
2310 * @id: the context id
2311 * @data: holds the retrieved context
2313 * Read an SGE CQ context. The caller is responsible for ensuring
2314 * only one context operation occurs at a time.
2316 int t3_sge_read_cq(struct adapter *adapter, unsigned int id, u32 data[4])
2318 if (id >= 65536)
2319 return -EINVAL;
2320 return t3_sge_read_context(F_CQ, adapter, id, data);
2324 * t3_sge_read_fl - read an SGE free-list context
2325 * @adapter: the adapter
2326 * @id: the context id
2327 * @data: holds the retrieved context
2329 * Read an SGE free-list context. The caller is responsible for ensuring
2330 * only one context operation occurs at a time.
2332 int t3_sge_read_fl(struct adapter *adapter, unsigned int id, u32 data[4])
2334 if (id >= SGE_QSETS * 2)
2335 return -EINVAL;
2336 return t3_sge_read_context(F_FREELIST, adapter, id, data);
2340 * t3_sge_read_rspq - read an SGE response queue context
2341 * @adapter: the adapter
2342 * @id: the context id
2343 * @data: holds the retrieved context
2345 * Read an SGE response queue context. The caller is responsible for
2346 * ensuring only one context operation occurs at a time.
2348 int t3_sge_read_rspq(struct adapter *adapter, unsigned int id, u32 data[4])
2350 if (id >= SGE_QSETS)
2351 return -EINVAL;
2352 return t3_sge_read_context(F_RESPONSEQ, adapter, id, data);
2356 * t3_config_rss - configure Rx packet steering
2357 * @adapter: the adapter
2358 * @rss_config: RSS settings (written to TP_RSS_CONFIG)
2359 * @cpus: values for the CPU lookup table (0xff terminated)
2360 * @rspq: values for the response queue lookup table (0xffff terminated)
2362 * Programs the receive packet steering logic. @cpus and @rspq provide
2363 * the values for the CPU and response queue lookup tables. If they
2364 * provide fewer values than the size of the tables the supplied values
2365 * are used repeatedly until the tables are fully populated.
2367 void t3_config_rss(struct adapter *adapter, unsigned int rss_config,
2368 const u8 *cpus, const u16 *rspq)
2370 int i, j, cpu_idx = 0, q_idx = 0;
2372 if (cpus)
2373 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2374 u32 val = i << 16;
2376 for (j = 0; j < 2; ++j) {
2377 val |= (cpus[cpu_idx++] & 0x3f) << (8 * j);
2378 if (cpus[cpu_idx] == 0xff)
2379 cpu_idx = 0;
2381 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val);
2384 if (rspq)
2385 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2386 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2387 (i << 16) | rspq[q_idx++]);
2388 if (rspq[q_idx] == 0xffff)
2389 q_idx = 0;
2392 t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config);
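/*
 * Example (illustrative): populate both steering tables from short
 * arrays; the 0xff/0xffff sentinels are never written, they just make
 * the supplied values repeat until the tables are fully populated.
 *
 *	static const u8 cpus[] = { 0, 1, 2, 3, 0xff };
 *	static const u16 rspq[] = { 0, 1, 0xffff };
 *
 *	t3_config_rss(adapter, rss_config, cpus, rspq);
 */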
2396 * t3_read_rss - read the contents of the RSS tables
2397 * @adapter: the adapter
2398 * @lkup: holds the contents of the RSS lookup table
2399 * @map: holds the contents of the RSS map table
2401 * Reads the contents of the receive packet steering tables.
2403 int t3_read_rss(struct adapter *adapter, u8 *lkup, u16 *map)
2405 int i;
2406 u32 val;
2408 if (lkup)
2409 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2410 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE,
2411 0xffff0000 | i);
2412 val = t3_read_reg(adapter, A_TP_RSS_LKP_TABLE);
2413 if (!(val & 0x80000000))
2414 return -EAGAIN;
2415 *lkup++ = val;
2416 *lkup++ = (val >> 8);
2419 if (map)
2420 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2421 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2422 0xffff0000 | i);
2423 val = t3_read_reg(adapter, A_TP_RSS_MAP_TABLE);
2424 if (!(val & 0x80000000))
2425 return -EAGAIN;
2426 *map++ = val;
2428 return 0;
2432 * t3_tp_set_offload_mode - put TP in NIC/offload mode
2433 * @adap: the adapter
2434 * @enable: 1 to select offload mode, 0 for regular NIC
2436 * Switches TP to NIC/offload mode.
2438 void t3_tp_set_offload_mode(struct adapter *adap, int enable)
2440 if (is_offload(adap) || !enable)
2441 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE,
2442 V_NICMODE(!enable));
2446 * pm_num_pages - calculate the number of pages of the payload memory
2447 * @mem_size: the size of the payload memory
2448 * @pg_size: the size of each payload memory page
2450 * Calculate the number of pages, each of the given size, that fit in a
2451 * memory of the specified size, respecting the HW requirement that the
2452 * number of pages must be a multiple of 24.
2454 static inline unsigned int pm_num_pages(unsigned int mem_size,
2455 unsigned int pg_size)
2457 unsigned int n = mem_size / pg_size;
2459 return n - n % 24;
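/*
 * For example, 100 MiB of payload memory with 64 KiB pages gives
 * 104857600 / 65536 = 1600 pages; 1600 % 24 = 16, so 1584 are usable.
 */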
2462 #define mem_region(adap, start, size, reg) do { \
2463 t3_write_reg((adap), A_ ## reg, (start)); \
2464 start += size; \
} while (0)
2467 * partition_mem - partition memory and configure TP memory settings
2468 * @adap: the adapter
2469 * @p: the TP parameters
2471 * Partitions context and payload memory and configures TP's memory
2472 * registers.
2474 static void partition_mem(struct adapter *adap, const struct tp_params *p)
2476 unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5);
2477 unsigned int timers = 0, timers_shift = 22;
2479 if (adap->params.rev > 0) {
2480 if (tids <= 16 * 1024) {
2481 timers = 1;
2482 timers_shift = 16;
2483 } else if (tids <= 64 * 1024) {
2484 timers = 2;
2485 timers_shift = 18;
2486 } else if (tids <= 256 * 1024) {
2487 timers = 3;
2488 timers_shift = 20;
2492 t3_write_reg(adap, A_TP_PMM_SIZE,
2493 p->chan_rx_size | (p->chan_tx_size >> 16));
2495 t3_write_reg(adap, A_TP_PMM_TX_BASE, 0);
2496 t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size);
2497 t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs);
2498 t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX),
2499 V_TXDATAACKIDX(fls(p->tx_pg_size) - 12));
2501 t3_write_reg(adap, A_TP_PMM_RX_BASE, 0);
2502 t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size);
2503 t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs);
2505 pstructs = p->rx_num_pgs + p->tx_num_pgs;
2506 /* Add a bit of headroom and round down to a multiple of 24 */
2507 pstructs += 48;
2508 pstructs -= pstructs % 24;
2509 t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs);
2511 m = tids * TCB_SIZE;
2512 mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR);
2513 mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR);
2514 t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m);
2515 m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22);
2516 mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE);
2517 mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE);
2518 mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE);
2519 mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE);
2521 m = (m + 4095) & ~0xfff;
2522 t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m);
2523 t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m);
2525 tids = (p->cm_size - m - (3 << 20)) / 3072 - 32;
2526 m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
2527 adap->params.mc5.nfilters - adap->params.mc5.nroutes;
2528 if (tids < m)
2529 adap->params.mc5.nservers += m - tids;
2532 static inline void tp_wr_indirect(struct adapter *adap, unsigned int addr,
2533 u32 val)
2535 t3_write_reg(adap, A_TP_PIO_ADDR, addr);
2536 t3_write_reg(adap, A_TP_PIO_DATA, val);
2539 static void tp_config(struct adapter *adap, const struct tp_params *p)
2541 t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU |
2542 F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD |
2543 F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
2544 t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
2545 F_MTUENABLE | V_WINDOWSCALEMODE(1) |
2546 V_TIMESTAMPSMODE(0) | V_SACKMODE(1) | V_SACKRX(1));
2547 t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
2548 V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
2549 V_BYTETHRESHOLD(16384) | V_MSSTHRESHOLD(2) |
2550 F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
2551 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_RXFBARBPRIO | F_TXFBARBPRIO,
2552 F_IPV6ENABLE | F_NICMODE);
2553 t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
2554 t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
2555 t3_set_reg_field(adap, A_TP_PARA_REG6, 0,
2556 adap->params.rev > 0 ? F_ENABLEESND :
2557 F_T3A_ENABLEESND);
2559 t3_set_reg_field(adap, A_TP_PC_CONFIG,
2560 F_ENABLEEPCMDAFULL,
2561 F_ENABLEOCSPIFULL | F_TXDEFERENABLE | F_HEARBEATDACK |
2562 F_TXCONGESTIONMODE | F_RXCONGESTIONMODE);
2563 t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL,
2564 F_ENABLEIPV6RSS | F_ENABLENONOFDTNLSYN |
2565 F_ENABLEARPMISS | F_DISBLEDAPARBIT0);
2566 t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1080);
2567 t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1000);
2569 if (adap->params.rev > 0) {
2570 tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
2571 t3_set_reg_field(adap, A_TP_PARA_REG3, F_TXPACEAUTO,
2572 F_TXPACEAUTO);
2573 t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID);
2574 t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEAUTOSTRICT);
2575 } else
2576 t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);
2578 if (adap->params.rev == T3_REV_C)
2579 t3_set_reg_field(adap, A_TP_PC_CONFIG,
2580 V_TABLELATENCYDELTA(M_TABLELATENCYDELTA),
2581 V_TABLELATENCYDELTA(4));
2583 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0);
2584 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0);
2585 t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0);
2586 t3_write_reg(adap, A_TP_MOD_RATE_LIMIT, 0xf2200000);
2589 /* Desired TP timer resolution in usec */
2590 #define TP_TMR_RES 50
2592 /* TCP timer values in ms */
2593 #define TP_DACK_TIMER 50
2594 #define TP_RTO_MIN 250
2597 * tp_set_timers - set TP timing parameters
2598 * @adap: the adapter to set
2599 * @core_clk: the core clock frequency in Hz
2601 * Set TP's timing parameters, such as the various timer resolutions and
2602 * the TCP timer values.
2604 static void tp_set_timers(struct adapter *adap, unsigned int core_clk)
2606 unsigned int tre = fls(core_clk / (1000000 / TP_TMR_RES)) - 1;
2607 unsigned int dack_re = fls(core_clk / 5000) - 1; /* 200us */
2608 unsigned int tstamp_re = fls(core_clk / 1000); /* 1ms, at least */
2609 unsigned int tps = core_clk >> tre;
2611 t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) |
2612 V_DELAYEDACKRESOLUTION(dack_re) |
2613 V_TIMESTAMPRESOLUTION(tstamp_re));
2614 t3_write_reg(adap, A_TP_DACK_TIMER,
2615 (core_clk >> dack_re) / (1000 / TP_DACK_TIMER));
2616 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100);
2617 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504);
2618 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908);
2619 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c);
2620 t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) |
2621 V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) |
2622 V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
2623 V_KEEPALIVEMAX(9));
2625 #define SECONDS * tps
2627 t3_write_reg(adap, A_TP_MSL, adap->params.rev > 0 ? 0 : 2 SECONDS);
2628 t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN));
2629 t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS);
2630 t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS);
2631 t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS);
2632 t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS);
2633 t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS);
2634 t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS);
2635 t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS);
2637 #undef SECONDS
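/*
 * Worked example, assuming a 200 MHz core clock: core_clk / (1000000 /
 * TP_TMR_RES) = 200000000 / 20000 = 10000, and fls(10000) - 1 = 13, so
 * one timer tick is 2^13 / 200 MHz = 40.96 us, the largest power-of-two
 * tick that does not exceed the desired 50 us resolution.
 */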
2641 * t3_tp_set_coalescing_size - set receive coalescing size
2642 * @adap: the adapter
2643 * @size: the receive coalescing size
2644 * @psh: whether a set PSH bit should deliver coalesced data
2646 * Set the receive coalescing size and PSH bit handling.
2648 int t3_tp_set_coalescing_size(struct adapter *adap, unsigned int size, int psh)
2650 u32 val;
2652 if (size > MAX_RX_COALESCING_LEN)
2653 return -EINVAL;
2655 val = t3_read_reg(adap, A_TP_PARA_REG3);
2656 val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);
2658 if (size) {
2659 val |= F_RXCOALESCEENABLE;
2660 if (psh)
2661 val |= F_RXCOALESCEPSHEN;
2662 size = min(MAX_RX_COALESCING_LEN, size);
2663 t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
2664 V_MAXRXDATA(MAX_RX_COALESCING_LEN));
2666 t3_write_reg(adap, A_TP_PARA_REG3, val);
2667 return 0;
2671 * t3_tp_set_max_rxsize - set the max receive size
2672 * @adap: the adapter
2673 * @size: the max receive size
2675 * Set TP's max receive size. This is the limit that applies when
2676 * receive coalescing is disabled.
2678 void t3_tp_set_max_rxsize(struct adapter *adap, unsigned int size)
2680 t3_write_reg(adap, A_TP_PARA_REG7,
2681 V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
2684 static void init_mtus(unsigned short mtus[])
2687 * See draft-mathis-plpmtud-00.txt for the values. The min is 88 so
2688 * it can accommodate max size TCP/IP headers when SACK and timestamps
2689 * are enabled and still have at least 8 bytes of payload.
2691 mtus[0] = 88;
2692 mtus[1] = 88;
2693 mtus[2] = 256;
2694 mtus[3] = 512;
2695 mtus[4] = 576;
2696 mtus[5] = 1024;
2697 mtus[6] = 1280;
2698 mtus[7] = 1492;
2699 mtus[8] = 1500;
2700 mtus[9] = 2002;
2701 mtus[10] = 2048;
2702 mtus[11] = 4096;
2703 mtus[12] = 4352;
2704 mtus[13] = 8192;
2705 mtus[14] = 9000;
2706 mtus[15] = 9600;
2710 * Initial congestion control parameters.
2712 static void init_cong_ctrl(unsigned short *a, unsigned short *b)
2714 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
2715 a[9] = 2;
2716 a[10] = 3;
2717 a[11] = 4;
2718 a[12] = 5;
2719 a[13] = 6;
2720 a[14] = 7;
2721 a[15] = 8;
2722 a[16] = 9;
2723 a[17] = 10;
2724 a[18] = 14;
2725 a[19] = 17;
2726 a[20] = 21;
2727 a[21] = 25;
2728 a[22] = 30;
2729 a[23] = 35;
2730 a[24] = 45;
2731 a[25] = 60;
2732 a[26] = 80;
2733 a[27] = 100;
2734 a[28] = 200;
2735 a[29] = 300;
2736 a[30] = 400;
2737 a[31] = 500;
2739 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
2740 b[9] = b[10] = 1;
2741 b[11] = b[12] = 2;
2742 b[13] = b[14] = b[15] = b[16] = 3;
2743 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
2744 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
2745 b[28] = b[29] = 6;
2746 b[30] = b[31] = 7;
2749 /* The minimum additive increment value for the congestion control table */
2750 #define CC_MIN_INCR 2U
2753 * t3_load_mtus - write the MTU and congestion control HW tables
2754 * @adap: the adapter
2755 * @mtus: the unrestricted values for the MTU table
2756 * @alpha: the values for the congestion control alpha parameter
2757 * @beta: the values for the congestion control beta parameter
2758 * @mtu_cap: the maximum permitted effective MTU
2760 * Write the MTU table with the supplied MTUs, capping each at @mtu_cap.
2761 * Update the high-speed congestion control table with the supplied alpha,
2762 * beta, and MTUs.
2764 void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS],
2765 unsigned short alpha[NCCTRL_WIN],
2766 unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap)
2768 static const unsigned int avg_pkts[NCCTRL_WIN] = {
2769 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
2770 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
2771 28672, 40960, 57344, 81920, 114688, 163840, 229376
2774 unsigned int i, w;
2776 for (i = 0; i < NMTUS; ++i) {
2777 unsigned int mtu = min(mtus[i], mtu_cap);
2778 unsigned int log2 = fls(mtu);
2780 if (!(mtu & ((1 << log2) >> 2))) /* round */
2781 log2--;
2782 t3_write_reg(adap, A_TP_MTU_TABLE,
2783 (i << 24) | (log2 << 16) | mtu);
2785 for (w = 0; w < NCCTRL_WIN; ++w) {
2786 unsigned int inc;
2788 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
2789 CC_MIN_INCR);
2791 t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
2792 (w << 16) | (beta[w] << 13) | inc);
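/*
 * Worked example: for mtu = 1500 in congestion window 2 (avg_pkts = 10)
 * with alpha = 2, inc = max((1500 - 40) * 2 / 10, CC_MIN_INCR) = 292.
 */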
2798 * t3_read_hw_mtus - returns the values in the HW MTU table
2799 * @adap: the adapter
2800 * @mtus: where to store the HW MTU values
2802 * Reads the HW MTU table.
2804 void t3_read_hw_mtus(struct adapter *adap, unsigned short mtus[NMTUS])
2806 int i;
2808 for (i = 0; i < NMTUS; ++i) {
2809 unsigned int val;
2811 t3_write_reg(adap, A_TP_MTU_TABLE, 0xff000000 | i);
2812 val = t3_read_reg(adap, A_TP_MTU_TABLE);
2813 mtus[i] = val & 0x3fff;
2818 * t3_get_cong_cntl_tab - reads the congestion control table
2819 * @adap: the adapter
2820 * @incr: where to store the additive increments
2822 * Reads the additive increments programmed into the HW congestion
2823 * control table.
2825 void t3_get_cong_cntl_tab(struct adapter *adap,
2826 unsigned short incr[NMTUS][NCCTRL_WIN])
2828 unsigned int mtu, w;
2830 for (mtu = 0; mtu < NMTUS; ++mtu)
2831 for (w = 0; w < NCCTRL_WIN; ++w) {
2832 t3_write_reg(adap, A_TP_CCTRL_TABLE,
2833 0xffff0000 | (mtu << 5) | w);
2834 incr[mtu][w] = t3_read_reg(adap, A_TP_CCTRL_TABLE) &
2835 0x1fff;
2840 * t3_tp_get_mib_stats - read TP's MIB counters
2841 * @adap: the adapter
2842 * @tps: holds the returned counter values
2844 * Returns the values of TP's MIB counters.
2846 void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps)
2848 t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *) tps,
2849 sizeof(*tps) / sizeof(u32), 0);
2852 #define ulp_region(adap, name, start, len) do { \
2853 t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \
2854 t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \
2855 (start) + (len) - 1); \
2856 start += len; \
} while (0)

2858 #define ulptx_region(adap, name, start, len) do { \
2859 t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \
2860 t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \
2861 (start) + (len) - 1); \
} while (0)
2863 static void ulp_config(struct adapter *adap, const struct tp_params *p)
2865 unsigned int m = p->chan_rx_size;
2867 ulp_region(adap, ISCSI, m, p->chan_rx_size / 8);
2868 ulp_region(adap, TDDP, m, p->chan_rx_size / 8);
2869 ulptx_region(adap, TPT, m, p->chan_rx_size / 4);
2870 ulp_region(adap, STAG, m, p->chan_rx_size / 4);
2871 ulp_region(adap, RQ, m, p->chan_rx_size / 4);
2872 ulptx_region(adap, PBL, m, p->chan_rx_size / 4);
2873 ulp_region(adap, PBL, m, p->chan_rx_size / 4);
2874 t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
2878 * t3_set_proto_sram - set the contents of the protocol sram
2879 * @adap: the adapter
2880 * @data: the protocol image
2882 * Write the contents of the protocol SRAM.
2884 int t3_set_proto_sram(struct adapter *adap, const u8 *data)
2886 int i;
2887 const __be32 *buf = (const __be32 *)data;
2889 for (i = 0; i < PROTO_SRAM_LINES; i++) {
2890 t3_write_reg(adap, A_TP_EMBED_OP_FIELD5, be32_to_cpu(*buf++));
2891 t3_write_reg(adap, A_TP_EMBED_OP_FIELD4, be32_to_cpu(*buf++));
2892 t3_write_reg(adap, A_TP_EMBED_OP_FIELD3, be32_to_cpu(*buf++));
2893 t3_write_reg(adap, A_TP_EMBED_OP_FIELD2, be32_to_cpu(*buf++));
2894 t3_write_reg(adap, A_TP_EMBED_OP_FIELD1, be32_to_cpu(*buf++));
2896 t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, i << 1 | 1 << 31);
2897 if (t3_wait_op_done(adap, A_TP_EMBED_OP_FIELD0, 1, 1, 5, 1))
2898 return -EIO;
2900 t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, 0);
2902 return 0;
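/**
 * t3_config_trace_filter - configure one of the tracing filters
 * @adapter: the adapter
 * @tp: the desired trace filter parameters
 * @filter_index: which filter to configure (0: TX, 1: RX)
 * @invert: if set non-matching packets are traced instead of matching ones
 * @enable: whether to enable this filter
 *
 * Configures one of the tracing filters available in HW.
 */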
2905 void t3_config_trace_filter(struct adapter *adapter,
2906 const struct trace_params *tp, int filter_index,
2907 int invert, int enable)
2909 u32 addr, key[4], mask[4];
2911 key[0] = tp->sport | (tp->sip << 16);
2912 key[1] = (tp->sip >> 16) | (tp->dport << 16);
2913 key[2] = tp->dip;
2914 key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20);
2916 mask[0] = tp->sport_mask | (tp->sip_mask << 16);
2917 mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16);
2918 mask[2] = tp->dip_mask;
2919 mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20);
2921 if (invert)
2922 key[3] |= (1 << 29);
2923 if (enable)
2924 key[3] |= (1 << 28);
2926 addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
2927 tp_wr_indirect(adapter, addr++, key[0]);
2928 tp_wr_indirect(adapter, addr++, mask[0]);
2929 tp_wr_indirect(adapter, addr++, key[1]);
2930 tp_wr_indirect(adapter, addr++, mask[1]);
2931 tp_wr_indirect(adapter, addr++, key[2]);
2932 tp_wr_indirect(adapter, addr++, mask[2]);
2933 tp_wr_indirect(adapter, addr++, key[3]);
2934 tp_wr_indirect(adapter, addr, mask[3]);
2935 t3_read_reg(adapter, A_TP_PIO_DATA);
2939 * t3_config_sched - configure a HW traffic scheduler
2940 * @adap: the adapter
2941 * @kbps: target rate in Kbps
2942 * @sched: the scheduler index
2944 * Configure a HW scheduler for the target rate.
2946 int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched)
2948 unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
2949 unsigned int clk = adap->params.vpd.cclk * 1000;
2950 unsigned int selected_cpt = 0, selected_bpt = 0;
2952 if (kbps > 0) {
2953 kbps *= 125; /* -> bytes */
2954 for (cpt = 1; cpt <= 255; cpt++) {
2955 tps = clk / cpt;
2956 bpt = (kbps + tps / 2) / tps;
2957 if (bpt > 0 && bpt <= 255) {
2958 v = bpt * tps;
2959 delta = v >= kbps ? v - kbps : kbps - v;
2960 if (delta <= mindelta) {
2961 mindelta = delta;
2962 selected_cpt = cpt;
2963 selected_bpt = bpt;
2965 } else if (selected_cpt)
2966 break;
2968 if (!selected_cpt)
2969 return -EINVAL;
2971 t3_write_reg(adap, A_TP_TM_PIO_ADDR,
2972 A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
2973 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
2974 if (sched & 1)
2975 v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
2976 else
2977 v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
2978 t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
2979 return 0;
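/*
 * Worked example, assuming cclk = 200000 (KHz, so clk = 200 MHz): a
 * request for 10000 kbps becomes 1250000 bytes/sec, and the search
 * settles on cpt = 160, bpt = 1, since 1 * (200000000 / 160) matches
 * the target exactly.
 */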
2982 static int tp_init(struct adapter *adap, const struct tp_params *p)
2984 int busy = 0;
2986 tp_config(adap, p);
2987 t3_set_vlan_accel(adap, 3, 0);
2989 if (is_offload(adap)) {
2990 tp_set_timers(adap, adap->params.vpd.cclk * 1000);
2991 t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE);
2992 busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE,
2993 0, 1000, 5);
2994 if (busy)
2995 CH_ERR(adap, "TP initialization timed out\n");
2998 if (!busy)
2999 t3_write_reg(adap, A_TP_RESET, F_TPRESET);
3000 return busy;
3003 int t3_mps_set_active_ports(struct adapter *adap, unsigned int port_mask)
3005 if (port_mask & ~((1 << adap->params.nports) - 1))
3006 return -EINVAL;
3007 t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE | F_PORT0ACTIVE,
3008 port_mask << S_PORT0ACTIVE);
3009 return 0;
3013 * Perform the bits of HW initialization that are dependent on the number
3014 * of available ports.
3016 static void init_hw_for_avail_ports(struct adapter *adap, int nports)
3018 int i;
3020 if (nports == 1) {
3021 t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
3022 t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
3023 t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_TPTXPORT0EN |
3024 F_PORT0ACTIVE | F_ENFORCEPKT);
3025 t3_write_reg(adap, A_PM1_TX_CFG, 0xffffffff);
3026 } else {
3027 t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
3028 t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
3029 t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
3030 V_D1_WEIGHT(16) | V_D0_WEIGHT(16));
3031 t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN |
3032 F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE |
3033 F_ENFORCEPKT);
3034 t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000);
3035 t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE);
3036 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
3037 V_TX_MOD_QUEUE_REQ_MAP(0xaa));
3038 for (i = 0; i < 16; i++)
3039 t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE,
3040 (i << 16) | 0x1010);
3044 static int calibrate_xgm(struct adapter *adapter)
3046 if (uses_xaui(adapter)) {
3047 unsigned int v, i;
3049 for (i = 0; i < 5; ++i) {
3050 t3_write_reg(adapter, A_XGM_XAUI_IMP, 0);
3051 t3_read_reg(adapter, A_XGM_XAUI_IMP);
3052 msleep(1);
3053 v = t3_read_reg(adapter, A_XGM_XAUI_IMP);
3054 if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) {
3055 t3_write_reg(adapter, A_XGM_XAUI_IMP,
3056 V_XAUIIMP(G_CALIMP(v) >> 2));
3057 return 0;
3060 CH_ERR(adapter, "MAC calibration failed\n");
3061 return -1;
3062 } else {
3063 t3_write_reg(adapter, A_XGM_RGMII_IMP,
3064 V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
3065 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
3066 F_XGM_IMPSETUPDATE);
3068 return 0;
3071 static void calibrate_xgm_t3b(struct adapter *adapter)
3073 if (!uses_xaui(adapter)) {
3074 t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET |
3075 F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
3076 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0);
3077 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0,
3078 F_XGM_IMPSETUPDATE);
3079 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE, 0);
3081 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0);
3082 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE);
3086 struct mc7_timing_params {
3087 unsigned char ActToPreDly;
3088 unsigned char ActToRdWrDly;
3089 unsigned char PreCyc;
3090 unsigned char RefCyc[5];
3091 unsigned char BkCyc;
3092 unsigned char WrToRdDly;
3093 unsigned char RdToWrDly;
3097 * Write a value to a register and check that the write completed. These
3098 * writes normally complete in a cycle or two, so one read should suffice.
3099 * The very first read exists to flush the posted write to the device.
3101 static int wrreg_wait(struct adapter *adapter, unsigned int addr, u32 val)
3103 t3_write_reg(adapter, addr, val);
3104 t3_read_reg(adapter, addr); /* flush */
3105 if (!(t3_read_reg(adapter, addr) & F_BUSY))
3106 return 0;
3107 CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr);
3108 return -EIO;
3111 static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
3113 static const unsigned int mc7_mode[] = {
3114 0x632, 0x642, 0x652, 0x432, 0x442
3116 static const struct mc7_timing_params mc7_timings[] = {
3117 {12, 3, 4, {20, 28, 34, 52, 0}, 15, 6, 4},
3118 {12, 4, 5, {20, 28, 34, 52, 0}, 16, 7, 4},
3119 {12, 5, 6, {20, 28, 34, 52, 0}, 17, 8, 4},
3120 {9, 3, 4, {15, 21, 26, 39, 0}, 12, 6, 4},
3121 {9, 4, 5, {15, 21, 26, 39, 0}, 13, 7, 4}
3124 u32 val;
3125 unsigned int width, density, slow, attempts;
3126 struct adapter *adapter = mc7->adapter;
3127 const struct mc7_timing_params *p = &mc7_timings[mem_type];
3129 if (!mc7->size)
3130 return 0;
3132 val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
3133 slow = val & F_SLOW;
3134 width = G_WIDTH(val);
3135 density = G_DEN(val);
3137 t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN);
3138 val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
3139 msleep(1);
3141 if (!slow) {
3142 t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN);
3143 t3_read_reg(adapter, mc7->offset + A_MC7_CAL);
3144 msleep(1);
3145 if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) &
3146 (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) {
3147 CH_ERR(adapter, "%s MC7 calibration timed out\n",
3148 mc7->name);
3149 goto out_fail;
3153 t3_write_reg(adapter, mc7->offset + A_MC7_PARM,
3154 V_ACTTOPREDLY(p->ActToPreDly) |
3155 V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) |
3156 V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) |
3157 V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly));
3159 t3_write_reg(adapter, mc7->offset + A_MC7_CFG,
3160 val | F_CLKEN | F_TERM150);
3161 t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
3163 if (!slow)
3164 t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB,
3165 F_DLLENB);
3166 udelay(1);
3168 val = slow ? 3 : 6;
3169 if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
3170 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) ||
3171 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) ||
3172 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
3173 goto out_fail;
3175 if (!slow) {
3176 t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100);
3177 t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLRST, 0);
3178 udelay(5);
3181 if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
3182 wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
3183 wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
3184 wrreg_wait(adapter, mc7->offset + A_MC7_MODE,
3185 mc7_mode[mem_type]) ||
3186 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) ||
3187 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
3188 goto out_fail;
3190 /* mc7_clock is in KHz; compute clocks per 7.8125 us refresh interval */
3191 mc7_clock = mc7_clock * 7812 + mc7_clock / 2;
3192 mc7_clock /= 1000000;
3194 t3_write_reg(adapter, mc7->offset + A_MC7_REF,
3195 F_PERREFEN | V_PREREFDIV(mc7_clock));
3196 t3_read_reg(adapter, mc7->offset + A_MC7_REF); /* flush */
3198 t3_write_reg(adapter, mc7->offset + A_MC7_ECC, F_ECCGENEN | F_ECCCHKEN);
3199 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0);
3200 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0);
3201 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END,
3202 (mc7->size << width) - 1);
3203 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1));
3204 t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP); /* flush */
3206 attempts = 50;
3207 do {
3208 msleep(250);
3209 val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);
3210 } while ((val & F_BUSY) && --attempts);
3211 if (val & F_BUSY) {
3212 CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name);
3213 goto out_fail;
3216 /* Enable normal memory accesses. */
3217 t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY);
3218 return 0;
3220 out_fail:
3221 return -1;
3224 static void config_pcie(struct adapter *adap)
3226 static const u16 ack_lat[4][6] = {
3227 {237, 416, 559, 1071, 2095, 4143},
3228 {128, 217, 289, 545, 1057, 2081},
3229 {73, 118, 154, 282, 538, 1050},
3230 {67, 107, 86, 150, 278, 534}
3232 static const u16 rpl_tmr[4][6] = {
3233 {711, 1248, 1677, 3213, 6285, 12429},
3234 {384, 651, 867, 1635, 3171, 6243},
3235 {219, 354, 462, 846, 1614, 3150},
3236 {201, 321, 258, 450, 834, 1602}
3239 u16 val;
3240 unsigned int log2_width, pldsize;
3241 unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;
3243 pci_read_config_word(adap->pdev,
3244 adap->params.pci.pcie_cap_addr + PCI_EXP_DEVCTL,
3245 &val);
3246 pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
3247 pci_read_config_word(adap->pdev,
3248 adap->params.pci.pcie_cap_addr + PCI_EXP_LNKCTL,
3249 &val);
3251 fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
3252 fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
3253 G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
3254 log2_width = fls(adap->params.pci.width) - 1;
3255 acklat = ack_lat[log2_width][pldsize];
3256 if (val & 1) /* check LOsEnable */
3257 acklat += fst_trn_tx * 4;
3258 rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;
3260 if (adap->params.rev == 0)
3261 t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
3262 V_T3A_ACKLAT(M_T3A_ACKLAT),
3263 V_T3A_ACKLAT(acklat));
3264 else
3265 t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
3266 V_ACKLAT(acklat));
3268 t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
3269 V_REPLAYLMT(rpllmt));
3271 t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
3272 t3_set_reg_field(adap, A_PCIE_CFG, 0,
3273 F_ENABLELINKDWNDRST | F_ENABLELINKDOWNRST |
3274 F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
3278 * Initialize and configure T3 HW modules. This performs the
3279 * initialization steps that need to be done once after a card is reset.
3280 * MAC and PHY initialization is handled separately whenever a port is enabled.
3282 * fw_params are passed to FW and their value is platform dependent. Only the
3283 * top 8 bits are available for use; the rest must be 0.
3285 int t3_init_hw(struct adapter *adapter, u32 fw_params)
3287 int err = -EIO, attempts, i;
3288 const struct vpd_params *vpd = &adapter->params.vpd;
3290 if (adapter->params.rev > 0)
3291 calibrate_xgm_t3b(adapter);
3292 else if (calibrate_xgm(adapter))
3293 goto out_err;
3295 if (vpd->mclk) {
3296 partition_mem(adapter, &adapter->params.tp);
3298 if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
3299 mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
3300 mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
3301 t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
3302 adapter->params.mc5.nfilters,
3303 adapter->params.mc5.nroutes))
3304 goto out_err;
3306 for (i = 0; i < 32; i++)
3307 if (clear_sge_ctxt(adapter, i, F_CQ))
3308 goto out_err;
3311 if (tp_init(adapter, &adapter->params.tp))
3312 goto out_err;
3314 t3_tp_set_coalescing_size(adapter,
3315 min(adapter->params.sge.max_pkt_size,
3316 MAX_RX_COALESCING_LEN), 1);
3317 t3_tp_set_max_rxsize(adapter,
3318 min(adapter->params.sge.max_pkt_size, 16384U));
3319 ulp_config(adapter, &adapter->params.tp);
3321 if (is_pcie(adapter))
3322 config_pcie(adapter);
3323 else
3324 t3_set_reg_field(adapter, A_PCIX_CFG, 0,
3325 F_DMASTOPEN | F_CLIDECEN);
3327 if (adapter->params.rev == T3_REV_C)
3328 t3_set_reg_field(adapter, A_ULPTX_CONFIG, 0,
3329 F_CFG_CQE_SOP_MASK);
3331 t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff);
3332 t3_write_reg(adapter, A_PM1_RX_MODE, 0);
3333 t3_write_reg(adapter, A_PM1_TX_MODE, 0);
3334 init_hw_for_avail_ports(adapter, adapter->params.nports);
3335 t3_sge_init(adapter, &adapter->params.sge);
3337 t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW, calc_gpio_intr(adapter));
3339 t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
3340 t3_write_reg(adapter, A_CIM_BOOT_CFG,
3341 V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
3342 t3_read_reg(adapter, A_CIM_BOOT_CFG); /* flush */
3344 attempts = 100;
3345 do { /* wait for uP to initialize */
3346 msleep(20);
3347 } while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
3348 if (!attempts) {
3349 CH_ERR(adapter, "uP initialization timed out\n");
3350 goto out_err;
3353 err = 0;
3354 out_err:
3355 return err;
3359 * get_pci_mode - determine a card's PCI mode
3360 * @adapter: the adapter
3361 * @p: where to store the PCI settings
3363 * Determines a card's PCI mode and associated parameters, such as speed
3364 * and width.
3366 static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
3368 static const unsigned short speed_map[] = { 33, 66, 100, 133 };
3369 u32 pci_mode, pcie_cap;
3371 pcie_cap = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
3372 if (pcie_cap) {
3373 u16 val;
3375 p->variant = PCI_VARIANT_PCIE;
3376 p->pcie_cap_addr = pcie_cap;
3377 pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA,
3378 &val);
3379 p->width = (val >> 4) & 0x3f;
3380 return;
3383 pci_mode = t3_read_reg(adapter, A_PCIX_MODE);
3384 p->speed = speed_map[G_PCLKRANGE(pci_mode)];
3385 p->width = (pci_mode & F_64BIT) ? 64 : 32;
3386 pci_mode = G_PCIXINITPAT(pci_mode);
3387 if (pci_mode == 0)
3388 p->variant = PCI_VARIANT_PCI;
3389 else if (pci_mode < 4)
3390 p->variant = PCI_VARIANT_PCIX_MODE1_PARITY;
3391 else if (pci_mode < 8)
3392 p->variant = PCI_VARIANT_PCIX_MODE1_ECC;
3393 else
3394 p->variant = PCI_VARIANT_PCIX_266_MODE2;
3398 * init_link_config - initialize a link's SW state
3399 * @lc: structure holding the link state
3400 * @caps: the link's supported-capabilities bit mask
3402 * Initializes the SW state maintained for each link, including the link's
3403 * capabilities and default speed/duplex/flow-control/autonegotiation
3404 * settings.
3406 static void init_link_config(struct link_config *lc, unsigned int caps)
3408 lc->supported = caps;
3409 lc->requested_speed = lc->speed = SPEED_INVALID;
3410 lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
3411 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
3412 if (lc->supported & SUPPORTED_Autoneg) {
3413 lc->advertising = lc->supported;
3414 lc->autoneg = AUTONEG_ENABLE;
3415 lc->requested_fc |= PAUSE_AUTONEG;
3416 } else {
3417 lc->advertising = 0;
3418 lc->autoneg = AUTONEG_DISABLE;
3423 * mc7_calc_size - calculate MC7 memory size
3424 * @cfg: the MC7 configuration
3426 * Calculates the size of an MC7 memory in bytes from the value of its
3427 * configuration register.
3429 static unsigned int mc7_calc_size(u32 cfg)
3431 unsigned int width = G_WIDTH(cfg);
3432 unsigned int banks = !!(cfg & F_BKS) + 1;
3433 unsigned int org = !!(cfg & F_ORG) + 1;
3434 unsigned int density = G_DEN(cfg);
3435 unsigned int MBs = ((256 << density) * banks) / (org << width);
3437 return MBs << 20;
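/*
 * For example, a cfg value with G_DEN(cfg) = 1, F_BKS set (2 banks),
 * F_ORG clear (org = 1) and G_WIDTH(cfg) = 2 yields
 * MBs = ((256 << 1) * 2) / (1 << 2) = 256, i.e. a 256 MiB memory.
 */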
3440 static void mc7_prep(struct adapter *adapter, struct mc7 *mc7,
3441 unsigned int base_addr, const char *name)
3443 u32 cfg;
3445 mc7->adapter = adapter;
3446 mc7->name = name;
3447 mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
3448 cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
3449 mc7->size = G_DEN(cfg) == M_DEN ? 0 : mc7_calc_size(cfg);
3450 mc7->width = G_WIDTH(cfg);
3453 void mac_prep(struct cmac *mac, struct adapter *adapter, int index)
3455 mac->adapter = adapter;
3456 mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index;
3457 mac->nucast = 1;
3459 if (adapter->params.rev == 0 && uses_xaui(adapter)) {
3460 t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset,
3461 is_10G(adapter) ? 0x2901c04 : 0x2301c04);
3462 t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset,
3463 F_ENRGMII, 0);
3467 void early_hw_init(struct adapter *adapter, const struct adapter_info *ai)
3469 u32 val = V_PORTSPEED(is_10G(adapter) ? 3 : 2);
3471 mi1_init(adapter, ai);
3472 t3_write_reg(adapter, A_I2C_CFG, /* set for 80KHz */
3473 V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
3474 t3_write_reg(adapter, A_T3DBG_GPIO_EN,
3475 ai->gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
3476 t3_write_reg(adapter, A_MC5_DB_SERVER_INDEX, 0);
3477 t3_write_reg(adapter, A_SG_OCO_BASE, V_BASE1(0xfff));
3479 if (adapter->params.rev == 0 || !uses_xaui(adapter))
3480 val |= F_ENRGMII;
3482 /* Enable MAC clocks so we can access the registers */
3483 t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3484 t3_read_reg(adapter, A_XGM_PORT_CFG);
3486 val |= F_CLKDIVRESET_;
3487 t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3488 t3_read_reg(adapter, A_XGM_PORT_CFG);
3489 t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val);
3490 t3_read_reg(adapter, A_XGM_PORT_CFG);
3494 * Reset the adapter.
3495 * Older PCIe cards lose their config space during reset; PCI-X
3496 * ones don't.
3498 int t3_reset_adapter(struct adapter *adapter)
3500 int i, save_and_restore_pcie =
3501 adapter->params.rev < T3_REV_B2 && is_pcie(adapter);
3502 uint16_t devid = 0;
3504 if (save_and_restore_pcie)
3505 pci_save_state(adapter->pdev);
3506 t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);
3509 * Delay: give the device some time to reset fully.
3510 * XXX The delay time should be tuned.
3512 for (i = 0; i < 10; i++) {
3513 msleep(50);
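/* config space offset 0 is the vendor ID; 0x1425 is Chelsio's */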
3514 pci_read_config_word(adapter->pdev, 0x00, &devid);
3515 if (devid == 0x1425)
3516 break;
3519 if (devid != 0x1425)
3520 return -1;
3522 if (save_and_restore_pcie)
3523 pci_restore_state(adapter->pdev);
3524 return 0;
3527 static int init_parity(struct adapter *adap)
3529 int i, err, addr;
3531 if (t3_read_reg(adap, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
3532 return -EBUSY;
3534 for (err = i = 0; !err && i < 16; i++)
3535 err = clear_sge_ctxt(adap, i, F_EGRESS);
3536 for (i = 0xfff0; !err && i <= 0xffff; i++)
3537 err = clear_sge_ctxt(adap, i, F_EGRESS);
3538 for (i = 0; !err && i < SGE_QSETS; i++)
3539 err = clear_sge_ctxt(adap, i, F_RESPONSEQ);
3540 if (err)
3541 return err;
3543 t3_write_reg(adap, A_CIM_IBQ_DBG_DATA, 0);
3544 for (i = 0; i < 4; i++)
3545 for (addr = 0; addr <= M_IBQDBGADDR; addr++) {
3546 t3_write_reg(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGEN |
3547 F_IBQDBGWR | V_IBQDBGQID(i) |
3548 V_IBQDBGADDR(addr));
3549 err = t3_wait_op_done(adap, A_CIM_IBQ_DBG_CFG,
3550 F_IBQDBGBUSY, 0, 2, 1);
3551 if (err)
3552 return err;
3554 return 0;
3558 * Initialize adapter SW state for the various HW modules, set initial values
3559 * for some adapter tunables, take PHYs out of reset, and initialize the MDIO
3560 * interface.
int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
		    int reset)
{
	int ret;
	unsigned int i, j = -1;

	get_pci_mode(adapter, &adapter->params.pci);

	adapter->params.info = ai;
	adapter->params.nports = ai->nports;
	adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
	adapter->params.linkpoll_period = 0;
	adapter->params.stats_update_period = is_10G(adapter) ?
	    MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
	adapter->params.pci.vpd_cap_addr =
	    pci_find_capability(adapter->pdev, PCI_CAP_ID_VPD);
	ret = get_vpd_params(adapter, &adapter->params.vpd);
	if (ret < 0)
		return ret;

	if (reset && t3_reset_adapter(adapter))
		return -1;

	t3_sge_prep(adapter, &adapter->params.sge);
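
	/*
	 * A non-zero memory clock in the VPD means the card has external
	 * memory; size the three MC7 memories and derive the TP
	 * memory-manager parameters from them.
	 */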
	if (adapter->params.vpd.mclk) {
		struct tp_params *p = &adapter->params.tp;

		mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX");
		mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
		mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");

		p->nchan = ai->nports;
		p->pmrx_size = t3_mc7_size(&adapter->pmrx);
		p->pmtx_size = t3_mc7_size(&adapter->pmtx);
		p->cm_size = t3_mc7_size(&adapter->cm);
		p->chan_rx_size = p->pmrx_size / 2;	/* only 1 Rx channel */
		p->chan_tx_size = p->pmtx_size / p->nchan;
		p->rx_pg_size = 64 * 1024;
		p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
		p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
		p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
		p->ntimer_qs = p->cm_size >= (128 << 20) ||
		    adapter->params.rev > 0 ? 12 : 6;
	}
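
	/* Offload is supported only if all three memories are present. */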
	adapter->params.offload = t3_mc7_size(&adapter->pmrx) &&
				  t3_mc7_size(&adapter->pmtx) &&
				  t3_mc7_size(&adapter->cm);

	if (is_offload(adapter)) {
		adapter->params.mc5.nservers = DEFAULT_NSERVERS;
		adapter->params.mc5.nfilters = adapter->params.rev > 0 ?
		    DEFAULT_NFILTERS : 0;
		adapter->params.mc5.nroutes = 0;
		t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT);

		init_mtus(adapter->params.mtus);
		init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
	}

	early_hw_init(adapter, ai);
	ret = init_parity(adapter);
	if (ret)
		return ret;
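
	/*
	 * Per-port setup.  The VPD port-type table may contain unused
	 * entries, so j advances to the next populated slot for each port;
	 * j also serves as the PHY-address offset and MAC instance index.
	 */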
	for_each_port(adapter, i) {
		u8 hw_addr[6];
		const struct port_type_info *pti;
		struct port_info *p = adap2pinfo(adapter, i);

		while (!adapter->params.vpd.port_type[++j])
			;

		pti = &port_types[adapter->params.vpd.port_type[j]];
		ret = pti->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
				    ai->mdio_ops);
		if (ret)
			return ret;
		mac_prep(&p->mac, adapter, j);

		/*
		 * The VPD EEPROM stores the base Ethernet address for the
		 * card.  A port's address is derived from the base by adding
		 * the port's index to the base's low octet.
		 */
		memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
		hw_addr[5] = adapter->params.vpd.eth_base[5] + i;

		memcpy(adapter->port[i]->dev_addr, hw_addr, ETH_ALEN);
		memcpy(adapter->port[i]->perm_addr, hw_addr, ETH_ALEN);
		init_link_config(&p->link_config, p->phy.caps);
		p->phy.ops->power_down(&p->phy, 1);
		if (!(p->phy.caps & SUPPORTED_IRQ))
			adapter->params.linkpoll_period = 10;
	}

	return 0;
}
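
/*
 * Illustrative only: the PCI probe path is expected to call
 * t3_prep_adapter() once per adapter, with @reset set on a cold start,
 * along the lines of (error label hypothetical):
 *
 *	err = t3_prep_adapter(adapter, ai, 1);
 *	if (err)
 *		goto out_free;
 */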
void t3_led_ready(struct adapter *adapter)
{
	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
			 F_GPIO0_OUT_VAL);
}
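
/*
 * Re-run the hardware side of t3_prep_adapter() after a reset that
 * preserved the software state (e.g. PCI error recovery): redo the early
 * HW init and parity init, and re-prep each PHY at its already-known
 * address, keeping the existing MDIO operations (NULL mdio_ops).
 */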
int t3_replay_prep_adapter(struct adapter *adapter)
{
	const struct adapter_info *ai = adapter->params.info;
	unsigned int i, j = -1;
	int ret;

	early_hw_init(adapter, ai);
	ret = init_parity(adapter);
	if (ret)
		return ret;

	for_each_port(adapter, i) {
		const struct port_type_info *pti;
		struct port_info *p = adap2pinfo(adapter, i);

		while (!adapter->params.vpd.port_type[++j])
			;

		pti = &port_types[adapter->params.vpd.port_type[j]];
		ret = pti->phy_prep(&p->phy, adapter, p->phy.addr, NULL);
		if (ret)
			return ret;
		p->phy.ops->power_down(&p->phy, 1);
	}

	return 0;
}