cxgb3 - FW versioning
drivers/net/cxgb3/t3_hw.c
1 /*
2 * This file is part of the Chelsio T3 Ethernet driver.
3 *
4 * Copyright (C) 2003-2006 Chelsio Communications. All rights reserved.
5 *
6 * This program is distributed in the hope that it will be useful, but WITHOUT
7 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
8 * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
9 * release for licensing terms and conditions.
10 */
12 #include "common.h"
13 #include "regs.h"
14 #include "sge_defs.h"
15 #include "firmware_exports.h"
17 /**
18 * t3_wait_op_done_val - wait until an operation is completed
19 * @adapter: the adapter performing the operation
20 * @reg: the register to check for completion
21 * @mask: a single-bit field within @reg that indicates completion
22 * @polarity: the value of the field when the operation is completed
23 * @attempts: number of check iterations
24 * @delay: delay in usecs between iterations
25 * @valp: where to store the value of the register at completion time
27 * Wait until an operation is completed by checking a bit in a register
28 * up to @attempts times. If @valp is not NULL the value of the register
29 * at the time it indicated completion is stored there. Returns 0 if the
30 * operation completes and -EAGAIN otherwise.
33 int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
34 int polarity, int attempts, int delay, u32 *valp)
36 while (1) {
37 u32 val = t3_read_reg(adapter, reg);
39 if (!!(val & mask) == polarity) {
40 if (valp)
41 *valp = val;
42 return 0;
44 if (--attempts == 0)
45 return -EAGAIN;
46 if (delay)
47 udelay(delay);
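/*
 * Usage sketch (editor's illustration, not part of the original file):
 * wait up to 5 polls, 10 us apart, for the serial-flash BUSY flag to
 * deassert and capture the register's final value.  A_SF_OP and F_BUSY
 * are names this file already uses; the helper itself is hypothetical.
 */
static inline int example_wait_sf_idle(struct adapter *adapter, u32 *valp)
{
	return t3_wait_op_done_val(adapter, A_SF_OP, F_BUSY, 0, 5, 10, valp);
}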
51 /**
52 * t3_write_regs - write a bunch of registers
53 * @adapter: the adapter to program
54 * @p: an array of register address/register value pairs
55 * @n: the number of address/value pairs
56 * @offset: register address offset
58 * Takes an array of register address/register value pairs and writes each
59 * value to the corresponding register. Register addresses are adjusted
60 * by the supplied offset.
62 void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p,
63 int n, unsigned int offset)
65 while (n--) {
66 t3_write_reg(adapter, p->reg_addr + offset, p->val);
67 p++;
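/*
 * Usage sketch (editor's note): this is driven with static tables, as
 * t3_intr_enable() does later in this file, e.g.
 *
 *	static const struct addr_val_pair avp[] = {
 *		{A_SG_INT_ENABLE, SGE_INTR_MASK},
 *		{A_MC5_DB_INT_ENABLE, MC5_INTR_MASK},
 *	};
 *
 *	t3_write_regs(adapter, avp, ARRAY_SIZE(avp), 0);
 */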
71 /**
72 * t3_set_reg_field - set a register field to a value
73 * @adapter: the adapter to program
74 * @addr: the register address
75 * @mask: specifies the portion of the register to modify
76 * @val: the new value for the register field
78 * Sets a register field specified by the supplied mask to the
79 * given value.
81 void t3_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
82 u32 val)
84 u32 v = t3_read_reg(adapter, addr) & ~mask;
86 t3_write_reg(adapter, addr, v | val);
87 t3_read_reg(adapter, addr); /* flush */
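/*
 * Example (editor's sketch, not in the original file): enable the MDIO
 * preamble bit in A_MI1_CFG without touching the clock-divider field.
 * F_PREEN is the same flag mi1_init() programs below.
 */
static inline void example_enable_mi1_preamble(struct adapter *adapter)
{
	t3_set_reg_field(adapter, A_MI1_CFG, F_PREEN, F_PREEN);
}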
90 /**
91 * t3_read_indirect - read indirectly addressed registers
92 * @adap: the adapter
93 * @addr_reg: register holding the indirect address
94 * @data_reg: register holding the value of the indirect register
95 * @vals: where the read register values are stored
96 * @start_idx: index of first indirect register to read
97 * @nregs: how many indirect registers to read
99 * Reads registers that are accessed indirectly through an address/data
100 * register pair.
102 void t3_read_indirect(struct adapter *adap, unsigned int addr_reg,
103 unsigned int data_reg, u32 *vals, unsigned int nregs,
104 unsigned int start_idx)
106 while (nregs--) {
107 t3_write_reg(adap, addr_reg, start_idx);
108 *vals++ = t3_read_reg(adap, data_reg);
109 start_idx++;
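/*
 * Usage sketch (editor's illustration): dump four consecutive indirect
 * registers starting at index 0 through an address/data pair, e.g.
 *
 *	u32 vals[4];
 *
 *	t3_read_indirect(adap, addr_reg, data_reg, vals, 4, 0);
 *
 * where addr_reg and data_reg are whatever address/data register pair
 * the block exposes.
 */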
114 * t3_mc7_bd_read - read from MC7 through backdoor accesses
115 * @mc7: identifies MC7 to read from
116 * @start: index of first 64-bit word to read
117 * @n: number of 64-bit words to read
118 * @buf: where to store the read result
120 * Read n 64-bit words from MC7 starting at word start, using backdoor
121 * accesses.
123 int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
124 u64 *buf)
126 static const int shift[] = { 0, 0, 16, 24 };
127 static const int step[] = { 0, 32, 16, 8 };
129 unsigned int size64 = mc7->size / 8; /* # of 64-bit words */
130 struct adapter *adap = mc7->adapter;
132 if (start >= size64 || start + n > size64)
133 return -EINVAL;
135 start *= (8 << mc7->width);
136 while (n--) {
137 int i;
138 u64 val64 = 0;
140 for (i = (1 << mc7->width) - 1; i >= 0; --i) {
141 int attempts = 10;
142 u32 val;
144 t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR, start);
145 t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0);
146 val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP);
147 while ((val & F_BUSY) && attempts--)
148 val = t3_read_reg(adap,
149 mc7->offset + A_MC7_BD_OP);
150 if (val & F_BUSY)
151 return -EIO;
153 val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1);
154 if (mc7->width == 0) {
155 val64 = t3_read_reg(adap,
156 mc7->offset +
157 A_MC7_BD_DATA0);
158 val64 |= (u64) val << 32;
159 } else {
160 if (mc7->width > 1)
161 val >>= shift[mc7->width];
162 val64 |= (u64) val << (step[mc7->width] * i);
164 start += 8;
166 *buf++ = val64;
168 return 0;
172 * Initialize MI1.
174 static void mi1_init(struct adapter *adap, const struct adapter_info *ai)
176 u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
177 u32 val = F_PREEN | V_MDIINV(ai->mdiinv) | V_MDIEN(ai->mdien) |
178 V_CLKDIV(clkdiv);
180 if (!(ai->caps & SUPPORTED_10000baseT_Full))
181 val |= V_ST(1);
182 t3_write_reg(adap, A_MI1_CFG, val);
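/*
 * Worked example (editor's note): the divider above yields
 * MDC = cclk / (2 * (clkdiv + 1)).  With cclk = 125000 and mdc = 2500
 * (both in the same units in the VPD), clkdiv = 125000 / (2 * 2500) - 1
 * = 24, giving an MDIO clock of 125000 / 50 = 2500, i.e. 2.5 MHz for a
 * 125 MHz core clock.
 */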
185 #define MDIO_ATTEMPTS 10
188 * MI1 read/write operations for direct-addressed PHYs.
190 static int mi1_read(struct adapter *adapter, int phy_addr, int mmd_addr,
191 int reg_addr, unsigned int *valp)
193 int ret;
194 u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
196 if (mmd_addr)
197 return -EINVAL;
199 mutex_lock(&adapter->mdio_lock);
200 t3_write_reg(adapter, A_MI1_ADDR, addr);
201 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));
202 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
203 if (!ret)
204 *valp = t3_read_reg(adapter, A_MI1_DATA);
205 mutex_unlock(&adapter->mdio_lock);
206 return ret;
209 static int mi1_write(struct adapter *adapter, int phy_addr, int mmd_addr,
210 int reg_addr, unsigned int val)
212 int ret;
213 u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
215 if (mmd_addr)
216 return -EINVAL;
218 mutex_lock(&adapter->mdio_lock);
219 t3_write_reg(adapter, A_MI1_ADDR, addr);
220 t3_write_reg(adapter, A_MI1_DATA, val);
221 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
222 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
223 mutex_unlock(&adapter->mdio_lock);
224 return ret;
227 static const struct mdio_ops mi1_mdio_ops = {
228 mi1_read,
229 mi1_write
233 * MI1 read/write operations for indirect-addressed PHYs.
235 static int mi1_ext_read(struct adapter *adapter, int phy_addr, int mmd_addr,
236 int reg_addr, unsigned int *valp)
238 int ret;
239 u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);
241 mutex_lock(&adapter->mdio_lock);
242 t3_write_reg(adapter, A_MI1_ADDR, addr);
243 t3_write_reg(adapter, A_MI1_DATA, reg_addr);
244 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
245 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
246 if (!ret) {
247 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));
248 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
249 MDIO_ATTEMPTS, 20);
250 if (!ret)
251 *valp = t3_read_reg(adapter, A_MI1_DATA);
253 mutex_unlock(&adapter->mdio_lock);
254 return ret;
257 static int mi1_ext_write(struct adapter *adapter, int phy_addr, int mmd_addr,
258 int reg_addr, unsigned int val)
260 int ret;
261 u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);
263 mutex_lock(&adapter->mdio_lock);
264 t3_write_reg(adapter, A_MI1_ADDR, addr);
265 t3_write_reg(adapter, A_MI1_DATA, reg_addr);
266 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
267 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
268 if (!ret) {
269 t3_write_reg(adapter, A_MI1_DATA, val);
270 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
271 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
272 MDIO_ATTEMPTS, 20);
274 mutex_unlock(&adapter->mdio_lock);
275 return ret;
278 static const struct mdio_ops mi1_mdio_ext_ops = {
279 mi1_ext_read,
280 mi1_ext_write
284 * t3_mdio_change_bits - modify the value of a PHY register
285 * @phy: the PHY to operate on
286 * @mmd: the device address
287 * @reg: the register address
288 * @clear: what part of the register value to mask off
289 * @set: what part of the register value to set
291 * Changes the value of a PHY register by applying a mask to its current
292 * value and ORing the result with a new value.
294 int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
295 unsigned int set)
297 int ret;
298 unsigned int val;
300 ret = mdio_read(phy, mmd, reg, &val);
301 if (!ret) {
302 val &= ~clear;
303 ret = mdio_write(phy, mmd, reg, val | set);
305 return ret;
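/*
 * Example (editor's sketch, not in the original file): take a
 * 10/100/1000 PHY out of power-down while leaving the rest of BMCR
 * untouched.
 */
static inline int example_phy_wake(struct cphy *phy)
{
	return t3_mdio_change_bits(phy, 0, MII_BMCR, BMCR_PDOWN, 0);
}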
309 * t3_phy_reset - reset a PHY block
310 * @phy: the PHY to operate on
311 * @mmd: the device address of the PHY block to reset
312 * @wait: how long to wait for the reset to complete in 1ms increments
314 * Resets a PHY block and optionally waits for the reset to complete.
315 * @mmd should be 0 for 10/100/1000 PHYs and the device address to reset
316 * for 10G PHYs.
318 int t3_phy_reset(struct cphy *phy, int mmd, int wait)
320 int err;
321 unsigned int ctl;
323 err = t3_mdio_change_bits(phy, mmd, MII_BMCR, BMCR_PDOWN, BMCR_RESET);
324 if (err || !wait)
325 return err;
327 do {
328 err = mdio_read(phy, mmd, MII_BMCR, &ctl);
329 if (err)
330 return err;
331 ctl &= BMCR_RESET;
332 if (ctl)
333 msleep(1);
334 } while (ctl && --wait);
336 return ctl ? -1 : 0;
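/*
 * Usage sketch (editor's note): reset a 10/100/1000 PHY (@mmd == 0 per
 * the comment above) and wait up to 100 ms for completion:
 *
 *	err = t3_phy_reset(phy, 0, 100);
 */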
340 * t3_phy_advertise - set the PHY advertisement registers for autoneg
341 * @phy: the PHY to operate on
342 * @advert: bitmap of capabilities the PHY should advertise
344 * Sets a 10/100/1000 PHY's advertisement registers to advertise the
345 * requested capabilities.
347 int t3_phy_advertise(struct cphy *phy, unsigned int advert)
349 int err;
350 unsigned int val = 0;
352 err = mdio_read(phy, 0, MII_CTRL1000, &val);
353 if (err)
354 return err;
356 val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
357 if (advert & ADVERTISED_1000baseT_Half)
358 val |= ADVERTISE_1000HALF;
359 if (advert & ADVERTISED_1000baseT_Full)
360 val |= ADVERTISE_1000FULL;
362 err = mdio_write(phy, 0, MII_CTRL1000, val);
363 if (err)
364 return err;
366 val = 1;
367 if (advert & ADVERTISED_10baseT_Half)
368 val |= ADVERTISE_10HALF;
369 if (advert & ADVERTISED_10baseT_Full)
370 val |= ADVERTISE_10FULL;
371 if (advert & ADVERTISED_100baseT_Half)
372 val |= ADVERTISE_100HALF;
373 if (advert & ADVERTISED_100baseT_Full)
374 val |= ADVERTISE_100FULL;
375 if (advert & ADVERTISED_Pause)
376 val |= ADVERTISE_PAUSE_CAP;
377 if (advert & ADVERTISED_Asym_Pause)
378 val |= ADVERTISE_PAUSE_ASYM;
379 return mdio_write(phy, 0, MII_ADVERTISE, val);
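/*
 * Example (editor's sketch, not in the original file): advertise
 * full-duplex 100/1000 plus symmetric pause.
 */
static inline int example_advertise_fdx(struct cphy *phy)
{
	return t3_phy_advertise(phy, ADVERTISED_100baseT_Full |
				ADVERTISED_1000baseT_Full |
				ADVERTISED_Pause);
}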
383 * t3_set_phy_speed_duplex - force PHY speed and duplex
384 * @phy: the PHY to operate on
385 * @speed: requested PHY speed
386 * @duplex: requested PHY duplex
388 * Force a 10/100/1000 PHY's speed and duplex. This also disables
389 * auto-negotiation except for GigE, where auto-negotiation is mandatory.
391 int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
393 int err;
394 unsigned int ctl;
396 err = mdio_read(phy, 0, MII_BMCR, &ctl);
397 if (err)
398 return err;
400 if (speed >= 0) {
401 ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
402 if (speed == SPEED_100)
403 ctl |= BMCR_SPEED100;
404 else if (speed == SPEED_1000)
405 ctl |= BMCR_SPEED1000;
407 if (duplex >= 0) {
408 ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
409 if (duplex == DUPLEX_FULL)
410 ctl |= BMCR_FULLDPLX;
412 if (ctl & BMCR_SPEED1000) /* auto-negotiation required for GigE */
413 ctl |= BMCR_ANENABLE;
414 return mdio_write(phy, 0, MII_BMCR, ctl);
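/*
 * Usage sketch (editor's note): force 100 Mb/s full duplex; per the
 * logic above this also clears BMCR_ANENABLE since the speed is not
 * gigabit:
 *
 *	err = t3_set_phy_speed_duplex(phy, SPEED_100, DUPLEX_FULL);
 */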
417 static const struct adapter_info t3_adap_info[] = {
418 {2, 0, 0, 0,
419 F_GPIO2_OEN | F_GPIO4_OEN |
420 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, F_GPIO3 | F_GPIO5,
421 SUPPORTED_OFFLOAD,
422 &mi1_mdio_ops, "Chelsio PE9000"},
423 {2, 0, 0, 0,
424 F_GPIO2_OEN | F_GPIO4_OEN |
425 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, F_GPIO3 | F_GPIO5,
426 SUPPORTED_OFFLOAD,
427 &mi1_mdio_ops, "Chelsio T302"},
428 {1, 0, 0, 0,
429 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
430 F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, 0,
431 SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_OFFLOAD,
432 &mi1_mdio_ext_ops, "Chelsio T310"},
433 {2, 0, 0, 0,
434 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
435 F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
436 F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, 0,
437 SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_OFFLOAD,
438 &mi1_mdio_ext_ops, "Chelsio T320"},
442 * Return the adapter_info structure with a given index. Out-of-range indices
443 * return NULL.
445 const struct adapter_info *t3_get_adapter_info(unsigned int id)
447 return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
450 #define CAPS_1G (SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Full | \
451 SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_MII)
452 #define CAPS_10G (SUPPORTED_10000baseT_Full | SUPPORTED_AUI)
454 static const struct port_type_info port_types[] = {
455 {NULL},
456 {t3_ael1002_phy_prep, CAPS_10G | SUPPORTED_FIBRE,
457 "10GBASE-XR"},
458 {t3_vsc8211_phy_prep, CAPS_1G | SUPPORTED_TP | SUPPORTED_IRQ,
459 "10/100/1000BASE-T"},
460 {NULL, CAPS_1G | SUPPORTED_TP | SUPPORTED_IRQ,
461 "10/100/1000BASE-T"},
462 {t3_xaui_direct_phy_prep, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4"},
463 {NULL, CAPS_10G, "10GBASE-KX4"},
464 {t3_qt2045_phy_prep, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4"},
465 {t3_ael1006_phy_prep, CAPS_10G | SUPPORTED_FIBRE,
466 "10GBASE-SR"},
467 {NULL, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4"},
470 #undef CAPS_1G
471 #undef CAPS_10G
473 #define VPD_ENTRY(name, len) \
474 u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]
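/*
 * Example (editor's note): VPD_ENTRY(sn, 16) expands to
 *
 *	u8 sn_kword[2]; u8 sn_len; u8 sn_data[16];
 *
 * i.e. the two-byte keyword, one-byte length, and data bytes of a
 * VPD-R field, which is how struct t3_vpd below mirrors the EEPROM
 * layout.
 */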
477 * Partial EEPROM Vital Product Data structure. Includes only the ID and
478 * VPD-R sections.
480 struct t3_vpd {
481 u8 id_tag;
482 u8 id_len[2];
483 u8 id_data[16];
484 u8 vpdr_tag;
485 u8 vpdr_len[2];
486 VPD_ENTRY(pn, 16); /* part number */
487 VPD_ENTRY(ec, 16); /* EC level */
488 VPD_ENTRY(sn, 16); /* serial number */
489 VPD_ENTRY(na, 12); /* MAC address base */
490 VPD_ENTRY(cclk, 6); /* core clock */
491 VPD_ENTRY(mclk, 6); /* mem clock */
492 VPD_ENTRY(uclk, 6); /* uP clk */
493 VPD_ENTRY(mdc, 6); /* MDIO clk */
494 VPD_ENTRY(mt, 2); /* mem timing */
495 VPD_ENTRY(xaui0cfg, 6); /* XAUI0 config */
496 VPD_ENTRY(xaui1cfg, 6); /* XAUI1 config */
497 VPD_ENTRY(port0, 2); /* PHY0 complex */
498 VPD_ENTRY(port1, 2); /* PHY1 complex */
499 VPD_ENTRY(port2, 2); /* PHY2 complex */
500 VPD_ENTRY(port3, 2); /* PHY3 complex */
501 VPD_ENTRY(rv, 1); /* csum */
502 u32 pad; /* for multiple-of-4 sizing and alignment */
505 #define EEPROM_MAX_POLL 4
506 #define EEPROM_STAT_ADDR 0x4000
507 #define VPD_BASE 0xc00
510 * t3_seeprom_read - read a VPD EEPROM location
511 * @adapter: adapter to read
512 * @addr: EEPROM address
513 * @data: where to store the read data
515 * Read a 32-bit word from a location in VPD EEPROM using the card's PCI
516 * VPD ROM capability. A zero is written to the flag bit when the
517 * address is written to the control register. The hardware device will
518 * set the flag to 1 when 4 bytes have been read into the data register.
520 int t3_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
522 u16 val;
523 int attempts = EEPROM_MAX_POLL;
524 unsigned int base = adapter->params.pci.vpd_cap_addr;
526 if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
527 return -EINVAL;
529 pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR, addr);
530 do {
531 udelay(10);
532 pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
533 } while (!(val & PCI_VPD_ADDR_F) && --attempts);
535 if (!(val & PCI_VPD_ADDR_F)) {
536 CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
537 return -EIO;
539 pci_read_config_dword(adapter->pdev, base + PCI_VPD_DATA, data);
540 *data = le32_to_cpu(*data);
541 return 0;
545 * t3_seeprom_write - write a VPD EEPROM location
546 * @adapter: adapter to write
547 * @addr: EEPROM address
548 * @data: value to write
550 * Write a 32-bit word to a location in VPD EEPROM using the card's PCI
551 * VPD ROM capability.
553 int t3_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
555 u16 val;
556 int attempts = EEPROM_MAX_POLL;
557 unsigned int base = adapter->params.pci.vpd_cap_addr;
559 if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
560 return -EINVAL;
562 pci_write_config_dword(adapter->pdev, base + PCI_VPD_DATA,
563 cpu_to_le32(data));
564 pci_write_config_word(adapter->pdev,base + PCI_VPD_ADDR,
565 addr | PCI_VPD_ADDR_F);
566 do {
567 msleep(1);
568 pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
569 } while ((val & PCI_VPD_ADDR_F) && --attempts);
571 if (val & PCI_VPD_ADDR_F) {
572 CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
573 return -EIO;
575 return 0;
579 * t3_seeprom_wp - enable/disable EEPROM write protection
580 * @adapter: the adapter
581 * @enable: 1 to enable write protection, 0 to disable it
583 * Enables or disables write protection on the serial EEPROM.
585 int t3_seeprom_wp(struct adapter *adapter, int enable)
587 return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
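/*
 * Usage sketch (editor's note): lift write protection around an EEPROM
 * update and restore it afterwards:
 *
 *	t3_seeprom_wp(adapter, 0);
 *	ret = t3_seeprom_write(adapter, addr, val);
 *	t3_seeprom_wp(adapter, 1);
 */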
591 * Convert a character holding a hex digit to a number.
593 static unsigned int hex2int(unsigned char c)
595 return isdigit(c) ? c - '0' : toupper(c) - 'A' + 10;
599 * get_vpd_params - read VPD parameters from VPD EEPROM
600 * @adapter: adapter to read
601 * @p: where to store the parameters
603 * Reads card parameters stored in VPD EEPROM.
605 static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
607 int i, addr, ret;
608 struct t3_vpd vpd;
611 * Card information is normally at VPD_BASE but some early cards had
612 * it at 0.
614 ret = t3_seeprom_read(adapter, VPD_BASE, (u32 *)&vpd);
615 if (ret)
616 return ret;
617 addr = vpd.id_tag == 0x82 ? VPD_BASE : 0;
619 for (i = 0; i < sizeof(vpd); i += 4) {
620 ret = t3_seeprom_read(adapter, addr + i,
621 (u32 *)((u8 *)&vpd + i));
622 if (ret)
623 return ret;
626 p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
627 p->mclk = simple_strtoul(vpd.mclk_data, NULL, 10);
628 p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10);
629 p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10);
630 p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10);
632 /* Old eeproms didn't have port information */
633 if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
634 p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
635 p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
636 } else {
637 p->port_type[0] = hex2int(vpd.port0_data[0]);
638 p->port_type[1] = hex2int(vpd.port1_data[0]);
639 p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16);
640 p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16);
643 for (i = 0; i < 6; i++)
644 p->eth_base[i] = hex2int(vpd.na_data[2 * i]) * 16 +
645 hex2int(vpd.na_data[2 * i + 1]);
646 return 0;
649 /* serial flash and firmware constants */
650 enum {
651 SF_ATTEMPTS = 5, /* max retries for SF1 operations */
652 SF_SEC_SIZE = 64 * 1024, /* serial flash sector size */
653 SF_SIZE = SF_SEC_SIZE * 8, /* serial flash size */
655 /* flash command opcodes */
656 SF_PROG_PAGE = 2, /* program page */
657 SF_WR_DISABLE = 4, /* disable writes */
658 SF_RD_STATUS = 5, /* read status register */
659 SF_WR_ENABLE = 6, /* enable writes */
660 SF_RD_DATA_FAST = 0xb, /* read flash */
661 SF_ERASE_SECTOR = 0xd8, /* erase sector */
663 FW_FLASH_BOOT_ADDR = 0x70000, /* start address of FW in flash */
664 FW_VERS_ADDR = 0x77ffc /* flash address holding FW version */
668 * sf1_read - read data from the serial flash
669 * @adapter: the adapter
670 * @byte_cnt: number of bytes to read
671 * @cont: whether another operation will be chained
672 * @valp: where to store the read data
674 * Reads up to 4 bytes of data from the serial flash. The location of
675 * the read needs to be specified prior to calling this by issuing the
676 * appropriate commands to the serial flash.
678 static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
679 u32 *valp)
681 int ret;
683 if (!byte_cnt || byte_cnt > 4)
684 return -EINVAL;
685 if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
686 return -EBUSY;
687 t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
688 ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
689 if (!ret)
690 *valp = t3_read_reg(adapter, A_SF_DATA);
691 return ret;
695 * sf1_write - write data to the serial flash
696 * @adapter: the adapter
697 * @byte_cnt: number of bytes to write
698 * @cont: whether another operation will be chained
699 * @val: value to write
701 * Writes up to 4 bytes of data to the serial flash. The location of
702 * the write needs to be specified prior to calling this by issuing the
703 * appropriate commands to the serial flash.
705 static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
706 u32 val)
708 if (!byte_cnt || byte_cnt > 4)
709 return -EINVAL;
710 if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
711 return -EBUSY;
712 t3_write_reg(adapter, A_SF_DATA, val);
713 t3_write_reg(adapter, A_SF_OP,
714 V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
715 return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
719 * flash_wait_op - wait for a flash operation to complete
720 * @adapter: the adapter
721 * @attempts: max number of polls of the status register
722 * @delay: delay between polls in ms
724 * Wait for a flash operation to complete by polling the status register.
726 static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
728 int ret;
729 u32 status;
731 while (1) {
732 if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 ||
733 (ret = sf1_read(adapter, 1, 0, &status)) != 0)
734 return ret;
735 if (!(status & 1))
736 return 0;
737 if (--attempts == 0)
738 return -EAGAIN;
739 if (delay)
740 msleep(delay);
745 * t3_read_flash - read words from serial flash
746 * @adapter: the adapter
747 * @addr: the start address for the read
748 * @nwords: how many 32-bit words to read
749 * @data: where to store the read data
750 * @byte_oriented: whether to store data as bytes or as words
752 * Read the specified number of 32-bit words from the serial flash.
753 * If @byte_oriented is set the read data is stored as a byte array
754 * (i.e., big-endian), otherwise as 32-bit words in the platform's
755 * natural endianness.
757 int t3_read_flash(struct adapter *adapter, unsigned int addr,
758 unsigned int nwords, u32 *data, int byte_oriented)
760 int ret;
762 if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
763 return -EINVAL;
765 addr = swab32(addr) | SF_RD_DATA_FAST;
767 if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 ||
768 (ret = sf1_read(adapter, 1, 1, data)) != 0)
769 return ret;
771 for (; nwords; nwords--, data++) {
772 ret = sf1_read(adapter, 4, nwords > 1, data);
773 if (ret)
774 return ret;
775 if (byte_oriented)
776 *data = htonl(*data);
778 return 0;
782 * t3_write_flash - write up to a page of data to the serial flash
783 * @adapter: the adapter
784 * @addr: the start address to write
785 * @n: length of data to write
786 * @data: the data to write
788 * Writes up to a page of data (256 bytes) to the serial flash starting
789 * at the given address.
791 static int t3_write_flash(struct adapter *adapter, unsigned int addr,
792 unsigned int n, const u8 *data)
794 int ret;
795 u32 buf[64];
796 unsigned int i, c, left, val, offset = addr & 0xff;
798 if (addr + n > SF_SIZE || offset + n > 256)
799 return -EINVAL;
801 val = swab32(addr) | SF_PROG_PAGE;
803 if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
804 (ret = sf1_write(adapter, 4, 1, val)) != 0)
805 return ret;
807 for (left = n; left; left -= c) {
808 c = min(left, 4U);
809 for (val = 0, i = 0; i < c; ++i)
810 val = (val << 8) + *data++;
812 ret = sf1_write(adapter, c, c != left, val);
813 if (ret)
814 return ret;
816 if ((ret = flash_wait_op(adapter, 5, 1)) != 0)
817 return ret;
819 /* Read the page to verify the write succeeded */
820 ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
821 if (ret)
822 return ret;
824 if (memcmp(data - n, (u8 *) buf + offset, n))
825 return -EIO;
826 return 0;
829 enum fw_version_type {
830 FW_VERSION_N3,
831 FW_VERSION_T3
835 * t3_get_fw_version - read the firmware version
836 * @adapter: the adapter
837 * @vers: where to place the version
839 * Reads the FW version from flash.
841 int t3_get_fw_version(struct adapter *adapter, u32 *vers)
843 return t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);
847 * t3_check_fw_version - check if the FW is compatible with this driver
848 * @adapter: the adapter
850 * Checks if an adapter's FW is compatible with the driver. Returns 0
851 * if the versions are compatible, a negative error otherwise.
853 int t3_check_fw_version(struct adapter *adapter)
855 int ret;
856 u32 vers;
857 unsigned int type, major, minor;
859 ret = t3_get_fw_version(adapter, &vers);
860 if (ret)
861 return ret;
863 type = G_FW_VERSION_TYPE(vers);
864 major = G_FW_VERSION_MAJOR(vers);
865 minor = G_FW_VERSION_MINOR(vers);
867 if (type == FW_VERSION_T3 && major == 3 && minor == 1)
868 return 0;
870 CH_ERR(adapter, "found wrong FW version(%u.%u), "
871 "driver needs version 3.1\n", major, minor);
872 return -EINVAL;
876 * t3_flash_erase_sectors - erase a range of flash sectors
877 * @adapter: the adapter
878 * @start: the first sector to erase
879 * @end: the last sector to erase
881 * Erases the sectors in the given range.
883 static int t3_flash_erase_sectors(struct adapter *adapter, int start, int end)
885 while (start <= end) {
886 int ret;
888 if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
889 (ret = sf1_write(adapter, 4, 0,
890 SF_ERASE_SECTOR | (start << 8))) != 0 ||
891 (ret = flash_wait_op(adapter, 5, 500)) != 0)
892 return ret;
893 start++;
895 return 0;
899 * t3_load_fw - download firmware
900 * @adapter: the adapter
901 * @fw_data: the firmware image to write
902 * @size: image size
904 * Write the supplied firmware image to the card's serial flash.
905 * The FW image has the following sections: @size - 8 bytes of code and
906 * data, followed by 4 bytes of FW version, followed by the 32-bit
907 * 1's complement checksum of the whole image.
909 int t3_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size)
911 u32 csum;
912 unsigned int i;
913 const u32 *p = (const u32 *)fw_data;
914 int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;
916 if (size & 3)
917 return -EINVAL;
918 if (size > FW_VERS_ADDR + 8 - FW_FLASH_BOOT_ADDR)
919 return -EFBIG;
921 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
922 csum += ntohl(p[i]);
923 if (csum != 0xffffffff) {
924 CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
925 csum);
926 return -EINVAL;
929 ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);
930 if (ret)
931 goto out;
933 size -= 8; /* trim off version and checksum */
934 for (addr = FW_FLASH_BOOT_ADDR; size;) {
935 unsigned int chunk_size = min(size, 256U);
937 ret = t3_write_flash(adapter, addr, chunk_size, fw_data);
938 if (ret)
939 goto out;
941 addr += chunk_size;
942 fw_data += chunk_size;
943 size -= chunk_size;
946 ret = t3_write_flash(adapter, FW_VERS_ADDR, 4, fw_data);
947 out:
948 if (ret)
949 CH_ERR(adapter, "firmware download failed, error %d\n", ret);
950 return ret;
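/*
 * Editor's sketch (not part of the driver): how an image-preparation
 * tool could compute the trailing checksum so the sum verified above
 * comes out to 0xffffffff.  With p pointing at the big-endian image
 * words and size the full image size in bytes:
 *
 *	u32 sum = 0;
 *	for (i = 0; i < size / 4 - 1; i++)
 *		sum += ntohl(p[i]);
 *	p[size / 4 - 1] = htonl(0xffffffff - sum);
 */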
953 #define CIM_CTL_BASE 0x2000
956 * t3_cim_ctl_blk_read - read a block from CIM control region
958 * @adap: the adapter
959 * @addr: the start address within the CIM control region
960 * @n: number of words to read
961 * @valp: where to store the result
963 * Reads a block of 4-byte words from the CIM control region.
965 int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr,
966 unsigned int n, unsigned int *valp)
968 int ret = 0;
970 if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
971 return -EBUSY;
973 for ( ; !ret && n--; addr += 4) {
974 t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr);
975 ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
976 0, 5, 2);
977 if (!ret)
978 *valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA);
980 return ret;
985 * t3_link_changed - handle interface link changes
986 * @adapter: the adapter
987 * @port_id: the port index that changed link state
989 * Called when a port's link settings change to propagate the new values
990 * to the associated PHY and MAC. After performing the common tasks it
991 * invokes an OS-specific handler.
993 void t3_link_changed(struct adapter *adapter, int port_id)
995 int link_ok, speed, duplex, fc;
996 struct port_info *pi = adap2pinfo(adapter, port_id);
997 struct cphy *phy = &pi->phy;
998 struct cmac *mac = &pi->mac;
999 struct link_config *lc = &pi->link_config;
1001 phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);
1003 if (link_ok != lc->link_ok && adapter->params.rev > 0 &&
1004 uses_xaui(adapter)) {
1005 if (link_ok)
1006 t3b_pcs_reset(mac);
1007 t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
1008 link_ok ? F_TXACTENABLE | F_RXEN : 0);
1010 lc->link_ok = link_ok;
1011 lc->speed = speed < 0 ? SPEED_INVALID : speed;
1012 lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
1013 if (lc->requested_fc & PAUSE_AUTONEG)
1014 fc &= lc->requested_fc;
1015 else
1016 fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1018 if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
1019 /* Set MAC speed, duplex, and flow control to match PHY. */
1020 t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc);
1021 lc->fc = fc;
1024 t3_os_link_changed(adapter, port_id, link_ok, speed, duplex, fc);
1028 * t3_link_start - apply link configuration to MAC/PHY
1029 * @phy: the PHY to setup
1030 * @mac: the MAC to setup
1031 * @lc: the requested link configuration
1033 * Set up a port's MAC and PHY according to a desired link configuration.
1034 * - If the PHY can auto-negotiate, first decide what to advertise, then
1035 * enable/disable auto-negotiation as desired, and reset.
1036 * - If the PHY does not auto-negotiate just reset it.
1037 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
1038 * otherwise do it later based on the outcome of auto-negotiation.
1040 int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
1042 unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1044 lc->link_ok = 0;
1045 if (lc->supported & SUPPORTED_Autoneg) {
1046 lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);
1047 if (fc) {
1048 lc->advertising |= ADVERTISED_Asym_Pause;
1049 if (fc & PAUSE_RX)
1050 lc->advertising |= ADVERTISED_Pause;
1052 phy->ops->advertise(phy, lc->advertising);
1054 if (lc->autoneg == AUTONEG_DISABLE) {
1055 lc->speed = lc->requested_speed;
1056 lc->duplex = lc->requested_duplex;
1057 lc->fc = (unsigned char)fc;
1058 t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex,
1059 fc);
1060 /* Also disables autoneg */
1061 phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
1062 phy->ops->reset(phy, 0);
1063 } else
1064 phy->ops->autoneg_enable(phy);
1065 } else {
1066 t3_mac_set_speed_duplex_fc(mac, -1, -1, fc);
1067 lc->fc = (unsigned char)fc;
1068 phy->ops->reset(phy, 0);
1070 return 0;
1074 * t3_set_vlan_accel - control HW VLAN extraction
1075 * @adapter: the adapter
1076 * @ports: bitmap of adapter ports to operate on
1077 * @on: enable (1) or disable (0) HW VLAN extraction
1079 * Enables or disables HW extraction of VLAN tags for the given ports.
1081 void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on)
1083 t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
1084 ports << S_VLANEXTRACTIONENABLE,
1085 on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
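/*
 * Example (editor's note): enable HW VLAN extraction on ports 0 and 1:
 *
 *	t3_set_vlan_accel(adapter, (1 << 0) | (1 << 1), 1);
 */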
1088 struct intr_info {
1089 unsigned int mask; /* bits to check in interrupt status */
1090 const char *msg; /* message to print or NULL */
1091 short stat_idx; /* stat counter to increment or -1 */
1092 unsigned short fatal:1; /* whether the condition reported is fatal */
1096 * t3_handle_intr_status - table-driven interrupt handler
1097 * @adapter: the adapter that generated the interrupt
1098 * @reg: the interrupt status register to process
1099 * @mask: a mask to apply to the interrupt status
1100 * @acts: table of interrupt actions
1101 * @stats: statistics counters tracking interrupt occurrences
1103 * A table-driven interrupt handler that applies a set of masks to an
1104 * interrupt status word and performs the corresponding actions if the
1105 * interrupts described by the mask have occurred. The actions include
1106 * optionally printing a warning or alert message, and optionally
1107 * incrementing a stat counter. The table is terminated by an entry
1108 * specifying mask 0. Returns the number of fatal interrupt conditions.
1110 static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg,
1111 unsigned int mask,
1112 const struct intr_info *acts,
1113 unsigned long *stats)
1115 int fatal = 0;
1116 unsigned int status = t3_read_reg(adapter, reg) & mask;
1118 for (; acts->mask; ++acts) {
1119 if (!(status & acts->mask))
1120 continue;
1121 if (acts->fatal) {
1122 fatal++;
1123 CH_ALERT(adapter, "%s (0x%x)\n",
1124 acts->msg, status & acts->mask);
1125 } else if (acts->msg)
1126 CH_WARN(adapter, "%s (0x%x)\n",
1127 acts->msg, status & acts->mask);
1128 if (acts->stat_idx >= 0)
1129 stats[acts->stat_idx]++;
1131 if (status) /* clear processed interrupts */
1132 t3_write_reg(adapter, reg, status);
1133 return fatal;
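/*
 * Sketch (editor's illustration): a minimal action table for the
 * handler above -- one fatal condition and one counted-only condition,
 * terminated by the required zero mask.  The F_/STAT_ names here are
 * placeholders, not real register bits:
 *
 *	static const struct intr_info example_intr_info[] = {
 *		{F_EXAMPLE_FATAL, "example fatal error", -1, 1},
 *		{F_EXAMPLE_COUNTED, NULL, STAT_EXAMPLE, 0},
 *		{0}
 *	};
 */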
1136 #define SGE_INTR_MASK (F_RSPQDISABLED)
1137 #define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
1138 F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
1139 F_NFASRCHFAIL)
1140 #define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
1141 #define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
1142 V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
1143 F_TXFIFO_UNDERRUN | F_RXFIFO_OVERFLOW)
1144 #define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
1145 F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
1146 F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
1147 F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \
1148 V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \
1149 V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */)
1150 #define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\
1151 F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
1152 /* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \
1153 V_BISTERR(M_BISTERR) | F_PEXERR)
1154 #define ULPRX_INTR_MASK F_PARERR
1155 #define ULPTX_INTR_MASK 0
1156 #define CPLSW_INTR_MASK (F_TP_FRAMING_ERROR | \
1157 F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
1158 F_ZERO_SWITCH_ERROR)
1159 #define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
1160 F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
1161 F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
1162 F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT)
1163 #define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
1164 V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
1165 V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
1166 #define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \
1167 V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \
1168 V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR))
1169 #define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \
1170 V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
1171 V_RXTPPARERRENB(M_RXTPPARERRENB) | \
1172 V_MCAPARERRENB(M_MCAPARERRENB))
1173 #define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
1174 F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
1175 F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
1176 F_MPS0 | F_CPL_SWITCH)
1179 * Interrupt handler for the PCIX1 module.
1181 static void pci_intr_handler(struct adapter *adapter)
1183 static const struct intr_info pcix1_intr_info[] = {
1184 { F_PEXERR, "PCI PEX error", -1, 1 },
1185 {F_MSTDETPARERR, "PCI master detected parity error", -1, 1},
1186 {F_SIGTARABT, "PCI signaled target abort", -1, 1},
1187 {F_RCVTARABT, "PCI received target abort", -1, 1},
1188 {F_RCVMSTABT, "PCI received master abort", -1, 1},
1189 {F_SIGSYSERR, "PCI signaled system error", -1, 1},
1190 {F_DETPARERR, "PCI detected parity error", -1, 1},
1191 {F_SPLCMPDIS, "PCI split completion discarded", -1, 1},
1192 {F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1},
1193 {F_RCVSPLCMPERR, "PCI received split completion error", -1,
1194 1},
1195 {F_DETCORECCERR, "PCI correctable ECC error",
1196 STAT_PCI_CORR_ECC, 0},
1197 {F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1},
1198 {F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
1199 {V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1,
1200 1},
1201 {V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1,
1202 1},
1203 {V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1,
1204 1},
1205 {V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
1206 "error", -1, 1},
1210 if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
1211 pcix1_intr_info, adapter->irq_stats))
1212 t3_fatal_err(adapter);
1216 * Interrupt handler for the PCIE module.
1218 static void pcie_intr_handler(struct adapter *adapter)
1220 static const struct intr_info pcie_intr_info[] = {
1221 {F_UNXSPLCPLERRR,
1222 "PCI unexpected split completion DMA read error", -1, 1},
1223 {F_UNXSPLCPLERRC,
1224 "PCI unexpected split completion DMA command error", -1, 1},
1225 {F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
1226 {F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1},
1227 {F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1},
1228 {F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1},
1229 {V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
1230 "PCI MSI-X table/PBA parity error", -1, 1},
1231 {V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1},
1235 if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
1236 pcie_intr_info, adapter->irq_stats))
1237 t3_fatal_err(adapter);
1241 * TP interrupt handler.
1243 static void tp_intr_handler(struct adapter *adapter)
1245 static const struct intr_info tp_intr_info[] = {
1246 {0xffffff, "TP parity error", -1, 1},
1247 {0x1000000, "TP out of Rx pages", -1, 1},
1248 {0x2000000, "TP out of Tx pages", -1, 1},
1252 if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
1253 tp_intr_info, NULL))
1254 t3_fatal_err(adapter);
1258 * CIM interrupt handler.
1260 static void cim_intr_handler(struct adapter *adapter)
1262 static const struct intr_info cim_intr_info[] = {
1263 {F_RSVDSPACEINT, "CIM reserved space write", -1, 1},
1264 {F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1},
1265 {F_FLASHRANGEINT, "CIM flash address out of range", -1, 1},
1266 {F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1},
1267 {F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1},
1268 {F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1},
1269 {F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1},
1270 {F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1},
1271 {F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1},
1272 {F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1},
1273 {F_BLKRDPLINT, "CIM block read from PL space", -1, 1},
1274 {F_BLKWRPLINT, "CIM block write to PL space", -1, 1},
1278 if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, 0xffffffff,
1279 cim_intr_info, NULL))
1280 t3_fatal_err(adapter);
1284 * ULP RX interrupt handler.
1286 static void ulprx_intr_handler(struct adapter *adapter)
1288 static const struct intr_info ulprx_intr_info[] = {
1289 {F_PARERR, "ULP RX parity error", -1, 1},
1293 if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
1294 ulprx_intr_info, NULL))
1295 t3_fatal_err(adapter);
1299 * ULP TX interrupt handler.
1301 static void ulptx_intr_handler(struct adapter *adapter)
1303 static const struct intr_info ulptx_intr_info[] = {
1304 {F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
1305 STAT_ULP_CH0_PBL_OOB, 0},
1306 {F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
1307 STAT_ULP_CH1_PBL_OOB, 0},
1311 if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff,
1312 ulptx_intr_info, adapter->irq_stats))
1313 t3_fatal_err(adapter);
1316 #define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \
1317 F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \
1318 F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \
1319 F_ICSPI1_TX_FRAMING_ERROR)
1320 #define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \
1321 F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \
1322 F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \
1323 F_OESPI1_OFIFO2X_TX_FRAMING_ERROR)
1326 * PM TX interrupt handler.
1328 static void pmtx_intr_handler(struct adapter *adapter)
1330 static const struct intr_info pmtx_intr_info[] = {
1331 {F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1},
1332 {ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1},
1333 {OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1},
1334 {V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
1335 "PMTX ispi parity error", -1, 1},
1336 {V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
1337 "PMTX ospi parity error", -1, 1},
1341 if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff,
1342 pmtx_intr_info, NULL))
1343 t3_fatal_err(adapter);
1346 #define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \
1347 F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \
1348 F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \
1349 F_IESPI1_TX_FRAMING_ERROR)
1350 #define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \
1351 F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \
1352 F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \
1353 F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)
1356 * PM RX interrupt handler.
1358 static void pmrx_intr_handler(struct adapter *adapter)
1360 static const struct intr_info pmrx_intr_info[] = {
1361 {F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1},
1362 {IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1},
1363 {OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1},
1364 {V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
1365 "PMRX ispi parity error", -1, 1},
1366 {V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
1367 "PMRX ospi parity error", -1, 1},
1371 if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff,
1372 pmrx_intr_info, NULL))
1373 t3_fatal_err(adapter);
1377 * CPL switch interrupt handler.
1379 static void cplsw_intr_handler(struct adapter *adapter)
1381 static const struct intr_info cplsw_intr_info[] = {
1382 /* { F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1 }, */
1383 {F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1},
1384 {F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1},
1385 {F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1},
1386 {F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1},
1390 if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff,
1391 cplsw_intr_info, NULL))
1392 t3_fatal_err(adapter);
1396 * MPS interrupt handler.
1398 static void mps_intr_handler(struct adapter *adapter)
1400 static const struct intr_info mps_intr_info[] = {
1401 {0x1ff, "MPS parity error", -1, 1},
1405 if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
1406 mps_intr_info, NULL))
1407 t3_fatal_err(adapter);
1410 #define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE)
1413 * MC7 interrupt handler.
1415 static void mc7_intr_handler(struct mc7 *mc7)
1417 struct adapter *adapter = mc7->adapter;
1418 u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE);
1420 if (cause & F_CE) {
1421 mc7->stats.corr_err++;
1422 CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, "
1423 "data 0x%x 0x%x 0x%x\n", mc7->name,
1424 t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR),
1425 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0),
1426 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1),
1427 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2));
1430 if (cause & F_UE) {
1431 mc7->stats.uncorr_err++;
1432 CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, "
1433 "data 0x%x 0x%x 0x%x\n", mc7->name,
1434 t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR),
1435 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0),
1436 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1),
1437 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2));
1440 if (G_PE(cause)) {
1441 mc7->stats.parity_err++;
1442 CH_ALERT(adapter, "%s MC7 parity error 0x%x\n",
1443 mc7->name, G_PE(cause));
1446 if (cause & F_AE) {
1447 u32 addr = 0;
1449 if (adapter->params.rev > 0)
1450 addr = t3_read_reg(adapter,
1451 mc7->offset + A_MC7_ERR_ADDR);
1452 mc7->stats.addr_err++;
1453 CH_ALERT(adapter, "%s MC7 address error: 0x%x\n",
1454 mc7->name, addr);
1457 if (cause & MC7_INTR_FATAL)
1458 t3_fatal_err(adapter);
1460 t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause);
1463 #define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
1464 V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR))
1466 * XGMAC interrupt handler.
1468 static int mac_intr_handler(struct adapter *adap, unsigned int idx)
1470 struct cmac *mac = &adap2pinfo(adap, idx)->mac;
1471 u32 cause = t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset);
1473 if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
1474 mac->stats.tx_fifo_parity_err++;
1475 CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx);
1477 if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) {
1478 mac->stats.rx_fifo_parity_err++;
1479 CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx);
1481 if (cause & F_TXFIFO_UNDERRUN)
1482 mac->stats.tx_fifo_urun++;
1483 if (cause & F_RXFIFO_OVERFLOW)
1484 mac->stats.rx_fifo_ovfl++;
1485 if (cause & V_SERDES_LOS(M_SERDES_LOS))
1486 mac->stats.serdes_signal_loss++;
1487 if (cause & F_XAUIPCSCTCERR)
1488 mac->stats.xaui_pcs_ctc_err++;
1489 if (cause & F_XAUIPCSALIGNCHANGE)
1490 mac->stats.xaui_pcs_align_change++;
1492 t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
1493 if (cause & XGM_INTR_FATAL)
1494 t3_fatal_err(adap);
1495 return cause != 0;
1499 * Interrupt handler for PHY events.
1501 int t3_phy_intr_handler(struct adapter *adapter)
1503 static const int intr_gpio_bits[] = { 8, 0x20 };
1505 u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);
1507 for_each_port(adapter, i) {
1508 if (cause & intr_gpio_bits[i]) {
1509 struct cphy *phy = &adap2pinfo(adapter, i)->phy;
1510 int phy_cause = phy->ops->intr_handler(phy);
1512 if (phy_cause & cphy_cause_link_change)
1513 t3_link_changed(adapter, i);
1514 if (phy_cause & cphy_cause_fifo_error)
1515 phy->fifo_errors++;
1519 t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause);
1520 return 0;
1524 * T3 slow path (non-data) interrupt handler.
1526 int t3_slow_intr_handler(struct adapter *adapter)
1528 u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0);
1530 cause &= adapter->slow_intr_mask;
1531 if (!cause)
1532 return 0;
1533 if (cause & F_PCIM0) {
1534 if (is_pcie(adapter))
1535 pcie_intr_handler(adapter);
1536 else
1537 pci_intr_handler(adapter);
1539 if (cause & F_SGE3)
1540 t3_sge_err_intr_handler(adapter);
1541 if (cause & F_MC7_PMRX)
1542 mc7_intr_handler(&adapter->pmrx);
1543 if (cause & F_MC7_PMTX)
1544 mc7_intr_handler(&adapter->pmtx);
1545 if (cause & F_MC7_CM)
1546 mc7_intr_handler(&adapter->cm);
1547 if (cause & F_CIM)
1548 cim_intr_handler(adapter);
1549 if (cause & F_TP1)
1550 tp_intr_handler(adapter);
1551 if (cause & F_ULP2_RX)
1552 ulprx_intr_handler(adapter);
1553 if (cause & F_ULP2_TX)
1554 ulptx_intr_handler(adapter);
1555 if (cause & F_PM1_RX)
1556 pmrx_intr_handler(adapter);
1557 if (cause & F_PM1_TX)
1558 pmtx_intr_handler(adapter);
1559 if (cause & F_CPL_SWITCH)
1560 cplsw_intr_handler(adapter);
1561 if (cause & F_MPS0)
1562 mps_intr_handler(adapter);
1563 if (cause & F_MC5A)
1564 t3_mc5_intr_handler(&adapter->mc5);
1565 if (cause & F_XGMAC0_0)
1566 mac_intr_handler(adapter, 0);
1567 if (cause & F_XGMAC0_1)
1568 mac_intr_handler(adapter, 1);
1569 if (cause & F_T3DBG)
1570 t3_os_ext_intr_handler(adapter);
1572 /* Clear the interrupts just processed. */
1573 t3_write_reg(adapter, A_PL_INT_CAUSE0, cause);
1574 t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
1575 return 1;
1579 * t3_intr_enable - enable interrupts
1580 * @adapter: the adapter whose interrupts should be enabled
1582 * Enable interrupts by setting the interrupt enable registers of the
1583 * various HW modules and then enabling the top-level interrupt
1584 * concentrator.
1586 void t3_intr_enable(struct adapter *adapter)
1588 static const struct addr_val_pair intr_en_avp[] = {
1589 {A_SG_INT_ENABLE, SGE_INTR_MASK},
1590 {A_MC7_INT_ENABLE, MC7_INTR_MASK},
1591 {A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
1592 MC7_INTR_MASK},
1593 {A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
1594 MC7_INTR_MASK},
1595 {A_MC5_DB_INT_ENABLE, MC5_INTR_MASK},
1596 {A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK},
1597 {A_TP_INT_ENABLE, 0x3bfffff},
1598 {A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK},
1599 {A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK},
1600 {A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK},
1601 {A_MPS_INT_ENABLE, MPS_INTR_MASK},
1604 adapter->slow_intr_mask = PL_INTR_MASK;
1606 t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0);
1608 if (adapter->params.rev > 0) {
1609 t3_write_reg(adapter, A_CPL_INTR_ENABLE,
1610 CPLSW_INTR_MASK | F_CIM_OVFL_ERROR);
1611 t3_write_reg(adapter, A_ULPTX_INT_ENABLE,
1612 ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 |
1613 F_PBL_BOUND_ERR_CH1);
1614 } else {
1615 t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK);
1616 t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
1619 t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW,
1620 adapter_info(adapter)->gpio_intr);
1621 t3_write_reg(adapter, A_T3DBG_INT_ENABLE,
1622 adapter_info(adapter)->gpio_intr);
1623 if (is_pcie(adapter))
1624 t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
1625 else
1626 t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK);
1627 t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask);
1628 t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
1632 * t3_intr_disable - disable a card's interrupts
1633 * @adapter: the adapter whose interrupts should be disabled
1635 * Disable interrupts. We only disable the top-level interrupt
1636 * concentrator and the SGE data interrupts.
1638 void t3_intr_disable(struct adapter *adapter)
1640 t3_write_reg(adapter, A_PL_INT_ENABLE0, 0);
1641 t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
1642 adapter->slow_intr_mask = 0;
1646 * t3_intr_clear - clear all interrupts
1647 * @adapter: the adapter whose interrupts should be cleared
1649 * Clears all interrupts.
1651 void t3_intr_clear(struct adapter *adapter)
1653 static const unsigned int cause_reg_addr[] = {
1654 A_SG_INT_CAUSE,
1655 A_SG_RSPQ_FL_STATUS,
1656 A_PCIX_INT_CAUSE,
1657 A_MC7_INT_CAUSE,
1658 A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
1659 A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
1660 A_CIM_HOST_INT_CAUSE,
1661 A_TP_INT_CAUSE,
1662 A_MC5_DB_INT_CAUSE,
1663 A_ULPRX_INT_CAUSE,
1664 A_ULPTX_INT_CAUSE,
1665 A_CPL_INTR_CAUSE,
1666 A_PM1_TX_INT_CAUSE,
1667 A_PM1_RX_INT_CAUSE,
1668 A_MPS_INT_CAUSE,
1669 A_T3DBG_INT_CAUSE,
1671 unsigned int i;
1673 /* Clear PHY and MAC interrupts for each port. */
1674 for_each_port(adapter, i)
1675 t3_port_intr_clear(adapter, i);
1677 for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
1678 t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);
1680 t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
1681 t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
1685 * t3_port_intr_enable - enable port-specific interrupts
1686 * @adapter: associated adapter
1687 * @idx: index of port whose interrupts should be enabled
1689 * Enable port-specific (i.e., MAC and PHY) interrupts for the given
1690 * adapter port.
1692 void t3_port_intr_enable(struct adapter *adapter, int idx)
1694 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
1696 t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), XGM_INTR_MASK);
1697 t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
1698 phy->ops->intr_enable(phy);
1702 * t3_port_intr_disable - disable port-specific interrupts
1703 * @adapter: associated adapter
1704 * @idx: index of port whose interrupts should be disabled
1706 * Disable port-specific (i.e., MAC and PHY) interrupts for the given
1707 * adapter port.
1709 void t3_port_intr_disable(struct adapter *adapter, int idx)
1711 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
1713 t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), 0);
1714 t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
1715 phy->ops->intr_disable(phy);
1719 * t3_port_intr_clear - clear port-specific interrupts
1720 * @adapter: associated adapter
1721 * @idx: index of port whose interrupts to clear
1723 * Clear port-specific (i.e., MAC and PHY) interrupts for the given
1724 * adapter port.
1726 void t3_port_intr_clear(struct adapter *adapter, int idx)
1728 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
1730 t3_write_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx), 0xffffffff);
1731 t3_read_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx)); /* flush */
1732 phy->ops->intr_clear(phy);
1736 * t3_sge_write_context - write an SGE context
1737 * @adapter: the adapter
1738 * @id: the context id
1739 * @type: the context type
1741 * Program an SGE context with the values already loaded in the
1742 * CONTEXT_DATA? registers.
1744 static int t3_sge_write_context(struct adapter *adapter, unsigned int id,
1745 unsigned int type)
1747 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
1748 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
1749 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
1750 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
1751 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
1752 V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
1753 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
1754 0, 5, 1);
1758 * t3_sge_init_ecntxt - initialize an SGE egress context
1759 * @adapter: the adapter to configure
1760 * @id: the context id
1761 * @gts_enable: whether to enable GTS for the context
1762 * @type: the egress context type
1763 * @respq: associated response queue
1764 * @base_addr: base address of queue
1765 * @size: number of queue entries
1766 * @token: uP token
1767 * @gen: initial generation value for the context
1768 * @cidx: consumer pointer
1770 * Initialize an SGE egress context and make it ready for use. If the
1771 * platform allows concurrent context operations, the caller is
1772 * responsible for appropriate locking.
1774 int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable,
1775 enum sge_context_type type, int respq, u64 base_addr,
1776 unsigned int size, unsigned int token, int gen,
1777 unsigned int cidx)
1779 unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM;
1781 if (base_addr & 0xfff) /* must be 4K aligned */
1782 return -EINVAL;
1783 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1784 return -EBUSY;
1786 base_addr >>= 12;
1787 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) |
1788 V_EC_CREDITS(credits) | V_EC_GTS(gts_enable));
1789 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) |
1790 V_EC_BASE_LO(base_addr & 0xffff));
1791 base_addr >>= 16;
1792 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, base_addr);
1793 base_addr >>= 32;
1794 t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
1795 V_EC_BASE_HI(base_addr & 0xf) | V_EC_RESPQ(respq) |
1796 V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) |
1797 F_EC_VALID);
1798 return t3_sge_write_context(adapter, id, F_EGRESS);
1802 * t3_sge_init_flcntxt - initialize an SGE free-buffer list context
1803 * @adapter: the adapter to configure
1804 * @id: the context id
1805 * @gts_enable: whether to enable GTS for the context
1806 * @base_addr: base address of queue
1807 * @size: number of queue entries
1808 * @bsize: size of each buffer for this queue
1809 * @cong_thres: threshold to signal congestion to upstream producers
1810 * @gen: initial generation value for the context
1811 * @cidx: consumer pointer
1813 * Initialize an SGE free list context and make it ready for use. The
1814 * caller is responsible for ensuring only one context operation occurs
1815 * at a time.
1817 int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id,
1818 int gts_enable, u64 base_addr, unsigned int size,
1819 unsigned int bsize, unsigned int cong_thres, int gen,
1820 unsigned int cidx)
1822 if (base_addr & 0xfff) /* must be 4K aligned */
1823 return -EINVAL;
1824 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1825 return -EBUSY;
1827 base_addr >>= 12;
1828 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, base_addr);
1829 base_addr >>= 32;
1830 t3_write_reg(adapter, A_SG_CONTEXT_DATA1,
1831 V_FL_BASE_HI((u32) base_addr) |
1832 V_FL_INDEX_LO(cidx & M_FL_INDEX_LO));
1833 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) |
1834 V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) |
1835 V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO));
1836 t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
1837 V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) |
1838 V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable));
1839 return t3_sge_write_context(adapter, id, F_FREELIST);
1843 * t3_sge_init_rspcntxt - initialize an SGE response queue context
1844 * @adapter: the adapter to configure
1845 * @id: the context id
1846 * @irq_vec_idx: MSI-X interrupt vector index, 0 if no MSI-X, -1 if no IRQ
1847 * @base_addr: base address of queue
1848 * @size: number of queue entries
1849 * @fl_thres: threshold for selecting the normal or jumbo free list
1850 * @gen: initial generation value for the context
1851 * @cidx: consumer pointer
1853 * Initialize an SGE response queue context and make it ready for use.
1854 * The caller is responsible for ensuring only one context operation
1855 * occurs at a time.
1857 int t3_sge_init_rspcntxt(struct adapter *adapter, unsigned int id,
1858 int irq_vec_idx, u64 base_addr, unsigned int size,
1859 unsigned int fl_thres, int gen, unsigned int cidx)
1861 unsigned int intr = 0;
1863 if (base_addr & 0xfff) /* must be 4K aligned */
1864 return -EINVAL;
1865 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1866 return -EBUSY;
1868 base_addr >>= 12;
1869 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) |
1870 V_CQ_INDEX(cidx));
1871 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
1872 base_addr >>= 32;
1873 if (irq_vec_idx >= 0)
1874 intr = V_RQ_MSI_VEC(irq_vec_idx) | F_RQ_INTR_EN;
1875 t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
1876 V_CQ_BASE_HI((u32) base_addr) | intr | V_RQ_GEN(gen));
1877 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres);
1878 return t3_sge_write_context(adapter, id, F_RESPONSEQ);
1882 * t3_sge_init_cqcntxt - initialize an SGE completion queue context
1883 * @adapter: the adapter to configure
1884 * @id: the context id
1885 * @base_addr: base address of queue
1886 * @size: number of queue entries
1887 * @rspq: response queue for async notifications
1888 * @ovfl_mode: CQ overflow mode
1889 * @credits: completion queue credits
1890 * @credit_thres: the credit threshold
1892 * Initialize an SGE completion queue context and make it ready for use.
1893 * The caller is responsible for ensuring only one context operation
1894 * occurs at a time.
1896 int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr,
1897 unsigned int size, int rspq, int ovfl_mode,
1898 unsigned int credits, unsigned int credit_thres)
1900 if (base_addr & 0xfff) /* must be 4K aligned */
1901 return -EINVAL;
1902 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1903 return -EBUSY;
1905 base_addr >>= 12;
1906 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size));
1907 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
1908 base_addr >>= 32;
1909 t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
1910 V_CQ_BASE_HI((u32) base_addr) | V_CQ_RSPQ(rspq) |
1911 V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode));
1912 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
1913 V_CQ_CREDIT_THRES(credit_thres));
1914 return t3_sge_write_context(adapter, id, F_CQ);
1918 * t3_sge_enable_ecntxt - enable/disable an SGE egress context
1919 * @adapter: the adapter
1920 * @id: the egress context id
1921 * @enable: enable (1) or disable (0) the context
1923 * Enable or disable an SGE egress context. The caller is responsible for
1924 * ensuring only one context operation occurs at a time.
1926 int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable)
1928 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1929 return -EBUSY;
1931 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
1932 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
1933 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
1934 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID);
1935 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable));
1936 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
1937 V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
1938 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
1939 0, 5, 1);
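/*
 * Hypothetical helper, shown only to illustrate the calling
 * convention: quiesce and re-enable an egress context, propagating
 * any -EBUSY/-EAGAIN reported by the context unit.
 */
static int example_restart_ecntxt(struct adapter *adap, unsigned int id)
{
        int ret = t3_sge_enable_ecntxt(adap, id, 0);    /* disable */

        if (ret)
                return ret;
        return t3_sge_enable_ecntxt(adap, id, 1);       /* re-enable */
}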
1943 * t3_sge_disable_fl - disable an SGE free-buffer list
1944 * @adapter: the adapter
1945 * @id: the free list context id
1947 * Disable an SGE free-buffer list. The caller is responsible for
1948 * ensuring only one context operation occurs at a time.
1950 int t3_sge_disable_fl(struct adapter *adapter, unsigned int id)
1952 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1953 return -EBUSY;
1955 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
1956 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
1957 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE));
1958 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
1959 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0);
1960 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
1961 V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id));
1962 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
1963 0, 5, 1);
1967 * t3_sge_disable_rspcntxt - disable an SGE response queue
1968 * @adapter: the adapter
1969 * @id: the response queue context id
1971 * Disable an SGE response queue. The caller is responsible for
1972 * ensuring only one context operation occurs at a time.
1974 int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id)
1976 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1977 return -EBUSY;
1979 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
1980 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
1981 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
1982 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
1983 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
1984 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
1985 V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id));
1986 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
1987 0, 5, 1);
1991 * t3_sge_disable_cqcntxt - disable an SGE completion queue
1992 * @adapter: the adapter
1993 * @id: the completion queue context id
1995 * Disable an SGE completion queue. The caller is responsible for
1996 * ensuring only one context operation occurs at a time.
1998 int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id)
2000 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2001 return -EBUSY;
2003 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2004 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2005 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2006 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2007 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2008 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2009 V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
2010 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2011 0, 5, 1);
2015 * t3_sge_cqcntxt_op - perform an operation on a completion queue context
2016 * @adapter: the adapter
2017 * @id: the context id
2018 * @op: the operation to perform
 * @credits: the number of CQ credits, for operations that take credits
2020 * Perform the selected operation on an SGE completion queue context.
2021 * The caller is responsible for ensuring only one context operation
2022 * occurs at a time.
2024 int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
2025 unsigned int credits)
2027 u32 val;
2029 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2030 return -EBUSY;
2032 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16);
2033 t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) |
2034 V_CONTEXT(id) | F_CQ);
2035 if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2036 0, 5, 1, &val))
2037 return -EIO;
2039 if (op >= 2 && op < 7) {
2040 if (adapter->params.rev > 0)
2041 return G_CQ_INDEX(val);
2043 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2044 V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id));
2045 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD,
2046 F_CONTEXT_CMD_BUSY, 0, 5, 1))
2047 return -EIO;
2048 return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0));
2050 return 0;
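/*
 * Calling-convention sketch only: the opcode values are hardware
 * defined and not spelled out here.  This hypothetical wrapper simply
 * retries once if the context unit reports busy.
 */
static int example_cq_op(struct adapter *adap, unsigned int id,
                         unsigned int op, unsigned int credits)
{
        int ret = t3_sge_cqcntxt_op(adap, id, op, credits);

        if (ret == -EBUSY) {
                udelay(10);     /* arbitrary backoff for the sketch */
                ret = t3_sge_cqcntxt_op(adap, id, op, credits);
        }
        return ret;
}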
2054 * t3_sge_read_context - read an SGE context
2055 * @type: the context type
2056 * @adapter: the adapter
2057 * @id: the context id
2058 * @data: holds the retrieved context
2060 * Read an SGE context of the specified type. The caller is responsible for
2061 * only one context operation occurs at a time.
2063 static int t3_sge_read_context(unsigned int type, struct adapter *adapter,
2064 unsigned int id, u32 data[4])
2066 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2067 return -EBUSY;
2069 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2070 V_CONTEXT_CMD_OPCODE(0) | type | V_CONTEXT(id));
2071 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 0,
2072 5, 1))
2073 return -EIO;
2074 data[0] = t3_read_reg(adapter, A_SG_CONTEXT_DATA0);
2075 data[1] = t3_read_reg(adapter, A_SG_CONTEXT_DATA1);
2076 data[2] = t3_read_reg(adapter, A_SG_CONTEXT_DATA2);
2077 data[3] = t3_read_reg(adapter, A_SG_CONTEXT_DATA3);
2078 return 0;
2082 * t3_sge_read_ecntxt - read an SGE egress context
2083 * @adapter: the adapter
2084 * @id: the context id
2085 * @data: holds the retrieved context
2087 * Read an SGE egress context. The caller is responsible for ensuring
2088 * only one context operation occurs at a time.
2090 int t3_sge_read_ecntxt(struct adapter *adapter, unsigned int id, u32 data[4])
2092 if (id >= 65536)
2093 return -EINVAL;
2094 return t3_sge_read_context(F_EGRESS, adapter, id, data);
2098 * t3_sge_read_cq - read an SGE CQ context
2099 * @adapter: the adapter
2100 * @id: the context id
2101 * @data: holds the retrieved context
2103 * Read an SGE CQ context. The caller is responsible for ensuring
2104 * only one context operation occurs at a time.
2106 int t3_sge_read_cq(struct adapter *adapter, unsigned int id, u32 data[4])
2108 if (id >= 65536)
2109 return -EINVAL;
2110 return t3_sge_read_context(F_CQ, adapter, id, data);
2114 * t3_sge_read_fl - read an SGE free-list context
2115 * @adapter: the adapter
2116 * @id: the context id
2117 * @data: holds the retrieved context
2119 * Read an SGE free-list context. The caller is responsible for ensuring
2120 * only one context operation occurs at a time.
2122 int t3_sge_read_fl(struct adapter *adapter, unsigned int id, u32 data[4])
2124 if (id >= SGE_QSETS * 2)
2125 return -EINVAL;
2126 return t3_sge_read_context(F_FREELIST, adapter, id, data);
2130 * t3_sge_read_rspq - read an SGE response queue context
2131 * @adapter: the adapter
2132 * @id: the context id
2133 * @data: holds the retrieved context
2135 * Read an SGE response queue context. The caller is responsible for
2136 * ensuring only one context operation occurs at a time.
2138 int t3_sge_read_rspq(struct adapter *adapter, unsigned int id, u32 data[4])
2140 if (id >= SGE_QSETS)
2141 return -EINVAL;
2142 return t3_sge_read_context(F_RESPONSEQ, adapter, id, data);
2146 * t3_config_rss - configure Rx packet steering
2147 * @adapter: the adapter
2148 * @rss_config: RSS settings (written to TP_RSS_CONFIG)
2149 * @cpus: values for the CPU lookup table (0xff terminated)
2150 * @rspq: values for the response queue lookup table (0xffff terminated)
2152 * Programs the receive packet steering logic. @cpus and @rspq provide
2153 * the values for the CPU and response queue lookup tables. If they
2154 * provide fewer values than the size of the tables, the supplied values
2155 * are used repeatedly until the tables are fully populated.
2157 void t3_config_rss(struct adapter *adapter, unsigned int rss_config,
2158 const u8 * cpus, const u16 *rspq)
2160 int i, j, cpu_idx = 0, q_idx = 0;
2162 if (cpus)
2163 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2164 u32 val = i << 16;
2166 for (j = 0; j < 2; ++j) {
2167 val |= (cpus[cpu_idx++] & 0x3f) << (8 * j);
2168 if (cpus[cpu_idx] == 0xff)
2169 cpu_idx = 0;
2171 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val);
2174 if (rspq)
2175 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2176 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2177 (i << 16) | rspq[q_idx++]);
2178 if (rspq[q_idx] == 0xffff)
2179 q_idx = 0;
2182 t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config);
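/*
 * Usage sketch with hypothetical tables: spread the RSS buckets over
 * two response queues.  Both arrays wrap at their terminators
 * (0xff / 0xffff), so short arrays tile the full lookup tables.  The
 * rss_config flag choice is illustrative only.
 */
static void example_config_rss(struct adapter *adap)
{
        static const u8 cpus[] = { 0, 1, 0xff };        /* 0xff ends */
        static const u16 rspq[] = { 0, 1, 0xffff };     /* 0xffff ends */

        t3_config_rss(adap, F_RQFEEDBACKENABLE | V_RRCPLCPUSIZE(6),
                      cpus, rspq);
}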
2186 * t3_read_rss - read the contents of the RSS tables
2187 * @adapter: the adapter
2188 * @lkup: holds the contents of the RSS lookup table
2189 * @map: holds the contents of the RSS map table
2191 * Reads the contents of the receive packet steering tables.
2193 int t3_read_rss(struct adapter *adapter, u8 * lkup, u16 *map)
2195 int i;
2196 u32 val;
2198 if (lkup)
2199 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2200 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE,
2201 0xffff0000 | i);
2202 val = t3_read_reg(adapter, A_TP_RSS_LKP_TABLE);
2203 if (!(val & 0x80000000))
2204 return -EAGAIN;
2205 *lkup++ = val;
2206 *lkup++ = (val >> 8);
2209 if (map)
2210 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2211 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2212 0xffff0000 | i);
2213 val = t3_read_reg(adapter, A_TP_RSS_MAP_TABLE);
2214 if (!(val & 0x80000000))
2215 return -EAGAIN;
2216 *map++ = val;
2218 return 0;
2222 * t3_tp_set_offload_mode - put TP in NIC/offload mode
2223 * @adap: the adapter
2224 * @enable: 1 to select offload mode, 0 for regular NIC
2226 * Switches TP to NIC/offload mode.
2228 void t3_tp_set_offload_mode(struct adapter *adap, int enable)
2230 if (is_offload(adap) || !enable)
2231 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE,
2232 V_NICMODE(!enable));
2236 * pm_num_pages - calculate the number of pages of the payload memory
2237 * @mem_size: the size of the payload memory
2238 * @pg_size: the size of each payload memory page
2240 * Calculate the number of pages, each of the given size, that fit in a
2241 * memory of the specified size, respecting the HW requirement that the
2242 * number of pages must be a multiple of 24.
2244 static inline unsigned int pm_num_pages(unsigned int mem_size,
2245 unsigned int pg_size)
2247 unsigned int n = mem_size / pg_size;
2249 return n - n % 24;
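/*
 * Worked example with illustrative numbers: a 100 MB channel with
 * 64 KB pages holds 100 * 2^20 / 2^16 = 1600 raw pages; 1600 % 24 = 16,
 * so pm_num_pages() returns 1584, the largest multiple of 24 that fits.
 */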
2252 #define mem_region(adap, start, size, reg) \
2253 t3_write_reg((adap), A_ ## reg, (start)); \
2254 start += size
2257 * partition_mem - partition memory and configure TP memory settings
2258 * @adap: the adapter
2259 * @p: the TP parameters
2261 * Partitions context and payload memory and configures TP's memory
2262 * registers.
2264 static void partition_mem(struct adapter *adap, const struct tp_params *p)
2266 unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5);
2267 unsigned int timers = 0, timers_shift = 22;
2269 if (adap->params.rev > 0) {
2270 if (tids <= 16 * 1024) {
2271 timers = 1;
2272 timers_shift = 16;
2273 } else if (tids <= 64 * 1024) {
2274 timers = 2;
2275 timers_shift = 18;
2276 } else if (tids <= 256 * 1024) {
2277 timers = 3;
2278 timers_shift = 20;
2282 t3_write_reg(adap, A_TP_PMM_SIZE,
2283 p->chan_rx_size | (p->chan_tx_size >> 16));
2285 t3_write_reg(adap, A_TP_PMM_TX_BASE, 0);
2286 t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size);
2287 t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs);
2288 t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX),
2289 V_TXDATAACKIDX(fls(p->tx_pg_size) - 12));
2291 t3_write_reg(adap, A_TP_PMM_RX_BASE, 0);
2292 t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size);
2293 t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs);
2295 pstructs = p->rx_num_pgs + p->tx_num_pgs;
2296 /* Add a bit of headroom and round down to a multiple of 24 */
2297 pstructs += 48;
2298 pstructs -= pstructs % 24;
2299 t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs);
2301 m = tids * TCB_SIZE;
2302 mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR);
2303 mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR);
2304 t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m);
2305 m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22);
2306 mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE);
2307 mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE);
2308 mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE);
2309 mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE);
2311 m = (m + 4095) & ~0xfff;
2312 t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m);
2313 t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m);
2315 tids = (p->cm_size - m - (3 << 20)) / 3072 - 32;
2316 m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
2317 adap->params.mc5.nfilters - adap->params.mc5.nroutes;
2318 if (tids < m)
2319 adap->params.mc5.nservers += m - tids;
2322 static inline void tp_wr_indirect(struct adapter *adap, unsigned int addr,
2323 u32 val)
2325 t3_write_reg(adap, A_TP_PIO_ADDR, addr);
2326 t3_write_reg(adap, A_TP_PIO_DATA, val);
2329 static void tp_config(struct adapter *adap, const struct tp_params *p)
2331 unsigned int v;
2333 t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU |
2334 F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD |
2335 F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
2336 t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
2337 F_MTUENABLE | V_WINDOWSCALEMODE(1) |
2338 V_TIMESTAMPSMODE(1) | V_SACKMODE(1) | V_SACKRX(1));
2339 t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
2340 V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
2341 V_BYTETHRESHOLD(16384) | V_MSSTHRESHOLD(2) |
2342 F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
2343 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_IPV6ENABLE | F_NICMODE,
2344 F_IPV6ENABLE | F_NICMODE);
2345 t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
2346 t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
2347 t3_set_reg_field(adap, A_TP_PARA_REG6,
2348 adap->params.rev > 0 ? F_ENABLEESND : F_T3A_ENABLEESND,
2351 v = t3_read_reg(adap, A_TP_PC_CONFIG);
2352 v &= ~(F_ENABLEEPCMDAFULL | F_ENABLEOCSPIFULL);
2353 t3_write_reg(adap, A_TP_PC_CONFIG, v | F_TXDEFERENABLE |
2354 F_MODULATEUNIONMODE | F_HEARBEATDACK |
2355 F_TXCONGESTIONMODE | F_RXCONGESTIONMODE);
2357 v = t3_read_reg(adap, A_TP_PC_CONFIG2);
2358 v &= ~F_CHDRAFULL;
2359 t3_write_reg(adap, A_TP_PC_CONFIG2, v);
2361 if (adap->params.rev > 0) {
2362 tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
2363 t3_set_reg_field(adap, A_TP_PARA_REG3, F_TXPACEAUTO,
2364 F_TXPACEAUTO);
2365 t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID);
2366 t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEAUTOSTRICT);
2367 } else
2368 t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);
2370 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0x12121212);
2371 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0x12121212);
2372 t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0x1212);
2375 /* Desired TP timer resolution in usec */
2376 #define TP_TMR_RES 50
2378 /* TCP timer values in ms */
2379 #define TP_DACK_TIMER 50
2380 #define TP_RTO_MIN 250
2383 * tp_set_timers - set TP timing parameters
2384 * @adap: the adapter to set
2385 * @core_clk: the core clock frequency in Hz
2387 * Set TP's timing parameters, such as the various timer resolutions and
2388 * the TCP timer values.
2390 static void tp_set_timers(struct adapter *adap, unsigned int core_clk)
2392 unsigned int tre = fls(core_clk / (1000000 / TP_TMR_RES)) - 1;
2393 unsigned int dack_re = fls(core_clk / 5000) - 1; /* 200us */
2394 unsigned int tstamp_re = fls(core_clk / 1000); /* 1ms, at least */
2395 unsigned int tps = core_clk >> tre;
2397 t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) |
2398 V_DELAYEDACKRESOLUTION(dack_re) |
2399 V_TIMESTAMPRESOLUTION(tstamp_re));
2400 t3_write_reg(adap, A_TP_DACK_TIMER,
2401 (core_clk >> dack_re) / (1000 / TP_DACK_TIMER));
2402 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100);
2403 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504);
2404 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908);
2405 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c);
2406 t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) |
2407 V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) |
2408 V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
2409 V_KEEPALIVEMAX(9));
2411 #define SECONDS * tps
2413 t3_write_reg(adap, A_TP_MSL, adap->params.rev > 0 ? 0 : 2 SECONDS);
2414 t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN));
2415 t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS);
2416 t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS);
2417 t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS);
2418 t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS);
2419 t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS);
2420 t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS);
2421 t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS);
2423 #undef SECONDS
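/*
 * Worked example of the resolution math above, for an illustrative
 * 200 MHz core clock: tre = fls(200000000 / 20000) - 1 = 13, so
 * tps = 200000000 >> 13, about 24414 ticks/s or ~41 us per tick, the
 * coarsest power-of-2 resolution not exceeding TP_TMR_RES (50 us).
 */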
2427 * t3_tp_set_coalescing_size - set receive coalescing size
2428 * @adap: the adapter
2429 * @size: the receive coalescing size
2430 * @psh: whether a set PSH bit should deliver coalesced data
2432 * Set the receive coalescing size and PSH bit handling.
2434 int t3_tp_set_coalescing_size(struct adapter *adap, unsigned int size, int psh)
2436 u32 val;
2438 if (size > MAX_RX_COALESCING_LEN)
2439 return -EINVAL;
2441 val = t3_read_reg(adap, A_TP_PARA_REG3);
2442 val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);
2444 if (size) {
2445 val |= F_RXCOALESCEENABLE;
2446 if (psh)
2447 val |= F_RXCOALESCEPSHEN;
2448 t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
2449 V_MAXRXDATA(MAX_RX_COALESCING_LEN));
2451 t3_write_reg(adap, A_TP_PARA_REG3, val);
2452 return 0;
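/*
 * For example, t3_init_hw() below enables coalescing capped at the
 * SGE's maximum packet size, with PSH delivery on:
 *
 *      t3_tp_set_coalescing_size(adapter,
 *                                min(adapter->params.sge.max_pkt_size,
 *                                    MAX_RX_COALESCING_LEN), 1);
 */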
2456 * t3_tp_set_max_rxsize - set the max receive size
2457 * @adap: the adapter
2458 * @size: the max receive size
2460 * Set TP's max receive size. This is the limit that applies when
2461 * receive coalescing is disabled.
2463 void t3_tp_set_max_rxsize(struct adapter *adap, unsigned int size)
2465 t3_write_reg(adap, A_TP_PARA_REG7,
2466 V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
2469 static void __devinit init_mtus(unsigned short mtus[])
2472 * See draft-mathis-plpmtud-00.txt for the values. The min is 88 so
2473 * it can accommodate max-size TCP/IP headers when SACK and timestamps
2474 * are enabled and still have at least 8 bytes of payload.
2476 mtus[0] = 88;
2477 mtus[1] = 256;
2478 mtus[2] = 512;
2479 mtus[3] = 576;
2480 mtus[4] = 808;
2481 mtus[5] = 1024;
2482 mtus[6] = 1280;
2483 mtus[7] = 1492;
2484 mtus[8] = 1500;
2485 mtus[9] = 2002;
2486 mtus[10] = 2048;
2487 mtus[11] = 4096;
2488 mtus[12] = 4352;
2489 mtus[13] = 8192;
2490 mtus[14] = 9000;
2491 mtus[15] = 9600;
2495 * Initial congestion control parameters.
2497 static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
2499 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
2500 a[9] = 2;
2501 a[10] = 3;
2502 a[11] = 4;
2503 a[12] = 5;
2504 a[13] = 6;
2505 a[14] = 7;
2506 a[15] = 8;
2507 a[16] = 9;
2508 a[17] = 10;
2509 a[18] = 14;
2510 a[19] = 17;
2511 a[20] = 21;
2512 a[21] = 25;
2513 a[22] = 30;
2514 a[23] = 35;
2515 a[24] = 45;
2516 a[25] = 60;
2517 a[26] = 80;
2518 a[27] = 100;
2519 a[28] = 200;
2520 a[29] = 300;
2521 a[30] = 400;
2522 a[31] = 500;
2524 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
2525 b[9] = b[10] = 1;
2526 b[11] = b[12] = 2;
2527 b[13] = b[14] = b[15] = b[16] = 3;
2528 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
2529 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
2530 b[28] = b[29] = 6;
2531 b[30] = b[31] = 7;
2534 /* The minimum additive increment value for the congestion control table */
2535 #define CC_MIN_INCR 2U
2538 * t3_load_mtus - write the MTU and congestion control HW tables
2539 * @adap: the adapter
2540 * @mtus: the unrestricted values for the MTU table
2541 * @alpha: the values for the congestion control alpha parameter
2542 * @beta: the values for the congestion control beta parameter
2543 * @mtu_cap: the maximum permitted effective MTU
2545 * Write the MTU table with the supplied MTUs, capping each at @mtu_cap.
2546 * Update the high-speed congestion control table with the supplied alpha,
2547 * beta, and MTUs.
2549 void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS],
2550 unsigned short alpha[NCCTRL_WIN],
2551 unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap)
2553 static const unsigned int avg_pkts[NCCTRL_WIN] = {
2554 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
2555 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
2556 28672, 40960, 57344, 81920, 114688, 163840, 229376
2559 unsigned int i, w;
2561 for (i = 0; i < NMTUS; ++i) {
2562 unsigned int mtu = min(mtus[i], mtu_cap);
2563 unsigned int log2 = fls(mtu);
2565 if (!(mtu & ((1 << log2) >> 2))) /* round */
2566 log2--;
2567 t3_write_reg(adap, A_TP_MTU_TABLE,
2568 (i << 24) | (log2 << 16) | mtu);
2570 for (w = 0; w < NCCTRL_WIN; ++w) {
2571 unsigned int inc;
2573 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
2574 CC_MIN_INCR);
2576 t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
2577 (w << 16) | (beta[w] << 13) | inc);
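/*
 * Worked example for one entry, with illustrative inputs: for
 * mtu = 1500, fls(1500) = 11 and bit 9 (512) of 1500 is clear, so the
 * stored log2 rounds down to 10.  For a window with alpha = 2 and
 * avg_pkts = 80 the additive increment is
 * max((1500 - 40) * 2 / 80, CC_MIN_INCR) = 36 bytes.
 */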
2583 * t3_read_hw_mtus - returns the values in the HW MTU table
2584 * @adap: the adapter
2585 * @mtus: where to store the HW MTU values
2587 * Reads the HW MTU table.
2589 void t3_read_hw_mtus(struct adapter *adap, unsigned short mtus[NMTUS])
2591 int i;
2593 for (i = 0; i < NMTUS; ++i) {
2594 unsigned int val;
2596 t3_write_reg(adap, A_TP_MTU_TABLE, 0xff000000 | i);
2597 val = t3_read_reg(adap, A_TP_MTU_TABLE);
2598 mtus[i] = val & 0x3fff;
2603 * t3_get_cong_cntl_tab - reads the congestion control table
2604 * @adap: the adapter
2605 * @incr: where to store the additive increment values
2607 * Reads the additive increments programmed into the HW congestion
2608 * control table.
2610 void t3_get_cong_cntl_tab(struct adapter *adap,
2611 unsigned short incr[NMTUS][NCCTRL_WIN])
2613 unsigned int mtu, w;
2615 for (mtu = 0; mtu < NMTUS; ++mtu)
2616 for (w = 0; w < NCCTRL_WIN; ++w) {
2617 t3_write_reg(adap, A_TP_CCTRL_TABLE,
2618 0xffff0000 | (mtu << 5) | w);
2619 incr[mtu][w] = t3_read_reg(adap, A_TP_CCTRL_TABLE) &
2620 0x1fff;
2625 * t3_tp_get_mib_stats - read TP's MIB counters
2626 * @adap: the adapter
2627 * @tps: holds the returned counter values
2629 * Returns the values of TP's MIB counters.
2631 void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps)
2633 t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *) tps,
2634 sizeof(*tps) / sizeof(u32), 0);
2637 #define ulp_region(adap, name, start, len) \
2638 t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \
2639 t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \
2640 (start) + (len) - 1); \
2641 start += len
2643 #define ulptx_region(adap, name, start, len) \
2644 t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \
2645 t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \
2646 (start) + (len) - 1)
2648 static void ulp_config(struct adapter *adap, const struct tp_params *p)
2650 unsigned int m = p->chan_rx_size;
2652 ulp_region(adap, ISCSI, m, p->chan_rx_size / 8);
2653 ulp_region(adap, TDDP, m, p->chan_rx_size / 8);
2654 ulptx_region(adap, TPT, m, p->chan_rx_size / 4);
2655 ulp_region(adap, STAG, m, p->chan_rx_size / 4);
2656 ulp_region(adap, RQ, m, p->chan_rx_size / 4);
2657 ulptx_region(adap, PBL, m, p->chan_rx_size / 4);
2658 ulp_region(adap, PBL, m, p->chan_rx_size / 4);
2659 t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
2662 void t3_config_trace_filter(struct adapter *adapter,
2663 const struct trace_params *tp, int filter_index,
2664 int invert, int enable)
2666 u32 addr, key[4], mask[4];
2668 key[0] = tp->sport | (tp->sip << 16);
2669 key[1] = (tp->sip >> 16) | (tp->dport << 16);
2670 key[2] = tp->dip;
2671 key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20);
2673 mask[0] = tp->sport_mask | (tp->sip_mask << 16);
2674 mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16);
2675 mask[2] = tp->dip_mask;
2676 mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20);
2678 if (invert)
2679 key[3] |= (1 << 29);
2680 if (enable)
2681 key[3] |= (1 << 28);
2683 addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
2684 tp_wr_indirect(adapter, addr++, key[0]);
2685 tp_wr_indirect(adapter, addr++, mask[0]);
2686 tp_wr_indirect(adapter, addr++, key[1]);
2687 tp_wr_indirect(adapter, addr++, mask[1]);
2688 tp_wr_indirect(adapter, addr++, key[2]);
2689 tp_wr_indirect(adapter, addr++, mask[2]);
2690 tp_wr_indirect(adapter, addr++, key[3]);
2691 tp_wr_indirect(adapter, addr, mask[3]);
2692 t3_read_reg(adapter, A_TP_PIO_DATA);
2696 * t3_config_sched - configure a HW traffic scheduler
2697 * @adap: the adapter
2698 * @kbps: target rate in Kbps
2699 * @sched: the scheduler index
2701 * Configure a HW scheduler for the target rate.
2703 int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched)
2705 unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
2706 unsigned int clk = adap->params.vpd.cclk * 1000;
2707 unsigned int selected_cpt = 0, selected_bpt = 0;
2709 if (kbps > 0) {
2710 kbps *= 125; /* -> bytes */
2711 for (cpt = 1; cpt <= 255; cpt++) {
2712 tps = clk / cpt;
2713 bpt = (kbps + tps / 2) / tps;
2714 if (bpt > 0 && bpt <= 255) {
2715 v = bpt * tps;
2716 delta = v >= kbps ? v - kbps : kbps - v;
2717 if (delta <= mindelta) {
2718 mindelta = delta;
2719 selected_cpt = cpt;
2720 selected_bpt = bpt;
2722 } else if (selected_cpt)
2723 break;
2725 if (!selected_cpt)
2726 return -EINVAL;
2728 t3_write_reg(adap, A_TP_TM_PIO_ADDR,
2729 A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
2730 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
2731 if (sched & 1)
2732 v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
2733 else
2734 v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
2735 t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
2736 return 0;
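/*
 * Worked example with an illustrative 200 MHz core clock and a 1 Gbps
 * target (kbps = 1000000, i.e. 125000000 bytes/s).  Several exact
 * pairs exist and ties keep updating, so the search ends on the last
 * exact one, cpt = 200, bpt = 125: 125 bytes every 200 clocks (1 us)
 * is exactly 125000000 bytes/s.
 */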
2739 static int tp_init(struct adapter *adap, const struct tp_params *p)
2741 int busy = 0;
2743 tp_config(adap, p);
2744 t3_set_vlan_accel(adap, 3, 0);
2746 if (is_offload(adap)) {
2747 tp_set_timers(adap, adap->params.vpd.cclk * 1000);
2748 t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE);
2749 busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE,
2750 0, 1000, 5);
2751 if (busy)
2752 CH_ERR(adap, "TP initialization timed out\n");
2755 if (!busy)
2756 t3_write_reg(adap, A_TP_RESET, F_TPRESET);
2757 return busy;
2760 int t3_mps_set_active_ports(struct adapter *adap, unsigned int port_mask)
2762 if (port_mask & ~((1 << adap->params.nports) - 1))
2763 return -EINVAL;
2764 t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE | F_PORT0ACTIVE,
2765 port_mask << S_PORT0ACTIVE);
2766 return 0;
2770 * Perform the bits of HW initialization that are dependent on the number
2771 * of available ports.
2773 static void init_hw_for_avail_ports(struct adapter *adap, int nports)
2775 int i;
2777 if (nports == 1) {
2778 t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
2779 t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
2780 t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_TPTXPORT0EN |
2781 F_PORT0ACTIVE | F_ENFORCEPKT);
2782 t3_write_reg(adap, A_PM1_TX_CFG, 0xc000c000);
2783 } else {
2784 t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
2785 t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
2786 t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
2787 V_D1_WEIGHT(16) | V_D0_WEIGHT(16));
2788 t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN |
2789 F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE |
2790 F_ENFORCEPKT);
2791 t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000);
2792 t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE);
2793 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
2794 V_TX_MOD_QUEUE_REQ_MAP(0xaa));
2795 for (i = 0; i < 16; i++)
2796 t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE,
2797 (i << 16) | 0x1010);
2801 static int calibrate_xgm(struct adapter *adapter)
2803 if (uses_xaui(adapter)) {
2804 unsigned int v, i;
2806 for (i = 0; i < 5; ++i) {
2807 t3_write_reg(adapter, A_XGM_XAUI_IMP, 0);
2808 t3_read_reg(adapter, A_XGM_XAUI_IMP);
2809 msleep(1);
2810 v = t3_read_reg(adapter, A_XGM_XAUI_IMP);
2811 if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) {
2812 t3_write_reg(adapter, A_XGM_XAUI_IMP,
2813 V_XAUIIMP(G_CALIMP(v) >> 2));
2814 return 0;
2817 CH_ERR(adapter, "MAC calibration failed\n");
2818 return -1;
2819 } else {
2820 t3_write_reg(adapter, A_XGM_RGMII_IMP,
2821 V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
2822 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
2823 F_XGM_IMPSETUPDATE);
2825 return 0;
2828 static void calibrate_xgm_t3b(struct adapter *adapter)
2830 if (!uses_xaui(adapter)) {
2831 t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET |
2832 F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
2833 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0);
2834 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0,
2835 F_XGM_IMPSETUPDATE);
2836 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
2838 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0);
2839 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE);
2843 struct mc7_timing_params {
2844 unsigned char ActToPreDly;
2845 unsigned char ActToRdWrDly;
2846 unsigned char PreCyc;
2847 unsigned char RefCyc[5];
2848 unsigned char BkCyc;
2849 unsigned char WrToRdDly;
2850 unsigned char RdToWrDly;
2854 * Write a value to a register and check that the write completed. These
2855 * writes normally complete in a cycle or two, so one read should suffice.
2856 * The very first read exists to flush the posted write to the device.
2858 static int wrreg_wait(struct adapter *adapter, unsigned int addr, u32 val)
2860 t3_write_reg(adapter, addr, val);
2861 t3_read_reg(adapter, addr); /* flush */
2862 if (!(t3_read_reg(adapter, addr) & F_BUSY))
2863 return 0;
2864 CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr);
2865 return -EIO;
2868 static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
2870 static const unsigned int mc7_mode[] = {
2871 0x632, 0x642, 0x652, 0x432, 0x442
2873 static const struct mc7_timing_params mc7_timings[] = {
2874 {12, 3, 4, {20, 28, 34, 52, 0}, 15, 6, 4},
2875 {12, 4, 5, {20, 28, 34, 52, 0}, 16, 7, 4},
2876 {12, 5, 6, {20, 28, 34, 52, 0}, 17, 8, 4},
2877 {9, 3, 4, {15, 21, 26, 39, 0}, 12, 6, 4},
2878 {9, 4, 5, {15, 21, 26, 39, 0}, 13, 7, 4}
2881 u32 val;
2882 unsigned int width, density, slow, attempts;
2883 struct adapter *adapter = mc7->adapter;
2884 const struct mc7_timing_params *p = &mc7_timings[mem_type];
2886 val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
2887 slow = val & F_SLOW;
2888 width = G_WIDTH(val);
2889 density = G_DEN(val);
2891 t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN);
2892 val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
2893 msleep(1);
2895 if (!slow) {
2896 t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN);
2897 t3_read_reg(adapter, mc7->offset + A_MC7_CAL);
2898 msleep(1);
2899 if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) &
2900 (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) {
2901 CH_ERR(adapter, "%s MC7 calibration timed out\n",
2902 mc7->name);
2903 goto out_fail;
2907 t3_write_reg(adapter, mc7->offset + A_MC7_PARM,
2908 V_ACTTOPREDLY(p->ActToPreDly) |
2909 V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) |
2910 V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) |
2911 V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly));
2913 t3_write_reg(adapter, mc7->offset + A_MC7_CFG,
2914 val | F_CLKEN | F_TERM150);
2915 t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
2917 if (!slow)
2918 t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB,
2919 F_DLLENB);
2920 udelay(1);
2922 val = slow ? 3 : 6;
2923 if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
2924 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) ||
2925 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) ||
2926 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
2927 goto out_fail;
2929 if (!slow) {
2930 t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100);
2931 t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLRST, 0);
2932 udelay(5);
2935 if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
2936 wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
2937 wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
2938 wrreg_wait(adapter, mc7->offset + A_MC7_MODE,
2939 mc7_mode[mem_type]) ||
2940 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) ||
2941 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
2942 goto out_fail;
2944 /* clock value is in KHz */
2945 mc7_clock = mc7_clock * 7812 + mc7_clock / 2; /* ns */
2946 mc7_clock /= 1000000; /* KHz->MHz, ns->us */
2948 t3_write_reg(adapter, mc7->offset + A_MC7_REF,
2949 F_PERREFEN | V_PREREFDIV(mc7_clock));
2950 t3_read_reg(adapter, mc7->offset + A_MC7_REF); /* flush */
2952 t3_write_reg(adapter, mc7->offset + A_MC7_ECC, F_ECCGENEN | F_ECCCHKEN);
2953 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0);
2954 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0);
2955 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END,
2956 (mc7->size << width) - 1);
2957 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1));
2958 t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP); /* flush */
2960 attempts = 50;
2961 do {
2962 msleep(250);
2963 val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);
2964 } while ((val & F_BUSY) && --attempts);
2965 if (val & F_BUSY) {
2966 CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name);
2967 goto out_fail;
2970 /* Enable normal memory accesses. */
2971 t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY);
2972 return 0;
2974 out_fail:
2975 return -1;
2978 static void config_pcie(struct adapter *adap)
2980 static const u16 ack_lat[4][6] = {
2981 {237, 416, 559, 1071, 2095, 4143},
2982 {128, 217, 289, 545, 1057, 2081},
2983 {73, 118, 154, 282, 538, 1050},
2984 {67, 107, 86, 150, 278, 534}
2986 static const u16 rpl_tmr[4][6] = {
2987 {711, 1248, 1677, 3213, 6285, 12429},
2988 {384, 651, 867, 1635, 3171, 6243},
2989 {219, 354, 462, 846, 1614, 3150},
2990 {201, 321, 258, 450, 834, 1602}
2993 u16 val;
2994 unsigned int log2_width, pldsize;
2995 unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;
2997 pci_read_config_word(adap->pdev,
2998 adap->params.pci.pcie_cap_addr + PCI_EXP_DEVCTL,
2999 &val);
3000 pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
3001 pci_read_config_word(adap->pdev,
3002 adap->params.pci.pcie_cap_addr + PCI_EXP_LNKCTL,
3003 &val);
3005 fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
3006 fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
3007 G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
3008 log2_width = fls(adap->params.pci.width) - 1;
3009 acklat = ack_lat[log2_width][pldsize];
3010 if (val & 1) /* check LOsEnable */
3011 acklat += fst_trn_tx * 4;
3012 rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;
3014 if (adap->params.rev == 0)
3015 t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
3016 V_T3A_ACKLAT(M_T3A_ACKLAT),
3017 V_T3A_ACKLAT(acklat));
3018 else
3019 t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
3020 V_ACKLAT(acklat));
3022 t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
3023 V_REPLAYLMT(rpllmt));
3025 t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
3026 t3_set_reg_field(adap, A_PCIE_CFG, F_PCIE_CLIDECEN, F_PCIE_CLIDECEN);
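/*
 * Worked lookup with illustrative parameters: an x8 link gives
 * log2_width = fls(8) - 1 = 3 and a 256-byte max payload gives
 * pldsize = 1, so the base values are acklat = ack_lat[3][1] = 107
 * and rpllmt starts from rpl_tmr[3][1] = 321, each then padded by
 * 4 per fast-training sequence where applicable.
 */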
3030 * Initialize and configure T3 HW modules. This performs the
3031 * initialization steps that need to be done once after a card is reset.
3032 * MAC and PHY initialization is handled separately whenever a port is enabled.
3034 * fw_params are passed to FW and their values are platform dependent.  Only the
3035 * top 8 bits are available for use; the rest must be 0.
3037 int t3_init_hw(struct adapter *adapter, u32 fw_params)
3039 int err = -EIO, attempts = 100;
3040 const struct vpd_params *vpd = &adapter->params.vpd;
3042 if (adapter->params.rev > 0)
3043 calibrate_xgm_t3b(adapter);
3044 else if (calibrate_xgm(adapter))
3045 goto out_err;
3047 if (vpd->mclk) {
3048 partition_mem(adapter, &adapter->params.tp);
3050 if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
3051 mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
3052 mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
3053 t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
3054 adapter->params.mc5.nfilters,
3055 adapter->params.mc5.nroutes))
3056 goto out_err;
3059 if (tp_init(adapter, &adapter->params.tp))
3060 goto out_err;
3062 t3_tp_set_coalescing_size(adapter,
3063 min(adapter->params.sge.max_pkt_size,
3064 MAX_RX_COALESCING_LEN), 1);
3065 t3_tp_set_max_rxsize(adapter,
3066 min(adapter->params.sge.max_pkt_size, 16384U));
3067 ulp_config(adapter, &adapter->params.tp);
3069 if (is_pcie(adapter))
3070 config_pcie(adapter);
3071 else
3072 t3_set_reg_field(adapter, A_PCIX_CFG, 0, F_CLIDECEN);
3074 t3_write_reg(adapter, A_PM1_RX_CFG, 0xf000f000);
3075 init_hw_for_avail_ports(adapter, adapter->params.nports);
3076 t3_sge_init(adapter, &adapter->params.sge);
3078 t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
3079 t3_write_reg(adapter, A_CIM_BOOT_CFG,
3080 V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
3081 t3_read_reg(adapter, A_CIM_BOOT_CFG); /* flush */
3083 do { /* wait for uP to initialize */
3084 msleep(20);
3085 } while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
3086 if (!attempts)
3087 goto out_err;
3089 err = 0;
3090 out_err:
3091 return err;
3095 * get_pci_mode - determine a card's PCI mode
3096 * @adapter: the adapter
3097 * @p: where to store the PCI settings
3099 * Determines a card's PCI mode and associated parameters, such as speed
3100 * and width.
3102 static void __devinit get_pci_mode(struct adapter *adapter,
3103 struct pci_params *p)
3105 static unsigned short speed_map[] = { 33, 66, 100, 133 };
3106 u32 pci_mode, pcie_cap;
3108 pcie_cap = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
3109 if (pcie_cap) {
3110 u16 val;
3112 p->variant = PCI_VARIANT_PCIE;
3113 p->pcie_cap_addr = pcie_cap;
3114 pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA,
3115 &val);
3116 p->width = (val >> 4) & 0x3f;
3117 return;
3120 pci_mode = t3_read_reg(adapter, A_PCIX_MODE);
3121 p->speed = speed_map[G_PCLKRANGE(pci_mode)];
3122 p->width = (pci_mode & F_64BIT) ? 64 : 32;
3123 pci_mode = G_PCIXINITPAT(pci_mode);
3124 if (pci_mode == 0)
3125 p->variant = PCI_VARIANT_PCI;
3126 else if (pci_mode < 4)
3127 p->variant = PCI_VARIANT_PCIX_MODE1_PARITY;
3128 else if (pci_mode < 8)
3129 p->variant = PCI_VARIANT_PCIX_MODE1_ECC;
3130 else
3131 p->variant = PCI_VARIANT_PCIX_266_MODE2;
3135 * init_link_config - initialize a link's SW state
3136 * @lc: structure holding the link state
3137 * @ai: information about the current card
3139 * Initializes the SW state maintained for each link, including the link's
3140 * capabilities and default speed/duplex/flow-control/autonegotiation
3141 * settings.
3143 static void __devinit init_link_config(struct link_config *lc,
3144 unsigned int caps)
3146 lc->supported = caps;
3147 lc->requested_speed = lc->speed = SPEED_INVALID;
3148 lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
3149 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
3150 if (lc->supported & SUPPORTED_Autoneg) {
3151 lc->advertising = lc->supported;
3152 lc->autoneg = AUTONEG_ENABLE;
3153 lc->requested_fc |= PAUSE_AUTONEG;
3154 } else {
3155 lc->advertising = 0;
3156 lc->autoneg = AUTONEG_DISABLE;
3161 * mc7_calc_size - calculate MC7 memory size
3162 * @cfg: the MC7 configuration
3164 * Calculates the size of an MC7 memory in bytes from the value of its
3165 * configuration register.
3167 static unsigned int __devinit mc7_calc_size(u32 cfg)
3169 unsigned int width = G_WIDTH(cfg);
3170 unsigned int banks = !!(cfg & F_BKS) + 1;
3171 unsigned int org = !!(cfg & F_ORG) + 1;
3172 unsigned int density = G_DEN(cfg);
3173 unsigned int MBs = ((256 << density) * banks) / (org << width);
3175 return MBs << 20;
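/*
 * Worked example with illustrative field values (as decoded from the
 * CFG register by G_WIDTH/G_DEN etc.): density = 1, banks = 2,
 * org = 1, width = 2 gives MBs = ((256 << 1) * 2) / (1 << 2) = 256,
 * i.e. a 256 MB part.
 */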
3178 static void __devinit mc7_prep(struct adapter *adapter, struct mc7 *mc7,
3179 unsigned int base_addr, const char *name)
3181 u32 cfg;
3183 mc7->adapter = adapter;
3184 mc7->name = name;
3185 mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
3186 cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
3187 mc7->size = mc7_calc_size(cfg);
3188 mc7->width = G_WIDTH(cfg);
3191 void mac_prep(struct cmac *mac, struct adapter *adapter, int index)
3193 mac->adapter = adapter;
3194 mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index;
3195 mac->nucast = 1;
3197 if (adapter->params.rev == 0 && uses_xaui(adapter)) {
3198 t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset,
3199 is_10G(adapter) ? 0x2901c04 : 0x2301c04);
3200 t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset,
3201 F_ENRGMII, 0);
3205 void early_hw_init(struct adapter *adapter, const struct adapter_info *ai)
3207 u32 val = V_PORTSPEED(is_10G(adapter) ? 3 : 2);
3209 mi1_init(adapter, ai);
3210 t3_write_reg(adapter, A_I2C_CFG, /* set for 80KHz */
3211 V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
3212 t3_write_reg(adapter, A_T3DBG_GPIO_EN,
3213 ai->gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
3215 if (adapter->params.rev == 0 || !uses_xaui(adapter))
3216 val |= F_ENRGMII;
3218 /* Enable MAC clocks so we can access the registers */
3219 t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3220 t3_read_reg(adapter, A_XGM_PORT_CFG);
3222 val |= F_CLKDIVRESET_;
3223 t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3224 t3_read_reg(adapter, A_XGM_PORT_CFG);
3225 t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val);
3226 t3_read_reg(adapter, A_XGM_PORT_CFG);
3230 * Reset the adapter. PCIe cards lose their config space during reset, PCI-X
3231 * ones don't.
3233 int t3_reset_adapter(struct adapter *adapter)
3235 int i;
3236 uint16_t devid = 0;
3238 if (is_pcie(adapter))
3239 pci_save_state(adapter->pdev);
3240 t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);
3243 * Delay to give the device some time to reset fully.
3244 * XXX The delay time should be modified.
3246 for (i = 0; i < 10; i++) {
3247 msleep(50);
3248 pci_read_config_word(adapter->pdev, 0x00, &devid);
3249 if (devid == 0x1425)
3250 break;
3253 if (devid != 0x1425)
3254 return -1;
3256 if (is_pcie(adapter))
3257 pci_restore_state(adapter->pdev);
3258 return 0;
3262 * Initialize adapter SW state for the various HW modules, set initial values
3263 * for some adapter tunables, take PHYs out of reset, and initialize the MDIO
3264 * interface.
3266 int __devinit t3_prep_adapter(struct adapter *adapter,
3267 const struct adapter_info *ai, int reset)
3269 int ret;
3270 unsigned int i, j = 0;
3272 get_pci_mode(adapter, &adapter->params.pci);
3274 adapter->params.info = ai;
3275 adapter->params.nports = ai->nports;
3276 adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
3277 adapter->params.linkpoll_period = 0;
3278 adapter->params.stats_update_period = is_10G(adapter) ?
3279 MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
3280 adapter->params.pci.vpd_cap_addr =
3281 pci_find_capability(adapter->pdev, PCI_CAP_ID_VPD);
3282 ret = get_vpd_params(adapter, &adapter->params.vpd);
3283 if (ret < 0)
3284 return ret;
3286 if (reset && t3_reset_adapter(adapter))
3287 return -1;
3289 t3_sge_prep(adapter, &adapter->params.sge);
3291 if (adapter->params.vpd.mclk) {
3292 struct tp_params *p = &adapter->params.tp;
3294 mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX");
3295 mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
3296 mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");
3298 p->nchan = ai->nports;
3299 p->pmrx_size = t3_mc7_size(&adapter->pmrx);
3300 p->pmtx_size = t3_mc7_size(&adapter->pmtx);
3301 p->cm_size = t3_mc7_size(&adapter->cm);
3302 p->chan_rx_size = p->pmrx_size / 2; /* only 1 Rx channel */
3303 p->chan_tx_size = p->pmtx_size / p->nchan;
3304 p->rx_pg_size = 64 * 1024;
3305 p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
3306 p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
3307 p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
3308 p->ntimer_qs = p->cm_size >= (128 << 20) ||
3309 adapter->params.rev > 0 ? 12 : 6;
3311 adapter->params.mc5.nservers = DEFAULT_NSERVERS;
3312 adapter->params.mc5.nfilters = adapter->params.rev > 0 ?
3313 DEFAULT_NFILTERS : 0;
3314 adapter->params.mc5.nroutes = 0;
3315 t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT);
3317 init_mtus(adapter->params.mtus);
3318 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
3321 early_hw_init(adapter, ai);
3323 for_each_port(adapter, i) {
3324 u8 hw_addr[6];
3325 struct port_info *p = adap2pinfo(adapter, i);
3327 while (!adapter->params.vpd.port_type[j])
3328 ++j;
3330 p->port_type = &port_types[adapter->params.vpd.port_type[j]];
3331 p->port_type->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
3332 ai->mdio_ops);
3333 mac_prep(&p->mac, adapter, j);
3334 ++j;
3337 * The VPD EEPROM stores the base Ethernet address for the
3338 * card. A port's address is derived from the base by adding
3339 * the port's index to the base's low octet.
3341 memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
3342 hw_addr[5] = adapter->params.vpd.eth_base[5] + i;
3344 memcpy(adapter->port[i]->dev_addr, hw_addr,
3345 ETH_ALEN);
3346 memcpy(adapter->port[i]->perm_addr, hw_addr,
3347 ETH_ALEN);
3348 init_link_config(&p->link_config, p->port_type->caps);
3349 p->phy.ops->power_down(&p->phy, 1);
3350 if (!(p->port_type->caps & SUPPORTED_IRQ))
3351 adapter->params.linkpoll_period = 10;
3354 return 0;
3357 void t3_led_ready(struct adapter *adapter)
3359 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
3360 F_GPIO0_OUT_VAL);