cxgb3 - FW version update
drivers/net/cxgb3/t3_hw.c
/*
 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "common.h"
#include "regs.h"
#include "sge_defs.h"
#include "firmware_exports.h"

/**
 *	t3_wait_op_done_val - wait until an operation is completed
 *	@adapter: the adapter performing the operation
 *	@reg: the register to check for completion
 *	@mask: a single-bit field within @reg that indicates completion
 *	@polarity: the value of the field when the operation is completed
 *	@attempts: number of check iterations
 *	@delay: delay in usecs between iterations
 *	@valp: where to store the value of the register at completion time
 *
 *	Wait until an operation is completed by checking a bit in a register
 *	up to @attempts times.  If @valp is not NULL the value of the register
 *	at the time it indicated completion is stored there.  Returns 0 if the
 *	operation completes and -EAGAIN otherwise.
 */
int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
			int polarity, int attempts, int delay, u32 *valp)
{
	while (1) {
		u32 val = t3_read_reg(adapter, reg);

		if (!!(val & mask) == polarity) {
			if (valp)
				*valp = val;
			return 0;
		}
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			udelay(delay);
	}
}
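
/*
 * Usage sketch (illustrative, not part of the original driver): poll the
 * serial-flash BUSY bit for up to 10 iterations of 100us each and capture
 * the register value observed at completion.
 *
 *	u32 v;
 *
 *	if (t3_wait_op_done_val(adapter, A_SF_OP, F_BUSY, 0, 10, 100, &v))
 *		CH_ERR(adapter, "flash operation timed out\n");
 */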
/**
 *	t3_write_regs - write a bunch of registers
 *	@adapter: the adapter to program
 *	@p: an array of register address/register value pairs
 *	@n: the number of address/value pairs
 *	@offset: register address offset
 *
 *	Takes an array of register address/register value pairs and writes each
 *	value to the corresponding register.  Register addresses are adjusted
 *	by the supplied offset.
 */
void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p,
		   int n, unsigned int offset)
{
	while (n--) {
		t3_write_reg(adapter, p->reg_addr + offset, p->val);
		p++;
	}
}

/**
 *	t3_set_reg_field - set a register field to a value
 *	@adapter: the adapter to program
 *	@addr: the register address
 *	@mask: specifies the portion of the register to modify
 *	@val: the new value for the register field
 *
 *	Sets a register field specified by the supplied mask to the
 *	given value.
 */
void t3_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
		      u32 val)
{
	u32 v = t3_read_reg(adapter, addr) & ~mask;

	t3_write_reg(adapter, addr, v | val);
	t3_read_reg(adapter, addr);	/* flush */
}
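
/*
 * Usage sketch (illustrative): t3_set_vlan_accel() later in this file uses
 * this helper to flip the per-port VLAN-extraction bits in one
 * read-modify-write:
 *
 *	t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
 *			 ports << S_VLANEXTRACTIONENABLE,
 *			 on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
 */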
/**
 *	t3_read_indirect - read indirectly addressed registers
 *	@adap: the adapter
 *	@addr_reg: register holding the indirect address
 *	@data_reg: register holding the value of the indirect register
 *	@vals: where the read register values are stored
 *	@start_idx: index of first indirect register to read
 *	@nregs: how many indirect registers to read
 *
 *	Reads registers that are accessed indirectly through an address/data
 *	register pair.
 */
void t3_read_indirect(struct adapter *adap, unsigned int addr_reg,
		      unsigned int data_reg, u32 *vals, unsigned int nregs,
		      unsigned int start_idx)
{
	while (nregs--) {
		t3_write_reg(adap, addr_reg, start_idx);
		*vals++ = t3_read_reg(adap, data_reg);
		start_idx++;
	}
}

/**
 *	t3_mc7_bd_read - read from MC7 through backdoor accesses
 *	@mc7: identifies MC7 to read from
 *	@start: index of first 64-bit word to read
 *	@n: number of 64-bit words to read
 *	@buf: where to store the read result
 *
 *	Read n 64-bit words from MC7 starting at word start, using backdoor
 *	accesses.
 */
int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
		   u64 *buf)
{
	static const int shift[] = { 0, 0, 16, 24 };
	static const int step[] = { 0, 32, 16, 8 };

	unsigned int size64 = mc7->size / 8;	/* # of 64-bit words */
	struct adapter *adap = mc7->adapter;

	if (start >= size64 || start + n > size64)
		return -EINVAL;

	start *= (8 << mc7->width);
	while (n--) {
		int i;
		u64 val64 = 0;

		for (i = (1 << mc7->width) - 1; i >= 0; --i) {
			int attempts = 10;
			u32 val;

			t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR, start);
			t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0);
			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP);
			while ((val & F_BUSY) && attempts--)
				val = t3_read_reg(adap,
						  mc7->offset + A_MC7_BD_OP);
			if (val & F_BUSY)
				return -EIO;

			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1);
			if (mc7->width == 0) {
				val64 = t3_read_reg(adap,
						    mc7->offset +
						    A_MC7_BD_DATA0);
				val64 |= (u64) val << 32;
			} else {
				if (mc7->width > 1)
					val >>= shift[mc7->width];
				val64 |= (u64) val << (step[mc7->width] * i);
			}
			start += 8;
		}
		*buf++ = val64;
	}
	return 0;
}
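
/*
 * Usage sketch (illustrative; "words" is a hypothetical local buffer): dump
 * the first few 64-bit words of the CM memory controller through the
 * backdoor interface:
 *
 *	u64 words[4];
 *
 *	if (!t3_mc7_bd_read(&adapter->cm, 0, ARRAY_SIZE(words), words))
 *		... words[0..3] now hold the data ...
 */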
/*
 * Initialize MI1.
 */
static void mi1_init(struct adapter *adap, const struct adapter_info *ai)
{
	u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
	u32 val = F_PREEN | V_MDIINV(ai->mdiinv) | V_MDIEN(ai->mdien) |
	    V_CLKDIV(clkdiv);

	if (!(ai->caps & SUPPORTED_10000baseT_Full))
		val |= V_ST(1);
	t3_write_reg(adap, A_MI1_CFG, val);
}

#define MDIO_ATTEMPTS 10

/*
 * MI1 read/write operations for direct-addressed PHYs.
 */
static int mi1_read(struct adapter *adapter, int phy_addr, int mmd_addr,
		    int reg_addr, unsigned int *valp)
{
	int ret;
	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);

	if (mmd_addr)
		return -EINVAL;

	mutex_lock(&adapter->mdio_lock);
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
	if (!ret)
		*valp = t3_read_reg(adapter, A_MI1_DATA);
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}

static int mi1_write(struct adapter *adapter, int phy_addr, int mmd_addr,
		     int reg_addr, unsigned int val)
{
	int ret;
	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);

	if (mmd_addr)
		return -EINVAL;

	mutex_lock(&adapter->mdio_lock);
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, val);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}

static const struct mdio_ops mi1_mdio_ops = {
	mi1_read,
	mi1_write
};

/*
 * MI1 read/write operations for indirect-addressed PHYs.
 */
static int mi1_ext_read(struct adapter *adapter, int phy_addr, int mmd_addr,
			int reg_addr, unsigned int *valp)
{
	int ret;
	u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);

	mutex_lock(&adapter->mdio_lock);
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, reg_addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
	if (!ret) {
		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));
		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
				      MDIO_ATTEMPTS, 20);
		if (!ret)
			*valp = t3_read_reg(adapter, A_MI1_DATA);
	}
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}

static int mi1_ext_write(struct adapter *adapter, int phy_addr, int mmd_addr,
			 int reg_addr, unsigned int val)
{
	int ret;
	u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);

	mutex_lock(&adapter->mdio_lock);
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, reg_addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
	if (!ret) {
		t3_write_reg(adapter, A_MI1_DATA, val);
		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
				      MDIO_ATTEMPTS, 20);
	}
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}

static const struct mdio_ops mi1_mdio_ext_ops = {
	mi1_ext_read,
	mi1_ext_write
};
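
/*
 * Note (added for clarity): the indirect cycle above is a two-phase access.
 * Each operation first latches @reg_addr with an address write (V_MDI_OP(0))
 * carried in MI1_DATA, then issues a separate read (V_MDI_OP(3)) or write
 * (V_MDI_OP(1)) transaction, whereas direct-addressed PHYs complete in a
 * single operation.
 */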
/**
 *	t3_mdio_change_bits - modify the value of a PHY register
 *	@phy: the PHY to operate on
 *	@mmd: the device address
 *	@reg: the register address
 *	@clear: what part of the register value to mask off
 *	@set: what part of the register value to set
 *
 *	Changes the value of a PHY register by applying a mask to its current
 *	value and ORing the result with a new value.
 */
int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
			unsigned int set)
{
	int ret;
	unsigned int val;

	ret = mdio_read(phy, mmd, reg, &val);
	if (!ret) {
		val &= ~clear;
		ret = mdio_write(phy, mmd, reg, val | set);
	}
	return ret;
}

/**
 *	t3_phy_reset - reset a PHY block
 *	@phy: the PHY to operate on
 *	@mmd: the device address of the PHY block to reset
 *	@wait: how long to wait for the reset to complete in 1ms increments
 *
 *	Resets a PHY block and optionally waits for the reset to complete.
 *	@mmd should be 0 for 10/100/1000 PHYs and the device address to reset
 *	for 10G PHYs.
 */
int t3_phy_reset(struct cphy *phy, int mmd, int wait)
{
	int err;
	unsigned int ctl;

	err = t3_mdio_change_bits(phy, mmd, MII_BMCR, BMCR_PDOWN, BMCR_RESET);
	if (err || !wait)
		return err;

	do {
		err = mdio_read(phy, mmd, MII_BMCR, &ctl);
		if (err)
			return err;
		ctl &= BMCR_RESET;
		if (ctl)
			msleep(1);
	} while (ctl && --wait);

	return ctl ? -1 : 0;
}

/**
 *	t3_phy_advertise - set the PHY advertisement registers for autoneg
 *	@phy: the PHY to operate on
 *	@advert: bitmap of capabilities the PHY should advertise
 *
 *	Sets a 10/100/1000 PHY's advertisement registers to advertise the
 *	requested capabilities.
 */
int t3_phy_advertise(struct cphy *phy, unsigned int advert)
{
	int err;
	unsigned int val = 0;

	err = mdio_read(phy, 0, MII_CTRL1000, &val);
	if (err)
		return err;

	val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
	if (advert & ADVERTISED_1000baseT_Half)
		val |= ADVERTISE_1000HALF;
	if (advert & ADVERTISED_1000baseT_Full)
		val |= ADVERTISE_1000FULL;

	err = mdio_write(phy, 0, MII_CTRL1000, val);
	if (err)
		return err;

	val = 1;
	if (advert & ADVERTISED_10baseT_Half)
		val |= ADVERTISE_10HALF;
	if (advert & ADVERTISED_10baseT_Full)
		val |= ADVERTISE_10FULL;
	if (advert & ADVERTISED_100baseT_Half)
		val |= ADVERTISE_100HALF;
	if (advert & ADVERTISED_100baseT_Full)
		val |= ADVERTISE_100FULL;
	if (advert & ADVERTISED_Pause)
		val |= ADVERTISE_PAUSE_CAP;
	if (advert & ADVERTISED_Asym_Pause)
		val |= ADVERTISE_PAUSE_ASYM;
	return mdio_write(phy, 0, MII_ADVERTISE, val);
}
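
/*
 * Usage sketch (illustrative): advertise all full-duplex copper speeds plus
 * symmetric pause before restarting auto-negotiation:
 *
 *	t3_phy_advertise(phy, ADVERTISED_10baseT_Full |
 *			 ADVERTISED_100baseT_Full |
 *			 ADVERTISED_1000baseT_Full | ADVERTISED_Pause);
 */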
/**
 *	t3_set_phy_speed_duplex - force PHY speed and duplex
 *	@phy: the PHY to operate on
 *	@speed: requested PHY speed
 *	@duplex: requested PHY duplex
 *
 *	Force a 10/100/1000 PHY's speed and duplex.  This also disables
 *	auto-negotiation except for GigE, where auto-negotiation is mandatory.
 */
int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
{
	int err;
	unsigned int ctl;

	err = mdio_read(phy, 0, MII_BMCR, &ctl);
	if (err)
		return err;

	if (speed >= 0) {
		ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
		if (speed == SPEED_100)
			ctl |= BMCR_SPEED100;
		else if (speed == SPEED_1000)
			ctl |= BMCR_SPEED1000;
	}
	if (duplex >= 0) {
		ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
		if (duplex == DUPLEX_FULL)
			ctl |= BMCR_FULLDPLX;
	}
	if (ctl & BMCR_SPEED1000)	/* auto-negotiation required for GigE */
		ctl |= BMCR_ANENABLE;
	return mdio_write(phy, 0, MII_BMCR, ctl);
}

static const struct adapter_info t3_adap_info[] = {
	{2, 0, 0, 0,
	 F_GPIO2_OEN | F_GPIO4_OEN |
	 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, F_GPIO3 | F_GPIO5,
	 SUPPORTED_OFFLOAD,
	 &mi1_mdio_ops, "Chelsio PE9000"},
	{2, 0, 0, 0,
	 F_GPIO2_OEN | F_GPIO4_OEN |
	 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, F_GPIO3 | F_GPIO5,
	 SUPPORTED_OFFLOAD,
	 &mi1_mdio_ops, "Chelsio T302"},
	{1, 0, 0, 0,
	 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
	 F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, 0,
	 SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_OFFLOAD,
	 &mi1_mdio_ext_ops, "Chelsio T310"},
	{2, 0, 0, 0,
	 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
	 F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
	 F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, 0,
	 SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_OFFLOAD,
	 &mi1_mdio_ext_ops, "Chelsio T320"},
};

/*
 * Return the adapter_info structure with a given index.  Out-of-range indices
 * return NULL.
 */
const struct adapter_info *t3_get_adapter_info(unsigned int id)
{
	return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
}

#define CAPS_1G (SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Full | \
		 SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_MII)
#define CAPS_10G (SUPPORTED_10000baseT_Full | SUPPORTED_AUI)

static const struct port_type_info port_types[] = {
	{NULL},
	{t3_ael1002_phy_prep, CAPS_10G | SUPPORTED_FIBRE,
	 "10GBASE-XR"},
	{t3_vsc8211_phy_prep, CAPS_1G | SUPPORTED_TP | SUPPORTED_IRQ,
	 "10/100/1000BASE-T"},
	{NULL, CAPS_1G | SUPPORTED_TP | SUPPORTED_IRQ,
	 "10/100/1000BASE-T"},
	{t3_xaui_direct_phy_prep, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4"},
	{NULL, CAPS_10G, "10GBASE-KX4"},
	{t3_qt2045_phy_prep, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4"},
	{t3_ael1006_phy_prep, CAPS_10G | SUPPORTED_FIBRE,
	 "10GBASE-SR"},
	{NULL, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4"},
};

#undef CAPS_1G
#undef CAPS_10G

#define VPD_ENTRY(name, len) \
	u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]

/*
 * Partial EEPROM Vital Product Data structure.  Includes only the ID and
 * VPD-R sections.
 */
struct t3_vpd {
	u8 id_tag;
	u8 id_len[2];
	u8 id_data[16];
	u8 vpdr_tag;
	u8 vpdr_len[2];
	VPD_ENTRY(pn, 16);	/* part number */
	VPD_ENTRY(ec, 16);	/* EC level */
	VPD_ENTRY(sn, 16);	/* serial number */
	VPD_ENTRY(na, 12);	/* MAC address base */
	VPD_ENTRY(cclk, 6);	/* core clock */
	VPD_ENTRY(mclk, 6);	/* mem clock */
	VPD_ENTRY(uclk, 6);	/* uP clk */
	VPD_ENTRY(mdc, 6);	/* MDIO clk */
	VPD_ENTRY(mt, 2);	/* mem timing */
	VPD_ENTRY(xaui0cfg, 6);	/* XAUI0 config */
	VPD_ENTRY(xaui1cfg, 6);	/* XAUI1 config */
	VPD_ENTRY(port0, 2);	/* PHY0 complex */
	VPD_ENTRY(port1, 2);	/* PHY1 complex */
	VPD_ENTRY(port2, 2);	/* PHY2 complex */
	VPD_ENTRY(port3, 2);	/* PHY3 complex */
	VPD_ENTRY(rv, 1);	/* csum */
	u32 pad;		/* for multiple-of-4 sizing and alignment */
};

#define EEPROM_MAX_POLL   4
#define EEPROM_STAT_ADDR  0x4000
#define VPD_BASE          0xc00

/**
 *	t3_seeprom_read - read a VPD EEPROM location
 *	@adapter: adapter to read
 *	@addr: EEPROM address
 *	@data: where to store the read data
 *
 *	Read a 32-bit word from a location in VPD EEPROM using the card's PCI
 *	VPD ROM capability.  A zero is written to the flag bit when the
 *	address is written to the control register.  The hardware device will
 *	set the flag to 1 when 4 bytes have been read into the data register.
 */
int t3_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
{
	u16 val;
	int attempts = EEPROM_MAX_POLL;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
		return -EINVAL;

	pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR, addr);
	do {
		udelay(10);
		pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
	} while (!(val & PCI_VPD_ADDR_F) && --attempts);

	if (!(val & PCI_VPD_ADDR_F)) {
		CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	pci_read_config_dword(adapter->pdev, base + PCI_VPD_DATA, data);
	*data = le32_to_cpu(*data);
	return 0;
}

/**
 *	t3_seeprom_write - write a VPD EEPROM location
 *	@adapter: adapter to write
 *	@addr: EEPROM address
 *	@data: value to write
 *
 *	Write a 32-bit word to a location in VPD EEPROM using the card's PCI
 *	VPD ROM capability.
 */
int t3_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
{
	u16 val;
	int attempts = EEPROM_MAX_POLL;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
		return -EINVAL;

	pci_write_config_dword(adapter->pdev, base + PCI_VPD_DATA,
			       cpu_to_le32(data));
	pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR,
			      addr | PCI_VPD_ADDR_F);
	do {
		msleep(1);
		pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
	} while ((val & PCI_VPD_ADDR_F) && --attempts);

	if (val & PCI_VPD_ADDR_F) {
		CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	return 0;
}

/**
 *	t3_seeprom_wp - enable/disable EEPROM write protection
 *	@adapter: the adapter
 *	@enable: 1 to enable write protection, 0 to disable it
 *
 *	Enables or disables write protection on the serial EEPROM.
 */
int t3_seeprom_wp(struct adapter *adapter, int enable)
{
	return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
}
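
/*
 * Typical write sequence (illustrative sketch, error handling omitted):
 * drop write protection, update a word, then protect the EEPROM again.
 *
 *	t3_seeprom_wp(adapter, 0);
 *	t3_seeprom_write(adapter, addr, val);
 *	t3_seeprom_wp(adapter, 1);
 */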
/*
 * Convert a character holding a hex digit to a number.
 */
static unsigned int hex2int(unsigned char c)
{
	return isdigit(c) ? c - '0' : toupper(c) - 'A' + 10;
}

/**
 *	get_vpd_params - read VPD parameters from VPD EEPROM
 *	@adapter: adapter to read
 *	@p: where to store the parameters
 *
 *	Reads card parameters stored in VPD EEPROM.
 */
static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
	int i, addr, ret;
	struct t3_vpd vpd;

	/*
	 * Card information is normally at VPD_BASE but some early cards had
	 * it at 0.
	 */
	ret = t3_seeprom_read(adapter, VPD_BASE, (u32 *)&vpd);
	if (ret)
		return ret;
	addr = vpd.id_tag == 0x82 ? VPD_BASE : 0;

	for (i = 0; i < sizeof(vpd); i += 4) {
		ret = t3_seeprom_read(adapter, addr + i,
				      (u32 *)((u8 *)&vpd + i));
		if (ret)
			return ret;
	}

	p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
	p->mclk = simple_strtoul(vpd.mclk_data, NULL, 10);
	p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10);
	p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10);
	p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10);

	/* Old EEPROMs didn't have port information */
	if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
		p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
		p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
	} else {
		p->port_type[0] = hex2int(vpd.port0_data[0]);
		p->port_type[1] = hex2int(vpd.port1_data[0]);
		p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16);
		p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16);
	}

	for (i = 0; i < 6; i++)
		p->eth_base[i] = hex2int(vpd.na_data[2 * i]) * 16 +
				 hex2int(vpd.na_data[2 * i + 1]);
	return 0;
}

/* serial flash and firmware constants */
enum {
	SF_ATTEMPTS = 5,	/* max retries for SF1 operations */
	SF_SEC_SIZE = 64 * 1024,	/* serial flash sector size */
	SF_SIZE = SF_SEC_SIZE * 8,	/* serial flash size */

	/* flash command opcodes */
	SF_PROG_PAGE = 2,	/* program page */
	SF_WR_DISABLE = 4,	/* disable writes */
	SF_RD_STATUS = 5,	/* read status register */
	SF_WR_ENABLE = 6,	/* enable writes */
	SF_RD_DATA_FAST = 0xb,	/* read flash */
	SF_ERASE_SECTOR = 0xd8,	/* erase sector */

	FW_FLASH_BOOT_ADDR = 0x70000,	/* start address of FW in flash */
	FW_VERS_ADDR = 0x77ffc	/* flash address holding FW version */
};

/**
 *	sf1_read - read data from the serial flash
 *	@adapter: the adapter
 *	@byte_cnt: number of bytes to read
 *	@cont: whether another operation will be chained
 *	@valp: where to store the read data
 *
 *	Reads up to 4 bytes of data from the serial flash.  The location of
 *	the read needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
		    u32 *valp)
{
	int ret;

	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
	ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
	if (!ret)
		*valp = t3_read_reg(adapter, A_SF_DATA);
	return ret;
}

/**
 *	sf1_write - write data to the serial flash
 *	@adapter: the adapter
 *	@byte_cnt: number of bytes to write
 *	@cont: whether another operation will be chained
 *	@val: value to write
 *
 *	Writes up to 4 bytes of data to the serial flash.  The location of
 *	the write needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
		     u32 val)
{
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t3_write_reg(adapter, A_SF_DATA, val);
	t3_write_reg(adapter, A_SF_OP,
		     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
	return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
}

/**
 *	flash_wait_op - wait for a flash operation to complete
 *	@adapter: the adapter
 *	@attempts: max number of polls of the status register
 *	@delay: delay between polls in ms
 *
 *	Wait for a flash operation to complete by polling the status register.
 */
static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
{
	int ret;
	u32 status;

	while (1) {
		if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 ||
		    (ret = sf1_read(adapter, 1, 0, &status)) != 0)
			return ret;
		if (!(status & 1))
			return 0;
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			msleep(delay);
	}
}

/**
 *	t3_read_flash - read words from serial flash
 *	@adapter: the adapter
 *	@addr: the start address for the read
 *	@nwords: how many 32-bit words to read
 *	@data: where to store the read data
 *	@byte_oriented: whether to store data as bytes or as words
 *
 *	Read the specified number of 32-bit words from the serial flash.
 *	If @byte_oriented is set the read data is stored as a byte array
 *	(i.e., big-endian), otherwise as 32-bit words in the platform's
 *	natural endianness.
 */
int t3_read_flash(struct adapter *adapter, unsigned int addr,
		  unsigned int nwords, u32 *data, int byte_oriented)
{
	int ret;

	if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
		return -EINVAL;

	addr = swab32(addr) | SF_RD_DATA_FAST;

	if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, data)) != 0)
		return ret;

	for (; nwords; nwords--, data++) {
		ret = sf1_read(adapter, 4, nwords > 1, data);
		if (ret)
			return ret;
		if (byte_oriented)
			*data = htonl(*data);
	}
	return 0;
}

/**
 *	t3_write_flash - write up to a page of data to the serial flash
 *	@adapter: the adapter
 *	@addr: the start address to write
 *	@n: length of data to write
 *	@data: the data to write
 *
 *	Writes up to a page of data (256 bytes) to the serial flash starting
 *	at the given address.
 */
static int t3_write_flash(struct adapter *adapter, unsigned int addr,
			  unsigned int n, const u8 *data)
{
	int ret;
	u32 buf[64];
	unsigned int i, c, left, val, offset = addr & 0xff;

	if (addr + n > SF_SIZE || offset + n > 256)
		return -EINVAL;

	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, val)) != 0)
		return ret;

	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		ret = sf1_write(adapter, c, c != left, val);
		if (ret)
			return ret;
	}
	if ((ret = flash_wait_op(adapter, 5, 1)) != 0)
		return ret;

	/* Read the page to verify the write succeeded */
	ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
	if (ret)
		return ret;

	if (memcmp(data - n, (u8 *) buf + offset, n))
		return -EIO;
	return 0;
}
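
/*
 * Note (illustrative): a write may not cross a 256-byte programming page,
 * so callers feed larger images to t3_write_flash() one page-sized chunk at
 * a time, as the download loop in t3_load_fw() below does:
 *
 *	unsigned int chunk_size = min(size, 256U);
 *
 *	ret = t3_write_flash(adapter, addr, chunk_size, fw_data);
 */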
enum fw_version_type {
	FW_VERSION_N3,
	FW_VERSION_T3
};

/**
 *	t3_get_fw_version - read the firmware version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the FW version from flash.
 */
int t3_get_fw_version(struct adapter *adapter, u32 *vers)
{
	return t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);
}

/**
 *	t3_check_fw_version - check if the FW is compatible with this driver
 *	@adapter: the adapter
 *
 *	Checks if an adapter's FW is compatible with the driver.  Returns 0
 *	if the versions are compatible, a negative error otherwise.
 */
int t3_check_fw_version(struct adapter *adapter)
{
	int ret;
	u32 vers;
	unsigned int type, major, minor;

	ret = t3_get_fw_version(adapter, &vers);
	if (ret)
		return ret;

	type = G_FW_VERSION_TYPE(vers);
	major = G_FW_VERSION_MAJOR(vers);
	minor = G_FW_VERSION_MINOR(vers);

	if (type == FW_VERSION_T3 && major == FW_VERSION_MAJOR &&
	    minor == FW_VERSION_MINOR)
		return 0;

	CH_ERR(adapter, "found wrong FW version (%u.%u), "
	       "driver needs version %u.%u\n", major, minor,
	       FW_VERSION_MAJOR, FW_VERSION_MINOR);
	return -EINVAL;
}

/**
 *	t3_flash_erase_sectors - erase a range of flash sectors
 *	@adapter: the adapter
 *	@start: the first sector to erase
 *	@end: the last sector to erase
 *
 *	Erases the sectors in the given range.
 */
static int t3_flash_erase_sectors(struct adapter *adapter, int start, int end)
{
	while (start <= end) {
		int ret;

		if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
		    (ret = sf1_write(adapter, 4, 0,
				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
		    (ret = flash_wait_op(adapter, 5, 500)) != 0)
			return ret;
		start++;
	}
	return 0;
}

/**
 *	t3_load_fw - download firmware
 *	@adapter: the adapter
 *	@fw_data: the firmware image to write
 *	@size: image size
 *
 *	Write the supplied firmware image to the card's serial flash.
 *	The FW image has the following sections: @size - 8 bytes of code and
 *	data, followed by 4 bytes of FW version, followed by the 32-bit
 *	1's complement checksum of the whole image.
 */
int t3_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size)
{
	u32 csum;
	unsigned int i;
	const u32 *p = (const u32 *)fw_data;
	int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;

	if (size & 3)
		return -EINVAL;
	if (size > FW_VERS_ADDR + 8 - FW_FLASH_BOOT_ADDR)
		return -EFBIG;

	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);
	if (csum != 0xffffffff) {
		CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
		       csum);
		return -EINVAL;
	}

	ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);
	if (ret)
		goto out;

	size -= 8;		/* trim off version and checksum */
	for (addr = FW_FLASH_BOOT_ADDR; size;) {
		unsigned int chunk_size = min(size, 256U);

		ret = t3_write_flash(adapter, addr, chunk_size, fw_data);
		if (ret)
			goto out;

		addr += chunk_size;
		fw_data += chunk_size;
		size -= chunk_size;
	}

	ret = t3_write_flash(adapter, FW_VERS_ADDR, 4, fw_data);
out:
	if (ret)
		CH_ERR(adapter, "firmware download failed, error %d\n", ret);
	return ret;
}
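
/*
 * Image format note (illustrative): a valid image ends with the FW version
 * word followed by a checksum word chosen so that the 32-bit
 * one's-complement sum of all big-endian words is 0xffffffff, which is
 * exactly what the verification loop at the top of t3_load_fw() checks.
 */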
#define CIM_CTL_BASE 0x2000

/**
 *	t3_cim_ctl_blk_read - read a block from CIM control region
 *	@adap: the adapter
 *	@addr: the start address within the CIM control region
 *	@n: number of words to read
 *	@valp: where to store the result
 *
 *	Reads a block of 4-byte words from the CIM control region.
 */
int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr,
			unsigned int n, unsigned int *valp)
{
	int ret = 0;

	if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
		return -EBUSY;

	for ( ; !ret && n--; addr += 4) {
		t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr);
		ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
				      0, 5, 2);
		if (!ret)
			*valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA);
	}
	return ret;
}

/**
 *	t3_link_changed - handle interface link changes
 *	@adapter: the adapter
 *	@port_id: the port index that changed link state
 *
 *	Called when a port's link settings change to propagate the new values
 *	to the associated PHY and MAC.  After performing the common tasks it
 *	invokes an OS-specific handler.
 */
void t3_link_changed(struct adapter *adapter, int port_id)
{
	int link_ok, speed, duplex, fc;
	struct port_info *pi = adap2pinfo(adapter, port_id);
	struct cphy *phy = &pi->phy;
	struct cmac *mac = &pi->mac;
	struct link_config *lc = &pi->link_config;

	phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);

	if (link_ok != lc->link_ok && adapter->params.rev > 0 &&
	    uses_xaui(adapter)) {
		if (link_ok)
			t3b_pcs_reset(mac);
		t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
			     link_ok ? F_TXACTENABLE | F_RXEN : 0);
	}
	lc->link_ok = link_ok;
	lc->speed = speed < 0 ? SPEED_INVALID : speed;
	lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
	if (lc->requested_fc & PAUSE_AUTONEG)
		fc &= lc->requested_fc;
	else
		fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);

	if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
		/* Set MAC speed, duplex, and flow control to match PHY. */
		t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc);
		lc->fc = fc;
	}

	t3_os_link_changed(adapter, port_id, link_ok, speed, duplex, fc);
}

/**
 *	t3_link_start - apply link configuration to MAC/PHY
 *	@phy: the PHY to setup
 *	@mac: the MAC to setup
 *	@lc: the requested link configuration
 *
 *	Set up a port's MAC and PHY according to a desired link configuration.
 *	- If the PHY can auto-negotiate first decide what to advertise, then
 *	  enable/disable auto-negotiation as desired, and reset.
 *	- If the PHY does not auto-negotiate just reset it.
 *	- If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
 *	  otherwise do it later based on the outcome of auto-negotiation.
 */
int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
{
	unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);

	lc->link_ok = 0;
	if (lc->supported & SUPPORTED_Autoneg) {
		lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);
		if (fc) {
			lc->advertising |= ADVERTISED_Asym_Pause;
			if (fc & PAUSE_RX)
				lc->advertising |= ADVERTISED_Pause;
		}
		phy->ops->advertise(phy, lc->advertising);

		if (lc->autoneg == AUTONEG_DISABLE) {
			lc->speed = lc->requested_speed;
			lc->duplex = lc->requested_duplex;
			lc->fc = (unsigned char)fc;
			t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex,
						   fc);
			/* Also disables autoneg */
			phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
			phy->ops->reset(phy, 0);
		} else
			phy->ops->autoneg_enable(phy);
	} else {
		t3_mac_set_speed_duplex_fc(mac, -1, -1, fc);
		lc->fc = (unsigned char)fc;
		phy->ops->reset(phy, 0);
	}
	return 0;
}

/**
 *	t3_set_vlan_accel - control HW VLAN extraction
 *	@adapter: the adapter
 *	@ports: bitmap of adapter ports to operate on
 *	@on: enable (1) or disable (0) HW VLAN extraction
 *
 *	Enables or disables HW extraction of VLAN tags for the given ports.
 */
void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on)
{
	t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
			 ports << S_VLANEXTRACTIONENABLE,
			 on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
}

struct intr_info {
	unsigned int mask;	/* bits to check in interrupt status */
	const char *msg;	/* message to print or NULL */
	short stat_idx;		/* stat counter to increment or -1 */
	unsigned short fatal:1;	/* whether the condition reported is fatal */
};

/**
 *	t3_handle_intr_status - table driven interrupt handler
 *	@adapter: the adapter that generated the interrupt
 *	@reg: the interrupt status register to process
 *	@mask: a mask to apply to the interrupt status
 *	@acts: table of interrupt actions
 *	@stats: statistics counters tracking interrupt occurrences
 *
 *	A table driven interrupt handler that applies a set of masks to an
 *	interrupt status word and performs the corresponding actions if the
 *	interrupts described by the mask have occurred.  The actions include
 *	optionally printing a warning or alert message, and optionally
 *	incrementing a stat counter.  The table is terminated by an entry
 *	specifying mask 0.  Returns the number of fatal interrupt conditions.
 */
static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg,
				 unsigned int mask,
				 const struct intr_info *acts,
				 unsigned long *stats)
{
	int fatal = 0;
	unsigned int status = t3_read_reg(adapter, reg) & mask;

	for (; acts->mask; ++acts) {
		if (!(status & acts->mask))
			continue;
		if (acts->fatal) {
			fatal++;
			CH_ALERT(adapter, "%s (0x%x)\n",
				 acts->msg, status & acts->mask);
		} else if (acts->msg)
			CH_WARN(adapter, "%s (0x%x)\n",
				acts->msg, status & acts->mask);
		if (acts->stat_idx >= 0)
			stats[acts->stat_idx]++;
	}
	if (status)		/* clear processed interrupts */
		t3_write_reg(adapter, reg, status);
	return fatal;
}
#define SGE_INTR_MASK (F_RSPQDISABLED)
#define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
		       F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
		       F_NFASRCHFAIL)
#define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
#define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
		       V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
		       F_TXFIFO_UNDERRUN | F_RXFIFO_OVERFLOW)
#define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
			F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
			F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
			F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \
			V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \
			V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */)
#define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\
			F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
			/* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \
			V_BISTERR(M_BISTERR) | F_PEXERR)
#define ULPRX_INTR_MASK F_PARERR
#define ULPTX_INTR_MASK 0
#define CPLSW_INTR_MASK (F_TP_FRAMING_ERROR | \
			 F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
			 F_ZERO_SWITCH_ERROR)
#define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
		       F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
		       F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
		       F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT)
#define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
			V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
			V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
#define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \
			V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \
			V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR))
#define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \
		       V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
		       V_RXTPPARERRENB(M_RXTPPARERRENB) | \
		       V_MCAPARERRENB(M_MCAPARERRENB))
#define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
		      F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
		      F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
		      F_MPS0 | F_CPL_SWITCH)

/*
 * Interrupt handler for the PCIX1 module.
 */
static void pci_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pcix1_intr_info[] = {
		{F_MSTDETPARERR, "PCI master detected parity error", -1, 1},
		{F_SIGTARABT, "PCI signaled target abort", -1, 1},
		{F_RCVTARABT, "PCI received target abort", -1, 1},
		{F_RCVMSTABT, "PCI received master abort", -1, 1},
		{F_SIGSYSERR, "PCI signaled system error", -1, 1},
		{F_DETPARERR, "PCI detected parity error", -1, 1},
		{F_SPLCMPDIS, "PCI split completion discarded", -1, 1},
		{F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1},
		{F_RCVSPLCMPERR, "PCI received split completion error", -1,
		 1},
		{F_DETCORECCERR, "PCI correctable ECC error",
		 STAT_PCI_CORR_ECC, 0},
		{F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1},
		{F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
		{V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1,
		 1},
		{V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1,
		 1},
		{V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1,
		 1},
		{V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
		 "error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
				  pcix1_intr_info, adapter->irq_stats))
		t3_fatal_err(adapter);
}

/*
 * Interrupt handler for the PCIE module.
 */
static void pcie_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pcie_intr_info[] = {
		{F_PEXERR, "PCI PEX error", -1, 1},
		{F_UNXSPLCPLERRR,
		 "PCI unexpected split completion DMA read error", -1, 1},
		{F_UNXSPLCPLERRC,
		 "PCI unexpected split completion DMA command error", -1, 1},
		{F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
		{F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1},
		{F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1},
		{F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1},
		{V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
		 "PCI MSI-X table/PBA parity error", -1, 1},
		{V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
				  pcie_intr_info, adapter->irq_stats))
		t3_fatal_err(adapter);
}

/*
 * TP interrupt handler.
 */
static void tp_intr_handler(struct adapter *adapter)
{
	static const struct intr_info tp_intr_info[] = {
		{0xffffff, "TP parity error", -1, 1},
		{0x1000000, "TP out of Rx pages", -1, 1},
		{0x2000000, "TP out of Tx pages", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
				  tp_intr_info, NULL))
		t3_fatal_err(adapter);
}

/*
 * CIM interrupt handler.
 */
static void cim_intr_handler(struct adapter *adapter)
{
	static const struct intr_info cim_intr_info[] = {
		{F_RSVDSPACEINT, "CIM reserved space write", -1, 1},
		{F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1},
		{F_FLASHRANGEINT, "CIM flash address out of range", -1, 1},
		{F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1},
		{F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1},
		{F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1},
		{F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1},
		{F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1},
		{F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1},
		{F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1},
		{F_BLKRDPLINT, "CIM block read from PL space", -1, 1},
		{F_BLKWRPLINT, "CIM block write to PL space", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, 0xffffffff,
				  cim_intr_info, NULL))
		t3_fatal_err(adapter);
}

/*
 * ULP RX interrupt handler.
 */
static void ulprx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info ulprx_intr_info[] = {
		{F_PARERR, "ULP RX parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
				  ulprx_intr_info, NULL))
		t3_fatal_err(adapter);
}

/*
 * ULP TX interrupt handler.
 */
static void ulptx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info ulptx_intr_info[] = {
		{F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
		 STAT_ULP_CH0_PBL_OOB, 0},
		{F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
		 STAT_ULP_CH1_PBL_OOB, 0},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff,
				  ulptx_intr_info, adapter->irq_stats))
		t3_fatal_err(adapter);
}

#define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \
	F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \
	F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \
	F_ICSPI1_TX_FRAMING_ERROR)
#define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \
	F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \
	F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \
	F_OESPI1_OFIFO2X_TX_FRAMING_ERROR)

/*
 * PM TX interrupt handler.
 */
static void pmtx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pmtx_intr_info[] = {
		{F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1},
		{ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1},
		{OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1},
		{V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
		 "PMTX ispi parity error", -1, 1},
		{V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
		 "PMTX ospi parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff,
				  pmtx_intr_info, NULL))
		t3_fatal_err(adapter);
}

#define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \
	F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \
	F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \
	F_IESPI1_TX_FRAMING_ERROR)
#define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \
	F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \
	F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \
	F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)

/*
 * PM RX interrupt handler.
 */
static void pmrx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pmrx_intr_info[] = {
		{F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1},
		{IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1},
		{OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1},
		{V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
		 "PMRX ispi parity error", -1, 1},
		{V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
		 "PMRX ospi parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff,
				  pmrx_intr_info, NULL))
		t3_fatal_err(adapter);
}

/*
 * CPL switch interrupt handler.
 */
static void cplsw_intr_handler(struct adapter *adapter)
{
	static const struct intr_info cplsw_intr_info[] = {
		/* { F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1 }, */
		{F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1},
		{F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1},
		{F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1},
		{F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff,
				  cplsw_intr_info, NULL))
		t3_fatal_err(adapter);
}

/*
 * MPS interrupt handler.
 */
static void mps_intr_handler(struct adapter *adapter)
{
	static const struct intr_info mps_intr_info[] = {
		{0x1ff, "MPS parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
				  mps_intr_info, NULL))
		t3_fatal_err(adapter);
}

#define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE)

/*
 * MC7 interrupt handler.
 */
static void mc7_intr_handler(struct mc7 *mc7)
{
	struct adapter *adapter = mc7->adapter;
	u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE);

	if (cause & F_CE) {
		mc7->stats.corr_err++;
		CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, "
			"data 0x%x 0x%x 0x%x\n", mc7->name,
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR),
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0),
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1),
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2));
	}

	if (cause & F_UE) {
		mc7->stats.uncorr_err++;
		CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, "
			 "data 0x%x 0x%x 0x%x\n", mc7->name,
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR),
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0),
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1),
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2));
	}

	if (G_PE(cause)) {
		mc7->stats.parity_err++;
		CH_ALERT(adapter, "%s MC7 parity error 0x%x\n",
			 mc7->name, G_PE(cause));
	}

	if (cause & F_AE) {
		u32 addr = 0;

		if (adapter->params.rev > 0)
			addr = t3_read_reg(adapter,
					   mc7->offset + A_MC7_ERR_ADDR);
		mc7->stats.addr_err++;
		CH_ALERT(adapter, "%s MC7 address error: 0x%x\n",
			 mc7->name, addr);
	}

	if (cause & MC7_INTR_FATAL)
		t3_fatal_err(adapter);

	t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause);
}
#define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
			V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR))

/*
 * XGMAC interrupt handler.
 */
static int mac_intr_handler(struct adapter *adap, unsigned int idx)
{
	struct cmac *mac = &adap2pinfo(adap, idx)->mac;
	u32 cause = t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset);

	if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
		mac->stats.tx_fifo_parity_err++;
		CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx);
	}
	if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) {
		mac->stats.rx_fifo_parity_err++;
		CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx);
	}
	if (cause & F_TXFIFO_UNDERRUN)
		mac->stats.tx_fifo_urun++;
	if (cause & F_RXFIFO_OVERFLOW)
		mac->stats.rx_fifo_ovfl++;
	if (cause & V_SERDES_LOS(M_SERDES_LOS))
		mac->stats.serdes_signal_loss++;
	if (cause & F_XAUIPCSCTCERR)
		mac->stats.xaui_pcs_ctc_err++;
	if (cause & F_XAUIPCSALIGNCHANGE)
		mac->stats.xaui_pcs_align_change++;

	t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
	if (cause & XGM_INTR_FATAL)
		t3_fatal_err(adap);
	return cause != 0;
}

/*
 * Interrupt handler for PHY events.
 */
int t3_phy_intr_handler(struct adapter *adapter)
{
	static const int intr_gpio_bits[] = { 8, 0x20 };

	u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);

	for_each_port(adapter, i) {
		if (cause & intr_gpio_bits[i]) {
			struct cphy *phy = &adap2pinfo(adapter, i)->phy;
			int phy_cause = phy->ops->intr_handler(phy);

			if (phy_cause & cphy_cause_link_change)
				t3_link_changed(adapter, i);
			if (phy_cause & cphy_cause_fifo_error)
				phy->fifo_errors++;
		}
	}

	t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause);
	return 0;
}

/*
 * T3 slow path (non-data) interrupt handler.
 */
int t3_slow_intr_handler(struct adapter *adapter)
{
	u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0);

	cause &= adapter->slow_intr_mask;
	if (!cause)
		return 0;
	if (cause & F_PCIM0) {
		if (is_pcie(adapter))
			pcie_intr_handler(adapter);
		else
			pci_intr_handler(adapter);
	}
	if (cause & F_SGE3)
		t3_sge_err_intr_handler(adapter);
	if (cause & F_MC7_PMRX)
		mc7_intr_handler(&adapter->pmrx);
	if (cause & F_MC7_PMTX)
		mc7_intr_handler(&adapter->pmtx);
	if (cause & F_MC7_CM)
		mc7_intr_handler(&adapter->cm);
	if (cause & F_CIM)
		cim_intr_handler(adapter);
	if (cause & F_TP1)
		tp_intr_handler(adapter);
	if (cause & F_ULP2_RX)
		ulprx_intr_handler(adapter);
	if (cause & F_ULP2_TX)
		ulptx_intr_handler(adapter);
	if (cause & F_PM1_RX)
		pmrx_intr_handler(adapter);
	if (cause & F_PM1_TX)
		pmtx_intr_handler(adapter);
	if (cause & F_CPL_SWITCH)
		cplsw_intr_handler(adapter);
	if (cause & F_MPS0)
		mps_intr_handler(adapter);
	if (cause & F_MC5A)
		t3_mc5_intr_handler(&adapter->mc5);
	if (cause & F_XGMAC0_0)
		mac_intr_handler(adapter, 0);
	if (cause & F_XGMAC0_1)
		mac_intr_handler(adapter, 1);
	if (cause & F_T3DBG)
		t3_os_ext_intr_handler(adapter);

	/* Clear the interrupts just processed. */
	t3_write_reg(adapter, A_PL_INT_CAUSE0, cause);
	t3_read_reg(adapter, A_PL_INT_CAUSE0);	/* flush */
	return 1;
}
/**
 *	t3_intr_enable - enable interrupts
 *	@adapter: the adapter whose interrupts should be enabled
 *
 *	Enable interrupts by setting the interrupt enable registers of the
 *	various HW modules and then enabling the top-level interrupt
 *	concentrator.
 */
void t3_intr_enable(struct adapter *adapter)
{
	static const struct addr_val_pair intr_en_avp[] = {
		{A_SG_INT_ENABLE, SGE_INTR_MASK},
		{A_MC7_INT_ENABLE, MC7_INTR_MASK},
		{A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
		 MC7_INTR_MASK},
		{A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
		 MC7_INTR_MASK},
		{A_MC5_DB_INT_ENABLE, MC5_INTR_MASK},
		{A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK},
		{A_TP_INT_ENABLE, 0x3bfffff},
		{A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK},
		{A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK},
		{A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK},
		{A_MPS_INT_ENABLE, MPS_INTR_MASK},
	};

	adapter->slow_intr_mask = PL_INTR_MASK;

	t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0);

	if (adapter->params.rev > 0) {
		t3_write_reg(adapter, A_CPL_INTR_ENABLE,
			     CPLSW_INTR_MASK | F_CIM_OVFL_ERROR);
		t3_write_reg(adapter, A_ULPTX_INT_ENABLE,
			     ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 |
			     F_PBL_BOUND_ERR_CH1);
	} else {
		t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK);
		t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
	}

	t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW,
		     adapter_info(adapter)->gpio_intr);
	t3_write_reg(adapter, A_T3DBG_INT_ENABLE,
		     adapter_info(adapter)->gpio_intr);
	if (is_pcie(adapter))
		t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
	else
		t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK);
	t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask);
	t3_read_reg(adapter, A_PL_INT_ENABLE0);	/* flush */
}

/**
 *	t3_intr_disable - disable a card's interrupts
 *	@adapter: the adapter whose interrupts should be disabled
 *
 *	Disable interrupts.  We only disable the top-level interrupt
 *	concentrator and the SGE data interrupts.
 */
void t3_intr_disable(struct adapter *adapter)
{
	t3_write_reg(adapter, A_PL_INT_ENABLE0, 0);
	t3_read_reg(adapter, A_PL_INT_ENABLE0);	/* flush */
	adapter->slow_intr_mask = 0;
}

/**
 *	t3_intr_clear - clear all interrupts
 *	@adapter: the adapter whose interrupts should be cleared
 *
 *	Clears all interrupts.
 */
void t3_intr_clear(struct adapter *adapter)
{
	static const unsigned int cause_reg_addr[] = {
		A_SG_INT_CAUSE,
		A_SG_RSPQ_FL_STATUS,
		A_PCIX_INT_CAUSE,
		A_MC7_INT_CAUSE,
		A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
		A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
		A_CIM_HOST_INT_CAUSE,
		A_TP_INT_CAUSE,
		A_MC5_DB_INT_CAUSE,
		A_ULPRX_INT_CAUSE,
		A_ULPTX_INT_CAUSE,
		A_CPL_INTR_CAUSE,
		A_PM1_TX_INT_CAUSE,
		A_PM1_RX_INT_CAUSE,
		A_MPS_INT_CAUSE,
		A_T3DBG_INT_CAUSE,
	};
	unsigned int i;

	/* Clear PHY and MAC interrupts for each port. */
	for_each_port(adapter, i)
		t3_port_intr_clear(adapter, i);

	for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
		t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);

	t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
	t3_read_reg(adapter, A_PL_INT_CAUSE0);	/* flush */
}

/**
 *	t3_port_intr_enable - enable port-specific interrupts
 *	@adapter: associated adapter
 *	@idx: index of port whose interrupts should be enabled
 *
 *	Enable port-specific (i.e., MAC and PHY) interrupts for the given
 *	adapter port.
 */
void t3_port_intr_enable(struct adapter *adapter, int idx)
{
	struct cphy *phy = &adap2pinfo(adapter, idx)->phy;

	t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), XGM_INTR_MASK);
	t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
	phy->ops->intr_enable(phy);
}

/**
 *	t3_port_intr_disable - disable port-specific interrupts
 *	@adapter: associated adapter
 *	@idx: index of port whose interrupts should be disabled
 *
 *	Disable port-specific (i.e., MAC and PHY) interrupts for the given
 *	adapter port.
 */
void t3_port_intr_disable(struct adapter *adapter, int idx)
{
	struct cphy *phy = &adap2pinfo(adapter, idx)->phy;

	t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), 0);
	t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
	phy->ops->intr_disable(phy);
}

/**
 *	t3_port_intr_clear - clear port-specific interrupts
 *	@adapter: associated adapter
 *	@idx: index of port whose interrupts to clear
 *
 *	Clear port-specific (i.e., MAC and PHY) interrupts for the given
 *	adapter port.
 */
void t3_port_intr_clear(struct adapter *adapter, int idx)
{
	struct cphy *phy = &adap2pinfo(adapter, idx)->phy;

	t3_write_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx), 0xffffffff);
	t3_read_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx)); /* flush */
	phy->ops->intr_clear(phy);
}
1758 * t3_sge_write_context - write an SGE context
1759 * @adapter: the adapter
1760 * @id: the context id
1761 * @type: the context type
1763 * Program an SGE context with the values already loaded in the
1764 * CONTEXT_DATA? registers.
1766 static int t3_sge_write_context(struct adapter *adapter, unsigned int id,
1767 unsigned int type)
1769 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
1770 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
1771 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
1772 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
1773 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
1774 V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
1775 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
1776 0, 5, 1);
1780 * t3_sge_init_ecntxt - initialize an SGE egress context
1781 * @adapter: the adapter to configure
1782 * @id: the context id
1783 * @gts_enable: whether to enable GTS for the context
1784 * @type: the egress context type
1785 * @respq: associated response queue
1786 * @base_addr: base address of queue
1787 * @size: number of queue entries
1788 * @token: uP token
1789 * @gen: initial generation value for the context
1790 * @cidx: consumer pointer
1792 * Initialize an SGE egress context and make it ready for use. If the
1793 * platform allows concurrent context operations, the caller is
1794 * responsible for appropriate locking.
1796 int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable,
1797 enum sge_context_type type, int respq, u64 base_addr,
1798 unsigned int size, unsigned int token, int gen,
1799 unsigned int cidx)
1801 unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM;
1803 if (base_addr & 0xfff) /* must be 4K aligned */
1804 return -EINVAL;
1805 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1806 return -EBUSY;
1808 base_addr >>= 12;
1809 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) |
1810 V_EC_CREDITS(credits) | V_EC_GTS(gts_enable));
1811 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) |
1812 V_EC_BASE_LO(base_addr & 0xffff));
1813 base_addr >>= 16;
1814 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, base_addr);
1815 base_addr >>= 32;
1816 t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
1817 V_EC_BASE_HI(base_addr & 0xf) | V_EC_RESPQ(respq) |
1818 V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) |
1819 F_EC_VALID);
1820 return t3_sge_write_context(adapter, id, F_EGRESS);
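/*
 * Editorial example (hypothetical address) of the packing above: for a
 * 4 KB aligned base_addr of 0x00123456789ab000,
 *
 *	base_addr >> 12 = 0x123456789ab   (page number)
 *	DATA1 takes V_EC_BASE_LO(0x89ab)  (low 16 bits)
 *	DATA2 takes 0x1234567             (next 32 bits)
 *	DATA3 takes V_EC_BASE_HI(0)       (top 4 bits, zero here)
 */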
1824 * t3_sge_init_flcntxt - initialize an SGE free-buffer list context
1825 * @adapter: the adapter to configure
1826 * @id: the context id
1827 * @gts_enable: whether to enable GTS for the context
1828 * @base_addr: base address of queue
1829 * @size: number of queue entries
1830 * @bsize: size of each buffer for this queue
1831 * @cong_thres: threshold to signal congestion to upstream producers
1832 * @gen: initial generation value for the context
1833 * @cidx: consumer pointer
1835 * Initialize an SGE free list context and make it ready for use. The
1836 * caller is responsible for ensuring only one context operation occurs
1837 * at a time.
1839 int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id,
1840 int gts_enable, u64 base_addr, unsigned int size,
1841 unsigned int bsize, unsigned int cong_thres, int gen,
1842 unsigned int cidx)
1844 if (base_addr & 0xfff) /* must be 4K aligned */
1845 return -EINVAL;
1846 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1847 return -EBUSY;
1849 base_addr >>= 12;
1850 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, base_addr);
1851 base_addr >>= 32;
1852 t3_write_reg(adapter, A_SG_CONTEXT_DATA1,
1853 V_FL_BASE_HI((u32) base_addr) |
1854 V_FL_INDEX_LO(cidx & M_FL_INDEX_LO));
1855 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) |
1856 V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) |
1857 V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO));
1858 t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
1859 V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) |
1860 V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable));
1861 return t3_sge_write_context(adapter, id, F_FREELIST);
1865 * t3_sge_init_rspcntxt - initialize an SGE response queue context
1866 * @adapter: the adapter to configure
1867 * @id: the context id
1868 * @irq_vec_idx: MSI-X interrupt vector index, 0 if no MSI-X, -1 if no IRQ
1869 * @base_addr: base address of queue
1870 * @size: number of queue entries
1871 * @fl_thres: threshold for selecting the normal or jumbo free list
1872 * @gen: initial generation value for the context
1873 * @cidx: consumer pointer
1875 * Initialize an SGE response queue context and make it ready for use.
1876 * The caller is responsible for ensuring only one context operation
1877 * occurs at a time.
1879 int t3_sge_init_rspcntxt(struct adapter *adapter, unsigned int id,
1880 int irq_vec_idx, u64 base_addr, unsigned int size,
1881 unsigned int fl_thres, int gen, unsigned int cidx)
1883 unsigned int intr = 0;
1885 if (base_addr & 0xfff) /* must be 4K aligned */
1886 return -EINVAL;
1887 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1888 return -EBUSY;
1890 base_addr >>= 12;
1891 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) |
1892 V_CQ_INDEX(cidx));
1893 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
1894 base_addr >>= 32;
1895 if (irq_vec_idx >= 0)
1896 intr = V_RQ_MSI_VEC(irq_vec_idx) | F_RQ_INTR_EN;
1897 t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
1898 V_CQ_BASE_HI((u32) base_addr) | intr | V_RQ_GEN(gen));
1899 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres);
1900 return t3_sge_write_context(adapter, id, F_RESPONSEQ);
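/*
 * Editorial sketch (hypothetical values, not from the driver proper): a
 * caller binding response queue 0, 512 entries, to MSI-X vector 0 might
 * use
 *
 *	t3_sge_init_rspcntxt(adap, 0, 0, dma_addr, 512, fl_thres, 1, 0);
 *
 * Passing irq_vec_idx == -1 leaves intr at 0, so F_RQ_INTR_EN stays
 * clear and the queue must be polled.
 */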
1904 * t3_sge_init_cqcntxt - initialize an SGE completion queue context
1905 * @adapter: the adapter to configure
1906 * @id: the context id
1907 * @base_addr: base address of queue
1908 * @size: number of queue entries
1909 * @rspq: response queue for async notifications
1910 * @ovfl_mode: CQ overflow mode
1911 * @credits: completion queue credits
1912 * @credit_thres: the credit threshold
1914 * Initialize an SGE completion queue context and make it ready for use.
1915 * The caller is responsible for ensuring only one context operation
1916 * occurs at a time.
1918 int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr,
1919 unsigned int size, int rspq, int ovfl_mode,
1920 unsigned int credits, unsigned int credit_thres)
1922 if (base_addr & 0xfff) /* must be 4K aligned */
1923 return -EINVAL;
1924 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1925 return -EBUSY;
1927 base_addr >>= 12;
1928 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size));
1929 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
1930 base_addr >>= 32;
1931 t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
1932 V_CQ_BASE_HI((u32) base_addr) | V_CQ_RSPQ(rspq) |
1933 V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode));
1934 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
1935 V_CQ_CREDIT_THRES(credit_thres));
1936 return t3_sge_write_context(adapter, id, F_CQ);
1940 * t3_sge_enable_ecntxt - enable/disable an SGE egress context
1941 * @adapter: the adapter
1942 * @id: the egress context id
1943 * @enable: enable (1) or disable (0) the context
1945 * Enable or disable an SGE egress context. The caller is responsible for
1946 * ensuring only one context operation occurs at a time.
1948 int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable)
1950 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1951 return -EBUSY;
1953 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
1954 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
1955 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
1956 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID);
1957 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable));
1958 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
1959 V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
1960 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
1961 0, 5, 1);
1965 * t3_sge_disable_fl - disable an SGE free-buffer list
1966 * @adapter: the adapter
1967 * @id: the free list context id
1969 * Disable an SGE free-buffer list. The caller is responsible for
1970 * ensuring only one context operation occurs at a time.
1972 int t3_sge_disable_fl(struct adapter *adapter, unsigned int id)
1974 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1975 return -EBUSY;
1977 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
1978 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
1979 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE));
1980 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
1981 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0);
1982 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
1983 V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id));
1984 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
1985 0, 5, 1);
1989 * t3_sge_disable_rspcntxt - disable an SGE response queue
1990 * @adapter: the adapter
1991 * @id: the response queue context id
1993 * Disable an SGE response queue. The caller is responsible for
1994 * ensuring only one context operation occurs at a time.
1996 int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id)
1998 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1999 return -EBUSY;
2001 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2002 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2003 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2004 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2005 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2006 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2007 V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id));
2008 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2009 0, 5, 1);
2013 * t3_sge_disable_cqcntxt - disable an SGE completion queue
2014 * @adapter: the adapter
2015 * @id: the completion queue context id
2017 * Disable an SGE completion queue. The caller is responsible for
2018 * ensuring only one context operation occurs at a time.
2020 int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id)
2022 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2023 return -EBUSY;
2025 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2026 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2027 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2028 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2029 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2030 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2031 V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
2032 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2033 0, 5, 1);
2037 * t3_sge_cqcntxt_op - perform an operation on a completion queue context
2038 * @adapter: the adapter
2039 * @id: the context id
2040 * @op: the operation to perform
2042 * Perform the selected operation on an SGE completion queue context.
2043 * The caller is responsible for ensuring only one context operation
2044 * occurs at a time.
2046 int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
2047 unsigned int credits)
2049 u32 val;
2051 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2052 return -EBUSY;
2054 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16);
2055 t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) |
2056 V_CONTEXT(id) | F_CQ);
2057 if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2058 0, 5, 1, &val))
2059 return -EIO;
2061 if (op >= 2 && op < 7) {
2062 if (adapter->params.rev > 0)
2063 return G_CQ_INDEX(val);
2065 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2066 V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id));
2067 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD,
2068 F_CONTEXT_CMD_BUSY, 0, 5, 1))
2069 return -EIO;
2070 return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0));
2072 return 0;
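/*
 * Editorial note: for opcodes 2..6 the call reports the current CQ
 * index.  Rev > 0 silicon returns it directly in the command register;
 * on rev 0 the code above issues a follow-up read command (opcode 0)
 * and extracts the index from A_SG_CONTEXT_DATA0 instead.
 */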
2076 * t3_sge_read_context - read an SGE context
2077 * @type: the context type
2078 * @adapter: the adapter
2079 * @id: the context id
2080 * @data: holds the retrieved context
2082 * Read an SGE context of the given type. The caller is responsible for ensuring
2083 * only one context operation occurs at a time.
2085 static int t3_sge_read_context(unsigned int type, struct adapter *adapter,
2086 unsigned int id, u32 data[4])
2088 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2089 return -EBUSY;
2091 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2092 V_CONTEXT_CMD_OPCODE(0) | type | V_CONTEXT(id));
2093 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 0,
2094 5, 1))
2095 return -EIO;
2096 data[0] = t3_read_reg(adapter, A_SG_CONTEXT_DATA0);
2097 data[1] = t3_read_reg(adapter, A_SG_CONTEXT_DATA1);
2098 data[2] = t3_read_reg(adapter, A_SG_CONTEXT_DATA2);
2099 data[3] = t3_read_reg(adapter, A_SG_CONTEXT_DATA3);
2100 return 0;
2104 * t3_sge_read_ecntxt - read an SGE egress context
2105 * @adapter: the adapter
2106 * @id: the context id
2107 * @data: holds the retrieved context
2109 * Read an SGE egress context. The caller is responsible for ensuring
2110 * only one context operation occurs at a time.
2112 int t3_sge_read_ecntxt(struct adapter *adapter, unsigned int id, u32 data[4])
2114 if (id >= 65536)
2115 return -EINVAL;
2116 return t3_sge_read_context(F_EGRESS, adapter, id, data);
2120 * t3_sge_read_cq - read an SGE CQ context
2121 * @adapter: the adapter
2122 * @id: the context id
2123 * @data: holds the retrieved context
2125 * Read an SGE CQ context. The caller is responsible for ensuring
2126 * only one context operation occurs at a time.
2128 int t3_sge_read_cq(struct adapter *adapter, unsigned int id, u32 data[4])
2130 if (id >= 65536)
2131 return -EINVAL;
2132 return t3_sge_read_context(F_CQ, adapter, id, data);
2136 * t3_sge_read_fl - read an SGE free-list context
2137 * @adapter: the adapter
2138 * @id: the context id
2139 * @data: holds the retrieved context
2141 * Read an SGE free-list context. The caller is responsible for ensuring
2142 * only one context operation occurs at a time.
2144 int t3_sge_read_fl(struct adapter *adapter, unsigned int id, u32 data[4])
2146 if (id >= SGE_QSETS * 2)
2147 return -EINVAL;
2148 return t3_sge_read_context(F_FREELIST, adapter, id, data);
2152 * t3_sge_read_rspq - read an SGE response queue context
2153 * @adapter: the adapter
2154 * @id: the context id
2155 * @data: holds the retrieved context
2157 * Read an SGE response queue context. The caller is responsible for
2158 * ensuring only one context operation occurs at a time.
2160 int t3_sge_read_rspq(struct adapter *adapter, unsigned int id, u32 data[4])
2162 if (id >= SGE_QSETS)
2163 return -EINVAL;
2164 return t3_sge_read_context(F_RESPONSEQ, adapter, id, data);
2168 * t3_config_rss - configure Rx packet steering
2169 * @adapter: the adapter
2170 * @rss_config: RSS settings (written to TP_RSS_CONFIG)
2171 * @cpus: values for the CPU lookup table (0xff terminated)
2172 * @rspq: values for the response queue lookup table (0xffff terminated)
2174 * Programs the receive packet steering logic. @cpus and @rspq provide
2175 * the values for the CPU and response queue lookup tables. If they
2176 * provide fewer values than the size of the tables the supplied values
2177 * are used repeatedly until the tables are fully populated.
2179 void t3_config_rss(struct adapter *adapter, unsigned int rss_config,
2180 const u8 * cpus, const u16 *rspq)
2182 int i, j, cpu_idx = 0, q_idx = 0;
2184 if (cpus)
2185 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2186 u32 val = i << 16;
2188 for (j = 0; j < 2; ++j) {
2189 val |= (cpus[cpu_idx++] & 0x3f) << (8 * j);
2190 if (cpus[cpu_idx] == 0xff)
2191 cpu_idx = 0;
2193 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val);
2196 if (rspq)
2197 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2198 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2199 (i << 16) | rspq[q_idx++]);
2200 if (rspq[q_idx] == 0xffff)
2201 q_idx = 0;
2204 t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config);
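/*
 * Editorial sketch (hypothetical tables): both lookup inputs are
 * terminator-delimited, e.g.
 *
 *	static const u8 cpus[] = { 0, 1, 2, 3, 0xff };
 *	static const u16 rspq[] = { 0, 1, 0xffff };
 *	t3_config_rss(adap, rss_config, cpus, rspq);
 *
 * The four CPU values and two queue values are replayed until all
 * RSS_TABLE_SIZE entries have been written.
 */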
2208 * t3_read_rss - read the contents of the RSS tables
2209 * @adapter: the adapter
2210 * @lkup: holds the contents of the RSS lookup table
2211 * @map: holds the contents of the RSS map table
2213 * Reads the contents of the receive packet steering tables.
2215 int t3_read_rss(struct adapter *adapter, u8 * lkup, u16 *map)
2217 int i;
2218 u32 val;
2220 if (lkup)
2221 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2222 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE,
2223 0xffff0000 | i);
2224 val = t3_read_reg(adapter, A_TP_RSS_LKP_TABLE);
2225 if (!(val & 0x80000000))
2226 return -EAGAIN;
2227 *lkup++ = val;
2228 *lkup++ = (val >> 8);
2231 if (map)
2232 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2233 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2234 0xffff0000 | i);
2235 val = t3_read_reg(adapter, A_TP_RSS_MAP_TABLE);
2236 if (!(val & 0x80000000))
2237 return -EAGAIN;
2238 *map++ = val;
2240 return 0;
2244 * t3_tp_set_offload_mode - put TP in NIC/offload mode
2245 * @adap: the adapter
2246 * @enable: 1 to select offload mode, 0 for regular NIC
2248 * Switches TP to NIC/offload mode.
2250 void t3_tp_set_offload_mode(struct adapter *adap, int enable)
2252 if (is_offload(adap) || !enable)
2253 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE,
2254 V_NICMODE(!enable));
2258 * pm_num_pages - calculate the number of pages of the payload memory
2259 * @mem_size: the size of the payload memory
2260 * @pg_size: the size of each payload memory page
2262 * Calculate the number of pages, each of the given size, that fit in a
2263 * memory of the specified size, respecting the HW requirement that the
2264 * number of pages must be a multiple of 24.
2266 static inline unsigned int pm_num_pages(unsigned int mem_size,
2267 unsigned int pg_size)
2269 unsigned int n = mem_size / pg_size;
2271 return n - n % 24;
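/*
 * Editorial example: for a 64 MB channel with 16 KB pages,
 * n = 0x4000000 / 0x4000 = 4096 and 4096 % 24 = 16, so pm_num_pages()
 * returns 4080 usable pages.
 */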
2274 #define mem_region(adap, start, size, reg) \
2275 t3_write_reg((adap), A_ ## reg, (start)); \
2276 start += size
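/*
 * Editorial note (not in the original source): mem_region() expands to
 * two statements and advances 'start' by 'size' as a side effect, so
 * the successive invocations in partition_mem() below lay out
 * consecutive regions.  Because it is not wrapped in do { } while (0),
 * it must not be used as the unbraced body of an if/else.
 */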
2279 * partition_mem - partition memory and configure TP memory settings
2280 * @adap: the adapter
2281 * @p: the TP parameters
2283 * Partitions context and payload memory and configures TP's memory
2284 * registers.
2286 static void partition_mem(struct adapter *adap, const struct tp_params *p)
2288 unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5);
2289 unsigned int timers = 0, timers_shift = 22;
2291 if (adap->params.rev > 0) {
2292 if (tids <= 16 * 1024) {
2293 timers = 1;
2294 timers_shift = 16;
2295 } else if (tids <= 64 * 1024) {
2296 timers = 2;
2297 timers_shift = 18;
2298 } else if (tids <= 256 * 1024) {
2299 timers = 3;
2300 timers_shift = 20;
2304 t3_write_reg(adap, A_TP_PMM_SIZE,
2305 p->chan_rx_size | (p->chan_tx_size >> 16));
2307 t3_write_reg(adap, A_TP_PMM_TX_BASE, 0);
2308 t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size);
2309 t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs);
2310 t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX),
2311 V_TXDATAACKIDX(fls(p->tx_pg_size) - 12));
2313 t3_write_reg(adap, A_TP_PMM_RX_BASE, 0);
2314 t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size);
2315 t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs);
2317 pstructs = p->rx_num_pgs + p->tx_num_pgs;
2318 /* Add a bit of headroom and make multiple of 24 */
2319 pstructs += 48;
2320 pstructs -= pstructs % 24;
2321 t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs);
2323 m = tids * TCB_SIZE;
2324 mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR);
2325 mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR);
2326 t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m);
2327 m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22);
2328 mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE);
2329 mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE);
2330 mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE);
2331 mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE);
2333 m = (m + 4095) & ~0xfff;
2334 t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m);
2335 t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m);
2337 tids = (p->cm_size - m - (3 << 20)) / 3072 - 32;
2338 m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
2339 adap->params.mc5.nfilters - adap->params.mc5.nroutes;
2340 if (tids < m)
2341 adap->params.mc5.nservers += m - tids;
2344 static inline void tp_wr_indirect(struct adapter *adap, unsigned int addr,
2345 u32 val)
2347 t3_write_reg(adap, A_TP_PIO_ADDR, addr);
2348 t3_write_reg(adap, A_TP_PIO_DATA, val);
2351 static void tp_config(struct adapter *adap, const struct tp_params *p)
2353 t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU |
2354 F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD |
2355 F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
2356 t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
2357 F_MTUENABLE | V_WINDOWSCALEMODE(1) |
2358 V_TIMESTAMPSMODE(1) | V_SACKMODE(1) | V_SACKRX(1));
2359 t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
2360 V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
2361 V_BYTETHRESHOLD(16384) | V_MSSTHRESHOLD(2) |
2362 F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
2363 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_IPV6ENABLE | F_NICMODE,
2364 F_IPV6ENABLE | F_NICMODE);
2365 t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
2366 t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
2367 t3_set_reg_field(adap, A_TP_PARA_REG6,
2368 adap->params.rev > 0 ? F_ENABLEESND : F_T3A_ENABLEESND,
2369 0);
2371 t3_set_reg_field(adap, A_TP_PC_CONFIG,
2372 F_ENABLEEPCMDAFULL | F_ENABLEOCSPIFULL,
2373 F_TXDEFERENABLE | F_HEARBEATDACK | F_TXCONGESTIONMODE |
2374 F_RXCONGESTIONMODE);
2375 t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL, 0);
2377 if (adap->params.rev > 0) {
2378 tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
2379 t3_set_reg_field(adap, A_TP_PARA_REG3, F_TXPACEAUTO,
2380 F_TXPACEAUTO);
2381 t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID);
2382 t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEAUTOSTRICT);
2383 } else
2384 t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);
2386 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0x12121212);
2387 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0x12121212);
2388 t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0x1212);
2391 /* Desired TP timer resolution in usec */
2392 #define TP_TMR_RES 50
2394 /* TCP timer values in ms */
2395 #define TP_DACK_TIMER 50
2396 #define TP_RTO_MIN 250
2399 * tp_set_timers - set TP timing parameters
2400 * @adap: the adapter to set
2401 * @core_clk: the core clock frequency in Hz
2403 * Set TP's timing parameters, such as the various timer resolutions and
2404 * the TCP timer values.
2406 static void tp_set_timers(struct adapter *adap, unsigned int core_clk)
2408 unsigned int tre = fls(core_clk / (1000000 / TP_TMR_RES)) - 1;
2409 unsigned int dack_re = fls(core_clk / 5000) - 1; /* 200us */
2410 unsigned int tstamp_re = fls(core_clk / 1000); /* 1ms, at least */
2411 unsigned int tps = core_clk >> tre;
2413 t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) |
2414 V_DELAYEDACKRESOLUTION(dack_re) |
2415 V_TIMESTAMPRESOLUTION(tstamp_re));
2416 t3_write_reg(adap, A_TP_DACK_TIMER,
2417 (core_clk >> dack_re) / (1000 / TP_DACK_TIMER));
2418 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100);
2419 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504);
2420 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908);
2421 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c);
2422 t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) |
2423 V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) |
2424 V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
2425 V_KEEPALIVEMAX(9));
2427 #define SECONDS * tps
2429 t3_write_reg(adap, A_TP_MSL, adap->params.rev > 0 ? 0 : 2 SECONDS);
2430 t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN));
2431 t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS);
2432 t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS);
2433 t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS);
2434 t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS);
2435 t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS);
2436 t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS);
2437 t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS);
2439 #undef SECONDS
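/*
 * Editorial example (hypothetical 200 MHz core clock): tre =
 * fls(200000000 / 20000) - 1 = fls(10000) - 1 = 13, so tps =
 * 200000000 >> 13 = 24414 ticks/s, a tick of about 41 us, the coarsest
 * power-of-2 resolution finer than the desired TP_TMR_RES of 50 us.
 * "64 SECONDS" for A_TP_RXT_MAX is then written as 64 * 24414.
 */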
2443 * t3_tp_set_coalescing_size - set receive coalescing size
2444 * @adap: the adapter
2445 * @size: the receive coalescing size
2446 * @psh: whether a set PSH bit should deliver coalesced data
2448 * Set the receive coalescing size and PSH bit handling.
2450 int t3_tp_set_coalescing_size(struct adapter *adap, unsigned int size, int psh)
2452 u32 val;
2454 if (size > MAX_RX_COALESCING_LEN)
2455 return -EINVAL;
2457 val = t3_read_reg(adap, A_TP_PARA_REG3);
2458 val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);
2460 if (size) {
2461 val |= F_RXCOALESCEENABLE;
2462 if (psh)
2463 val |= F_RXCOALESCEPSHEN;
2464 t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
2465 V_MAXRXDATA(MAX_RX_COALESCING_LEN));
2467 t3_write_reg(adap, A_TP_PARA_REG3, val);
2468 return 0;
2472 * t3_tp_set_max_rxsize - set the max receive size
2473 * @adap: the adapter
2474 * @size: the max receive size
2476 * Set TP's max receive size. This is the limit that applies when
2477 * receive coalescing is disabled.
2479 void t3_tp_set_max_rxsize(struct adapter *adap, unsigned int size)
2481 t3_write_reg(adap, A_TP_PARA_REG7,
2482 V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
2485 static void __devinit init_mtus(unsigned short mtus[])
2488 * See draft-mathis-plpmtud-00.txt for the values. The min is 88 so
2489 * it can accommodate max size TCP/IP headers when SACK and timestamps
2490 * are enabled and still have at least 8 bytes of payload.
2492 mtus[0] = 88;
2493 mtus[1] = 256;
2494 mtus[2] = 512;
2495 mtus[3] = 576;
2496 mtus[4] = 808;
2497 mtus[5] = 1024;
2498 mtus[6] = 1280;
2499 mtus[7] = 1492;
2500 mtus[8] = 1500;
2501 mtus[9] = 2002;
2502 mtus[10] = 2048;
2503 mtus[11] = 4096;
2504 mtus[12] = 4352;
2505 mtus[13] = 8192;
2506 mtus[14] = 9000;
2507 mtus[15] = 9600;
2511 * Initial congestion control parameters.
2513 static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
2515 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
2516 a[9] = 2;
2517 a[10] = 3;
2518 a[11] = 4;
2519 a[12] = 5;
2520 a[13] = 6;
2521 a[14] = 7;
2522 a[15] = 8;
2523 a[16] = 9;
2524 a[17] = 10;
2525 a[18] = 14;
2526 a[19] = 17;
2527 a[20] = 21;
2528 a[21] = 25;
2529 a[22] = 30;
2530 a[23] = 35;
2531 a[24] = 45;
2532 a[25] = 60;
2533 a[26] = 80;
2534 a[27] = 100;
2535 a[28] = 200;
2536 a[29] = 300;
2537 a[30] = 400;
2538 a[31] = 500;
2540 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
2541 b[9] = b[10] = 1;
2542 b[11] = b[12] = 2;
2543 b[13] = b[14] = b[15] = b[16] = 3;
2544 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
2545 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
2546 b[28] = b[29] = 6;
2547 b[30] = b[31] = 7;
2550 /* The minimum additive increment value for the congestion control table */
2551 #define CC_MIN_INCR 2U
2554 * t3_load_mtus - write the MTU and congestion control HW tables
2555 * @adap: the adapter
2556 * @mtus: the unrestricted values for the MTU table
2557 * @alpha: the values for the congestion control alpha parameter
2558 * @beta: the values for the congestion control beta parameter
2559 * @mtu_cap: the maximum permitted effective MTU
2561 * Write the MTU table with the supplied MTUs, capping each at @mtu_cap.
2562 * Update the high-speed congestion control table with the supplied alpha,
2563 * beta, and MTUs.
2565 void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS],
2566 unsigned short alpha[NCCTRL_WIN],
2567 unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap)
2569 static const unsigned int avg_pkts[NCCTRL_WIN] = {
2570 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
2571 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
2572 28672, 40960, 57344, 81920, 114688, 163840, 229376
2575 unsigned int i, w;
2577 for (i = 0; i < NMTUS; ++i) {
2578 unsigned int mtu = min(mtus[i], mtu_cap);
2579 unsigned int log2 = fls(mtu);
2581 if (!(mtu & ((1 << log2) >> 2))) /* round */
2582 log2--;
2583 t3_write_reg(adap, A_TP_MTU_TABLE,
2584 (i << 24) | (log2 << 16) | mtu);
2586 for (w = 0; w < NCCTRL_WIN; ++w) {
2587 unsigned int inc;
2589 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
2590 CC_MIN_INCR);
2592 t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
2593 (w << 16) | (beta[w] << 13) | inc);
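/*
 * Editorial example of the log2 rounding above: for mtu = 1500, fls()
 * returns 11; bit 9 (value 512) of 1500 is clear, so log2 is
 * decremented to 10, treating 1500 as closer to 2^10 than to 2^11.
 * For mtu >= 1536 the bit is set and log2 stays 11.
 */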
2599 * t3_read_hw_mtus - returns the values in the HW MTU table
2600 * @adap: the adapter
2601 * @mtus: where to store the HW MTU values
2603 * Reads the HW MTU table.
2605 void t3_read_hw_mtus(struct adapter *adap, unsigned short mtus[NMTUS])
2607 int i;
2609 for (i = 0; i < NMTUS; ++i) {
2610 unsigned int val;
2612 t3_write_reg(adap, A_TP_MTU_TABLE, 0xff000000 | i);
2613 val = t3_read_reg(adap, A_TP_MTU_TABLE);
2614 mtus[i] = val & 0x3fff;
2619 * t3_get_cong_cntl_tab - reads the congestion control table
2620 * @adap: the adapter
2621 * @incr: where to store the additive increment values
2623 * Reads the additive increments programmed into the HW congestion
2624 * control table.
2626 void t3_get_cong_cntl_tab(struct adapter *adap,
2627 unsigned short incr[NMTUS][NCCTRL_WIN])
2629 unsigned int mtu, w;
2631 for (mtu = 0; mtu < NMTUS; ++mtu)
2632 for (w = 0; w < NCCTRL_WIN; ++w) {
2633 t3_write_reg(adap, A_TP_CCTRL_TABLE,
2634 0xffff0000 | (mtu << 5) | w);
2635 incr[mtu][w] = t3_read_reg(adap, A_TP_CCTRL_TABLE) &
2636 0x1fff;
2641 * t3_tp_get_mib_stats - read TP's MIB counters
2642 * @adap: the adapter
2643 * @tps: holds the returned counter values
2645 * Returns the values of TP's MIB counters.
2647 void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps)
2649 t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *) tps,
2650 sizeof(*tps) / sizeof(u32), 0);
2653 #define ulp_region(adap, name, start, len) \
2654 t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \
2655 t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \
2656 (start) + (len) - 1); \
2657 start += len
2659 #define ulptx_region(adap, name, start, len) \
2660 t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \
2661 t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \
2662 (start) + (len) - 1)
2664 static void ulp_config(struct adapter *adap, const struct tp_params *p)
2666 unsigned int m = p->chan_rx_size;
2668 ulp_region(adap, ISCSI, m, p->chan_rx_size / 8);
2669 ulp_region(adap, TDDP, m, p->chan_rx_size / 8);
2670 ulptx_region(adap, TPT, m, p->chan_rx_size / 4);
2671 ulp_region(adap, STAG, m, p->chan_rx_size / 4);
2672 ulp_region(adap, RQ, m, p->chan_rx_size / 4);
2673 ulptx_region(adap, PBL, m, p->chan_rx_size / 4);
2674 ulp_region(adap, PBL, m, p->chan_rx_size / 4);
2675 t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
2678 void t3_config_trace_filter(struct adapter *adapter,
2679 const struct trace_params *tp, int filter_index,
2680 int invert, int enable)
2682 u32 addr, key[4], mask[4];
2684 key[0] = tp->sport | (tp->sip << 16);
2685 key[1] = (tp->sip >> 16) | (tp->dport << 16);
2686 key[2] = tp->dip;
2687 key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20);
2689 mask[0] = tp->sport_mask | (tp->sip_mask << 16);
2690 mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16);
2691 mask[2] = tp->dip_mask;
2692 mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20);
2694 if (invert)
2695 key[3] |= (1 << 29);
2696 if (enable)
2697 key[3] |= (1 << 28);
2699 addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
2700 tp_wr_indirect(adapter, addr++, key[0]);
2701 tp_wr_indirect(adapter, addr++, mask[0]);
2702 tp_wr_indirect(adapter, addr++, key[1]);
2703 tp_wr_indirect(adapter, addr++, mask[1]);
2704 tp_wr_indirect(adapter, addr++, key[2]);
2705 tp_wr_indirect(adapter, addr++, mask[2]);
2706 tp_wr_indirect(adapter, addr++, key[3]);
2707 tp_wr_indirect(adapter, addr, mask[3]);
2708 t3_read_reg(adapter, A_TP_PIO_DATA);
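/*
 * Editorial sketch (hypothetical values, not from the driver): to trace
 * all TCP packets to destination port 80 on the Tx side, a caller would
 * zero a struct trace_params, set tp.proto and tp.dport plus the
 * corresponding mask fields, then call
 *
 *	t3_config_trace_filter(adap, &tp, 0, 0, 1);
 *
 * filter_index 0 selects the Tx key/mask bank (A_TP_TX_TRC_KEY0); any
 * nonzero index selects the Rx bank.
 */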
2712 * t3_config_sched - configure a HW traffic scheduler
2713 * @adap: the adapter
2714 * @kbps: target rate in Kbps
2715 * @sched: the scheduler index
2717 * Configure a HW scheduler for the target rate.
2719 int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched)
2721 unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
2722 unsigned int clk = adap->params.vpd.cclk * 1000;
2723 unsigned int selected_cpt = 0, selected_bpt = 0;
2725 if (kbps > 0) {
2726 kbps *= 125; /* -> bytes */
2727 for (cpt = 1; cpt <= 255; cpt++) {
2728 tps = clk / cpt;
2729 bpt = (kbps + tps / 2) / tps;
2730 if (bpt > 0 && bpt <= 255) {
2731 v = bpt * tps;
2732 delta = v >= kbps ? v - kbps : kbps - v;
2733 if (delta <= mindelta) {
2734 mindelta = delta;
2735 selected_cpt = cpt;
2736 selected_bpt = bpt;
2738 } else if (selected_cpt)
2739 break;
2741 if (!selected_cpt)
2742 return -EINVAL;
2744 t3_write_reg(adap, A_TP_TM_PIO_ADDR,
2745 A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
2746 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
2747 if (sched & 1)
2748 v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
2749 else
2750 v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
2751 t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
2752 return 0;
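/*
 * Editorial example (hypothetical 200 MHz core clock): for kbps =
 * 100000 the target is 100000 * 125 = 12,500,000 bytes/s.  cpt = 16
 * gives tps = 12,500,000 ticks/s and bpt = 1 byte/tick, an exact match
 * (delta = 0).  The loop scans every cpt in 1..255 and keeps the pair
 * with the smallest delta; on ties (delta <= mindelta) the larger cpt
 * wins.
 */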
2755 static int tp_init(struct adapter *adap, const struct tp_params *p)
2757 int busy = 0;
2759 tp_config(adap, p);
2760 t3_set_vlan_accel(adap, 3, 0);
2762 if (is_offload(adap)) {
2763 tp_set_timers(adap, adap->params.vpd.cclk * 1000);
2764 t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE);
2765 busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE,
2766 0, 1000, 5);
2767 if (busy)
2768 CH_ERR(adap, "TP initialization timed out\n");
2771 if (!busy)
2772 t3_write_reg(adap, A_TP_RESET, F_TPRESET);
2773 return busy;
2776 int t3_mps_set_active_ports(struct adapter *adap, unsigned int port_mask)
2778 if (port_mask & ~((1 << adap->params.nports) - 1))
2779 return -EINVAL;
2780 t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE | F_PORT0ACTIVE,
2781 port_mask << S_PORT0ACTIVE);
2782 return 0;
2786 * Perform the bits of HW initialization that are dependent on the number
2787 * of available ports.
2789 static void init_hw_for_avail_ports(struct adapter *adap, int nports)
2791 int i;
2793 if (nports == 1) {
2794 t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
2795 t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
2796 t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_TPTXPORT0EN |
2797 F_PORT0ACTIVE | F_ENFORCEPKT);
2798 t3_write_reg(adap, A_PM1_TX_CFG, 0xc000c000);
2799 } else {
2800 t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
2801 t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
2802 t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
2803 V_D1_WEIGHT(16) | V_D0_WEIGHT(16));
2804 t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN |
2805 F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE |
2806 F_ENFORCEPKT);
2807 t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000);
2808 t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE);
2809 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
2810 V_TX_MOD_QUEUE_REQ_MAP(0xaa));
2811 for (i = 0; i < 16; i++)
2812 t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE,
2813 (i << 16) | 0x1010);
2817 static int calibrate_xgm(struct adapter *adapter)
2819 if (uses_xaui(adapter)) {
2820 unsigned int v, i;
2822 for (i = 0; i < 5; ++i) {
2823 t3_write_reg(adapter, A_XGM_XAUI_IMP, 0);
2824 t3_read_reg(adapter, A_XGM_XAUI_IMP);
2825 msleep(1);
2826 v = t3_read_reg(adapter, A_XGM_XAUI_IMP);
2827 if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) {
2828 t3_write_reg(adapter, A_XGM_XAUI_IMP,
2829 V_XAUIIMP(G_CALIMP(v) >> 2));
2830 return 0;
2833 CH_ERR(adapter, "MAC calibration failed\n");
2834 return -1;
2835 } else {
2836 t3_write_reg(adapter, A_XGM_RGMII_IMP,
2837 V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
2838 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
2839 F_XGM_IMPSETUPDATE);
2841 return 0;
2844 static void calibrate_xgm_t3b(struct adapter *adapter)
2846 if (!uses_xaui(adapter)) {
2847 t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET |
2848 F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
2849 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0);
2850 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0,
2851 F_XGM_IMPSETUPDATE);
2852 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
2853 0);
2854 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0);
2855 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE);
2859 struct mc7_timing_params {
2860 unsigned char ActToPreDly;
2861 unsigned char ActToRdWrDly;
2862 unsigned char PreCyc;
2863 unsigned char RefCyc[5];
2864 unsigned char BkCyc;
2865 unsigned char WrToRdDly;
2866 unsigned char RdToWrDly;
2870 * Write a value to a register and check that the write completed. These
2871 * writes normally complete in a cycle or two, so one read should suffice.
2872 * The very first read exists to flush the posted write to the device.
2874 static int wrreg_wait(struct adapter *adapter, unsigned int addr, u32 val)
2876 t3_write_reg(adapter, addr, val);
2877 t3_read_reg(adapter, addr); /* flush */
2878 if (!(t3_read_reg(adapter, addr) & F_BUSY))
2879 return 0;
2880 CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr);
2881 return -EIO;
2884 static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
2886 static const unsigned int mc7_mode[] = {
2887 0x632, 0x642, 0x652, 0x432, 0x442
2889 static const struct mc7_timing_params mc7_timings[] = {
2890 {12, 3, 4, {20, 28, 34, 52, 0}, 15, 6, 4},
2891 {12, 4, 5, {20, 28, 34, 52, 0}, 16, 7, 4},
2892 {12, 5, 6, {20, 28, 34, 52, 0}, 17, 8, 4},
2893 {9, 3, 4, {15, 21, 26, 39, 0}, 12, 6, 4},
2894 {9, 4, 5, {15, 21, 26, 39, 0}, 13, 7, 4}
2897 u32 val;
2898 unsigned int width, density, slow, attempts;
2899 struct adapter *adapter = mc7->adapter;
2900 const struct mc7_timing_params *p = &mc7_timings[mem_type];
2902 val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
2903 slow = val & F_SLOW;
2904 width = G_WIDTH(val);
2905 density = G_DEN(val);
2907 t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN);
2908 val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
2909 msleep(1);
2911 if (!slow) {
2912 t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN);
2913 t3_read_reg(adapter, mc7->offset + A_MC7_CAL);
2914 msleep(1);
2915 if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) &
2916 (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) {
2917 CH_ERR(adapter, "%s MC7 calibration timed out\n",
2918 mc7->name);
2919 goto out_fail;
2923 t3_write_reg(adapter, mc7->offset + A_MC7_PARM,
2924 V_ACTTOPREDLY(p->ActToPreDly) |
2925 V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) |
2926 V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) |
2927 V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly));
2929 t3_write_reg(adapter, mc7->offset + A_MC7_CFG,
2930 val | F_CLKEN | F_TERM150);
2931 t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
2933 if (!slow)
2934 t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB,
2935 F_DLLENB);
2936 udelay(1);
2938 val = slow ? 3 : 6;
2939 if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
2940 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) ||
2941 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) ||
2942 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
2943 goto out_fail;
2945 if (!slow) {
2946 t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100);
2947 t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLRST, 0);
2948 udelay(5);
2951 if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
2952 wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
2953 wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
2954 wrreg_wait(adapter, mc7->offset + A_MC7_MODE,
2955 mc7_mode[mem_type]) ||
2956 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) ||
2957 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
2958 goto out_fail;
2960 /* clock value is in KHz */
2961 mc7_clock = mc7_clock * 7812 + mc7_clock / 2; /* ns */
2962 mc7_clock /= 1000000; /* KHz->MHz, ns->us */
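/*
 * Editorial note: mc7_clock arrives in kHz, so multiplying by 7812.5
 * (the "* 7812 + / 2" above) and dividing by 10^6 gives the number of
 * memory clocks in one 7.8125 us interval, consistent with the standard
 * 64 ms / 8192-row DDR refresh period; e.g. 200000 kHz yields 1562.
 * That count is what V_PREREFDIV is programmed with.
 */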
2964 t3_write_reg(adapter, mc7->offset + A_MC7_REF,
2965 F_PERREFEN | V_PREREFDIV(mc7_clock));
2966 t3_read_reg(adapter, mc7->offset + A_MC7_REF); /* flush */
2968 t3_write_reg(adapter, mc7->offset + A_MC7_ECC, F_ECCGENEN | F_ECCCHKEN);
2969 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0);
2970 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0);
2971 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END,
2972 (mc7->size << width) - 1);
2973 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1));
2974 t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP); /* flush */
2976 attempts = 50;
2977 do {
2978 msleep(250);
2979 val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);
2980 } while ((val & F_BUSY) && --attempts);
2981 if (val & F_BUSY) {
2982 CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name);
2983 goto out_fail;
2986 /* Enable normal memory accesses. */
2987 t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY);
2988 return 0;
2990 out_fail:
2991 return -1;
2994 static void config_pcie(struct adapter *adap)
2996 static const u16 ack_lat[4][6] = {
2997 {237, 416, 559, 1071, 2095, 4143},
2998 {128, 217, 289, 545, 1057, 2081},
2999 {73, 118, 154, 282, 538, 1050},
3000 {67, 107, 86, 150, 278, 534}
3002 static const u16 rpl_tmr[4][6] = {
3003 {711, 1248, 1677, 3213, 6285, 12429},
3004 {384, 651, 867, 1635, 3171, 6243},
3005 {219, 354, 462, 846, 1614, 3150},
3006 {201, 321, 258, 450, 834, 1602}
3009 u16 val;
3010 unsigned int log2_width, pldsize;
3011 unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;
3013 pci_read_config_word(adap->pdev,
3014 adap->params.pci.pcie_cap_addr + PCI_EXP_DEVCTL,
3015 &val);
3016 pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
3017 pci_read_config_word(adap->pdev,
3018 adap->params.pci.pcie_cap_addr + PCI_EXP_LNKCTL,
3019 &val);
3021 fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
3022 fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
3023 G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
3024 log2_width = fls(adap->params.pci.width) - 1;
3025 acklat = ack_lat[log2_width][pldsize];
3026 if (val & 1) /* check LOsEnable */
3027 acklat += fst_trn_tx * 4;
3028 rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;
3030 if (adap->params.rev == 0)
3031 t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
3032 V_T3A_ACKLAT(M_T3A_ACKLAT),
3033 V_T3A_ACKLAT(acklat));
3034 else
3035 t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
3036 V_ACKLAT(acklat));
3038 t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
3039 V_REPLAYLMT(rpllmt));
3041 t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
3042 t3_set_reg_field(adap, A_PCIE_CFG, F_PCIE_CLIDECEN, F_PCIE_CLIDECEN);
3046 * Initialize and configure T3 HW modules. This performs the
3047 * initialization steps that need to be done once after a card is reset.
3048 * MAC and PHY initialization is handled separately whenever a port is enabled.
3050 * fw_params are passed to FW and their value is platform dependent. Only the
3051 * top 8 bits are available for use, the rest must be 0.
3053 int t3_init_hw(struct adapter *adapter, u32 fw_params)
3055 int err = -EIO, attempts = 100;
3056 const struct vpd_params *vpd = &adapter->params.vpd;
3058 if (adapter->params.rev > 0)
3059 calibrate_xgm_t3b(adapter);
3060 else if (calibrate_xgm(adapter))
3061 goto out_err;
3063 if (vpd->mclk) {
3064 partition_mem(adapter, &adapter->params.tp);
3066 if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
3067 mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
3068 mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
3069 t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
3070 adapter->params.mc5.nfilters,
3071 adapter->params.mc5.nroutes))
3072 goto out_err;
3075 if (tp_init(adapter, &adapter->params.tp))
3076 goto out_err;
3078 t3_tp_set_coalescing_size(adapter,
3079 min(adapter->params.sge.max_pkt_size,
3080 MAX_RX_COALESCING_LEN), 1);
3081 t3_tp_set_max_rxsize(adapter,
3082 min(adapter->params.sge.max_pkt_size, 16384U));
3083 ulp_config(adapter, &adapter->params.tp);
3085 if (is_pcie(adapter))
3086 config_pcie(adapter);
3087 else
3088 t3_set_reg_field(adapter, A_PCIX_CFG, 0, F_CLIDECEN);
3090 t3_write_reg(adapter, A_PM1_RX_CFG, 0xf000f000);
3091 init_hw_for_avail_ports(adapter, adapter->params.nports);
3092 t3_sge_init(adapter, &adapter->params.sge);
3094 t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
3095 t3_write_reg(adapter, A_CIM_BOOT_CFG,
3096 V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
3097 t3_read_reg(adapter, A_CIM_BOOT_CFG); /* flush */
3099 do { /* wait for uP to initialize */
3100 msleep(20);
3101 } while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
3102 if (!attempts)
3103 goto out_err;
3105 err = 0;
3106 out_err:
3107 return err;
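/*
 * Editorial note: the uP-wait loop above polls A_CIM_HOST_ACC_DATA
 * every 20 ms for up to 100 attempts, roughly 2 seconds, before giving
 * up with -EIO.  In the driver this presumably runs only after
 * t3_prep_adapter() below has sized the memories and reset the card.
 */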
3111 * get_pci_mode - determine a card's PCI mode
3112 * @adapter: the adapter
3113 * @p: where to store the PCI settings
3115 * Determines a card's PCI mode and associated parameters, such as speed
3116 * and width.
3118 static void __devinit get_pci_mode(struct adapter *adapter,
3119 struct pci_params *p)
3121 static unsigned short speed_map[] = { 33, 66, 100, 133 };
3122 u32 pci_mode, pcie_cap;
3124 pcie_cap = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
3125 if (pcie_cap) {
3126 u16 val;
3128 p->variant = PCI_VARIANT_PCIE;
3129 p->pcie_cap_addr = pcie_cap;
3130 pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA,
3131 &val);
3132 p->width = (val >> 4) & 0x3f;
3133 return;
3136 pci_mode = t3_read_reg(adapter, A_PCIX_MODE);
3137 p->speed = speed_map[G_PCLKRANGE(pci_mode)];
3138 p->width = (pci_mode & F_64BIT) ? 64 : 32;
3139 pci_mode = G_PCIXINITPAT(pci_mode);
3140 if (pci_mode == 0)
3141 p->variant = PCI_VARIANT_PCI;
3142 else if (pci_mode < 4)
3143 p->variant = PCI_VARIANT_PCIX_MODE1_PARITY;
3144 else if (pci_mode < 8)
3145 p->variant = PCI_VARIANT_PCIX_MODE1_ECC;
3146 else
3147 p->variant = PCI_VARIANT_PCIX_266_MODE2;
3151 * init_link_config - initialize a link's SW state
3152 * @lc: structure holding the link state
3153 * @ai: information about the current card
3155 * Initializes the SW state maintained for each link, including the link's
3156 * capabilities and default speed/duplex/flow-control/autonegotiation
3157 * settings.
3159 static void __devinit init_link_config(struct link_config *lc,
3160 unsigned int caps)
3162 lc->supported = caps;
3163 lc->requested_speed = lc->speed = SPEED_INVALID;
3164 lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
3165 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
3166 if (lc->supported & SUPPORTED_Autoneg) {
3167 lc->advertising = lc->supported;
3168 lc->autoneg = AUTONEG_ENABLE;
3169 lc->requested_fc |= PAUSE_AUTONEG;
3170 } else {
3171 lc->advertising = 0;
3172 lc->autoneg = AUTONEG_DISABLE;
3177 * mc7_calc_size - calculate MC7 memory size
3178 * @cfg: the MC7 configuration
3180 * Calculates the size of an MC7 memory in bytes from the value of its
3181 * configuration register.
3183 static unsigned int __devinit mc7_calc_size(u32 cfg)
3185 unsigned int width = G_WIDTH(cfg);
3186 unsigned int banks = !!(cfg & F_BKS) + 1;
3187 unsigned int org = !!(cfg & F_ORG) + 1;
3188 unsigned int density = G_DEN(cfg);
3189 unsigned int MBs = ((256 << density) * banks) / (org << width);
3191 return MBs << 20;
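/*
 * Editorial example (hypothetical cfg fields): with density = 1, two
 * banks, org = 1 and width = 2, MBs = ((256 << 1) * 2) / (1 << 2) =
 * 256, a 256 MB part; the final << 20 converts MB to bytes.
 */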
3194 static void __devinit mc7_prep(struct adapter *adapter, struct mc7 *mc7,
3195 unsigned int base_addr, const char *name)
3197 u32 cfg;
3199 mc7->adapter = adapter;
3200 mc7->name = name;
3201 mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
3202 cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
3203 mc7->size = mc7_calc_size(cfg);
3204 mc7->width = G_WIDTH(cfg);
3207 void mac_prep(struct cmac *mac, struct adapter *adapter, int index)
3209 mac->adapter = adapter;
3210 mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index;
3211 mac->nucast = 1;
3213 if (adapter->params.rev == 0 && uses_xaui(adapter)) {
3214 t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset,
3215 is_10G(adapter) ? 0x2901c04 : 0x2301c04);
3216 t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset,
3217 F_ENRGMII, 0);
3221 void early_hw_init(struct adapter *adapter, const struct adapter_info *ai)
3223 u32 val = V_PORTSPEED(is_10G(adapter) ? 3 : 2);
3225 mi1_init(adapter, ai);
3226 t3_write_reg(adapter, A_I2C_CFG, /* set for 80KHz */
3227 V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
3228 t3_write_reg(adapter, A_T3DBG_GPIO_EN,
3229 ai->gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
3231 if (adapter->params.rev == 0 || !uses_xaui(adapter))
3232 val |= F_ENRGMII;
3234 /* Enable MAC clocks so we can access the registers */
3235 t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3236 t3_read_reg(adapter, A_XGM_PORT_CFG);
3238 val |= F_CLKDIVRESET_;
3239 t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3240 t3_read_reg(adapter, A_XGM_PORT_CFG);
3241 t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val);
3242 t3_read_reg(adapter, A_XGM_PORT_CFG);
3246 * Reset the adapter. PCIe cards lose their config space during reset, PCI-X
3247 * ones don't.
3249 int t3_reset_adapter(struct adapter *adapter)
3251 int i;
3252 uint16_t devid = 0;
3254 if (is_pcie(adapter))
3255 pci_save_state(adapter->pdev);
3256 t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);
3259 * Delay to give the device some time to reset fully.
3260 * XXX The delay time should be modified.
3262 for (i = 0; i < 10; i++) {
3263 msleep(50);
3264 pci_read_config_word(adapter->pdev, 0x00, &devid);
3265 if (devid == 0x1425)
3266 break;
3269 if (devid != 0x1425)
3270 return -1;
3272 if (is_pcie(adapter))
3273 pci_restore_state(adapter->pdev);
3274 return 0;
3278 * Initialize adapter SW state for the various HW modules, set initial values
3279 * for some adapter tunables, take PHYs out of reset, and initialize the MDIO
3280 * interface.
3282 int __devinit t3_prep_adapter(struct adapter *adapter,
3283 const struct adapter_info *ai, int reset)
3285 int ret;
3286 unsigned int i, j = 0;
3288 get_pci_mode(adapter, &adapter->params.pci);
3290 adapter->params.info = ai;
3291 adapter->params.nports = ai->nports;
3292 adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
3293 adapter->params.linkpoll_period = 0;
3294 adapter->params.stats_update_period = is_10G(adapter) ?
3295 MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
3296 adapter->params.pci.vpd_cap_addr =
3297 pci_find_capability(adapter->pdev, PCI_CAP_ID_VPD);
3298 ret = get_vpd_params(adapter, &adapter->params.vpd);
3299 if (ret < 0)
3300 return ret;
3302 if (reset && t3_reset_adapter(adapter))
3303 return -1;
3305 t3_sge_prep(adapter, &adapter->params.sge);
3307 if (adapter->params.vpd.mclk) {
3308 struct tp_params *p = &adapter->params.tp;
3310 mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX");
3311 mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
3312 mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");
3314 p->nchan = ai->nports;
3315 p->pmrx_size = t3_mc7_size(&adapter->pmrx);
3316 p->pmtx_size = t3_mc7_size(&adapter->pmtx);
3317 p->cm_size = t3_mc7_size(&adapter->cm);
3318 p->chan_rx_size = p->pmrx_size / 2; /* only 1 Rx channel */
3319 p->chan_tx_size = p->pmtx_size / p->nchan;
3320 p->rx_pg_size = 64 * 1024;
3321 p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
3322 p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
3323 p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
3324 p->ntimer_qs = p->cm_size >= (128 << 20) ||
3325 adapter->params.rev > 0 ? 12 : 6;
3327 adapter->params.mc5.nservers = DEFAULT_NSERVERS;
3328 adapter->params.mc5.nfilters = adapter->params.rev > 0 ?
3329 DEFAULT_NFILTERS : 0;
3330 adapter->params.mc5.nroutes = 0;
3331 t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT);
3333 init_mtus(adapter->params.mtus);
3334 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
3337 early_hw_init(adapter, ai);
3339 for_each_port(adapter, i) {
3340 u8 hw_addr[6];
3341 struct port_info *p = adap2pinfo(adapter, i);
3343 while (!adapter->params.vpd.port_type[j])
3344 ++j;
3346 p->port_type = &port_types[adapter->params.vpd.port_type[j]];
3347 p->port_type->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
3348 ai->mdio_ops);
3349 mac_prep(&p->mac, adapter, j);
3350 ++j;
3353 * The VPD EEPROM stores the base Ethernet address for the
3354 * card. A port's address is derived from the base by adding
3355 * the port's index to the base's low octet.
3357 memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
3358 hw_addr[5] = adapter->params.vpd.eth_base[5] + i;
3360 memcpy(adapter->port[i]->dev_addr, hw_addr,
3361 ETH_ALEN);
3362 memcpy(adapter->port[i]->perm_addr, hw_addr,
3363 ETH_ALEN);
3364 init_link_config(&p->link_config, p->port_type->caps);
3365 p->phy.ops->power_down(&p->phy, 1);
3366 if (!(p->port_type->caps & SUPPORTED_IRQ))
3367 adapter->params.linkpoll_period = 10;
3370 return 0;
3373 void t3_led_ready(struct adapter *adapter)
3375 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
3376 F_GPIO0_OUT_VAL);