cxgb4: add a missing error interrupt
[linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git] / drivers / net / cxgb4 / t4_hw.c
blob5c058ea36cd93bf9403f86ee6ceb7f4562accce4
1 /*
2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
4 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
35 #include <linux/init.h>
36 #include <linux/delay.h>
37 #include "cxgb4.h"
38 #include "t4_regs.h"
39 #include "t4fw_api.h"
41 /**
42 * t4_wait_op_done_val - wait until an operation is completed
43 * @adapter: the adapter performing the operation
44 * @reg: the register to check for completion
45 * @mask: a single-bit field within @reg that indicates completion
46 * @polarity: the value of the field when the operation is completed
47 * @attempts: number of check iterations
48 * @delay: delay in usecs between iterations
49 * @valp: where to store the value of the register at completion time
51 * Wait until an operation is completed by checking a bit in a register
52 * up to @attempts times. If @valp is not NULL the value of the register
53 * at the time it indicated completion is stored there. Returns 0 if the
54 * operation completes and -EAGAIN otherwise.
56 static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
57 int polarity, int attempts, int delay, u32 *valp)
59 while (1) {
60 u32 val = t4_read_reg(adapter, reg);
62 if (!!(val & mask) == polarity) {
63 if (valp)
64 *valp = val;
65 return 0;
67 if (--attempts == 0)
68 return -EAGAIN;
69 if (delay)
70 udelay(delay);
74 static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
75 int polarity, int attempts, int delay)
77 return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
78 delay, NULL);
81 /**
82 * t4_set_reg_field - set a register field to a value
83 * @adapter: the adapter to program
84 * @addr: the register address
85 * @mask: specifies the portion of the register to modify
86 * @val: the new value for the register field
88 * Sets a register field specified by the supplied mask to the
89 * given value.
91 void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
92 u32 val)
94 u32 v = t4_read_reg(adapter, addr) & ~mask;
96 t4_write_reg(adapter, addr, v | val);
97 (void) t4_read_reg(adapter, addr); /* flush */
101 * t4_read_indirect - read indirectly addressed registers
102 * @adap: the adapter
103 * @addr_reg: register holding the indirect address
104 * @data_reg: register holding the value of the indirect register
105 * @vals: where the read register values are stored
106 * @nregs: how many indirect registers to read
107 * @start_idx: index of first indirect register to read
109 * Reads registers that are accessed indirectly through an address/data
110 * register pair.
112 static void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
113 unsigned int data_reg, u32 *vals,
114 unsigned int nregs, unsigned int start_idx)
116 while (nregs--) {
117 t4_write_reg(adap, addr_reg, start_idx);
118 *vals++ = t4_read_reg(adap, data_reg);
119 start_idx++;
#if 0
/**
 *	t4_write_indirect - write indirectly addressed registers
 *	@adap: the adapter
 *	@addr_reg: register holding the indirect addresses
 *	@data_reg: register holding the value for the indirect registers
 *	@vals: values to write
 *	@nregs: how many indirect registers to write
 *	@start_idx: address of first indirect register to write
 *
 *	Writes a sequential block of registers that are accessed indirectly
 *	through an address/data register pair.
 */
static void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
			      unsigned int data_reg, const u32 *vals,
			      unsigned int nregs, unsigned int start_idx)
{
	unsigned int i;

	for (i = 0; i < nregs; i++) {
		t4_write_reg(adap, addr_reg, start_idx + i);
		t4_write_reg(adap, data_reg, vals[i]);
	}
}
#endif
148 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
150 static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
151 u32 mbox_addr)
153 for ( ; nflit; nflit--, mbox_addr += 8)
154 *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
158 * Handle a FW assertion reported in a mailbox.
160 static void fw_asrt(struct adapter *adap, u32 mbox_addr)
162 struct fw_debug_cmd asrt;
164 get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
165 dev_alert(adap->pdev_dev,
166 "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
167 asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line),
168 ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y));
171 static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg)
173 dev_err(adap->pdev_dev,
174 "mbox %d: %llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
175 (unsigned long long)t4_read_reg64(adap, data_reg),
176 (unsigned long long)t4_read_reg64(adap, data_reg + 8),
177 (unsigned long long)t4_read_reg64(adap, data_reg + 16),
178 (unsigned long long)t4_read_reg64(adap, data_reg + 24),
179 (unsigned long long)t4_read_reg64(adap, data_reg + 32),
180 (unsigned long long)t4_read_reg64(adap, data_reg + 40),
181 (unsigned long long)t4_read_reg64(adap, data_reg + 48),
182 (unsigned long long)t4_read_reg64(adap, data_reg + 56));
186 * t4_wr_mbox_meat - send a command to FW through the given mailbox
187 * @adap: the adapter
188 * @mbox: index of the mailbox to use
189 * @cmd: the command to write
190 * @size: command length in bytes
191 * @rpl: where to optionally store the reply
192 * @sleep_ok: if true we may sleep while awaiting command completion
194 * Sends the given command to FW through the selected mailbox and waits
195 * for the FW to execute the command. If @rpl is not %NULL it is used to
196 * store the FW's reply to the command. The command and its optional
197 * reply are of the same length. FW can take up to %FW_CMD_MAX_TIMEOUT ms
198 * to respond. @sleep_ok determines whether we may sleep while awaiting
199 * the response. If sleeping is allowed we use progressive backoff
200 * otherwise we spin.
202 * The return value is 0 on success or a negative errno on failure. A
203 * failure can happen either because we are not able to execute the
204 * command or FW executes it but signals an error. In the latter case
205 * the return value is the error code indicated by FW (negated).
207 int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
208 void *rpl, bool sleep_ok)
210 static int delay[] = {
211 1, 1, 3, 5, 10, 10, 20, 50, 100, 200
214 u32 v;
215 u64 res;
216 int i, ms, delay_idx;
217 const __be64 *p = cmd;
218 u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA);
219 u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL);
221 if ((size & 15) || size > MBOX_LEN)
222 return -EINVAL;
225 * If the device is off-line, as in EEH, commands will time out.
226 * Fail them early so we don't waste time waiting.
228 if (adap->pdev->error_state != pci_channel_io_normal)
229 return -EIO;
231 v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
232 for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
233 v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
235 if (v != MBOX_OWNER_DRV)
236 return v ? -EBUSY : -ETIMEDOUT;
238 for (i = 0; i < size; i += 8)
239 t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));
241 t4_write_reg(adap, ctl_reg, MBMSGVALID | MBOWNER(MBOX_OWNER_FW));
242 t4_read_reg(adap, ctl_reg); /* flush write */
244 delay_idx = 0;
245 ms = delay[0];
247 for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
248 if (sleep_ok) {
249 ms = delay[delay_idx]; /* last element may repeat */
250 if (delay_idx < ARRAY_SIZE(delay) - 1)
251 delay_idx++;
252 msleep(ms);
253 } else
254 mdelay(ms);
256 v = t4_read_reg(adap, ctl_reg);
257 if (MBOWNER_GET(v) == MBOX_OWNER_DRV) {
258 if (!(v & MBMSGVALID)) {
259 t4_write_reg(adap, ctl_reg, 0);
260 continue;
263 res = t4_read_reg64(adap, data_reg);
264 if (FW_CMD_OP_GET(res >> 32) == FW_DEBUG_CMD) {
265 fw_asrt(adap, data_reg);
266 res = FW_CMD_RETVAL(EIO);
267 } else if (rpl)
268 get_mbox_rpl(adap, rpl, size / 8, data_reg);
270 if (FW_CMD_RETVAL_GET((int)res))
271 dump_mbox(adap, mbox, data_reg);
272 t4_write_reg(adap, ctl_reg, 0);
273 return -FW_CMD_RETVAL_GET((int)res);
277 dump_mbox(adap, mbox, data_reg);
278 dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
279 *(const u8 *)cmd, mbox);
280 return -ETIMEDOUT;
284 * t4_mc_read - read from MC through backdoor accesses
285 * @adap: the adapter
286 * @addr: address of first byte requested
287 * @data: 64 bytes of data containing the requested address
288 * @ecc: where to store the corresponding 64-bit ECC word
290 * Read 64 bytes of data from MC starting at a 64-byte-aligned address
291 * that covers the requested address @addr. If @parity is not %NULL it
292 * is assigned the 64-bit ECC word for the read data.
294 int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *ecc)
296 int i;
298 if (t4_read_reg(adap, MC_BIST_CMD) & START_BIST)
299 return -EBUSY;
300 t4_write_reg(adap, MC_BIST_CMD_ADDR, addr & ~0x3fU);
301 t4_write_reg(adap, MC_BIST_CMD_LEN, 64);
302 t4_write_reg(adap, MC_BIST_DATA_PATTERN, 0xc);
303 t4_write_reg(adap, MC_BIST_CMD, BIST_OPCODE(1) | START_BIST |
304 BIST_CMD_GAP(1));
305 i = t4_wait_op_done(adap, MC_BIST_CMD, START_BIST, 0, 10, 1);
306 if (i)
307 return i;
309 #define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA, i)
311 for (i = 15; i >= 0; i--)
312 *data++ = htonl(t4_read_reg(adap, MC_DATA(i)));
313 if (ecc)
314 *ecc = t4_read_reg64(adap, MC_DATA(16));
315 #undef MC_DATA
316 return 0;
320 * t4_edc_read - read from EDC through backdoor accesses
321 * @adap: the adapter
322 * @idx: which EDC to access
323 * @addr: address of first byte requested
324 * @data: 64 bytes of data containing the requested address
325 * @ecc: where to store the corresponding 64-bit ECC word
327 * Read 64 bytes of data from EDC starting at a 64-byte-aligned address
328 * that covers the requested address @addr. If @parity is not %NULL it
329 * is assigned the 64-bit ECC word for the read data.
331 int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
333 int i;
335 idx *= EDC_STRIDE;
336 if (t4_read_reg(adap, EDC_BIST_CMD + idx) & START_BIST)
337 return -EBUSY;
338 t4_write_reg(adap, EDC_BIST_CMD_ADDR + idx, addr & ~0x3fU);
339 t4_write_reg(adap, EDC_BIST_CMD_LEN + idx, 64);
340 t4_write_reg(adap, EDC_BIST_DATA_PATTERN + idx, 0xc);
341 t4_write_reg(adap, EDC_BIST_CMD + idx,
342 BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST);
343 i = t4_wait_op_done(adap, EDC_BIST_CMD + idx, START_BIST, 0, 10, 1);
344 if (i)
345 return i;
347 #define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA, i) + idx)
349 for (i = 15; i >= 0; i--)
350 *data++ = htonl(t4_read_reg(adap, EDC_DATA(i)));
351 if (ecc)
352 *ecc = t4_read_reg64(adap, EDC_DATA(16));
353 #undef EDC_DATA
354 return 0;
358 * Partial EEPROM Vital Product Data structure. Includes only the ID and
359 * VPD-R header.
361 struct t4_vpd_hdr {
362 u8 id_tag;
363 u8 id_len[2];
364 u8 id_data[ID_LEN];
365 u8 vpdr_tag;
366 u8 vpdr_len[2];
369 #define EEPROM_STAT_ADDR 0x7bfc
370 #define VPD_BASE 0
371 #define VPD_LEN 512
374 * t4_seeprom_wp - enable/disable EEPROM write protection
375 * @adapter: the adapter
376 * @enable: whether to enable or disable write protection
378 * Enables or disables write protection on the serial EEPROM.
380 int t4_seeprom_wp(struct adapter *adapter, bool enable)
382 unsigned int v = enable ? 0xc : 0;
383 int ret = pci_write_vpd(adapter->pdev, EEPROM_STAT_ADDR, 4, &v);
384 return ret < 0 ? ret : 0;
388 * get_vpd_params - read VPD parameters from VPD EEPROM
389 * @adapter: adapter to read
390 * @p: where to store the parameters
392 * Reads card parameters stored in VPD EEPROM.
394 static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
396 int i, ret;
397 int ec, sn, v2;
398 u8 vpd[VPD_LEN], csum;
399 unsigned int vpdr_len;
400 const struct t4_vpd_hdr *v;
402 ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), vpd);
403 if (ret < 0)
404 return ret;
406 v = (const struct t4_vpd_hdr *)vpd;
407 vpdr_len = pci_vpd_lrdt_size(&v->vpdr_tag);
408 if (vpdr_len + sizeof(struct t4_vpd_hdr) > VPD_LEN) {
409 dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len);
410 return -EINVAL;
413 #define FIND_VPD_KW(var, name) do { \
414 var = pci_vpd_find_info_keyword(&v->id_tag, sizeof(struct t4_vpd_hdr), \
415 vpdr_len, name); \
416 if (var < 0) { \
417 dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \
418 return -EINVAL; \
420 var += PCI_VPD_INFO_FLD_HDR_SIZE; \
421 } while (0)
423 FIND_VPD_KW(i, "RV");
424 for (csum = 0; i >= 0; i--)
425 csum += vpd[i];
427 if (csum) {
428 dev_err(adapter->pdev_dev,
429 "corrupted VPD EEPROM, actual csum %u\n", csum);
430 return -EINVAL;
433 FIND_VPD_KW(ec, "EC");
434 FIND_VPD_KW(sn, "SN");
435 FIND_VPD_KW(v2, "V2");
436 #undef FIND_VPD_KW
438 p->cclk = simple_strtoul(vpd + v2, NULL, 10);
439 memcpy(p->id, v->id_data, ID_LEN);
440 strim(p->id);
441 memcpy(p->ec, vpd + ec, EC_LEN);
442 strim(p->ec);
443 i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
444 memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
445 strim(p->sn);
446 return 0;
/* serial flash and firmware constants */
enum {
	SF_ATTEMPTS = 10,             /* max retries for SF operations */

	/* flash command opcodes */
	SF_PROG_PAGE    = 2,          /* program page */
	SF_WR_DISABLE   = 4,          /* disable writes */
	SF_RD_STATUS    = 5,          /* read status register */
	SF_WR_ENABLE    = 6,          /* enable writes */
	SF_RD_DATA_FAST = 0xb,        /* read flash */
	SF_RD_ID        = 0x9f,       /* read ID */
	SF_ERASE_SECTOR = 0xd8,       /* erase sector */

	FW_MAX_SIZE = 512 * 1024,     /* max firmware image size */
};
466 * sf1_read - read data from the serial flash
467 * @adapter: the adapter
468 * @byte_cnt: number of bytes to read
469 * @cont: whether another operation will be chained
470 * @lock: whether to lock SF for PL access only
471 * @valp: where to store the read data
473 * Reads up to 4 bytes of data from the serial flash. The location of
474 * the read needs to be specified prior to calling this by issuing the
475 * appropriate commands to the serial flash.
477 static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
478 int lock, u32 *valp)
480 int ret;
482 if (!byte_cnt || byte_cnt > 4)
483 return -EINVAL;
484 if (t4_read_reg(adapter, SF_OP) & BUSY)
485 return -EBUSY;
486 cont = cont ? SF_CONT : 0;
487 lock = lock ? SF_LOCK : 0;
488 t4_write_reg(adapter, SF_OP, lock | cont | BYTECNT(byte_cnt - 1));
489 ret = t4_wait_op_done(adapter, SF_OP, BUSY, 0, SF_ATTEMPTS, 5);
490 if (!ret)
491 *valp = t4_read_reg(adapter, SF_DATA);
492 return ret;
496 * sf1_write - write data to the serial flash
497 * @adapter: the adapter
498 * @byte_cnt: number of bytes to write
499 * @cont: whether another operation will be chained
500 * @lock: whether to lock SF for PL access only
501 * @val: value to write
503 * Writes up to 4 bytes of data to the serial flash. The location of
504 * the write needs to be specified prior to calling this by issuing the
505 * appropriate commands to the serial flash.
507 static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
508 int lock, u32 val)
510 if (!byte_cnt || byte_cnt > 4)
511 return -EINVAL;
512 if (t4_read_reg(adapter, SF_OP) & BUSY)
513 return -EBUSY;
514 cont = cont ? SF_CONT : 0;
515 lock = lock ? SF_LOCK : 0;
516 t4_write_reg(adapter, SF_DATA, val);
517 t4_write_reg(adapter, SF_OP, lock |
518 cont | BYTECNT(byte_cnt - 1) | OP_WR);
519 return t4_wait_op_done(adapter, SF_OP, BUSY, 0, SF_ATTEMPTS, 5);
523 * flash_wait_op - wait for a flash operation to complete
524 * @adapter: the adapter
525 * @attempts: max number of polls of the status register
526 * @delay: delay between polls in ms
528 * Wait for a flash operation to complete by polling the status register.
530 static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
532 int ret;
533 u32 status;
535 while (1) {
536 if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
537 (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
538 return ret;
539 if (!(status & 1))
540 return 0;
541 if (--attempts == 0)
542 return -EAGAIN;
543 if (delay)
544 msleep(delay);
549 * t4_read_flash - read words from serial flash
550 * @adapter: the adapter
551 * @addr: the start address for the read
552 * @nwords: how many 32-bit words to read
553 * @data: where to store the read data
554 * @byte_oriented: whether to store data as bytes or as words
556 * Read the specified number of 32-bit words from the serial flash.
557 * If @byte_oriented is set the read data is stored as a byte array
558 * (i.e., big-endian), otherwise as 32-bit words in the platform's
559 * natural endianess.
561 static int t4_read_flash(struct adapter *adapter, unsigned int addr,
562 unsigned int nwords, u32 *data, int byte_oriented)
564 int ret;
566 if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
567 return -EINVAL;
569 addr = swab32(addr) | SF_RD_DATA_FAST;
571 if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
572 (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
573 return ret;
575 for ( ; nwords; nwords--, data++) {
576 ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
577 if (nwords == 1)
578 t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
579 if (ret)
580 return ret;
581 if (byte_oriented)
582 *data = htonl(*data);
584 return 0;
588 * t4_write_flash - write up to a page of data to the serial flash
589 * @adapter: the adapter
590 * @addr: the start address to write
591 * @n: length of data to write in bytes
592 * @data: the data to write
594 * Writes up to a page of data (256 bytes) to the serial flash starting
595 * at the given address. All the data must be written to the same page.
597 static int t4_write_flash(struct adapter *adapter, unsigned int addr,
598 unsigned int n, const u8 *data)
600 int ret;
601 u32 buf[64];
602 unsigned int i, c, left, val, offset = addr & 0xff;
604 if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
605 return -EINVAL;
607 val = swab32(addr) | SF_PROG_PAGE;
609 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
610 (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
611 goto unlock;
613 for (left = n; left; left -= c) {
614 c = min(left, 4U);
615 for (val = 0, i = 0; i < c; ++i)
616 val = (val << 8) + *data++;
618 ret = sf1_write(adapter, c, c != left, 1, val);
619 if (ret)
620 goto unlock;
622 ret = flash_wait_op(adapter, 8, 1);
623 if (ret)
624 goto unlock;
626 t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
628 /* Read the page to verify the write succeeded */
629 ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
630 if (ret)
631 return ret;
633 if (memcmp(data - n, (u8 *)buf + offset, n)) {
634 dev_err(adapter->pdev_dev,
635 "failed to correctly write the flash page at %#x\n",
636 addr);
637 return -EIO;
639 return 0;
641 unlock:
642 t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
643 return ret;
647 * get_fw_version - read the firmware version
648 * @adapter: the adapter
649 * @vers: where to place the version
651 * Reads the FW version from flash.
653 static int get_fw_version(struct adapter *adapter, u32 *vers)
655 return t4_read_flash(adapter, adapter->params.sf_fw_start +
656 offsetof(struct fw_hdr, fw_ver), 1, vers, 0);
660 * get_tp_version - read the TP microcode version
661 * @adapter: the adapter
662 * @vers: where to place the version
664 * Reads the TP microcode version from flash.
666 static int get_tp_version(struct adapter *adapter, u32 *vers)
668 return t4_read_flash(adapter, adapter->params.sf_fw_start +
669 offsetof(struct fw_hdr, tp_microcode_ver),
670 1, vers, 0);
674 * t4_check_fw_version - check if the FW is compatible with this driver
675 * @adapter: the adapter
677 * Checks if an adapter's FW is compatible with the driver. Returns 0
678 * if there's exact match, a negative error if the version could not be
679 * read or there's a major version mismatch, and a positive value if the
680 * expected major version is found but there's a minor version mismatch.
682 int t4_check_fw_version(struct adapter *adapter)
684 u32 api_vers[2];
685 int ret, major, minor, micro;
687 ret = get_fw_version(adapter, &adapter->params.fw_vers);
688 if (!ret)
689 ret = get_tp_version(adapter, &adapter->params.tp_vers);
690 if (!ret)
691 ret = t4_read_flash(adapter, adapter->params.sf_fw_start +
692 offsetof(struct fw_hdr, intfver_nic),
693 2, api_vers, 1);
694 if (ret)
695 return ret;
697 major = FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers);
698 minor = FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers);
699 micro = FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers);
700 memcpy(adapter->params.api_vers, api_vers,
701 sizeof(adapter->params.api_vers));
703 if (major != FW_VERSION_MAJOR) { /* major mismatch - fail */
704 dev_err(adapter->pdev_dev,
705 "card FW has major version %u, driver wants %u\n",
706 major, FW_VERSION_MAJOR);
707 return -EINVAL;
710 if (minor == FW_VERSION_MINOR && micro == FW_VERSION_MICRO)
711 return 0; /* perfect match */
713 /* Minor/micro version mismatch. Report it but often it's OK. */
714 return 1;
718 * t4_flash_erase_sectors - erase a range of flash sectors
719 * @adapter: the adapter
720 * @start: the first sector to erase
721 * @end: the last sector to erase
723 * Erases the sectors in the given inclusive range.
725 static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
727 int ret = 0;
729 while (start <= end) {
730 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
731 (ret = sf1_write(adapter, 4, 0, 1,
732 SF_ERASE_SECTOR | (start << 8))) != 0 ||
733 (ret = flash_wait_op(adapter, 14, 500)) != 0) {
734 dev_err(adapter->pdev_dev,
735 "erase of flash sector %d failed, error %d\n",
736 start, ret);
737 break;
739 start++;
741 t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
742 return ret;
746 * t4_load_fw - download firmware
747 * @adap: the adapter
748 * @fw_data: the firmware image to write
749 * @size: image size
751 * Write the supplied firmware image to the card's serial flash.
753 int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
755 u32 csum;
756 int ret, addr;
757 unsigned int i;
758 u8 first_page[SF_PAGE_SIZE];
759 const u32 *p = (const u32 *)fw_data;
760 const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
761 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
762 unsigned int fw_img_start = adap->params.sf_fw_start;
763 unsigned int fw_start_sec = fw_img_start / sf_sec_size;
765 if (!size) {
766 dev_err(adap->pdev_dev, "FW image has no data\n");
767 return -EINVAL;
769 if (size & 511) {
770 dev_err(adap->pdev_dev,
771 "FW image size not multiple of 512 bytes\n");
772 return -EINVAL;
774 if (ntohs(hdr->len512) * 512 != size) {
775 dev_err(adap->pdev_dev,
776 "FW image size differs from size in FW header\n");
777 return -EINVAL;
779 if (size > FW_MAX_SIZE) {
780 dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
781 FW_MAX_SIZE);
782 return -EFBIG;
785 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
786 csum += ntohl(p[i]);
788 if (csum != 0xffffffff) {
789 dev_err(adap->pdev_dev,
790 "corrupted firmware image, checksum %#x\n", csum);
791 return -EINVAL;
794 i = DIV_ROUND_UP(size, sf_sec_size); /* # of sectors spanned */
795 ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
796 if (ret)
797 goto out;
800 * We write the correct version at the end so the driver can see a bad
801 * version if the FW write fails. Start by writing a copy of the
802 * first page with a bad version.
804 memcpy(first_page, fw_data, SF_PAGE_SIZE);
805 ((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
806 ret = t4_write_flash(adap, fw_img_start, SF_PAGE_SIZE, first_page);
807 if (ret)
808 goto out;
810 addr = fw_img_start;
811 for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
812 addr += SF_PAGE_SIZE;
813 fw_data += SF_PAGE_SIZE;
814 ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
815 if (ret)
816 goto out;
819 ret = t4_write_flash(adap,
820 fw_img_start + offsetof(struct fw_hdr, fw_ver),
821 sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
822 out:
823 if (ret)
824 dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
825 ret);
826 return ret;
829 #define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
830 FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG)
833 * t4_link_start - apply link configuration to MAC/PHY
834 * @phy: the PHY to setup
835 * @mac: the MAC to setup
836 * @lc: the requested link configuration
838 * Set up a port's MAC and PHY according to a desired link configuration.
839 * - If the PHY can auto-negotiate first decide what to advertise, then
840 * enable/disable auto-negotiation as desired, and reset.
841 * - If the PHY does not auto-negotiate just reset it.
842 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
843 * otherwise do it later based on the outcome of auto-negotiation.
845 int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
846 struct link_config *lc)
848 struct fw_port_cmd c;
849 unsigned int fc = 0, mdi = FW_PORT_MDI(FW_PORT_MDI_AUTO);
851 lc->link_ok = 0;
852 if (lc->requested_fc & PAUSE_RX)
853 fc |= FW_PORT_CAP_FC_RX;
854 if (lc->requested_fc & PAUSE_TX)
855 fc |= FW_PORT_CAP_FC_TX;
857 memset(&c, 0, sizeof(c));
858 c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
859 FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
860 c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
861 FW_LEN16(c));
863 if (!(lc->supported & FW_PORT_CAP_ANEG)) {
864 c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc);
865 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
866 } else if (lc->autoneg == AUTONEG_DISABLE) {
867 c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi);
868 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
869 } else
870 c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi);
872 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
876 * t4_restart_aneg - restart autonegotiation
877 * @adap: the adapter
878 * @mbox: mbox to use for the FW command
879 * @port: the port id
881 * Restarts autonegotiation for the selected port.
883 int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
885 struct fw_port_cmd c;
887 memset(&c, 0, sizeof(c));
888 c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
889 FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
890 c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
891 FW_LEN16(c));
892 c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
893 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
struct intr_info {
	unsigned int mask;       /* bits to check in interrupt status */
	const char *msg;         /* message to print or NULL */
	short stat_idx;          /* stat counter to increment or -1 */
	unsigned short fatal;    /* whether the condition reported is fatal */
};
904 * t4_handle_intr_status - table driven interrupt handler
905 * @adapter: the adapter that generated the interrupt
906 * @reg: the interrupt status register to process
907 * @acts: table of interrupt actions
909 * A table driven interrupt handler that applies a set of masks to an
910 * interrupt status word and performs the corresponding actions if the
911 * interrupts described by the mask have occured. The actions include
912 * optionally emitting a warning or alert message. The table is terminated
913 * by an entry specifying mask 0. Returns the number of fatal interrupt
914 * conditions.
916 static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
917 const struct intr_info *acts)
919 int fatal = 0;
920 unsigned int mask = 0;
921 unsigned int status = t4_read_reg(adapter, reg);
923 for ( ; acts->mask; ++acts) {
924 if (!(status & acts->mask))
925 continue;
926 if (acts->fatal) {
927 fatal++;
928 dev_alert(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
929 status & acts->mask);
930 } else if (acts->msg && printk_ratelimit())
931 dev_warn(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
932 status & acts->mask);
933 mask |= acts->mask;
935 status &= mask;
936 if (status) /* clear processed interrupts */
937 t4_write_reg(adapter, reg, status);
938 return fatal;
942 * Interrupt handler for the PCIE module.
944 static void pcie_intr_handler(struct adapter *adapter)
946 static struct intr_info sysbus_intr_info[] = {
947 { RNPP, "RXNP array parity error", -1, 1 },
948 { RPCP, "RXPC array parity error", -1, 1 },
949 { RCIP, "RXCIF array parity error", -1, 1 },
950 { RCCP, "Rx completions control array parity error", -1, 1 },
951 { RFTP, "RXFT array parity error", -1, 1 },
952 { 0 }
954 static struct intr_info pcie_port_intr_info[] = {
955 { TPCP, "TXPC array parity error", -1, 1 },
956 { TNPP, "TXNP array parity error", -1, 1 },
957 { TFTP, "TXFT array parity error", -1, 1 },
958 { TCAP, "TXCA array parity error", -1, 1 },
959 { TCIP, "TXCIF array parity error", -1, 1 },
960 { RCAP, "RXCA array parity error", -1, 1 },
961 { OTDD, "outbound request TLP discarded", -1, 1 },
962 { RDPE, "Rx data parity error", -1, 1 },
963 { TDUE, "Tx uncorrectable data error", -1, 1 },
964 { 0 }
966 static struct intr_info pcie_intr_info[] = {
967 { MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
968 { MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
969 { MSIDATAPERR, "MSI data parity error", -1, 1 },
970 { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
971 { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
972 { MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
973 { MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
974 { PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
975 { PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
976 { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
977 { CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
978 { CREQPERR, "PCI CMD channel request parity error", -1, 1 },
979 { CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
980 { DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
981 { DREQPERR, "PCI DMA channel request parity error", -1, 1 },
982 { DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
983 { HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
984 { HREQPERR, "PCI HMA channel request parity error", -1, 1 },
985 { HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
986 { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
987 { FIDPERR, "PCI FID parity error", -1, 1 },
988 { INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
989 { MATAGPERR, "PCI MA tag parity error", -1, 1 },
990 { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
991 { RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
992 { RXWRPERR, "PCI Rx write parity error", -1, 1 },
993 { RPLPERR, "PCI replay buffer parity error", -1, 1 },
994 { PCIESINT, "PCI core secondary fault", -1, 1 },
995 { PCIEPINT, "PCI core primary fault", -1, 1 },
996 { UNXSPLCPLERR, "PCI unexpected split completion error", -1, 0 },
997 { 0 }
1000 int fat;
1002 fat = t4_handle_intr_status(adapter,
1003 PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
1004 sysbus_intr_info) +
1005 t4_handle_intr_status(adapter,
1006 PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
1007 pcie_port_intr_info) +
1008 t4_handle_intr_status(adapter, PCIE_INT_CAUSE, pcie_intr_info);
1009 if (fat)
1010 t4_fatal_err(adapter);
1014 * TP interrupt handler.
1016 static void tp_intr_handler(struct adapter *adapter)
1018 static struct intr_info tp_intr_info[] = {
1019 { 0x3fffffff, "TP parity error", -1, 1 },
1020 { FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
1021 { 0 }
1024 if (t4_handle_intr_status(adapter, TP_INT_CAUSE, tp_intr_info))
1025 t4_fatal_err(adapter);
1029 * SGE interrupt handler.
1031 static void sge_intr_handler(struct adapter *adapter)
1033 u64 v;
1035 static struct intr_info sge_intr_info[] = {
1036 { ERR_CPL_EXCEED_IQE_SIZE,
1037 "SGE received CPL exceeding IQE size", -1, 1 },
1038 { ERR_INVALID_CIDX_INC,
1039 "SGE GTS CIDX increment too large", -1, 0 },
1040 { ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
1041 { ERR_DROPPED_DB, "SGE doorbell dropped", -1, 0 },
1042 { ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0,
1043 "SGE IQID > 1023 received CPL for FL", -1, 0 },
1044 { ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
1045 0 },
1046 { ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
1047 0 },
1048 { ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
1049 0 },
1050 { ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
1051 0 },
1052 { ERR_ING_CTXT_PRIO,
1053 "SGE too many priority ingress contexts", -1, 0 },
1054 { ERR_EGR_CTXT_PRIO,
1055 "SGE too many priority egress contexts", -1, 0 },
1056 { INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
1057 { EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
1058 { 0 }
1061 v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1) |
1062 ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2) << 32);
1063 if (v) {
1064 dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n",
1065 (unsigned long long)v);
1066 t4_write_reg(adapter, SGE_INT_CAUSE1, v);
1067 t4_write_reg(adapter, SGE_INT_CAUSE2, v >> 32);
1070 if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3, sge_intr_info) ||
1071 v != 0)
1072 t4_fatal_err(adapter);
1076 * CIM interrupt handler.
1078 static void cim_intr_handler(struct adapter *adapter)
1080 static struct intr_info cim_intr_info[] = {
1081 { PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
1082 { OBQPARERR, "CIM OBQ parity error", -1, 1 },
1083 { IBQPARERR, "CIM IBQ parity error", -1, 1 },
1084 { MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
1085 { MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
1086 { TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
1087 { TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
1088 { 0 }
1090 static struct intr_info cim_upintr_info[] = {
1091 { RSVDSPACEINT, "CIM reserved space access", -1, 1 },
1092 { ILLTRANSINT, "CIM illegal transaction", -1, 1 },
1093 { ILLWRINT, "CIM illegal write", -1, 1 },
1094 { ILLRDINT, "CIM illegal read", -1, 1 },
1095 { ILLRDBEINT, "CIM illegal read BE", -1, 1 },
1096 { ILLWRBEINT, "CIM illegal write BE", -1, 1 },
1097 { SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
1098 { SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
1099 { BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
1100 { SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
1101 { SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
1102 { BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
1103 { SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
1104 { SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
1105 { BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
1106 { BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
1107 { SGLRDCTLINT , "CIM single read from CTL space", -1, 1 },
1108 { SGLWRCTLINT , "CIM single write to CTL space", -1, 1 },
1109 { BLKRDCTLINT , "CIM block read from CTL space", -1, 1 },
1110 { BLKWRCTLINT , "CIM block write to CTL space", -1, 1 },
1111 { SGLRDPLINT , "CIM single read from PL space", -1, 1 },
1112 { SGLWRPLINT , "CIM single write to PL space", -1, 1 },
1113 { BLKRDPLINT , "CIM block read from PL space", -1, 1 },
1114 { BLKWRPLINT , "CIM block write to PL space", -1, 1 },
1115 { REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 },
1116 { RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 },
1117 { TIMEOUTINT , "CIM PIF timeout", -1, 1 },
1118 { TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
1119 { 0 }
1122 int fat;
1124 fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE,
1125 cim_intr_info) +
1126 t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE,
1127 cim_upintr_info);
1128 if (fat)
1129 t4_fatal_err(adapter);
1133 * ULP RX interrupt handler.
1135 static void ulprx_intr_handler(struct adapter *adapter)
1137 static struct intr_info ulprx_intr_info[] = {
1138 { 0x1800000, "ULPRX context error", -1, 1 },
1139 { 0x7fffff, "ULPRX parity error", -1, 1 },
1140 { 0 }
1143 if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE, ulprx_intr_info))
1144 t4_fatal_err(adapter);
1148 * ULP TX interrupt handler.
1150 static void ulptx_intr_handler(struct adapter *adapter)
1152 static struct intr_info ulptx_intr_info[] = {
1153 { PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
1154 0 },
1155 { PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
1156 0 },
1157 { PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
1158 0 },
1159 { PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
1160 0 },
1161 { 0xfffffff, "ULPTX parity error", -1, 1 },
1162 { 0 }
1165 if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE, ulptx_intr_info))
1166 t4_fatal_err(adapter);
1170 * PM TX interrupt handler.
1172 static void pmtx_intr_handler(struct adapter *adapter)
1174 static struct intr_info pmtx_intr_info[] = {
1175 { PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
1176 { PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
1177 { PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
1178 { ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
1179 { PMTX_FRAMING_ERROR, "PMTX framing error", -1, 1 },
1180 { OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
1181 { DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1, 1 },
1182 { ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
1183 { C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
1184 { 0 }
1187 if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE, pmtx_intr_info))
1188 t4_fatal_err(adapter);
1192 * PM RX interrupt handler.
1194 static void pmrx_intr_handler(struct adapter *adapter)
1196 static struct intr_info pmrx_intr_info[] = {
1197 { ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
1198 { PMRX_FRAMING_ERROR, "PMRX framing error", -1, 1 },
1199 { OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
1200 { DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1, 1 },
1201 { IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
1202 { E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
1203 { 0 }
1206 if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE, pmrx_intr_info))
1207 t4_fatal_err(adapter);
1211 * CPL switch interrupt handler.
1213 static void cplsw_intr_handler(struct adapter *adapter)
1215 static struct intr_info cplsw_intr_info[] = {
1216 { CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
1217 { CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
1218 { TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
1219 { SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
1220 { CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
1221 { ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
1222 { 0 }
1225 if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE, cplsw_intr_info))
1226 t4_fatal_err(adapter);
1230 * LE interrupt handler.
1232 static void le_intr_handler(struct adapter *adap)
1234 static struct intr_info le_intr_info[] = {
1235 { LIPMISS, "LE LIP miss", -1, 0 },
1236 { LIP0, "LE 0 LIP error", -1, 0 },
1237 { PARITYERR, "LE parity error", -1, 1 },
1238 { UNKNOWNCMD, "LE unknown command", -1, 1 },
1239 { REQQPARERR, "LE request queue parity error", -1, 1 },
1240 { 0 }
1243 if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE, le_intr_info))
1244 t4_fatal_err(adap);
1248 * MPS interrupt handler.
1250 static void mps_intr_handler(struct adapter *adapter)
1252 static struct intr_info mps_rx_intr_info[] = {
1253 { 0xffffff, "MPS Rx parity error", -1, 1 },
1254 { 0 }
1256 static struct intr_info mps_tx_intr_info[] = {
1257 { TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 },
1258 { NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
1259 { TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 },
1260 { TXDESCFIFO, "MPS Tx desc FIFO parity error", -1, 1 },
1261 { BUBBLE, "MPS Tx underflow", -1, 1 },
1262 { SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
1263 { FRMERR, "MPS Tx framing error", -1, 1 },
1264 { 0 }
1266 static struct intr_info mps_trc_intr_info[] = {
1267 { FILTMEM, "MPS TRC filter parity error", -1, 1 },
1268 { PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 },
1269 { MISCPERR, "MPS TRC misc parity error", -1, 1 },
1270 { 0 }
1272 static struct intr_info mps_stat_sram_intr_info[] = {
1273 { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
1274 { 0 }
1276 static struct intr_info mps_stat_tx_intr_info[] = {
1277 { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
1278 { 0 }
1280 static struct intr_info mps_stat_rx_intr_info[] = {
1281 { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
1282 { 0 }
1284 static struct intr_info mps_cls_intr_info[] = {
1285 { MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
1286 { MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
1287 { HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
1288 { 0 }
1291 int fat;
1293 fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE,
1294 mps_rx_intr_info) +
1295 t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE,
1296 mps_tx_intr_info) +
1297 t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE,
1298 mps_trc_intr_info) +
1299 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM,
1300 mps_stat_sram_intr_info) +
1301 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
1302 mps_stat_tx_intr_info) +
1303 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
1304 mps_stat_rx_intr_info) +
1305 t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE,
1306 mps_cls_intr_info);
1308 t4_write_reg(adapter, MPS_INT_CAUSE, CLSINT | TRCINT |
1309 RXINT | TXINT | STATINT);
1310 t4_read_reg(adapter, MPS_INT_CAUSE); /* flush */
1311 if (fat)
1312 t4_fatal_err(adapter);
1315 #define MEM_INT_MASK (PERR_INT_CAUSE | ECC_CE_INT_CAUSE | ECC_UE_INT_CAUSE)
1318 * EDC/MC interrupt handler.
1320 static void mem_intr_handler(struct adapter *adapter, int idx)
1322 static const char name[3][5] = { "EDC0", "EDC1", "MC" };
1324 unsigned int addr, cnt_addr, v;
1326 if (idx <= MEM_EDC1) {
1327 addr = EDC_REG(EDC_INT_CAUSE, idx);
1328 cnt_addr = EDC_REG(EDC_ECC_STATUS, idx);
1329 } else {
1330 addr = MC_INT_CAUSE;
1331 cnt_addr = MC_ECC_STATUS;
1334 v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
1335 if (v & PERR_INT_CAUSE)
1336 dev_alert(adapter->pdev_dev, "%s FIFO parity error\n",
1337 name[idx]);
1338 if (v & ECC_CE_INT_CAUSE) {
1339 u32 cnt = ECC_CECNT_GET(t4_read_reg(adapter, cnt_addr));
1341 t4_write_reg(adapter, cnt_addr, ECC_CECNT_MASK);
1342 if (printk_ratelimit())
1343 dev_warn(adapter->pdev_dev,
1344 "%u %s correctable ECC data error%s\n",
1345 cnt, name[idx], cnt > 1 ? "s" : "");
1347 if (v & ECC_UE_INT_CAUSE)
1348 dev_alert(adapter->pdev_dev,
1349 "%s uncorrectable ECC data error\n", name[idx]);
1351 t4_write_reg(adapter, addr, v);
1352 if (v & (PERR_INT_CAUSE | ECC_UE_INT_CAUSE))
1353 t4_fatal_err(adapter);
1357 * MA interrupt handler.
1359 static void ma_intr_handler(struct adapter *adap)
1361 u32 v, status = t4_read_reg(adap, MA_INT_CAUSE);
1363 if (status & MEM_PERR_INT_CAUSE)
1364 dev_alert(adap->pdev_dev,
1365 "MA parity error, parity status %#x\n",
1366 t4_read_reg(adap, MA_PARITY_ERROR_STATUS));
1367 if (status & MEM_WRAP_INT_CAUSE) {
1368 v = t4_read_reg(adap, MA_INT_WRAP_STATUS);
1369 dev_alert(adap->pdev_dev, "MA address wrap-around error by "
1370 "client %u to address %#x\n",
1371 MEM_WRAP_CLIENT_NUM_GET(v),
1372 MEM_WRAP_ADDRESS_GET(v) << 4);
1374 t4_write_reg(adap, MA_INT_CAUSE, status);
1375 t4_fatal_err(adap);
1379 * SMB interrupt handler.
1381 static void smb_intr_handler(struct adapter *adap)
1383 static struct intr_info smb_intr_info[] = {
1384 { MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
1385 { MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
1386 { SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
1387 { 0 }
1390 if (t4_handle_intr_status(adap, SMB_INT_CAUSE, smb_intr_info))
1391 t4_fatal_err(adap);
1395 * NC-SI interrupt handler.
1397 static void ncsi_intr_handler(struct adapter *adap)
1399 static struct intr_info ncsi_intr_info[] = {
1400 { CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
1401 { MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
1402 { TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
1403 { RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
1404 { 0 }
1407 if (t4_handle_intr_status(adap, NCSI_INT_CAUSE, ncsi_intr_info))
1408 t4_fatal_err(adap);
1412 * XGMAC interrupt handler.
1414 static void xgmac_intr_handler(struct adapter *adap, int port)
1416 u32 v = t4_read_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE));
1418 v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR;
1419 if (!v)
1420 return;
1422 if (v & TXFIFO_PRTY_ERR)
1423 dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n",
1424 port);
1425 if (v & RXFIFO_PRTY_ERR)
1426 dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n",
1427 port);
1428 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE), v);
1429 t4_fatal_err(adap);
1433 * PL interrupt handler.
1435 static void pl_intr_handler(struct adapter *adap)
1437 static struct intr_info pl_intr_info[] = {
1438 { FATALPERR, "T4 fatal parity error", -1, 1 },
1439 { PERRVFID, "PL VFID_MAP parity error", -1, 1 },
1440 { 0 }
1443 if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE, pl_intr_info))
1444 t4_fatal_err(adap);
1447 #define PF_INTR_MASK (PFSW | PFCIM)
1448 #define GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \
1449 EDC1 | LE | TP | MA | PM_TX | PM_RX | ULP_RX | \
1450 CPL_SWITCH | SGE | ULP_TX)
1453 * t4_slow_intr_handler - control path interrupt handler
1454 * @adapter: the adapter
1456 * T4 interrupt handler for non-data global interrupt events, e.g., errors.
1457 * The designation 'slow' is because it involves register reads, while
1458 * data interrupts typically don't involve any MMIOs.
1460 int t4_slow_intr_handler(struct adapter *adapter)
1462 u32 cause = t4_read_reg(adapter, PL_INT_CAUSE);
1464 if (!(cause & GLBL_INTR_MASK))
1465 return 0;
1466 if (cause & CIM)
1467 cim_intr_handler(adapter);
1468 if (cause & MPS)
1469 mps_intr_handler(adapter);
1470 if (cause & NCSI)
1471 ncsi_intr_handler(adapter);
1472 if (cause & PL)
1473 pl_intr_handler(adapter);
1474 if (cause & SMB)
1475 smb_intr_handler(adapter);
1476 if (cause & XGMAC0)
1477 xgmac_intr_handler(adapter, 0);
1478 if (cause & XGMAC1)
1479 xgmac_intr_handler(adapter, 1);
1480 if (cause & XGMAC_KR0)
1481 xgmac_intr_handler(adapter, 2);
1482 if (cause & XGMAC_KR1)
1483 xgmac_intr_handler(adapter, 3);
1484 if (cause & PCIE)
1485 pcie_intr_handler(adapter);
1486 if (cause & MC)
1487 mem_intr_handler(adapter, MEM_MC);
1488 if (cause & EDC0)
1489 mem_intr_handler(adapter, MEM_EDC0);
1490 if (cause & EDC1)
1491 mem_intr_handler(adapter, MEM_EDC1);
1492 if (cause & LE)
1493 le_intr_handler(adapter);
1494 if (cause & TP)
1495 tp_intr_handler(adapter);
1496 if (cause & MA)
1497 ma_intr_handler(adapter);
1498 if (cause & PM_TX)
1499 pmtx_intr_handler(adapter);
1500 if (cause & PM_RX)
1501 pmrx_intr_handler(adapter);
1502 if (cause & ULP_RX)
1503 ulprx_intr_handler(adapter);
1504 if (cause & CPL_SWITCH)
1505 cplsw_intr_handler(adapter);
1506 if (cause & SGE)
1507 sge_intr_handler(adapter);
1508 if (cause & ULP_TX)
1509 ulptx_intr_handler(adapter);
1511 /* Clear the interrupts just processed for which we are the master. */
1512 t4_write_reg(adapter, PL_INT_CAUSE, cause & GLBL_INTR_MASK);
1513 (void) t4_read_reg(adapter, PL_INT_CAUSE); /* flush */
1514 return 1;
1518 * t4_intr_enable - enable interrupts
1519 * @adapter: the adapter whose interrupts should be enabled
1521 * Enable PF-specific interrupts for the calling function and the top-level
1522 * interrupt concentrator for global interrupts. Interrupts are already
1523 * enabled at each module, here we just enable the roots of the interrupt
1524 * hierarchies.
1526 * Note: this function should be called only when the driver manages
1527 * non PF-specific interrupts from the various HW modules. Only one PCI
1528 * function at a time should be doing this.
1530 void t4_intr_enable(struct adapter *adapter)
1532 u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));
1534 t4_write_reg(adapter, SGE_INT_ENABLE3, ERR_CPL_EXCEED_IQE_SIZE |
1535 ERR_INVALID_CIDX_INC | ERR_CPL_OPCODE_0 |
1536 ERR_DROPPED_DB | ERR_DATA_CPL_ON_HIGH_QID1 |
1537 ERR_DATA_CPL_ON_HIGH_QID0 | ERR_BAD_DB_PIDX3 |
1538 ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 |
1539 ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO |
1540 ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR |
1541 EGRESS_SIZE_ERR);
1542 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), PF_INTR_MASK);
1543 t4_set_reg_field(adapter, PL_INT_MAP0, 0, 1 << pf);
1547 * t4_intr_disable - disable interrupts
1548 * @adapter: the adapter whose interrupts should be disabled
1550 * Disable interrupts. We only disable the top-level interrupt
1551 * concentrators. The caller must be a PCI function managing global
1552 * interrupts.
1554 void t4_intr_disable(struct adapter *adapter)
1556 u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));
1558 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), 0);
1559 t4_set_reg_field(adapter, PL_INT_MAP0, 1 << pf, 0);
1563 * t4_intr_clear - clear all interrupts
1564 * @adapter: the adapter whose interrupts should be cleared
1566 * Clears all interrupts. The caller must be a PCI function managing
1567 * global interrupts.
1569 void t4_intr_clear(struct adapter *adapter)
1571 static const unsigned int cause_reg[] = {
1572 SGE_INT_CAUSE1, SGE_INT_CAUSE2, SGE_INT_CAUSE3,
1573 PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
1574 PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
1575 PCIE_NONFAT_ERR, PCIE_INT_CAUSE,
1576 MC_INT_CAUSE,
1577 MA_INT_WRAP_STATUS, MA_PARITY_ERROR_STATUS, MA_INT_CAUSE,
1578 EDC_INT_CAUSE, EDC_REG(EDC_INT_CAUSE, 1),
1579 CIM_HOST_INT_CAUSE, CIM_HOST_UPACC_INT_CAUSE,
1580 MYPF_REG(CIM_PF_HOST_INT_CAUSE),
1581 TP_INT_CAUSE,
1582 ULP_RX_INT_CAUSE, ULP_TX_INT_CAUSE,
1583 PM_RX_INT_CAUSE, PM_TX_INT_CAUSE,
1584 MPS_RX_PERR_INT_CAUSE,
1585 CPL_INTR_CAUSE,
1586 MYPF_REG(PL_PF_INT_CAUSE),
1587 PL_PL_INT_CAUSE,
1588 LE_DB_INT_CAUSE,
1591 unsigned int i;
1593 for (i = 0; i < ARRAY_SIZE(cause_reg); ++i)
1594 t4_write_reg(adapter, cause_reg[i], 0xffffffff);
1596 t4_write_reg(adapter, PL_INT_CAUSE, GLBL_INTR_MASK);
1597 (void) t4_read_reg(adapter, PL_INT_CAUSE); /* flush */
1601 * hash_mac_addr - return the hash value of a MAC address
1602 * @addr: the 48-bit Ethernet MAC address
1604 * Hashes a MAC address according to the hash function used by HW inexact
1605 * (hash) address matching.
1607 static int hash_mac_addr(const u8 *addr)
1609 u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
1610 u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
1611 a ^= b;
1612 a ^= (a >> 12);
1613 a ^= (a >> 6);
1614 return a & 0x3f;
1618 * t4_config_rss_range - configure a portion of the RSS mapping table
1619 * @adapter: the adapter
1620 * @mbox: mbox to use for the FW command
1621 * @viid: virtual interface whose RSS subtable is to be written
1622 * @start: start entry in the table to write
1623 * @n: how many table entries to write
1624 * @rspq: values for the response queue lookup table
1625 * @nrspq: number of values in @rspq
1627 * Programs the selected part of the VI's RSS mapping table with the
1628 * provided values. If @nrspq < @n the supplied values are used repeatedly
1629 * until the full table range is populated.
1631 * The caller must ensure the values in @rspq are in the range allowed for
1632 * @viid.
1634 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
1635 int start, int n, const u16 *rspq, unsigned int nrspq)
1637 int ret;
1638 const u16 *rsp = rspq;
1639 const u16 *rsp_end = rspq + nrspq;
1640 struct fw_rss_ind_tbl_cmd cmd;
1642 memset(&cmd, 0, sizeof(cmd));
1643 cmd.op_to_viid = htonl(FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
1644 FW_CMD_REQUEST | FW_CMD_WRITE |
1645 FW_RSS_IND_TBL_CMD_VIID(viid));
1646 cmd.retval_len16 = htonl(FW_LEN16(cmd));
1648 /* each fw_rss_ind_tbl_cmd takes up to 32 entries */
1649 while (n > 0) {
1650 int nq = min(n, 32);
1651 __be32 *qp = &cmd.iq0_to_iq2;
1653 cmd.niqid = htons(nq);
1654 cmd.startidx = htons(start);
1656 start += nq;
1657 n -= nq;
1659 while (nq > 0) {
1660 unsigned int v;
1662 v = FW_RSS_IND_TBL_CMD_IQ0(*rsp);
1663 if (++rsp >= rsp_end)
1664 rsp = rspq;
1665 v |= FW_RSS_IND_TBL_CMD_IQ1(*rsp);
1666 if (++rsp >= rsp_end)
1667 rsp = rspq;
1668 v |= FW_RSS_IND_TBL_CMD_IQ2(*rsp);
1669 if (++rsp >= rsp_end)
1670 rsp = rspq;
1672 *qp++ = htonl(v);
1673 nq -= 3;
1676 ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
1677 if (ret)
1678 return ret;
1680 return 0;
1684 * t4_config_glbl_rss - configure the global RSS mode
1685 * @adapter: the adapter
1686 * @mbox: mbox to use for the FW command
1687 * @mode: global RSS mode
1688 * @flags: mode-specific flags
1690 * Sets the global RSS mode.
1692 int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
1693 unsigned int flags)
1695 struct fw_rss_glb_config_cmd c;
1697 memset(&c, 0, sizeof(c));
1698 c.op_to_write = htonl(FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
1699 FW_CMD_REQUEST | FW_CMD_WRITE);
1700 c.retval_len16 = htonl(FW_LEN16(c));
1701 if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
1702 c.u.manual.mode_pkd = htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
1703 } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
1704 c.u.basicvirtual.mode_pkd =
1705 htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
1706 c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
1707 } else
1708 return -EINVAL;
1709 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
1712 /* Read an RSS table row */
1713 static int rd_rss_row(struct adapter *adap, int row, u32 *val)
1715 t4_write_reg(adap, TP_RSS_LKP_TABLE, 0xfff00000 | row);
1716 return t4_wait_op_done_val(adap, TP_RSS_LKP_TABLE, LKPTBLROWVLD, 1,
1717 5, 0, val);
1721 * t4_read_rss - read the contents of the RSS mapping table
1722 * @adapter: the adapter
1723 * @map: holds the contents of the RSS mapping table
1725 * Reads the contents of the RSS hash->queue mapping table.
1727 int t4_read_rss(struct adapter *adapter, u16 *map)
1729 u32 val;
1730 int i, ret;
1732 for (i = 0; i < RSS_NENTRIES / 2; ++i) {
1733 ret = rd_rss_row(adapter, i, &val);
1734 if (ret)
1735 return ret;
1736 *map++ = LKPTBLQUEUE0_GET(val);
1737 *map++ = LKPTBLQUEUE1_GET(val);
1739 return 0;
1743 * t4_tp_get_tcp_stats - read TP's TCP MIB counters
1744 * @adap: the adapter
1745 * @v4: holds the TCP/IP counter values
1746 * @v6: holds the TCP/IPv6 counter values
1748 * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
1749 * Either @v4 or @v6 may be %NULL to skip the corresponding stats.
1751 void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
1752 struct tp_tcp_stats *v6)
1754 u32 val[TP_MIB_TCP_RXT_SEG_LO - TP_MIB_TCP_OUT_RST + 1];
1756 #define STAT_IDX(x) ((TP_MIB_TCP_##x) - TP_MIB_TCP_OUT_RST)
1757 #define STAT(x) val[STAT_IDX(x)]
1758 #define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
1760 if (v4) {
1761 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
1762 ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST);
1763 v4->tcpOutRsts = STAT(OUT_RST);
1764 v4->tcpInSegs = STAT64(IN_SEG);
1765 v4->tcpOutSegs = STAT64(OUT_SEG);
1766 v4->tcpRetransSegs = STAT64(RXT_SEG);
1768 if (v6) {
1769 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
1770 ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST);
1771 v6->tcpOutRsts = STAT(OUT_RST);
1772 v6->tcpInSegs = STAT64(IN_SEG);
1773 v6->tcpOutSegs = STAT64(OUT_SEG);
1774 v6->tcpRetransSegs = STAT64(RXT_SEG);
1776 #undef STAT64
1777 #undef STAT
1778 #undef STAT_IDX
1782 * t4_tp_get_err_stats - read TP's error MIB counters
1783 * @adap: the adapter
1784 * @st: holds the counter values
1786 * Returns the values of TP's error counters.
1788 void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st)
1790 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->macInErrs,
1791 12, TP_MIB_MAC_IN_ERR_0);
1792 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->tnlCongDrops,
1793 8, TP_MIB_TNL_CNG_DROP_0);
1794 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->tnlTxDrops,
1795 4, TP_MIB_TNL_DROP_0);
1796 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->ofldVlanDrops,
1797 4, TP_MIB_OFD_VLN_DROP_0);
1798 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->tcp6InErrs,
1799 4, TP_MIB_TCP_V6IN_ERR_0);
1800 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, &st->ofldNoNeigh,
1801 2, TP_MIB_OFD_ARP_DROP);
1805 * t4_read_mtu_tbl - returns the values in the HW path MTU table
1806 * @adap: the adapter
1807 * @mtus: where to store the MTU values
1808 * @mtu_log: where to store the MTU base-2 log (may be %NULL)
1810 * Reads the HW path MTU table.
1812 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
1814 u32 v;
1815 int i;
1817 for (i = 0; i < NMTUS; ++i) {
1818 t4_write_reg(adap, TP_MTU_TABLE,
1819 MTUINDEX(0xff) | MTUVALUE(i));
1820 v = t4_read_reg(adap, TP_MTU_TABLE);
1821 mtus[i] = MTUVALUE_GET(v);
1822 if (mtu_log)
1823 mtu_log[i] = MTUWIDTH_GET(v);
1828 * init_cong_ctrl - initialize congestion control parameters
1829 * @a: the alpha values for congestion control
1830 * @b: the beta values for congestion control
1832 * Initialize the congestion control parameters.
1834 static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
1836 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
1837 a[9] = 2;
1838 a[10] = 3;
1839 a[11] = 4;
1840 a[12] = 5;
1841 a[13] = 6;
1842 a[14] = 7;
1843 a[15] = 8;
1844 a[16] = 9;
1845 a[17] = 10;
1846 a[18] = 14;
1847 a[19] = 17;
1848 a[20] = 21;
1849 a[21] = 25;
1850 a[22] = 30;
1851 a[23] = 35;
1852 a[24] = 45;
1853 a[25] = 60;
1854 a[26] = 80;
1855 a[27] = 100;
1856 a[28] = 200;
1857 a[29] = 300;
1858 a[30] = 400;
1859 a[31] = 500;
1861 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
1862 b[9] = b[10] = 1;
1863 b[11] = b[12] = 2;
1864 b[13] = b[14] = b[15] = b[16] = 3;
1865 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
1866 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
1867 b[28] = b[29] = 6;
1868 b[30] = b[31] = 7;
1871 /* The minimum additive increment value for the congestion control table */
1872 #define CC_MIN_INCR 2U
1875 * t4_load_mtus - write the MTU and congestion control HW tables
1876 * @adap: the adapter
1877 * @mtus: the values for the MTU table
1878 * @alpha: the values for the congestion control alpha parameter
1879 * @beta: the values for the congestion control beta parameter
1881 * Write the HW MTU table with the supplied MTUs and the high-speed
1882 * congestion control table with the supplied alpha, beta, and MTUs.
1883 * We write the two tables together because the additive increments
1884 * depend on the MTUs.
1886 void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
1887 const unsigned short *alpha, const unsigned short *beta)
1889 static const unsigned int avg_pkts[NCCTRL_WIN] = {
1890 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
1891 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
1892 28672, 40960, 57344, 81920, 114688, 163840, 229376
1895 unsigned int i, w;
1897 for (i = 0; i < NMTUS; ++i) {
1898 unsigned int mtu = mtus[i];
1899 unsigned int log2 = fls(mtu);
1901 if (!(mtu & ((1 << log2) >> 2))) /* round */
1902 log2--;
1903 t4_write_reg(adap, TP_MTU_TABLE, MTUINDEX(i) |
1904 MTUWIDTH(log2) | MTUVALUE(mtu));
1906 for (w = 0; w < NCCTRL_WIN; ++w) {
1907 unsigned int inc;
1909 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
1910 CC_MIN_INCR);
1912 t4_write_reg(adap, TP_CCTRL_TABLE, (i << 21) |
1913 (w << 16) | (beta[w] << 13) | inc);
1919 * t4_set_trace_filter - configure one of the tracing filters
1920 * @adap: the adapter
1921 * @tp: the desired trace filter parameters
1922 * @idx: which filter to configure
1923 * @enable: whether to enable or disable the filter
1925 * Configures one of the tracing filters available in HW. If @enable is
1926 * %0 @tp is not examined and may be %NULL.
1928 int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
1929 int idx, int enable)
1931 int i, ofst = idx * 4;
1932 u32 data_reg, mask_reg, cfg;
1933 u32 multitrc = TRCMULTIFILTER;
1935 if (!enable) {
1936 t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);
1937 goto out;
1940 if (tp->port > 11 || tp->invert > 1 || tp->skip_len > 0x1f ||
1941 tp->skip_ofst > 0x1f || tp->min_len > 0x1ff ||
1942 tp->snap_len > 9600 || (idx && tp->snap_len > 256))
1943 return -EINVAL;
1945 if (tp->snap_len > 256) { /* must be tracer 0 */
1946 if ((t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + 4) |
1947 t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + 8) |
1948 t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + 12)) & TFEN)
1949 return -EINVAL; /* other tracers are enabled */
1950 multitrc = 0;
1951 } else if (idx) {
1952 i = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B);
1953 if (TFCAPTUREMAX_GET(i) > 256 &&
1954 (t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A) & TFEN))
1955 return -EINVAL;
1958 /* stop the tracer we'll be changing */
1959 t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);
1961 /* disable tracing globally if running in the wrong single/multi mode */
1962 cfg = t4_read_reg(adap, MPS_TRC_CFG);
1963 if ((cfg & TRCEN) && multitrc != (cfg & TRCMULTIFILTER)) {
1964 t4_write_reg(adap, MPS_TRC_CFG, cfg ^ TRCEN);
1965 t4_read_reg(adap, MPS_TRC_CFG); /* flush */
1966 msleep(1);
1967 if (!(t4_read_reg(adap, MPS_TRC_CFG) & TRCFIFOEMPTY))
1968 return -ETIMEDOUT;
1971 * At this point either the tracing is enabled and in the right mode or
1972 * disabled.
1975 idx *= (MPS_TRC_FILTER1_MATCH - MPS_TRC_FILTER0_MATCH);
1976 data_reg = MPS_TRC_FILTER0_MATCH + idx;
1977 mask_reg = MPS_TRC_FILTER0_DONT_CARE + idx;
1979 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
1980 t4_write_reg(adap, data_reg, tp->data[i]);
1981 t4_write_reg(adap, mask_reg, ~tp->mask[i]);
1983 t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B + ofst,
1984 TFCAPTUREMAX(tp->snap_len) |
1985 TFMINPKTSIZE(tp->min_len));
1986 t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + ofst,
1987 TFOFFSET(tp->skip_ofst) | TFLENGTH(tp->skip_len) |
1988 TFPORT(tp->port) | TFEN |
1989 (tp->invert ? TFINVERTMATCH : 0));
1991 cfg &= ~TRCMULTIFILTER;
1992 t4_write_reg(adap, MPS_TRC_CFG, cfg | TRCEN | multitrc);
1993 out: t4_read_reg(adap, MPS_TRC_CFG); /* flush */
1994 return 0;
1998 * t4_get_trace_filter - query one of the tracing filters
1999 * @adap: the adapter
2000 * @tp: the current trace filter parameters
2001 * @idx: which trace filter to query
2002 * @enabled: non-zero if the filter is enabled
2004 * Returns the current settings of one of the HW tracing filters.
2006 void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
2007 int *enabled)
2009 u32 ctla, ctlb;
2010 int i, ofst = idx * 4;
2011 u32 data_reg, mask_reg;
2013 ctla = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + ofst);
2014 ctlb = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B + ofst);
2016 *enabled = !!(ctla & TFEN);
2017 tp->snap_len = TFCAPTUREMAX_GET(ctlb);
2018 tp->min_len = TFMINPKTSIZE_GET(ctlb);
2019 tp->skip_ofst = TFOFFSET_GET(ctla);
2020 tp->skip_len = TFLENGTH_GET(ctla);
2021 tp->invert = !!(ctla & TFINVERTMATCH);
2022 tp->port = TFPORT_GET(ctla);
2024 ofst = (MPS_TRC_FILTER1_MATCH - MPS_TRC_FILTER0_MATCH) * idx;
2025 data_reg = MPS_TRC_FILTER0_MATCH + ofst;
2026 mask_reg = MPS_TRC_FILTER0_DONT_CARE + ofst;
2028 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
2029 tp->mask[i] = ~t4_read_reg(adap, mask_reg);
2030 tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
2035 * get_mps_bg_map - return the buffer groups associated with a port
2036 * @adap: the adapter
2037 * @idx: the port index
2039 * Returns a bitmap indicating which MPS buffer groups are associated
2040 * with the given port. Bit i is set if buffer group i is used by the
2041 * port.
2043 static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
2045 u32 n = NUMPORTS_GET(t4_read_reg(adap, MPS_CMN_CTL));
2047 if (n == 0)
2048 return idx == 0 ? 0xf : 0;
2049 if (n == 1)
2050 return idx < 2 ? (3 << (2 * idx)) : 0;
2051 return 1 << idx;
2055 * t4_get_port_stats - collect port statistics
2056 * @adap: the adapter
2057 * @idx: the port index
2058 * @p: the stats structure to fill
2060 * Collect statistics related to the given port from HW.
void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
{
	/* buffer-group overflow/truncate counters are only valid for the
	 * groups this port owns; the rest are reported as 0 below */
	u32 bgmap = get_mps_bg_map(adap, idx);

/* read one 64-bit per-port statistic starting at its _L register */
#define GET_STAT(name) \
	t4_read_reg64(adap, PORT_REG(idx, MPS_PORT_STAT_##name##_L))
/* read one 64-bit common (non per-port) MPS statistic */
#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)

	/* Tx counters */
	p->tx_octets = GET_STAT(TX_PORT_BYTES);
	p->tx_frames = GET_STAT(TX_PORT_FRAMES);
	p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
	p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
	p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
	p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
	p->tx_frames_64 = GET_STAT(TX_PORT_64B);
	p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
	p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
	p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
	p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
	p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
	p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
	p->tx_drop = GET_STAT(TX_PORT_DROP);
	p->tx_pause = GET_STAT(TX_PORT_PAUSE);
	/* per-priority pause (PPP) frame counters, priorities 0-7 */
	p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
	p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
	p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
	p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
	p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
	p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
	p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
	p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);

	/* Rx counters */
	p->rx_octets = GET_STAT(RX_PORT_BYTES);
	p->rx_frames = GET_STAT(RX_PORT_FRAMES);
	p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
	p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
	p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
	p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
	p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
	p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR);
	p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
	p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
	p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
	p->rx_frames_64 = GET_STAT(RX_PORT_64B);
	p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
	p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
	p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
	p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
	p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
	p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
	p->rx_pause = GET_STAT(RX_PORT_PAUSE);
	p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
	p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
	p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
	p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
	p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
	p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
	p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
	p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);

	/* per-buffer-group MAC drop/truncate counters, gated by bgmap */
	p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
	p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
	p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
	p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
	p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;

#undef GET_STAT
#undef GET_STAT_COM
}
2136 * t4_get_lb_stats - collect loopback port statistics
2137 * @adap: the adapter
2138 * @idx: the loopback port index
2139 * @p: the stats structure to fill
2141 * Return HW statistics for the given loopback port.
void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
{
	/* only read BG counters for the buffer groups this port owns */
	u32 bgmap = get_mps_bg_map(adap, idx);

/* read one 64-bit loopback-port statistic starting at its _L register */
#define GET_STAT(name) \
	t4_read_reg64(adap, PORT_REG(idx, MPS_PORT_STAT_LB_PORT_##name##_L))
/* read one 64-bit common (non per-port) MPS statistic */
#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)

	p->octets = GET_STAT(BYTES);
	p->frames = GET_STAT(FRAMES);
	p->bcast_frames = GET_STAT(BCAST);
	p->mcast_frames = GET_STAT(MCAST);
	p->ucast_frames = GET_STAT(UCAST);
	p->error_frames = GET_STAT(ERROR);

	/* frame-size histogram */
	p->frames_64 = GET_STAT(64B);
	p->frames_65_127 = GET_STAT(65B_127B);
	p->frames_128_255 = GET_STAT(128B_255B);
	p->frames_256_511 = GET_STAT(256B_511B);
	p->frames_512_1023 = GET_STAT(512B_1023B);
	p->frames_1024_1518 = GET_STAT(1024B_1518B);
	p->frames_1519_max = GET_STAT(1519B_MAX);
	/* the drop counter is a single 32-bit register, hence t4_read_reg */
	p->drop = t4_read_reg(adap, PORT_REG(idx,
					     MPS_PORT_STAT_LB_PORT_DROP_FRAMES));

	/* per-buffer-group loopback drop/truncate counters, gated by bgmap */
	p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
	p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
	p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
	p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
	p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
	p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
	p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
	p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;

#undef GET_STAT
#undef GET_STAT_COM
}
2182 * t4_wol_magic_enable - enable/disable magic packet WoL
2183 * @adap: the adapter
2184 * @port: the physical port index
2185 * @addr: MAC address expected in magic packets, %NULL to disable
2187 * Enables/disables magic packet wake-on-LAN for the selected port.
2189 void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
2190 const u8 *addr)
2192 if (addr) {
2193 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO),
2194 (addr[2] << 24) | (addr[3] << 16) |
2195 (addr[4] << 8) | addr[5]);
2196 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI),
2197 (addr[0] << 8) | addr[1]);
2199 t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), MAGICEN,
2200 addr ? MAGICEN : 0);
2204 * t4_wol_pat_enable - enable/disable pattern-based WoL
2205 * @adap: the adapter
2206 * @port: the physical port index
2207 * @map: bitmap of which HW pattern filters to set
2208 * @mask0: byte mask for bytes 0-63 of a packet
2209 * @mask1: byte mask for bytes 64-127 of a packet
2210 * @crc: Ethernet CRC for selected bytes
2211 * @enable: enable/disable switch
2213 * Sets the pattern filters indicated in @map to mask out the bytes
2214 * specified in @mask0/@mask1 in received packets and compare the CRC of
2215 * the resulting packet against @crc. If @enable is %true pattern-based
2216 * WoL is enabled, otherwise disabled.
int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
		      u64 mask0, u64 mask1, unsigned int crc, bool enable)
{
	int i;

	if (!enable) {
		/* just turn pattern WoL off; leave the filters programmed */
		t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2),
				 PATEN, 0);
		return 0;
	}
	if (map > 0xff)		/* one bit per HW pattern filter, 8 max */
		return -EINVAL;

#define EPIO_REG(name) PORT_REG(port, XGMAC_PORT_EPIO_##name)

	/* DATA1-3 (upper mask words) are latched once and reused for every
	 * selected pattern; only DATA0 changes per EPIO write below */
	t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
	t4_write_reg(adap, EPIO_REG(DATA2), mask1);
	t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);

	for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
		if (!(map & 1))
			continue;

		/* write byte masks */
		t4_write_reg(adap, EPIO_REG(DATA0), mask0);
		t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i) | EPIOWR);
		t4_read_reg(adap, EPIO_REG(OP));                /* flush */
		if (t4_read_reg(adap, EPIO_REG(OP)) & BUSY)
			return -ETIMEDOUT;

		/* write CRC; CRC slots live 32 addresses above the masks */
		t4_write_reg(adap, EPIO_REG(DATA0), crc);
		t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i + 32) | EPIOWR);
		t4_read_reg(adap, EPIO_REG(OP));                /* flush */
		if (t4_read_reg(adap, EPIO_REG(OP)) & BUSY)
			return -ETIMEDOUT;
	}
#undef EPIO_REG

	t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), 0, PATEN);
	return 0;
}
/*
 * Fill in the common header of a FW command: the opcode, the request flag
 * and the read/write direction in op_to_write, plus the command length in
 * retval_len16.  @var must be a fw_*_cmd struct, @cmd the FW_*_CMD opcode
 * suffix, and @rd_wr either READ or WRITE.
 * NOTE(review): fields of @var other than the two set here are left
 * uninitialized by this macro -- callers that need them zeroed must
 * memset first; confirm FW ignores the rest for these commands.
 */
#define INIT_CMD(var, cmd, rd_wr) do { \
	(var).op_to_write = htonl(FW_CMD_OP(FW_##cmd##_CMD) | \
				  FW_CMD_REQUEST | FW_CMD_##rd_wr); \
	(var).retval_len16 = htonl(FW_LEN16(var)); \
} while (0)
2268 * t4_mdio_rd - read a PHY register through MDIO
2269 * @adap: the adapter
2270 * @mbox: mailbox to use for the FW command
2271 * @phy_addr: the PHY address
2272 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
2273 * @reg: the register to read
2274 * @valp: where to store the value
2276 * Issues a FW command through the given mailbox to read a PHY register.
2278 int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
2279 unsigned int mmd, unsigned int reg, u16 *valp)
2281 int ret;
2282 struct fw_ldst_cmd c;
2284 memset(&c, 0, sizeof(c));
2285 c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
2286 FW_CMD_READ | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
2287 c.cycles_to_len16 = htonl(FW_LEN16(c));
2288 c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
2289 FW_LDST_CMD_MMD(mmd));
2290 c.u.mdio.raddr = htons(reg);
2292 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2293 if (ret == 0)
2294 *valp = ntohs(c.u.mdio.rval);
2295 return ret;
2299 * t4_mdio_wr - write a PHY register through MDIO
2300 * @adap: the adapter
2301 * @mbox: mailbox to use for the FW command
2302 * @phy_addr: the PHY address
2303 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
2304 * @reg: the register to write
2305 * @valp: value to write
2307 * Issues a FW command through the given mailbox to write a PHY register.
2309 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
2310 unsigned int mmd, unsigned int reg, u16 val)
2312 struct fw_ldst_cmd c;
2314 memset(&c, 0, sizeof(c));
2315 c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
2316 FW_CMD_WRITE | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
2317 c.cycles_to_len16 = htonl(FW_LEN16(c));
2318 c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
2319 FW_LDST_CMD_MMD(mmd));
2320 c.u.mdio.raddr = htons(reg);
2321 c.u.mdio.rval = htons(val);
2323 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2327 * t4_fw_hello - establish communication with FW
2328 * @adap: the adapter
2329 * @mbox: mailbox to use for the FW command
2330 * @evt_mbox: mailbox to receive async FW events
2331 * @master: specifies the caller's willingness to be the device master
2332 * @state: returns the current device state
2334 * Issues a command to establish communication with FW.
int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
		enum dev_master master, enum dev_state *state)
{
	int ret;
	struct fw_hello_cmd c;

	INIT_CMD(c, HELLO, WRITE);
	/* encode mastership desire: disallow, force, or let FW pick (0xff) */
	c.err_to_mbasyncnot = htonl(
		FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
		FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
		FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox : 0xff) |
		FW_HELLO_CMD_MBASYNCNOT(evt_mbox));

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret == 0 && state) {
		/* the reply's err_to_mbasyncnot carries the device state */
		u32 v = ntohl(c.err_to_mbasyncnot);
		if (v & FW_HELLO_CMD_INIT)
			*state = DEV_STATE_INIT;
		else if (v & FW_HELLO_CMD_ERR)
			*state = DEV_STATE_ERR;
		else
			*state = DEV_STATE_UNINIT;
	}
	return ret;
}
2363 * t4_fw_bye - end communication with FW
2364 * @adap: the adapter
2365 * @mbox: mailbox to use for the FW command
2367 * Issues a command to terminate communication with FW.
int t4_fw_bye(struct adapter *adap, unsigned int mbox)
{
	struct fw_bye_cmd c;

	/* a BYE command has no payload beyond the common header */
	INIT_CMD(c, BYE, WRITE);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
2378 * t4_init_cmd - ask FW to initialize the device
2379 * @adap: the adapter
2380 * @mbox: mailbox to use for the FW command
2382 * Issues a command to FW to partially initialize the device. This
2383 * performs initialization that generally doesn't depend on user input.
int t4_early_init(struct adapter *adap, unsigned int mbox)
{
	struct fw_initialize_cmd c;

	/* an INITIALIZE command has no payload beyond the common header */
	INIT_CMD(c, INITIALIZE, WRITE);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
2394 * t4_fw_reset - issue a reset to FW
2395 * @adap: the adapter
2396 * @mbox: mailbox to use for the FW command
2397 * @reset: specifies the type of reset to perform
2399 * Issues a reset command of the specified type to FW.
int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
{
	struct fw_reset_cmd c;

	INIT_CMD(c, RESET, WRITE);
	c.val = htonl(reset);	/* reset type requested by the caller */
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
2411 * t4_query_params - query FW or device parameters
2412 * @adap: the adapter
2413 * @mbox: mailbox to use for the FW command
2414 * @pf: the PF
2415 * @vf: the VF
2416 * @nparams: the number of parameters
2417 * @params: the parameter names
2418 * @val: the parameter values
2420 * Reads the value of FW or device parameters. Up to 7 parameters can be
2421 * queried at once.
2423 int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
2424 unsigned int vf, unsigned int nparams, const u32 *params,
2425 u32 *val)
2427 int i, ret;
2428 struct fw_params_cmd c;
2429 __be32 *p = &c.param[0].mnem;
2431 if (nparams > 7)
2432 return -EINVAL;
2434 memset(&c, 0, sizeof(c));
2435 c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
2436 FW_CMD_READ | FW_PARAMS_CMD_PFN(pf) |
2437 FW_PARAMS_CMD_VFN(vf));
2438 c.retval_len16 = htonl(FW_LEN16(c));
2439 for (i = 0; i < nparams; i++, p += 2)
2440 *p = htonl(*params++);
2442 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2443 if (ret == 0)
2444 for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
2445 *val++ = ntohl(*p);
2446 return ret;
2450 * t4_set_params - sets FW or device parameters
2451 * @adap: the adapter
2452 * @mbox: mailbox to use for the FW command
2453 * @pf: the PF
2454 * @vf: the VF
2455 * @nparams: the number of parameters
2456 * @params: the parameter names
2457 * @val: the parameter values
2459 * Sets the value of FW or device parameters. Up to 7 parameters can be
2460 * specified at once.
2462 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
2463 unsigned int vf, unsigned int nparams, const u32 *params,
2464 const u32 *val)
2466 struct fw_params_cmd c;
2467 __be32 *p = &c.param[0].mnem;
2469 if (nparams > 7)
2470 return -EINVAL;
2472 memset(&c, 0, sizeof(c));
2473 c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
2474 FW_CMD_WRITE | FW_PARAMS_CMD_PFN(pf) |
2475 FW_PARAMS_CMD_VFN(vf));
2476 c.retval_len16 = htonl(FW_LEN16(c));
2477 while (nparams--) {
2478 *p++ = htonl(*params++);
2479 *p++ = htonl(*val++);
2482 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2486 * t4_cfg_pfvf - configure PF/VF resource limits
2487 * @adap: the adapter
2488 * @mbox: mailbox to use for the FW command
2489 * @pf: the PF being configured
2490 * @vf: the VF being configured
2491 * @txq: the max number of egress queues
2492 * @txq_eth_ctrl: the max number of egress Ethernet or control queues
2493 * @rxqi: the max number of interrupt-capable ingress queues
2494 * @rxq: the max number of interruptless ingress queues
2495 * @tc: the PCI traffic class
2496 * @vi: the max number of virtual interfaces
2497 * @cmask: the channel access rights mask for the PF/VF
2498 * @pmask: the port access rights mask for the PF/VF
2499 * @nexact: the maximum number of exact MPS filters
2500 * @rcaps: read capabilities
2501 * @wxcaps: write/execute capabilities
2503 * Configures resource limits and capabilities for a physical or virtual
2504 * function.
int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
		unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
		unsigned int rxqi, unsigned int rxq, unsigned int tc,
		unsigned int vi, unsigned int cmask, unsigned int pmask,
		unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
{
	struct fw_pfvf_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP(FW_PFVF_CMD) | FW_CMD_REQUEST |
			    FW_CMD_WRITE | FW_PFVF_CMD_PFN(pf) |
			    FW_PFVF_CMD_VFN(vf));
	c.retval_len16 = htonl(FW_LEN16(c));
	/* ingress queue limits: interrupt-capable vs interruptless */
	c.niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT(rxqi) |
			       FW_PFVF_CMD_NIQ(rxq));
	/* channel/port access masks and total egress queue limit */
	c.cmask_to_neq = htonl(FW_PFVF_CMD_CMASK(cmask) |
			       FW_PFVF_CMD_PMASK(pmask) |
			       FW_PFVF_CMD_NEQ(txq));
	/* traffic class, VI count and exact-match MPS filter budget */
	c.tc_to_nexactf = htonl(FW_PFVF_CMD_TC(tc) | FW_PFVF_CMD_NVI(vi) |
				FW_PFVF_CMD_NEXACTF(nexact));
	/* capabilities plus Ethernet/control egress queue limit */
	c.r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS(rcaps) |
				     FW_PFVF_CMD_WX_CAPS(wxcaps) |
				     FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
2533 * t4_alloc_vi - allocate a virtual interface
2534 * @adap: the adapter
2535 * @mbox: mailbox to use for the FW command
2536 * @port: physical port associated with the VI
2537 * @pf: the PF owning the VI
2538 * @vf: the VF owning the VI
2539 * @nmac: number of MAC addresses needed (1 to 5)
2540 * @mac: the MAC addresses of the VI
2541 * @rss_size: size of RSS table slice associated with this VI
2543 * Allocates a virtual interface for the given physical port. If @mac is
2544 * not %NULL it contains the MAC addresses of the VI as assigned by FW.
2545 * @mac should be large enough to hold @nmac Ethernet addresses, they are
2546 * stored consecutively so the space needed is @nmac * 6 bytes.
2547 * Returns a negative error number or the non-negative VI id.
int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
		unsigned int *rss_size)
{
	int ret;
	struct fw_vi_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP(FW_VI_CMD) | FW_CMD_REQUEST |
			    FW_CMD_WRITE | FW_CMD_EXEC |
			    FW_VI_CMD_PFN(pf) | FW_VI_CMD_VFN(vf));
	c.alloc_to_len16 = htonl(FW_VI_CMD_ALLOC | FW_LEN16(c));
	c.portid_pkd = FW_VI_CMD_PORTID(port);
	c.nmac = nmac - 1;	/* FW encodes the count as N-1 */

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret)
		return ret;

	if (mac) {
		/* copy out the primary MAC, then the extras; the switch
		 * deliberately falls through so higher counts also copy
		 * all lower-numbered addresses (6 bytes each) */
		memcpy(mac, c.mac, sizeof(c.mac));
		switch (nmac) {
		case 5:
			memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
			/* fall through */
		case 4:
			memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
			/* fall through */
		case 3:
			memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
			/* fall through */
		case 2:
			memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
		}
	}
	if (rss_size)
		*rss_size = FW_VI_CMD_RSSSIZE_GET(ntohs(c.rsssize_pkd));
	return ntohs(c.viid_pkd);	/* non-negative VI id on success */
}
2587 * t4_free_vi - free a virtual interface
2588 * @adap: the adapter
2589 * @mbox: mailbox to use for the FW command
2590 * @pf: the PF owning the VI
2591 * @vf: the VF owning the VI
2592 * @viid: virtual interface identifier
2594 * Free a previously allocated virtual interface.
2596 int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
2597 unsigned int vf, unsigned int viid)
2599 struct fw_vi_cmd c;
2601 memset(&c, 0, sizeof(c));
2602 c.op_to_vfn = htonl(FW_CMD_OP(FW_VI_CMD) | FW_CMD_REQUEST |
2603 FW_CMD_EXEC | FW_VI_CMD_PFN(pf) |
2604 FW_VI_CMD_VFN(vf));
2605 c.alloc_to_len16 = htonl(FW_VI_CMD_FREE | FW_LEN16(c));
2606 c.viid_pkd = htons(FW_VI_CMD_VIID(viid));
2607 return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2611 * t4_set_rxmode - set Rx properties of a virtual interface
2612 * @adap: the adapter
2613 * @mbox: mailbox to use for the FW command
2614 * @viid: the VI id
2615 * @mtu: the new MTU or -1
2616 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
2617 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
2618 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
2619 * @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
2620 * @sleep_ok: if true we may sleep while awaiting command completion
2622 * Sets Rx properties of a virtual interface.
2624 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
2625 int mtu, int promisc, int all_multi, int bcast, int vlanex,
2626 bool sleep_ok)
2628 struct fw_vi_rxmode_cmd c;
2630 /* convert to FW values */
2631 if (mtu < 0)
2632 mtu = FW_RXMODE_MTU_NO_CHG;
2633 if (promisc < 0)
2634 promisc = FW_VI_RXMODE_CMD_PROMISCEN_MASK;
2635 if (all_multi < 0)
2636 all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_MASK;
2637 if (bcast < 0)
2638 bcast = FW_VI_RXMODE_CMD_BROADCASTEN_MASK;
2639 if (vlanex < 0)
2640 vlanex = FW_VI_RXMODE_CMD_VLANEXEN_MASK;
2642 memset(&c, 0, sizeof(c));
2643 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_RXMODE_CMD) | FW_CMD_REQUEST |
2644 FW_CMD_WRITE | FW_VI_RXMODE_CMD_VIID(viid));
2645 c.retval_len16 = htonl(FW_LEN16(c));
2646 c.mtu_to_vlanexen = htonl(FW_VI_RXMODE_CMD_MTU(mtu) |
2647 FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
2648 FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
2649 FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
2650 FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
2651 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
2655 * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
2656 * @adap: the adapter
2657 * @mbox: mailbox to use for the FW command
2658 * @viid: the VI id
2659 * @free: if true any existing filters for this VI id are first removed
2660 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
2661 * @addr: the MAC address(es)
2662 * @idx: where to store the index of each allocated filter
2663 * @hash: pointer to hash address filter bitmap
2664 * @sleep_ok: call is allowed to sleep
2666 * Allocates an exact-match filter for each of the supplied addresses and
2667 * sets it to the corresponding address. If @idx is not %NULL it should
2668 * have at least @naddr entries, each of which will be set to the index of
2669 * the filter allocated for the corresponding MAC address. If a filter
2670 * could not be allocated for an address its index is set to 0xffff.
2671 * If @hash is not %NULL addresses that fail to allocate an exact filter
2672 * are hashed and update the hash filter bitmap pointed at by @hash.
2674 * Returns a negative error number or the number of filters allocated.
int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
		      unsigned int viid, bool free, unsigned int naddr,
		      const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
{
	int i, ret;
	struct fw_vi_mac_cmd c;
	struct fw_vi_mac_exact *p;

	if (naddr > 7)	/* one command carries at most 7 exact entries */
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
			     FW_CMD_WRITE | (free ? FW_CMD_EXEC : 0) |
			     FW_VI_MAC_CMD_VIID(viid));
	/* length covers the header plus one 16-byte unit per 2 entries */
	c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_FREEMACS(free) |
				    FW_CMD_LEN16((naddr + 2) / 2));

	/* ask FW to allocate a filter slot for each supplied address */
	for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
		p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
					FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
		memcpy(p->macaddr, addr[i], sizeof(p->macaddr));
	}

	ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
	if (ret)
		return ret;

	/* the reply holds the index FW assigned to each entry; indices
	 * >= NEXACT_MAC mean no exact slot was available */
	for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
		u16 index = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));

		if (idx)
			idx[i] = index >= NEXACT_MAC ? 0xffff : index;
		if (index < NEXACT_MAC)
			ret++;		/* count successful allocations */
		else if (hash)
			*hash |= (1 << hash_mac_addr(addr[i]));
	}
	return ret;
}
2718 * t4_change_mac - modifies the exact-match filter for a MAC address
2719 * @adap: the adapter
2720 * @mbox: mailbox to use for the FW command
2721 * @viid: the VI id
2722 * @idx: index of existing filter for old value of MAC address, or -1
2723 * @addr: the new MAC address value
2724 * @persist: whether a new MAC allocation should be persistent
2725 * @add_smt: if true also add the address to the HW SMT
2727 * Modifies an exact-match filter and sets it to the new MAC address.
2728 * Note that in general it is not possible to modify the value of a given
2729 * filter so the generic way to modify an address filter is to free the one
2730 * being used by the old address value and allocate a new filter for the
2731 * new address value. @idx can be -1 if the address is a new addition.
2733 * Returns a negative error number or the index of the filter with the new
2734 * MAC value.
2736 int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
2737 int idx, const u8 *addr, bool persist, bool add_smt)
2739 int ret, mode;
2740 struct fw_vi_mac_cmd c;
2741 struct fw_vi_mac_exact *p = c.u.exact;
2743 if (idx < 0) /* new allocation */
2744 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
2745 mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
2747 memset(&c, 0, sizeof(c));
2748 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
2749 FW_CMD_WRITE | FW_VI_MAC_CMD_VIID(viid));
2750 c.freemacs_to_len16 = htonl(FW_CMD_LEN16(1));
2751 p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
2752 FW_VI_MAC_CMD_SMAC_RESULT(mode) |
2753 FW_VI_MAC_CMD_IDX(idx));
2754 memcpy(p->macaddr, addr, sizeof(p->macaddr));
2756 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2757 if (ret == 0) {
2758 ret = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
2759 if (ret >= NEXACT_MAC)
2760 ret = -ENOMEM;
2762 return ret;
2766 * t4_set_addr_hash - program the MAC inexact-match hash filter
2767 * @adap: the adapter
2768 * @mbox: mailbox to use for the FW command
2769 * @viid: the VI id
2770 * @ucast: whether the hash filter should also match unicast addresses
2771 * @vec: the value to be written to the hash filter
2772 * @sleep_ok: call is allowed to sleep
2774 * Sets the 64-bit inexact-match hash filter for a virtual interface.
2776 int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
2777 bool ucast, u64 vec, bool sleep_ok)
2779 struct fw_vi_mac_cmd c;
2781 memset(&c, 0, sizeof(c));
2782 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
2783 FW_CMD_WRITE | FW_VI_ENABLE_CMD_VIID(viid));
2784 c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_HASHVECEN |
2785 FW_VI_MAC_CMD_HASHUNIEN(ucast) |
2786 FW_CMD_LEN16(1));
2787 c.u.hash.hashvec = cpu_to_be64(vec);
2788 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
2792 * t4_enable_vi - enable/disable a virtual interface
2793 * @adap: the adapter
2794 * @mbox: mailbox to use for the FW command
2795 * @viid: the VI id
2796 * @rx_en: 1=enable Rx, 0=disable Rx
2797 * @tx_en: 1=enable Tx, 0=disable Tx
2799 * Enables/disables a virtual interface.
2801 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
2802 bool rx_en, bool tx_en)
2804 struct fw_vi_enable_cmd c;
2806 memset(&c, 0, sizeof(c));
2807 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
2808 FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
2809 c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_IEN(rx_en) |
2810 FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c));
2811 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2815 * t4_identify_port - identify a VI's port by blinking its LED
2816 * @adap: the adapter
2817 * @mbox: mailbox to use for the FW command
2818 * @viid: the VI id
2819 * @nblinks: how many times to blink LED at 2.5 Hz
2821 * Identifies a VI's port by blinking its LED.
2823 int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
2824 unsigned int nblinks)
2826 struct fw_vi_enable_cmd c;
2828 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
2829 FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
2830 c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
2831 c.blinkdur = htons(nblinks);
2832 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2836 * t4_iq_start_stop - enable/disable an ingress queue and its FLs
2837 * @adap: the adapter
2838 * @mbox: mailbox to use for the FW command
2839 * @start: %true to enable the queues, %false to disable them
2840 * @pf: the PF owning the queues
2841 * @vf: the VF owning the queues
2842 * @iqid: ingress queue id
2843 * @fl0id: FL0 queue id or 0xffff if no attached FL0
2844 * @fl1id: FL1 queue id or 0xffff if no attached FL1
2846 * Starts or stops an ingress queue and its associated FLs, if any.
2848 int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start,
2849 unsigned int pf, unsigned int vf, unsigned int iqid,
2850 unsigned int fl0id, unsigned int fl1id)
2852 struct fw_iq_cmd c;
2854 memset(&c, 0, sizeof(c));
2855 c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
2856 FW_CMD_EXEC | FW_IQ_CMD_PFN(pf) |
2857 FW_IQ_CMD_VFN(vf));
2858 c.alloc_to_len16 = htonl(FW_IQ_CMD_IQSTART(start) |
2859 FW_IQ_CMD_IQSTOP(!start) | FW_LEN16(c));
2860 c.iqid = htons(iqid);
2861 c.fl0id = htons(fl0id);
2862 c.fl1id = htons(fl1id);
2863 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2867 * t4_iq_free - free an ingress queue and its FLs
2868 * @adap: the adapter
2869 * @mbox: mailbox to use for the FW command
2870 * @pf: the PF owning the queues
2871 * @vf: the VF owning the queues
2872 * @iqtype: the ingress queue type
2873 * @iqid: ingress queue id
2874 * @fl0id: FL0 queue id or 0xffff if no attached FL0
2875 * @fl1id: FL1 queue id or 0xffff if no attached FL1
2877 * Frees an ingress queue and its associated FLs, if any.
2879 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
2880 unsigned int vf, unsigned int iqtype, unsigned int iqid,
2881 unsigned int fl0id, unsigned int fl1id)
2883 struct fw_iq_cmd c;
2885 memset(&c, 0, sizeof(c));
2886 c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
2887 FW_CMD_EXEC | FW_IQ_CMD_PFN(pf) |
2888 FW_IQ_CMD_VFN(vf));
2889 c.alloc_to_len16 = htonl(FW_IQ_CMD_FREE | FW_LEN16(c));
2890 c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(iqtype));
2891 c.iqid = htons(iqid);
2892 c.fl0id = htons(fl0id);
2893 c.fl1id = htons(fl1id);
2894 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2898 * t4_eth_eq_free - free an Ethernet egress queue
2899 * @adap: the adapter
2900 * @mbox: mailbox to use for the FW command
2901 * @pf: the PF owning the queue
2902 * @vf: the VF owning the queue
2903 * @eqid: egress queue id
2905 * Frees an Ethernet egress queue.
2907 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
2908 unsigned int vf, unsigned int eqid)
2910 struct fw_eq_eth_cmd c;
2912 memset(&c, 0, sizeof(c));
2913 c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST |
2914 FW_CMD_EXEC | FW_EQ_ETH_CMD_PFN(pf) |
2915 FW_EQ_ETH_CMD_VFN(vf));
2916 c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
2917 c.eqid_pkd = htonl(FW_EQ_ETH_CMD_EQID(eqid));
2918 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2922 * t4_ctrl_eq_free - free a control egress queue
2923 * @adap: the adapter
2924 * @mbox: mailbox to use for the FW command
2925 * @pf: the PF owning the queue
2926 * @vf: the VF owning the queue
2927 * @eqid: egress queue id
2929 * Frees a control egress queue.
2931 int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
2932 unsigned int vf, unsigned int eqid)
2934 struct fw_eq_ctrl_cmd c;
2936 memset(&c, 0, sizeof(c));
2937 c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST |
2938 FW_CMD_EXEC | FW_EQ_CTRL_CMD_PFN(pf) |
2939 FW_EQ_CTRL_CMD_VFN(vf));
2940 c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
2941 c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_EQID(eqid));
2942 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2946 * t4_ofld_eq_free - free an offload egress queue
2947 * @adap: the adapter
2948 * @mbox: mailbox to use for the FW command
2949 * @pf: the PF owning the queue
2950 * @vf: the VF owning the queue
2951 * @eqid: egress queue id
 *	Frees an offload egress queue.
2955 int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
2956 unsigned int vf, unsigned int eqid)
2958 struct fw_eq_ofld_cmd c;
2960 memset(&c, 0, sizeof(c));
2961 c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST |
2962 FW_CMD_EXEC | FW_EQ_OFLD_CMD_PFN(pf) |
2963 FW_EQ_OFLD_CMD_VFN(vf));
2964 c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
2965 c.eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID(eqid));
2966 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2970 * t4_handle_fw_rpl - process a FW reply message
2971 * @adap: the adapter
2972 * @rpl: start of the FW message
2974 * Processes a FW message, such as link state change messages.
int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
	/* the FW opcode is the first byte of any FW message */
	u8 opcode = *(const u8 *)rpl;

	/* Only FW_PORT_CMD (link/module status) is handled here; any other
	 * opcode falls through and the function returns 0 untouched.
	 */
	if (opcode == FW_PORT_CMD) {    /* link/module state change message */
		int speed = 0, fc = 0;
		const struct fw_port_cmd *p = (void *)rpl;
		/* translate the HW channel in the message to our port index */
		int chan = FW_PORT_CMD_PORTID_GET(ntohl(p->op_to_portid));
		int port = adap->chan_map[chan];
		struct port_info *pi = adap2pinfo(adap, port);
		struct link_config *lc = &pi->link_cfg;
		u32 stat = ntohl(p->u.info.lstatus_to_modtype);
		int link_ok = (stat & FW_PORT_CMD_LSTATUS) != 0;
		u32 mod = FW_PORT_CMD_MODTYPE_GET(stat);

		/* decode flow-control and link speed from the status word */
		if (stat & FW_PORT_CMD_RXPAUSE)
			fc |= PAUSE_RX;
		if (stat & FW_PORT_CMD_TXPAUSE)
			fc |= PAUSE_TX;
		if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
			speed = SPEED_100;
		else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
			speed = SPEED_1000;
		else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
			speed = SPEED_10000;

		/* only cache the new state and notify the OS on a change,
		 * so repeated identical status messages are cheap no-ops
		 */
		if (link_ok != lc->link_ok || speed != lc->speed ||
		    fc != lc->fc) {                    /* something changed */
			lc->link_ok = link_ok;
			lc->speed = speed;
			lc->fc = fc;
			t4_os_link_changed(adap, port, link_ok);
		/* separately report transceiver module insert/remove/swap */
		if (mod != pi->mod_type) {
			pi->mod_type = mod;
			t4_os_portmod_changed(adap, port);
	return 0;
3017 static void __devinit get_pci_mode(struct adapter *adapter,
3018 struct pci_params *p)
3020 u16 val;
3021 u32 pcie_cap = pci_pcie_cap(adapter->pdev);
3023 if (pcie_cap) {
3024 pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA,
3025 &val);
3026 p->speed = val & PCI_EXP_LNKSTA_CLS;
3027 p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
3032 * init_link_config - initialize a link's SW state
3033 * @lc: structure holding the link state
3034 * @caps: link capabilities
3036 * Initializes the SW state maintained for each link, including the link's
3037 * capabilities and default speed/flow-control/autonegotiation settings.
3039 static void __devinit init_link_config(struct link_config *lc,
3040 unsigned int caps)
3042 lc->supported = caps;
3043 lc->requested_speed = 0;
3044 lc->speed = 0;
3045 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
3046 if (lc->supported & FW_PORT_CAP_ANEG) {
3047 lc->advertising = lc->supported & ADVERT_MASK;
3048 lc->autoneg = AUTONEG_ENABLE;
3049 lc->requested_fc |= PAUSE_AUTONEG;
3050 } else {
3051 lc->advertising = 0;
3052 lc->autoneg = AUTONEG_DISABLE;
3056 int t4_wait_dev_ready(struct adapter *adap)
3058 if (t4_read_reg(adap, PL_WHOAMI) != 0xffffffff)
3059 return 0;
3060 msleep(500);
3061 return t4_read_reg(adap, PL_WHOAMI) != 0xffffffff ? 0 : -EIO;
3064 static int __devinit get_flash_params(struct adapter *adap)
3066 int ret;
3067 u32 info;
3069 ret = sf1_write(adap, 1, 1, 0, SF_RD_ID);
3070 if (!ret)
3071 ret = sf1_read(adap, 3, 0, 1, &info);
3072 t4_write_reg(adap, SF_OP, 0); /* unlock SF */
3073 if (ret)
3074 return ret;
3076 if ((info & 0xff) != 0x20) /* not a Numonix flash */
3077 return -EINVAL;
3078 info >>= 16; /* log2 of size */
3079 if (info >= 0x14 && info < 0x18)
3080 adap->params.sf_nsec = 1 << (info - 16);
3081 else if (info == 0x18)
3082 adap->params.sf_nsec = 64;
3083 else
3084 return -EINVAL;
3085 adap->params.sf_size = 1 << info;
3086 adap->params.sf_fw_start =
3087 t4_read_reg(adap, CIM_BOOT_CFG) & BOOTADDR_MASK;
3088 return 0;
3092 * t4_prep_adapter - prepare SW and HW for operation
 *	@adapter: the adapter
3096 * Initialize adapter SW state for the various HW modules, set initial
3097 * values for some adapter tunables, take PHYs out of reset, and
3098 * initialize the MDIO interface.
3100 int __devinit t4_prep_adapter(struct adapter *adapter)
3102 int ret;
3104 ret = t4_wait_dev_ready(adapter);
3105 if (ret < 0)
3106 return ret;
3108 get_pci_mode(adapter, &adapter->params.pci);
3109 adapter->params.rev = t4_read_reg(adapter, PL_REV);
3111 ret = get_flash_params(adapter);
3112 if (ret < 0) {
3113 dev_err(adapter->pdev_dev, "error %d identifying flash\n", ret);
3114 return ret;
3117 ret = get_vpd_params(adapter, &adapter->params.vpd);
3118 if (ret < 0)
3119 return ret;
3121 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
3124 * Default port for debugging in case we can't reach FW.
3126 adapter->params.nports = 1;
3127 adapter->params.portvec = 1;
3128 return 0;
int __devinit t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
	u8 addr[6];
	int ret, i, j = 0;
	struct fw_port_cmd c;

	/* one command buffer is reused for every port; it doubles as the
	 * reply buffer for the GET_PORT_INFO query below
	 */
	memset(&c, 0, sizeof(c));

	for_each_port(adap, i) {
		unsigned int rss_size;
		struct port_info *p = adap2pinfo(adap, i);

		/* j walks portvec to find the next physical port this
		 * function instance owns; it only ever advances
		 */
		while ((adap->params.portvec & (1 << j)) == 0)
			j++;

		/* query FW for this port's capabilities and status */
		c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) |
				       FW_CMD_REQUEST | FW_CMD_READ |
				       FW_PORT_CMD_PORTID(j));
		c.action_to_len16 = htonl(
			FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
			FW_LEN16(c));
		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
		if (ret)
			return ret;

		/* allocate a virtual interface; on success the return value
		 * is the VI id and addr holds the port's MAC address
		 */
		ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
		if (ret < 0)
			return ret;

		p->viid = ret;
		p->tx_chan = j;
		p->lport = j;
		p->rss_size = rss_size;
		memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
		memcpy(adap->port[i]->perm_addr, addr, ETH_ALEN);
		adap->port[i]->dev_id = j;

		/* note: ret is reused here to hold the port status word */
		ret = ntohl(c.u.info.lstatus_to_modtype);
		/* -1 flags "no MDIO" for ports without MDIO capability */
		p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP) ?
			FW_PORT_CMD_MDIOADDR_GET(ret) : -1;
		p->port_type = FW_PORT_CMD_PTYPE_GET(ret);
		p->mod_type = FW_PORT_CMD_MODTYPE_GET(ret);

		init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));
		j++;
	return 0;