/* drivers/net/netxen/netxen_nic_init.c */

/*
 * Copyright (C) 2003 - 2009 NetXen, Inc.
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
 * MA  02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.
 *
 * Contact Information:
 *    info@netxen.com
 * NetXen Inc,
 * 18922 Forge Drive
 * Cupertino, CA 95014-0701
 *
 */

#include <linux/netdevice.h>
#include <linux/delay.h>
#include "netxen_nic.h"
#include "netxen_nic_hw.h"
#include "netxen_nic_phan_reg.h"

struct crb_addr_pair {
	u32 addr;
	u32 data;
};

#define NETXEN_MAX_CRB_XFORM 60
static unsigned int crb_addr_xform[NETXEN_MAX_CRB_XFORM];
#define NETXEN_ADDR_ERROR (0xffffffff)

#define crb_addr_transform(name) \
	crb_addr_xform[NETXEN_HW_PX_MAP_CRB_##name] = \
	NETXEN_HW_CRB_HUB_AGT_ADR_##name << 20

#define NETXEN_NIC_XDMA_RESET	0x8000ff

static void
netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter, uint32_t ringid);

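/*
 * Fill crb_addr_xform[] with the hub agent address
 * (NETXEN_HW_CRB_HUB_AGT_ADR_*) for each CRB window, so that
 * netxen_decode_crb_addr() below can map an internal Phantom CRB
 * address onto the external PCI CRB space.
 */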
static void crb_addr_transform_setup(void)
{
	crb_addr_transform(XDMA);
	crb_addr_transform(TIMR);
	crb_addr_transform(SRE);
	crb_addr_transform(SQN3);
	crb_addr_transform(SQN2);
	crb_addr_transform(SQN1);
	crb_addr_transform(SQN0);
	crb_addr_transform(SQS3);
	crb_addr_transform(SQS2);
	crb_addr_transform(SQS1);
	crb_addr_transform(SQS0);
	crb_addr_transform(RPMX7);
	crb_addr_transform(RPMX6);
	crb_addr_transform(RPMX5);
	crb_addr_transform(RPMX4);
	crb_addr_transform(RPMX3);
	crb_addr_transform(RPMX2);
	crb_addr_transform(RPMX1);
	crb_addr_transform(RPMX0);
	crb_addr_transform(ROMUSB);
	crb_addr_transform(SN);
	crb_addr_transform(QMN);
	crb_addr_transform(QMS);
	crb_addr_transform(PGNI);
	crb_addr_transform(PGND);
	crb_addr_transform(PGN3);
	crb_addr_transform(PGN2);
	crb_addr_transform(PGN1);
	crb_addr_transform(PGN0);
	crb_addr_transform(PGSI);
	crb_addr_transform(PGSD);
	crb_addr_transform(PGS3);
	crb_addr_transform(PGS2);
	crb_addr_transform(PGS1);
	crb_addr_transform(PGS0);
	crb_addr_transform(PS);
	crb_addr_transform(PH);
	crb_addr_transform(NIU);
	crb_addr_transform(I2Q);
	crb_addr_transform(EG);
	crb_addr_transform(MN);
	crb_addr_transform(MS);
	crb_addr_transform(CAS2);
	crb_addr_transform(CAS1);
	crb_addr_transform(CAS0);
	crb_addr_transform(CAM);
	crb_addr_transform(C2C1);
	crb_addr_transform(C2C0);
	crb_addr_transform(SMB);
	crb_addr_transform(OCM0);
	crb_addr_transform(I2C0);
}

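/*
 * Wait for the command peg to finish initializing (up to ~2 seconds),
 * then advertise the host's interrupt scheme, MSI mode and multi-port
 * mode, and acknowledge the handshake through CRB_CMDPEG_STATE.
 */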
int netxen_init_firmware(struct netxen_adapter *adapter)
{
	u32 state = 0, loops = 0, err = 0;

	/* Window 1 call */
	state = adapter->pci_read_normalize(adapter, CRB_CMDPEG_STATE);

	if (state == PHAN_INITIALIZE_ACK)
		return 0;

	while (state != PHAN_INITIALIZE_COMPLETE && loops < 2000) {
		msleep(1);
		/* Window 1 call */
		state = adapter->pci_read_normalize(adapter, CRB_CMDPEG_STATE);

		loops++;
	}
	if (loops >= 2000) {
		printk(KERN_ERR "Cmd Peg initialization not complete:%x.\n",
				state);
		err = -EIO;
		return err;
	}
	/* Window 1 call */
	adapter->pci_write_normalize(adapter,
			CRB_NIC_CAPABILITIES_HOST, INTR_SCHEME_PERPORT);
	adapter->pci_write_normalize(adapter,
			CRB_NIC_MSI_MODE_HOST, MSI_MODE_MULTIFUNC);
	adapter->pci_write_normalize(adapter,
			CRB_MPORT_MODE, MPORT_MULTI_FUNCTION_MODE);
	adapter->pci_write_normalize(adapter,
			CRB_CMDPEG_STATE, PHAN_INITIALIZE_ACK);

	return err;
}

void netxen_release_rx_buffers(struct netxen_adapter *adapter)
{
	struct netxen_recv_context *recv_ctx;
	struct nx_host_rds_ring *rds_ring;
	struct netxen_rx_buffer *rx_buf;
	int i, ring;

	recv_ctx = &adapter->recv_ctx;
	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &recv_ctx->rds_rings[ring];
		for (i = 0; i < rds_ring->max_rx_desc_count; ++i) {
			rx_buf = &(rds_ring->rx_buf_arr[i]);
			if (rx_buf->state == NETXEN_BUFFER_FREE)
				continue;
			pci_unmap_single(adapter->pdev,
					rx_buf->dma,
					rds_ring->dma_size,
					PCI_DMA_FROMDEVICE);
			if (rx_buf->skb != NULL)
				dev_kfree_skb_any(rx_buf->skb);
		}
	}
}

void netxen_release_tx_buffers(struct netxen_adapter *adapter)
{
	struct netxen_cmd_buffer *cmd_buf;
	struct netxen_skb_frag *buffrag;
	int i, j;

	cmd_buf = adapter->cmd_buf_arr;
	for (i = 0; i < adapter->max_tx_desc_count; i++) {
		buffrag = cmd_buf->frag_array;
		if (buffrag->dma) {
			pci_unmap_single(adapter->pdev, buffrag->dma,
					buffrag->length, PCI_DMA_TODEVICE);
			buffrag->dma = 0ULL;
		}
		for (j = 0; j < cmd_buf->frag_count; j++) {
			buffrag++;
			if (buffrag->dma) {
				pci_unmap_page(adapter->pdev, buffrag->dma,
						buffrag->length,
						PCI_DMA_TODEVICE);
				buffrag->dma = 0ULL;
			}
		}
		/* Free the skb we received in netxen_nic_xmit_frame */
		if (cmd_buf->skb) {
			dev_kfree_skb_any(cmd_buf->skb);
			cmd_buf->skb = NULL;
		}
		cmd_buf++;
	}
}

void netxen_free_sw_resources(struct netxen_adapter *adapter)
{
	struct netxen_recv_context *recv_ctx;
	struct nx_host_rds_ring *rds_ring;
	int ring;

	recv_ctx = &adapter->recv_ctx;
	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &recv_ctx->rds_rings[ring];
		if (rds_ring->rx_buf_arr) {
			vfree(rds_ring->rx_buf_arr);
			rds_ring->rx_buf_arr = NULL;
		}
	}

	if (adapter->cmd_buf_arr)
		vfree(adapter->cmd_buf_arr);
	return;
}

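/*
 * Allocate the host-side software state: the tx command buffer array
 * plus, for each rds (receive descriptor) ring, an rx buffer array and
 * its free list, sized according to ring type (normal, jumbo, LRO) and
 * chip revision.
 */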
int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
{
	struct netxen_recv_context *recv_ctx;
	struct nx_host_rds_ring *rds_ring;
	struct netxen_rx_buffer *rx_buf;
	int ring, i, num_rx_bufs;

	struct netxen_cmd_buffer *cmd_buf_arr;
	struct net_device *netdev = adapter->netdev;

	cmd_buf_arr = (struct netxen_cmd_buffer *)vmalloc(TX_RINGSIZE);
	if (cmd_buf_arr == NULL) {
		printk(KERN_ERR "%s: Failed to allocate cmd buffer ring\n",
				netdev->name);
		return -ENOMEM;
	}
	memset(cmd_buf_arr, 0, TX_RINGSIZE);
	adapter->cmd_buf_arr = cmd_buf_arr;

	recv_ctx = &adapter->recv_ctx;
	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &recv_ctx->rds_rings[ring];
		switch (RCV_DESC_TYPE(ring)) {
		case RCV_DESC_NORMAL:
			rds_ring->max_rx_desc_count =
				adapter->max_rx_desc_count;
			rds_ring->flags = RCV_DESC_NORMAL;
			if (adapter->ahw.cut_through) {
				rds_ring->dma_size =
					NX_CT_DEFAULT_RX_BUF_LEN;
				rds_ring->skb_size =
					NX_CT_DEFAULT_RX_BUF_LEN;
			} else {
				rds_ring->dma_size = RX_DMA_MAP_LEN;
				rds_ring->skb_size =
					MAX_RX_BUFFER_LENGTH;
			}
			break;

		case RCV_DESC_JUMBO:
			rds_ring->max_rx_desc_count =
				adapter->max_jumbo_rx_desc_count;
			rds_ring->flags = RCV_DESC_JUMBO;
			if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
				rds_ring->dma_size =
					NX_P3_RX_JUMBO_BUF_MAX_LEN;
			else
				rds_ring->dma_size =
					NX_P2_RX_JUMBO_BUF_MAX_LEN;
			rds_ring->skb_size =
				rds_ring->dma_size + NET_IP_ALIGN;
			break;

		case RCV_RING_LRO:
			rds_ring->max_rx_desc_count =
				adapter->max_lro_rx_desc_count;
			rds_ring->flags = RCV_DESC_LRO;
			rds_ring->dma_size = RX_LRO_DMA_MAP_LEN;
			rds_ring->skb_size = MAX_RX_LRO_BUFFER_LENGTH;
			break;

		}
		rds_ring->rx_buf_arr = (struct netxen_rx_buffer *)
			vmalloc(RCV_BUFFSIZE);
		if (rds_ring->rx_buf_arr == NULL) {
			printk(KERN_ERR "%s: Failed to allocate "
				"rx buffer ring %d\n",
				netdev->name, ring);
			/* free whatever was already allocated */
			goto err_out;
		}
		memset(rds_ring->rx_buf_arr, 0, RCV_BUFFSIZE);
		INIT_LIST_HEAD(&rds_ring->free_list);
		/*
		 * Now go through all of them, set reference handles
		 * and put them in the queues.
		 */
		num_rx_bufs = rds_ring->max_rx_desc_count;
		rx_buf = rds_ring->rx_buf_arr;
		for (i = 0; i < num_rx_bufs; i++) {
			list_add_tail(&rx_buf->list,
					&rds_ring->free_list);
			rx_buf->ref_handle = i;
			rx_buf->state = NETXEN_BUFFER_FREE;
			rx_buf++;
		}
	}

	return 0;

err_out:
	netxen_free_sw_resources(adapter);
	return -ENOMEM;
}

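/*
 * Hook up the port-type specific (GbE vs. 10GbE) hardware operations;
 * on P3 revisions the MTU and promiscuous-mode callbacks are then
 * overridden with the firmware-command based variants.
 */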
void netxen_initialize_adapter_ops(struct netxen_adapter *adapter)
{
	switch (adapter->ahw.port_type) {
	case NETXEN_NIC_GBE:
		adapter->enable_phy_interrupts =
		    netxen_niu_gbe_enable_phy_interrupts;
		adapter->disable_phy_interrupts =
		    netxen_niu_gbe_disable_phy_interrupts;
		adapter->macaddr_set = netxen_niu_macaddr_set;
		adapter->set_mtu = netxen_nic_set_mtu_gb;
		adapter->set_promisc = netxen_niu_set_promiscuous_mode;
		adapter->phy_read = netxen_niu_gbe_phy_read;
		adapter->phy_write = netxen_niu_gbe_phy_write;
		adapter->init_port = netxen_niu_gbe_init_port;
		adapter->stop_port = netxen_niu_disable_gbe_port;
		break;

	case NETXEN_NIC_XGBE:
		adapter->enable_phy_interrupts =
		    netxen_niu_xgbe_enable_phy_interrupts;
		adapter->disable_phy_interrupts =
		    netxen_niu_xgbe_disable_phy_interrupts;
		adapter->macaddr_set = netxen_niu_xg_macaddr_set;
		adapter->set_mtu = netxen_nic_set_mtu_xgb;
		adapter->init_port = netxen_niu_xg_init_port;
		adapter->set_promisc = netxen_niu_xg_set_promiscuous_mode;
		adapter->stop_port = netxen_niu_disable_xg_port;
		break;

	default:
		break;
	}

	if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
		adapter->set_mtu = nx_fw_cmd_set_mtu;
		adapter->set_promisc = netxen_p3_nic_set_promisc;
	}
}

/*
 * netxen_decode_crb_addr() - utility to translate from internal Phantom CRB
 * address to external PCI CRB address.
 */
static u32 netxen_decode_crb_addr(u32 addr)
{
	int i;
	u32 base_addr, offset, pci_base;

	crb_addr_transform_setup();

	pci_base = NETXEN_ADDR_ERROR;
	base_addr = addr & 0xfff00000;
	offset = addr & 0x000fffff;

	for (i = 0; i < NETXEN_MAX_CRB_XFORM; i++) {
		if (crb_addr_xform[i] == base_addr) {
			pci_base = i << 20;
			break;
		}
	}
	if (pci_base == NETXEN_ADDR_ERROR)
		return pci_base;
	else
		return (pci_base + offset);
}

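/*
 * Flash ROM access is serialized through PCIe hardware semaphore 2:
 * rom_lock()/netxen_rom_unlock() acquire and release it around the
 * do_rom_fast_read*() helpers below.
 */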
static long rom_max_timeout = 100;
static long rom_lock_timeout = 10000;

static int rom_lock(struct netxen_adapter *adapter)
{
	int iter;
	u32 done = 0;
	int timeout = 0;

	while (!done) {
		/* acquire semaphore2 from PCI HW block */
		netxen_nic_read_w0(adapter, NETXEN_PCIE_REG(PCIE_SEM2_LOCK),
				&done);
		if (done == 1)
			break;
		if (timeout >= rom_lock_timeout)
			return -EIO;

		timeout++;
		/*
		 * Yield CPU
		 */
		if (!in_atomic())
			schedule();
		else {
			for (iter = 0; iter < 20; iter++)
				cpu_relax();	/* This a nop instr on i386 */
		}
	}
	netxen_nic_reg_write(adapter, NETXEN_ROM_LOCK_ID, ROM_LOCK_DRIVER);
	return 0;
}

static int netxen_wait_rom_done(struct netxen_adapter *adapter)
{
	long timeout = 0;
	long done = 0;

	cond_resched();

	while (done == 0) {
		done = netxen_nic_reg_read(adapter, NETXEN_ROMUSB_GLB_STATUS);
		done &= 2;
		timeout++;
		if (timeout >= rom_max_timeout) {
			printk("Timeout reached waiting for rom done");
			return -EIO;
		}
	}
	return 0;
}

static void netxen_rom_unlock(struct netxen_adapter *adapter)
{
	u32 val;

	/* release semaphore2 */
	netxen_nic_read_w0(adapter, NETXEN_PCIE_REG(PCIE_SEM2_UNLOCK), &val);
}

static int do_rom_fast_read(struct netxen_adapter *adapter,
			int addr, int *valp)
{
	netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ADDRESS, addr);
	netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
	netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 3);
	netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_INSTR_OPCODE, 0xb);
	if (netxen_wait_rom_done(adapter)) {
		printk("Error waiting for rom done\n");
		return -EIO;
	}
	/* reset abyte_cnt and dummy_byte_cnt */
	netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 0);
	udelay(10);
	netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);

	*valp = netxen_nic_reg_read(adapter, NETXEN_ROMUSB_ROM_RDATA);
	return 0;
}

static int do_rom_fast_read_words(struct netxen_adapter *adapter, int addr,
				u8 *bytes, size_t size)
{
	int addridx;
	int ret = 0;

	for (addridx = addr; addridx < (addr + size); addridx += 4) {
		int v;
		ret = do_rom_fast_read(adapter, addridx, &v);
		if (ret != 0)
			break;
		*(__le32 *)bytes = cpu_to_le32(v);
		bytes += 4;
	}

	return ret;
}

int
netxen_rom_fast_read_words(struct netxen_adapter *adapter, int addr,
				u8 *bytes, size_t size)
{
	int ret;

	ret = rom_lock(adapter);
	if (ret < 0)
		return ret;

	ret = do_rom_fast_read_words(adapter, addr, bytes, size);

	netxen_rom_unlock(adapter);
	return ret;
}

int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp)
{
	int ret;

	if (rom_lock(adapter) != 0)
		return -EIO;

	ret = do_rom_fast_read(adapter, addr, valp);
	netxen_rom_unlock(adapter);
	return ret;
}

#define NETXEN_BOARDTYPE	0x4008
#define NETXEN_BOARDNUM 	0x400c
#define NETXEN_CHIPNUM		0x4010

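/*
 * Reset the chip and replay the CRB initialization table stored in
 * flash: each entry is an (address, value) pair that is translated via
 * netxen_decode_crb_addr() and written to the hardware, with a few
 * registers skipped or patched depending on chip revision.
 */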
int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose)
{
	int addr, val;
	int i, n, init_delay = 0;
	struct crb_addr_pair *buf;
	unsigned offset;
	u32 off;

	/* resetall */
	rom_lock(adapter);
	netxen_crb_writelit_adapter(adapter, NETXEN_ROMUSB_GLB_SW_RESET,
				    0xffffffff);
	netxen_rom_unlock(adapter);

	if (verbose) {
		if (netxen_rom_fast_read(adapter, NETXEN_BOARDTYPE, &val) == 0)
			printk("P2 ROM board type: 0x%08x\n", val);
		else
			printk("Could not read board type\n");
		if (netxen_rom_fast_read(adapter, NETXEN_BOARDNUM, &val) == 0)
			printk("P2 ROM board num: 0x%08x\n", val);
		else
			printk("Could not read board number\n");
		if (netxen_rom_fast_read(adapter, NETXEN_CHIPNUM, &val) == 0)
			printk("P2 ROM chip num: 0x%08x\n", val);
		else
			printk("Could not read chip number\n");
	}

	if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
		if (netxen_rom_fast_read(adapter, 0, &n) != 0 ||
			(n != 0xcafecafe) ||
			netxen_rom_fast_read(adapter, 4, &n) != 0) {
			printk(KERN_ERR "%s: ERROR Reading crb_init area: "
					"n: %08x\n", netxen_nic_driver_name, n);
			return -EIO;
		}
		offset = n & 0xffffU;
		n = (n >> 16) & 0xffffU;
	} else {
		if (netxen_rom_fast_read(adapter, 0, &n) != 0 ||
			!(n & 0x80000000)) {
			printk(KERN_ERR "%s: ERROR Reading crb_init area: "
					"n: %08x\n", netxen_nic_driver_name, n);
			return -EIO;
		}
		offset = 1;
		n &= ~0x80000000;
	}

	if (n < 1024) {
		if (verbose)
			printk(KERN_DEBUG "%s: %d CRB init values found"
			       " in ROM.\n", netxen_nic_driver_name, n);
	} else {
		printk(KERN_ERR "%s:n=0x%x Error! NetXen card flash not"
		       " initialized.\n", __func__, n);
		return -EIO;
	}

	buf = kcalloc(n, sizeof(struct crb_addr_pair), GFP_KERNEL);
	if (buf == NULL) {
		printk("%s: netxen_pinit_from_rom: Unable to calloc memory.\n",
				netxen_nic_driver_name);
		return -ENOMEM;
	}
	for (i = 0; i < n; i++) {
		if (netxen_rom_fast_read(adapter, 8*i + 4*offset, &val) != 0 ||
		netxen_rom_fast_read(adapter, 8*i + 4*offset + 4, &addr) != 0) {
			kfree(buf);
			return -EIO;
		}

		buf[i].addr = addr;
		buf[i].data = val;

		if (verbose)
			printk(KERN_DEBUG "%s: PCI: 0x%08x == 0x%08x\n",
				netxen_nic_driver_name,
				(u32)netxen_decode_crb_addr(addr), val);
	}
	for (i = 0; i < n; i++) {

		off = netxen_decode_crb_addr(buf[i].addr);
		if (off == NETXEN_ADDR_ERROR) {
			printk(KERN_ERR "CRB init value out of range %x\n",
					buf[i].addr);
			continue;
		}
		off += NETXEN_PCI_CRBSPACE;
		/* skipping cold reboot MAGIC */
		if (off == NETXEN_CAM_RAM(0x1fc))
			continue;

		if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
			/* do not reset PCI */
			if (off == (ROMUSB_GLB + 0xbc))
				continue;
			if (off == (ROMUSB_GLB + 0xa8))
				continue;
			if (off == (ROMUSB_GLB + 0xc8)) /* core clock */
				continue;
			if (off == (ROMUSB_GLB + 0x24)) /* MN clock */
				continue;
			if (off == (ROMUSB_GLB + 0x1c)) /* MS clock */
				continue;
			if (off == (NETXEN_CRB_PEG_NET_1 + 0x18))
				buf[i].data = 0x1020;
			/* skip the function enable register */
			if (off == NETXEN_PCIE_REG(PCIE_SETUP_FUNCTION))
				continue;
			if (off == NETXEN_PCIE_REG(PCIE_SETUP_FUNCTION2))
				continue;
			if ((off & 0x0ff00000) == NETXEN_CRB_SMB)
				continue;
		}

		if (off == NETXEN_ADDR_ERROR) {
			printk(KERN_ERR "%s: Err: Unknown addr: 0x%08x\n",
					netxen_nic_driver_name, buf[i].addr);
			continue;
		}

		init_delay = 1;
		/* After writing this register, HW needs time for CRB */
		/* to quiet down (else crb_window returns 0xffffffff) */
		if (off == NETXEN_ROMUSB_GLB_SW_RESET) {
			init_delay = 1000;
			if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
				/* hold xdma in reset also */
				buf[i].data = NETXEN_NIC_XDMA_RESET;
				buf[i].data = 0x8000ff;
			}
		}

		adapter->hw_write_wx(adapter, off, &buf[i].data, 4);

		msleep(init_delay);
	}
	kfree(buf);

	/* disable_peg_cache_all */

	/* unreset_net_cache */
	if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
		adapter->hw_read_wx(adapter,
				NETXEN_ROMUSB_GLB_SW_RESET, &val, 4);
		netxen_crb_writelit_adapter(adapter,
				NETXEN_ROMUSB_GLB_SW_RESET, (val & 0xffffff0f));
	}

	/* p2dn replyCount */
	netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_D + 0xec, 0x1e);
	/* disable_peg_cache 0 */
	netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_D + 0x4c, 8);
	/* disable_peg_cache 1 */
	netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_I + 0x4c, 8);

	/* peg_clr_all */

	/* peg_clr 0 */
	netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_0 + 0x8, 0);
	netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_0 + 0xc, 0);
	/* peg_clr 1 */
	netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_1 + 0x8, 0);
	netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_1 + 0xc, 0);
	/* peg_clr 2 */
	netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_2 + 0x8, 0);
	netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_2 + 0xc, 0);
	/* peg_clr 3 */
	netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_3 + 0x8, 0);
	netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_3 + 0xc, 0);
	return 0;
}

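/*
 * Allocate the "dummy DMA" buffer and publish its bus address to the
 * firmware through the CRB_HOST_DUMMY_BUF_ADDR_HI/LO registers.
 */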
int netxen_initialize_adapter_offload(struct netxen_adapter *adapter)
{
	uint64_t addr;
	uint32_t hi;
	uint32_t lo;

	adapter->dummy_dma.addr =
	    pci_alloc_consistent(adapter->pdev,
				 NETXEN_HOST_DUMMY_DMA_SIZE,
				 &adapter->dummy_dma.phys_addr);
	if (adapter->dummy_dma.addr == NULL) {
		printk("%s: ERROR: Could not allocate dummy DMA memory\n",
		       __func__);
		return -ENOMEM;
	}

	addr = (uint64_t) adapter->dummy_dma.phys_addr;
	hi = (addr >> 32) & 0xffffffff;
	lo = addr & 0xffffffff;

	adapter->pci_write_normalize(adapter, CRB_HOST_DUMMY_BUF_ADDR_HI, hi);
	adapter->pci_write_normalize(adapter, CRB_HOST_DUMMY_BUF_ADDR_LO, lo);

	if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
		uint32_t temp = 0;
		adapter->hw_write_wx(adapter, CRB_HOST_DUMMY_BUF, &temp, 4);
	}

	return 0;
}

void netxen_free_adapter_offload(struct netxen_adapter *adapter)
{
	int i = 100;

	if (!adapter->dummy_dma.addr)
		return;

	if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
		do {
			if (dma_watchdog_shutdown_request(adapter) == 1)
				break;
			msleep(50);
			if (dma_watchdog_shutdown_poll_result(adapter) == 1)
				break;
		} while (--i);
	}

	if (i) {
		pci_free_consistent(adapter->pdev,
				    NETXEN_HOST_DUMMY_DMA_SIZE,
				    adapter->dummy_dma.addr,
				    adapter->dummy_dma.phys_addr);
		adapter->dummy_dma.addr = NULL;
	} else {
		printk(KERN_ERR "%s: dma_watchdog_shutdown failed\n",
				adapter->netdev->name);
	}
}

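/*
 * Unless a pegtune value was supplied, poll CRB_CMDPEG_STATE until the
 * firmware reports initialization complete (up to ~30 seconds).
 */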
int netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val)
{
	u32 val = 0;
	int retries = 60;

	if (!pegtune_val) {
		do {
			val = adapter->pci_read_normalize(adapter,
					CRB_CMDPEG_STATE);

			if (val == PHAN_INITIALIZE_COMPLETE ||
				val == PHAN_INITIALIZE_ACK)
				return 0;

			msleep(500);

		} while (--retries);

		if (!retries) {
			pegtune_val = adapter->pci_read_normalize(adapter,
					NETXEN_ROMUSB_GLB_PEGTUNE_DONE);
			printk(KERN_WARNING "netxen_phantom_init: init failed, "
					"pegtune_val=%x\n", pegtune_val);
			return -1;
		}
	}

	return 0;
}

int netxen_receive_peg_ready(struct netxen_adapter *adapter)
{
	u32 val = 0;
	int retries = 2000;

	do {
		val = adapter->pci_read_normalize(adapter, CRB_RCVPEG_STATE);

		if (val == PHAN_PEG_RCV_INITIALIZED)
			return 0;

		msleep(10);

	} while (--retries);

	if (!retries) {
		printk(KERN_ERR "Receive Peg initialization not "
				"complete, state: 0x%x.\n", val);
		return -EIO;
	}

	return 0;
}

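/*
 * Unmap the receive buffer at @index, detach its skb (setting the
 * checksum state from the status descriptor), and return the buffer
 * to the ring's free list.
 */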
static struct sk_buff *netxen_process_rxbuf(struct netxen_adapter *adapter,
		struct nx_host_rds_ring *rds_ring, u16 index, u16 cksum)
{
	struct netxen_rx_buffer *buffer;
	struct sk_buff *skb;

	buffer = &rds_ring->rx_buf_arr[index];

	pci_unmap_single(adapter->pdev, buffer->dma, rds_ring->dma_size,
			PCI_DMA_FROMDEVICE);

	skb = buffer->skb;
	if (!skb)
		goto no_skb;

	if (likely(adapter->rx_csum && cksum == STATUS_CKSUM_OK)) {
		adapter->stats.csummed++;
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else
		skb->ip_summed = CHECKSUM_NONE;

	skb->dev = adapter->netdev;

	buffer->skb = NULL;

no_skb:
	buffer->state = NETXEN_BUFFER_FREE;
	buffer->lro_current_frags = 0;
	buffer->lro_expected_frags = 0;
	list_add_tail(&buffer->list, &rds_ring->free_list);
	return skb;
}

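/*
 * Handle one receive status descriptor: LRO frames are accumulated
 * until the last fragment arrives, other frames are trimmed to the
 * reported length, then the skb is passed up the stack.
 */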
static void netxen_process_rcv(struct netxen_adapter *adapter,
		struct status_desc *desc)
{
	struct net_device *netdev = adapter->netdev;
	u64 sts_data = le64_to_cpu(desc->status_desc_data);
	int index = netxen_get_sts_refhandle(sts_data);
	struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
	struct netxen_rx_buffer *buffer;
	struct sk_buff *skb;
	u32 length = netxen_get_sts_totallength(sts_data);
	u32 desc_ctx;
	u16 pkt_offset = 0, cksum;
	struct nx_host_rds_ring *rds_ring;

	desc_ctx = netxen_get_sts_type(sts_data);
	if (unlikely(desc_ctx >= NUM_RCV_DESC_RINGS)) {
		return;
	}

	rds_ring = &recv_ctx->rds_rings[desc_ctx];
	if (unlikely(index > rds_ring->max_rx_desc_count)) {
		return;
	}
	buffer = &rds_ring->rx_buf_arr[index];
	if (desc_ctx == RCV_DESC_LRO_CTXID) {
		buffer->lro_current_frags++;
		if (netxen_get_sts_desc_lro_last_frag(desc)) {
			buffer->lro_expected_frags =
			    netxen_get_sts_desc_lro_cnt(desc);
			buffer->lro_length = length;
		}
		if (buffer->lro_current_frags != buffer->lro_expected_frags) {
			return;
		}
	}

	cksum = netxen_get_sts_status(sts_data);

	skb = netxen_process_rxbuf(adapter, rds_ring, index, cksum);
	if (!skb)
		return;

	if (desc_ctx == RCV_DESC_LRO_CTXID) {
		/* True length was only available on the last pkt */
		skb_put(skb, buffer->lro_length);
	} else {
		if (length > rds_ring->skb_size)
			skb_put(skb, rds_ring->skb_size);
		else
			skb_put(skb, length);

		pkt_offset = netxen_get_sts_pkt_offset(sts_data);
		if (pkt_offset)
			skb_pull(skb, pkt_offset);
	}

	skb->protocol = eth_type_trans(skb, netdev);

	netif_receive_skb(skb);

	adapter->stats.no_rcv++;
	adapter->stats.rxbytes += length;
}

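/*
 * Drain up to @max entries from the status ring, then replenish the
 * rds rings and publish the new status-ring consumer index.
 */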
int
netxen_process_rcv_ring(struct netxen_adapter *adapter, int max)
{
	struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
	struct status_desc *desc_head = recv_ctx->rcv_status_desc_head;
	struct status_desc *desc;
	u32 consumer = recv_ctx->status_rx_consumer;
	int count = 0, ring;
	u64 sts_data;
	u16 opcode;

	while (count < max) {
		desc = &desc_head[consumer];
		sts_data = le64_to_cpu(desc->status_desc_data);

		if (!(sts_data & STATUS_OWNER_HOST))
			break;

		opcode = netxen_get_sts_opcode(sts_data);

		netxen_process_rcv(adapter, desc);

		desc->status_desc_data = cpu_to_le64(STATUS_OWNER_PHANTOM);

		consumer = get_next_index(consumer,
				adapter->max_rx_desc_count);
		count++;
	}

	for (ring = 0; ring < adapter->max_rds_rings; ring++)
		netxen_post_rx_buffers_nodb(adapter, ring);

	if (count) {
		recv_ctx->status_rx_consumer = consumer;
		adapter->pci_write_normalize(adapter,
				recv_ctx->crb_sts_consumer, consumer);
	}

	return count;
}

/* Process Command status ring */
int netxen_process_cmd_ring(struct netxen_adapter *adapter)
{
	u32 last_consumer, consumer;
	int count = 0, i;
	struct netxen_cmd_buffer *buffer;
	struct pci_dev *pdev = adapter->pdev;
	struct net_device *netdev = adapter->netdev;
	struct netxen_skb_frag *frag;
	int done = 0;

	last_consumer = adapter->last_cmd_consumer;
	barrier(); /* cmd_consumer can change underneath */
	consumer = le32_to_cpu(*(adapter->cmd_consumer));

	while (last_consumer != consumer) {
		buffer = &adapter->cmd_buf_arr[last_consumer];
		if (buffer->skb) {
			frag = &buffer->frag_array[0];
			pci_unmap_single(pdev, frag->dma, frag->length,
					PCI_DMA_TODEVICE);
			frag->dma = 0ULL;
			for (i = 1; i < buffer->frag_count; i++) {
				frag++;	/* Get the next frag */
				pci_unmap_page(pdev, frag->dma, frag->length,
						PCI_DMA_TODEVICE);
				frag->dma = 0ULL;
			}

			adapter->stats.xmitfinished++;
			dev_kfree_skb_any(buffer->skb);
			buffer->skb = NULL;
		}

		last_consumer = get_next_index(last_consumer,
					adapter->max_tx_desc_count);
		if (++count >= MAX_STATUS_HANDLE)
			break;
	}

	if (count) {
		adapter->last_cmd_consumer = last_consumer;
		smp_mb();
		if (netif_queue_stopped(netdev) && netif_running(netdev)) {
			netif_tx_lock(netdev);
			netif_wake_queue(netdev);
			smp_mb();
			netif_tx_unlock(netdev);
		}
	}
	/*
	 * If everything is freed up to consumer then check if the ring is full
	 * If the ring is full then check if more needs to be freed and
	 * schedule the call back again.
	 *
	 * This happens when there are 2 CPUs. One could be freeing and the
	 * other filling it. If the ring is full when we get out of here and
	 * the card has already interrupted the host then the host can miss the
	 * interrupt.
	 *
	 * There is still a possible race condition and the host could miss an
	 * interrupt. The card has to take care of this.
	 */
	barrier(); /* cmd_consumer can change underneath */
	consumer = le32_to_cpu(*(adapter->cmd_consumer));
	done = (last_consumer == consumer);

	return (done);
}

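/*
 * Refill a receive descriptor ring from its free list: allocate and
 * map an skb per free buffer, write the descriptors, and advance the
 * producer index (plus a doorbell message for pre-4.0 firmware).
 */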
void
netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ringid)
{
	struct pci_dev *pdev = adapter->pdev;
	struct sk_buff *skb;
	struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
	struct nx_host_rds_ring *rds_ring = NULL;
	uint producer;
	struct rcv_desc *pdesc;
	struct netxen_rx_buffer *buffer;
	int count = 0;
	netxen_ctx_msg msg = 0;
	dma_addr_t dma;
	struct list_head *head;

	rds_ring = &recv_ctx->rds_rings[ringid];

	producer = rds_ring->producer;
	head = &rds_ring->free_list;

	/* We can start writing rx descriptors into the phantom memory. */
	while (!list_empty(head)) {

		skb = dev_alloc_skb(rds_ring->skb_size);
		if (unlikely(!skb)) {
			break;
		}

		if (!adapter->ahw.cut_through)
			skb_reserve(skb, 2);

		dma = pci_map_single(pdev, skb->data,
				rds_ring->dma_size, PCI_DMA_FROMDEVICE);
		if (pci_dma_mapping_error(pdev, dma)) {
			dev_kfree_skb_any(skb);
			break;
		}

		count++;
		buffer = list_entry(head->next, struct netxen_rx_buffer, list);
		list_del(&buffer->list);

		buffer->skb = skb;
		buffer->state = NETXEN_BUFFER_BUSY;
		buffer->dma = dma;

		/* make a rcv descriptor  */
		pdesc = &rds_ring->desc_head[producer];
		pdesc->addr_buffer = cpu_to_le64(dma);
		pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
		pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);

		producer = get_next_index(producer, rds_ring->max_rx_desc_count);
	}
	/* if we did allocate buffers, then write the count to Phantom */
	if (count) {
		rds_ring->producer = producer;
		/* Window = 1 */
		adapter->pci_write_normalize(adapter,
				rds_ring->crb_rcv_producer,
				(producer-1) & (rds_ring->max_rx_desc_count-1));

		if (adapter->fw_major < 4) {
			/*
			 * Write a doorbell msg to tell phanmon of change in
			 * receive ring producer
			 * Only for firmware version < 4.0.0
			 */
			netxen_set_msg_peg_id(msg, NETXEN_RCV_PEG_DB_ID);
			netxen_set_msg_privid(msg);
			netxen_set_msg_count(msg,
					((producer - 1) &
					(rds_ring->max_rx_desc_count - 1)));
			netxen_set_msg_ctxid(msg, adapter->portnum);
			netxen_set_msg_opcode(msg, NETXEN_RCV_PRODUCER(ringid));
			writel(msg,
				DB_NORMALIZE(adapter,
					NETXEN_RCV_PRODUCER_OFFSET));
		}
	}
}

static void
netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter, uint32_t ringid)
{
	struct pci_dev *pdev = adapter->pdev;
	struct sk_buff *skb;
	struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
	struct nx_host_rds_ring *rds_ring = NULL;
	u32 producer;
	struct rcv_desc *pdesc;
	struct netxen_rx_buffer *buffer;
	int count = 0;
	struct list_head *head;
	dma_addr_t dma;

	rds_ring = &recv_ctx->rds_rings[ringid];

	producer = rds_ring->producer;
	head = &rds_ring->free_list;
	/* We can start writing rx descriptors into the phantom memory. */
	while (!list_empty(head)) {

		skb = dev_alloc_skb(rds_ring->skb_size);
		if (unlikely(!skb)) {
			break;
		}

		if (!adapter->ahw.cut_through)
			skb_reserve(skb, 2);

		dma = pci_map_single(pdev, skb->data,
				rds_ring->dma_size, PCI_DMA_FROMDEVICE);
		if (pci_dma_mapping_error(pdev, dma)) {
			dev_kfree_skb_any(skb);
			break;
		}

		count++;
		buffer = list_entry(head->next, struct netxen_rx_buffer, list);
		list_del(&buffer->list);

		buffer->skb = skb;
		buffer->state = NETXEN_BUFFER_BUSY;
		buffer->dma = dma;

		/* make a rcv descriptor  */
		pdesc = &rds_ring->desc_head[producer];
		pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
		pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
		pdesc->addr_buffer = cpu_to_le64(buffer->dma);

		producer = get_next_index(producer, rds_ring->max_rx_desc_count);
	}

	/* if we did allocate buffers, then write the count to Phantom */
	if (count) {
		rds_ring->producer = producer;
		/* Window = 1 */
		adapter->pci_write_normalize(adapter,
				rds_ring->crb_rcv_producer,
				(producer-1) & (rds_ring->max_rx_desc_count-1));
		wmb();
	}
}

void netxen_nic_clear_stats(struct netxen_adapter *adapter)
{
	memset(&adapter->stats, 0, sizeof(adapter->stats));
	return;
}