/* drivers/net/netxen/netxen_nic_init.c */
/*
 * Copyright (C) 2003 - 2006 NetXen, Inc.
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
 * MA  02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.
 *
 * Contact Information:
 *    info@netxen.com
 * NetXen,
 * 3965 Freedom Circle, Fourth floor,
 * Santa Clara, CA 95054
 *
 * Source file for NIC routines to initialize the Phantom Hardware
 */

#include <linux/netdevice.h>
#include <linux/delay.h>
#include "netxen_nic.h"
#include "netxen_nic_hw.h"
#include "netxen_nic_phan_reg.h"

struct crb_addr_pair {
	u32 addr;
	u32 data;
};

#define NETXEN_MAX_CRB_XFORM 60
static unsigned int crb_addr_xform[NETXEN_MAX_CRB_XFORM];
#define NETXEN_ADDR_ERROR (0xffffffff)

#define crb_addr_transform(name) \
	crb_addr_xform[NETXEN_HW_PX_MAP_CRB_##name] = \
	NETXEN_HW_CRB_HUB_AGT_ADR_##name << 20

#define NETXEN_NIC_XDMA_RESET 0x8000ff

static void netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter,
		uint32_t ctx, uint32_t ringid);

#if 0
static void netxen_nic_locked_write_reg(struct netxen_adapter *adapter,
					unsigned long off, int *data)
{
	void __iomem *addr = pci_base_offset(adapter, off);
	writel(*data, addr);
}
#endif  /*  0  */

static void crb_addr_transform_setup(void)
{
	crb_addr_transform(XDMA);
	crb_addr_transform(TIMR);
	crb_addr_transform(SRE);
	crb_addr_transform(SQN3);
	crb_addr_transform(SQN2);
	crb_addr_transform(SQN1);
	crb_addr_transform(SQN0);
	crb_addr_transform(SQS3);
	crb_addr_transform(SQS2);
	crb_addr_transform(SQS1);
	crb_addr_transform(SQS0);
	crb_addr_transform(RPMX7);
	crb_addr_transform(RPMX6);
	crb_addr_transform(RPMX5);
	crb_addr_transform(RPMX4);
	crb_addr_transform(RPMX3);
	crb_addr_transform(RPMX2);
	crb_addr_transform(RPMX1);
	crb_addr_transform(RPMX0);
	crb_addr_transform(ROMUSB);
	crb_addr_transform(SN);
	crb_addr_transform(QMN);
	crb_addr_transform(QMS);
	crb_addr_transform(PGNI);
	crb_addr_transform(PGND);
	crb_addr_transform(PGN3);
	crb_addr_transform(PGN2);
	crb_addr_transform(PGN1);
	crb_addr_transform(PGN0);
	crb_addr_transform(PGSI);
	crb_addr_transform(PGSD);
	crb_addr_transform(PGS3);
	crb_addr_transform(PGS2);
	crb_addr_transform(PGS1);
	crb_addr_transform(PGS0);
	crb_addr_transform(PS);
	crb_addr_transform(PH);
	crb_addr_transform(NIU);
	crb_addr_transform(I2Q);
	crb_addr_transform(EG);
	crb_addr_transform(MN);
	crb_addr_transform(MS);
	crb_addr_transform(CAS2);
	crb_addr_transform(CAS1);
	crb_addr_transform(CAS0);
	crb_addr_transform(CAM);
	crb_addr_transform(C2C1);
	crb_addr_transform(C2C0);
	crb_addr_transform(SMB);
	crb_addr_transform(OCM0);
	crb_addr_transform(I2C0);
}
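
/*
 * Wait for the firmware command peg to finish initializing, then
 * advertise the host's interrupt scheme, MSI mode and multi-port mode,
 * and acknowledge the handshake through CRB_CMDPEG_STATE.
 */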
int netxen_init_firmware(struct netxen_adapter *adapter)
{
	u32 state = 0, loops = 0, err = 0;

	/* Window 1 call */
	state = adapter->pci_read_normalize(adapter, CRB_CMDPEG_STATE);

	if (state == PHAN_INITIALIZE_ACK)
		return 0;

	while (state != PHAN_INITIALIZE_COMPLETE && loops < 2000) {
		msleep(1);
		/* Window 1 call */
		state = adapter->pci_read_normalize(adapter, CRB_CMDPEG_STATE);

		loops++;
	}
	if (loops >= 2000) {
		printk(KERN_ERR "Cmd Peg initialization not complete:%x.\n",
		       state);
		err = -EIO;
		return err;
	}
	/* Window 1 call */
	adapter->pci_write_normalize(adapter,
			CRB_NIC_CAPABILITIES_HOST, INTR_SCHEME_PERPORT);
	adapter->pci_write_normalize(adapter,
			CRB_NIC_MSI_MODE_HOST, MSI_MODE_MULTIFUNC);
	adapter->pci_write_normalize(adapter,
			CRB_MPORT_MODE, MPORT_MULTI_FUNCTION_MODE);
	adapter->pci_write_normalize(adapter,
			CRB_CMDPEG_STATE, PHAN_INITIALIZE_ACK);

	return err;
}
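
/* Unmap and free any rx buffers still owned by the driver. */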
void netxen_release_rx_buffers(struct netxen_adapter *adapter)
{
	struct netxen_recv_context *recv_ctx;
	struct nx_host_rds_ring *rds_ring;
	struct netxen_rx_buffer *rx_buf;
	int i, ctxid, ring;

	for (ctxid = 0; ctxid < MAX_RCV_CTX; ++ctxid) {
		recv_ctx = &adapter->recv_ctx[ctxid];
		for (ring = 0; ring < adapter->max_rds_rings; ring++) {
			rds_ring = &recv_ctx->rds_rings[ring];
			for (i = 0; i < rds_ring->max_rx_desc_count; ++i) {
				rx_buf = &(rds_ring->rx_buf_arr[i]);
				if (rx_buf->state == NETXEN_BUFFER_FREE)
					continue;
				pci_unmap_single(adapter->pdev,
						rx_buf->dma,
						rds_ring->dma_size,
						PCI_DMA_FROMDEVICE);
				if (rx_buf->skb != NULL)
					dev_kfree_skb_any(rx_buf->skb);
			}
		}
	}
}
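
/* Unmap all tx fragments and free the skbs still pending on the tx ring. */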
void netxen_release_tx_buffers(struct netxen_adapter *adapter)
{
	struct netxen_cmd_buffer *cmd_buf;
	struct netxen_skb_frag *buffrag;
	int i, j;

	cmd_buf = adapter->cmd_buf_arr;
	for (i = 0; i < adapter->max_tx_desc_count; i++) {
		buffrag = cmd_buf->frag_array;
		if (buffrag->dma) {
			pci_unmap_single(adapter->pdev, buffrag->dma,
					 buffrag->length, PCI_DMA_TODEVICE);
			buffrag->dma = 0ULL;
		}
		for (j = 0; j < cmd_buf->frag_count; j++) {
			buffrag++;
			if (buffrag->dma) {
				pci_unmap_page(adapter->pdev, buffrag->dma,
					       buffrag->length,
					       PCI_DMA_TODEVICE);
				buffrag->dma = 0ULL;
			}
		}
		/* Free the skb we received in netxen_nic_xmit_frame */
		if (cmd_buf->skb) {
			dev_kfree_skb_any(cmd_buf->skb);
			cmd_buf->skb = NULL;
		}
		cmd_buf++;
	}
}
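
/* Free the vmalloc'ed rx buffer arrays and the tx command buffer ring. */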
void netxen_free_sw_resources(struct netxen_adapter *adapter)
{
	struct netxen_recv_context *recv_ctx;
	struct nx_host_rds_ring *rds_ring;
	int ctx, ring;

	for (ctx = 0; ctx < MAX_RCV_CTX; ctx++) {
		recv_ctx = &adapter->recv_ctx[ctx];
		for (ring = 0; ring < adapter->max_rds_rings; ring++) {
			rds_ring = &recv_ctx->rds_rings[ring];
			if (rds_ring->rx_buf_arr) {
				vfree(rds_ring->rx_buf_arr);
				rds_ring->rx_buf_arr = NULL;
			}
		}
	}

	if (adapter->cmd_buf_arr)
		vfree(adapter->cmd_buf_arr);
	return;
}
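
/*
 * Allocate the host-side tx command buffer ring and the per-context
 * rx buffer arrays, sizing each rds ring (normal/jumbo/LRO) and
 * threading its buffers onto the ring's free list.
 */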
int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
{
	struct netxen_recv_context *recv_ctx;
	struct nx_host_rds_ring *rds_ring;
	struct netxen_rx_buffer *rx_buf;
	int ctx, ring, i, num_rx_bufs;

	struct netxen_cmd_buffer *cmd_buf_arr;
	struct net_device *netdev = adapter->netdev;

	cmd_buf_arr = (struct netxen_cmd_buffer *)vmalloc(TX_RINGSIZE);
	if (cmd_buf_arr == NULL) {
		printk(KERN_ERR "%s: Failed to allocate cmd buffer ring\n",
		       netdev->name);
		return -ENOMEM;
	}
	memset(cmd_buf_arr, 0, TX_RINGSIZE);
	adapter->cmd_buf_arr = cmd_buf_arr;

	for (ctx = 0; ctx < MAX_RCV_CTX; ctx++) {
		recv_ctx = &adapter->recv_ctx[ctx];
		for (ring = 0; ring < adapter->max_rds_rings; ring++) {
			rds_ring = &recv_ctx->rds_rings[ring];
			switch (RCV_DESC_TYPE(ring)) {
			case RCV_DESC_NORMAL:
				rds_ring->max_rx_desc_count =
					adapter->max_rx_desc_count;
				rds_ring->flags = RCV_DESC_NORMAL;
				if (adapter->ahw.cut_through) {
					rds_ring->dma_size =
						NX_CT_DEFAULT_RX_BUF_LEN;
					rds_ring->skb_size =
						NX_CT_DEFAULT_RX_BUF_LEN;
				} else {
					rds_ring->dma_size = RX_DMA_MAP_LEN;
					rds_ring->skb_size =
						MAX_RX_BUFFER_LENGTH;
				}
				break;

			case RCV_DESC_JUMBO:
				rds_ring->max_rx_desc_count =
					adapter->max_jumbo_rx_desc_count;
				rds_ring->flags = RCV_DESC_JUMBO;
				if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
					rds_ring->dma_size =
						NX_P3_RX_JUMBO_BUF_MAX_LEN;
				else
					rds_ring->dma_size =
						NX_P2_RX_JUMBO_BUF_MAX_LEN;
				rds_ring->skb_size =
					rds_ring->dma_size + NET_IP_ALIGN;
				break;

			case RCV_RING_LRO:
				rds_ring->max_rx_desc_count =
					adapter->max_lro_rx_desc_count;
				rds_ring->flags = RCV_DESC_LRO;
				rds_ring->dma_size = RX_LRO_DMA_MAP_LEN;
				rds_ring->skb_size = MAX_RX_LRO_BUFFER_LENGTH;
				break;

			}
			rds_ring->rx_buf_arr = (struct netxen_rx_buffer *)
				vmalloc(RCV_BUFFSIZE);
			if (rds_ring->rx_buf_arr == NULL) {
				printk(KERN_ERR "%s: Failed to allocate "
					"rx buffer ring %d\n",
					netdev->name, ring);
				/* free whatever was already allocated */
				goto err_out;
			}
			memset(rds_ring->rx_buf_arr, 0, RCV_BUFFSIZE);
			INIT_LIST_HEAD(&rds_ring->free_list);
			rds_ring->begin_alloc = 0;
			/*
			 * Now go through all of them, set reference handles
			 * and put them in the queues.
			 */
			num_rx_bufs = rds_ring->max_rx_desc_count;
			rx_buf = rds_ring->rx_buf_arr;
			for (i = 0; i < num_rx_bufs; i++) {
				list_add_tail(&rx_buf->list,
						&rds_ring->free_list);
				rx_buf->ref_handle = i;
				rx_buf->state = NETXEN_BUFFER_FREE;
				rx_buf++;
			}
		}
	}

	return 0;

err_out:
	netxen_free_sw_resources(adapter);
	return -ENOMEM;
}
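
/*
 * Hook up the board-specific (GbE vs XGbE) NIU callbacks; P3 parts
 * override the MTU and promiscuous-mode handlers.
 */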
void netxen_initialize_adapter_ops(struct netxen_adapter *adapter)
{
	switch (adapter->ahw.board_type) {
	case NETXEN_NIC_GBE:
		adapter->enable_phy_interrupts =
		    netxen_niu_gbe_enable_phy_interrupts;
		adapter->disable_phy_interrupts =
		    netxen_niu_gbe_disable_phy_interrupts;
		adapter->macaddr_set = netxen_niu_macaddr_set;
		adapter->set_mtu = netxen_nic_set_mtu_gb;
		adapter->set_promisc = netxen_niu_set_promiscuous_mode;
		adapter->phy_read = netxen_niu_gbe_phy_read;
		adapter->phy_write = netxen_niu_gbe_phy_write;
		adapter->init_port = netxen_niu_gbe_init_port;
		adapter->stop_port = netxen_niu_disable_gbe_port;
		break;

	case NETXEN_NIC_XGBE:
		adapter->enable_phy_interrupts =
		    netxen_niu_xgbe_enable_phy_interrupts;
		adapter->disable_phy_interrupts =
		    netxen_niu_xgbe_disable_phy_interrupts;
		adapter->macaddr_set = netxen_niu_xg_macaddr_set;
		adapter->set_mtu = netxen_nic_set_mtu_xgb;
		adapter->init_port = netxen_niu_xg_init_port;
		adapter->set_promisc = netxen_niu_xg_set_promiscuous_mode;
		adapter->stop_port = netxen_niu_disable_xg_port;
		break;

	default:
		break;
	}

	if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
		adapter->set_mtu = nx_fw_cmd_set_mtu;
		adapter->set_promisc = netxen_p3_nic_set_promisc;
	}
}

/*
 * netxen_decode_crb_addr() - utility to translate from internal Phantom CRB
 * address to external PCI CRB address.
 */
static u32 netxen_decode_crb_addr(u32 addr)
{
	int i;
	u32 base_addr, offset, pci_base;

	crb_addr_transform_setup();

	pci_base = NETXEN_ADDR_ERROR;
	base_addr = addr & 0xfff00000;
	offset = addr & 0x000fffff;

	for (i = 0; i < NETXEN_MAX_CRB_XFORM; i++) {
		if (crb_addr_xform[i] == base_addr) {
			pci_base = i << 20;
			break;
		}
	}
	if (pci_base == NETXEN_ADDR_ERROR)
		return pci_base;
	else
		return (pci_base + offset);
}

static long rom_max_timeout = 100;
static long rom_lock_timeout = 10000;
#if 0
static long rom_write_timeout = 700;
#endif
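
/*
 * Grab the flash ROM semaphore (PCIe semaphore 2).  Spins (or yields,
 * when not in atomic context) until the hardware grants the lock or
 * rom_lock_timeout iterations have elapsed.
 */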
static int rom_lock(struct netxen_adapter *adapter)
{
	int iter;
	u32 done = 0;
	int timeout = 0;

	while (!done) {
		/* acquire semaphore2 from PCI HW block */
		netxen_nic_read_w0(adapter, NETXEN_PCIE_REG(PCIE_SEM2_LOCK),
				   &done);
		if (done == 1)
			break;
		if (timeout >= rom_lock_timeout)
			return -EIO;

		timeout++;
		/*
		 * Yield CPU
		 */
		if (!in_atomic())
			schedule();
		else {
			for (iter = 0; iter < 20; iter++)
				cpu_relax();	/* This is a nop instr on i386 */
		}
	}
	netxen_nic_reg_write(adapter, NETXEN_ROM_LOCK_ID, ROM_LOCK_DRIVER);
	return 0;
}
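
/* Poll the ROM glue logic until the current flash operation completes. */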
static int netxen_wait_rom_done(struct netxen_adapter *adapter)
{
	long timeout = 0;
	long done = 0;

	while (done == 0) {
		done = netxen_nic_reg_read(adapter, NETXEN_ROMUSB_GLB_STATUS);
		done &= 2;
		timeout++;
		if (timeout >= rom_max_timeout) {
			printk("Timeout reached waiting for rom done\n");
			return -EIO;
		}
	}
	return 0;
}

#if 0
static int netxen_rom_wren(struct netxen_adapter *adapter)
{
	/* Set write enable latch in ROM status register */
	netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 0);
	netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_INSTR_OPCODE,
			     M25P_INSTR_WREN);
	if (netxen_wait_rom_done(adapter)) {
		return -1;
	}
	return 0;
}

static unsigned int netxen_rdcrbreg(struct netxen_adapter *adapter,
				    unsigned int addr)
{
	unsigned int data = 0xdeaddead;
	data = netxen_nic_reg_read(adapter, addr);
	return data;
}

static int netxen_do_rom_rdsr(struct netxen_adapter *adapter)
{
	netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_INSTR_OPCODE,
			     M25P_INSTR_RDSR);
	if (netxen_wait_rom_done(adapter)) {
		return -1;
	}
	return netxen_rdcrbreg(adapter, NETXEN_ROMUSB_ROM_RDATA);
}
#endif

static void netxen_rom_unlock(struct netxen_adapter *adapter)
{
	u32 val;

	/* release semaphore2 */
	netxen_nic_read_w0(adapter, NETXEN_PCIE_REG(PCIE_SEM2_UNLOCK), &val);
}

#if 0
static int netxen_rom_wip_poll(struct netxen_adapter *adapter)
{
	long timeout = 0;
	long wip = 1;
	int val;
	netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 0);
	while (wip != 0) {
		val = netxen_do_rom_rdsr(adapter);
		wip = val & 1;
		timeout++;
		if (timeout > rom_max_timeout) {
			return -1;
		}
	}
	return 0;
}

static int do_rom_fast_write(struct netxen_adapter *adapter, int addr,
			     int data)
{
	if (netxen_rom_wren(adapter)) {
		return -1;
	}
	netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_WDATA, data);
	netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ADDRESS, addr);
	netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 3);
	netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_INSTR_OPCODE,
			     M25P_INSTR_PP);
	if (netxen_wait_rom_done(adapter)) {
		netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 0);
		return -1;
	}

	return netxen_rom_wip_poll(adapter);
}
#endif
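
/*
 * Issue a single 4-byte read from the serial flash via the ROMUSB
 * interface; the caller must already hold the ROM semaphore.
 */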
static int do_rom_fast_read(struct netxen_adapter *adapter,
			    int addr, int *valp)
{
	cond_resched();

	netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ADDRESS, addr);
	netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 3);
	udelay(100);	/* prevent bursting on CRB */
	netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
	netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_INSTR_OPCODE, 0xb);
	if (netxen_wait_rom_done(adapter)) {
		printk("Error waiting for rom done\n");
		return -EIO;
	}
	/* reset abyte_cnt and dummy_byte_cnt */
	netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 0);
	udelay(100);	/* prevent bursting on CRB */
	netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);

	*valp = netxen_nic_reg_read(adapter, NETXEN_ROMUSB_ROM_RDATA);
	return 0;
}
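
/* Read a block of flash, 4 bytes at a time, into a little-endian buffer. */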
static int do_rom_fast_read_words(struct netxen_adapter *adapter, int addr,
				  u8 *bytes, size_t size)
{
	int addridx;
	int ret = 0;

	for (addridx = addr; addridx < (addr + size); addridx += 4) {
		int v;
		ret = do_rom_fast_read(adapter, addridx, &v);
		if (ret != 0)
			break;
		*(__le32 *)bytes = cpu_to_le32(v);
		bytes += 4;
	}

	return ret;
}

int
netxen_rom_fast_read_words(struct netxen_adapter *adapter, int addr,
			   u8 *bytes, size_t size)
{
	int ret;

	ret = rom_lock(adapter);
	if (ret < 0)
		return ret;

	ret = do_rom_fast_read_words(adapter, addr, bytes, size);

	netxen_rom_unlock(adapter);
	return ret;
}

int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp)
{
	int ret;

	if (rom_lock(adapter) != 0)
		return -EIO;

	ret = do_rom_fast_read(adapter, addr, valp);
	netxen_rom_unlock(adapter);
	return ret;
}

#if 0
int netxen_rom_fast_write(struct netxen_adapter *adapter, int addr, int data)
{
	int ret = 0;

	if (rom_lock(adapter) != 0) {
		return -1;
	}
	ret = do_rom_fast_write(adapter, addr, data);
	netxen_rom_unlock(adapter);
	return ret;
}

static int do_rom_fast_write_words(struct netxen_adapter *adapter,
				   int addr, u8 *bytes, size_t size)
{
	int addridx = addr;
	int ret = 0;

	while (addridx < (addr + size)) {
		int last_attempt = 0;
		int timeout = 0;
		int data;

		data = le32_to_cpu((*(__le32 *)bytes));
		ret = do_rom_fast_write(adapter, addridx, data);
		if (ret < 0)
			return ret;

		while (1) {
			int data1;

			ret = do_rom_fast_read(adapter, addridx, &data1);
			if (ret < 0)
				return ret;

			if (data1 == data)
				break;

			if (timeout++ >= rom_write_timeout) {
				if (last_attempt++ < 4) {
					ret = do_rom_fast_write(adapter,
							addridx, data);
					if (ret < 0)
						return ret;
				} else {
					printk(KERN_INFO "Data write did not "
					   "succeed at address 0x%x\n", addridx);
					break;
				}
			}
		}

		bytes += 4;
		addridx += 4;
	}

	return ret;
}

int netxen_rom_fast_write_words(struct netxen_adapter *adapter, int addr,
				u8 *bytes, size_t size)
{
	int ret = 0;

	ret = rom_lock(adapter);
	if (ret < 0)
		return ret;

	ret = do_rom_fast_write_words(adapter, addr, bytes, size);
	netxen_rom_unlock(adapter);

	return ret;
}

static int netxen_rom_wrsr(struct netxen_adapter *adapter, int data)
{
	int ret;

	ret = netxen_rom_wren(adapter);
	if (ret < 0)
		return ret;

	netxen_crb_writelit_adapter(adapter, NETXEN_ROMUSB_ROM_WDATA, data);
	netxen_crb_writelit_adapter(adapter,
			NETXEN_ROMUSB_ROM_INSTR_OPCODE, 0x1);

	ret = netxen_wait_rom_done(adapter);
	if (ret < 0)
		return ret;

	return netxen_rom_wip_poll(adapter);
}

static int netxen_rom_rdsr(struct netxen_adapter *adapter)
{
	int ret;

	ret = rom_lock(adapter);
	if (ret < 0)
		return ret;

	ret = netxen_do_rom_rdsr(adapter);
	netxen_rom_unlock(adapter);
	return ret;
}

int netxen_backup_crbinit(struct netxen_adapter *adapter)
{
	int ret = FLASH_SUCCESS;
	int val;
	char *buffer = kmalloc(NETXEN_FLASH_SECTOR_SIZE, GFP_KERNEL);

	if (!buffer)
		return -ENOMEM;
	/* unlock sector 63 */
	val = netxen_rom_rdsr(adapter);
	val = val & 0xe3;
	ret = netxen_rom_wrsr(adapter, val);
	if (ret != FLASH_SUCCESS)
		goto out_kfree;

	ret = netxen_rom_wip_poll(adapter);
	if (ret != FLASH_SUCCESS)
		goto out_kfree;

	/* copy sector 0 to sector 63 */
	ret = netxen_rom_fast_read_words(adapter, NETXEN_CRBINIT_START,
					buffer, NETXEN_FLASH_SECTOR_SIZE);
	if (ret != FLASH_SUCCESS)
		goto out_kfree;

	ret = netxen_rom_fast_write_words(adapter, NETXEN_FIXED_START,
					buffer, NETXEN_FLASH_SECTOR_SIZE);
	if (ret != FLASH_SUCCESS)
		goto out_kfree;

	/* lock sector 63 */
	val = netxen_rom_rdsr(adapter);
	if (!(val & 0x8)) {
		val |= (0x1 << 2);
		/* lock sector 63 */
		if (netxen_rom_wrsr(adapter, val) == 0) {
			ret = netxen_rom_wip_poll(adapter);
			if (ret != FLASH_SUCCESS)
				goto out_kfree;

			/* lock SR writes */
			ret = netxen_rom_wip_poll(adapter);
			if (ret != FLASH_SUCCESS)
				goto out_kfree;
		}
	}

out_kfree:
	kfree(buffer);
	return ret;
}

static int netxen_do_rom_se(struct netxen_adapter *adapter, int addr)
{
	netxen_rom_wren(adapter);
	netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ADDRESS, addr);
	netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 3);
	netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_INSTR_OPCODE,
			     M25P_INSTR_SE);
	if (netxen_wait_rom_done(adapter)) {
		netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 0);
		return -1;
	}
	return netxen_rom_wip_poll(adapter);
}

static void check_erased_flash(struct netxen_adapter *adapter, int addr)
{
	int i;
	int val;
	int count = 0, erased_errors = 0;
	int range;

	range = (addr == NETXEN_USER_START) ?
		NETXEN_FIXED_START : addr + NETXEN_FLASH_SECTOR_SIZE;

	for (i = addr; i < range; i += 4) {
		netxen_rom_fast_read(adapter, i, &val);
		if (val != 0xffffffff)
			erased_errors++;
		count++;
	}

	if (erased_errors)
		printk(KERN_INFO "0x%x out of 0x%x words fail to be erased "
			"for sector address: %x\n", erased_errors, count, addr);
}

int netxen_rom_se(struct netxen_adapter *adapter, int addr)
{
	int ret = 0;
	if (rom_lock(adapter) != 0) {
		return -1;
	}
	ret = netxen_do_rom_se(adapter, addr);
	netxen_rom_unlock(adapter);
	msleep(30);
	check_erased_flash(adapter, addr);

	return ret;
}

static int netxen_flash_erase_sections(struct netxen_adapter *adapter,
				       int start, int end)
{
	int ret = FLASH_SUCCESS;
	int i;

	for (i = start; i < end; i++) {
		ret = netxen_rom_se(adapter, i * NETXEN_FLASH_SECTOR_SIZE);
		if (ret)
			break;
		ret = netxen_rom_wip_poll(adapter);
		if (ret < 0)
			return ret;
	}

	return ret;
}

int
netxen_flash_erase_secondary(struct netxen_adapter *adapter)
{
	int ret = FLASH_SUCCESS;
	int start, end;

	start = NETXEN_SECONDARY_START / NETXEN_FLASH_SECTOR_SIZE;
	end = NETXEN_USER_START / NETXEN_FLASH_SECTOR_SIZE;
	ret = netxen_flash_erase_sections(adapter, start, end);

	return ret;
}

int
netxen_flash_erase_primary(struct netxen_adapter *adapter)
{
	int ret = FLASH_SUCCESS;
	int start, end;

	start = NETXEN_PRIMARY_START / NETXEN_FLASH_SECTOR_SIZE;
	end = NETXEN_SECONDARY_START / NETXEN_FLASH_SECTOR_SIZE;
	ret = netxen_flash_erase_sections(adapter, start, end);

	return ret;
}

void netxen_halt_pegs(struct netxen_adapter *adapter)
{
	netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_0 + 0x3c, 1);
	netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_1 + 0x3c, 1);
	netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_2 + 0x3c, 1);
	netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_3 + 0x3c, 1);
}

int netxen_flash_unlock(struct netxen_adapter *adapter)
{
	int ret = 0;

	ret = netxen_rom_wrsr(adapter, 0);
	if (ret < 0)
		return ret;

	ret = netxen_rom_wren(adapter);
	if (ret < 0)
		return ret;

	return ret;
}
#endif /* 0 */

#define NETXEN_BOARDTYPE		0x4008
#define NETXEN_BOARDNUM			0x400c
#define NETXEN_CHIPNUM			0x4010
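
/*
 * Reset the chip and replay the CRB init table stored in flash: each
 * (address, value) pair is translated with netxen_decode_crb_addr() and
 * written to the hardware, with a few registers skipped or patched
 * depending on the chip revision.
 */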
int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose)
{
	int addr, val;
	int i, init_delay = 0;
	struct crb_addr_pair *buf;
	unsigned offset, n;
	u32 off;

	/* resetall */
	netxen_crb_writelit_adapter(adapter, NETXEN_ROMUSB_GLB_SW_RESET,
				    0xffffffff);

	if (verbose) {
		if (netxen_rom_fast_read(adapter, NETXEN_BOARDTYPE, &val) == 0)
			printk("P2 ROM board type: 0x%08x\n", val);
		else
			printk("Could not read board type\n");
		if (netxen_rom_fast_read(adapter, NETXEN_BOARDNUM, &val) == 0)
			printk("P2 ROM board num: 0x%08x\n", val);
		else
			printk("Could not read board number\n");
		if (netxen_rom_fast_read(adapter, NETXEN_CHIPNUM, &val) == 0)
			printk("P2 ROM chip num: 0x%08x\n", val);
		else
			printk("Could not read chip number\n");
	}

	if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
		if (netxen_rom_fast_read(adapter, 0, &n) != 0 ||
			(n != 0xcafecafeUL) ||
			netxen_rom_fast_read(adapter, 4, &n) != 0) {
			printk(KERN_ERR "%s: ERROR Reading crb_init area: "
					"n: %08x\n", netxen_nic_driver_name, n);
			return -EIO;
		}
		offset = n & 0xffffU;
		n = (n >> 16) & 0xffffU;
	} else {
		if (netxen_rom_fast_read(adapter, 0, &n) != 0 ||
			!(n & 0x80000000)) {
			printk(KERN_ERR "%s: ERROR Reading crb_init area: "
					"n: %08x\n", netxen_nic_driver_name, n);
			return -EIO;
		}
		offset = 1;
		n &= ~0x80000000;
	}

	if (n < 1024) {
		if (verbose)
			printk(KERN_DEBUG "%s: %d CRB init values found"
			       " in ROM.\n", netxen_nic_driver_name, n);
	} else {
		printk(KERN_ERR "%s:n=0x%x Error! NetXen card flash not"
		       " initialized.\n", __func__, n);
		return -EIO;
	}

	buf = kcalloc(n, sizeof(struct crb_addr_pair), GFP_KERNEL);
	if (buf == NULL) {
		printk("%s: netxen_pinit_from_rom: Unable to calloc memory.\n",
				netxen_nic_driver_name);
		return -ENOMEM;
	}
	for (i = 0; i < n; i++) {
		if (netxen_rom_fast_read(adapter, 8*i + 4*offset, &val) != 0 ||
		netxen_rom_fast_read(adapter, 8*i + 4*offset + 4, &addr) != 0) {
			kfree(buf);
			return -EIO;
		}

		buf[i].addr = addr;
		buf[i].data = val;

		if (verbose)
			printk(KERN_DEBUG "%s: PCI: 0x%08x == 0x%08x\n",
				netxen_nic_driver_name,
				(u32)netxen_decode_crb_addr(addr), val);
	}
	for (i = 0; i < n; i++) {

		off = netxen_decode_crb_addr(buf[i].addr);
		if (off == NETXEN_ADDR_ERROR) {
			printk(KERN_ERR "CRB init value out of range %x\n",
				buf[i].addr);
			continue;
		}
		off += NETXEN_PCI_CRBSPACE;
		/* skipping cold reboot MAGIC */
		if (off == NETXEN_CAM_RAM(0x1fc))
			continue;

		if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
			/* do not reset PCI */
			if (off == (ROMUSB_GLB + 0xbc))
				continue;
			if (off == (NETXEN_CRB_PEG_NET_1 + 0x18))
				buf[i].data = 0x1020;
			/* skip the function enable register */
			if (off == NETXEN_PCIE_REG(PCIE_SETUP_FUNCTION))
				continue;
			if (off == NETXEN_PCIE_REG(PCIE_SETUP_FUNCTION2))
				continue;
			if ((off & 0x0ff00000) == NETXEN_CRB_SMB)
				continue;
		}

		if (off == NETXEN_ADDR_ERROR) {
			printk(KERN_ERR "%s: Err: Unknown addr: 0x%08x\n",
				netxen_nic_driver_name, buf[i].addr);
			continue;
		}

		/* After writing this register, HW needs time for CRB */
		/* to quiet down (else crb_window returns 0xffffffff) */
		if (off == NETXEN_ROMUSB_GLB_SW_RESET) {
			init_delay = 1;
			if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
				/* hold xdma in reset also */
				buf[i].data = NETXEN_NIC_XDMA_RESET;
			}
		}

		adapter->hw_write_wx(adapter, off, &buf[i].data, 4);

		if (init_delay == 1) {
			msleep(1000);
			init_delay = 0;
		}
		msleep(1);
	}
	kfree(buf);

	/* disable_peg_cache_all */

	/* unreset_net_cache */
	if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
		adapter->hw_read_wx(adapter,
			NETXEN_ROMUSB_GLB_SW_RESET, &val, 4);
		netxen_crb_writelit_adapter(adapter,
			NETXEN_ROMUSB_GLB_SW_RESET, (val & 0xffffff0f));
	}

	/* p2dn replyCount */
	netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_D + 0xec, 0x1e);
	/* disable_peg_cache 0 */
	netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_D + 0x4c, 8);
	/* disable_peg_cache 1 */
	netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_I + 0x4c, 8);

	/* peg_clr_all */

	/* peg_clr 0 */
	netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_0 + 0x8, 0);
	netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_0 + 0xc, 0);
	/* peg_clr 1 */
	netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_1 + 0x8, 0);
	netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_1 + 0xc, 0);
	/* peg_clr 2 */
	netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_2 + 0x8, 0);
	netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_2 + 0xc, 0);
	/* peg_clr 3 */
	netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_3 + 0x8, 0);
	netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_3 + 0xc, 0);
	return 0;
}
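
/*
 * Allocate the "dummy" DMA buffer and hand its bus address to the
 * firmware through the CRB scratch registers.
 */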
int netxen_initialize_adapter_offload(struct netxen_adapter *adapter)
{
	uint64_t addr;
	uint32_t hi;
	uint32_t lo;

	adapter->dummy_dma.addr =
	    pci_alloc_consistent(adapter->pdev,
				 NETXEN_HOST_DUMMY_DMA_SIZE,
				 &adapter->dummy_dma.phys_addr);
	if (adapter->dummy_dma.addr == NULL) {
		printk("%s: ERROR: Could not allocate dummy DMA memory\n",
		       __func__);
		return -ENOMEM;
	}

	addr = (uint64_t) adapter->dummy_dma.phys_addr;
	hi = (addr >> 32) & 0xffffffff;
	lo = addr & 0xffffffff;

	adapter->pci_write_normalize(adapter, CRB_HOST_DUMMY_BUF_ADDR_HI, hi);
	adapter->pci_write_normalize(adapter, CRB_HOST_DUMMY_BUF_ADDR_LO, lo);

	if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
		uint32_t temp = 0;
		adapter->hw_write_wx(adapter, CRB_HOST_DUMMY_BUF, &temp, 4);
	}

	return 0;
}
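
/*
 * Release the dummy DMA buffer.  On P2 parts the DMA watchdog is asked
 * to shut down first; if it never does, the buffer is deliberately not
 * freed while the hardware may still be using it.
 */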
void netxen_free_adapter_offload(struct netxen_adapter *adapter)
{
	int i = 100;

	if (!adapter->dummy_dma.addr)
		return;

	if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
		do {
			if (dma_watchdog_shutdown_request(adapter) == 1)
				break;
			msleep(50);
			if (dma_watchdog_shutdown_poll_result(adapter) == 1)
				break;
		} while (--i);
	}

	if (i) {
		pci_free_consistent(adapter->pdev,
				    NETXEN_HOST_DUMMY_DMA_SIZE,
				    adapter->dummy_dma.addr,
				    adapter->dummy_dma.phys_addr);
		adapter->dummy_dma.addr = NULL;
	} else {
		printk(KERN_ERR "%s: dma_watchdog_shutdown failed\n",
				adapter->netdev->name);
	}
}
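
/*
 * Unless a pegtune value is supplied, wait (up to ~30 seconds) for the
 * command peg to report that firmware initialization has completed.
 */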
int netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val)
{
	u32 val = 0;
	int retries = 60;

	if (!pegtune_val) {
		do {
			val = adapter->pci_read_normalize(adapter,
					CRB_CMDPEG_STATE);

			if (val == PHAN_INITIALIZE_COMPLETE ||
				val == PHAN_INITIALIZE_ACK)
				return 0;

			msleep(500);

		} while (--retries);

		if (!retries) {
			pegtune_val = adapter->pci_read_normalize(adapter,
					NETXEN_ROMUSB_GLB_PEGTUNE_DONE);
			printk(KERN_WARNING "netxen_phantom_init: init failed, "
					"pegtune_val=%x\n", pegtune_val);
			return -1;
		}
	}

	return 0;
}
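
/* Wait (up to ~20 seconds) for the receive peg to become ready. */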
int netxen_receive_peg_ready(struct netxen_adapter *adapter)
{
	u32 val = 0;
	int retries = 2000;

	do {
		val = adapter->pci_read_normalize(adapter, CRB_RCVPEG_STATE);

		if (val == PHAN_PEG_RCV_INITIALIZED)
			return 0;

		msleep(10);

	} while (--retries);

	if (!retries) {
		printk(KERN_ERR "Receive Peg initialization not "
			      "complete, state: 0x%x.\n", val);
		return -EIO;
	}

	return 0;
}
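
/*
 * Detach the skb for a completed rx descriptor: unmap the DMA buffer,
 * set the checksum status, and return the buffer to the ring's free list.
 */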
static struct sk_buff *netxen_process_rxbuf(struct netxen_adapter *adapter,
		struct nx_host_rds_ring *rds_ring, u16 index, u16 cksum)
{
	struct netxen_rx_buffer *buffer;
	struct sk_buff *skb;

	buffer = &rds_ring->rx_buf_arr[index];

	pci_unmap_single(adapter->pdev, buffer->dma, rds_ring->dma_size,
			PCI_DMA_FROMDEVICE);

	skb = buffer->skb;
	if (!skb)
		goto no_skb;

	if (likely(adapter->rx_csum && cksum == STATUS_CKSUM_OK)) {
		adapter->stats.csummed++;
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else
		skb->ip_summed = CHECKSUM_NONE;

	skb->dev = adapter->netdev;

	buffer->skb = NULL;

no_skb:
	buffer->state = NETXEN_BUFFER_FREE;
	buffer->lro_current_frags = 0;
	buffer->lro_expected_frags = 0;
	list_add_tail(&buffer->list, &rds_ring->free_list);
	return skb;
}

/*
 * netxen_process_rcv() sends the received packet to the protocol stack,
 * and if the number of receives exceeds RX_BUFFERS_REFILL, then we
 * invoke the routine to send more rx buffers to the Phantom...
 */
static void netxen_process_rcv(struct netxen_adapter *adapter, int ctxid,
		struct status_desc *desc, struct status_desc *frag_desc)
{
	struct net_device *netdev = adapter->netdev;
	u64 sts_data = le64_to_cpu(desc->status_desc_data);
	int index = netxen_get_sts_refhandle(sts_data);
	struct netxen_recv_context *recv_ctx = &(adapter->recv_ctx[ctxid]);
	struct netxen_rx_buffer *buffer;
	struct sk_buff *skb;
	u32 length = netxen_get_sts_totallength(sts_data);
	u32 desc_ctx;
	u16 pkt_offset = 0, cksum;
	struct nx_host_rds_ring *rds_ring;

	desc_ctx = netxen_get_sts_type(sts_data);
	if (unlikely(desc_ctx >= NUM_RCV_DESC_RINGS)) {
		printk("%s: %s Bad Rcv descriptor ring\n",
		       netxen_nic_driver_name, netdev->name);
		return;
	}

	rds_ring = &recv_ctx->rds_rings[desc_ctx];
	if (unlikely(index > rds_ring->max_rx_desc_count)) {
		DPRINTK(ERR, "Got a buffer index:%x Max is %x\n",
			index, rds_ring->max_rx_desc_count);
		return;
	}
	buffer = &rds_ring->rx_buf_arr[index];
	if (desc_ctx == RCV_DESC_LRO_CTXID) {
		buffer->lro_current_frags++;
		if (netxen_get_sts_desc_lro_last_frag(desc)) {
			buffer->lro_expected_frags =
			    netxen_get_sts_desc_lro_cnt(desc);
			buffer->lro_length = length;
		}
		if (buffer->lro_current_frags != buffer->lro_expected_frags) {
			if (buffer->lro_expected_frags != 0) {
				printk("LRO: (refhandle:%x) recv frag. "
				       "wait for last. flags: %x expected:%d "
				       "have:%d\n", index,
				       netxen_get_sts_desc_lro_last_frag(desc),
				       buffer->lro_expected_frags,
				       buffer->lro_current_frags);
			}
			return;
		}
	}

	cksum = netxen_get_sts_status(sts_data);

	skb = netxen_process_rxbuf(adapter, rds_ring, index, cksum);
	if (!skb)
		return;

	if (desc_ctx == RCV_DESC_LRO_CTXID) {
		/* True length was only available on the last pkt */
		skb_put(skb, buffer->lro_length);
	} else {
		if (length > rds_ring->skb_size)
			skb_put(skb, rds_ring->skb_size);
		else
			skb_put(skb, length);

		pkt_offset = netxen_get_sts_pkt_offset(sts_data);
		if (pkt_offset)
			skb_pull(skb, pkt_offset);
	}

	skb->protocol = eth_type_trans(skb, netdev);

	/*
	 * rx buffer chaining is disabled, walk and free
	 * any spurious rx buffer chain.
	 */
	if (frag_desc) {
		u16 i, nr_frags = desc->nr_frags;

		dev_kfree_skb_any(skb);
		for (i = 0; i < nr_frags; i++) {
			index = frag_desc->frag_handles[i];
			skb = netxen_process_rxbuf(adapter,
					rds_ring, index, cksum);
			if (skb)
				dev_kfree_skb_any(skb);
		}

		adapter->stats.rxdropped++;
	} else {

		netif_receive_skb(skb);
		netdev->last_rx = jiffies;

		adapter->stats.no_rcv++;
		adapter->stats.rxbytes += length;
	}
}

/* Process Receive status ring */
u32 netxen_process_rcv_ring(struct netxen_adapter *adapter, int ctxid, int max)
{
	struct netxen_recv_context *recv_ctx = &(adapter->recv_ctx[ctxid]);
	struct status_desc *desc_head = recv_ctx->rcv_status_desc_head;
	struct status_desc *desc, *frag_desc;
	u32 consumer = recv_ctx->status_rx_consumer;
	int count = 0, ring;
	u64 sts_data;
	u16 opcode;

	while (count < max) {
		desc = &desc_head[consumer];
		if (!(netxen_get_sts_owner(desc) & STATUS_OWNER_HOST)) {
			DPRINTK(ERR, "desc %p ownedby %x\n", desc,
				netxen_get_sts_owner(desc));
			break;
		}

		sts_data = le64_to_cpu(desc->status_desc_data);
		opcode = netxen_get_sts_opcode(sts_data);
		frag_desc = NULL;
		if (opcode == NETXEN_NIC_RXPKT_DESC) {
			if (desc->nr_frags) {
				consumer = get_next_index(consumer,
						adapter->max_rx_desc_count);
				frag_desc = &desc_head[consumer];
				netxen_set_sts_owner(frag_desc,
						STATUS_OWNER_PHANTOM);
			}
		}

		netxen_process_rcv(adapter, ctxid, desc, frag_desc);

		netxen_set_sts_owner(desc, STATUS_OWNER_PHANTOM);

		consumer = get_next_index(consumer,
				adapter->max_rx_desc_count);
		count++;
	}
	for (ring = 0; ring < adapter->max_rds_rings; ring++)
		netxen_post_rx_buffers_nodb(adapter, ctxid, ring);

	/* update the consumer index in phantom */
	if (count) {
		recv_ctx->status_rx_consumer = consumer;

		/* Window = 1 */
		adapter->pci_write_normalize(adapter,
				recv_ctx->crb_sts_consumer, consumer);
	}

	return count;
}

/* Process Command status ring */
int netxen_process_cmd_ring(struct netxen_adapter *adapter)
{
	u32 last_consumer, consumer;
	int count = 0, i;
	struct netxen_cmd_buffer *buffer;
	struct pci_dev *pdev = adapter->pdev;
	struct net_device *netdev = adapter->netdev;
	struct netxen_skb_frag *frag;
	int done = 0;

	last_consumer = adapter->last_cmd_consumer;
	consumer = le32_to_cpu(*(adapter->cmd_consumer));

	while (last_consumer != consumer) {
		buffer = &adapter->cmd_buf_arr[last_consumer];
		if (buffer->skb) {
			frag = &buffer->frag_array[0];
			pci_unmap_single(pdev, frag->dma, frag->length,
					 PCI_DMA_TODEVICE);
			frag->dma = 0ULL;
			for (i = 1; i < buffer->frag_count; i++) {
				frag++;	/* Get the next frag */
				pci_unmap_page(pdev, frag->dma, frag->length,
					       PCI_DMA_TODEVICE);
				frag->dma = 0ULL;
			}

			adapter->stats.xmitfinished++;
			dev_kfree_skb_any(buffer->skb);
			buffer->skb = NULL;
		}

		last_consumer = get_next_index(last_consumer,
					       adapter->max_tx_desc_count);
		if (++count >= MAX_STATUS_HANDLE)
			break;
	}

	if (count) {
		adapter->last_cmd_consumer = last_consumer;
		smp_mb();
		if (netif_queue_stopped(netdev) && netif_running(netdev)) {
			netif_tx_lock(netdev);
			netif_wake_queue(netdev);
			smp_mb();
			netif_tx_unlock(netdev);
		}
	}
	/*
	 * If everything is freed up to consumer then check if the ring is full
	 * If the ring is full then check if more needs to be freed and
	 * schedule the call back again.
	 *
	 * This happens when there are 2 CPUs. One could be freeing and the
	 * other filling it. If the ring is full when we get out of here and
	 * the card has already interrupted the host then the host can miss the
	 * interrupt.
	 *
	 * There is still a possible race condition and the host could miss an
	 * interrupt. The card has to take care of this.
	 */
	consumer = le32_to_cpu(*(adapter->cmd_consumer));
	done = (last_consumer == consumer);

	return (done);
}

/*
 * netxen_post_rx_buffers puts buffer in the Phantom memory
 */
void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx, u32 ringid)
{
	struct pci_dev *pdev = adapter->pdev;
	struct sk_buff *skb;
	struct netxen_recv_context *recv_ctx = &(adapter->recv_ctx[ctx]);
	struct nx_host_rds_ring *rds_ring = NULL;
	uint producer;
	struct rcv_desc *pdesc;
	struct netxen_rx_buffer *buffer;
	int count = 0;
	int index = 0;
	netxen_ctx_msg msg = 0;
	dma_addr_t dma;
	struct list_head *head;

	rds_ring = &recv_ctx->rds_rings[ringid];

	producer = rds_ring->producer;
	index = rds_ring->begin_alloc;
	head = &rds_ring->free_list;

	/* We can start writing rx descriptors into the phantom memory. */
	while (!list_empty(head)) {

		skb = dev_alloc_skb(rds_ring->skb_size);
		if (unlikely(!skb)) {
			rds_ring->begin_alloc = index;
			break;
		}

		buffer = list_entry(head->next, struct netxen_rx_buffer, list);
		list_del(&buffer->list);

		count++;	/* now there should be no failure */
		pdesc = &rds_ring->desc_head[producer];

		if (!adapter->ahw.cut_through)
			skb_reserve(skb, 2);
		/* This will be setup when we receive the
		 * buffer after it has been filled  FSL  TBD TBD
		 * skb->dev = netdev;
		 */
		dma = pci_map_single(pdev, skb->data, rds_ring->dma_size,
				     PCI_DMA_FROMDEVICE);
		pdesc->addr_buffer = cpu_to_le64(dma);
		buffer->skb = skb;
		buffer->state = NETXEN_BUFFER_BUSY;
		buffer->dma = dma;
		/* make a rcv descriptor  */
		pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
		pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
		DPRINTK(INFO, "done writing descriptor\n");
		producer =
		    get_next_index(producer, rds_ring->max_rx_desc_count);
		index = get_next_index(index, rds_ring->max_rx_desc_count);
	}
	/* if we did allocate buffers, then write the count to Phantom */
	if (count) {
		rds_ring->begin_alloc = index;
		rds_ring->producer = producer;
		/* Window = 1 */
		adapter->pci_write_normalize(adapter,
				rds_ring->crb_rcv_producer,
				(producer-1) & (rds_ring->max_rx_desc_count-1));

		if (adapter->fw_major < 4) {
			/*
			 * Write a doorbell msg to tell phanmon of change in
			 * receive ring producer
			 * Only for firmware version < 4.0.0
			 */
			netxen_set_msg_peg_id(msg, NETXEN_RCV_PEG_DB_ID);
			netxen_set_msg_privid(msg);
			netxen_set_msg_count(msg,
					     ((producer -
					       1) & (rds_ring->
						     max_rx_desc_count - 1)));
			netxen_set_msg_ctxid(msg, adapter->portnum);
			netxen_set_msg_opcode(msg, NETXEN_RCV_PRODUCER(ringid));
			writel(msg,
			       DB_NORMALIZE(adapter,
					    NETXEN_RCV_PRODUCER_OFFSET));
		}
	}
}
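
/*
 * Same as netxen_post_rx_buffers(), but without ringing the doorbell;
 * only the producer index in the CRB is updated.
 */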
static void netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter,
					uint32_t ctx, uint32_t ringid)
{
	struct pci_dev *pdev = adapter->pdev;
	struct sk_buff *skb;
	struct netxen_recv_context *recv_ctx = &(adapter->recv_ctx[ctx]);
	struct nx_host_rds_ring *rds_ring = NULL;
	u32 producer;
	struct rcv_desc *pdesc;
	struct netxen_rx_buffer *buffer;
	int count = 0;
	int index = 0;
	struct list_head *head;

	rds_ring = &recv_ctx->rds_rings[ringid];

	producer = rds_ring->producer;
	index = rds_ring->begin_alloc;
	head = &rds_ring->free_list;
	/* We can start writing rx descriptors into the phantom memory. */
	while (!list_empty(head)) {

		skb = dev_alloc_skb(rds_ring->skb_size);
		if (unlikely(!skb)) {
			rds_ring->begin_alloc = index;
			break;
		}

		buffer = list_entry(head->next, struct netxen_rx_buffer, list);
		list_del(&buffer->list);

		count++;	/* now there should be no failure */
		pdesc = &rds_ring->desc_head[producer];
		if (!adapter->ahw.cut_through)
			skb_reserve(skb, 2);
		buffer->skb = skb;
		buffer->state = NETXEN_BUFFER_BUSY;
		buffer->dma = pci_map_single(pdev, skb->data,
					     rds_ring->dma_size,
					     PCI_DMA_FROMDEVICE);

		/* make a rcv descriptor  */
		pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
		pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
		pdesc->addr_buffer = cpu_to_le64(buffer->dma);
		producer =
		    get_next_index(producer, rds_ring->max_rx_desc_count);
		index = get_next_index(index, rds_ring->max_rx_desc_count);
		buffer = &rds_ring->rx_buf_arr[index];
	}

	/* if we did allocate buffers, then write the count to Phantom */
	if (count) {
		rds_ring->begin_alloc = index;
		rds_ring->producer = producer;
		/* Window = 1 */
		adapter->pci_write_normalize(adapter,
				rds_ring->crb_rcv_producer,
				(producer-1) & (rds_ring->max_rx_desc_count-1));
		wmb();
	}
}

void netxen_nic_clear_stats(struct netxen_adapter *adapter)
{
	memset(&adapter->stats, 0, sizeof(adapter->stats));
	return;
}