netxen: fix tx ring accounting
[linux-2.6/linux-2.6-openrd.git] drivers/net/netxen/netxen_nic_main.c
blob 71daa3d5f114174f7ec13015b49fa8d45cb9d2a6
1 /*
2 * Copyright (C) 2003 - 2009 NetXen, Inc.
3 * All rights reserved.
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
18 * MA 02111-1307, USA.
20 * The full GNU General Public License is included in this distribution
21 * in the file called LICENSE.
23 * Contact Information:
24 * info@netxen.com
25 * NetXen Inc,
26 * 18922 Forge Drive
27 * Cupertino, CA 95014-0701
31 #include <linux/vmalloc.h>
32 #include <linux/interrupt.h>
33 #include "netxen_nic_hw.h"
35 #include "netxen_nic.h"
36 #include "netxen_nic_phan_reg.h"
38 #include <linux/dma-mapping.h>
39 #include <linux/if_vlan.h>
40 #include <net/ip.h>
41 #include <linux/ipv6.h>
43 MODULE_DESCRIPTION("NetXen Multi port (1/10) Gigabit Network Driver");
44 MODULE_LICENSE("GPL");
45 MODULE_VERSION(NETXEN_NIC_LINUX_VERSIONID);
47 char netxen_nic_driver_name[] = "netxen_nic";
48 static char netxen_nic_driver_string[] = "NetXen Network Driver version "
49 NETXEN_NIC_LINUX_VERSIONID;
51 static int port_mode = NETXEN_PORT_MODE_AUTO_NEG;
53 /* Default to restricted 1G auto-neg mode */
54 static int wol_port_mode = 5;
56 static int use_msi = 1;
58 static int use_msi_x = 1;
60 /* Local functions to NetXen NIC driver */
61 static int __devinit netxen_nic_probe(struct pci_dev *pdev,
62 const struct pci_device_id *ent);
63 static void __devexit netxen_nic_remove(struct pci_dev *pdev);
64 static int netxen_nic_open(struct net_device *netdev);
65 static int netxen_nic_close(struct net_device *netdev);
66 static int netxen_nic_xmit_frame(struct sk_buff *, struct net_device *);
67 static void netxen_tx_timeout(struct net_device *netdev);
68 static void netxen_tx_timeout_task(struct work_struct *work);
69 static void netxen_watchdog(unsigned long);
70 static int netxen_nic_poll(struct napi_struct *napi, int budget);
71 #ifdef CONFIG_NET_POLL_CONTROLLER
72 static void netxen_nic_poll_controller(struct net_device *netdev);
73 #endif
74 static irqreturn_t netxen_intr(int irq, void *data);
75 static irqreturn_t netxen_msi_intr(int irq, void *data);
76 static irqreturn_t netxen_msix_intr(int irq, void *data);
78 /* PCI Device ID Table */
79 #define ENTRY(device) \
80 {PCI_DEVICE(PCI_VENDOR_ID_NETXEN, (device)), \
81 .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}
83 static struct pci_device_id netxen_pci_tbl[] __devinitdata = {
84 ENTRY(PCI_DEVICE_ID_NX2031_10GXSR),
85 ENTRY(PCI_DEVICE_ID_NX2031_10GCX4),
86 ENTRY(PCI_DEVICE_ID_NX2031_4GCU),
87 ENTRY(PCI_DEVICE_ID_NX2031_IMEZ),
88 ENTRY(PCI_DEVICE_ID_NX2031_HMEZ),
89 ENTRY(PCI_DEVICE_ID_NX2031_XG_MGMT),
90 ENTRY(PCI_DEVICE_ID_NX2031_XG_MGMT2),
91 ENTRY(PCI_DEVICE_ID_NX3031),
92 {0,}
95 MODULE_DEVICE_TABLE(pci, netxen_pci_tbl);
97 static struct workqueue_struct *netxen_workq;
98 #define SCHEDULE_WORK(tp) queue_work(netxen_workq, tp)
99 #define FLUSH_SCHEDULED_WORK() flush_workqueue(netxen_workq)
101 static void netxen_watchdog(unsigned long);
103 static uint32_t crb_cmd_producer[4] = {
104 CRB_CMD_PRODUCER_OFFSET, CRB_CMD_PRODUCER_OFFSET_1,
105 CRB_CMD_PRODUCER_OFFSET_2, CRB_CMD_PRODUCER_OFFSET_3
108 void
109 netxen_nic_update_cmd_producer(struct netxen_adapter *adapter,
110 struct nx_host_tx_ring *tx_ring)
112 NXWR32(adapter, tx_ring->crb_cmd_producer, tx_ring->producer);
114 if (netxen_tx_avail(tx_ring) <= TX_STOP_THRESH) {
115 netif_stop_queue(adapter->netdev);
116 smp_mb();
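/*
 * The queue is stopped once fewer than TX_STOP_THRESH descriptors remain;
 * the barrier above is presumably paired with the wake-up logic in the
 * tx-completion path (netxen_process_cmd_ring), which restarts the queue
 * after enough descriptors have been reclaimed.
 */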
120 static uint32_t crb_cmd_consumer[4] = {
121 CRB_CMD_CONSUMER_OFFSET, CRB_CMD_CONSUMER_OFFSET_1,
122 CRB_CMD_CONSUMER_OFFSET_2, CRB_CMD_CONSUMER_OFFSET_3
125 static inline void
126 netxen_nic_update_cmd_consumer(struct netxen_adapter *adapter,
127 struct nx_host_tx_ring *tx_ring)
129 NXWR32(adapter, tx_ring->crb_cmd_consumer, tx_ring->sw_consumer);
132 static uint32_t msi_tgt_status[8] = {
133 ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
134 ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
135 ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
136 ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
139 static struct netxen_legacy_intr_set legacy_intr[] = NX_LEGACY_INTR_CONFIG;
141 static inline void netxen_nic_disable_int(struct nx_host_sds_ring *sds_ring)
143 struct netxen_adapter *adapter = sds_ring->adapter;
145 NXWR32(adapter, sds_ring->crb_intr_mask, 0);
148 static inline void netxen_nic_enable_int(struct nx_host_sds_ring *sds_ring)
150 struct netxen_adapter *adapter = sds_ring->adapter;
152 NXWR32(adapter, sds_ring->crb_intr_mask, 0x1);
154 if (!NETXEN_IS_MSI_FAMILY(adapter))
155 adapter->pci_write_immediate(adapter,
156 adapter->legacy_intr.tgt_mask_reg, 0xfbff);
159 static int
160 netxen_alloc_sds_rings(struct netxen_recv_context *recv_ctx, int count)
162 int size = sizeof(struct nx_host_sds_ring) * count;
164 recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL);
166 return (recv_ctx->sds_rings == NULL);
169 static void
170 netxen_free_sds_rings(struct netxen_recv_context *recv_ctx)
172 if (recv_ctx->sds_rings != NULL)
173 kfree(recv_ctx->sds_rings);
176 static int
177 netxen_napi_add(struct netxen_adapter *adapter, struct net_device *netdev)
179 int ring;
180 struct nx_host_sds_ring *sds_ring;
181 struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
183 if (netxen_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
184 return 1;
186 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
187 sds_ring = &recv_ctx->sds_rings[ring];
188 netif_napi_add(netdev, &sds_ring->napi,
189 netxen_nic_poll, NETXEN_NETDEV_WEIGHT);
192 return 0;
195 static void
196 netxen_napi_enable(struct netxen_adapter *adapter)
198 int ring;
199 struct nx_host_sds_ring *sds_ring;
200 struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
202 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
203 sds_ring = &recv_ctx->sds_rings[ring];
204 napi_enable(&sds_ring->napi);
205 netxen_nic_enable_int(sds_ring);
209 static void
210 netxen_napi_disable(struct netxen_adapter *adapter)
212 int ring;
213 struct nx_host_sds_ring *sds_ring;
214 struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
216 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
217 sds_ring = &recv_ctx->sds_rings[ring];
218 napi_disable(&sds_ring->napi);
219 netxen_nic_disable_int(sds_ring);
220 synchronize_irq(sds_ring->irq);
224 static int nx_set_dma_mask(struct netxen_adapter *adapter, uint8_t revision_id)
226 struct pci_dev *pdev = adapter->pdev;
227 uint64_t mask, cmask;
229 adapter->pci_using_dac = 0;
231 mask = DMA_BIT_MASK(32);
232 /*
233 * Consistent DMA mask is set to 32 bit because it cannot be set to
234 * 35 bits. For P3 also leave it at 32 bits for now. Only the rings
235 * come off this pool.
236 */
237 cmask = DMA_BIT_MASK(32);
239 #ifndef CONFIG_IA64
240 if (revision_id >= NX_P3_B0)
241 mask = DMA_BIT_MASK(39);
242 else if (revision_id == NX_P2_C1)
243 mask = DMA_BIT_MASK(35);
244 #endif
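/*
 * Net effect: the streaming DMA mask stays at 32 bits by default, is
 * widened to 35 bits (32GB) on P2 C1 and to 39 bits (512GB) on P3 B0 or
 * later, while the coherent mask is left at 32 bits since only the rings
 * are allocated from that pool.
 */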
245 if (pci_set_dma_mask(pdev, mask) == 0 &&
246 pci_set_consistent_dma_mask(pdev, cmask) == 0) {
247 adapter->pci_using_dac = 1;
248 return 0;
251 return -EIO;
254 /* Update addressable range if firmware supports it */
255 static int
256 nx_update_dma_mask(struct netxen_adapter *adapter)
258 int change, shift, err;
259 uint64_t mask, old_mask;
260 struct pci_dev *pdev = adapter->pdev;
262 change = 0;
264 shift = NXRD32(adapter, CRB_DMA_SHIFT);
265 if (shift >= 32)
266 return 0;
268 if (NX_IS_REVISION_P3(adapter->ahw.revision_id) && (shift > 9))
269 change = 1;
270 else if ((adapter->ahw.revision_id == NX_P2_C1) && (shift <= 4))
271 change = 1;
273 if (change) {
274 old_mask = pdev->dma_mask;
275 mask = (1ULL<<(32+shift)) - 1;
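/*
 * Example: a CRB_DMA_SHIFT value of 3 yields (1ULL << 35) - 1, i.e. a
 * 35-bit mask; shift values of 32 or more were already rejected above.
 */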
277 err = pci_set_dma_mask(pdev, mask);
278 if (err)
279 return pci_set_dma_mask(pdev, old_mask);
282 return 0;
285 static void netxen_check_options(struct netxen_adapter *adapter)
287 if (adapter->ahw.port_type == NETXEN_NIC_XGBE)
288 adapter->num_rxd = MAX_RCV_DESCRIPTORS_10G;
289 else if (adapter->ahw.port_type == NETXEN_NIC_GBE)
290 adapter->num_rxd = MAX_RCV_DESCRIPTORS_1G;
292 adapter->msix_supported = 0;
293 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
294 adapter->msix_supported = !!use_msi_x;
295 adapter->rss_supported = !!use_msi_x;
296 } else if (adapter->fw_version >= NETXEN_VERSION_CODE(3, 4, 336)) {
297 switch (adapter->ahw.board_type) {
298 case NETXEN_BRDTYPE_P2_SB31_10G:
299 case NETXEN_BRDTYPE_P2_SB31_10G_CX4:
300 adapter->msix_supported = !!use_msi_x;
301 adapter->rss_supported = !!use_msi_x;
302 break;
303 default:
304 break;
308 adapter->num_txd = MAX_CMD_DESCRIPTORS_HOST;
309 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS;
310 adapter->num_lro_rxd = MAX_LRO_RCV_DESCRIPTORS;
312 return;
315 static int
316 netxen_check_hw_init(struct netxen_adapter *adapter, int first_boot)
318 u32 val, timeout;
320 if (first_boot == 0x55555555) {
321 /* This is the first boot after power up */
322 NXWR32(adapter, NETXEN_CAM_RAM(0x1fc), NETXEN_BDINFO_MAGIC);
324 if (!NX_IS_REVISION_P2(adapter->ahw.revision_id))
325 return 0;
327 /* PCI bus master workaround */
328 first_boot = NXRD32(adapter, NETXEN_PCIE_REG(0x4));
329 if (!(first_boot & 0x4)) {
330 first_boot |= 0x4;
331 NXWR32(adapter, NETXEN_PCIE_REG(0x4), first_boot);
332 first_boot = NXRD32(adapter, NETXEN_PCIE_REG(0x4));
335 /* This is the first boot after power up */
336 first_boot = NXRD32(adapter, NETXEN_ROMUSB_GLB_SW_RESET);
337 if (first_boot != 0x80000f) {
338 /* clear the register for future unloads/loads */
339 NXWR32(adapter, NETXEN_CAM_RAM(0x1fc), 0);
340 return -EIO;
343 /* Start P2 boot loader */
344 val = NXRD32(adapter, NETXEN_ROMUSB_GLB_PEGTUNE_DONE);
345 NXWR32(adapter, NETXEN_ROMUSB_GLB_PEGTUNE_DONE, val | 0x1);
346 timeout = 0;
347 do {
348 msleep(1);
349 val = NXRD32(adapter, NETXEN_CAM_RAM(0x1fc));
351 if (++timeout > 5000)
352 return -EIO;
354 } while (val == NETXEN_BDINFO_MAGIC);
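/*
 * Poll CAM RAM 0x1fc until the BDINFO magic written above is cleared,
 * which is taken as the sign that the P2 boot loader has started;
 * roughly 5 seconds (5000 x 1ms sleeps) before giving up with -EIO.
 */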
356 return 0;
359 static void netxen_set_port_mode(struct netxen_adapter *adapter)
361 u32 val, data;
363 val = adapter->ahw.board_type;
364 if ((val == NETXEN_BRDTYPE_P3_HMEZ) ||
365 (val == NETXEN_BRDTYPE_P3_XG_LOM)) {
366 if (port_mode == NETXEN_PORT_MODE_802_3_AP) {
367 data = NETXEN_PORT_MODE_802_3_AP;
368 NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data);
369 } else if (port_mode == NETXEN_PORT_MODE_XG) {
370 data = NETXEN_PORT_MODE_XG;
371 NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data);
372 } else if (port_mode == NETXEN_PORT_MODE_AUTO_NEG_1G) {
373 data = NETXEN_PORT_MODE_AUTO_NEG_1G;
374 NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data);
375 } else if (port_mode == NETXEN_PORT_MODE_AUTO_NEG_XG) {
376 data = NETXEN_PORT_MODE_AUTO_NEG_XG;
377 NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data);
378 } else {
379 data = NETXEN_PORT_MODE_AUTO_NEG;
380 NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data);
383 if ((wol_port_mode != NETXEN_PORT_MODE_802_3_AP) &&
384 (wol_port_mode != NETXEN_PORT_MODE_XG) &&
385 (wol_port_mode != NETXEN_PORT_MODE_AUTO_NEG_1G) &&
386 (wol_port_mode != NETXEN_PORT_MODE_AUTO_NEG_XG)) {
387 wol_port_mode = NETXEN_PORT_MODE_AUTO_NEG;
389 NXWR32(adapter, NETXEN_WOL_PORT_MODE, wol_port_mode);
393 static void netxen_set_msix_bit(struct pci_dev *pdev, int enable)
395 u32 control;
396 int pos;
398 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
399 if (pos) {
400 pci_read_config_dword(pdev, pos, &control);
401 if (enable)
402 control |= PCI_MSIX_FLAGS_ENABLE;
403 else
404 control = 0;
405 pci_write_config_dword(pdev, pos, control);
409 static void netxen_init_msix_entries(struct netxen_adapter *adapter, int count)
411 int i;
413 for (i = 0; i < count; i++)
414 adapter->msix_entries[i].entry = i;
417 static int
418 netxen_read_mac_addr(struct netxen_adapter *adapter)
420 int i;
421 unsigned char *p;
422 __le64 mac_addr;
423 struct net_device *netdev = adapter->netdev;
424 struct pci_dev *pdev = adapter->pdev;
426 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
427 if (netxen_p3_get_mac_addr(adapter, &mac_addr) != 0)
428 return -EIO;
429 } else {
430 if (netxen_get_flash_mac_addr(adapter, &mac_addr) != 0)
431 return -EIO;
434 p = (unsigned char *)&mac_addr;
435 for (i = 0; i < 6; i++)
436 netdev->dev_addr[i] = *(p + 5 - i);
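/*
 * The 64-bit value comes back low byte first, so the loop above
 * reverses it into the byte order expected in dev_addr.
 */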
438 memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
440 /* set station address */
442 if (!is_valid_ether_addr(netdev->perm_addr))
443 dev_warn(&pdev->dev, "Bad MAC address %pM.\n", netdev->dev_addr);
445 return 0;
448 int netxen_nic_set_mac(struct net_device *netdev, void *p)
450 struct netxen_adapter *adapter = netdev_priv(netdev);
451 struct sockaddr *addr = p;
453 if (!is_valid_ether_addr(addr->sa_data))
454 return -EINVAL;
456 if (netif_running(netdev)) {
457 netif_device_detach(netdev);
458 netxen_napi_disable(adapter);
461 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
462 adapter->macaddr_set(adapter, addr->sa_data);
464 if (netif_running(netdev)) {
465 netif_device_attach(netdev);
466 netxen_napi_enable(adapter);
468 return 0;
471 static void netxen_set_multicast_list(struct net_device *dev)
473 struct netxen_adapter *adapter = netdev_priv(dev);
475 adapter->set_multi(dev);
478 static const struct net_device_ops netxen_netdev_ops = {
479 .ndo_open = netxen_nic_open,
480 .ndo_stop = netxen_nic_close,
481 .ndo_start_xmit = netxen_nic_xmit_frame,
482 .ndo_get_stats = netxen_nic_get_stats,
483 .ndo_validate_addr = eth_validate_addr,
484 .ndo_set_multicast_list = netxen_set_multicast_list,
485 .ndo_set_mac_address = netxen_nic_set_mac,
486 .ndo_change_mtu = netxen_nic_change_mtu,
487 .ndo_tx_timeout = netxen_tx_timeout,
488 #ifdef CONFIG_NET_POLL_CONTROLLER
489 .ndo_poll_controller = netxen_nic_poll_controller,
490 #endif
493 static void
494 netxen_setup_intr(struct netxen_adapter *adapter)
496 struct netxen_legacy_intr_set *legacy_intrp;
497 struct pci_dev *pdev = adapter->pdev;
498 int err, num_msix;
500 if (adapter->rss_supported) {
501 num_msix = (num_online_cpus() >= MSIX_ENTRIES_PER_ADAPTER) ?
502 MSIX_ENTRIES_PER_ADAPTER : 2;
503 } else
504 num_msix = 1;
506 adapter->max_sds_rings = 1;
508 adapter->flags &= ~(NETXEN_NIC_MSI_ENABLED | NETXEN_NIC_MSIX_ENABLED);
510 if (adapter->ahw.revision_id >= NX_P3_B0)
511 legacy_intrp = &legacy_intr[adapter->ahw.pci_func];
512 else
513 legacy_intrp = &legacy_intr[0];
514 adapter->legacy_intr.int_vec_bit = legacy_intrp->int_vec_bit;
515 adapter->legacy_intr.tgt_status_reg = legacy_intrp->tgt_status_reg;
516 adapter->legacy_intr.tgt_mask_reg = legacy_intrp->tgt_mask_reg;
517 adapter->legacy_intr.pci_int_reg = legacy_intrp->pci_int_reg;
519 netxen_set_msix_bit(pdev, 0);
521 if (adapter->msix_supported) {
523 netxen_init_msix_entries(adapter, num_msix);
524 err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
525 if (err == 0) {
526 adapter->flags |= NETXEN_NIC_MSIX_ENABLED;
527 netxen_set_msix_bit(pdev, 1);
529 if (adapter->rss_supported)
530 adapter->max_sds_rings = num_msix;
532 dev_info(&pdev->dev, "using msi-x interrupts\n");
533 return;
536 if (err > 0)
537 pci_disable_msix(pdev);
539 /* fall through for msi */
542 if (use_msi && !pci_enable_msi(pdev)) {
543 adapter->flags |= NETXEN_NIC_MSI_ENABLED;
544 adapter->msi_tgt_status =
545 msi_tgt_status[adapter->ahw.pci_func];
546 dev_info(&pdev->dev, "using msi interrupts\n");
547 adapter->msix_entries[0].vector = pdev->irq;
548 return;
551 dev_info(&pdev->dev, "using legacy interrupts\n");
552 adapter->msix_entries[0].vector = pdev->irq;
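/*
 * At this point interrupt setup has fallen back in order: MSI-X (with
 * one SDS ring per vector when RSS is supported), then single-vector
 * MSI, then legacy INTx using the target status/mask registers chosen
 * above.
 */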
555 static void
556 netxen_teardown_intr(struct netxen_adapter *adapter)
558 if (adapter->flags & NETXEN_NIC_MSIX_ENABLED)
559 pci_disable_msix(adapter->pdev);
560 if (adapter->flags & NETXEN_NIC_MSI_ENABLED)
561 pci_disable_msi(adapter->pdev);
564 static void
565 netxen_cleanup_pci_map(struct netxen_adapter *adapter)
567 if (adapter->ahw.db_base != NULL)
568 iounmap(adapter->ahw.db_base);
569 if (adapter->ahw.pci_base0 != NULL)
570 iounmap(adapter->ahw.pci_base0);
571 if (adapter->ahw.pci_base1 != NULL)
572 iounmap(adapter->ahw.pci_base1);
573 if (adapter->ahw.pci_base2 != NULL)
574 iounmap(adapter->ahw.pci_base2);
577 static int
578 netxen_setup_pci_map(struct netxen_adapter *adapter)
580 void __iomem *mem_ptr0 = NULL;
581 void __iomem *mem_ptr1 = NULL;
582 void __iomem *mem_ptr2 = NULL;
583 void __iomem *db_ptr = NULL;
585 unsigned long mem_base, mem_len, db_base, db_len = 0, pci_len0 = 0;
587 struct pci_dev *pdev = adapter->pdev;
588 int pci_func = adapter->ahw.pci_func;
590 int err = 0;
592 /*
593 * Set the CRB window to invalid. If any register in window 0 is
594 * accessed it should set the window to 0 and then reset it to 1.
595 */
596 adapter->curr_window = 255;
597 adapter->ahw.qdr_sn_window = -1;
598 adapter->ahw.ddr_mn_window = -1;
600 /* remap phys address */
601 mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
602 mem_len = pci_resource_len(pdev, 0);
603 pci_len0 = 0;
605 adapter->hw_write_wx = netxen_nic_hw_write_wx_128M;
606 adapter->hw_read_wx = netxen_nic_hw_read_wx_128M;
607 adapter->pci_read_immediate = netxen_nic_pci_read_immediate_128M;
608 adapter->pci_write_immediate = netxen_nic_pci_write_immediate_128M;
609 adapter->pci_set_window = netxen_nic_pci_set_window_128M;
610 adapter->pci_mem_read = netxen_nic_pci_mem_read_128M;
611 adapter->pci_mem_write = netxen_nic_pci_mem_write_128M;
613 /* 128 Meg of memory */
614 if (mem_len == NETXEN_PCI_128MB_SIZE) {
615 mem_ptr0 = ioremap(mem_base, FIRST_PAGE_GROUP_SIZE);
616 mem_ptr1 = ioremap(mem_base + SECOND_PAGE_GROUP_START,
617 SECOND_PAGE_GROUP_SIZE);
618 mem_ptr2 = ioremap(mem_base + THIRD_PAGE_GROUP_START,
619 THIRD_PAGE_GROUP_SIZE);
620 } else if (mem_len == NETXEN_PCI_32MB_SIZE) {
621 mem_ptr1 = ioremap(mem_base, SECOND_PAGE_GROUP_SIZE);
622 mem_ptr2 = ioremap(mem_base + THIRD_PAGE_GROUP_START -
623 SECOND_PAGE_GROUP_START, THIRD_PAGE_GROUP_SIZE);
624 } else if (mem_len == NETXEN_PCI_2MB_SIZE) {
625 adapter->hw_write_wx = netxen_nic_hw_write_wx_2M;
626 adapter->hw_read_wx = netxen_nic_hw_read_wx_2M;
627 adapter->pci_read_immediate = netxen_nic_pci_read_immediate_2M;
628 adapter->pci_write_immediate =
629 netxen_nic_pci_write_immediate_2M;
630 adapter->pci_set_window = netxen_nic_pci_set_window_2M;
631 adapter->pci_mem_read = netxen_nic_pci_mem_read_2M;
632 adapter->pci_mem_write = netxen_nic_pci_mem_write_2M;
634 mem_ptr0 = pci_ioremap_bar(pdev, 0);
635 if (mem_ptr0 == NULL) {
636 dev_err(&pdev->dev, "failed to map PCI bar 0\n");
637 return -EIO;
639 pci_len0 = mem_len;
641 adapter->ahw.ddr_mn_window = 0;
642 adapter->ahw.qdr_sn_window = 0;
644 adapter->ahw.mn_win_crb = 0x100000 + PCIX_MN_WINDOW +
645 (pci_func * 0x20);
646 adapter->ahw.ms_win_crb = 0x100000 + PCIX_SN_WINDOW;
647 if (pci_func < 4)
648 adapter->ahw.ms_win_crb += (pci_func * 0x20);
649 else
650 adapter->ahw.ms_win_crb +=
651 0xA0 + ((pci_func - 4) * 0x10);
652 } else {
653 return -EIO;
656 dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));
658 adapter->ahw.pci_base0 = mem_ptr0;
659 adapter->ahw.pci_len0 = pci_len0;
660 adapter->ahw.pci_base1 = mem_ptr1;
661 adapter->ahw.pci_base2 = mem_ptr2;
663 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
664 goto skip_doorbell;
666 db_base = pci_resource_start(pdev, 4); /* doorbell is on bar 4 */
667 db_len = pci_resource_len(pdev, 4);
669 if (db_len == 0) {
670 printk(KERN_ERR "%s: doorbell is disabled\n",
671 netxen_nic_driver_name);
672 err = -EIO;
673 goto err_out;
676 db_ptr = ioremap(db_base, NETXEN_DB_MAPSIZE_BYTES);
677 if (!db_ptr) {
678 printk(KERN_ERR "%s: Failed to allocate doorbell map.\n",
679 netxen_nic_driver_name);
680 err = -EIO;
681 goto err_out;
684 skip_doorbell:
685 adapter->ahw.db_base = db_ptr;
686 adapter->ahw.db_len = db_len;
687 return 0;
689 err_out:
690 netxen_cleanup_pci_map(adapter);
691 return err;
694 static int
695 netxen_start_firmware(struct netxen_adapter *adapter, int request_fw)
697 int val, err, first_boot;
698 struct pci_dev *pdev = adapter->pdev;
700 int first_driver = 0;
702 if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
703 first_driver = (adapter->portnum == 0);
704 else
705 first_driver = (adapter->ahw.pci_func == 0);
707 if (!first_driver)
708 return 0;
710 first_boot = NXRD32(adapter, NETXEN_CAM_RAM(0x1fc));
712 err = netxen_check_hw_init(adapter, first_boot);
713 if (err) {
714 dev_err(&pdev->dev, "error in init HW init sequence\n");
715 return err;
718 if (request_fw)
719 netxen_request_firmware(adapter);
721 if (first_boot != 0x55555555) {
722 NXWR32(adapter, CRB_CMDPEG_STATE, 0);
723 netxen_pinit_from_rom(adapter, 0);
724 msleep(1);
727 NXWR32(adapter, CRB_DMA_SHIFT, 0x55555555);
728 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
729 netxen_set_port_mode(adapter);
731 netxen_load_firmware(adapter);
733 if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
735 /* Initialize multicast addr pool owners */
736 val = 0x7654;
737 if (adapter->ahw.port_type == NETXEN_NIC_XGBE)
738 val |= 0x0f000000;
739 NXWR32(adapter, NETXEN_MAC_ADDR_CNTL_REG, val);
743 err = netxen_initialize_adapter_offload(adapter);
744 if (err)
745 return err;
747 /*
748 * Tell the hardware our version number.
749 */
750 val = (_NETXEN_NIC_LINUX_MAJOR << 16)
751 | ((_NETXEN_NIC_LINUX_MINOR << 8))
752 | (_NETXEN_NIC_LINUX_SUBVERSION);
753 NXWR32(adapter, CRB_DRIVER_VERSION, val);
755 /* Handshake with the card before we register the devices. */
756 err = netxen_phantom_init(adapter, NETXEN_NIC_PEG_TUNE);
757 if (err) {
758 netxen_free_adapter_offload(adapter);
759 return err;
762 return 0;
765 static int
766 netxen_nic_request_irq(struct netxen_adapter *adapter)
768 irq_handler_t handler;
769 struct nx_host_sds_ring *sds_ring;
770 int err, ring;
772 unsigned long flags = IRQF_SAMPLE_RANDOM;
773 struct net_device *netdev = adapter->netdev;
774 struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
776 if (adapter->flags & NETXEN_NIC_MSIX_ENABLED)
777 handler = netxen_msix_intr;
778 else if (adapter->flags & NETXEN_NIC_MSI_ENABLED)
779 handler = netxen_msi_intr;
780 else {
781 flags |= IRQF_SHARED;
782 handler = netxen_intr;
784 adapter->irq = netdev->irq;
786 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
787 sds_ring = &recv_ctx->sds_rings[ring];
788 sprintf(sds_ring->name, "%s[%d]", netdev->name, ring);
789 err = request_irq(sds_ring->irq, handler,
790 flags, sds_ring->name, sds_ring);
791 if (err)
792 return err;
795 return 0;
798 static void
799 netxen_nic_free_irq(struct netxen_adapter *adapter)
801 int ring;
802 struct nx_host_sds_ring *sds_ring;
804 struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
806 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
807 sds_ring = &recv_ctx->sds_rings[ring];
808 free_irq(sds_ring->irq, sds_ring);
812 static int
813 netxen_nic_up(struct netxen_adapter *adapter, struct net_device *netdev)
815 int err;
817 err = adapter->init_port(adapter, adapter->physical_port);
818 if (err) {
819 printk(KERN_ERR "%s: Failed to initialize port %d\n",
820 netxen_nic_driver_name, adapter->portnum);
821 return err;
823 if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
824 adapter->macaddr_set(adapter, netdev->dev_addr);
826 adapter->set_multi(netdev);
827 adapter->set_mtu(adapter, netdev->mtu);
829 adapter->ahw.linkup = 0;
831 netxen_napi_enable(adapter);
833 if (adapter->max_sds_rings > 1)
834 netxen_config_rss(adapter, 1);
836 if (adapter->capabilities & NX_FW_CAPABILITY_LINK_NOTIFICATION)
837 netxen_linkevent_request(adapter, 1);
838 else
839 netxen_nic_set_link_parameters(adapter);
841 mod_timer(&adapter->watchdog_timer, jiffies);
843 return 0;
846 static void
847 netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev)
849 netif_carrier_off(netdev);
850 netif_stop_queue(netdev);
852 if (adapter->stop_port)
853 adapter->stop_port(adapter);
855 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
856 netxen_p3_free_mac_list(adapter);
858 netxen_napi_disable(adapter);
860 netxen_release_tx_buffers(adapter);
862 FLUSH_SCHEDULED_WORK();
863 del_timer_sync(&adapter->watchdog_timer);
867 static int
868 netxen_nic_attach(struct netxen_adapter *adapter)
870 struct net_device *netdev = adapter->netdev;
871 struct pci_dev *pdev = adapter->pdev;
872 int err, ring;
873 struct nx_host_rds_ring *rds_ring;
874 struct nx_host_tx_ring *tx_ring;
876 err = netxen_init_firmware(adapter);
877 if (err != 0) {
878 printk(KERN_ERR "Failed to init firmware\n");
879 return -EIO;
882 if (adapter->fw_major < 4)
883 adapter->max_rds_rings = 3;
884 else
885 adapter->max_rds_rings = 2;
887 err = netxen_alloc_sw_resources(adapter);
888 if (err) {
889 printk(KERN_ERR "%s: Error in setting sw resources\n",
890 netdev->name);
891 return err;
894 netxen_nic_clear_stats(adapter);
896 err = netxen_alloc_hw_resources(adapter);
897 if (err) {
898 printk(KERN_ERR "%s: Error in setting hw resources\n",
899 netdev->name);
900 goto err_out_free_sw;
903 if (adapter->fw_major < 4) {
904 tx_ring = adapter->tx_ring;
905 tx_ring->crb_cmd_producer = crb_cmd_producer[adapter->portnum];
906 tx_ring->crb_cmd_consumer = crb_cmd_consumer[adapter->portnum];
908 tx_ring->producer = 0;
909 tx_ring->sw_consumer = 0;
911 netxen_nic_update_cmd_producer(adapter, tx_ring);
912 netxen_nic_update_cmd_consumer(adapter, tx_ring);
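/*
 * With pre-4.0 firmware the host drives the tx producer/consumer
 * indices through CRB registers directly, so both are reset to zero
 * here before any rx buffers are posted or traffic is queued.
 */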
915 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
916 rds_ring = &adapter->recv_ctx.rds_rings[ring];
917 netxen_post_rx_buffers(adapter, ring, rds_ring);
920 err = netxen_nic_request_irq(adapter);
921 if (err) {
922 dev_err(&pdev->dev, "%s: failed to setup interrupt\n",
923 netdev->name);
924 goto err_out_free_rxbuf;
927 adapter->is_up = NETXEN_ADAPTER_UP_MAGIC;
928 return 0;
930 err_out_free_rxbuf:
931 netxen_release_rx_buffers(adapter);
932 netxen_free_hw_resources(adapter);
933 err_out_free_sw:
934 netxen_free_sw_resources(adapter);
935 return err;
938 static void
939 netxen_nic_detach(struct netxen_adapter *adapter)
941 netxen_release_rx_buffers(adapter);
942 netxen_free_hw_resources(adapter);
943 netxen_nic_free_irq(adapter);
944 netxen_free_sw_resources(adapter);
946 adapter->is_up = 0;
949 static int __devinit
950 netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
952 struct net_device *netdev = NULL;
953 struct netxen_adapter *adapter = NULL;
954 int i = 0, err;
955 int pci_func_id = PCI_FUNC(pdev->devfn);
956 uint8_t revision_id;
958 if (pdev->class != 0x020000) {
959 printk(KERN_DEBUG "NetXen function %d, class %x will not "
960 "be enabled.\n",pci_func_id, pdev->class);
961 return -ENODEV;
964 if (pdev->revision >= NX_P3_A0 && pdev->revision < NX_P3_B1) {
965 printk(KERN_WARNING "NetXen chip revisions between 0x%x-0x%x "
966 "will not be enabled.\n",
967 NX_P3_A0, NX_P3_B1);
968 return -ENODEV;
971 if ((err = pci_enable_device(pdev)))
972 return err;
974 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
975 err = -ENODEV;
976 goto err_out_disable_pdev;
979 if ((err = pci_request_regions(pdev, netxen_nic_driver_name)))
980 goto err_out_disable_pdev;
982 pci_set_master(pdev);
984 netdev = alloc_etherdev(sizeof(struct netxen_adapter));
985 if (!netdev) {
986 printk(KERN_ERR "%s: Failed to allocate memory for the "
987 "device block. Check system memory resource"
988 " usage.\n", netxen_nic_driver_name);
989 goto err_out_free_res;
992 SET_NETDEV_DEV(netdev, &pdev->dev);
994 adapter = netdev_priv(netdev);
995 adapter->netdev = netdev;
996 adapter->pdev = pdev;
997 adapter->ahw.pci_func = pci_func_id;
999 revision_id = pdev->revision;
1000 adapter->ahw.revision_id = revision_id;
1002 err = nx_set_dma_mask(adapter, revision_id);
1003 if (err)
1004 goto err_out_free_netdev;
1006 rwlock_init(&adapter->adapter_lock);
1007 spin_lock_init(&adapter->tx_clean_lock);
1008 INIT_LIST_HEAD(&adapter->mac_list);
1010 err = netxen_setup_pci_map(adapter);
1011 if (err)
1012 goto err_out_free_netdev;
1014 /* This will be reset for mezz cards */
1015 adapter->portnum = pci_func_id;
1016 adapter->rx_csum = 1;
1017 adapter->mc_enabled = 0;
1018 if (NX_IS_REVISION_P3(revision_id))
1019 adapter->max_mc_count = 38;
1020 else
1021 adapter->max_mc_count = 16;
1023 netdev->netdev_ops = &netxen_netdev_ops;
1024 netdev->watchdog_timeo = 2*HZ;
1026 netxen_nic_change_mtu(netdev, netdev->mtu);
1028 SET_ETHTOOL_OPS(netdev, &netxen_nic_ethtool_ops);
1030 netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO);
1031 netdev->features |= (NETIF_F_GRO);
1032 netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO);
1034 if (NX_IS_REVISION_P3(revision_id)) {
1035 netdev->features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
1036 netdev->vlan_features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
1039 if (adapter->pci_using_dac) {
1040 netdev->features |= NETIF_F_HIGHDMA;
1041 netdev->vlan_features |= NETIF_F_HIGHDMA;
1044 if (netxen_nic_get_board_info(adapter) != 0) {
1045 printk("%s: Error getting board config info.\n",
1046 netxen_nic_driver_name);
1047 err = -EIO;
1048 goto err_out_iounmap;
1051 netxen_initialize_adapter_ops(adapter);
1053 /* Mezz cards have PCI function 0,2,3 enabled */
1054 switch (adapter->ahw.board_type) {
1055 case NETXEN_BRDTYPE_P2_SB31_10G_IMEZ:
1056 case NETXEN_BRDTYPE_P2_SB31_10G_HMEZ:
1057 if (pci_func_id >= 2)
1058 adapter->portnum = pci_func_id - 2;
1059 break;
1060 default:
1061 break;
1064 err = netxen_start_firmware(adapter, 1);
1065 if (err)
1066 goto err_out_iounmap;
1068 nx_update_dma_mask(adapter);
1070 netxen_nic_get_firmware_info(adapter);
1072 /*
1073 * See if the firmware gave us a virtual-physical port mapping.
1074 */
1075 adapter->physical_port = adapter->portnum;
1076 if (adapter->fw_major < 4) {
1077 i = NXRD32(adapter, CRB_V2P(adapter->portnum));
1078 if (i != 0x55555555)
1079 adapter->physical_port = i;
1082 netxen_check_options(adapter);
1084 netxen_setup_intr(adapter);
1086 netdev->irq = adapter->msix_entries[0].vector;
1088 if (netxen_napi_add(adapter, netdev))
1089 goto err_out_disable_msi;
1091 init_timer(&adapter->watchdog_timer);
1092 adapter->watchdog_timer.function = &netxen_watchdog;
1093 adapter->watchdog_timer.data = (unsigned long)adapter;
1094 INIT_WORK(&adapter->watchdog_task, netxen_watchdog_task);
1095 INIT_WORK(&adapter->tx_timeout_task, netxen_tx_timeout_task);
1097 err = netxen_read_mac_addr(adapter);
1098 if (err)
1099 dev_warn(&pdev->dev, "failed to read mac addr\n");
1101 netif_carrier_off(netdev);
1102 netif_stop_queue(netdev);
1104 if ((err = register_netdev(netdev))) {
1105 printk(KERN_ERR "%s: register_netdev failed port #%d"
1106 " aborting\n", netxen_nic_driver_name,
1107 adapter->portnum);
1108 err = -EIO;
1109 goto err_out_disable_msi;
1112 pci_set_drvdata(pdev, adapter);
1114 switch (adapter->ahw.port_type) {
1115 case NETXEN_NIC_GBE:
1116 dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n",
1117 adapter->netdev->name);
1118 break;
1119 case NETXEN_NIC_XGBE:
1120 dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
1121 adapter->netdev->name);
1122 break;
1125 return 0;
1127 err_out_disable_msi:
1128 netxen_teardown_intr(adapter);
1130 netxen_free_adapter_offload(adapter);
1132 err_out_iounmap:
1133 netxen_cleanup_pci_map(adapter);
1135 err_out_free_netdev:
1136 free_netdev(netdev);
1138 err_out_free_res:
1139 pci_release_regions(pdev);
1141 err_out_disable_pdev:
1142 pci_set_drvdata(pdev, NULL);
1143 pci_disable_device(pdev);
1144 return err;
1147 static void __devexit netxen_nic_remove(struct pci_dev *pdev)
1149 struct netxen_adapter *adapter;
1150 struct net_device *netdev;
1152 adapter = pci_get_drvdata(pdev);
1153 if (adapter == NULL)
1154 return;
1156 netdev = adapter->netdev;
1158 unregister_netdev(netdev);
1160 if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC) {
1161 netxen_nic_detach(adapter);
1164 if (adapter->portnum == 0)
1165 netxen_free_adapter_offload(adapter);
1167 netxen_teardown_intr(adapter);
1168 netxen_free_sds_rings(&adapter->recv_ctx);
1170 netxen_cleanup_pci_map(adapter);
1172 netxen_release_firmware(adapter);
1174 pci_release_regions(pdev);
1175 pci_disable_device(pdev);
1176 pci_set_drvdata(pdev, NULL);
1178 free_netdev(netdev);
1181 static int
1182 netxen_nic_suspend(struct pci_dev *pdev, pm_message_t state)
1185 struct netxen_adapter *adapter = pci_get_drvdata(pdev);
1186 struct net_device *netdev = adapter->netdev;
1188 netif_device_detach(netdev);
1190 if (netif_running(netdev))
1191 netxen_nic_down(adapter, netdev);
1193 if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC)
1194 netxen_nic_detach(adapter);
1196 pci_save_state(pdev);
1198 if (netxen_nic_wol_supported(adapter)) {
1199 pci_enable_wake(pdev, PCI_D3cold, 1);
1200 pci_enable_wake(pdev, PCI_D3hot, 1);
1203 pci_disable_device(pdev);
1204 pci_set_power_state(pdev, pci_choose_state(pdev, state));
1206 return 0;
1209 static int
1210 netxen_nic_resume(struct pci_dev *pdev)
1212 struct netxen_adapter *adapter = pci_get_drvdata(pdev);
1213 struct net_device *netdev = adapter->netdev;
1214 int err;
1216 pci_set_power_state(pdev, PCI_D0);
1217 pci_restore_state(pdev);
1219 err = pci_enable_device(pdev);
1220 if (err)
1221 return err;
1223 adapter->curr_window = 255;
1225 err = netxen_start_firmware(adapter, 0);
1226 if (err) {
1227 dev_err(&pdev->dev, "failed to start firmware\n");
1228 return err;
1231 if (netif_running(netdev)) {
1232 err = netxen_nic_attach(adapter);
1233 if (err)
1234 return err;
1236 err = netxen_nic_up(adapter, netdev);
1237 if (err)
1238 return err;
1240 netif_device_attach(netdev);
1243 return 0;
1246 static int netxen_nic_open(struct net_device *netdev)
1248 struct netxen_adapter *adapter = netdev_priv(netdev);
1249 int err = 0;
1251 if (adapter->driver_mismatch)
1252 return -EIO;
1254 if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) {
1255 err = netxen_nic_attach(adapter);
1256 if (err)
1257 return err;
1260 err = netxen_nic_up(adapter, netdev);
1261 if (err)
1262 goto err_out;
1264 netif_start_queue(netdev);
1266 return 0;
1268 err_out:
1269 netxen_nic_detach(adapter);
1270 return err;
1273 /*
1274 * netxen_nic_close - Disables a network interface entry point
1275 */
1276 static int netxen_nic_close(struct net_device *netdev)
1278 struct netxen_adapter *adapter = netdev_priv(netdev);
1280 netxen_nic_down(adapter, netdev);
1281 return 0;
1284 static bool netxen_tso_check(struct net_device *netdev,
1285 struct cmd_desc_type0 *desc, struct sk_buff *skb)
1287 bool tso = false;
1288 u8 opcode = TX_ETHER_PKT;
1289 __be16 protocol = skb->protocol;
1290 u16 flags = 0;
1292 if (protocol == cpu_to_be16(ETH_P_8021Q)) {
1293 struct vlan_ethhdr *vh = (struct vlan_ethhdr *)skb->data;
1294 protocol = vh->h_vlan_encapsulated_proto;
1295 flags = FLAGS_VLAN_TAGGED;
1298 if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
1299 skb_shinfo(skb)->gso_size > 0) {
1301 desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
1302 desc->total_hdr_length =
1303 skb_transport_offset(skb) + tcp_hdrlen(skb);
1305 opcode = (protocol == cpu_to_be16(ETH_P_IPV6)) ?
1306 TX_TCP_LSO6 : TX_TCP_LSO;
1307 tso = true;
1309 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
1310 u8 l4proto;
1312 if (protocol == cpu_to_be16(ETH_P_IP)) {
1313 l4proto = ip_hdr(skb)->protocol;
1315 if (l4proto == IPPROTO_TCP)
1316 opcode = TX_TCP_PKT;
1317 else if (l4proto == IPPROTO_UDP)
1318 opcode = TX_UDP_PKT;
1319 } else if (protocol == cpu_to_be16(ETH_P_IPV6)) {
1320 l4proto = ipv6_hdr(skb)->nexthdr;
1322 if (l4proto == IPPROTO_TCP)
1323 opcode = TX_TCPV6_PKT;
1324 else if (l4proto == IPPROTO_UDP)
1325 opcode = TX_UDPV6_PKT;
1328 desc->tcp_hdr_offset = skb_transport_offset(skb);
1329 desc->ip_hdr_offset = skb_network_offset(skb);
1330 netxen_set_tx_flags_opcode(desc, flags, opcode);
1331 return tso;
1334 static void
1335 netxen_clean_tx_dma_mapping(struct pci_dev *pdev,
1336 struct netxen_cmd_buffer *pbuf, int last)
1338 int k;
1339 struct netxen_skb_frag *buffrag;
1341 buffrag = &pbuf->frag_array[0];
1342 pci_unmap_single(pdev, buffrag->dma,
1343 buffrag->length, PCI_DMA_TODEVICE);
1345 for (k = 1; k < last; k++) {
1346 buffrag = &pbuf->frag_array[k];
1347 pci_unmap_page(pdev, buffrag->dma,
1348 buffrag->length, PCI_DMA_TODEVICE);
1352 static inline void
1353 netxen_clear_cmddesc(u64 *desc)
1355 int i;
1356 for (i = 0; i < 8; i++)
1357 desc[i] = 0ULL;
1360 static int
1361 netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1363 struct netxen_adapter *adapter = netdev_priv(netdev);
1364 struct nx_host_tx_ring *tx_ring = adapter->tx_ring;
1365 unsigned int first_seg_len = skb->len - skb->data_len;
1366 struct netxen_cmd_buffer *pbuf;
1367 struct netxen_skb_frag *buffrag;
1368 struct cmd_desc_type0 *hwdesc;
1369 struct pci_dev *pdev = adapter->pdev;
1370 dma_addr_t temp_dma;
1371 int i, k;
1373 u32 producer;
1374 int frag_count, no_of_desc;
1375 u32 num_txd = tx_ring->num_desc;
1376 bool is_tso = false;
1378 frag_count = skb_shinfo(skb)->nr_frags + 1;
1380 /* 4 fragments per cmd des */
1381 no_of_desc = (frag_count + 3) >> 2;
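/*
 * Each command descriptor carries up to four buffer addresses, so for
 * example a 9-fragment skb (linear part plus 8 pages) needs three
 * descriptors; the "+ 2" in the check below leaves headroom for the
 * extra descriptors an LSO header copy can consume.
 */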
1383 if (unlikely((no_of_desc + 2) > netxen_tx_avail(tx_ring))) {
1384 netif_stop_queue(netdev);
1385 return NETDEV_TX_BUSY;
1388 producer = tx_ring->producer;
1390 hwdesc = &tx_ring->desc_head[producer];
1391 netxen_clear_cmddesc((u64 *)hwdesc);
1392 pbuf = &tx_ring->cmd_buf_arr[producer];
1394 is_tso = netxen_tso_check(netdev, hwdesc, skb);
1396 pbuf->skb = skb;
1397 pbuf->frag_count = frag_count;
1398 buffrag = &pbuf->frag_array[0];
1399 temp_dma = pci_map_single(pdev, skb->data, first_seg_len,
1400 PCI_DMA_TODEVICE);
1401 if (pci_dma_mapping_error(pdev, temp_dma))
1402 goto drop_packet;
1404 buffrag->dma = temp_dma;
1405 buffrag->length = first_seg_len;
1406 netxen_set_tx_frags_len(hwdesc, frag_count, skb->len);
1407 netxen_set_tx_port(hwdesc, adapter->portnum);
1409 hwdesc->buffer_length[0] = cpu_to_le16(first_seg_len);
1410 hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
1412 for (i = 1, k = 1; i < frag_count; i++, k++) {
1413 struct skb_frag_struct *frag;
1414 int len, temp_len;
1415 unsigned long offset;
1417 /* move to next desc. if there is a need */
1418 if ((i & 0x3) == 0) {
1419 k = 0;
1420 producer = get_next_index(producer, num_txd);
1421 hwdesc = &tx_ring->desc_head[producer];
1422 netxen_clear_cmddesc((u64 *)hwdesc);
1423 pbuf = &tx_ring->cmd_buf_arr[producer];
1424 pbuf->skb = NULL;
1426 frag = &skb_shinfo(skb)->frags[i - 1];
1427 len = frag->size;
1428 offset = frag->page_offset;
1430 temp_len = len;
1431 temp_dma = pci_map_page(pdev, frag->page, offset,
1432 len, PCI_DMA_TODEVICE);
1433 if (pci_dma_mapping_error(pdev, temp_dma)) {
1434 netxen_clean_tx_dma_mapping(pdev, pbuf, i);
1435 goto drop_packet;
1438 buffrag++;
1439 buffrag->dma = temp_dma;
1440 buffrag->length = temp_len;
1442 hwdesc->buffer_length[k] = cpu_to_le16(temp_len);
1443 switch (k) {
1444 case 0:
1445 hwdesc->addr_buffer1 = cpu_to_le64(temp_dma);
1446 break;
1447 case 1:
1448 hwdesc->addr_buffer2 = cpu_to_le64(temp_dma);
1449 break;
1450 case 2:
1451 hwdesc->addr_buffer3 = cpu_to_le64(temp_dma);
1452 break;
1453 case 3:
1454 hwdesc->addr_buffer4 = cpu_to_le64(temp_dma);
1455 break;
1457 frag++;
1459 producer = get_next_index(producer, num_txd);
1461 /* For LSO, we need to copy the MAC/IP/TCP headers into
1462 * the descriptor ring
1463 */
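/*
 * The headers are copied because the adapter apparently rebuilds them
 * for every segment it generates, so it needs its own copy in the
 * ring; up to two additional descriptors are consumed for this.
 */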
1464 if (is_tso) {
1465 int hdr_len, first_hdr_len, more_hdr;
1466 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1467 if (hdr_len > (sizeof(struct cmd_desc_type0) - 2)) {
1468 first_hdr_len = sizeof(struct cmd_desc_type0) - 2;
1469 more_hdr = 1;
1470 } else {
1471 first_hdr_len = hdr_len;
1472 more_hdr = 0;
1474 /* copy the MAC/IP/TCP headers to the cmd descriptor list */
1475 hwdesc = &tx_ring->desc_head[producer];
1476 pbuf = &tx_ring->cmd_buf_arr[producer];
1477 pbuf->skb = NULL;
1479 /* copy the first 64 bytes */
1480 memcpy(((void *)hwdesc) + 2,
1481 (void *)(skb->data), first_hdr_len);
1482 producer = get_next_index(producer, num_txd);
1484 if (more_hdr) {
1485 hwdesc = &tx_ring->desc_head[producer];
1486 pbuf = &tx_ring->cmd_buf_arr[producer];
1487 pbuf->skb = NULL;
1488 /* copy the next 64 bytes - should be enough except
1489 * for pathological case
1490 */
1491 skb_copy_from_linear_data_offset(skb, first_hdr_len,
1492 hwdesc,
1493 (hdr_len -
1494 first_hdr_len));
1495 producer = get_next_index(producer, num_txd);
1499 tx_ring->producer = producer;
1500 adapter->stats.txbytes += skb->len;
1502 netxen_nic_update_cmd_producer(adapter, tx_ring);
1504 adapter->stats.xmitcalled++;
1506 return NETDEV_TX_OK;
1508 drop_packet:
1509 adapter->stats.txdropped++;
1510 dev_kfree_skb_any(skb);
1511 return NETDEV_TX_OK;
1514 static int netxen_nic_check_temp(struct netxen_adapter *adapter)
1516 struct net_device *netdev = adapter->netdev;
1517 uint32_t temp, temp_state, temp_val;
1518 int rv = 0;
1520 temp = NXRD32(adapter, CRB_TEMP_STATE);
1522 temp_state = nx_get_temp_state(temp);
1523 temp_val = nx_get_temp_val(temp);
1525 if (temp_state == NX_TEMP_PANIC) {
1526 printk(KERN_ALERT
1527 "%s: Device temperature %d degrees C exceeds"
1528 " maximum allowed. Hardware has been shut down.\n",
1529 netxen_nic_driver_name, temp_val);
1531 netif_carrier_off(netdev);
1532 netif_stop_queue(netdev);
1533 rv = 1;
1534 } else if (temp_state == NX_TEMP_WARN) {
1535 if (adapter->temp == NX_TEMP_NORMAL) {
1536 printk(KERN_ALERT
1537 "%s: Device temperature %d degrees C "
1538 "exceeds operating range."
1539 " Immediate action needed.\n",
1540 netxen_nic_driver_name, temp_val);
1542 } else {
1543 if (adapter->temp == NX_TEMP_WARN) {
1544 printk(KERN_INFO
1545 "%s: Device temperature is now %d degrees C"
1546 " in normal range.\n", netxen_nic_driver_name,
1547 temp_val);
1550 adapter->temp = temp_state;
1551 return rv;
1554 void netxen_advert_link_change(struct netxen_adapter *adapter, int linkup)
1556 struct net_device *netdev = adapter->netdev;
1558 if (adapter->ahw.linkup && !linkup) {
1559 printk(KERN_INFO "%s: %s NIC Link is down\n",
1560 netxen_nic_driver_name, netdev->name);
1561 adapter->ahw.linkup = 0;
1562 if (netif_running(netdev)) {
1563 netif_carrier_off(netdev);
1564 netif_stop_queue(netdev);
1567 if (!adapter->has_link_events)
1568 netxen_nic_set_link_parameters(adapter);
1570 } else if (!adapter->ahw.linkup && linkup) {
1571 printk(KERN_INFO "%s: %s NIC Link is up\n",
1572 netxen_nic_driver_name, netdev->name);
1573 adapter->ahw.linkup = 1;
1574 if (netif_running(netdev)) {
1575 netif_carrier_on(netdev);
1576 netif_wake_queue(netdev);
1579 if (!adapter->has_link_events)
1580 netxen_nic_set_link_parameters(adapter);
1584 static void netxen_nic_handle_phy_intr(struct netxen_adapter *adapter)
1586 u32 val, port, linkup;
1588 port = adapter->physical_port;
1590 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
1591 val = NXRD32(adapter, CRB_XG_STATE_P3);
1592 val = XG_LINK_STATE_P3(adapter->ahw.pci_func, val);
1593 linkup = (val == XG_LINK_UP_P3);
1594 } else {
1595 val = NXRD32(adapter, CRB_XG_STATE);
1596 if (adapter->ahw.port_type == NETXEN_NIC_GBE)
1597 linkup = (val >> port) & 1;
1598 else {
1599 val = (val >> port*8) & 0xff;
1600 linkup = (val == XG_LINK_UP);
1604 netxen_advert_link_change(adapter, linkup);
1607 static void netxen_watchdog(unsigned long v)
1609 struct netxen_adapter *adapter = (struct netxen_adapter *)v;
1611 SCHEDULE_WORK(&adapter->watchdog_task);
1614 void netxen_watchdog_task(struct work_struct *work)
1616 struct netxen_adapter *adapter =
1617 container_of(work, struct netxen_adapter, watchdog_task);
1619 if ((adapter->portnum == 0) && netxen_nic_check_temp(adapter))
1620 return;
1622 if (!adapter->has_link_events)
1623 netxen_nic_handle_phy_intr(adapter);
1625 if (netif_running(adapter->netdev))
1626 mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
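/*
 * The watchdog re-arms itself every two seconds while the interface
 * is running; temperature is rechecked (by port 0 only) and link
 * state is polled when the firmware does not deliver link events.
 */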
1629 static void netxen_tx_timeout(struct net_device *netdev)
1631 struct netxen_adapter *adapter = (struct netxen_adapter *)
1632 netdev_priv(netdev);
1633 SCHEDULE_WORK(&adapter->tx_timeout_task);
1636 static void netxen_tx_timeout_task(struct work_struct *work)
1638 struct netxen_adapter *adapter =
1639 container_of(work, struct netxen_adapter, tx_timeout_task);
1641 printk(KERN_ERR "%s %s: transmit timeout, resetting.\n",
1642 netxen_nic_driver_name, adapter->netdev->name);
1644 netxen_napi_disable(adapter);
1646 adapter->netdev->trans_start = jiffies;
1648 netxen_napi_enable(adapter);
1649 netif_wake_queue(adapter->netdev);
1652 struct net_device_stats *netxen_nic_get_stats(struct net_device *netdev)
1654 struct netxen_adapter *adapter = netdev_priv(netdev);
1655 struct net_device_stats *stats = &adapter->net_stats;
1657 memset(stats, 0, sizeof(*stats));
1659 stats->rx_packets = adapter->stats.no_rcv;
1660 stats->tx_packets = adapter->stats.xmitfinished;
1661 stats->rx_bytes = adapter->stats.rxbytes;
1662 stats->tx_bytes = adapter->stats.txbytes;
1663 stats->rx_dropped = adapter->stats.rxdropped;
1664 stats->tx_dropped = adapter->stats.txdropped;
1666 return stats;
1669 static irqreturn_t netxen_intr(int irq, void *data)
1671 struct nx_host_sds_ring *sds_ring = data;
1672 struct netxen_adapter *adapter = sds_ring->adapter;
1673 u32 status = 0;
1675 status = adapter->pci_read_immediate(adapter, ISR_INT_VECTOR);
1677 if (!(status & adapter->legacy_intr.int_vec_bit))
1678 return IRQ_NONE;
1680 if (adapter->ahw.revision_id >= NX_P3_B1) {
1681 /* check interrupt state machine, to be sure */
1682 status = adapter->pci_read_immediate(adapter,
1683 ISR_INT_STATE_REG);
1684 if (!ISR_LEGACY_INT_TRIGGERED(status))
1685 return IRQ_NONE;
1687 } else {
1688 unsigned long our_int = 0;
1690 our_int = NXRD32(adapter, CRB_INT_VECTOR);
1692 /* not our interrupt */
1693 if (!test_and_clear_bit((7 + adapter->portnum), &our_int))
1694 return IRQ_NONE;
1696 /* claim interrupt */
1697 NXWR32(adapter, CRB_INT_VECTOR, (our_int & 0xffffffff));
1700 /* clear interrupt */
1701 if (adapter->fw_major < 4)
1702 netxen_nic_disable_int(sds_ring);
1704 adapter->pci_write_immediate(adapter,
1705 adapter->legacy_intr.tgt_status_reg,
1706 0xffffffff);
1707 /* read twice to ensure write is flushed */
1708 adapter->pci_read_immediate(adapter, ISR_INT_VECTOR);
1709 adapter->pci_read_immediate(adapter, ISR_INT_VECTOR);
1711 napi_schedule(&sds_ring->napi);
1713 return IRQ_HANDLED;
1716 static irqreturn_t netxen_msi_intr(int irq, void *data)
1718 struct nx_host_sds_ring *sds_ring = data;
1719 struct netxen_adapter *adapter = sds_ring->adapter;
1721 /* clear interrupt */
1722 adapter->pci_write_immediate(adapter,
1723 adapter->msi_tgt_status, 0xffffffff);
1725 napi_schedule(&sds_ring->napi);
1726 return IRQ_HANDLED;
1729 static irqreturn_t netxen_msix_intr(int irq, void *data)
1731 struct nx_host_sds_ring *sds_ring = data;
1733 napi_schedule(&sds_ring->napi);
1734 return IRQ_HANDLED;
1737 static int netxen_nic_poll(struct napi_struct *napi, int budget)
1739 struct nx_host_sds_ring *sds_ring =
1740 container_of(napi, struct nx_host_sds_ring, napi);
1742 struct netxen_adapter *adapter = sds_ring->adapter;
1744 int tx_complete;
1745 int work_done;
1747 tx_complete = netxen_process_cmd_ring(adapter);
1749 work_done = netxen_process_rcv_ring(sds_ring, budget);
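/*
 * Interrupts are re-enabled only when the rx budget was not exhausted
 * and the tx ring has been fully processed; otherwise NAPI keeps
 * polling.
 */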
1751 if ((work_done < budget) && tx_complete) {
1752 napi_complete(&sds_ring->napi);
1753 netxen_nic_enable_int(sds_ring);
1756 return work_done;
1759 #ifdef CONFIG_NET_POLL_CONTROLLER
1760 static void netxen_nic_poll_controller(struct net_device *netdev)
1762 struct netxen_adapter *adapter = netdev_priv(netdev);
1763 disable_irq(adapter->irq);
1764 netxen_intr(adapter->irq, adapter);
1765 enable_irq(adapter->irq);
1767 #endif
1769 static struct pci_driver netxen_driver = {
1770 .name = netxen_nic_driver_name,
1771 .id_table = netxen_pci_tbl,
1772 .probe = netxen_nic_probe,
1773 .remove = __devexit_p(netxen_nic_remove),
1774 .suspend = netxen_nic_suspend,
1775 .resume = netxen_nic_resume
1778 /* Driver Registration on NetXen card */
1780 static int __init netxen_init_module(void)
1782 printk(KERN_INFO "%s\n", netxen_nic_driver_string);
1784 if ((netxen_workq = create_singlethread_workqueue("netxen")) == NULL)
1785 return -ENOMEM;
1787 return pci_register_driver(&netxen_driver);
1790 module_init(netxen_init_module);
1792 static void __exit netxen_exit_module(void)
1794 pci_unregister_driver(&netxen_driver);
1795 destroy_workqueue(netxen_workq);
1798 module_exit(netxen_exit_module);