/*
 * acenic.c: Linux driver for the Alteon AceNIC Gigabit Ethernet card
 *           and other Tigon based cards.
 *
 * Copyright 1998-2002 by Jes Sorensen, <jes@trained-monkey.org>.
 *
 * Thanks to Alteon and 3Com for providing hardware and documentation
 * enabling me to write this driver.
 *
 * A mailing list for discussing the use of this driver has been
 * setup, please subscribe to the lists if you have any questions
 * about the driver. Send mail to linux-acenic-help@sunsite.auc.dk to
 * see how to subscribe.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * Additional credits:
 *   Pete Wyckoff <wyckoff@ca.sandia.gov>: Initial Linux/Alpha and trace
 *       dump support. The trace dump support has not been
 *       integrated yet however.
 *   Troy Benjegerdes: Big Endian (PPC) patches.
 *   Nate Stahl: Better out of memory handling and stats support.
 *   Aman Singla: Nasty race between interrupt handler and tx code dealing
 *                with 'testing the tx_ret_csm and setting tx_full'
 *   David S. Miller <davem@redhat.com>: conversion to new PCI dma mapping
 *                                       infrastructure and Sparc support
 *   Pierrick Pinasseau (CERN): For lending me an Ultra 5 to test the
 *                              driver under Linux/Sparc64
 *   Matt Domsch <Matt_Domsch@dell.com>: Detect Alteon 1000baseT cards and
 *                                       ETHTOOL_GDRVINFO support
 *   Chip Salzenberg <chip@valinux.com>: Fix race condition between tx
 *                                       handler and close() cleanup.
 *   Ken Aaker <kdaaker@rchland.vnet.ibm.com>: Correct check for whether
 *                                       memory mapped IO is enabled to
 *                                       make the driver work on RS/6000.
 *   Takayoshi Kouchi <kouchi@hpc.bs1.fc.nec.co.jp>: Identifying problem
 *                                       where the driver would disable
 *                                       bus master mode if it had to disable
 *                                       write and invalidate.
 *   Stephen Hack <stephen_hack@hp.com>: Fixed ace_set_mac_addr for little
 *                                       endian systems.
 *   Val Henson <vhenson@esscom.com>: Reset Jumbo skb producer and
 *                                    rx producer index when
 *                                    flushing the Jumbo ring.
 *   Hans Grobler <grobh@sun.ac.za>: Memory leak fixes in the
 *                                   driver init path.
 *   Grant Grundler <grundler@cup.hp.com>: PCI write posting fixes.
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/version.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/sockios.h>

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#include <linux/if_vlan.h>
#endif

#include <linux/ethtool.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>
#ifdef CONFIG_ACENIC_OMIT_TIGON_I
#define ACE_IS_TIGON_I(ap)	0
#define ACE_TX_RING_ENTRIES(ap)	MAX_TX_RING_ENTRIES
#else
#define ACE_IS_TIGON_I(ap)	(ap->version == 1)
#define ACE_TX_RING_ENTRIES(ap)	ap->tx_ring_entries
#endif
#ifndef PCI_VENDOR_ID_ALTEON
#define PCI_VENDOR_ID_ALTEON		0x12ae
#endif
#ifndef PCI_DEVICE_ID_ALTEON_ACENIC_FIBRE
#define PCI_DEVICE_ID_ALTEON_ACENIC_FIBRE  0x0001
#define PCI_DEVICE_ID_ALTEON_ACENIC_COPPER 0x0002
#endif
#ifndef PCI_DEVICE_ID_3COM_3C985
#define PCI_DEVICE_ID_3COM_3C985	0x0001
#endif
#ifndef PCI_VENDOR_ID_NETGEAR
#define PCI_VENDOR_ID_NETGEAR		0x1385
#define PCI_DEVICE_ID_NETGEAR_GA620	0x620a
#endif
#ifndef PCI_DEVICE_ID_NETGEAR_GA620T
#define PCI_DEVICE_ID_NETGEAR_GA620T	0x630a
#endif

/*
 * Farallon used the DEC vendor ID by mistake and they seem not
 * to care - stinky!
 */
#ifndef PCI_DEVICE_ID_FARALLON_PN9000SX
#define PCI_DEVICE_ID_FARALLON_PN9000SX	0x1a
#endif
#ifndef PCI_DEVICE_ID_FARALLON_PN9100T
#define PCI_DEVICE_ID_FARALLON_PN9100T	0xfa
#endif
#ifndef PCI_VENDOR_ID_SGI
#define PCI_VENDOR_ID_SGI		0x10a9
#endif
#ifndef PCI_DEVICE_ID_SGI_ACENIC
#define PCI_DEVICE_ID_SGI_ACENIC	0x0009
#endif
#if LINUX_VERSION_CODE >= 0x20400
static struct pci_device_id acenic_pci_tbl[] __initdata = {
	{ PCI_VENDOR_ID_ALTEON, PCI_DEVICE_ID_ALTEON_ACENIC_FIBRE,
	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
	{ PCI_VENDOR_ID_ALTEON, PCI_DEVICE_ID_ALTEON_ACENIC_COPPER,
	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C985,
	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
	{ PCI_VENDOR_ID_NETGEAR, PCI_DEVICE_ID_NETGEAR_GA620,
	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
	{ PCI_VENDOR_ID_NETGEAR, PCI_DEVICE_ID_NETGEAR_GA620T,
	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
	/*
	 * Farallon used the DEC vendor ID on their cards incorrectly,
	 * then later Alteon's ID.
	 */
	{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_FARALLON_PN9000SX,
	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
	{ PCI_VENDOR_ID_ALTEON, PCI_DEVICE_ID_FARALLON_PN9100T,
	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
	{ PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_ACENIC,
	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
	{ }
};
MODULE_DEVICE_TABLE(pci, acenic_pci_tbl);
#endif
#ifndef MODULE_LICENSE
#define MODULE_LICENSE(a)
#endif

#ifndef __devinit
#define __devinit	__init
#endif

#ifndef SMP_CACHE_BYTES
#define SMP_CACHE_BYTES	L1_CACHE_BYTES
#endif

#ifndef SET_MODULE_OWNER
#define SET_MODULE_OWNER(dev)		do{} while(0)
#define ACE_MOD_INC_USE_COUNT		MOD_INC_USE_COUNT
#define ACE_MOD_DEC_USE_COUNT		MOD_DEC_USE_COUNT
#else
#define ACE_MOD_INC_USE_COUNT		do{} while(0)
#define ACE_MOD_DEC_USE_COUNT		do{} while(0)
#endif

#ifndef SET_NETDEV_DEV
#define SET_NETDEV_DEV(net, pdev)	do{} while(0)
#endif
#if LINUX_VERSION_CODE >= 0x2051c
#define ace_sync_irq(irq)	synchronize_irq(irq)
#else
#define ace_sync_irq(irq)	synchronize_irq()
#endif

#if LINUX_VERSION_CODE < 0x2051e
#define local_irq_save(flags)		do{__save_flags(flags) ; \
					   __cli();} while(0)
#define local_irq_restore(flags)	__restore_flags(flags)
#endif

#if (LINUX_VERSION_CODE < 0x02030d)
#define pci_resource_start(dev, bar)	dev->base_address[bar]
#elif (LINUX_VERSION_CODE < 0x02032c)
#define pci_resource_start(dev, bar)	dev->resource[bar].start
#endif

#if (LINUX_VERSION_CODE < 0x02030e)
#define net_device device
#endif
#if (LINUX_VERSION_CODE < 0x02032a)
typedef u32 dma_addr_t;

static inline void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
					 dma_addr_t *dma_handle)
{
	void *virt_ptr;

	virt_ptr = kmalloc(size, GFP_KERNEL);
	if (!virt_ptr)
		return NULL;
	*dma_handle = virt_to_bus(virt_ptr);
	return virt_ptr;
}

#define pci_free_consistent(cookie, size, ptr, dma_ptr)	kfree(ptr)
#define pci_map_page(cookie, page, off, size, dir)	\
	virt_to_bus(page_address(page)+(off))
#define pci_unmap_page(cookie, address, size, dir)
#define pci_set_dma_mask(dev, mask)		\
	(((u64)(mask) & 0xffffffff00000000) == 0 ? 0 : -EIO)
#define pci_dma_supported(dev, mask)		\
	(((u64)(mask) & 0xffffffff00000000) == 0 ? 1 : 0)
#elif (LINUX_VERSION_CODE < 0x02040d)

/*
 * 2.4.13 introduced pci_map_page()/pci_unmap_page() - for 2.4.12 and prior,
 * fall back on pci_map_single()/pci_unmap_single().
 *
 * We are guaranteed that the page is mapped at this point since
 * pci_map_page() is only used upon valid struct skb's.
 */
static inline dma_addr_t
pci_map_page(struct pci_dev *cookie, struct page *page, unsigned long off,
	     size_t size, int dir)
{
	void *page_virt;

	page_virt = page_address(page);
	return pci_map_single(cookie, (page_virt + off), size, dir);
}
#define pci_unmap_page(cookie, dma_addr, size, dir)	\
	pci_unmap_single(cookie, dma_addr, size, dir)
#endif
#if (LINUX_VERSION_CODE < 0x020412)
#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
#define pci_unmap_addr(PTR, ADDR_NAME)		0
#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)	do{} while(0)
#define pci_unmap_len(PTR, LEN_NAME)		0
#define pci_unmap_len_set(PTR, LEN_NAME, VAL)	do{} while(0)
#endif
#if (LINUX_VERSION_CODE < 0x02032b)

/*
 * SoftNet
 *
 * For pre-softnet kernels we need to tell the upper layer not to
 * re-enter start_xmit() while we are in there. However softnet
 * guarantees not to enter while we are in there so there is no need
 * to do the netif_stop_queue() dance unless the transmit queue really
 * gets stuck. This should also improve performance according to tests
 * done by Aman Singla.
 */
#define dev_kfree_skb_irq(a)			dev_kfree_skb(a)
#define netif_wake_queue(dev)			clear_bit(0, &dev->tbusy)
#define netif_stop_queue(dev)			set_bit(0, &dev->tbusy)
#define late_stop_netif_stop_queue(dev)		do{} while(0)
#define early_stop_netif_stop_queue(dev)	test_and_set_bit(0, &dev->tbusy)
#define early_stop_netif_wake_queue(dev)	netif_wake_queue(dev)

static inline void netif_start_queue(struct net_device *dev)
{
	dev->tbusy = 0;
	dev->interrupt = 0;
	dev->start = 1;
}

#define ace_mark_net_bh()			mark_bh(NET_BH)
#define netif_queue_stopped(dev)		dev->tbusy
#define netif_running(dev)			dev->start
#define ace_if_down(dev)			do{dev->start = 0;} while(0)

#define tasklet_struct				tq_struct
static inline void tasklet_schedule(struct tasklet_struct *tasklet)
{
	queue_task(tasklet, &tq_immediate);
	mark_bh(IMMEDIATE_BH);
}

static inline void tasklet_init(struct tasklet_struct *tasklet,
				void (*func)(unsigned long),
				unsigned long data)
{
	tasklet->next = NULL;
	tasklet->sync = 0;
	tasklet->routine = (void (*)(void *))func;
	tasklet->data = (void *)data;
}
#define tasklet_kill(tasklet)			do{} while(0)
#else
#define late_stop_netif_stop_queue(dev)		netif_stop_queue(dev)
#define early_stop_netif_stop_queue(dev)	0
#define early_stop_netif_wake_queue(dev)	do{} while(0)
#define ace_mark_net_bh()			do{} while(0)
#define ace_if_down(dev)			do{} while(0)
#endif

#if (LINUX_VERSION_CODE >= 0x02031b)
#define ACE_PROBE_ARG	void
#else
#define ACE_PROBE_ARG	struct net_device *dev
#endif

#ifndef min_t
#define min_t(type,a,b)	(((a)<(b))?(a):(b))
#endif

#ifndef ARCH_HAS_PREFETCHW
#ifndef prefetchw
#define prefetchw(x)	do{} while(0)
#endif
#endif
#define ACE_MAX_MOD_PARMS	8
#define BOARD_IDX_STATIC	0
#define BOARD_IDX_OVERFLOW	-1

#if (defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)) && \
	defined(NETIF_F_HW_VLAN_RX)
#define ACENIC_DO_VLAN		1
#define ACE_RCB_VLAN_FLAG	RCB_FLG_VLAN_ASSIST
#else
#define ACENIC_DO_VLAN		0
#define ACE_RCB_VLAN_FLAG	0
#endif

#include "acenic.h"

/*
 * These must be defined before the firmware is included.
 */
#define MAX_TEXT_LEN	96*1024
#define MAX_RODATA_LEN	8*1024
#define MAX_DATA_LEN	2*1024

#include "acenic_firmware.h"

#ifndef tigon2FwReleaseLocal
#define tigon2FwReleaseLocal 0
#endif
/*
 * This driver currently supports Tigon I and Tigon II based cards
 * including the Alteon AceNIC, the 3Com 3C985[B] and NetGear
 * GA620. The driver should also work on the SGI, DEC and Farallon
 * versions of the card, however I have not been able to test that
 * myself.
 *
 * This card is really neat, it supports receive hardware checksumming
 * and jumbo frames (up to 9000 bytes) and does a lot of work in the
 * firmware. Also the programming interface is quite neat, except for
 * the parts dealing with the i2c eeprom on the card ;-)
 *
 * Using jumbo frames:
 *
 * To enable jumbo frames, simply specify an mtu between 1500 and 9000
 * bytes to ifconfig. Jumbo frames can be enabled or disabled at any time
 * by running `ifconfig eth<X> mtu <MTU>' with <X> being the Ethernet
 * interface number and <MTU> being the MTU value.
 *
 * Module parameters:
 *
 * When compiled as a loadable module, the driver allows for a number
 * of module parameters to be specified. The driver supports the
 * following module parameters:
 *
 *  trace=<val> - Firmware trace level. This requires special traced
 *                firmware to replace the firmware supplied with
 *                the driver - for debugging purposes only.
 *
 *  link=<val>  - Link state. Normally you want to use the default link
 *                parameters set by the driver. This can be used to
 *                override these in case your switch doesn't negotiate
 *                the link properly. Valid values are:
 *         0x0001 - Force half duplex link.
 *         0x0002 - Do not negotiate line speed with the other end.
 *         0x0010 - 10Mbit/sec link.
 *         0x0020 - 100Mbit/sec link.
 *         0x0040 - 1000Mbit/sec link.
 *         0x0100 - Do not negotiate flow control.
 *         0x0200 - Enable RX flow control Y
 *         0x0400 - Enable TX flow control Y (Tigon II NICs only).
 *                Default value is 0x0270, i.e. negotiate link and flow
 *                control, negotiating the highest possible link speed
 *                with RX flow control enabled.
 *
 *                When disabling link speed negotiation, only one link
 *                speed is allowed to be specified!
 *
 *  tx_coal_tick=<val> - number of coalescing clock ticks (us) allowed
 *                to wait for more packets to arrive before
 *                interrupting the host, from the time the first
 *                packet arrives.
 *
 *  rx_coal_tick=<val> - number of coalescing clock ticks (us) allowed
 *                to wait for more packets to arrive in the receive ring,
 *                before interrupting the host, after receiving the
 *                first packet in the ring.
 *
 *  max_tx_desc=<val> - maximum number of transmit descriptors
 *                (packets) transmitted before interrupting the host.
 *
 *  max_rx_desc=<val> - maximum number of receive descriptors
 *                (packets) received before interrupting the host.
 *
 *  tx_ratio=<val> - 6 bit value (0 - 63) specifying the split in 64th
 *                increments of the NIC's on board memory to be used for
 *                transmit and receive buffers. For the 1MB NIC approx.
 *                800KB is available, on the 1/2MB NIC approx. 300KB is
 *                available. 68KB will always be available as a minimum
 *                for both directions. The default value is a 50/50 split.
 *
 *  dis_pci_mem_inval=<val> - disable PCI memory write and invalidate
 *                operations, default (1) is to always disable this as
 *                that is what Alteon does on NT. I have not been able
 *                to measure any real performance differences with
 *                this on my systems. Set <val>=0 if you want to
 *                enable these operations.
 *
 * If you use more than one NIC, specify the parameters for the
 * individual NICs with a comma, e.g. trace=0,0x00001fff,0 if you want to
 * run tracing on NIC #2 but not on NIC #1 and #3 (see the example after
 * this comment).
 *
 * TODO:
 *
 * - Proper multicast support.
 * - NIC dump support.
 * - More tuning parameters.
 *
 * The mini ring is not used under Linux and I am not sure it makes sense
 * to actually use it.
 *
 * New interrupt handler strategy:
 *
 * The old interrupt handler worked using the traditional method of
 * replacing an skbuff with a new one when a packet arrives. However
 * the rx rings do not need to contain a static number of buffer
 * descriptors, thus it makes sense to move the memory allocation out
 * of the main interrupt handler and do it in a bottom half handler
 * and only allocate new buffers when the number of buffers in the
 * ring is below a certain threshold. In order to avoid starving the
 * NIC under heavy load it is however necessary to force allocation
 * when hitting a minimum threshold. The strategy for allocation is as
 * follows:
 *
 *     RX_LOW_BUF_THRES   - allocate buffers in the bottom half
 *     RX_PANIC_LOW_THRES - we are very low on buffers, allocate
 *                          the buffers in the interrupt handler
 *     RX_RING_THRES      - maximum number of buffers in the rx ring
 *     RX_MINI_THRES      - maximum number of buffers in the mini ring
 *     RX_JUMBO_THRES     - maximum number of buffers in the jumbo ring
 *
 * One advantageous side effect of this allocation approach is that the
 * entire rx processing can be done without holding any spin lock
 * since the rx rings and registers are totally independent of the tx
 * ring and its registers. This of course includes the kmalloc's of
 * new skb's. Thus start_xmit can run in parallel with rx processing
 * and the memory allocation on SMP systems.
 *
 * Note that running the skb reallocation in a bottom half opens up
 * another can of races which needs to be handled properly. In
 * particular it can happen that the interrupt handler tries to run
 * the reallocation while the bottom half is either running on another
 * CPU or was interrupted on the same CPU. To get around this the
 * driver uses bitops to prevent the reallocation routines from being
 * reentered.
 *
 * TX handling can also be done without holding any spin lock, wheee
 * this is fun! since tx_ret_csm is only written to by the interrupt
 * handler. The case to be aware of is when shutting down the device
 * and cleaning up where it is necessary to make sure that
 * start_xmit() is not running while this is happening. Well DaveM
 * informs me that this case is already protected against ... bye bye
 * Mr. Spin Lock, it was nice to know you.
 *
 * TX interrupts are now partly disabled so the NIC will only generate
 * TX interrupts for the number of coal ticks, not for the number of
 * TX packets in the queue. This should reduce the number of TX only,
 * i.e. when no RX processing is done, interrupts seen.
 */
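
/*
 * Illustrative example (added commentary, not from the original sources;
 * the parameter values are hypothetical): to load the driver as a module
 * with firmware tracing on the second NIC only, one might run
 *
 *	modprobe acenic trace=0,0x00001fff,0
 *
 * and then enable jumbo frames with `ifconfig eth0 mtu 9000'.
 */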
/*
 * Threshold values for RX buffer allocation - the low water marks for
 * when to start refilling the rings are set to 75% of the ring
 * sizes. It seems to make sense to refill the rings entirely from the
 * interrupt handler once it gets below the panic threshold, that way
 * we don't risk that the refilling is moved to another CPU when the
 * one running the interrupt handler just got the slab code hot in its
 * cache.
 */
#define RX_RING_SIZE		72
#define RX_MINI_SIZE		64
#define RX_JUMBO_SIZE		48

#define RX_PANIC_STD_THRES	16
#define RX_PANIC_STD_REFILL	(3*RX_PANIC_STD_THRES)/2
#define RX_LOW_STD_THRES	(3*RX_RING_SIZE)/4
#define RX_PANIC_MINI_THRES	12
#define RX_PANIC_MINI_REFILL	(3*RX_PANIC_MINI_THRES)/2
#define RX_LOW_MINI_THRES	(3*RX_MINI_SIZE)/4
#define RX_PANIC_JUMBO_THRES	6
#define RX_PANIC_JUMBO_REFILL	(3*RX_PANIC_JUMBO_THRES)/2
#define RX_LOW_JUMBO_THRES	(3*RX_JUMBO_SIZE)/4
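
/*
 * Worked example of the thresholds above (added commentary, derived from
 * the defines): with RX_RING_SIZE 72, RX_LOW_STD_THRES is (3*72)/4 = 54,
 * so the tasklet tops the standard ring back up to 72 entries once fewer
 * than 54 buffers remain. If the count ever drops below
 * RX_PANIC_STD_THRES (16), the interrupt handler refills
 * RX_PANIC_STD_REFILL (24) buffers itself instead of waiting for the
 * tasklet, per the allocation strategy described above.
 */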
/*
 * Size of the mini ring entries, basically these just should be big
 * enough to take TCP ACKs
 */
#define ACE_MINI_SIZE		100

#define ACE_MINI_BUFSIZE	(ACE_MINI_SIZE + 2 + 16)
#define ACE_STD_BUFSIZE		(ACE_STD_MTU + ETH_HLEN + 2+4+16)
#define ACE_JUMBO_BUFSIZE	(ACE_JUMBO_MTU + ETH_HLEN + 2+4+16)
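
/*
 * Note on the buffer size arithmetic above (added commentary): the extra
 * 2 bytes keep the IP header word aligned, the 4 bytes presumably leave
 * room for the trailing Ethernet FCS/VLAN tag, and the 16 bytes let the
 * rx refill code do skb_reserve(skb, 2 + 16) so the payload starts on a
 * fresh cache line; the DMA mappings below consistently map
 * BUFSIZE - (2 + 16) bytes.
 */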
/*
 * There seems to be a magic difference in the effect between 995 and 996
 * but little difference between 900 and 995 ... no idea why.
 *
 * There is now a default set of tuning parameters which is set, depending
 * on whether or not the user enables Jumbo frames. It's assumed that if
 * Jumbo frames are enabled, the user wants optimal tuning for that case.
 */
#define DEF_TX_COAL		400 /* 996 */
#define DEF_TX_MAX_DESC		60  /* was 40 */
#define DEF_RX_COAL		120 /* 1000 */
#define DEF_RX_MAX_DESC		25
#define DEF_TX_RATIO		21 /* 24 */

#define DEF_JUMBO_TX_COAL	20
#define DEF_JUMBO_TX_MAX_DESC	60
#define DEF_JUMBO_RX_COAL	30
#define DEF_JUMBO_RX_MAX_DESC	6
#define DEF_JUMBO_TX_RATIO	21
#if tigon2FwReleaseLocal < 20001118
/*
 * Standard firmware and early modifications duplicate
 * IRQ load without this flag (coal timer is never reset).
 * Note that with this flag tx_coal should be less than
 * time to xmit full tx ring.
 * 400usec is not so bad for tx ring size of 128.
 */
#define TX_COAL_INTS_ONLY	1	/* worth it */
#else
/*
 * With modified firmware, this is not necessary, but still useful.
 */
#define TX_COAL_INTS_ONLY	1
#endif

#define DEF_TRACE		0
#define DEF_STAT		(2 * TICKS_PER_SEC)
static int link[ACE_MAX_MOD_PARMS];
static int trace[ACE_MAX_MOD_PARMS];
static int tx_coal_tick[ACE_MAX_MOD_PARMS];
static int rx_coal_tick[ACE_MAX_MOD_PARMS];
static int max_tx_desc[ACE_MAX_MOD_PARMS];
static int max_rx_desc[ACE_MAX_MOD_PARMS];
static int tx_ratio[ACE_MAX_MOD_PARMS];
static int dis_pci_mem_inval[ACE_MAX_MOD_PARMS] = {1, 1, 1, 1, 1, 1, 1, 1};

static char version[] __initdata =
  "acenic.c: v0.92 08/05/2002  Jes Sorensen, linux-acenic@SunSITE.dk\n"
  "                            http://home.cern.ch/~jes/gige/acenic.html\n";

static struct net_device *root_dev;

static int probed __initdata = 0;
int __devinit acenic_probe (ACE_PROBE_ARG)
{
	struct net_device *dev;
	struct ace_private *ap;
	struct pci_dev *pdev = NULL;
	int boards_found = 0;
	int version_disp;

	if (probed)
		return -ENODEV;
	probed++;

	version_disp = 0;

	while ((pdev = pci_find_class(PCI_CLASS_NETWORK_ETHERNET<<8, pdev))) {

		if (!((pdev->vendor == PCI_VENDOR_ID_ALTEON) &&
		      ((pdev->device == PCI_DEVICE_ID_ALTEON_ACENIC_FIBRE) ||
		       (pdev->device == PCI_DEVICE_ID_ALTEON_ACENIC_COPPER)))&&
		    !((pdev->vendor == PCI_VENDOR_ID_3COM) &&
		      (pdev->device == PCI_DEVICE_ID_3COM_3C985)) &&
		    !((pdev->vendor == PCI_VENDOR_ID_NETGEAR) &&
		      ((pdev->device == PCI_DEVICE_ID_NETGEAR_GA620) ||
		       (pdev->device == PCI_DEVICE_ID_NETGEAR_GA620T))) &&
		    /*
		     * Farallon used the DEC vendor ID on their cards by
		     * mistake for a while
		     */
		    !((pdev->vendor == PCI_VENDOR_ID_DEC) &&
		      (pdev->device == PCI_DEVICE_ID_FARALLON_PN9000SX)) &&
		    !((pdev->vendor == PCI_VENDOR_ID_ALTEON) &&
		      (pdev->device == PCI_DEVICE_ID_FARALLON_PN9100T)) &&
		    !((pdev->vendor == PCI_VENDOR_ID_SGI) &&
		      (pdev->device == PCI_DEVICE_ID_SGI_ACENIC)))
			continue;

		dev = alloc_etherdev(sizeof(struct ace_private));
		if (dev == NULL) {
			printk(KERN_ERR "acenic: Unable to allocate "
			       "net_device structure!\n");
			break;
		}

		SET_MODULE_OWNER(dev);
		SET_NETDEV_DEV(dev, &pdev->dev);

		ap = dev->priv;
		ap->pdev = pdev;

		dev->open = &ace_open;
		dev->hard_start_xmit = &ace_start_xmit;
		dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
#if ACENIC_DO_VLAN
		dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
		dev->vlan_rx_register = ace_vlan_rx_register;
		dev->vlan_rx_kill_vid = ace_vlan_rx_kill_vid;
#endif
		{
			static void ace_watchdog(struct net_device *dev);
			dev->tx_timeout = &ace_watchdog;
			dev->watchdog_timeo = 5*HZ;
		}
		dev->stop = &ace_close;
		dev->get_stats = &ace_get_stats;
		dev->set_multicast_list = &ace_set_multicast_list;
		dev->do_ioctl = &ace_ioctl;
		dev->set_mac_address = &ace_set_mac_addr;
		dev->change_mtu = &ace_change_mtu;

		/* display version info if adapter is found */
		if (!version_disp) {
			/* set display flag to TRUE so that */
			/* we only display this string ONCE */
			version_disp = 1;
			printk(version);
		}

		if (pci_enable_device(pdev)) {
			kfree(dev);
			continue;
		}

		/*
		 * Enable master mode before we start playing with the
		 * pci_command word since pci_set_master() will modify
		 * it.
		 */
		pci_set_master(pdev);

		pci_read_config_word(pdev, PCI_COMMAND, &ap->pci_command);

		/* OpenFirmware on Mac's does not set this - DOH.. */
		if (!(ap->pci_command & PCI_COMMAND_MEMORY)) {
			printk(KERN_INFO "%s: Enabling PCI Memory Mapped "
			       "access - was not enabled by BIOS/Firmware\n",
			       dev->name);
			ap->pci_command = ap->pci_command | PCI_COMMAND_MEMORY;
			pci_write_config_word(ap->pdev, PCI_COMMAND,
					      ap->pci_command);
		}

		pci_read_config_byte(pdev, PCI_LATENCY_TIMER,
				     &ap->pci_latency);
		if (ap->pci_latency <= 0x40) {
			ap->pci_latency = 0x40;
			pci_write_config_byte(pdev, PCI_LATENCY_TIMER,
					      ap->pci_latency);
		}

		/*
		 * Remap the regs into kernel space - this is abuse of
		 * dev->base_addr since it was meant for I/O port
		 * addresses but who gives a damn.
		 */
		dev->base_addr = pci_resource_start(pdev, 0);
		ap->regs = (struct ace_regs *)ioremap(dev->base_addr, 0x4000);
		if (!ap->regs) {
			printk(KERN_ERR "%s: Unable to map I/O register, "
			       "AceNIC %i will be disabled.\n",
			       dev->name, boards_found);
			break;
		}

		if (register_netdev(dev)) {
			printk(KERN_ERR "acenic: device registration failed\n");
			kfree(dev);
			continue;
		}

		switch(pdev->vendor) {
		case PCI_VENDOR_ID_ALTEON:
			if (pdev->device == PCI_DEVICE_ID_FARALLON_PN9100T) {
				strncpy(ap->name, "Farallon PN9100-T "
					"Gigabit Ethernet", sizeof (ap->name));
				printk(KERN_INFO "%s: Farallon PN9100-T ",
				       dev->name);
			} else {
				strncpy(ap->name, "AceNIC Gigabit Ethernet",
					sizeof (ap->name));
				printk(KERN_INFO "%s: Alteon AceNIC ",
				       dev->name);
			}
			break;
		case PCI_VENDOR_ID_3COM:
			strncpy(ap->name, "3Com 3C985 Gigabit Ethernet",
				sizeof (ap->name));
			printk(KERN_INFO "%s: 3Com 3C985 ", dev->name);
			break;
		case PCI_VENDOR_ID_NETGEAR:
			strncpy(ap->name, "NetGear GA620 Gigabit Ethernet",
				sizeof (ap->name));
			printk(KERN_INFO "%s: NetGear GA620 ", dev->name);
			break;
		case PCI_VENDOR_ID_DEC:
			if (pdev->device == PCI_DEVICE_ID_FARALLON_PN9000SX) {
				strncpy(ap->name, "Farallon PN9000-SX "
					"Gigabit Ethernet", sizeof (ap->name));
				printk(KERN_INFO "%s: Farallon PN9000-SX ",
				       dev->name);
				break;
			}
			/* fall through */
		case PCI_VENDOR_ID_SGI:
			strncpy(ap->name, "SGI AceNIC Gigabit Ethernet",
				sizeof (ap->name));
			printk(KERN_INFO "%s: SGI AceNIC ", dev->name);
			break;
		default:
			strncpy(ap->name, "Unknown AceNIC based Gigabit "
				"Ethernet", sizeof (ap->name));
			printk(KERN_INFO "%s: Unknown AceNIC ", dev->name);
			break;
		}

		ap->name [sizeof (ap->name) - 1] = '\0';
		printk("Gigabit Ethernet at 0x%08lx, ", dev->base_addr);
#ifdef __sparc__
		printk("irq %s\n", __irq_itoa(pdev->irq));
#else
		printk("irq %i\n", pdev->irq);
#endif

#ifdef CONFIG_ACENIC_OMIT_TIGON_I
		if ((readl(&ap->regs->HostCtrl) >> 28) == 4) {
			printk(KERN_ERR "%s: Driver compiled without Tigon I"
			       " support - NIC disabled\n", dev->name);
			ace_init_cleanup(dev);
			kfree(dev);
			continue;
		}
#endif

		if (ace_allocate_descriptors(dev)) {
			/*
			 * ace_allocate_descriptors() calls
			 * ace_init_cleanup() on error.
			 */
			kfree(dev);
			continue;
		}

#ifdef MODULE
		if (boards_found >= ACE_MAX_MOD_PARMS)
			ap->board_idx = BOARD_IDX_OVERFLOW;
		else
			ap->board_idx = boards_found;
#else
		ap->board_idx = BOARD_IDX_STATIC;
#endif

		if (ace_init(dev)) {
			/*
			 * ace_init() calls ace_init_cleanup() on error.
			 */
			kfree(dev);
			continue;
		}

		if (ap->pci_using_dac)
			dev->features |= NETIF_F_HIGHDMA;

		boards_found++;
	}

	/*
	 * If we're at this point we're going through ace_probe() for
	 * the first time. Return success (0) if we've initialized 1
	 * or more boards. Otherwise, return failure (-ENODEV).
	 */
	if (boards_found > 0)
		return 0;
	else
		return -ENODEV;
}
MODULE_AUTHOR("Jes Sorensen <jes@trained-monkey.org>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("AceNIC/3C985/GA620 Gigabit Ethernet driver");
MODULE_PARM(link, "1-" __MODULE_STRING(8) "i");
MODULE_PARM(trace, "1-" __MODULE_STRING(8) "i");
MODULE_PARM(tx_coal_tick, "1-" __MODULE_STRING(8) "i");
MODULE_PARM(max_tx_desc, "1-" __MODULE_STRING(8) "i");
MODULE_PARM(rx_coal_tick, "1-" __MODULE_STRING(8) "i");
MODULE_PARM(max_rx_desc, "1-" __MODULE_STRING(8) "i");
MODULE_PARM(tx_ratio, "1-" __MODULE_STRING(8) "i");
MODULE_PARM_DESC(link, "AceNIC/3C985/NetGear link state");
MODULE_PARM_DESC(trace, "AceNIC/3C985/NetGear firmware trace level");
MODULE_PARM_DESC(tx_coal_tick, "AceNIC/3C985/GA620 max clock ticks to wait from first tx descriptor arrives");
MODULE_PARM_DESC(max_tx_desc, "AceNIC/3C985/GA620 max number of transmit descriptors to wait");
MODULE_PARM_DESC(rx_coal_tick, "AceNIC/3C985/GA620 max clock ticks to wait from first rx descriptor arrives");
MODULE_PARM_DESC(max_rx_desc, "AceNIC/3C985/GA620 max number of receive descriptors to wait");
MODULE_PARM_DESC(tx_ratio, "AceNIC/3C985/GA620 ratio of NIC memory used for TX/RX descriptors (range 0-63)");
static void __exit ace_module_cleanup(void)
{
	struct ace_private *ap;
	struct ace_regs *regs;
	struct net_device *next;
	short i;

	while (root_dev) {
		ap = root_dev->priv;
		next = ap->next;

		regs = ap->regs;

		writel(readl(&regs->CpuCtrl) | CPU_HALT, &regs->CpuCtrl);
		if (ap->version >= 2)
			writel(readl(&regs->CpuBCtrl) | CPU_HALT,
			       &regs->CpuBCtrl);
		/*
		 * This clears any pending interrupts
		 */
		writel(1, &regs->Mb0Lo);
		readl(&regs->CpuCtrl);	/* flush */

		/*
		 * Make sure no other CPUs are processing interrupts
		 * on the card before the buffers are being released.
		 * Otherwise one might experience some `interesting'
		 * effects.
		 *
		 * Then release the RX buffers - jumbo buffers were
		 * already released in ace_close().
		 */
		ace_sync_irq(root_dev->irq);

		for (i = 0; i < RX_STD_RING_ENTRIES; i++) {
			struct sk_buff *skb = ap->skb->rx_std_skbuff[i].skb;

			if (skb) {
				struct ring_info *ringp;
				dma_addr_t mapping;

				ringp = &ap->skb->rx_std_skbuff[i];
				mapping = pci_unmap_addr(ringp, mapping);
				pci_unmap_page(ap->pdev, mapping,
					       ACE_STD_BUFSIZE - (2 + 16),
					       PCI_DMA_FROMDEVICE);

				ap->rx_std_ring[i].size = 0;
				ap->skb->rx_std_skbuff[i].skb = NULL;
				dev_kfree_skb(skb);
			}
		}
		if (ap->version >= 2) {
			for (i = 0; i < RX_MINI_RING_ENTRIES; i++) {
				struct sk_buff *skb = ap->skb->rx_mini_skbuff[i].skb;

				if (skb) {
					struct ring_info *ringp;
					dma_addr_t mapping;

					ringp = &ap->skb->rx_mini_skbuff[i];
					mapping = pci_unmap_addr(ringp,mapping);
					pci_unmap_page(ap->pdev, mapping,
						       ACE_MINI_BUFSIZE - (2 + 16),
						       PCI_DMA_FROMDEVICE);

					ap->rx_mini_ring[i].size = 0;
					ap->skb->rx_mini_skbuff[i].skb = NULL;
					dev_kfree_skb(skb);
				}
			}
		}
		for (i = 0; i < RX_JUMBO_RING_ENTRIES; i++) {
			struct sk_buff *skb = ap->skb->rx_jumbo_skbuff[i].skb;
			if (skb) {
				struct ring_info *ringp;
				dma_addr_t mapping;

				ringp = &ap->skb->rx_jumbo_skbuff[i];
				mapping = pci_unmap_addr(ringp, mapping);
				pci_unmap_page(ap->pdev, mapping,
					       ACE_JUMBO_BUFSIZE - (2 + 16),
					       PCI_DMA_FROMDEVICE);

				ap->rx_jumbo_ring[i].size = 0;
				ap->skb->rx_jumbo_skbuff[i].skb = NULL;
				dev_kfree_skb(skb);
			}
		}

		ace_init_cleanup(root_dev);
		kfree(root_dev);
		root_dev = next;
	}
}
int __init ace_module_init(void)
{
	int status;

	root_dev = NULL;

#ifdef NEW_NETINIT
	status = acenic_probe();
#else
	status = acenic_probe(NULL);
#endif
	return status;
}

#if (LINUX_VERSION_CODE < 0x02032a)
#ifdef MODULE
int init_module(void)
{
	return ace_module_init();
}

void cleanup_module(void)
{
	ace_module_cleanup();
}
#endif
#else
module_init(ace_module_init);
module_exit(ace_module_cleanup);
#endif
static void ace_free_descriptors(struct net_device *dev)
{
	struct ace_private *ap = dev->priv;
	int size;

	if (ap->rx_std_ring != NULL) {
		size = (sizeof(struct rx_desc) *
			(RX_STD_RING_ENTRIES +
			 RX_JUMBO_RING_ENTRIES +
			 RX_MINI_RING_ENTRIES +
			 RX_RETURN_RING_ENTRIES));
		pci_free_consistent(ap->pdev, size, ap->rx_std_ring,
				    ap->rx_ring_base_dma);
		ap->rx_std_ring = NULL;
		ap->rx_jumbo_ring = NULL;
		ap->rx_mini_ring = NULL;
		ap->rx_return_ring = NULL;
	}
	if (ap->evt_ring != NULL) {
		size = (sizeof(struct event) * EVT_RING_ENTRIES);
		pci_free_consistent(ap->pdev, size, ap->evt_ring,
				    ap->evt_ring_dma);
		ap->evt_ring = NULL;
	}
	if (ap->tx_ring != NULL && !ACE_IS_TIGON_I(ap)) {
		size = (sizeof(struct tx_desc) * MAX_TX_RING_ENTRIES);
		pci_free_consistent(ap->pdev, size, ap->tx_ring,
				    ap->tx_ring_dma);
	}
	ap->tx_ring = NULL;

	if (ap->evt_prd != NULL) {
		pci_free_consistent(ap->pdev, sizeof(u32),
				    (void *)ap->evt_prd, ap->evt_prd_dma);
		ap->evt_prd = NULL;
	}
	if (ap->rx_ret_prd != NULL) {
		pci_free_consistent(ap->pdev, sizeof(u32),
				    (void *)ap->rx_ret_prd,
				    ap->rx_ret_prd_dma);
		ap->rx_ret_prd = NULL;
	}
	if (ap->tx_csm != NULL) {
		pci_free_consistent(ap->pdev, sizeof(u32),
				    (void *)ap->tx_csm, ap->tx_csm_dma);
		ap->tx_csm = NULL;
	}
}
static int ace_allocate_descriptors(struct net_device *dev)
{
	struct ace_private *ap = dev->priv;
	int size;

	size = (sizeof(struct rx_desc) *
		(RX_STD_RING_ENTRIES +
		 RX_JUMBO_RING_ENTRIES +
		 RX_MINI_RING_ENTRIES +
		 RX_RETURN_RING_ENTRIES));

	ap->rx_std_ring = pci_alloc_consistent(ap->pdev, size,
					       &ap->rx_ring_base_dma);
	if (ap->rx_std_ring == NULL)
		goto fail;

	ap->rx_jumbo_ring = ap->rx_std_ring + RX_STD_RING_ENTRIES;
	ap->rx_mini_ring = ap->rx_jumbo_ring + RX_JUMBO_RING_ENTRIES;
	ap->rx_return_ring = ap->rx_mini_ring + RX_MINI_RING_ENTRIES;

	size = (sizeof(struct event) * EVT_RING_ENTRIES);

	ap->evt_ring = pci_alloc_consistent(ap->pdev, size, &ap->evt_ring_dma);

	if (ap->evt_ring == NULL)
		goto fail;

	/*
	 * Only allocate a host TX ring for the Tigon II, the Tigon I
	 * has to use PCI registers for this ;-(
	 */
	if (!ACE_IS_TIGON_I(ap)) {
		size = (sizeof(struct tx_desc) * MAX_TX_RING_ENTRIES);

		ap->tx_ring = pci_alloc_consistent(ap->pdev, size,
						   &ap->tx_ring_dma);

		if (ap->tx_ring == NULL)
			goto fail;
	}

	ap->evt_prd = pci_alloc_consistent(ap->pdev, sizeof(u32),
					   &ap->evt_prd_dma);
	if (ap->evt_prd == NULL)
		goto fail;

	ap->rx_ret_prd = pci_alloc_consistent(ap->pdev, sizeof(u32),
					      &ap->rx_ret_prd_dma);
	if (ap->rx_ret_prd == NULL)
		goto fail;

	ap->tx_csm = pci_alloc_consistent(ap->pdev, sizeof(u32),
					  &ap->tx_csm_dma);
	if (ap->tx_csm == NULL)
		goto fail;

	return 0;

fail:
	/* Clean up what we've done so far */
	ace_init_cleanup(dev);
	return 1;
}
/*
 * Generic cleanup handling data allocated during init. Used when the
 * module is unloaded or if an error occurs during initialization
 */
static void ace_init_cleanup(struct net_device *dev)
{
	struct ace_private *ap;

	ap = dev->priv;

	ace_free_descriptors(dev);

	if (ap->info)
		pci_free_consistent(ap->pdev, sizeof(struct ace_info),
				    ap->info, ap->info_dma);
	if (ap->skb)
		kfree(ap->skb);
	if (ap->trace_buf)
		kfree(ap->trace_buf);

	if (dev->irq)
		free_irq(dev->irq, dev);

	unregister_netdev(dev);
	iounmap(ap->regs);
}
/*
 * Commands are considered to be slow.
 */
static inline void ace_issue_cmd(struct ace_regs *regs, struct cmd *cmd)
{
	u32 idx;

	idx = readl(&regs->CmdPrd);

	writel(*(u32 *)(cmd), &regs->CmdRng[idx]);
	idx = (idx + 1) % CMD_RING_ENTRIES;

	writel(idx, &regs->CmdPrd);
}
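
/*
 * Typical call pattern (added commentary, mirroring the callers of
 * ace_issue_cmd() later in this file): build a struct cmd on the stack
 * and post it, e.g.
 *
 *	struct cmd cmd;
 *
 *	cmd.evt = C_SET_RX_PRD_IDX;
 *	cmd.code = 0;
 *	cmd.idx = ap->rx_std_skbprd;
 *	ace_issue_cmd(regs, &cmd);
 */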
static int __init ace_init(struct net_device *dev)
{
	struct ace_private *ap;
	struct ace_regs *regs;
	struct ace_info *info = NULL;
	struct pci_dev *pdev;
	unsigned long myjif;
	u64 tmp_ptr;
	u32 tig_ver, mac1, mac2, tmp, pci_state;
	int board_idx, ecode = 0;
	short i;
	unsigned char cache_size;

	ap = dev->priv;
	regs = ap->regs;

	board_idx = ap->board_idx;

	/*
	 * aman@sgi.com - its useful to do a NIC reset here to
	 * address the `Firmware not running' problem subsequent
	 * to any crashes involving the NIC
	 */
	writel(HW_RESET | (HW_RESET << 24), &regs->HostCtrl);
	readl(&regs->HostCtrl);		/* PCI write posting */

	/*
	 * Don't access any other registers before this point!
	 */
#ifdef __BIG_ENDIAN
	/*
	 * This will most likely need BYTE_SWAP once we switch
	 * to using __raw_writel()
	 */
	writel((WORD_SWAP | CLR_INT | ((WORD_SWAP | CLR_INT) << 24)),
	       &regs->HostCtrl);
#else
	writel((CLR_INT | WORD_SWAP | ((CLR_INT | WORD_SWAP) << 24)),
	       &regs->HostCtrl);
#endif
	readl(&regs->HostCtrl);		/* PCI write posting */

	/*
	 * Stop the NIC CPU and clear pending interrupts
	 */
	writel(readl(&regs->CpuCtrl) | CPU_HALT, &regs->CpuCtrl);
	readl(&regs->CpuCtrl);		/* PCI write posting */
	writel(0, &regs->Mb0Lo);

	tig_ver = readl(&regs->HostCtrl) >> 28;
	switch(tig_ver){
#ifndef CONFIG_ACENIC_OMIT_TIGON_I
	case 4:
		printk(KERN_INFO "  Tigon I  (Rev. 4), Firmware: %i.%i.%i, ",
		       tigonFwReleaseMajor, tigonFwReleaseMinor,
		       tigonFwReleaseFix);
		writel(0, &regs->LocalCtrl);
		ap->version = 1;
		ap->tx_ring_entries = TIGON_I_TX_RING_ENTRIES;
		break;
#endif
	case 6:
		printk(KERN_INFO "  Tigon II (Rev. %i), Firmware: %i.%i.%i, ",
		       tig_ver, tigon2FwReleaseMajor, tigon2FwReleaseMinor,
		       tigon2FwReleaseFix);
		writel(readl(&regs->CpuBCtrl) | CPU_HALT, &regs->CpuBCtrl);
		readl(&regs->CpuBCtrl);		/* PCI write posting */
		/*
		 * The SRAM bank size does _not_ indicate the amount
		 * of memory on the card, it controls the _bank_ size!
		 * Ie. a 1MB AceNIC will have two banks of 512KB.
		 */
		writel(SRAM_BANK_512K, &regs->LocalCtrl);
		writel(SYNC_SRAM_TIMING, &regs->MiscCfg);
		ap->version = 2;
		ap->tx_ring_entries = MAX_TX_RING_ENTRIES;
		break;
	default:
		printk(KERN_WARNING "  Unsupported Tigon version detected "
		       "(%i)\n", tig_ver);
		ecode = -ENODEV;
		goto init_error;
	}

	/*
	 * ModeStat _must_ be set after the SRAM settings as this change
	 * seems to corrupt the ModeStat and possible other registers.
	 * The SRAM settings survive resets and setting it to the same
	 * value a second time works as well. This is what caused the
	 * `Firmware not running' problem on the Tigon II.
	 */
#ifdef __BIG_ENDIAN
	writel(ACE_BYTE_SWAP_DMA | ACE_WARN | ACE_FATAL | ACE_BYTE_SWAP_BD |
	       ACE_WORD_SWAP_BD | ACE_NO_JUMBO_FRAG, &regs->ModeStat);
#else
	writel(ACE_BYTE_SWAP_DMA | ACE_WARN | ACE_FATAL |
	       ACE_WORD_SWAP_BD | ACE_NO_JUMBO_FRAG, &regs->ModeStat);
#endif
	readl(&regs->ModeStat);		/* PCI write posting */
	mac1 = 0;
	for(i = 0; i < 4; i++) {
		mac1 = mac1 << 8;
		tmp = read_eeprom_byte(dev, 0x8c+i);
		mac1 |= (tmp & 0xff);
	}
	mac2 = 0;
	for(i = 4; i < 8; i++) {
		mac2 = mac2 << 8;
		tmp = read_eeprom_byte(dev, 0x8c+i);
		mac2 |= (tmp & 0xff);
	}

	writel(mac1, &regs->MacAddrHi);
	writel(mac2, &regs->MacAddrLo);

	printk("MAC: %02x:%02x:%02x:%02x:%02x:%02x\n",
	       (mac1 >> 8) & 0xff, mac1 & 0xff, (mac2 >> 24) &0xff,
	       (mac2 >> 16) & 0xff, (mac2 >> 8) & 0xff, mac2 & 0xff);

	dev->dev_addr[0] = (mac1 >> 8) & 0xff;
	dev->dev_addr[1] = mac1 & 0xff;
	dev->dev_addr[2] = (mac2 >> 24) & 0xff;
	dev->dev_addr[3] = (mac2 >> 16) & 0xff;
	dev->dev_addr[4] = (mac2 >> 8) & 0xff;
	dev->dev_addr[5] = mac2 & 0xff;
	/*
	 * Looks like this is necessary to deal with on all architectures,
	 * even this %$#%$# N440BX Intel based thing doesn't get it right.
	 * Ie. having two NICs in the machine, one will have the cache
	 * line set at boot time, the other will not.
	 */
	pdev = ap->pdev;
	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache_size);
	cache_size <<= 2;
	if (cache_size != SMP_CACHE_BYTES) {
		printk(KERN_INFO "  PCI cache line size set incorrectly "
		       "(%i bytes) by BIOS/FW, ", cache_size);
		if (cache_size > SMP_CACHE_BYTES)
			printk("expecting %i\n", SMP_CACHE_BYTES);
		else {
			printk("correcting to %i\n", SMP_CACHE_BYTES);
			pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
					      SMP_CACHE_BYTES >> 2);
		}
	}
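
	/*
	 * (Added note) The PCI cache line size register holds the value in
	 * units of 32-bit words, which is why the value read above is
	 * shifted left by 2 and the corrected value is written back
	 * shifted right by 2.
	 */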
	pci_state = readl(&regs->PciState);
	printk(KERN_INFO "  PCI bus width: %i bits, speed: %iMHz, "
	       "latency: %i clks\n",
	       (pci_state & PCI_32BIT) ? 32 : 64,
	       (pci_state & PCI_66MHZ) ? 66 : 33,
	       ap->pci_latency);

	/*
	 * Set the max DMA transfer size. Seems that for most systems
	 * the performance is better when no MAX parameter is
	 * set. However for systems enabling PCI write and invalidate,
	 * DMA writes must be set to the L1 cache line size to get
	 * optimal performance.
	 *
	 * The default is now to turn the PCI write and invalidate off
	 * - that is what Alteon does for NT.
	 */
	tmp = READ_CMD_MEM | WRITE_CMD_MEM;
	if (ap->version >= 2) {
		tmp |= (MEM_READ_MULTIPLE | (pci_state & PCI_66MHZ));
		/*
		 * Tuning parameters only supported for 8 cards
		 */
		if (board_idx == BOARD_IDX_OVERFLOW ||
		    dis_pci_mem_inval[board_idx]) {
			if (ap->pci_command & PCI_COMMAND_INVALIDATE) {
				ap->pci_command &= ~PCI_COMMAND_INVALIDATE;
				pci_write_config_word(pdev, PCI_COMMAND,
						      ap->pci_command);
				printk(KERN_INFO "  Disabling PCI memory "
				       "write and invalidate\n");
			}
		} else if (ap->pci_command & PCI_COMMAND_INVALIDATE) {
			printk(KERN_INFO "  PCI memory write & invalidate "
			       "enabled by BIOS, enabling counter measures\n");

			switch(SMP_CACHE_BYTES) {
			case 16:
				tmp |= DMA_WRITE_MAX_16;
				break;
			case 32:
				tmp |= DMA_WRITE_MAX_32;
				break;
			case 64:
				tmp |= DMA_WRITE_MAX_64;
				break;
			case 128:
				tmp |= DMA_WRITE_MAX_128;
				break;
			default:
				printk(KERN_INFO "  Cache line size %i not "
				       "supported, PCI write and invalidate "
				       "disabled\n", SMP_CACHE_BYTES);
				ap->pci_command &= ~PCI_COMMAND_INVALIDATE;
				pci_write_config_word(pdev, PCI_COMMAND,
						      ap->pci_command);
			}
		}
	}

#ifdef __sparc__
	/*
	 * On this platform, we know what the best dma settings
	 * are. We use 64-byte maximum bursts, because if we
	 * burst larger than the cache line size (or even cross
	 * a 64byte boundary in a single burst) the UltraSparc
	 * PCI controller will disconnect at 64-byte multiples.
	 *
	 * Read-multiple will be properly enabled above, and when
	 * set will give the PCI controller proper hints about
	 * prefetching.
	 */
	tmp &= ~DMA_READ_WRITE_MASK;
	tmp |= DMA_READ_MAX_64;
	tmp |= DMA_WRITE_MAX_64;
#endif
#ifdef __alpha__
	tmp &= ~DMA_READ_WRITE_MASK;
	tmp |= DMA_READ_MAX_128;
	/*
	 * All the docs say MUST NOT. Well, I did.
	 * Nothing terrible happens, if we load wrong size.
	 * Bit w&i still works better!
	 */
	tmp |= DMA_WRITE_MAX_128;
#endif
	writel(tmp, &regs->PciState);

	/*
	 * The Host PCI bus controller driver has to set FBB.
	 * If all devices on that PCI bus support FBB, then the controller
	 * can enable FBB support in the Host PCI Bus controller (or on
	 * the PCI-PCI bridge if that applies).
	 */
	/*
	 * I have received reports from people having problems when this
	 * bit is enabled.
	 */
#if 0
	if (!(ap->pci_command & PCI_COMMAND_FAST_BACK)) {
		printk(KERN_INFO "  Enabling PCI Fast Back to Back\n");
		ap->pci_command |= PCI_COMMAND_FAST_BACK;
		pci_write_config_word(pdev, PCI_COMMAND, ap->pci_command);
	}
#endif

	/*
	 * Configure DMA attributes.
	 */
	if (!pci_set_dma_mask(pdev, 0xffffffffffffffffULL)) {
		ap->pci_using_dac = 1;
	} else if (!pci_set_dma_mask(pdev, 0xffffffffULL)) {
		ap->pci_using_dac = 0;
	} else {
		ecode = -ENODEV;
		goto init_error;
	}
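
	/*
	 * (Added note) The 64-bit mask is tried first so the NIC can DMA
	 * to and from buffers above 4GB using dual address cycles; when
	 * only the 32-bit mask succeeds, pci_using_dac stays 0 and
	 * acenic_probe() will not set NETIF_F_HIGHDMA on the device.
	 */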
	/*
	 * Initialize the generic info block and the command+event rings
	 * and the control blocks for the transmit and receive rings
	 * as they need to be setup once and for all.
	 */
	if (!(info = pci_alloc_consistent(ap->pdev, sizeof(struct ace_info),
					  &ap->info_dma))) {
		ecode = -EAGAIN;
		goto init_error;
	}
	ap->info = info;

	/*
	 * Get the memory for the skb rings.
	 */
	if (!(ap->skb = kmalloc(sizeof(struct ace_skb), GFP_KERNEL))) {
		ecode = -EAGAIN;
		goto init_error;
	}

	ecode = request_irq(pdev->irq, ace_interrupt, SA_SHIRQ,
			    dev->name, dev);
	if (ecode) {
		printk(KERN_WARNING "%s: Requested IRQ %d is busy\n",
		       dev->name, pdev->irq);
		goto init_error;
	}
	dev->irq = pdev->irq;

	/*
	 * Register the device here to be able to catch allocated
	 * interrupt handlers in case the firmware doesn't come up.
	 */
	ap->next = root_dev;
	root_dev = dev;

#ifdef INDEX_DEBUG
	spin_lock_init(&ap->debug_lock);
	ap->last_tx = ACE_TX_RING_ENTRIES(ap) - 1;
	ap->last_std_rx = 0;
	ap->last_mini_rx = 0;
#endif

	memset(ap->info, 0, sizeof(struct ace_info));
	memset(ap->skb, 0, sizeof(struct ace_skb));

	ace_load_firmware(dev);
	ap->fw_running = 0;

	tmp_ptr = ap->info_dma;
	writel(tmp_ptr >> 32, &regs->InfoPtrHi);
	writel(tmp_ptr & 0xffffffff, &regs->InfoPtrLo);

	memset(ap->evt_ring, 0, EVT_RING_ENTRIES * sizeof(struct event));

	set_aceaddr(&info->evt_ctrl.rngptr, ap->evt_ring_dma);
	info->evt_ctrl.flags = 0;

	*(ap->evt_prd) = 0;
	set_aceaddr(&info->evt_prd_ptr, ap->evt_prd_dma);
	writel(0, &regs->EvtCsm);

	set_aceaddr(&info->cmd_ctrl.rngptr, 0x100);
	info->cmd_ctrl.flags = 0;
	info->cmd_ctrl.max_len = 0;

	for (i = 0; i < CMD_RING_ENTRIES; i++)
		writel(0, &regs->CmdRng[i]);

	writel(0, &regs->CmdPrd);
	writel(0, &regs->CmdCsm);

	tmp_ptr = ap->info_dma;
	tmp_ptr += (unsigned long) &(((struct ace_info *)0)->s.stats);
	set_aceaddr(&info->stats2_ptr, (dma_addr_t) tmp_ptr);
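
	/*
	 * (Added note) The cast-through-zero expression above is the
	 * classic offsetof() idiom: it yields the byte offset of s.stats
	 * within struct ace_info, which is then added to the info block's
	 * DMA base. With <linux/stddef.h> it could equivalently be written
	 *
	 *	tmp_ptr += offsetof(struct ace_info, s.stats);
	 */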
	set_aceaddr(&info->rx_std_ctrl.rngptr, ap->rx_ring_base_dma);
	info->rx_std_ctrl.max_len = ACE_STD_MTU + ETH_HLEN + 4;
	info->rx_std_ctrl.flags =
		RCB_FLG_TCP_UDP_SUM | RCB_FLG_NO_PSEUDO_HDR | ACE_RCB_VLAN_FLAG;

	memset(ap->rx_std_ring, 0,
	       RX_STD_RING_ENTRIES * sizeof(struct rx_desc));

	for (i = 0; i < RX_STD_RING_ENTRIES; i++)
		ap->rx_std_ring[i].flags = BD_FLG_TCP_UDP_SUM;

	ap->rx_std_skbprd = 0;
	atomic_set(&ap->cur_rx_bufs, 0);

	set_aceaddr(&info->rx_jumbo_ctrl.rngptr,
		    (ap->rx_ring_base_dma +
		     (sizeof(struct rx_desc) * RX_STD_RING_ENTRIES)));
	info->rx_jumbo_ctrl.max_len = 0;
	info->rx_jumbo_ctrl.flags =
		RCB_FLG_TCP_UDP_SUM | RCB_FLG_NO_PSEUDO_HDR | ACE_RCB_VLAN_FLAG;

	memset(ap->rx_jumbo_ring, 0,
	       RX_JUMBO_RING_ENTRIES * sizeof(struct rx_desc));

	for (i = 0; i < RX_JUMBO_RING_ENTRIES; i++)
		ap->rx_jumbo_ring[i].flags = BD_FLG_TCP_UDP_SUM | BD_FLG_JUMBO;

	ap->rx_jumbo_skbprd = 0;
	atomic_set(&ap->cur_jumbo_bufs, 0);

	memset(ap->rx_mini_ring, 0,
	       RX_MINI_RING_ENTRIES * sizeof(struct rx_desc));

	if (ap->version >= 2) {
		set_aceaddr(&info->rx_mini_ctrl.rngptr,
			    (ap->rx_ring_base_dma +
			     (sizeof(struct rx_desc) *
			      (RX_STD_RING_ENTRIES +
			       RX_JUMBO_RING_ENTRIES))));
		info->rx_mini_ctrl.max_len = ACE_MINI_SIZE;
		info->rx_mini_ctrl.flags =
			RCB_FLG_TCP_UDP_SUM|RCB_FLG_NO_PSEUDO_HDR|ACE_RCB_VLAN_FLAG;

		for (i = 0; i < RX_MINI_RING_ENTRIES; i++)
			ap->rx_mini_ring[i].flags =
				BD_FLG_TCP_UDP_SUM | BD_FLG_MINI;
	} else {
		set_aceaddr(&info->rx_mini_ctrl.rngptr, 0);
		info->rx_mini_ctrl.flags = RCB_FLG_RNG_DISABLE;
		info->rx_mini_ctrl.max_len = 0;
	}

	ap->rx_mini_skbprd = 0;
	atomic_set(&ap->cur_mini_bufs, 0);

	set_aceaddr(&info->rx_return_ctrl.rngptr,
		    (ap->rx_ring_base_dma +
		     (sizeof(struct rx_desc) *
		      (RX_STD_RING_ENTRIES +
		       RX_JUMBO_RING_ENTRIES +
		       RX_MINI_RING_ENTRIES))));
	info->rx_return_ctrl.flags = 0;
	info->rx_return_ctrl.max_len = RX_RETURN_RING_ENTRIES;

	memset(ap->rx_return_ring, 0,
	       RX_RETURN_RING_ENTRIES * sizeof(struct rx_desc));

	set_aceaddr(&info->rx_ret_prd_ptr, ap->rx_ret_prd_dma);
	*(ap->rx_ret_prd) = 0;

	writel(TX_RING_BASE, &regs->WinBase);

	if (ACE_IS_TIGON_I(ap)) {
		ap->tx_ring = (struct tx_desc *)regs->Window;
		for (i = 0; i < (TIGON_I_TX_RING_ENTRIES *
				 sizeof(struct tx_desc) / 4); i++) {
			writel(0, (unsigned long)ap->tx_ring + i * 4);
		}

		set_aceaddr(&info->tx_ctrl.rngptr, TX_RING_BASE);
	} else {
		memset(ap->tx_ring, 0,
		       MAX_TX_RING_ENTRIES * sizeof(struct tx_desc));

		set_aceaddr(&info->tx_ctrl.rngptr, ap->tx_ring_dma);
	}

	info->tx_ctrl.max_len = ACE_TX_RING_ENTRIES(ap);
	tmp = RCB_FLG_TCP_UDP_SUM | RCB_FLG_NO_PSEUDO_HDR | ACE_RCB_VLAN_FLAG;

	/*
	 * The Tigon I does not like having the TX ring in host memory ;-(
	 */
	if (!ACE_IS_TIGON_I(ap))
		tmp |= RCB_FLG_TX_HOST_RING;
#if TX_COAL_INTS_ONLY
	tmp |= RCB_FLG_COAL_INT_ONLY;
#endif
	info->tx_ctrl.flags = tmp;

	set_aceaddr(&info->tx_csm_ptr, ap->tx_csm_dma);

	/*
	 * Potential item for tuning parameter
	 */
#if 0
	writel(DMA_THRESH_16W, &regs->DmaReadCfg);
	writel(DMA_THRESH_16W, &regs->DmaWriteCfg);
#else
	writel(DMA_THRESH_8W, &regs->DmaReadCfg);
	writel(DMA_THRESH_8W, &regs->DmaWriteCfg);
#endif

	writel(0, &regs->MaskInt);
	writel(1, &regs->IfIdx);
#if 0
	/*
	 * McKinley boxes do not like us fiddling with AssistState
	 * this early
	 */
	writel(1, &regs->AssistState);
#endif

	writel(DEF_STAT, &regs->TuneStatTicks);
	writel(DEF_TRACE, &regs->TuneTrace);

	ace_set_rxtx_parms(dev, 0);

	if (board_idx == BOARD_IDX_OVERFLOW) {
		printk(KERN_WARNING "%s: more than %i NICs detected, "
		       "ignoring module parameters!\n",
		       dev->name, ACE_MAX_MOD_PARMS);
	} else if (board_idx >= 0) {
		if (tx_coal_tick[board_idx])
			writel(tx_coal_tick[board_idx],
			       &regs->TuneTxCoalTicks);
		if (max_tx_desc[board_idx])
			writel(max_tx_desc[board_idx], &regs->TuneMaxTxDesc);

		if (rx_coal_tick[board_idx])
			writel(rx_coal_tick[board_idx],
			       &regs->TuneRxCoalTicks);
		if (max_rx_desc[board_idx])
			writel(max_rx_desc[board_idx], &regs->TuneMaxRxDesc);

		if (trace[board_idx])
			writel(trace[board_idx], &regs->TuneTrace);

		if ((tx_ratio[board_idx] > 0) && (tx_ratio[board_idx] < 64))
			writel(tx_ratio[board_idx], &regs->TxBufRat);
	}

	/*
	 * Default link parameters
	 */
	tmp = LNK_ENABLE | LNK_FULL_DUPLEX | LNK_1000MB | LNK_100MB |
		LNK_10MB | LNK_RX_FLOW_CTL_Y | LNK_NEG_FCTL | LNK_NEGOTIATE;
	if(ap->version >= 2)
		tmp |= LNK_TX_FLOW_CTL_Y;
1674 if ((board_idx
>= 0) && link
[board_idx
]) {
1675 int option
= link
[board_idx
];
1679 if (option
& 0x01) {
1680 printk(KERN_INFO
"%s: Setting half duplex link\n",
1682 tmp
&= ~LNK_FULL_DUPLEX
;
1685 tmp
&= ~LNK_NEGOTIATE
;
1692 if ((option
& 0x70) == 0) {
1693 printk(KERN_WARNING
"%s: No media speed specified, "
1694 "forcing auto negotiation\n", dev
->name
);
1695 tmp
|= LNK_NEGOTIATE
| LNK_1000MB
|
1696 LNK_100MB
| LNK_10MB
;
1698 if ((option
& 0x100) == 0)
1699 tmp
|= LNK_NEG_FCTL
;
1701 printk(KERN_INFO
"%s: Disabling flow control "
1702 "negotiation\n", dev
->name
);
1704 tmp
|= LNK_RX_FLOW_CTL_Y
;
1705 if ((option
& 0x400) && (ap
->version
>= 2)) {
1706 printk(KERN_INFO
"%s: Enabling TX flow control\n",
1708 tmp
|= LNK_TX_FLOW_CTL_Y
;
1713 writel(tmp
, ®s
->TuneLink
);
1714 if (ap
->version
>= 2)
1715 writel(tmp
, ®s
->TuneFastLink
);
1717 if (ACE_IS_TIGON_I(ap
))
1718 writel(tigonFwStartAddr
, ®s
->Pc
);
1719 if (ap
->version
== 2)
1720 writel(tigon2FwStartAddr
, ®s
->Pc
);
1722 writel(0, ®s
->Mb0Lo
);
1725 * Set tx_csm before we start receiving interrupts, otherwise
1726 * the interrupt handler might think it is supposed to process
1727 * tx ints before we are up and running, which may cause a null
1728 * pointer access in the int handler.
1731 ap
->tx_prd
= *(ap
->tx_csm
) = ap
->tx_ret_csm
= 0;
1734 ace_set_txprd(regs
, ap
, 0);
1735 writel(0, ®s
->RxRetCsm
);
1738 * Zero the stats before starting the interface
1740 memset(&ap
->stats
, 0, sizeof(ap
->stats
));
1743 * Enable DMA engine now.
1744 * If we do this sooner, Mckinley box pukes.
1745 * I assume it's because Tigon II DMA engine wants to check
1746 * *something* even before the CPU is started.
1748 writel(1, ®s
->AssistState
); /* enable DMA */
1753 writel(readl(®s
->CpuCtrl
) & ~(CPU_HALT
|CPU_TRACE
), ®s
->CpuCtrl
);
1754 readl(®s
->CpuCtrl
);
1757 * Wait for the firmware to spin up - max 3 seconds.
1759 myjif
= jiffies
+ 3 * HZ
;
1760 while (time_before(jiffies
, myjif
) && !ap
->fw_running
);
1762 if (!ap
->fw_running
) {
1763 printk(KERN_ERR
"%s: Firmware NOT running!\n", dev
->name
);
1766 writel(readl(®s
->CpuCtrl
) | CPU_HALT
, ®s
->CpuCtrl
);
1767 readl(®s
->CpuCtrl
);
1769 /* aman@sgi.com - account for badly behaving firmware/NIC:
1770 * - have observed that the NIC may continue to generate
1771 * interrupts for some reason; attempt to stop it - halt
1772 * second CPU for Tigon II cards, and also clear Mb0
1773 * - if we're a module, we'll fail to load if this was
1774 * the only GbE card in the system => if the kernel does
1775 * see an interrupt from the NIC, code to handle it is
1776 * gone and OOps! - so free_irq also
1778 if (ap
->version
>= 2)
1779 writel(readl(®s
->CpuBCtrl
) | CPU_HALT
,
1781 writel(0, ®s
->Mb0Lo
);
1782 readl(®s
->Mb0Lo
);
1789 * We load the ring here as there seem to be no way to tell the
1790 * firmware to wipe the ring without re-initializing it.
1792 if (!test_and_set_bit(0, &ap
->std_refill_busy
))
1793 ace_load_std_rx_ring(ap
, RX_RING_SIZE
);
1795 printk(KERN_ERR
"%s: Someone is busy refilling the RX ring\n",
1797 if (ap
->version
>= 2) {
1798 if (!test_and_set_bit(0, &ap
->mini_refill_busy
))
1799 ace_load_mini_rx_ring(ap
, RX_MINI_SIZE
);
1801 printk(KERN_ERR
"%s: Someone is busy refilling "
1802 "the RX mini ring\n", dev
->name
);
1807 ace_init_cleanup(dev
);
static void ace_set_rxtx_parms(struct net_device *dev, int jumbo)
{
	struct ace_private *ap;
	struct ace_regs *regs;
	int board_idx;

	ap = dev->priv;
	regs = ap->regs;

	board_idx = ap->board_idx;

	if (board_idx >= 0) {
		if (!jumbo) {
			if (!tx_coal_tick[board_idx])
				writel(DEF_TX_COAL, &regs->TuneTxCoalTicks);
			if (!max_tx_desc[board_idx])
				writel(DEF_TX_MAX_DESC, &regs->TuneMaxTxDesc);
			if (!rx_coal_tick[board_idx])
				writel(DEF_RX_COAL, &regs->TuneRxCoalTicks);
			if (!max_rx_desc[board_idx])
				writel(DEF_RX_MAX_DESC, &regs->TuneMaxRxDesc);
			if (!tx_ratio[board_idx])
				writel(DEF_TX_RATIO, &regs->TxBufRat);
		} else {
			if (!tx_coal_tick[board_idx])
				writel(DEF_JUMBO_TX_COAL,
				       &regs->TuneTxCoalTicks);
			if (!max_tx_desc[board_idx])
				writel(DEF_JUMBO_TX_MAX_DESC,
				       &regs->TuneMaxTxDesc);
			if (!rx_coal_tick[board_idx])
				writel(DEF_JUMBO_RX_COAL,
				       &regs->TuneRxCoalTicks);
			if (!max_rx_desc[board_idx])
				writel(DEF_JUMBO_RX_MAX_DESC,
				       &regs->TuneMaxRxDesc);
			if (!tx_ratio[board_idx])
				writel(DEF_JUMBO_TX_RATIO, &regs->TxBufRat);
		}
	}
}
static void ace_watchdog(struct net_device *data)
{
	struct net_device *dev = data;
	struct ace_private *ap = dev->priv;
	struct ace_regs *regs = ap->regs;

	/*
	 * We haven't received a stats update event for more than 2.5
	 * seconds and there is data in the transmit queue, thus we
	 * assume the card is stuck.
	 */
	if (*ap->tx_csm != ap->tx_ret_csm) {
		printk(KERN_WARNING "%s: Transmitter is stuck, %08x\n",
		       dev->name, (unsigned int)readl(&regs->HostCtrl));
		/* This can happen due to ieee flow control. */
	} else {
		printk(KERN_DEBUG "%s: BUG... transmitter died. Kicking it.\n",
		       dev->name);
		netif_wake_queue(dev);
	}
}
static void ace_tasklet(unsigned long dev)
{
	struct ace_private *ap = ((struct net_device *)dev)->priv;
	int cur_size;

	cur_size = atomic_read(&ap->cur_rx_bufs);
	if ((cur_size < RX_LOW_STD_THRES) &&
	    !test_and_set_bit(0, &ap->std_refill_busy)) {
#ifdef DEBUG
		printk("refilling buffers (current %i)\n", cur_size);
#endif
		ace_load_std_rx_ring(ap, RX_RING_SIZE - cur_size);
	}

	if (ap->version >= 2) {
		cur_size = atomic_read(&ap->cur_mini_bufs);
		if ((cur_size < RX_LOW_MINI_THRES) &&
		    !test_and_set_bit(0, &ap->mini_refill_busy)) {
#ifdef DEBUG
			printk("refilling mini buffers (current %i)\n",
			       cur_size);
#endif
			ace_load_mini_rx_ring(ap, RX_MINI_SIZE - cur_size);
		}
	}

	cur_size = atomic_read(&ap->cur_jumbo_bufs);
	if (ap->jumbo && (cur_size < RX_LOW_JUMBO_THRES) &&
	    !test_and_set_bit(0, &ap->jumbo_refill_busy)) {
#ifdef DEBUG
		printk("refilling jumbo buffers (current %i)\n", cur_size);
#endif
		ace_load_jumbo_rx_ring(ap, RX_JUMBO_SIZE - cur_size);
	}
	ap->tasklet_pending = 0;
}
/*
 * Copy the contents of the NIC's trace buffer to kernel memory.
 */
static void ace_dump_trace(struct ace_private *ap)
{
	if (!ap->trace_buf)
		if (!(ap->trace_buf = kmalloc(ACE_TRACE_SIZE, GFP_KERNEL)))
			return;
}
/*
 * Load the standard rx ring.
 *
 * Loading rings is safe without holding the spin lock since this is
 * done only before the device is enabled, thus no interrupts are
 * generated and by the interrupt handler/tasklet handler.
 */
static void ace_load_std_rx_ring(struct ace_private *ap, int nr_bufs)
{
	struct ace_regs *regs;
	short i, idx;

	regs = ap->regs;

	prefetchw(&ap->cur_rx_bufs);

	idx = ap->rx_std_skbprd;

	for (i = 0; i < nr_bufs; i++) {
		struct sk_buff *skb;
		struct rx_desc *rd;
		dma_addr_t mapping;

		skb = alloc_skb(ACE_STD_BUFSIZE, GFP_ATOMIC);
		if (!skb)
			break;

		/*
		 * Make sure IP header starts on a fresh cache line.
		 */
		skb_reserve(skb, 2 + 16);
		mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
				       ((unsigned long)skb->data & ~PAGE_MASK),
				       ACE_STD_BUFSIZE - (2 + 16),
				       PCI_DMA_FROMDEVICE);
		ap->skb->rx_std_skbuff[idx].skb = skb;
		pci_unmap_addr_set(&ap->skb->rx_std_skbuff[idx],
				   mapping, mapping);

		rd = &ap->rx_std_ring[idx];
		set_aceaddr(&rd->addr, mapping);
		rd->size = ACE_STD_MTU + ETH_HLEN + 4;
		rd->idx = idx;
		idx = (idx + 1) % RX_STD_RING_ENTRIES;
	}

	if (!i)
		goto error_out;

	atomic_add(i, &ap->cur_rx_bufs);
	ap->rx_std_skbprd = idx;

	if (ACE_IS_TIGON_I(ap)) {
		struct cmd cmd;
		cmd.evt = C_SET_RX_PRD_IDX;
		cmd.code = 0;
		cmd.idx = ap->rx_std_skbprd;
		ace_issue_cmd(regs, &cmd);
	} else {
		writel(idx, &regs->RxStdPrd);
	}

 out:
	clear_bit(0, &ap->std_refill_busy);
	return;

 error_out:
	printk(KERN_INFO "Out of memory when allocating "
	       "standard receive buffers\n");
	goto out;
}
static void ace_load_mini_rx_ring(struct ace_private *ap, int nr_bufs)
{
	struct ace_regs *regs;
	short i, idx;

	regs = ap->regs;

	prefetchw(&ap->cur_mini_bufs);

	idx = ap->rx_mini_skbprd;
	for (i = 0; i < nr_bufs; i++) {
		struct sk_buff *skb;
		struct rx_desc *rd;
		dma_addr_t mapping;

		skb = alloc_skb(ACE_MINI_BUFSIZE, GFP_ATOMIC);
		if (!skb)
			break;

		/*
		 * Make sure the IP header ends up on a fresh cache line
		 */
		skb_reserve(skb, 2 + 16);
		mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
				       ((unsigned long)skb->data & ~PAGE_MASK),
				       ACE_MINI_BUFSIZE - (2 + 16),
				       PCI_DMA_FROMDEVICE);
		ap->skb->rx_mini_skbuff[idx].skb = skb;
		pci_unmap_addr_set(&ap->skb->rx_mini_skbuff[idx],
				   mapping, mapping);

		rd = &ap->rx_mini_ring[idx];
		set_aceaddr(&rd->addr, mapping);
		rd->size = ACE_MINI_SIZE;
		rd->idx = idx;
		idx = (idx + 1) % RX_MINI_RING_ENTRIES;
	}

	if (!i)
		goto error_out;

	atomic_add(i, &ap->cur_mini_bufs);

	ap->rx_mini_skbprd = idx;

	writel(idx, &regs->RxMiniPrd);

 out:
	clear_bit(0, &ap->mini_refill_busy);
	return;

 error_out:
	printk(KERN_INFO "Out of memory when allocating "
	       "mini receive buffers\n");
	goto out;
}
/*
 * Load the jumbo rx ring, this may happen at any time if the MTU
 * is changed to a value > 1500.
 */
static void ace_load_jumbo_rx_ring(struct ace_private *ap, int nr_bufs)
{
	struct ace_regs *regs;
	short i, idx;

	regs = ap->regs;

	idx = ap->rx_jumbo_skbprd;

	for (i = 0; i < nr_bufs; i++) {
		struct sk_buff *skb;
		struct rx_desc *rd;
		dma_addr_t mapping;

		skb = alloc_skb(ACE_JUMBO_BUFSIZE, GFP_ATOMIC);
		if (!skb)
			break;

		/*
		 * Make sure the IP header ends up on a fresh cache line
		 */
		skb_reserve(skb, 2 + 16);
		mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
				       ((unsigned long)skb->data & ~PAGE_MASK),
				       ACE_JUMBO_BUFSIZE - (2 + 16),
				       PCI_DMA_FROMDEVICE);
		ap->skb->rx_jumbo_skbuff[idx].skb = skb;
		pci_unmap_addr_set(&ap->skb->rx_jumbo_skbuff[idx],
				   mapping, mapping);

		rd = &ap->rx_jumbo_ring[idx];
		set_aceaddr(&rd->addr, mapping);
		rd->size = ACE_JUMBO_MTU + ETH_HLEN + 4;
		rd->idx = idx;
		idx = (idx + 1) % RX_JUMBO_RING_ENTRIES;
	}

	if (!i)
		goto error_out;

	atomic_add(i, &ap->cur_jumbo_bufs);
	ap->rx_jumbo_skbprd = idx;

	if (ACE_IS_TIGON_I(ap)) {
		struct cmd cmd;
		cmd.evt = C_SET_RX_JUMBO_PRD_IDX;
		cmd.code = 0;
		cmd.idx = ap->rx_jumbo_skbprd;
		ace_issue_cmd(regs, &cmd);
	} else {
		writel(idx, &regs->RxJumboPrd);
	}

 out:
	clear_bit(0, &ap->jumbo_refill_busy);
	return;

 error_out:
	if (net_ratelimit())
		printk(KERN_INFO "Out of memory when allocating "
		       "jumbo receive buffers\n");
	goto out;
}
/*
 * All events are considered to be slow (RX/TX ints do not generate
 * events) and are handled here, outside the main interrupt handler,
 * to reduce the size of the handler.
 */
static u32 ace_handle_event(struct net_device *dev, u32 evtcsm, u32 evtprd)
{
	struct ace_private *ap;

	ap = dev->priv;

	while (evtcsm != evtprd) {
		switch (ap->evt_ring[evtcsm].evt) {
		case E_FW_RUNNING:
			printk(KERN_INFO "%s: Firmware up and running\n",
			       dev->name);
			ap->fw_running = 1;
			wmb();
			break;
		case E_STATS_UPDATED:
			break;
		case E_LNK_STATE:
		{
			u16 code = ap->evt_ring[evtcsm].code;
			switch (code) {
			case E_C_LINK_UP:
			{
				u32 state = readl(&ap->regs->GigLnkState);
				printk(KERN_WARNING "%s: Optical link UP "
				       "(%s Duplex, Flow Control: %s%s)\n",
				       dev->name,
				       state & LNK_FULL_DUPLEX ? "Full":"Half",
				       state & LNK_TX_FLOW_CTL_Y ? "TX " : "",
				       state & LNK_RX_FLOW_CTL_Y ? "RX" : "");
				break;
			}
			case E_C_LINK_DOWN:
				printk(KERN_WARNING "%s: Optical link DOWN\n",
				       dev->name);
				break;
			case E_C_LINK_10_100:
				printk(KERN_WARNING "%s: 10/100BaseT link "
				       "up\n", dev->name);
				break;
			default:
				printk(KERN_ERR "%s: Unknown optical link "
				       "state %02x\n", dev->name, code);
			}
			break;
		}
		case E_ERROR:
			switch(ap->evt_ring[evtcsm].code) {
			case E_C_ERR_INVAL_CMD:
				printk(KERN_ERR "%s: invalid command error\n",
				       dev->name);
				break;
			case E_C_ERR_UNIMP_CMD:
				printk(KERN_ERR "%s: unimplemented command "
				       "error\n", dev->name);
				break;
			case E_C_ERR_BAD_CFG:
				printk(KERN_ERR "%s: bad config error\n",
				       dev->name);
				break;
			default:
				printk(KERN_ERR "%s: unknown error %02x\n",
				       dev->name, ap->evt_ring[evtcsm].code);
			}
			break;
		case E_RESET_JUMBO_RNG:
		{
			int i;

			for (i = 0; i < RX_JUMBO_RING_ENTRIES; i++) {
				if (ap->skb->rx_jumbo_skbuff[i].skb) {
					ap->rx_jumbo_ring[i].size = 0;
					set_aceaddr(&ap->rx_jumbo_ring[i].addr, 0);
					dev_kfree_skb(ap->skb->rx_jumbo_skbuff[i].skb);
					ap->skb->rx_jumbo_skbuff[i].skb = NULL;
				}
			}

			if (ACE_IS_TIGON_I(ap)) {
				struct cmd cmd;
				cmd.evt = C_SET_RX_JUMBO_PRD_IDX;
				cmd.code = 0;
				cmd.idx = 0;
				ace_issue_cmd(ap->regs, &cmd);
			} else {
				writel(0, &((ap->regs)->RxJumboPrd));
				wmb();
			}

			ap->jumbo = 0;
			ap->rx_jumbo_skbprd = 0;
			printk(KERN_INFO "%s: Jumbo ring flushed\n",
			       dev->name);
			clear_bit(0, &ap->jumbo_refill_busy);
			break;
		}
		default:
			printk(KERN_ERR "%s: Unhandled event 0x%02x\n",
			       dev->name, ap->evt_ring[evtcsm].evt);
		}
		evtcsm = (evtcsm + 1) % EVT_RING_ENTRIES;
	}

	return evtcsm;
}

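/*
 * The event ring uses the same consumer/producer protocol as the data
 * rings.  The caller is expected to do, roughly:
 *
 *	evtcsm = readl(&regs->EvtCsm);
 *	evtprd = *ap->evt_prd;
 *	if (evtcsm != evtprd) {
 *		evtcsm = ace_handle_event(dev, evtcsm, evtprd);
 *		writel(evtcsm, &regs->EvtCsm);
 *	}
 *
 * which is exactly what ace_interrupt() below does.
 */
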
static void ace_rx_int(struct net_device *dev, u32 rxretprd, u32 rxretcsm)
{
	struct ace_private *ap = dev->priv;
	u32 idx;
	int mini_count = 0, std_count = 0;

	idx = rxretcsm;

	prefetchw(&ap->cur_rx_bufs);
	prefetchw(&ap->cur_mini_bufs);

	while (idx != rxretprd) {
		struct ring_info *rip;
		struct sk_buff *skb;
		struct rx_desc *rxdesc, *retdesc;
		u32 skbidx;
		int bd_flags, desc_type, mapsize;
		u16 csum;

		retdesc = &ap->rx_return_ring[idx];
		skbidx = retdesc->idx;
		bd_flags = retdesc->flags;
		desc_type = bd_flags & (BD_FLG_JUMBO | BD_FLG_MINI);

		switch(desc_type) {
			/*
			 * Normal frames do not have any flags set
			 *
			 * Mini and normal frames arrive frequently,
			 * so use a local counter to avoid doing
			 * atomic operations for each packet arriving.
			 */
		case 0:
			rip = &ap->skb->rx_std_skbuff[skbidx];
			mapsize = ACE_STD_BUFSIZE - (2 + 16);
			rxdesc = &ap->rx_std_ring[skbidx];
			std_count++;
			break;
		case BD_FLG_JUMBO:
			rip = &ap->skb->rx_jumbo_skbuff[skbidx];
			mapsize = ACE_JUMBO_BUFSIZE - (2 + 16);
			rxdesc = &ap->rx_jumbo_ring[skbidx];
			atomic_dec(&ap->cur_jumbo_bufs);
			break;
		case BD_FLG_MINI:
			rip = &ap->skb->rx_mini_skbuff[skbidx];
			mapsize = ACE_MINI_BUFSIZE - (2 + 16);
			rxdesc = &ap->rx_mini_ring[skbidx];
			mini_count++;
			break;
		default:
			printk(KERN_INFO "%s: unknown frame type (0x%02x) "
			       "returned by NIC\n", dev->name,
			       retdesc->flags);
			goto error;
		}

		skb = rip->skb;
		rip->skb = NULL;
		pci_unmap_page(ap->pdev,
			       pci_unmap_addr(rip, mapping),
			       mapsize,
			       PCI_DMA_FROMDEVICE);
		skb_put(skb, retdesc->size);

		/*
		 * Fly baby, fly!
		 */
		csum = retdesc->tcp_udp_csum;

		skb->protocol = eth_type_trans(skb, dev);

		/*
		 * Instead of forcing the poor tigon mips cpu to calculate
		 * pseudo hdr checksum, we do this ourselves.
		 */
		if (bd_flags & BD_FLG_TCP_UDP_SUM) {
			skb->csum = htons(csum);
			skb->ip_summed = CHECKSUM_HW;
		} else {
			skb->ip_summed = CHECKSUM_NONE;
		}

		/* send it up */
#if ACENIC_DO_VLAN
		if (ap->vlgrp && (bd_flags & BD_FLG_VLAN_TAG)) {
			vlan_hwaccel_rx(skb, ap->vlgrp, retdesc->vlan);
		} else
#endif
			netif_rx(skb);

		dev->last_rx = jiffies;
		ap->stats.rx_packets++;
		ap->stats.rx_bytes += retdesc->size;

		idx = (idx + 1) % RX_RETURN_RING_ENTRIES;
	}

	atomic_sub(std_count, &ap->cur_rx_bufs);
	if (!ACE_IS_TIGON_I(ap))
		atomic_sub(mini_count, &ap->cur_mini_bufs);

 out:
	/*
	 * According to the documentation RxRetCsm is obsolete with
	 * the 12.3.x Firmware - my Tigon I NICs seem to disagree!
	 */
	if (ACE_IS_TIGON_I(ap)) {
		struct ace_regs *regs = ap->regs;
		writel(idx, &regs->RxRetCsm);
	}
	ap->cur_rx = idx;

	return;

 error:
	idx = rxretprd;
	goto out;
}

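/*
 * A word on the checksum handling above: with BD_FLG_TCP_UDP_SUM set
 * the NIC has already summed the TCP/UDP data, but not the pseudo
 * header.  Storing that partial sum in skb->csum and flagging the skb
 * CHECKSUM_HW lets the stack merely add in the pseudo header and
 * compare, rather than checksumming the whole packet again.
 */
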
static inline void ace_tx_int(struct net_device *dev,
			      u32 txcsm, u32 idx)
{
	struct ace_private *ap = dev->priv;

	do {
		struct sk_buff *skb;
		dma_addr_t mapping;
		struct tx_ring_info *info;

		info = ap->skb->tx_skbuff + idx;
		skb = info->skb;
		mapping = pci_unmap_addr(info, mapping);

		if (mapping) {
			pci_unmap_page(ap->pdev, mapping,
				       pci_unmap_len(info, maplen),
				       PCI_DMA_TODEVICE);
			pci_unmap_addr_set(info, mapping, 0);
		}

		if (skb) {
			ap->stats.tx_packets++;
			ap->stats.tx_bytes += skb->len;
			dev_kfree_skb_irq(skb);
			info->skb = NULL;
		}

		idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap);
	} while (idx != txcsm);

	if (netif_queue_stopped(dev))
		netif_wake_queue(dev);

	wmb();
	ap->tx_ret_csm = txcsm;

	/* So... tx_ret_csm is advanced _after_ check for device wakeup.
	 *
	 * We could try to make it before. In this case we would get
	 * the following race condition: hard_start_xmit on other cpu
	 * enters after we advanced tx_ret_csm and fills space,
	 * which we have just freed, so that we make illegal device wakeup.
	 * There is no good way to workaround this (at entry
	 * to ace_start_xmit detects this condition and prevents
	 * ring corruption, but it is not a good workaround.)
	 *
	 * When tx_ret_csm is advanced after, we wake up device _only_
	 * if we really have some space in ring (though the core doing
	 * hard_start_xmit can see full ring for some period and has to
	 * synchronize.) Superb.
	 * BUT! We get another subtle race condition. hard_start_xmit
	 * may think that ring is full between wakeup and advancing
	 * tx_ret_csm and will stop device instantly! It is not so bad.
	 * We are guaranteed that there is something in ring, so that
	 * the next irq will resume transmission. To speedup this we could
	 * mark descriptor, which closes ring with BD_FLG_COAL_NOW
	 * (see ace_start_xmit).
	 *
	 * Well, this dilemma exists in all lock-free devices.
	 * We, following scheme used in drivers by Donald Becker,
	 * select the least dangerous.
	 */
}

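/*
 * tx_ring_full(), defined earlier in this file, is the arbiter both
 * here and in ace_start_xmit(): loosely speaking it reports whether
 * the distance between a consumer index and a producer index leaves
 * too little room for another worst-case packet.  ace_interrupt()
 * below therefore only calls ace_tx_int() once the consumer has moved
 * far enough that the ring no longer looks full.
 */
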
static irqreturn_t ace_interrupt(int irq, void *dev_id, struct pt_regs *ptregs)
{
	struct ace_private *ap;
	struct ace_regs *regs;
	struct net_device *dev = (struct net_device *)dev_id;
	u32 idx;
	u32 txcsm, rxretcsm, rxretprd;
	u32 evtcsm, evtprd;

	ap = dev->priv;
	regs = ap->regs;

	/*
	 * In case of PCI shared interrupts or spurious interrupts,
	 * we want to make sure it is actually our interrupt before
	 * spending any time in here.
	 */
	if (!(readl(&regs->HostCtrl) & IN_INT))
		return IRQ_NONE;

	/*
	 * ACK intr now. Otherwise we will lose updates to rx_ret_prd,
	 * which happened _after_ rxretprd = *ap->rx_ret_prd; but before
	 * writel(0, &regs->Mb0Lo).
	 *
	 * "IRQ avoidance" recommended in docs applies to IRQs served
	 * by threads and it is wrong even for that case.
	 */
	writel(0, &regs->Mb0Lo);
	readl(&regs->Mb0Lo);

	/*
	 * There is no conflict between transmit handling in
	 * start_xmit and receive processing, thus there is no reason
	 * to take a spin lock for RX handling. Wait until we start
	 * working on the other stuff - hey we don't need a spin lock
	 * anyway.
	 */
	rxretprd = *ap->rx_ret_prd;
	rxretcsm = ap->cur_rx;

	if (rxretprd != rxretcsm)
		ace_rx_int(dev, rxretprd, rxretcsm);

	txcsm = *ap->tx_csm;
	idx = ap->tx_ret_csm;

	if (txcsm != idx) {
		/*
		 * If each skb takes only one descriptor this check degenerates
		 * to identity, because new space has just been opened.
		 * But if skbs are fragmented we must check that this index
		 * update releases enough of space, otherwise we just
		 * wait for device to make more work.
		 */
		if (!tx_ring_full(ap, txcsm, ap->tx_prd))
			ace_tx_int(dev, txcsm, idx);
	}

	evtcsm = readl(&regs->EvtCsm);
	evtprd = *ap->evt_prd;

	if (evtcsm != evtprd) {
		evtcsm = ace_handle_event(dev, evtcsm, evtprd);
		writel(evtcsm, &regs->EvtCsm);
	}

	/*
	 * This has to go last in the interrupt handler and run with
	 * the spin lock released ... what lock?
	 */
	if (netif_running(dev)) {
		int cur_size;
		int run_tasklet = 0;

		cur_size = atomic_read(&ap->cur_rx_bufs);
		if (cur_size < RX_LOW_STD_THRES) {
			if ((cur_size < RX_PANIC_STD_THRES) &&
			    !test_and_set_bit(0, &ap->std_refill_busy)) {
#ifdef DEBUG
				printk("low on std buffers %i\n", cur_size);
#endif
				ace_load_std_rx_ring(ap,
						     RX_RING_SIZE - cur_size);
			} else
				run_tasklet = 1;
		}

		if (!ACE_IS_TIGON_I(ap)) {
			cur_size = atomic_read(&ap->cur_mini_bufs);
			if (cur_size < RX_LOW_MINI_THRES) {
				if ((cur_size < RX_PANIC_MINI_THRES) &&
				    !test_and_set_bit(0,
						      &ap->mini_refill_busy)) {
#ifdef DEBUG
					printk("low on mini buffers %i\n",
					       cur_size);
#endif
					ace_load_mini_rx_ring(ap, RX_MINI_SIZE - cur_size);
				} else
					run_tasklet = 1;
			}
		}

		if (ap->jumbo) {
			cur_size = atomic_read(&ap->cur_jumbo_bufs);
			if (cur_size < RX_LOW_JUMBO_THRES) {
				if ((cur_size < RX_PANIC_JUMBO_THRES) &&
				    !test_and_set_bit(0,
						      &ap->jumbo_refill_busy)){
#ifdef DEBUG
					printk("low on jumbo buffers %i\n",
					       cur_size);
#endif
					ace_load_jumbo_rx_ring(ap, RX_JUMBO_SIZE - cur_size);
				} else
					run_tasklet = 1;
			}
		}
		if (run_tasklet && !ap->tasklet_pending) {
			ap->tasklet_pending = 1;
			tasklet_schedule(&ap->ace_tasklet);
		}
	}

	return IRQ_HANDLED;
}

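/*
 * To summarize the ordering in ace_interrupt(): check that the irq is
 * ours, ACK it via mailbox 0, drain the RX return ring, reap finished
 * TX descriptors, process slow events, and only then top up whichever
 * receive rings have run low - punting to the tasklet when another
 * CPU already holds a refill lock.
 */
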
#if ACENIC_DO_VLAN
static void ace_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct ace_private *ap = dev->priv;
	unsigned long flags;

	local_irq_save(flags);
	ace_mask_irq(dev);

	ap->vlgrp = grp;

	ace_unmask_irq(dev);
	local_irq_restore(flags);
}


static void ace_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct ace_private *ap = dev->priv;
	unsigned long flags;

	local_irq_save(flags);
	ace_mask_irq(dev);

	if (ap->vlgrp)
		ap->vlgrp->vlan_devices[vid] = NULL;

	ace_unmask_irq(dev);
	local_irq_restore(flags);
}
#endif /* ACENIC_DO_VLAN */

static int ace_open(struct net_device *dev)
{
	struct ace_private *ap;
	struct ace_regs *regs;
	struct cmd cmd;

	ap = dev->priv;
	regs = ap->regs;

	if (!(ap->fw_running)) {
		printk(KERN_WARNING "%s: Firmware not running!\n", dev->name);
		return -EBUSY;
	}

	writel(dev->mtu + ETH_HLEN + 4, &regs->IfMtu);

	cmd.evt = C_CLEAR_STATS;
	cmd.code = 0;
	cmd.idx = 0;
	ace_issue_cmd(regs, &cmd);

	cmd.evt = C_HOST_STATE;
	cmd.code = C_C_STACK_UP;
	cmd.idx = 0;
	ace_issue_cmd(regs, &cmd);

	if (ap->jumbo &&
	    !test_and_set_bit(0, &ap->jumbo_refill_busy))
		ace_load_jumbo_rx_ring(ap, RX_JUMBO_SIZE);

	if (dev->flags & IFF_PROMISC) {
		cmd.evt = C_SET_PROMISC_MODE;
		cmd.code = C_C_PROMISC_ENABLE;
		cmd.idx = 0;
		ace_issue_cmd(regs, &cmd);

		ap->promisc = 1;
	} else
		ap->promisc = 0;
	ap->mcast_all = 0;

#if 0
	cmd.evt = C_LNK_NEGOTIATION;
	cmd.code = 0;
	cmd.idx = 0;
	ace_issue_cmd(regs, &cmd);
#endif

	netif_start_queue(dev);

	ACE_MOD_INC_USE_COUNT;

	/*
	 * Setup the bottom half rx ring refill handler
	 */
	tasklet_init(&ap->ace_tasklet, ace_tasklet, (unsigned long)dev);
	return 0;
}

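/*
 * Every firmware command issued above follows the same three-field
 * pattern, e.g.:
 *
 *	struct cmd cmd;
 *
 *	cmd.evt = C_HOST_STATE;		command code
 *	cmd.code = C_C_STACK_UP;	command specific argument
 *	cmd.idx = 0;			ring index, where applicable
 *	ace_issue_cmd(regs, &cmd);
 *
 * ace_issue_cmd() writes the command into the NIC's command ring;
 * completion, where there is one, is reported back asynchronously
 * through the event ring.
 */
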
static int ace_close(struct net_device *dev)
{
	struct ace_private *ap;
	struct ace_regs *regs;
	struct cmd cmd;
	unsigned long flags;
	short i;

	/*
	 * Without (or before) releasing irq and stopping hardware, this
	 * is an absolute non-sense, by the way. It will be reset instantly
	 * by the first irq.
	 */
	netif_stop_queue(dev);

	ap = dev->priv;
	regs = ap->regs;

	if (ap->promisc) {
		cmd.evt = C_SET_PROMISC_MODE;
		cmd.code = C_C_PROMISC_DISABLE;
		cmd.idx = 0;
		ace_issue_cmd(regs, &cmd);
		ap->promisc = 0;
	}

	cmd.evt = C_HOST_STATE;
	cmd.code = C_C_STACK_DOWN;
	cmd.idx = 0;
	ace_issue_cmd(regs, &cmd);

	tasklet_kill(&ap->ace_tasklet);

	/*
	 * Make sure one CPU is not processing packets while
	 * buffers are being released by another.
	 */
	local_irq_save(flags);
	ace_mask_irq(dev);

	for (i = 0; i < ACE_TX_RING_ENTRIES(ap); i++) {
		struct sk_buff *skb;
		dma_addr_t mapping;
		struct tx_ring_info *info;

		info = ap->skb->tx_skbuff + i;
		skb = info->skb;
		mapping = pci_unmap_addr(info, mapping);

		if (mapping) {
			if (ACE_IS_TIGON_I(ap)) {
				writel(0, &ap->tx_ring[i].addr.addrhi);
				writel(0, &ap->tx_ring[i].addr.addrlo);
				writel(0, &ap->tx_ring[i].flagsize);
			} else
				memset(ap->tx_ring + i, 0,
				       sizeof(struct tx_desc));
			pci_unmap_page(ap->pdev, mapping,
				       pci_unmap_len(info, maplen),
				       PCI_DMA_TODEVICE);
			pci_unmap_addr_set(info, mapping, 0);
		}
		if (skb) {
			dev_kfree_skb(skb);
			info->skb = NULL;
		}
	}

	if (ap->jumbo) {
		cmd.evt = C_RESET_JUMBO_RNG;
		cmd.code = 0;
		cmd.idx = 0;
		ace_issue_cmd(regs, &cmd);
	}

	ace_unmask_irq(dev);
	local_irq_restore(flags);

	ACE_MOD_DEC_USE_COUNT;
	return 0;
}

static inline dma_addr_t
ace_map_tx_skb(struct ace_private *ap, struct sk_buff *skb,
	       struct sk_buff *tail, u32 idx)
{
	dma_addr_t mapping;
	struct tx_ring_info *info;

	mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
			       ((unsigned long) skb->data & ~PAGE_MASK),
			       skb->len, PCI_DMA_TODEVICE);

	info = ap->skb->tx_skbuff + idx;
	info->skb = tail;
	pci_unmap_addr_set(info, mapping, mapping);
	pci_unmap_len_set(info, maplen, skb->len);
	return mapping;
}

static inline void
ace_load_tx_bd(struct ace_private *ap, struct tx_desc *desc, u64 addr,
	       u32 flagsize, u32 vlan_tag)
{
#if !USE_TX_COAL_NOW
	flagsize &= ~BD_FLG_COAL_NOW;
#endif

	if (ACE_IS_TIGON_I(ap)) {
		writel(addr >> 32, &desc->addr.addrhi);
		writel(addr & 0xffffffff, &desc->addr.addrlo);
		writel(flagsize, &desc->flagsize);
#if ACENIC_DO_VLAN
		writel(vlan_tag, &desc->vlanres);
#endif
	} else {
		desc->addr.addrhi = addr >> 32;
		desc->addr.addrlo = addr;
		desc->flagsize = flagsize;
#if ACENIC_DO_VLAN
		desc->vlanres = vlan_tag;
#endif
	}
}

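/*
 * The two branches above exist because the Tigon I keeps its tx ring
 * in NIC-local memory, which must be accessed with writel(), while on
 * the Tigon II the ring sits in host RAM and plain stores suffice.
 * ace_close() has to make the same distinction when it zeroes out
 * descriptors.
 */
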
static int ace_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ace_private *ap = dev->priv;
	struct ace_regs *regs = ap->regs;
	struct tx_desc *desc;
	u32 idx, flagsize;

	/*
	 * This only happens with pre-softnet, ie. 2.2.x kernels.
	 */
	if (early_stop_netif_stop_queue(dev))
		return 1;

restart:
	idx = ap->tx_prd;

	if (tx_ring_full(ap, ap->tx_ret_csm, idx))
		goto overflow;

	if (!skb_shinfo(skb)->nr_frags) {
		dma_addr_t mapping;
		u32 vlan_tag = 0;

		mapping = ace_map_tx_skb(ap, skb, skb, idx);
		flagsize = (skb->len << 16) | (BD_FLG_END);
		if (skb->ip_summed == CHECKSUM_HW)
			flagsize |= BD_FLG_TCP_UDP_SUM;
#if ACENIC_DO_VLAN
		if (vlan_tx_tag_present(skb)) {
			flagsize |= BD_FLG_VLAN_TAG;
			vlan_tag = vlan_tx_tag_get(skb);
		}
#endif
		desc = ap->tx_ring + idx;
		idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap);

		/* Look at ace_tx_int for explanations. */
		if (tx_ring_full(ap, ap->tx_ret_csm, idx))
			flagsize |= BD_FLG_COAL_NOW;

		ace_load_tx_bd(ap, desc, mapping, flagsize, vlan_tag);
	} else {
		dma_addr_t mapping;
		u32 vlan_tag = 0;
		int i;

		mapping = ace_map_tx_skb(ap, skb, NULL, idx);
		flagsize = (skb_headlen(skb) << 16);
		if (skb->ip_summed == CHECKSUM_HW)
			flagsize |= BD_FLG_TCP_UDP_SUM;
#if ACENIC_DO_VLAN
		if (vlan_tx_tag_present(skb)) {
			flagsize |= BD_FLG_VLAN_TAG;
			vlan_tag = vlan_tx_tag_get(skb);
		}
#endif

		ace_load_tx_bd(ap, ap->tx_ring + idx, mapping, flagsize, vlan_tag);

		idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap);

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			struct tx_ring_info *info;

			info = ap->skb->tx_skbuff + idx;
			desc = ap->tx_ring + idx;

			mapping = pci_map_page(ap->pdev, frag->page,
					       frag->page_offset, frag->size,
					       PCI_DMA_TODEVICE);

			flagsize = (frag->size << 16);
			if (skb->ip_summed == CHECKSUM_HW)
				flagsize |= BD_FLG_TCP_UDP_SUM;
			idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap);

			if (i == skb_shinfo(skb)->nr_frags - 1) {
				flagsize |= BD_FLG_END;
				if (tx_ring_full(ap, ap->tx_ret_csm, idx))
					flagsize |= BD_FLG_COAL_NOW;

				/*
				 * Only the last fragment frees
				 * the skb!
				 */
				info->skb = skb;
			} else {
				info->skb = NULL;
			}
			pci_unmap_addr_set(info, mapping, mapping);
			pci_unmap_len_set(info, maplen, frag->size);
			ace_load_tx_bd(ap, desc, mapping, flagsize, vlan_tag);
		}
	}

	wmb();
	ap->tx_prd = idx;
	ace_set_txprd(regs, ap, idx);

	if (flagsize & BD_FLG_COAL_NOW) {
		netif_stop_queue(dev);

		/*
		 * A TX-descriptor producer (an IRQ) might have gotten
		 * inbetween, making the ring free again. Since xmit is
		 * serialized, this is the only situation we have to
		 * re-test.
		 */
		if (!tx_ring_full(ap, ap->tx_ret_csm, idx))
			netif_wake_queue(dev);
	}

	dev->trans_start = jiffies;
	return 0;

overflow:
	/*
	 * This race condition is unavoidable with lock-free drivers.
	 * We wake up the queue _before_ tx_prd is advanced, so that we can
	 * enter hard_start_xmit too early, while tx ring still looks closed.
	 * This happens ~1-4 times per 100000 packets, so that we can allow
	 * to loop syncing to other CPU. Probably, we need an additional
	 * wmb() in ace_tx_intr as well.
	 *
	 * Note that this race is relieved by reserving one more entry
	 * in tx ring than it is necessary (see original non-SG driver).
	 * However, with SG we need to reserve 2*MAX_SKB_FRAGS+1, which
	 * is already overkill.
	 *
	 * Alternative is to return with 1 not throttling queue. In this
	 * case loop becomes longer, no more useful effects.
	 */
	goto restart;
}

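/*
 * Descriptor usage, by example: an skb with a linear head and two page
 * fragments consumes three tx descriptors.  The head and fragment 0
 * are loaded with info->skb == NULL, while fragment 1 carries
 * BD_FLG_END and info->skb == skb, so the skb is freed exactly once -
 * when the final descriptor completes in ace_tx_int().
 */
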
static int ace_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ace_private *ap = dev->priv;
	struct ace_regs *regs = ap->regs;

	if (new_mtu > ACE_JUMBO_MTU)
		return -EINVAL;

	writel(new_mtu + ETH_HLEN + 4, &regs->IfMtu);
	dev->mtu = new_mtu;

	if (new_mtu > ACE_STD_MTU) {
		if (!(ap->jumbo)) {
			printk(KERN_INFO "%s: Enabling Jumbo frame "
			       "support\n", dev->name);
			ap->jumbo = 1;
			if (!test_and_set_bit(0, &ap->jumbo_refill_busy))
				ace_load_jumbo_rx_ring(ap, RX_JUMBO_SIZE);
			ace_set_rxtx_parms(dev, 1);
		}
	} else {
		while (test_and_set_bit(0, &ap->jumbo_refill_busy));
		ace_sync_irq(dev->irq);
		ace_set_rxtx_parms(dev, 0);
		if (ap->jumbo) {
			struct cmd cmd;

			cmd.evt = C_RESET_JUMBO_RNG;
			cmd.code = 0;
			cmd.idx = 0;
			ace_issue_cmd(regs, &cmd);
		}
	}

	return 0;
}

static int ace_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct ace_private *ap = dev->priv;
	struct ace_regs *regs = ap->regs;
#ifdef SIOCETHTOOL
	struct ethtool_cmd ecmd;
	u32 link, speed;

	if (cmd != SIOCETHTOOL)
		return -EOPNOTSUPP;
	if (copy_from_user(&ecmd, ifr->ifr_data, sizeof(ecmd)))
		return -EFAULT;

	switch (ecmd.cmd) {
	case ETHTOOL_GSET:
		ecmd.supported =
			(SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
			 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
			 SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full |
			 SUPPORTED_Autoneg | SUPPORTED_FIBRE);

		ecmd.port = PORT_FIBRE;
		ecmd.transceiver = XCVR_INTERNAL;
		ecmd.phy_address = 0;

		link = readl(&regs->GigLnkState);
		if (link & LNK_1000MB)
			ecmd.speed = SPEED_1000;
		else {
			link = readl(&regs->FastLnkState);
			if (link & LNK_100MB)
				ecmd.speed = SPEED_100;
			else if (link & LNK_10MB)
				ecmd.speed = SPEED_10;
			else
				ecmd.speed = 0;
		}
		if (link & LNK_FULL_DUPLEX)
			ecmd.duplex = DUPLEX_FULL;
		else
			ecmd.duplex = DUPLEX_HALF;

		if (link & LNK_NEGOTIATE)
			ecmd.autoneg = AUTONEG_ENABLE;
		else
			ecmd.autoneg = AUTONEG_DISABLE;

#if 0
		/*
		 * Current struct ethtool_cmd is insufficient
		 */
		ecmd.trace = readl(&regs->TuneTrace);

		ecmd.txcoal = readl(&regs->TuneTxCoalTicks);
		ecmd.rxcoal = readl(&regs->TuneRxCoalTicks);

		ecmd.maxtxpkt = readl(&regs->TuneMaxTxDesc);
		ecmd.maxrxpkt = readl(&regs->TuneMaxRxDesc);
#endif

		if(copy_to_user(ifr->ifr_data, &ecmd, sizeof(ecmd)))
			return -EFAULT;
		return 0;

	case ETHTOOL_SSET:
		if(!capable(CAP_NET_ADMIN))
			return -EPERM;

		link = readl(&regs->GigLnkState);
		if (link & LNK_1000MB)
			speed = SPEED_1000;
		else {
			link = readl(&regs->FastLnkState);
			if (link & LNK_100MB)
				speed = SPEED_100;
			else if (link & LNK_10MB)
				speed = SPEED_10;
			else
				speed = SPEED_100;
		}

		link = LNK_ENABLE | LNK_1000MB | LNK_100MB | LNK_10MB |
			LNK_RX_FLOW_CTL_Y | LNK_NEG_FCTL;
		if (!ACE_IS_TIGON_I(ap))
			link |= LNK_TX_FLOW_CTL_Y;
		if (ecmd.autoneg == AUTONEG_ENABLE)
			link |= LNK_NEGOTIATE;
		if (ecmd.speed != speed) {
			link &= ~(LNK_1000MB | LNK_100MB | LNK_10MB);
			switch (ecmd.speed) {
			case SPEED_1000:
				link |= LNK_1000MB;
				break;
			case SPEED_100:
				link |= LNK_100MB;
				break;
			case SPEED_10:
				link |= LNK_10MB;
				break;
			}
		}
		if (ecmd.duplex == DUPLEX_FULL)
			link |= LNK_FULL_DUPLEX;

		if (link != ap->link) {
			struct cmd cmd;

			printk(KERN_INFO "%s: Renegotiating link state\n",
			       dev->name);

			ap->link = link;
			writel(link, &regs->TuneLink);
			if (!ACE_IS_TIGON_I(ap))
				writel(link, &regs->TuneFastLink);
			wmb();

			cmd.evt = C_LNK_NEGOTIATION;
			cmd.code = 0;
			cmd.idx = 0;
			ace_issue_cmd(regs, &cmd);
		}
		return 0;

	case ETHTOOL_GDRVINFO: {
		struct ethtool_drvinfo info = {ETHTOOL_GDRVINFO};
		strncpy(info.driver, "acenic", sizeof(info.driver) - 1);
		sprintf(info.fw_version, "%i.%i.%i",
			tigonFwReleaseMajor, tigonFwReleaseMinor,
			tigonFwReleaseFix);
		strncpy(info.version, version, sizeof(info.version) - 1);
		strcpy(info.bus_info, ap->pdev->slot_name);
		if (copy_to_user(ifr->ifr_data, &info, sizeof(info)))
			return -EFAULT;
		return 0;
	}
	default:
		break;
	}
#endif

	return -EOPNOTSUPP;
}

/*
 * Set the hardware MAC address.
 */
static int ace_set_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct ace_regs *regs;
	struct cmd cmd;
	u8 *da;

	if(netif_running(dev))
		return -EBUSY;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	da = (u8 *)dev->dev_addr;

	regs = ((struct ace_private *)dev->priv)->regs;
	writel(da[0] << 8 | da[1], &regs->MacAddrHi);
	writel((da[2] << 24) | (da[3] << 16) | (da[4] << 8) | da[5],
	       &regs->MacAddrLo);

	cmd.evt = C_SET_MAC_ADDR;
	cmd.code = 0;
	cmd.idx = 0;
	ace_issue_cmd(regs, &cmd);

	return 0;
}

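/*
 * Register layout, by example: for the address 00:60:cf:12:34:56 the
 * two high bytes land in MacAddrHi as 0x0060 and the remaining four in
 * MacAddrLo as 0xcf123456; the C_SET_MAC_ADDR command then tells the
 * firmware to pick the new address up.
 */
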
static void ace_set_multicast_list(struct net_device *dev)
{
	struct ace_private *ap = dev->priv;
	struct ace_regs *regs = ap->regs;
	struct cmd cmd;

	if ((dev->flags & IFF_ALLMULTI) && !(ap->mcast_all)) {
		cmd.evt = C_SET_MULTICAST_MODE;
		cmd.code = C_C_MCAST_ENABLE;
		cmd.idx = 0;
		ace_issue_cmd(regs, &cmd);
		ap->mcast_all = 1;
	} else if (ap->mcast_all) {
		cmd.evt = C_SET_MULTICAST_MODE;
		cmd.code = C_C_MCAST_DISABLE;
		cmd.idx = 0;
		ace_issue_cmd(regs, &cmd);
		ap->mcast_all = 0;
	}

	if ((dev->flags & IFF_PROMISC) && !(ap->promisc)) {
		cmd.evt = C_SET_PROMISC_MODE;
		cmd.code = C_C_PROMISC_ENABLE;
		cmd.idx = 0;
		ace_issue_cmd(regs, &cmd);
		ap->promisc = 1;
	} else if (!(dev->flags & IFF_PROMISC) && (ap->promisc)) {
		cmd.evt = C_SET_PROMISC_MODE;
		cmd.code = C_C_PROMISC_DISABLE;
		cmd.idx = 0;
		ace_issue_cmd(regs, &cmd);
		ap->promisc = 0;
	}

	/*
	 * For the time being multicast relies on the upper layers
	 * filtering it properly. The Firmware does not allow one to
	 * set the entire multicast list at a time and keeping track of
	 * it here is going to be messy.
	 */
	if ((dev->mc_count) && !(ap->mcast_all)) {
		cmd.evt = C_SET_MULTICAST_MODE;
		cmd.code = C_C_MCAST_ENABLE;
		cmd.idx = 0;
		ace_issue_cmd(regs, &cmd);
	} else if (!ap->mcast_all) {
		cmd.evt = C_SET_MULTICAST_MODE;
		cmd.code = C_C_MCAST_DISABLE;
		cmd.idx = 0;
		ace_issue_cmd(regs, &cmd);
	}
}

static struct net_device_stats *ace_get_stats(struct net_device *dev)
{
	struct ace_private *ap = dev->priv;
	struct ace_mac_stats *mac_stats =
		(struct ace_mac_stats *)ap->regs->Stats;

	ap->stats.rx_missed_errors = readl(&mac_stats->drop_space);
	ap->stats.multicast = readl(&mac_stats->kept_mc);
	ap->stats.collisions = readl(&mac_stats->coll);

	return &ap->stats;
}

static void __init ace_copy(struct ace_regs *regs, void *src,
			    u32 dest, int size)
{
	unsigned long tdest;
	u32 *wsrc;
	short tsize, i;

	if (size <= 0)
		return;

	while (size > 0) {
		tsize = min_t(u32, ((~dest & (ACE_WINDOW_SIZE - 1)) + 1),
			      min_t(u32, size, ACE_WINDOW_SIZE));
		tdest = (unsigned long)&regs->Window +
			(dest & (ACE_WINDOW_SIZE - 1));
		writel(dest & ~(ACE_WINDOW_SIZE - 1), &regs->WinBase);
		/*
		 * This requires byte swapping on big endian, however
		 * writel does that for us
		 */
		wsrc = src;
		for (i = 0; i < (tsize / 4); i++) {
			writel(wsrc[i], tdest + i*4);
		}
		dest += tsize;
		src += tsize;
		size -= tsize;
	}

	return;
}

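/*
 * The first min_t() above is just window arithmetic:
 * (~dest & (ACE_WINDOW_SIZE - 1)) + 1 equals
 * ACE_WINDOW_SIZE - (dest & (ACE_WINDOW_SIZE - 1)), i.e. the number
 * of bytes left between dest and the end of its window.  Clamping
 * tsize to that value guarantees a chunk never straddles a window
 * boundary, so a single WinBase setting covers the whole chunk.
 */
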
static void __init ace_clear(struct ace_regs *regs, u32 dest, int size)
{
	unsigned long tdest;
	short tsize, i;

	if (size <= 0)
		return;

	while (size > 0) {
		tsize = min_t(u32, ((~dest & (ACE_WINDOW_SIZE - 1)) + 1),
			      min_t(u32, size, ACE_WINDOW_SIZE));
		tdest = (unsigned long)&regs->Window +
			(dest & (ACE_WINDOW_SIZE - 1));
		writel(dest & ~(ACE_WINDOW_SIZE - 1), &regs->WinBase);

		for (i = 0; i < (tsize / 4); i++) {
			writel(0, tdest + i*4);
		}

		dest += tsize;
		size -= tsize;
	}

	return;
}

/*
 * Download the firmware into the SRAM on the NIC
 *
 * This operation requires the NIC to be halted and is performed with
 * interrupts disabled and with the spinlock held.
 */
int __init ace_load_firmware(struct net_device *dev)
{
	struct ace_private *ap;
	struct ace_regs *regs;

	ap = dev->priv;
	regs = ap->regs;

	if (!(readl(&regs->CpuCtrl) & CPU_HALTED)) {
		printk(KERN_ERR "%s: trying to download firmware while the "
		       "CPU is running!\n", dev->name);
		return -EFAULT;
	}

	/*
	 * Do not try to clear more than 512KB or we end up seeing
	 * funny things on NICs with only 512KB SRAM
	 */
	ace_clear(regs, 0x2000, 0x80000-0x2000);
	if (ACE_IS_TIGON_I(ap)) {
		ace_copy(regs, tigonFwText, tigonFwTextAddr, tigonFwTextLen);
		ace_copy(regs, tigonFwData, tigonFwDataAddr, tigonFwDataLen);
		ace_copy(regs, tigonFwRodata, tigonFwRodataAddr,
			 tigonFwRodataLen);
		ace_clear(regs, tigonFwBssAddr, tigonFwBssLen);
		ace_clear(regs, tigonFwSbssAddr, tigonFwSbssLen);
	} else if (ap->version == 2) {
		ace_clear(regs, tigon2FwBssAddr, tigon2FwBssLen);
		ace_clear(regs, tigon2FwSbssAddr, tigon2FwSbssLen);
		ace_copy(regs, tigon2FwText, tigon2FwTextAddr,
			 tigon2FwTextLen);
		ace_copy(regs, tigon2FwRodata, tigon2FwRodataAddr,
			 tigon2FwRodataLen);
		ace_copy(regs, tigon2FwData, tigon2FwDataAddr,
			 tigon2FwDataLen);
	}

	return 0;
}

/*
 * The eeprom on the AceNIC is an Atmel i2c EEPROM.
 *
 * Accessing the EEPROM is `interesting' to say the least - don't read
 * this code right after dinner.
 *
 * This is all about black magic and bit-banging the device .... I
 * wonder in what hospital they have put the guy who designed the i2c
 * specs.
 *
 * Oh yes, this is only the beginning!
 *
 * Thanks to Stevarino Webinski for helping tracking down the bugs in
 * the i2c readout code by beta testing all my hacks.
 */
static void __init eeprom_start(struct ace_regs *regs)
{
	u32 local;

	readl(&regs->LocalCtrl);
	udelay(ACE_SHORT_DELAY);
	local = readl(&regs->LocalCtrl);
	local |= EEPROM_DATA_OUT | EEPROM_WRITE_ENABLE;
	writel(local, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);
	mb();

	udelay(ACE_SHORT_DELAY);
	local |= EEPROM_CLK_OUT;
	writel(local, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);
	mb();

	udelay(ACE_SHORT_DELAY);
	local &= ~EEPROM_DATA_OUT;
	writel(local, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);
	mb();

	udelay(ACE_SHORT_DELAY);
	local &= ~EEPROM_CLK_OUT;
	writel(local, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);
	mb();
}

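/*
 * This is bog-standard i2c bit-banging: eeprom_start() generates an
 * i2c START condition (a falling data edge while the clock is held
 * high) and eeprom_stop() below the matching STOP (a rising data edge
 * under a high clock).  Everything in between clocks bits out MSB
 * first via EEPROM_DATA_OUT/EEPROM_CLK_OUT in the LocalCtrl register.
 */
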
static void __init eeprom_prep(struct ace_regs *regs, u8 magic)
{
	short i;
	u32 local;

	udelay(ACE_SHORT_DELAY);
	local = readl(&regs->LocalCtrl);
	local &= ~EEPROM_DATA_OUT;
	local |= EEPROM_WRITE_ENABLE;
	writel(local, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);
	mb();

	for (i = 0; i < 8; i++, magic <<= 1) {
		udelay(ACE_SHORT_DELAY);
		if (magic & 0x80)
			local |= EEPROM_DATA_OUT;
		else
			local &= ~EEPROM_DATA_OUT;
		writel(local, &regs->LocalCtrl);
		readl(&regs->LocalCtrl);
		mb();

		udelay(ACE_SHORT_DELAY);
		local |= EEPROM_CLK_OUT;
		writel(local, &regs->LocalCtrl);
		readl(&regs->LocalCtrl);
		mb();
		udelay(ACE_SHORT_DELAY);
		local &= ~(EEPROM_CLK_OUT | EEPROM_DATA_OUT);
		writel(local, &regs->LocalCtrl);
		readl(&regs->LocalCtrl);
		mb();
	}
}

static int __init eeprom_check_ack(struct ace_regs *regs)
{
	int state;
	u32 local;

	local = readl(&regs->LocalCtrl);
	local &= ~EEPROM_WRITE_ENABLE;
	writel(local, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);
	mb();
	udelay(ACE_LONG_DELAY);
	local |= EEPROM_CLK_OUT;
	writel(local, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);
	mb();
	udelay(ACE_SHORT_DELAY);
	/* sample data in middle of high clk */
	state = (readl(&regs->LocalCtrl) & EEPROM_DATA_IN) != 0;
	udelay(ACE_SHORT_DELAY);
	mb();
	writel(readl(&regs->LocalCtrl) & ~EEPROM_CLK_OUT, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);
	mb();

	return state;
}

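/*
 * i2c acknowledgements are active low - the addressed device pulls
 * the data line down during the ninth clock.  eeprom_check_ack()
 * therefore returns 0 on a good ack, and read_eeprom_byte() below
 * treats any non-zero return as a failed transfer.
 */
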
static void __init eeprom_stop(struct ace_regs *regs)
{
	u32 local;

	udelay(ACE_SHORT_DELAY);
	local = readl(&regs->LocalCtrl);
	local |= EEPROM_WRITE_ENABLE;
	writel(local, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);
	mb();

	udelay(ACE_SHORT_DELAY);
	local &= ~EEPROM_DATA_OUT;
	writel(local, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);
	mb();

	udelay(ACE_SHORT_DELAY);
	local |= EEPROM_CLK_OUT;
	writel(local, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);
	mb();

	udelay(ACE_SHORT_DELAY);
	local |= EEPROM_DATA_OUT;
	writel(local, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);
	mb();

	udelay(ACE_LONG_DELAY);
	local &= ~EEPROM_CLK_OUT;
	writel(local, &regs->LocalCtrl);
	mb();
}

/*
 * Read a whole byte from the EEPROM.
 */
static int __init read_eeprom_byte(struct net_device *dev,
				   unsigned long offset)
{
	struct ace_regs *regs;
	unsigned long flags;
	u32 local;
	int result = 0;
	short i;

	if (!dev) {
		printk(KERN_ERR "No device!\n");
		return -ENODEV;
	}

	regs = ((struct ace_private *)dev->priv)->regs;

	/*
	 * Don't take interrupts on this CPU while bit banging
	 * the %#%#@$ I2C device
	 */
	local_irq_save(flags);

	eeprom_start(regs);

	eeprom_prep(regs, EEPROM_WRITE_SELECT);
	if (eeprom_check_ack(regs)) {
		local_irq_restore(flags);
		printk(KERN_ERR "%s: Unable to sync eeprom\n", dev->name);
		result = -EIO;
		goto eeprom_read_error;
	}

	eeprom_prep(regs, (offset >> 8) & 0xff);
	if (eeprom_check_ack(regs)) {
		local_irq_restore(flags);
		printk(KERN_ERR "%s: Unable to set address byte 0\n",
		       dev->name);
		result = -EIO;
		goto eeprom_read_error;
	}

	eeprom_prep(regs, offset & 0xff);
	if (eeprom_check_ack(regs)) {
		local_irq_restore(flags);
		printk(KERN_ERR "%s: Unable to set address byte 1\n",
		       dev->name);
		result = -EIO;
		goto eeprom_read_error;
	}

	eeprom_start(regs);
	eeprom_prep(regs, EEPROM_READ_SELECT);
	if (eeprom_check_ack(regs)) {
		local_irq_restore(flags);
		printk(KERN_ERR "%s: Unable to set READ_SELECT\n",
		       dev->name);
		result = -EIO;
		goto eeprom_read_error;
	}

	for (i = 0; i < 8; i++) {
		local = readl(&regs->LocalCtrl);
		local &= ~EEPROM_WRITE_ENABLE;
		writel(local, &regs->LocalCtrl);
		readl(&regs->LocalCtrl);
		udelay(ACE_LONG_DELAY);
		mb();

		local |= EEPROM_CLK_OUT;
		writel(local, &regs->LocalCtrl);
		readl(&regs->LocalCtrl);
		mb();
		udelay(ACE_SHORT_DELAY);
		/* sample data mid high clk */
		result = (result << 1) |
			((readl(&regs->LocalCtrl) & EEPROM_DATA_IN) != 0);
		udelay(ACE_SHORT_DELAY);
		mb();

		local = readl(&regs->LocalCtrl);
		local &= ~EEPROM_CLK_OUT;
		writel(local, &regs->LocalCtrl);
		readl(&regs->LocalCtrl);
		udelay(ACE_SHORT_DELAY);
		mb();
	}

	local |= EEPROM_WRITE_ENABLE;
	writel(local, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);
	mb();
	udelay(ACE_SHORT_DELAY);

	/* Terminate the transfer - leave data high (NACK) and clock once. */
	local |= EEPROM_DATA_OUT;
	writel(local, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);
	mb();
	udelay(ACE_SHORT_DELAY);
	writel(readl(&regs->LocalCtrl) | EEPROM_CLK_OUT, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);
	udelay(ACE_LONG_DELAY);
	writel(readl(&regs->LocalCtrl) & ~EEPROM_CLK_OUT, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);
	mb();
	udelay(ACE_SHORT_DELAY);
	eeprom_stop(regs);

	local_irq_restore(flags);
 out:
	return result;

 eeprom_read_error:
	printk(KERN_ERR "%s: Unable to read eeprom byte 0x%02lx\n",
	       dev->name, offset);
	goto out;
}


/*
 * Local variables:
 * compile-command: "gcc -D__SMP__ -D__KERNEL__ -DMODULE -I../../include -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer -pipe -fno-strength-reduce -DMODVERSIONS -include ../../include/linux/modversions.h -c -o acenic.o acenic.c"
 * End:
 */