MOXA linux-2.6.x / linux-2.6.9-uc0 from sdlinux-moxaart.tgz
[linux-2.6.9-moxart.git] / drivers / net / typhoon.c
blobea5643afddc9b537898e8e29c420394e986d9f22
1 /* typhoon.c: A Linux Ethernet device driver for 3Com 3CR990 family of NICs */
2 /*
3 Written 2002-2003 by David Dillow <dave@thedillows.org>
4 Based on code written 1998-2000 by Donald Becker <becker@scyld.com> and
5 Linux 2.2.x driver by David P. McLean <davidpmclean@yahoo.com>.
7 This software may be used and distributed according to the terms of
8 the GNU General Public License (GPL), incorporated herein by reference.
9 Drivers based on or derived from this code fall under the GPL and must
10 retain the authorship, copyright and license notice. This file is not
11 a complete program and may only be used when the entire operating
12 system is licensed under the GPL.
14 This software is available on a public web site. It may enable
15 cryptographic capabilities of the 3Com hardware, and may be
16 exported from the United States under License Exception "TSU"
17 pursuant to 15 C.F.R. Section 740.13(e).
19 This work was funded by the National Library of Medicine under
20 the Department of Energy project number 0274DD06D1 and NLM project
21 number Y1-LM-2015-01.
23 This driver is designed for the 3Com 3CR990 Family of cards with the
24 3XP Processor. It has been tested on x86 and sparc64.
26 KNOWN ISSUES:
27 *) The current firmware always strips the VLAN tag off, even if
28 we tell it not to. You should filter VLANs at the switch
29 as a workaround (good practice in any event) until we can
30 get this fixed.
31 *) Cannot DMA Rx packets to a 2 byte aligned address. Also firmware
32 issue. Hopefully 3Com will fix it.
33 *) Waiting for a command response takes 8ms due to non-preemptable
34 polling. Only significant for getting stats and creating
35 SAs, but an ugly wart never the less.
36 *) I've not tested multicast. I think it works, but reports welcome.
37 *) Doesn't do IPSEC offloading. Yet. Keep yer pants on, it's coming.
/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
 * Setting to > 1518 effectively disables this feature.
 */
static int rx_copybreak = 200;

/* end user-configurable values */

/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
 */
static const int multicast_filter_limit = 32;
51 /* Operational parameters that are set at compile time. */
53 /* Keep the ring sizes a power of two for compile efficiency.
54 * The compiler will convert <unsigned>'%'<2^N> into a bit mask.
55 * Making the Tx ring too large decreases the effectiveness of channel
56 * bonding and packet priority.
57 * There are no ill effects from too-large receive rings.
59 * We don't currently use the Hi Tx ring so, don't make it very big.
61 * Beware that if we start using the Hi Tx ring, we will need to change
62 * typhoon_num_free_tx() and typhoon_tx_complete() to account for that.
64 #define TXHI_ENTRIES 2
65 #define TXLO_ENTRIES 128
66 #define RX_ENTRIES 32
67 #define COMMAND_ENTRIES 16
68 #define RESPONSE_ENTRIES 32
70 #define COMMAND_RING_SIZE (COMMAND_ENTRIES * sizeof(struct cmd_desc))
71 #define RESPONSE_RING_SIZE (RESPONSE_ENTRIES * sizeof(struct resp_desc))
73 /* The 3XP will preload and remove 64 entries from the free buffer
74 * list, and we need one entry to keep the ring from wrapping, so
75 * to keep this a power of two, we use 128 entries.
77 #define RXFREE_ENTRIES 128
78 #define RXENT_ENTRIES (RXFREE_ENTRIES - 1)
80 /* Operational parameters that usually are not changed. */
82 /* Time in jiffies before concluding the transmitter is hung. */
83 #define TX_TIMEOUT (2*HZ)
85 #define PKT_BUF_SZ 1536
87 #define DRV_MODULE_NAME "typhoon"
88 #define DRV_MODULE_VERSION "1.5.3"
89 #define DRV_MODULE_RELDATE "03/12/15"
90 #define PFX DRV_MODULE_NAME ": "
91 #define ERR_PFX KERN_ERR PFX
93 #include <linux/module.h>
94 #include <linux/kernel.h>
95 #include <linux/string.h>
96 #include <linux/timer.h>
97 #include <linux/errno.h>
98 #include <linux/ioport.h>
99 #include <linux/slab.h>
100 #include <linux/interrupt.h>
101 #include <linux/pci.h>
102 #include <linux/netdevice.h>
103 #include <linux/etherdevice.h>
104 #include <linux/skbuff.h>
105 #include <linux/init.h>
106 #include <linux/delay.h>
107 #include <linux/ethtool.h>
108 #include <linux/if_vlan.h>
109 #include <linux/crc32.h>
110 #include <asm/processor.h>
111 #include <asm/bitops.h>
112 #include <asm/io.h>
113 #include <asm/uaccess.h>
114 #include <linux/in6.h>
115 #include <asm/checksum.h>
116 #include <linux/version.h>
118 #include "typhoon.h"
119 #include "typhoon-firmware.h"
121 static char version[] __devinitdata =
122 "typhoon.c: version " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
124 MODULE_AUTHOR("David Dillow <dave@thedillows.org>");
125 MODULE_LICENSE("GPL");
126 MODULE_DESCRIPTION("3Com Typhoon Family (3C990, 3CR990, and variants)");
127 MODULE_PARM(rx_copybreak, "i");
129 #if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32
130 #warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO
131 #undef NETIF_F_TSO
132 #endif
134 #if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS)
135 #error TX ring too small!
136 #endif
/* Per-board information: marketing name plus a mask of the
 * capability flags defined below.
 */
struct typhoon_card_info {
	char *name;
	int capabilities;
};

/* Capability flags for typhoon_card_info.capabilities */
#define TYPHOON_CRYPTO_NONE		0x00
#define TYPHOON_CRYPTO_DES		0x01
#define TYPHOON_CRYPTO_3DES		0x02
#define	TYPHOON_CRYPTO_VARIABLE		0x04
#define TYPHOON_FIBER			0x08
#define TYPHOON_WAKEUP_NEEDS_RESET	0x10
/* Index values into typhoon_card_info[] below; also stored as the
 * driver_data in the PCI device table entries.
 */
enum typhoon_cards {
	TYPHOON_TX = 0, TYPHOON_TX95, TYPHOON_TX97, TYPHOON_SVR,
	TYPHOON_SVR95, TYPHOON_SVR97, TYPHOON_TXM, TYPHOON_BSVR,
	TYPHOON_FX95, TYPHOON_FX97, TYPHOON_FX95SVR, TYPHOON_FX97SVR,
	TYPHOON_FXM,
};
157 /* directly indexed by enum typhoon_cards, above */
158 static struct typhoon_card_info typhoon_card_info[] __devinitdata = {
159 { "3Com Typhoon (3C990-TX)",
160 TYPHOON_CRYPTO_NONE},
161 { "3Com Typhoon (3CR990-TX-95)",
162 TYPHOON_CRYPTO_DES},
163 { "3Com Typhoon (3CR990-TX-97)",
164 TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES},
165 { "3Com Typhoon (3C990SVR)",
166 TYPHOON_CRYPTO_NONE},
167 { "3Com Typhoon (3CR990SVR95)",
168 TYPHOON_CRYPTO_DES},
169 { "3Com Typhoon (3CR990SVR97)",
170 TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES},
171 { "3Com Typhoon2 (3C990B-TX-M)",
172 TYPHOON_CRYPTO_VARIABLE},
173 { "3Com Typhoon2 (3C990BSVR)",
174 TYPHOON_CRYPTO_VARIABLE},
175 { "3Com Typhoon (3CR990-FX-95)",
176 TYPHOON_CRYPTO_DES | TYPHOON_FIBER},
177 { "3Com Typhoon (3CR990-FX-97)",
178 TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER},
179 { "3Com Typhoon (3CR990-FX-95 Server)",
180 TYPHOON_CRYPTO_DES | TYPHOON_FIBER},
181 { "3Com Typhoon (3CR990-FX-97 Server)",
182 TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER},
183 { "3Com Typhoon2 (3C990B-FX-97)",
184 TYPHOON_CRYPTO_VARIABLE | TYPHOON_FIBER},
187 /* Notes on the new subsystem numbering scheme:
188  * bits 0-1 indicate crypto capabilities: (0) variable, (1) DES, or (2) 3DES
189 * bit 4 indicates if this card has secured firmware (we don't support it)
190 * bit 8 indicates if this is a (0) copper or (1) fiber card
191 * bits 12-16 indicate card type: (0) client and (1) server
193 static struct pci_device_id typhoon_pci_tbl[] = {
194 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990,
195 PCI_ANY_ID, PCI_ANY_ID, 0, 0,TYPHOON_TX },
196 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_95,
197 PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX95 },
198 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_97,
199 PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX97 },
200 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
201 PCI_ANY_ID, 0x1000, 0, 0, TYPHOON_TXM },
202 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
203 PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FXM },
204 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
205 PCI_ANY_ID, 0x2000, 0, 0, TYPHOON_BSVR },
206 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
207 PCI_ANY_ID, 0x1101, 0, 0, TYPHOON_FX95 },
208 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
209 PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FX97 },
210 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
211 PCI_ANY_ID, 0x2101, 0, 0, TYPHOON_FX95SVR },
212 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
213 PCI_ANY_ID, 0x2102, 0, 0, TYPHOON_FX97SVR },
214 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR95,
215 PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR95 },
216 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR97,
217 PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR97 },
218 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR,
219 PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR },
220 { 0, }
222 MODULE_DEVICE_TABLE(pci, typhoon_pci_tbl);
224 /* Define the shared memory area
225 * Align everything the 3XP will normally be using.
226 * We'll need to move/align txHi if we start using that ring.
228 #define __3xp_aligned ____cacheline_aligned
229 struct typhoon_shared {
230 struct typhoon_interface iface;
231 struct typhoon_indexes indexes __3xp_aligned;
232 struct tx_desc txLo[TXLO_ENTRIES] __3xp_aligned;
233 struct rx_desc rxLo[RX_ENTRIES] __3xp_aligned;
234 struct rx_desc rxHi[RX_ENTRIES] __3xp_aligned;
235 struct cmd_desc cmd[COMMAND_ENTRIES] __3xp_aligned;
236 struct resp_desc resp[RESPONSE_ENTRIES] __3xp_aligned;
237 struct rx_free rxBuff[RXFREE_ENTRIES] __3xp_aligned;
238 u32 zeroWord;
239 struct tx_desc txHi[TXHI_ENTRIES];
240 } __attribute__ ((packed));
242 struct rxbuff_ent {
243 struct sk_buff *skb;
244 dma_addr_t dma_addr;
247 struct typhoon {
248 /* Tx cache line section */
249 struct transmit_ring txLoRing ____cacheline_aligned;
250 struct pci_dev * tx_pdev;
251 void __iomem *tx_ioaddr;
252 u32 txlo_dma_addr;
254 /* Irq/Rx cache line section */
255 void __iomem *ioaddr ____cacheline_aligned;
256 struct typhoon_indexes *indexes;
257 u8 awaiting_resp;
258 u8 duplex;
259 u8 speed;
260 u8 card_state;
261 struct basic_ring rxLoRing;
262 struct pci_dev * pdev;
263 struct net_device * dev;
264 spinlock_t state_lock;
265 struct vlan_group * vlgrp;
266 struct basic_ring rxHiRing;
267 struct basic_ring rxBuffRing;
268 struct rxbuff_ent rxbuffers[RXENT_ENTRIES];
270 /* general section */
271 spinlock_t command_lock ____cacheline_aligned;
272 struct basic_ring cmdRing;
273 struct basic_ring respRing;
274 struct net_device_stats stats;
275 struct net_device_stats stats_saved;
276 const char * name;
277 struct typhoon_shared * shared;
278 dma_addr_t shared_dma;
279 u16 xcvr_select;
280 u16 wol_events;
281 u32 offload;
282 u32 pci_state[16];
284 /* unused stuff (future use) */
285 int capabilities;
286 struct transmit_ring txHiRing;
/* How typhoon_reset() should wait for the card to come back:
 * not at all, busy-wait (atomic context), or sleeping.
 */
enum completion_wait_values {
	NoWait = 0, WaitNoSleep, WaitSleep,
};
/* These are the values for the typhoon.card_state variable.
 * These determine where the statistics will come from in get_stats().
 * The sleep image does not support the statistics we need.
 */
enum state_values {
	Sleeping = 0, Running,
};
/* PCI writes are not guaranteed to be posted in order, but outstanding writes
 * cannot pass a read, so this forces current writes to post.
 */
#define typhoon_post_pci_writes(x) \
	do { readl(x + TYPHOON_REG_HEARTBEAT); } while(0)
307 /* We'll wait up to six seconds for a reset, and half a second normally.
309 #define TYPHOON_UDELAY 50
310 #define TYPHOON_RESET_TIMEOUT_SLEEP (6 * HZ)
311 #define TYPHOON_RESET_TIMEOUT_NOSLEEP ((6 * 1000000) / TYPHOON_UDELAY)
312 #define TYPHOON_WAIT_TIMEOUT ((1000000 / 2) / TYPHOON_UDELAY)
314 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 28)
315 #define typhoon_synchronize_irq(x) synchronize_irq()
316 #else
317 #define typhoon_synchronize_irq(x) synchronize_irq(x)
318 #endif
320 #if defined(NETIF_F_TSO)
321 #define skb_tso_size(x) (skb_shinfo(x)->tso_size)
322 #define TSO_NUM_DESCRIPTORS 2
323 #define TSO_OFFLOAD_ON TYPHOON_OFFLOAD_TCP_SEGMENT
324 #else
325 #define NETIF_F_TSO 0
326 #define skb_tso_size(x) 0
327 #define TSO_NUM_DESCRIPTORS 0
328 #define TSO_OFFLOAD_ON 0
329 #endif
331 static inline void
332 typhoon_inc_index(u32 *index, const int count, const int num_entries)
334 /* Increment a ring index -- we can use this for all rings execept
335 * the Rx rings, as they use different size descriptors
336 * otherwise, everything is the same size as a cmd_desc
338 *index += count * sizeof(struct cmd_desc);
339 *index %= num_entries * sizeof(struct cmd_desc);
342 static inline void
343 typhoon_inc_cmd_index(u32 *index, const int count)
345 typhoon_inc_index(index, count, COMMAND_ENTRIES);
348 static inline void
349 typhoon_inc_resp_index(u32 *index, const int count)
351 typhoon_inc_index(index, count, RESPONSE_ENTRIES);
354 static inline void
355 typhoon_inc_rxfree_index(u32 *index, const int count)
357 typhoon_inc_index(index, count, RXFREE_ENTRIES);
360 static inline void
361 typhoon_inc_tx_index(u32 *index, const int count)
363 /* if we start using the Hi Tx ring, this needs updateing */
364 typhoon_inc_index(index, count, TXLO_ENTRIES);
367 static inline void
368 typhoon_inc_rx_index(u32 *index, const int count)
370 /* sizeof(struct rx_desc) != sizeof(struct cmd_desc) */
371 *index += count * sizeof(struct rx_desc);
372 *index %= RX_ENTRIES * sizeof(struct rx_desc);
375 static int
376 typhoon_reset(void __iomem *ioaddr, int wait_type)
378 int i, err = 0;
379 int timeout;
381 if(wait_type == WaitNoSleep)
382 timeout = TYPHOON_RESET_TIMEOUT_NOSLEEP;
383 else
384 timeout = TYPHOON_RESET_TIMEOUT_SLEEP;
386 writel(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
387 writel(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
389 writel(TYPHOON_RESET_ALL, ioaddr + TYPHOON_REG_SOFT_RESET);
390 typhoon_post_pci_writes(ioaddr);
391 udelay(1);
392 writel(TYPHOON_RESET_NONE, ioaddr + TYPHOON_REG_SOFT_RESET);
394 if(wait_type != NoWait) {
395 for(i = 0; i < timeout; i++) {
396 if(readl(ioaddr + TYPHOON_REG_STATUS) ==
397 TYPHOON_STATUS_WAITING_FOR_HOST)
398 goto out;
400 if(wait_type == WaitSleep) {
401 set_current_state(TASK_UNINTERRUPTIBLE);
402 schedule_timeout(1);
403 } else
404 udelay(TYPHOON_UDELAY);
407 err = -ETIMEDOUT;
410 out:
411 writel(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
412 writel(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
413 udelay(100);
414 return err;
416 /* The 3XP seems to need a little extra time to complete the load
417 * of the sleep image before we can reliably boot it. Failure to
418 * do this occasionally results in a hung adapter after boot in
419 * typhoon_init_one() while trying to read the MAC address or
420 * putting the card to sleep. 3Com's driver waits 5ms, but
421 * that seems to be overkill -- with a 50usec delay, it survives
422  * 35000 typhoon_init_one() calls, where it only makes it 25-100
423 * without it.
425 * As it turns out, still occasionally getting a hung adapter,
426 * so I'm bumping it to 100us.
430 static int
431 typhoon_wait_status(void __iomem *ioaddr, u32 wait_value)
433 int i, err = 0;
435 for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
436 if(readl(ioaddr + TYPHOON_REG_STATUS) == wait_value)
437 goto out;
438 udelay(TYPHOON_UDELAY);
441 err = -ETIMEDOUT;
443 out:
444 return err;
447 static inline void
448 typhoon_media_status(struct net_device *dev, struct resp_desc *resp)
450 if(resp->parm1 & TYPHOON_MEDIA_STAT_NO_LINK)
451 netif_carrier_off(dev);
452 else
453 netif_carrier_on(dev);
456 static inline void
457 typhoon_hello(struct typhoon *tp)
459 struct basic_ring *ring = &tp->cmdRing;
460 struct cmd_desc *cmd;
462 /* We only get a hello request if we've not sent anything to the
463 * card in a long while. If the lock is held, then we're in the
464 * process of issuing a command, so we don't need to respond.
466 if(spin_trylock(&tp->command_lock)) {
467 cmd = (struct cmd_desc *)(ring->ringBase + ring->lastWrite);
468 typhoon_inc_cmd_index(&ring->lastWrite, 1);
470 INIT_COMMAND_NO_RESPONSE(cmd, TYPHOON_CMD_HELLO_RESP);
471 smp_wmb();
472 writel(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
473 spin_unlock(&tp->command_lock);
477 static int
478 typhoon_process_response(struct typhoon *tp, int resp_size,
479 struct resp_desc *resp_save)
481 struct typhoon_indexes *indexes = tp->indexes;
482 struct resp_desc *resp;
483 u8 *base = tp->respRing.ringBase;
484 int count, len, wrap_len;
485 u32 cleared;
486 u32 ready;
488 cleared = le32_to_cpu(indexes->respCleared);
489 ready = le32_to_cpu(indexes->respReady);
490 while(cleared != ready) {
491 resp = (struct resp_desc *)(base + cleared);
492 count = resp->numDesc + 1;
493 if(resp_save && resp->seqNo) {
494 if(count > resp_size) {
495 resp_save->flags = TYPHOON_RESP_ERROR;
496 goto cleanup;
499 wrap_len = 0;
500 len = count * sizeof(*resp);
501 if(unlikely(cleared + len > RESPONSE_RING_SIZE)) {
502 wrap_len = cleared + len - RESPONSE_RING_SIZE;
503 len = RESPONSE_RING_SIZE - cleared;
506 memcpy(resp_save, resp, len);
507 if(unlikely(wrap_len)) {
508 resp_save += len / sizeof(*resp);
509 memcpy(resp_save, base, wrap_len);
512 resp_save = NULL;
513 } else if(resp->cmd == TYPHOON_CMD_READ_MEDIA_STATUS) {
514 typhoon_media_status(tp->dev, resp);
515 } else if(resp->cmd == TYPHOON_CMD_HELLO_RESP) {
516 typhoon_hello(tp);
517 } else {
518 printk(KERN_ERR "%s: dumping unexpected response "
519 "0x%04x:%d:0x%02x:0x%04x:%08x:%08x\n",
520 tp->name, le16_to_cpu(resp->cmd),
521 resp->numDesc, resp->flags,
522 le16_to_cpu(resp->parm1),
523 le32_to_cpu(resp->parm2),
524 le32_to_cpu(resp->parm3));
527 cleanup:
528 typhoon_inc_resp_index(&cleared, count);
531 indexes->respCleared = cpu_to_le32(cleared);
532 wmb();
533 return (resp_save == NULL);
536 static inline int
537 typhoon_num_free(int lastWrite, int lastRead, int ringSize)
539 /* this works for all descriptors but rx_desc, as they are a
540 * different size than the cmd_desc -- everyone else is the same
542 lastWrite /= sizeof(struct cmd_desc);
543 lastRead /= sizeof(struct cmd_desc);
544 return (ringSize + lastRead - lastWrite - 1) % ringSize;
547 static inline int
548 typhoon_num_free_cmd(struct typhoon *tp)
550 int lastWrite = tp->cmdRing.lastWrite;
551 int cmdCleared = le32_to_cpu(tp->indexes->cmdCleared);
553 return typhoon_num_free(lastWrite, cmdCleared, COMMAND_ENTRIES);
556 static inline int
557 typhoon_num_free_resp(struct typhoon *tp)
559 int respReady = le32_to_cpu(tp->indexes->respReady);
560 int respCleared = le32_to_cpu(tp->indexes->respCleared);
562 return typhoon_num_free(respReady, respCleared, RESPONSE_ENTRIES);
565 static inline int
566 typhoon_num_free_tx(struct transmit_ring *ring)
568 /* if we start using the Hi Tx ring, this needs updating */
569 return typhoon_num_free(ring->lastWrite, ring->lastRead, TXLO_ENTRIES);
572 static int
573 typhoon_issue_command(struct typhoon *tp, int num_cmd, struct cmd_desc *cmd,
574 int num_resp, struct resp_desc *resp)
576 struct typhoon_indexes *indexes = tp->indexes;
577 struct basic_ring *ring = &tp->cmdRing;
578 struct resp_desc local_resp;
579 int i, err = 0;
580 int got_resp;
581 int freeCmd, freeResp;
582 int len, wrap_len;
584 spin_lock(&tp->command_lock);
586 freeCmd = typhoon_num_free_cmd(tp);
587 freeResp = typhoon_num_free_resp(tp);
589 if(freeCmd < num_cmd || freeResp < num_resp) {
590 printk("%s: no descs for cmd, had (needed) %d (%d) cmd, "
591 "%d (%d) resp\n", tp->name, freeCmd, num_cmd,
592 freeResp, num_resp);
593 err = -ENOMEM;
594 goto out;
597 if(cmd->flags & TYPHOON_CMD_RESPOND) {
598 /* If we're expecting a response, but the caller hasn't given
599 * us a place to put it, we'll provide one.
601 tp->awaiting_resp = 1;
602 if(resp == NULL) {
603 resp = &local_resp;
604 num_resp = 1;
608 wrap_len = 0;
609 len = num_cmd * sizeof(*cmd);
610 if(unlikely(ring->lastWrite + len > COMMAND_RING_SIZE)) {
611 wrap_len = ring->lastWrite + len - COMMAND_RING_SIZE;
612 len = COMMAND_RING_SIZE - ring->lastWrite;
615 memcpy(ring->ringBase + ring->lastWrite, cmd, len);
616 if(unlikely(wrap_len)) {
617 struct cmd_desc *wrap_ptr = cmd;
618 wrap_ptr += len / sizeof(*cmd);
619 memcpy(ring->ringBase, wrap_ptr, wrap_len);
622 typhoon_inc_cmd_index(&ring->lastWrite, num_cmd);
624 /* "I feel a presence... another warrior is on the the mesa."
626 wmb();
627 writel(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
628 typhoon_post_pci_writes(tp->ioaddr);
630 if((cmd->flags & TYPHOON_CMD_RESPOND) == 0)
631 goto out;
633 /* Ugh. We'll be here about 8ms, spinning our thumbs, unable to
634 * preempt or do anything other than take interrupts. So, don't
635 * wait for a response unless you have to.
637 * I've thought about trying to sleep here, but we're called
638 * from many contexts that don't allow that. Also, given the way
639 * 3Com has implemented irq coalescing, we would likely timeout --
640 * this has been observed in real life!
642 * The big killer is we have to wait to get stats from the card,
643 * though we could go to a periodic refresh of those if we don't
644 * mind them getting somewhat stale. The rest of the waiting
645 * commands occur during open/close/suspend/resume, so they aren't
646 * time critical. Creating SAs in the future will also have to
647 * wait here.
649 got_resp = 0;
650 for(i = 0; i < TYPHOON_WAIT_TIMEOUT && !got_resp; i++) {
651 if(indexes->respCleared != indexes->respReady)
652 got_resp = typhoon_process_response(tp, num_resp,
653 resp);
654 udelay(TYPHOON_UDELAY);
657 if(!got_resp) {
658 err = -ETIMEDOUT;
659 goto out;
662 /* Collect the error response even if we don't care about the
663 * rest of the response
665 if(resp->flags & TYPHOON_RESP_ERROR)
666 err = -EIO;
668 out:
669 if(tp->awaiting_resp) {
670 tp->awaiting_resp = 0;
671 smp_wmb();
673 /* Ugh. If a response was added to the ring between
674 * the call to typhoon_process_response() and the clearing
675 * of tp->awaiting_resp, we could have missed the interrupt
676 * and it could hang in the ring an indeterminate amount of
677 * time. So, check for it, and interrupt ourselves if this
678 * is the case.
680 if(indexes->respCleared != indexes->respReady)
681 writel(1, tp->ioaddr + TYPHOON_REG_SELF_INTERRUPT);
684 spin_unlock(&tp->command_lock);
685 return err;
688 static void
689 typhoon_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
691 struct typhoon *tp = (struct typhoon *) dev->priv;
692 struct cmd_desc xp_cmd;
693 int err;
695 spin_lock_bh(&tp->state_lock);
696 if(!tp->vlgrp != !grp) {
697 /* We've either been turned on for the first time, or we've
698 * been turned off. Update the 3XP.
700 if(grp)
701 tp->offload |= TYPHOON_OFFLOAD_VLAN;
702 else
703 tp->offload &= ~TYPHOON_OFFLOAD_VLAN;
705 /* If the interface is up, the runtime is running -- and we
706 * must be up for the vlan core to call us.
708 * Do the command outside of the spin lock, as it is slow.
710 INIT_COMMAND_WITH_RESPONSE(&xp_cmd,
711 TYPHOON_CMD_SET_OFFLOAD_TASKS);
712 xp_cmd.parm2 = tp->offload;
713 xp_cmd.parm3 = tp->offload;
714 spin_unlock_bh(&tp->state_lock);
715 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
716 if(err < 0)
717 printk("%s: vlan offload error %d\n", tp->name, -err);
718 spin_lock_bh(&tp->state_lock);
721 /* now make the change visible */
722 tp->vlgrp = grp;
723 spin_unlock_bh(&tp->state_lock);
726 static void
727 typhoon_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
729 struct typhoon *tp = (struct typhoon *) dev->priv;
730 spin_lock_bh(&tp->state_lock);
731 if(tp->vlgrp)
732 tp->vlgrp->vlan_devices[vid] = NULL;
733 spin_unlock_bh(&tp->state_lock);
736 static inline void
737 typhoon_tso_fill(struct sk_buff *skb, struct transmit_ring *txRing,
738 u32 ring_dma)
740 struct tcpopt_desc *tcpd;
741 u32 tcpd_offset = ring_dma;
743 tcpd = (struct tcpopt_desc *) (txRing->ringBase + txRing->lastWrite);
744 tcpd_offset += txRing->lastWrite;
745 tcpd_offset += offsetof(struct tcpopt_desc, bytesTx);
746 typhoon_inc_tx_index(&txRing->lastWrite, 1);
748 tcpd->flags = TYPHOON_OPT_DESC | TYPHOON_OPT_TCP_SEG;
749 tcpd->numDesc = 1;
750 tcpd->mss_flags = cpu_to_le16(skb_tso_size(skb));
751 tcpd->mss_flags |= TYPHOON_TSO_FIRST | TYPHOON_TSO_LAST;
752 tcpd->respAddrLo = cpu_to_le32(tcpd_offset);
753 tcpd->bytesTx = cpu_to_le32(skb->len);
754 tcpd->status = 0;
757 static int
758 typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
760 struct typhoon *tp = (struct typhoon *) dev->priv;
761 struct transmit_ring *txRing;
762 struct tx_desc *txd, *first_txd;
763 dma_addr_t skb_dma;
764 int numDesc;
766 /* we have two rings to choose from, but we only use txLo for now
767 * If we start using the Hi ring as well, we'll need to update
768 * typhoon_stop_runtime(), typhoon_interrupt(), typhoon_num_free_tx(),
769 * and TXHI_ENTIRES to match, as well as update the TSO code below
770 * to get the right DMA address
772 txRing = &tp->txLoRing;
774 /* We need one descriptor for each fragment of the sk_buff, plus the
775 * one for the ->data area of it.
777 * The docs say a maximum of 16 fragment descriptors per TCP option
778 * descriptor, then make a new packet descriptor and option descriptor
779 * for the next 16 fragments. The engineers say just an option
780 * descriptor is needed. I've tested up to 26 fragments with a single
781 * packet descriptor/option descriptor combo, so I use that for now.
783 * If problems develop with TSO, check this first.
785 numDesc = skb_shinfo(skb)->nr_frags + 1;
786 if(skb_tso_size(skb))
787 numDesc++;
789 /* When checking for free space in the ring, we need to also
790 * account for the initial Tx descriptor, and we always must leave
791 * at least one descriptor unused in the ring so that it doesn't
792 * wrap and look empty.
794 * The only time we should loop here is when we hit the race
795 * between marking the queue awake and updating the cleared index.
796 * Just loop and it will appear. This comes from the acenic driver.
798 while(unlikely(typhoon_num_free_tx(txRing) < (numDesc + 2)))
799 smp_rmb();
801 first_txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
802 typhoon_inc_tx_index(&txRing->lastWrite, 1);
804 first_txd->flags = TYPHOON_TX_DESC | TYPHOON_DESC_VALID;
805 first_txd->numDesc = 0;
806 first_txd->len = 0;
807 first_txd->addr = (u64)((unsigned long) skb) & 0xffffffff;
808 first_txd->addrHi = (u64)((unsigned long) skb) >> 32;
809 first_txd->processFlags = 0;
811 if(skb->ip_summed == CHECKSUM_HW) {
812 /* The 3XP will figure out if this is UDP/TCP */
813 first_txd->processFlags |= TYPHOON_TX_PF_TCP_CHKSUM;
814 first_txd->processFlags |= TYPHOON_TX_PF_UDP_CHKSUM;
815 first_txd->processFlags |= TYPHOON_TX_PF_IP_CHKSUM;
818 if(vlan_tx_tag_present(skb)) {
819 first_txd->processFlags |=
820 TYPHOON_TX_PF_INSERT_VLAN | TYPHOON_TX_PF_VLAN_PRIORITY;
821 first_txd->processFlags |=
822 cpu_to_le32(htons(vlan_tx_tag_get(skb)) <<
823 TYPHOON_TX_PF_VLAN_TAG_SHIFT);
826 if(skb_tso_size(skb)) {
827 first_txd->processFlags |= TYPHOON_TX_PF_TCP_SEGMENT;
828 first_txd->numDesc++;
830 typhoon_tso_fill(skb, txRing, tp->txlo_dma_addr);
833 txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
834 typhoon_inc_tx_index(&txRing->lastWrite, 1);
836 /* No need to worry about padding packet -- the firmware pads
837 * it with zeros to ETH_ZLEN for us.
839 if(skb_shinfo(skb)->nr_frags == 0) {
840 skb_dma = pci_map_single(tp->tx_pdev, skb->data, skb->len,
841 PCI_DMA_TODEVICE);
842 txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
843 txd->len = cpu_to_le16(skb->len);
844 txd->addr = cpu_to_le32(skb_dma);
845 txd->addrHi = 0;
846 first_txd->numDesc++;
847 } else {
848 int i, len;
850 len = skb_headlen(skb);
851 skb_dma = pci_map_single(tp->tx_pdev, skb->data, len,
852 PCI_DMA_TODEVICE);
853 txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
854 txd->len = cpu_to_le16(len);
855 txd->addr = cpu_to_le32(skb_dma);
856 txd->addrHi = 0;
857 first_txd->numDesc++;
859 for(i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
860 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
861 void *frag_addr;
863 txd = (struct tx_desc *) (txRing->ringBase +
864 txRing->lastWrite);
865 typhoon_inc_tx_index(&txRing->lastWrite, 1);
867 len = frag->size;
868 frag_addr = (void *) page_address(frag->page) +
869 frag->page_offset;
870 skb_dma = pci_map_single(tp->tx_pdev, frag_addr, len,
871 PCI_DMA_TODEVICE);
872 txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
873 txd->len = cpu_to_le16(len);
874 txd->addr = cpu_to_le32(skb_dma);
875 txd->addrHi = 0;
876 first_txd->numDesc++;
880 /* Kick the 3XP
882 wmb();
883 writel(txRing->lastWrite, tp->tx_ioaddr + txRing->writeRegister);
885 dev->trans_start = jiffies;
887 /* If we don't have room to put the worst case packet on the
888 * queue, then we must stop the queue. We need 2 extra
889 * descriptors -- one to prevent ring wrap, and one for the
890 * Tx header.
892 numDesc = MAX_SKB_FRAGS + TSO_NUM_DESCRIPTORS + 1;
894 if(typhoon_num_free_tx(txRing) < (numDesc + 2)) {
895 netif_stop_queue(dev);
897 /* A Tx complete IRQ could have gotten inbetween, making
898 * the ring free again. Only need to recheck here, since
899 * Tx is serialized.
901 if(typhoon_num_free_tx(txRing) >= (numDesc + 2))
902 netif_wake_queue(dev);
905 return 0;
908 static void
909 typhoon_set_rx_mode(struct net_device *dev)
911 struct typhoon *tp = (struct typhoon *) dev->priv;
912 struct cmd_desc xp_cmd;
913 u32 mc_filter[2];
914 u16 filter;
916 filter = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
917 if(dev->flags & IFF_PROMISC) {
918 printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n",
919 dev->name);
920 filter |= TYPHOON_RX_FILTER_PROMISCOUS;
921 } else if((dev->mc_count > multicast_filter_limit) ||
922 (dev->flags & IFF_ALLMULTI)) {
923 /* Too many to match, or accept all multicasts. */
924 filter |= TYPHOON_RX_FILTER_ALL_MCAST;
925 } else if(dev->mc_count) {
926 struct dev_mc_list *mclist;
927 int i;
929 memset(mc_filter, 0, sizeof(mc_filter));
930 for(i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
931 i++, mclist = mclist->next) {
932 int bit = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3f;
933 mc_filter[bit >> 5] |= 1 << (bit & 0x1f);
936 INIT_COMMAND_NO_RESPONSE(&xp_cmd,
937 TYPHOON_CMD_SET_MULTICAST_HASH);
938 xp_cmd.parm1 = TYPHOON_MCAST_HASH_SET;
939 xp_cmd.parm2 = cpu_to_le32(mc_filter[0]);
940 xp_cmd.parm3 = cpu_to_le32(mc_filter[1]);
941 typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
943 filter |= TYPHOON_RX_FILTER_MCAST_HASH;
946 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
947 xp_cmd.parm1 = filter;
948 typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
951 static int
952 typhoon_do_get_stats(struct typhoon *tp)
954 struct net_device_stats *stats = &tp->stats;
955 struct net_device_stats *saved = &tp->stats_saved;
956 struct cmd_desc xp_cmd;
957 struct resp_desc xp_resp[7];
958 struct stats_resp *s = (struct stats_resp *) xp_resp;
959 int err;
961 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_STATS);
962 err = typhoon_issue_command(tp, 1, &xp_cmd, 7, xp_resp);
963 if(err < 0)
964 return err;
966 /* 3Com's Linux driver uses txMultipleCollisions as it's
967 * collisions value, but there is some other collision info as well...
969 stats->tx_packets = le32_to_cpu(s->txPackets);
970 stats->tx_bytes = le32_to_cpu(s->txBytes);
971 stats->tx_errors = le32_to_cpu(s->txCarrierLost);
972 stats->tx_carrier_errors = le32_to_cpu(s->txCarrierLost);
973 stats->collisions = le32_to_cpu(s->txMultipleCollisions);
974 stats->rx_packets = le32_to_cpu(s->rxPacketsGood);
975 stats->rx_bytes = le32_to_cpu(s->rxBytesGood);
976 stats->rx_fifo_errors = le32_to_cpu(s->rxFifoOverruns);
977 stats->rx_errors = le32_to_cpu(s->rxFifoOverruns) +
978 le32_to_cpu(s->BadSSD) + le32_to_cpu(s->rxCrcErrors);
979 stats->rx_crc_errors = le32_to_cpu(s->rxCrcErrors);
980 stats->rx_length_errors = le32_to_cpu(s->rxOversized);
981 tp->speed = (s->linkStatus & TYPHOON_LINK_100MBPS) ?
982 SPEED_100 : SPEED_10;
983 tp->duplex = (s->linkStatus & TYPHOON_LINK_FULL_DUPLEX) ?
984 DUPLEX_FULL : DUPLEX_HALF;
986 /* add in the saved statistics
988 stats->tx_packets += saved->tx_packets;
989 stats->tx_bytes += saved->tx_bytes;
990 stats->tx_errors += saved->tx_errors;
991 stats->collisions += saved->collisions;
992 stats->rx_packets += saved->rx_packets;
993 stats->rx_bytes += saved->rx_bytes;
994 stats->rx_fifo_errors += saved->rx_fifo_errors;
995 stats->rx_errors += saved->rx_errors;
996 stats->rx_crc_errors += saved->rx_crc_errors;
997 stats->rx_length_errors += saved->rx_length_errors;
999 return 0;
1002 static struct net_device_stats *
1003 typhoon_get_stats(struct net_device *dev)
1005 struct typhoon *tp = (struct typhoon *) dev->priv;
1006 struct net_device_stats *stats = &tp->stats;
1007 struct net_device_stats *saved = &tp->stats_saved;
1009 smp_rmb();
1010 if(tp->card_state == Sleeping)
1011 return saved;
1013 if(typhoon_do_get_stats(tp) < 0) {
1014 printk(KERN_ERR "%s: error getting stats\n", dev->name);
1015 return saved;
1018 return stats;
1021 static int
1022 typhoon_set_mac_address(struct net_device *dev, void *addr)
1024 struct sockaddr *saddr = (struct sockaddr *) addr;
1026 if(netif_running(dev))
1027 return -EBUSY;
1029 memcpy(dev->dev_addr, saddr->sa_data, dev->addr_len);
1030 return 0;
1033 static inline void
1034 typhoon_ethtool_gdrvinfo(struct typhoon *tp, struct ethtool_drvinfo *info)
1036 struct pci_dev *pci_dev = tp->pdev;
1037 struct cmd_desc xp_cmd;
1038 struct resp_desc xp_resp[3];
1040 smp_rmb();
1041 if(tp->card_state == Sleeping) {
1042 strcpy(info->fw_version, "Sleep image");
1043 } else {
1044 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
1045 if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
1046 strcpy(info->fw_version, "Unknown runtime");
1047 } else {
1048 strncpy(info->fw_version, (char *) &xp_resp[1], 32);
1049 info->fw_version[31] = 0;
1053 strcpy(info->driver, DRV_MODULE_NAME);
1054 strcpy(info->version, DRV_MODULE_VERSION);
1055 strcpy(info->bus_info, pci_name(pci_dev));
1058 static inline void
1059 typhoon_ethtool_gset(struct typhoon *tp, struct ethtool_cmd *cmd)
1061 cmd->supported = SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
1062 SUPPORTED_Autoneg;
1064 switch (tp->xcvr_select) {
1065 case TYPHOON_XCVR_10HALF:
1066 cmd->advertising = ADVERTISED_10baseT_Half;
1067 break;
1068 case TYPHOON_XCVR_10FULL:
1069 cmd->advertising = ADVERTISED_10baseT_Full;
1070 break;
1071 case TYPHOON_XCVR_100HALF:
1072 cmd->advertising = ADVERTISED_100baseT_Half;
1073 break;
1074 case TYPHOON_XCVR_100FULL:
1075 cmd->advertising = ADVERTISED_100baseT_Full;
1076 break;
1077 case TYPHOON_XCVR_AUTONEG:
1078 cmd->advertising = ADVERTISED_10baseT_Half |
1079 ADVERTISED_10baseT_Full |
1080 ADVERTISED_100baseT_Half |
1081 ADVERTISED_100baseT_Full |
1082 ADVERTISED_Autoneg;
1083 break;
1086 if(tp->capabilities & TYPHOON_FIBER) {
1087 cmd->supported |= SUPPORTED_FIBRE;
1088 cmd->advertising |= ADVERTISED_FIBRE;
1089 cmd->port = PORT_FIBRE;
1090 } else {
1091 cmd->supported |= SUPPORTED_10baseT_Half |
1092 SUPPORTED_10baseT_Full |
1093 SUPPORTED_TP;
1094 cmd->advertising |= ADVERTISED_TP;
1095 cmd->port = PORT_TP;
1098 /* need to get stats to make these link speed/duplex valid */
1099 typhoon_do_get_stats(tp);
1100 cmd->speed = tp->speed;
1101 cmd->duplex = tp->duplex;
1102 cmd->phy_address = 0;
1103 cmd->transceiver = XCVR_INTERNAL;
1104 if(tp->xcvr_select == TYPHOON_XCVR_AUTONEG)
1105 cmd->autoneg = AUTONEG_ENABLE;
1106 else
1107 cmd->autoneg = AUTONEG_DISABLE;
1108 cmd->maxtxpkt = 1;
1109 cmd->maxrxpkt = 1;
1112 static inline int
1113 typhoon_ethtool_sset(struct typhoon *tp, struct ethtool_cmd *cmd)
1115 struct cmd_desc xp_cmd;
1116 int xcvr;
1117 int err;
1119 if(cmd->autoneg == AUTONEG_ENABLE) {
1120 xcvr = TYPHOON_XCVR_AUTONEG;
1121 } else {
1122 if(cmd->duplex == DUPLEX_HALF) {
1123 if(cmd->speed == SPEED_10)
1124 xcvr = TYPHOON_XCVR_10HALF;
1125 else if(cmd->speed == SPEED_100)
1126 xcvr = TYPHOON_XCVR_100HALF;
1127 else
1128 return -EINVAL;
1129 } else if(cmd->duplex == DUPLEX_FULL) {
1130 if(cmd->speed == SPEED_10)
1131 xcvr = TYPHOON_XCVR_10FULL;
1132 else if(cmd->speed == SPEED_100)
1133 xcvr = TYPHOON_XCVR_100FULL;
1134 else
1135 return -EINVAL;
1136 } else
1137 return -EINVAL;
1140 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT);
1141 xp_cmd.parm1 = cpu_to_le16(xcvr);
1142 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1143 if(err < 0)
1144 return err;
1146 tp->xcvr_select = xcvr;
1147 if(cmd->autoneg == AUTONEG_ENABLE) {
1148 tp->speed = 0xff; /* invalid */
1149 tp->duplex = 0xff; /* invalid */
1150 } else {
1151 tp->speed = cmd->speed;
1152 tp->duplex = cmd->duplex;
1155 return 0;
/* SIOCETHTOOL dispatcher: reads the ethtool sub-command word from userspace
 * and handles the subset this driver implements -- drvinfo, gset/sset,
 * link state, and wake-on-LAN get/set. Anything else is -EOPNOTSUPP.
 */
1158 static inline int
1159 typhoon_ethtool_ioctl(struct net_device *dev, void __user *useraddr)
1161 struct typhoon *tp = (struct typhoon *) dev->priv;
1162 u32 ethcmd;
/* First u32 at useraddr identifies the ethtool sub-command. */
1164 if(copy_from_user(&ethcmd, useraddr, sizeof(ethcmd)))
1165 return -EFAULT;
1167 switch (ethcmd) {
1168 case ETHTOOL_GDRVINFO: {
1169 struct ethtool_drvinfo info = { ETHTOOL_GDRVINFO };
1171 typhoon_ethtool_gdrvinfo(tp, &info);
1172 if(copy_to_user(useraddr, &info, sizeof(info)))
1173 return -EFAULT;
1174 return 0;
1176 case ETHTOOL_GSET: {
1177 struct ethtool_cmd cmd = { ETHTOOL_GSET };
1179 typhoon_ethtool_gset(tp, &cmd);
1180 if(copy_to_user(useraddr, &cmd, sizeof(cmd)))
1181 return -EFAULT;
1182 return 0;
1184 case ETHTOOL_SSET: {
1185 struct ethtool_cmd cmd;
1186 if(copy_from_user(&cmd, useraddr, sizeof(cmd)))
1187 return -EFAULT;
1189 return typhoon_ethtool_sset(tp, &cmd);
/* Link state comes from the software carrier flag, not the card. */
1191 case ETHTOOL_GLINK:{
1192 struct ethtool_value edata = { ETHTOOL_GLINK };
1194 edata.data = netif_carrier_ok(dev) ? 1 : 0;
1195 if(copy_to_user(useraddr, &edata, sizeof(edata)))
1196 return -EFAULT;
1197 return 0;
/* Translate the driver's wake-event bits to/from ethtool WAKE_* flags. */
1199 case ETHTOOL_GWOL: {
1200 struct ethtool_wolinfo wol = { ETHTOOL_GWOL };
1202 if(tp->wol_events & TYPHOON_WAKE_LINK_EVENT)
1203 wol.wolopts |= WAKE_PHY;
1204 if(tp->wol_events & TYPHOON_WAKE_MAGIC_PKT)
1205 wol.wolopts |= WAKE_MAGIC;
1206 if(copy_to_user(useraddr, &wol, sizeof(wol)))
1207 return -EFAULT;
1208 return 0;
1210 case ETHTOOL_SWOL: {
1211 struct ethtool_wolinfo wol;
1213 if(copy_from_user(&wol, useraddr, sizeof(wol)))
1214 return -EFAULT;
1215 tp->wol_events = 0;
1216 if(wol.wolopts & WAKE_PHY)
1217 tp->wol_events |= TYPHOON_WAKE_LINK_EVENT;
1218 if(wol.wolopts & WAKE_MAGIC)
1219 tp->wol_events |= TYPHOON_WAKE_MAGIC_PKT;
1220 return 0;
1222 default:
1223 break;
1226 return -EOPNOTSUPP;
1229 static int
1230 typhoon_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1232 switch (cmd) {
1233 case SIOCETHTOOL:
1234 return typhoon_ethtool_ioctl(dev, ifr->ifr_data);
1235 default:
1236 break;
1239 return -EOPNOTSUPP;
1242 static int
1243 typhoon_wait_interrupt(void __iomem *ioaddr)
1245 int i, err = 0;
1247 for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
1248 if(readl(ioaddr + TYPHOON_REG_INTR_STATUS) &
1249 TYPHOON_INTR_BOOTCMD)
1250 goto out;
1251 udelay(TYPHOON_UDELAY);
1254 err = -ETIMEDOUT;
1256 out:
1257 writel(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS);
1258 return err;
/* byte offset of member x within the host/card shared memory block */
1261 #define shared_offset(x) offsetof(struct typhoon_shared, x)
/* typhoon_init_interface(): describe the shared-memory ring layout to the
 * card (DMA addresses and sizes in the iface block) and set up the host-side
 * ring bookkeeping in the softc. Called once at probe time.
 */
1263 static void
1264 typhoon_init_interface(struct typhoon *tp)
1266 struct typhoon_interface *iface = &tp->shared->iface;
1267 dma_addr_t shared_dma;
1269 memset(tp->shared, 0, sizeof(struct typhoon_shared));
1271 /* The *Hi members of iface are all init'd to zero by the memset().
/* Each ring's bus address is the shared block's DMA base plus the
 * member's offset within struct typhoon_shared.
 */
1273 shared_dma = tp->shared_dma + shared_offset(indexes);
1274 iface->ringIndex = cpu_to_le32(shared_dma);
1276 shared_dma = tp->shared_dma + shared_offset(txLo);
1277 iface->txLoAddr = cpu_to_le32(shared_dma);
1278 iface->txLoSize = cpu_to_le32(TXLO_ENTRIES * sizeof(struct tx_desc));
1280 shared_dma = tp->shared_dma + shared_offset(txHi);
1281 iface->txHiAddr = cpu_to_le32(shared_dma);
1282 iface->txHiSize = cpu_to_le32(TXHI_ENTRIES * sizeof(struct tx_desc));
1284 shared_dma = tp->shared_dma + shared_offset(rxBuff);
1285 iface->rxBuffAddr = cpu_to_le32(shared_dma);
1286 iface->rxBuffSize = cpu_to_le32(RXFREE_ENTRIES *
1287 sizeof(struct rx_free));
1289 shared_dma = tp->shared_dma + shared_offset(rxLo);
1290 iface->rxLoAddr = cpu_to_le32(shared_dma);
1291 iface->rxLoSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc));
1293 shared_dma = tp->shared_dma + shared_offset(rxHi);
1294 iface->rxHiAddr = cpu_to_le32(shared_dma);
1295 iface->rxHiSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc));
1297 shared_dma = tp->shared_dma + shared_offset(cmd);
1298 iface->cmdAddr = cpu_to_le32(shared_dma);
1299 iface->cmdSize = cpu_to_le32(COMMAND_RING_SIZE);
1301 shared_dma = tp->shared_dma + shared_offset(resp);
1302 iface->respAddr = cpu_to_le32(shared_dma);
1303 iface->respSize = cpu_to_le32(RESPONSE_RING_SIZE);
1305 shared_dma = tp->shared_dma + shared_offset(zeroWord);
1306 iface->zeroAddr = cpu_to_le32(shared_dma);
/* Host-side (kernel virtual) views of the same rings. */
1308 tp->indexes = &tp->shared->indexes;
1309 tp->txLoRing.ringBase = (u8 *) tp->shared->txLo;
1310 tp->txHiRing.ringBase = (u8 *) tp->shared->txHi;
1311 tp->rxLoRing.ringBase = (u8 *) tp->shared->rxLo;
1312 tp->rxHiRing.ringBase = (u8 *) tp->shared->rxHi;
1313 tp->rxBuffRing.ringBase = (u8 *) tp->shared->rxBuff;
1314 tp->cmdRing.ringBase = (u8 *) tp->shared->cmd;
1315 tp->respRing.ringBase = (u8 *) tp->shared->resp;
1317 tp->txLoRing.writeRegister = TYPHOON_REG_TX_LO_READY;
1318 tp->txHiRing.writeRegister = TYPHOON_REG_TX_HI_READY;
1320 tp->txlo_dma_addr = iface->txLoAddr;
/* Card starts out asleep; publish the state before anyone reads it. */
1321 tp->card_state = Sleeping;
1322 smp_wmb();
1324 tp->offload = TYPHOON_OFFLOAD_IP_CHKSUM | TYPHOON_OFFLOAD_TCP_CHKSUM;
1325 tp->offload |= TYPHOON_OFFLOAD_UDP_CHKSUM | TSO_OFFLOAD_ON;
1327 spin_lock_init(&tp->command_lock);
1328 spin_lock_init(&tp->state_lock);
1331 static void
1332 typhoon_init_rings(struct typhoon *tp)
1334 memset(tp->indexes, 0, sizeof(struct typhoon_indexes));
1336 tp->txLoRing.lastWrite = 0;
1337 tp->txHiRing.lastWrite = 0;
1338 tp->rxLoRing.lastWrite = 0;
1339 tp->rxHiRing.lastWrite = 0;
1340 tp->rxBuffRing.lastWrite = 0;
1341 tp->cmdRing.lastWrite = 0;
1342 tp->cmdRing.lastWrite = 0;
1344 tp->txLoRing.lastRead = 0;
1345 tp->txHiRing.lastRead = 0;
/* typhoon_download_firmware(): push the built-in runtime firmware image to
 * the 3XP, one PAGE_SIZE segment at a time, through a bounce buffer of
 * consistent DMA memory. Each segment is handshaken via the BOOTCMD
 * interrupt bit and carries an Internet-style checksum. Returns 0 on
 * success or a negative errno.
 */
1348 static int
1349 typhoon_download_firmware(struct typhoon *tp)
1351 void __iomem *ioaddr = tp->ioaddr;
1352 struct pci_dev *pdev = tp->pdev;
1353 struct typhoon_file_header *fHdr;
1354 struct typhoon_section_header *sHdr;
1355 u8 *image_data;
1356 void *dpage;
1357 dma_addr_t dpage_dma;
1358 unsigned int csum;
1359 u32 irqEnabled;
1360 u32 irqMasked;
1361 u32 numSections;
1362 u32 section_len;
1363 u32 len;
1364 u32 load_addr;
1365 u32 hmac;
1366 int i;
1367 int err;
1369 err = -EINVAL;
1370 fHdr = (struct typhoon_file_header *) typhoon_firmware_image;
1371 image_data = (u8 *) fHdr;
/* Sanity-check the magic tag ("TYPHOON" plus its NUL, hence 8 bytes). */
1373 if(memcmp(fHdr->tag, "TYPHOON", 8)) {
1374 printk(KERN_ERR "%s: Invalid firmware image!\n", tp->name);
1375 goto err_out;
1378 /* Cannot just map the firmware image using pci_map_single() as
1379 * the firmware is part of the kernel/module image, so we allocate
1380 * some consistent memory to copy the sections into, as it is simpler,
1381 * and short-lived. If we ever split out and require a userland
1382 * firmware loader, then we can revisit this.
1384 err = -ENOMEM;
1385 dpage = pci_alloc_consistent(pdev, PAGE_SIZE, &dpage_dma);
1386 if(!dpage) {
1387 printk(KERN_ERR "%s: no DMA mem for firmware\n", tp->name);
1388 goto err_out;
/* Enable (and unmask) only the BOOTCMD interrupt for the duration of the
 * download; the original values are restored on exit.
 */
1391 irqEnabled = readl(ioaddr + TYPHOON_REG_INTR_ENABLE);
1392 writel(irqEnabled | TYPHOON_INTR_BOOTCMD,
1393 ioaddr + TYPHOON_REG_INTR_ENABLE);
1394 irqMasked = readl(ioaddr + TYPHOON_REG_INTR_MASK);
1395 writel(irqMasked | TYPHOON_INTR_BOOTCMD,
1396 ioaddr + TYPHOON_REG_INTR_MASK);
1398 err = -ETIMEDOUT;
1399 if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
1400 printk(KERN_ERR "%s: card ready timeout\n", tp->name);
1401 goto err_out_irq;
1404 numSections = le32_to_cpu(fHdr->numSections);
1405 load_addr = le32_to_cpu(fHdr->startAddr);
/* Ack any stale BOOTCMD bit, program the entry point and the 5-word HMAC
 * digest from the image header, then tell the card a runtime image follows.
 */
1407 writel(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS);
1408 writel(load_addr, ioaddr + TYPHOON_REG_DOWNLOAD_BOOT_ADDR);
1409 hmac = le32_to_cpu(fHdr->hmacDigest[0]);
1410 writel(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_0);
1411 hmac = le32_to_cpu(fHdr->hmacDigest[1]);
1412 writel(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_1);
1413 hmac = le32_to_cpu(fHdr->hmacDigest[2]);
1414 writel(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_2);
1415 hmac = le32_to_cpu(fHdr->hmacDigest[3]);
1416 writel(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_3);
1417 hmac = le32_to_cpu(fHdr->hmacDigest[4]);
1418 writel(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_4);
1419 typhoon_post_pci_writes(ioaddr);
1420 writel(TYPHOON_BOOTCMD_RUNTIME_IMAGE, ioaddr + TYPHOON_REG_COMMAND);
1422 image_data += sizeof(struct typhoon_file_header);
1424 /* The readl() in typhoon_wait_interrupt() will force the
1425 * last write to the command register to post, so
1426 * we don't need a typhoon_post_pci_writes() after it.
1428 for(i = 0; i < numSections; i++) {
1429 sHdr = (struct typhoon_section_header *) image_data;
1430 image_data += sizeof(struct typhoon_section_header);
1431 load_addr = le32_to_cpu(sHdr->startAddr);
1432 section_len = le32_to_cpu(sHdr->len);
/* Feed the section to the card in at most PAGE_SIZE chunks, waiting for
 * the card to request each segment before handing it the next one.
 */
1434 while(section_len) {
1435 len = min_t(u32, section_len, PAGE_SIZE);
1437 if(typhoon_wait_interrupt(ioaddr) < 0 ||
1438 readl(ioaddr + TYPHOON_REG_STATUS) !=
1439 TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
1440 printk(KERN_ERR "%s: segment ready timeout\n",
1441 tp->name);
1442 goto err_out_irq;
1445 /* Do an pseudo IPv4 checksum on the data -- first
1446 * need to convert each u16 to cpu order before
1447 * summing. Fortunately, due to the properties of
1448 * the checksum, we can do this once, at the end.
1450 csum = csum_partial_copy_nocheck(image_data, dpage,
1451 len, 0);
1452 csum = csum_fold(csum);
1453 csum = le16_to_cpu(csum);
/* Describe the chunk (length, checksum, card-side destination, bounce
 * buffer bus address) and kick the SEG_AVAILABLE boot command.
 */
1455 writel(len, ioaddr + TYPHOON_REG_BOOT_LENGTH);
1456 writel(csum, ioaddr + TYPHOON_REG_BOOT_CHECKSUM);
1457 writel(load_addr, ioaddr + TYPHOON_REG_BOOT_DEST_ADDR);
1458 writel(0, ioaddr + TYPHOON_REG_BOOT_DATA_HI);
1459 writel(dpage_dma, ioaddr + TYPHOON_REG_BOOT_DATA_LO);
1460 typhoon_post_pci_writes(ioaddr);
1461 writel(TYPHOON_BOOTCMD_SEG_AVAILABLE,
1462 ioaddr + TYPHOON_REG_COMMAND);
1464 image_data += len;
1465 load_addr += len;
1466 section_len -= len;
/* Wait for the card to finish with the last segment before declaring the
 * download complete.
 */
1470 if(typhoon_wait_interrupt(ioaddr) < 0 ||
1471 readl(ioaddr + TYPHOON_REG_STATUS) !=
1472 TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
1473 printk(KERN_ERR "%s: final segment ready timeout\n", tp->name);
1474 goto err_out_irq;
1477 writel(TYPHOON_BOOTCMD_DNLD_COMPLETE, ioaddr + TYPHOON_REG_COMMAND);
1479 if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
1480 printk(KERN_ERR "%s: boot ready timeout, status 0x%0x\n",
1481 tp->name, readl(ioaddr + TYPHOON_REG_STATUS));
1482 goto err_out_irq;
1485 err = 0;
/* Restore the original interrupt enable/mask state and free the bounce
 * buffer on both the success and error paths.
 */
1487 err_out_irq:
1488 writel(irqMasked, ioaddr + TYPHOON_REG_INTR_MASK);
1489 writel(irqEnabled, ioaddr + TYPHOON_REG_INTR_ENABLE);
1491 pci_free_consistent(pdev, PAGE_SIZE, dpage, dpage_dma);
1493 err_out:
1494 return err;
/* typhoon_boot_3XP(): wait for the card to reach initial_status, hand it the
 * bus address of the shared boot record, wait for TYPHOON_STATUS_RUNNING,
 * then clear the ready registers and issue the final BOOT command.
 * Returns 0 on success, -ETIMEDOUT on any handshake timeout.
 */
1497 static int
1498 typhoon_boot_3XP(struct typhoon *tp, u32 initial_status)
1500 void __iomem *ioaddr = tp->ioaddr;
1502 if(typhoon_wait_status(ioaddr, initial_status) < 0) {
1503 printk(KERN_ERR "%s: boot ready timeout\n", tp->name);
1504 goto out_timeout;
/* Register the boot record (only 32-bit addressing -- HI is zero). */
1507 writel(0, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_HI);
1508 writel(tp->shared_dma, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_LO);
1509 typhoon_post_pci_writes(ioaddr);
1510 writel(TYPHOON_BOOTCMD_REG_BOOT_RECORD, ioaddr + TYPHOON_REG_COMMAND);
1512 if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_RUNNING) < 0) {
1513 printk(KERN_ERR "%s: boot finish timeout (status 0x%x)\n",
1514 tp->name, readl(ioaddr + TYPHOON_REG_STATUS));
1515 goto out_timeout;
1518 /* Clear the Transmit and Command ready registers
1520 writel(0, ioaddr + TYPHOON_REG_TX_HI_READY);
1521 writel(0, ioaddr + TYPHOON_REG_CMD_READY);
1522 writel(0, ioaddr + TYPHOON_REG_TX_LO_READY);
1523 typhoon_post_pci_writes(ioaddr);
1524 writel(TYPHOON_BOOTCMD_BOOT, ioaddr + TYPHOON_REG_COMMAND);
1526 return 0;
1528 out_timeout:
1529 return -ETIMEDOUT;
/* typhoon_clean_tx(): reclaim completed descriptors on a transmit ring, up
 * to the card-provided cleared index (*index). TX_DESC entries carry the
 * skb pointer stashed in addr/addrHi when the packet was queued; FRAG_DESC
 * entries carry a DMA mapping to unmap. Returns the new lastRead offset
 * (caller stores it back into the ring).
 */
1532 static u32
1533 typhoon_clean_tx(struct typhoon *tp, struct transmit_ring *txRing,
1534 volatile u32 * index)
1536 u32 lastRead = txRing->lastRead;
1537 struct tx_desc *tx;
1538 dma_addr_t skb_dma;
1539 int dma_len;
1540 int type;
1542 while(lastRead != le32_to_cpu(*index)) {
1543 tx = (struct tx_desc *) (txRing->ringBase + lastRead);
1544 type = tx->flags & TYPHOON_TYPE_MASK;
1546 if(type == TYPHOON_TX_DESC) {
1547 /* This tx_desc describes a packet.
/* Reassemble the skb pointer stored across addr/addrHi; may be called
 * from irq context, hence dev_kfree_skb_irq().
 */
1549 unsigned long ptr = tx->addr | ((u64)tx->addrHi << 32);
1550 struct sk_buff *skb = (struct sk_buff *) ptr;
1551 dev_kfree_skb_irq(skb);
1552 } else if(type == TYPHOON_FRAG_DESC) {
1553 /* This tx_desc describes a memory mapping. Free it.
1555 skb_dma = (dma_addr_t) le32_to_cpu(tx->addr);
1556 dma_len = le16_to_cpu(tx->len);
1557 pci_unmap_single(tp->pdev, skb_dma, dma_len,
1558 PCI_DMA_TODEVICE);
/* Mark the slot free and advance (with wraparound) to the next one. */
1561 tx->flags = 0;
1562 typhoon_inc_tx_index(&lastRead, 1);
1565 return lastRead;
1568 static void
1569 typhoon_tx_complete(struct typhoon *tp, struct transmit_ring *txRing,
1570 volatile u32 * index)
1572 u32 lastRead;
1573 int numDesc = MAX_SKB_FRAGS + 1;
1575 /* This will need changing if we start to use the Hi Tx ring. */
1576 lastRead = typhoon_clean_tx(tp, txRing, index);
1577 if(netif_queue_stopped(tp->dev) && typhoon_num_free(txRing->lastWrite,
1578 lastRead, TXLO_ENTRIES) > (numDesc + 2))
1579 netif_wake_queue(tp->dev);
1581 txRing->lastRead = lastRead;
1582 smp_wmb();
/* typhoon_recycle_rx_skb(): return the (still-mapped) receive buffer at
 * rxbuffers[idx] to the card's free-buffer ring. If advancing lastWrite by
 * one entry would collide with the card's cleared index (ring full), the
 * skb is dropped instead.
 */
1585 static void
1586 typhoon_recycle_rx_skb(struct typhoon *tp, u32 idx)
1588 struct typhoon_indexes *indexes = tp->indexes;
1589 struct rxbuff_ent *rxb = &tp->rxbuffers[idx];
1590 struct basic_ring *ring = &tp->rxBuffRing;
1591 struct rx_free *r;
/* Full when the next write position equals the card's cleared index. */
1593 if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
1594 indexes->rxBuffCleared) {
1595 /* no room in ring, just drop the skb
1597 dev_kfree_skb_any(rxb->skb);
1598 rxb->skb = NULL;
1599 return;
1602 r = (struct rx_free *) (ring->ringBase + ring->lastWrite);
1603 typhoon_inc_rxfree_index(&ring->lastWrite, 1);
1604 r->virtAddr = idx;
1605 r->physAddr = cpu_to_le32(rxb->dma_addr);
1607 /* Tell the card about it */
/* wmb() orders the descriptor fill before publishing the new index. */
1608 wmb();
1609 indexes->rxBuffReady = cpu_to_le32(ring->lastWrite);
/* typhoon_alloc_rx_skb(): allocate and DMA-map a fresh receive skb for slot
 * idx and post it on the card's free-buffer ring. Returns 0 on success,
 * -ENOMEM if the ring is full or allocation fails (rxb->skb left NULL).
 */
1612 static int
1613 typhoon_alloc_rx_skb(struct typhoon *tp, u32 idx)
1615 struct typhoon_indexes *indexes = tp->indexes;
1616 struct rxbuff_ent *rxb = &tp->rxbuffers[idx];
1617 struct basic_ring *ring = &tp->rxBuffRing;
1618 struct rx_free *r;
1619 struct sk_buff *skb;
1620 dma_addr_t dma_addr;
1622 rxb->skb = NULL;
/* Same ring-full test as typhoon_recycle_rx_skb(). */
1624 if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
1625 indexes->rxBuffCleared)
1626 return -ENOMEM;
1628 skb = dev_alloc_skb(PKT_BUF_SZ);
1629 if(!skb)
1630 return -ENOMEM;
/* IP-header alignment is disabled: the firmware cannot DMA to a 2-byte
 * aligned address (see the KNOWN ISSUES comment at the top of the file).
 */
1632 #if 0
1633 /* Please, 3com, fix the firmware to allow DMA to a unaligned
1634 * address! Pretty please?
1636 skb_reserve(skb, 2);
1637 #endif
1639 skb->dev = tp->dev;
1640 dma_addr = pci_map_single(tp->pdev, skb->tail,
1641 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
1643 /* Since no card does 64 bit DAC, the high bits will never
1644 * change from zero.
1646 r = (struct rx_free *) (ring->ringBase + ring->lastWrite);
1647 typhoon_inc_rxfree_index(&ring->lastWrite, 1);
1648 r->virtAddr = idx;
1649 r->physAddr = cpu_to_le32(dma_addr);
1650 rxb->skb = skb;
1651 rxb->dma_addr = dma_addr;
1653 /* Tell the card about it */
/* wmb() orders the descriptor fill before publishing the new index. */
1654 wmb();
1655 indexes->rxBuffReady = cpu_to_le32(ring->lastWrite);
1656 return 0;
/* typhoon_rx(): process up to `budget` received frames from one rx ring.
 * For each good descriptor: small frames (< rx_copybreak) are copied into a
 * freshly allocated skb and the original buffer is recycled; large frames
 * hand the mapped skb up directly and a replacement buffer is allocated.
 * Hardware checksum results and VLAN tags are honored. Updates *cleared
 * for the card and returns the number of frames delivered.
 */
1659 static int
1660 typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile u32 * ready,
1661 volatile u32 * cleared, int budget)
1663 struct rx_desc *rx;
1664 struct sk_buff *skb, *new_skb;
1665 struct rxbuff_ent *rxb;
1666 dma_addr_t dma_addr;
1667 u32 local_ready;
1668 u32 rxaddr;
1669 int pkt_len;
1670 u32 idx;
1671 u32 csum_bits;
1672 int received;
1674 received = 0;
1675 local_ready = le32_to_cpu(*ready);
1676 rxaddr = le32_to_cpu(*cleared);
1677 while(rxaddr != local_ready && budget > 0) {
1678 rx = (struct rx_desc *) (rxRing->ringBase + rxaddr);
/* The descriptor's addr field holds the rxbuffers[] slot index the card
 * was given in rx_free.virtAddr.
 */
1679 idx = rx->addr;
1680 rxb = &tp->rxbuffers[idx];
1681 skb = rxb->skb;
1682 dma_addr = rxb->dma_addr;
/* Advance the byte-offset cursor with wraparound. */
1684 rxaddr += sizeof(struct rx_desc);
1685 rxaddr %= RX_ENTRIES * sizeof(struct rx_desc);
1687 if(rx->flags & TYPHOON_RX_ERROR) {
1688 typhoon_recycle_rx_skb(tp, idx);
1689 continue;
1692 pkt_len = le16_to_cpu(rx->frameLen);
/* Copybreak path: copy small frames into a new, IP-aligned skb and give
 * the original (still-mapped) buffer straight back to the card.
 */
1694 if(pkt_len < rx_copybreak &&
1695 (new_skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
1696 new_skb->dev = tp->dev;
1697 skb_reserve(new_skb, 2);
1698 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr,
1699 PKT_BUF_SZ,
1700 PCI_DMA_FROMDEVICE);
1701 eth_copy_and_sum(new_skb, skb->tail, pkt_len, 0);
1702 pci_dma_sync_single_for_device(tp->pdev, dma_addr,
1703 PKT_BUF_SZ,
1704 PCI_DMA_FROMDEVICE);
1705 skb_put(new_skb, pkt_len);
1706 typhoon_recycle_rx_skb(tp, idx);
1707 } else {
1708 new_skb = skb;
1709 skb_put(new_skb, pkt_len);
1710 pci_unmap_single(tp->pdev, dma_addr, PKT_BUF_SZ,
1711 PCI_DMA_FROMDEVICE);
1712 typhoon_alloc_rx_skb(tp, idx);
1714 new_skb->protocol = eth_type_trans(new_skb, tp->dev);
/* Accept the hardware checksum only when the IP check passed together
 * with exactly one of the TCP/UDP checks.
 */
1715 csum_bits = rx->rxStatus & (TYPHOON_RX_IP_CHK_GOOD |
1716 TYPHOON_RX_UDP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD);
1717 if(csum_bits ==
1718 (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD)
1719 || csum_bits ==
1720 (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_UDP_CHK_GOOD)) {
1721 new_skb->ip_summed = CHECKSUM_UNNECESSARY;
1722 } else
1723 new_skb->ip_summed = CHECKSUM_NONE;
/* state_lock protects tp->vlgrp against concurrent register/unregister. */
1725 spin_lock(&tp->state_lock);
1726 if(tp->vlgrp != NULL && rx->rxStatus & TYPHOON_RX_VLAN)
1727 vlan_hwaccel_receive_skb(new_skb, tp->vlgrp,
1728 ntohl(rx->vlanTag) & 0xffff);
1729 else
1730 netif_receive_skb(new_skb);
1731 spin_unlock(&tp->state_lock);
1733 tp->dev->last_rx = jiffies;
1734 received++;
1735 budget--;
/* Report how far we got back to the card. */
1737 *cleared = cpu_to_le32(rxaddr);
1739 return received;
1742 static void
1743 typhoon_fill_free_ring(struct typhoon *tp)
1745 u32 i;
1747 for(i = 0; i < RXENT_ENTRIES; i++) {
1748 struct rxbuff_ent *rxb = &tp->rxbuffers[i];
1749 if(rxb->skb)
1750 continue;
1751 if(typhoon_alloc_rx_skb(tp, i) < 0)
1752 break;
/* typhoon_poll(): NAPI poll routine. Drains pending command responses and
 * completed transmits, then receives from the Hi ring before the Lo ring
 * within the allowed budget. Returns 0 (and re-enables interrupts) when all
 * work is done, 1 to be polled again.
 */
1756 static int
1757 typhoon_poll(struct net_device *dev, int *total_budget)
1759 struct typhoon *tp = (struct typhoon *) dev->priv;
1760 struct typhoon_indexes *indexes = tp->indexes;
1761 int orig_budget = *total_budget;
1762 int budget, work_done, done;
1764 rmb();
/* Only consume responses here if no command issuer is waiting on them. */
1765 if(!tp->awaiting_resp && indexes->respReady != indexes->respCleared)
1766 typhoon_process_response(tp, 0, NULL);
1768 if(le32_to_cpu(indexes->txLoCleared) != tp->txLoRing.lastRead)
1769 typhoon_tx_complete(tp, &tp->txLoRing, &indexes->txLoCleared);
/* Budget is the smaller of the caller's budget and the device quota. */
1771 if(orig_budget > dev->quota)
1772 orig_budget = dev->quota;
1774 budget = orig_budget;
1775 work_done = 0;
1776 done = 1;
/* The Hi (priority) rx ring is serviced before the Lo ring. */
1778 if(indexes->rxHiCleared != indexes->rxHiReady) {
1779 work_done = typhoon_rx(tp, &tp->rxHiRing, &indexes->rxHiReady,
1780 &indexes->rxHiCleared, budget);
1781 budget -= work_done;
1784 if(indexes->rxLoCleared != indexes->rxLoReady) {
1785 work_done += typhoon_rx(tp, &tp->rxLoRing, &indexes->rxLoReady,
1786 &indexes->rxLoCleared, budget);
1789 if(work_done) {
1790 *total_budget -= work_done;
1791 dev->quota -= work_done;
/* Budget exhausted: ask to be polled again. */
1793 if(work_done >= orig_budget)
1794 done = 0;
1797 if(le32_to_cpu(indexes->rxBuffCleared) == tp->rxBuffRing.lastWrite) {
1798 /* rxBuff ring is empty, try to fill it. */
1799 typhoon_fill_free_ring(tp);
/* All caught up: leave polled mode and unmask interrupts. */
1802 if(done) {
1803 netif_rx_complete(dev);
1804 writel(TYPHOON_INTR_NONE, tp->ioaddr + TYPHOON_REG_INTR_MASK);
1805 typhoon_post_pci_writes(tp->ioaddr);
1808 return (done ? 0 : 1);
1811 static irqreturn_t
1812 typhoon_interrupt(int irq, void *dev_instance, struct pt_regs *rgs)
1814 struct net_device *dev = (struct net_device *) dev_instance;
1815 struct typhoon *tp = dev->priv;
1816 void __iomem *ioaddr = tp->ioaddr;
1817 u32 intr_status;
1819 intr_status = readl(ioaddr + TYPHOON_REG_INTR_STATUS);
1820 if(!(intr_status & TYPHOON_INTR_HOST_INT))
1821 return IRQ_NONE;
1823 writel(intr_status, ioaddr + TYPHOON_REG_INTR_STATUS);
1825 if(netif_rx_schedule_prep(dev)) {
1826 writel(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
1827 typhoon_post_pci_writes(ioaddr);
1828 __netif_rx_schedule(dev);
1829 } else {
1830 printk(KERN_ERR "%s: Error, poll already scheduled\n",
1831 dev->name);
1833 return IRQ_HANDLED;
1836 static void
1837 typhoon_free_rx_rings(struct typhoon *tp)
1839 u32 i;
1841 for(i = 0; i < RXENT_ENTRIES; i++) {
1842 struct rxbuff_ent *rxb = &tp->rxbuffers[i];
1843 if(rxb->skb) {
1844 pci_unmap_single(tp->pdev, rxb->dma_addr, PKT_BUF_SZ,
1845 PCI_DMA_FROMDEVICE);
1846 dev_kfree_skb(rxb->skb);
1847 rxb->skb = NULL;
/* typhoon_sleep(): program the requested wake events, put the 3XP into its
 * sleep image, and move the PCI function into the given power state.
 * Returns 0 on success or a negative errno.
 */
1852 static int
1853 typhoon_sleep(struct typhoon *tp, int state, u16 events)
1855 struct pci_dev *pdev = tp->pdev;
1856 void __iomem *ioaddr = tp->ioaddr;
1857 struct cmd_desc xp_cmd;
1858 int err;
1860 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_ENABLE_WAKE_EVENTS);
1861 xp_cmd.parm1 = events;
1862 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1863 if(err < 0) {
1864 printk(KERN_ERR "%s: typhoon_sleep(): wake events cmd err %d\n",
1865 tp->name, err);
1866 return err;
1869 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_GOTO_SLEEP);
1870 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1871 if(err < 0) {
1872 printk(KERN_ERR "%s: typhoon_sleep(): sleep cmd err %d\n",
1873 tp->name, err);
1874 return err;
1877 if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_SLEEPING) < 0)
1878 return -ETIMEDOUT;
1880 /* Since we cannot monitor the status of the link while sleeping,
1881 * tell the world it went away.
1883 netif_carrier_off(tp->dev);
/* Arm PME for the target state, then power the function down. */
1885 pci_enable_wake(tp->pdev, state, 1);
1886 pci_disable_device(pdev);
1887 return pci_set_power_state(pdev, state);
/* typhoon_wakeup(): bring the PCI function back to D0, restore config
 * space, and wake the 3XP out of its sleep image -- falling back to a full
 * reset when the wakeup handshake fails or the firmware requires it.
 * Returns 0 on success or the result of typhoon_reset().
 */
1890 static int
1891 typhoon_wakeup(struct typhoon *tp, int wait_type)
1893 struct pci_dev *pdev = tp->pdev;
1894 void __iomem *ioaddr = tp->ioaddr;
1896 pci_set_power_state(pdev, 0);
1897 pci_restore_state(pdev, tp->pci_state);
1899 /* Post 2.x.x versions of the Sleep Image require a reset before
1900 * we can download the Runtime Image. But let's not make users of
1901 * the old firmware pay for the reset.
1903 writel(TYPHOON_BOOTCMD_WAKEUP, ioaddr + TYPHOON_REG_COMMAND);
1904 if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0 ||
1905 (tp->capabilities & TYPHOON_WAKEUP_NEEDS_RESET))
1906 return typhoon_reset(ioaddr, wait_type);
1908 return 0;
/* typhoon_start_runtime(): full bring-up of the 3XP runtime image --
 * (re)initialize the rings, download and boot the firmware, then configure
 * it via a sequence of commands (packet size, MAC, coalescing, transceiver,
 * VLAN ethertype, offloads, rx filter) before enabling Tx/Rx and unmasking
 * interrupts. On any failure the card is reset and the rings reclaimed.
 * Returns 0 on success or a negative errno.
 */
1911 static int
1912 typhoon_start_runtime(struct typhoon *tp)
1914 struct net_device *dev = tp->dev;
1915 void __iomem *ioaddr = tp->ioaddr;
1916 struct cmd_desc xp_cmd;
1917 int err;
1919 typhoon_init_rings(tp);
1920 typhoon_fill_free_ring(tp);
1922 err = typhoon_download_firmware(tp);
1923 if(err < 0) {
1924 printk("%s: cannot load runtime on 3XP\n", tp->name);
1925 goto error_out;
1928 if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
1929 printk("%s: cannot boot 3XP\n", tp->name);
1930 err = -EIO;
1931 goto error_out;
1934 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAX_PKT_SIZE);
1935 xp_cmd.parm1 = cpu_to_le16(PKT_BUF_SZ);
1936 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1937 if(err < 0)
1938 goto error_out;
/* MAC address is passed as a 16-bit high part and 32-bit low part, in
 * network byte order converted to card (little-endian) order.
 */
1940 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
1941 xp_cmd.parm1 = cpu_to_le16(ntohs(*(u16 *)&dev->dev_addr[0]));
1942 xp_cmd.parm2 = cpu_to_le32(ntohl(*(u32 *)&dev->dev_addr[2]));
1943 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1944 if(err < 0)
1945 goto error_out;
1947 /* Disable IRQ coalescing -- we can reenable it when 3Com gives
1948 * us some more information on how to control it.
1950 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_IRQ_COALESCE_CTRL);
1951 xp_cmd.parm1 = 0;
1952 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1953 if(err < 0)
1954 goto error_out;
1956 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT);
1957 xp_cmd.parm1 = tp->xcvr_select;
1958 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1959 if(err < 0)
1960 goto error_out;
1962 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_VLAN_TYPE_WRITE);
1963 xp_cmd.parm1 = __constant_cpu_to_le16(ETH_P_8021Q);
1964 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1965 if(err < 0)
1966 goto error_out;
/* tp->offload is read under state_lock; it can be changed at runtime
 * (e.g. by the VLAN registration paths).
 */
1968 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_OFFLOAD_TASKS);
1969 spin_lock_bh(&tp->state_lock);
1970 xp_cmd.parm2 = tp->offload;
1971 xp_cmd.parm3 = tp->offload;
1972 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1973 spin_unlock_bh(&tp->state_lock);
1974 if(err < 0)
1975 goto error_out;
1977 typhoon_set_rx_mode(dev);
1979 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_ENABLE);
1980 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1981 if(err < 0)
1982 goto error_out;
1984 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_ENABLE);
1985 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1986 if(err < 0)
1987 goto error_out;
/* Publish the Running state before interrupts are unmasked. */
1989 tp->card_state = Running;
1990 smp_wmb();
1992 writel(TYPHOON_INTR_ENABLE_ALL, ioaddr + TYPHOON_REG_INTR_ENABLE);
1993 writel(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_MASK);
1994 typhoon_post_pci_writes(ioaddr);
1996 return 0;
1998 error_out:
1999 typhoon_reset(ioaddr, WaitNoSleep);
2000 typhoon_free_rx_rings(tp);
2001 typhoon_init_rings(tp);
2002 return err;
/* typhoon_stop_runtime(): quiesce and halt the runtime image -- disable Rx,
 * drain outstanding Tx (bounded wait), disable Tx, snapshot the statistics,
 * halt the firmware, and reset the card. Any Tx descriptors that never
 * completed are reclaimed after the reset. Returns 0, or -ETIMEDOUT if the
 * reset itself fails.
 */
2005 static int
2006 typhoon_stop_runtime(struct typhoon *tp, int wait_type)
2008 struct typhoon_indexes *indexes = tp->indexes;
2009 struct transmit_ring *txLo = &tp->txLoRing;
2010 void __iomem *ioaddr = tp->ioaddr;
2011 struct cmd_desc xp_cmd;
2012 int i;
2014 /* Disable interrupts early, since we can't schedule a poll
2015 * when called with !netif_running(). This will be posted
2016 * when we force the posting of the command.
2018 writel(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_ENABLE);
2020 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_DISABLE);
2021 typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
2023 /* Wait 1/2 sec for any outstanding transmits to occur
2024 * We'll cleanup after the reset if this times out.
2026 for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
2027 if(indexes->txLoCleared == cpu_to_le32(txLo->lastWrite))
2028 break;
2029 udelay(TYPHOON_UDELAY);
2032 if(i == TYPHOON_WAIT_TIMEOUT)
2033 printk(KERN_ERR
2034 "%s: halt timed out waiting for Tx to complete\n",
2035 tp->name);
2037 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_DISABLE);
2038 typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
2040 /* save the statistics so when we bring the interface up again,
2041 * the values reported to userspace are correct.
/* card_state must change before the final stats read (the stats path
 * behaves differently while Sleeping).
 */
2043 tp->card_state = Sleeping;
2044 smp_wmb();
2045 typhoon_do_get_stats(tp);
2046 memcpy(&tp->stats_saved, &tp->stats, sizeof(struct net_device_stats));
2048 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_HALT);
2049 typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
2051 if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_HALTED) < 0)
2052 printk(KERN_ERR "%s: timed out waiting for 3XP to halt\n",
2053 tp->name);
2055 if(typhoon_reset(ioaddr, wait_type) < 0) {
2056 printk(KERN_ERR "%s: unable to reset 3XP\n", tp->name);
2057 return -ETIMEDOUT;
2060 /* cleanup any outstanding Tx packets */
2061 if(indexes->txLoCleared != cpu_to_le32(txLo->lastWrite)) {
2062 indexes->txLoCleared = cpu_to_le32(txLo->lastWrite);
2063 typhoon_clean_tx(tp, &tp->txLoRing, &indexes->txLoCleared);
2066 return 0;
/* typhoon_tx_timeout(): net_device tx_timeout hook. Attempts a full
 * recovery -- reset the card, reclaim the Tx/Rx rings, and restart the
 * runtime. If any step fails, the hardware is left reset with carrier off
 * so the watchdog stops firing.
 */
2069 static void
2070 typhoon_tx_timeout(struct net_device *dev)
2072 struct typhoon *tp = (struct typhoon *) dev->priv;
2074 if(typhoon_reset(tp->ioaddr, WaitNoSleep) < 0) {
2075 printk(KERN_WARNING "%s: could not reset in tx timeout\n",
2076 dev->name);
2077 goto truely_dead;
2080 /* If we ever start using the Hi ring, it will need cleaning too */
2081 typhoon_clean_tx(tp, &tp->txLoRing, &tp->indexes->txLoCleared);
2082 typhoon_free_rx_rings(tp);
2084 if(typhoon_start_runtime(tp) < 0) {
2085 printk(KERN_ERR "%s: could not start runtime in tx timeout\n",
2086 dev->name);
2087 goto truely_dead;
2090 netif_wake_queue(dev);
2091 return;
2093 truely_dead:
2094 /* Reset the hardware, and turn off carrier to avoid more timeouts */
2095 typhoon_reset(tp->ioaddr, NoWait);
2096 netif_carrier_off(dev);
/* typhoon_open(): net_device open hook. Wakes the card from its sleep
 * image, grabs the (shared) IRQ, and starts the runtime. On failure it
 * tries to reboot back into the sleep image and put the card to sleep so
 * the device is left in a consistent low-power state.
 */
2099 static int
2100 typhoon_open(struct net_device *dev)
2102 struct typhoon *tp = (struct typhoon *) dev->priv;
2103 int err;
2105 err = typhoon_wakeup(tp, WaitSleep);
2106 if(err < 0) {
2107 printk(KERN_ERR "%s: unable to wakeup device\n", dev->name);
2108 goto out_sleep;
2111 err = request_irq(dev->irq, &typhoon_interrupt, SA_SHIRQ,
2112 dev->name, dev);
2113 if(err < 0)
2114 goto out_sleep;
2116 err = typhoon_start_runtime(tp);
2117 if(err < 0)
2118 goto out_irq;
2120 netif_start_queue(dev);
2121 return 0;
2123 out_irq:
2124 free_irq(dev->irq, dev);
/* Error unwind: try to get back into the sleep image; if even that fails,
 * just reset the hardware and give up.
 */
2126 out_sleep:
2127 if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
2128 printk(KERN_ERR "%s: unable to reboot into sleep img\n",
2129 dev->name);
2130 typhoon_reset(tp->ioaddr, NoWait);
2131 goto out;
/* state 3 == D3hot; no wake events while administratively down */
2134 if(typhoon_sleep(tp, 3, 0) < 0)
2135 printk(KERN_ERR "%s: unable to go back to sleep\n", dev->name);
2137 out:
2138 return err;
/* dev->stop handler: quiesce the queue and runtime firmware, release the
 * IRQ, recycle the rings, then boot the sleep image and put the card into
 * D3 sleep. Failures are logged but ignored — close always returns 0 so
 * the interface can be torn down regardless of hardware state.
 */
static int
typhoon_close(struct net_device *dev)
{
	struct typhoon *tp = (struct typhoon *) dev->priv;

	netif_stop_queue(dev);

	if(typhoon_stop_runtime(tp, WaitSleep) < 0)
		printk(KERN_ERR "%s: unable to stop runtime\n", dev->name);

	/* Make sure there is no irq handler running on a different CPU. */
	typhoon_synchronize_irq(dev->irq);
	free_irq(dev->irq, dev);

	/* Drop all posted Rx buffers and re-init the rings for the next open */
	typhoon_free_rx_rings(tp);
	typhoon_init_rings(tp);

	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0)
		printk(KERN_ERR "%s: unable to boot sleep image\n", dev->name);

	if(typhoon_sleep(tp, 3, 0) < 0)
		printk(KERN_ERR "%s: unable to put card to sleep\n", dev->name);

	return 0;
}
2167 #ifdef CONFIG_PM
/* PCI resume handler: wake the 3XP and restart the runtime image.
 * If the interface was down at suspend time there is nothing to restore —
 * typhoon_open() will do the full bring-up later. On failure the card is
 * reset and left detached; returns -EBUSY in that case.
 */
static int
typhoon_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct typhoon *tp = (struct typhoon *) dev->priv;

	/* If we're down, resume when we are upped.
	 */
	if(!netif_running(dev))
		return 0;

	/* WaitNoSleep: resume may run in a context where sleeping is unsafe */
	if(typhoon_wakeup(tp, WaitNoSleep) < 0) {
		printk(KERN_ERR "%s: critical: could not wake up in resume\n",
				dev->name);
		goto reset;
	}

	if(typhoon_start_runtime(tp) < 0) {
		printk(KERN_ERR "%s: critical: could not start runtime in "
				"resume\n", dev->name);
		goto reset;
	}

	netif_device_attach(dev);
	netif_start_queue(dev);
	return 0;

reset:
	typhoon_reset(tp->ioaddr, NoWait);
	return -EBUSY;
}
/* PCI suspend handler: stop the runtime, reboot into the sleep image,
 * re-program the MAC address and a minimal Rx filter (directed + broadcast)
 * so wake-on-LAN matching still works, then sleep the card with the
 * configured wake events. Any failure rolls back via typhoon_resume().
 * Returns 0 on success, -EBUSY on failure.
 */
static int
typhoon_suspend(struct pci_dev *pdev, u32 state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct typhoon *tp = (struct typhoon *) dev->priv;
	struct cmd_desc xp_cmd;

	/* If we're down, we're already suspended.
	 */
	if(!netif_running(dev))
		return 0;

	/* state_lock guards vlgrp; the firmware cannot do magic-packet
	 * wakeup while VLAN tagging is active, so refuse that combination */
	spin_lock_bh(&tp->state_lock);
	if(tp->vlgrp && tp->wol_events & TYPHOON_WAKE_MAGIC_PKT) {
		spin_unlock_bh(&tp->state_lock);
		printk(KERN_ERR "%s: cannot do WAKE_MAGIC with VLANS\n",
				dev->name);
		return -EBUSY;
	}
	spin_unlock_bh(&tp->state_lock);

	netif_device_detach(dev);

	if(typhoon_stop_runtime(tp, WaitNoSleep) < 0) {
		printk(KERN_ERR "%s: unable to stop runtime\n", dev->name);
		goto need_resume;
	}

	typhoon_free_rx_rings(tp);
	typhoon_init_rings(tp);

	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
		printk(KERN_ERR "%s: unable to boot sleep image\n", dev->name);
		goto need_resume;
	}

	/* Sleep image needs the station address to match wake-up frames */
	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
	xp_cmd.parm1 = cpu_to_le16(ntohs(*(u16 *)&dev->dev_addr[0]));
	xp_cmd.parm2 = cpu_to_le32(ntohl(*(u32 *)&dev->dev_addr[2]));
	if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
		printk(KERN_ERR "%s: unable to set mac address in suspend\n",
				dev->name);
		goto need_resume;
	}

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
	xp_cmd.parm1 = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
	if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
		printk(KERN_ERR "%s: unable to set rx filter in suspend\n",
				dev->name);
		goto need_resume;
	}

	if(typhoon_sleep(tp, state, tp->wol_events) < 0) {
		printk(KERN_ERR "%s: unable to put card to sleep\n", dev->name);
		goto need_resume;
	}

	return 0;

need_resume:
	typhoon_resume(pdev);
	return -EBUSY;
}
/* Thin pass-through to the PCI core's wake-enable; no driver state touched. */
static int
typhoon_enable_wake(struct pci_dev *pdev, u32 state, int enable)
{
	return pci_enable_wake(pdev, state, enable);
}
2270 #endif
2272 static int __devinit
2273 typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2275 static int did_version = 0;
2276 struct net_device *dev;
2277 struct typhoon *tp;
2278 int card_id = (int) ent->driver_data;
2279 unsigned long ioaddr;
2280 void __iomem *ioaddr_mapped;
2281 void *shared;
2282 dma_addr_t shared_dma;
2283 struct cmd_desc xp_cmd;
2284 struct resp_desc xp_resp[3];
2285 int i;
2286 int err = 0;
2288 if(!did_version++)
2289 printk(KERN_INFO "%s", version);
2291 dev = alloc_etherdev(sizeof(*tp));
2292 if(dev == NULL) {
2293 printk(ERR_PFX "%s: unable to alloc new net device\n",
2294 pci_name(pdev));
2295 err = -ENOMEM;
2296 goto error_out;
2298 SET_MODULE_OWNER(dev);
2299 SET_NETDEV_DEV(dev, &pdev->dev);
2301 err = pci_enable_device(pdev);
2302 if(err < 0) {
2303 printk(ERR_PFX "%s: unable to enable device\n",
2304 pci_name(pdev));
2305 goto error_out_dev;
2308 /* If we transitioned from D3->D0 in pci_enable_device(),
2309 * we lost our configuration and need to restore it to the
2310 * conditions at boot.
2312 pci_restore_state(pdev, NULL);
2314 err = pci_set_dma_mask(pdev, 0xffffffffULL);
2315 if(err < 0) {
2316 printk(ERR_PFX "%s: No usable DMA configuration\n",
2317 pci_name(pdev));
2318 goto error_out_dev;
2321 /* sanity checks, resource #1 is our mmio area
2323 if(!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
2324 printk(ERR_PFX
2325 "%s: region #1 not a PCI MMIO resource, aborting\n",
2326 pci_name(pdev));
2327 err = -ENODEV;
2328 goto error_out_dev;
2330 if(pci_resource_len(pdev, 1) < 128) {
2331 printk(ERR_PFX "%s: Invalid PCI MMIO region size, aborting\n",
2332 pci_name(pdev));
2333 err = -ENODEV;
2334 goto error_out_dev;
2337 err = pci_request_regions(pdev, "typhoon");
2338 if(err < 0) {
2339 printk(ERR_PFX "%s: could not request regions\n",
2340 pci_name(pdev));
2341 goto error_out_dev;
2344 pci_set_master(pdev);
2345 pci_set_mwi(pdev);
2347 /* map our MMIO region
2349 ioaddr = pci_resource_start(pdev, 1);
2350 ioaddr_mapped = ioremap(ioaddr, 128);
2351 if (!ioaddr_mapped) {
2352 printk(ERR_PFX "%s: cannot remap MMIO, aborting\n",
2353 pci_name(pdev));
2354 err = -EIO;
2355 goto error_out_regions;
2358 /* allocate pci dma space for rx and tx descriptor rings
2360 shared = pci_alloc_consistent(pdev, sizeof(struct typhoon_shared),
2361 &shared_dma);
2362 if(!shared) {
2363 printk(ERR_PFX "%s: could not allocate DMA memory\n",
2364 pci_name(pdev));
2365 err = -ENOMEM;
2366 goto error_out_remap;
2369 dev->irq = pdev->irq;
2370 tp = dev->priv;
2371 tp->shared = (struct typhoon_shared *) shared;
2372 tp->shared_dma = shared_dma;
2373 tp->pdev = pdev;
2374 tp->tx_pdev = pdev;
2375 tp->ioaddr = ioaddr_mapped;
2376 tp->tx_ioaddr = ioaddr_mapped;
2377 tp->dev = dev;
2379 /* need to be able to restore PCI state after a suspend */
2380 pci_save_state(pdev, tp->pci_state);
2382 /* Init sequence:
2383 * 1) Reset the adapter to clear any bad juju
2384 * 2) Reload the sleep image
2385 * 3) Boot the sleep image
2386 * 4) Get the hardware address.
2387 * 5) Put the card to sleep.
2389 if (typhoon_reset(ioaddr_mapped, WaitSleep) < 0) {
2390 printk(ERR_PFX "%s: could not reset 3XP\n", pci_name(pdev));
2391 err = -EIO;
2392 goto error_out_dma;
2395 /* dev->name is not valid until we register, but we need to
2396 * use some common routines to initialize the card. So that those
2397 * routines print the right name, we keep our oun pointer to the name
2399 tp->name = pci_name(pdev);
2401 typhoon_init_interface(tp);
2402 typhoon_init_rings(tp);
2404 if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
2405 printk(ERR_PFX "%s: cannot boot 3XP sleep image\n",
2406 pci_name(pdev));
2407 err = -EIO;
2408 goto error_out_reset;
2411 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_MAC_ADDRESS);
2412 if(typhoon_issue_command(tp, 1, &xp_cmd, 1, xp_resp) < 0) {
2413 printk(ERR_PFX "%s: cannot read MAC address\n",
2414 pci_name(pdev));
2415 err = -EIO;
2416 goto error_out_reset;
2419 *(u16 *)&dev->dev_addr[0] = htons(le16_to_cpu(xp_resp[0].parm1));
2420 *(u32 *)&dev->dev_addr[2] = htonl(le32_to_cpu(xp_resp[0].parm2));
2422 if(!is_valid_ether_addr(dev->dev_addr)) {
2423 printk(ERR_PFX "%s: Could not obtain valid ethernet address, "
2424 "aborting\n", pci_name(pdev));
2425 goto error_out_reset;
2428 /* Read the Sleep Image version last, so the response is valid
2429 * later when we print out the version reported.
2431 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
2432 if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
2433 printk(ERR_PFX "%s: Could not get Sleep Image version\n",
2434 pdev->slot_name);
2435 goto error_out_reset;
2438 tp->capabilities = typhoon_card_info[card_id].capabilities;
2439 tp->xcvr_select = TYPHOON_XCVR_AUTONEG;
2441 /* Typhoon 1.0 Sleep Images return one response descriptor to the
2442 * READ_VERSIONS command. Those versions are OK after waking up
2443 * from sleep without needing a reset. Typhoon 1.1+ Sleep Images
2444 * seem to need a little extra help to get started. Since we don't
2445 * know how to nudge it along, just kick it.
2447 if(xp_resp[0].numDesc != 0)
2448 tp->capabilities |= TYPHOON_WAKEUP_NEEDS_RESET;
2450 if(typhoon_sleep(tp, 3, 0) < 0) {
2451 printk(ERR_PFX "%s: cannot put adapter to sleep\n",
2452 pci_name(pdev));
2453 err = -EIO;
2454 goto error_out_reset;
2457 /* The chip-specific entries in the device structure. */
2458 dev->open = typhoon_open;
2459 dev->hard_start_xmit = typhoon_start_tx;
2460 dev->stop = typhoon_close;
2461 dev->set_multicast_list = typhoon_set_rx_mode;
2462 dev->tx_timeout = typhoon_tx_timeout;
2463 dev->poll = typhoon_poll;
2464 dev->weight = 16;
2465 dev->watchdog_timeo = TX_TIMEOUT;
2466 dev->get_stats = typhoon_get_stats;
2467 dev->set_mac_address = typhoon_set_mac_address;
2468 dev->do_ioctl = typhoon_ioctl;
2469 dev->vlan_rx_register = typhoon_vlan_rx_register;
2470 dev->vlan_rx_kill_vid = typhoon_vlan_rx_kill_vid;
2472 /* We can handle scatter gather, up to 16 entries, and
2473 * we can do IP checksumming (only version 4, doh...)
2475 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
2476 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2477 dev->features |= NETIF_F_TSO;
2479 if(register_netdev(dev) < 0)
2480 goto error_out_reset;
2482 /* fixup our local name */
2483 tp->name = dev->name;
2485 pci_set_drvdata(pdev, dev);
2487 printk(KERN_INFO "%s: %s at 0x%lx, ",
2488 dev->name, typhoon_card_info[card_id].name, ioaddr);
2489 for(i = 0; i < 5; i++)
2490 printk("%2.2x:", dev->dev_addr[i]);
2491 printk("%2.2x\n", dev->dev_addr[i]);
2493 /* xp_resp still contains the response to the READ_VERSIONS command.
2494 * For debugging, let the user know what version he has.
2496 if(xp_resp[0].numDesc == 0) {
2497 /* This is the Typhoon 1.0 type Sleep Image, last 16 bits
2498 * of version is Month/Day of build.
2500 u16 monthday = le32_to_cpu(xp_resp[0].parm2) & 0xffff;
2501 printk(KERN_INFO "%s: Typhoon 1.0 Sleep Image built "
2502 "%02u/%02u/2000\n", dev->name, monthday >> 8,
2503 monthday & 0xff);
2504 } else if(xp_resp[0].numDesc == 2) {
2505 /* This is the Typhoon 1.1+ type Sleep Image
2507 u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
2508 u8 *ver_string = (u8 *) &xp_resp[1];
2509 ver_string[25] = 0;
2510 printk(KERN_INFO "%s: Typhoon 1.1+ Sleep Image version "
2511 "%u.%u.%u.%u %s\n", dev->name, HIPQUAD(sleep_ver),
2512 ver_string);
2513 } else {
2514 printk(KERN_WARNING "%s: Unknown Sleep Image version "
2515 "(%u:%04x)\n", dev->name, xp_resp[0].numDesc,
2516 le32_to_cpu(xp_resp[0].parm2));
2519 return 0;
2521 error_out_reset:
2522 typhoon_reset(ioaddr_mapped, NoWait);
2524 error_out_dma:
2525 pci_free_consistent(pdev, sizeof(struct typhoon_shared),
2526 shared, shared_dma);
2527 error_out_remap:
2528 iounmap(ioaddr_mapped);
2529 error_out_regions:
2530 pci_release_regions(pdev);
2531 error_out_dev:
2532 free_netdev(dev);
2533 error_out:
2534 return err;
/* PCI remove: exact inverse of typhoon_init_one(). Unregister the netdev,
 * bring the card back to D0 and restore its saved config space so the
 * final reset hits live hardware, then release MMIO, DMA memory, regions,
 * and the net_device itself.
 */
static void __devexit
typhoon_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct typhoon *tp = (struct typhoon *) (dev->priv);

	unregister_netdev(dev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev, tp->pci_state);
	typhoon_reset(tp->ioaddr, NoWait);
	iounmap(tp->ioaddr);
	pci_free_consistent(pdev, sizeof(struct typhoon_shared),
			    tp->shared, tp->shared_dma);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);
}
/* PCI driver glue; suspend/resume/enable_wake only exist with CONFIG_PM. */
static struct pci_driver typhoon_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= typhoon_pci_tbl,
	.probe		= typhoon_init_one,
	.remove		= __devexit_p(typhoon_remove_one),
#ifdef CONFIG_PM
	.suspend	= typhoon_suspend,
	.resume		= typhoon_resume,
	.enable_wake	= typhoon_enable_wake,
#endif
};
/* Module entry: register the PCI driver (probes any present boards). */
static int __init
typhoon_init(void)
{
	return pci_module_init(&typhoon_driver);
}

/* Module exit: unregister, which removes all bound devices. */
static void __exit
typhoon_cleanup(void)
{
	pci_unregister_driver(&typhoon_driver);
}

module_init(typhoon_init);
module_exit(typhoon_cleanup);