TOMOYO: Use GFP_NOFS rather than GFP_KERNEL.
[linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git] / drivers / net / typhoon.c
blobcd24e5f2b2a27944054f9ee4192910a81ab01795
1 /* typhoon.c: A Linux Ethernet device driver for 3Com 3CR990 family of NICs */
2 /*
3 Written 2002-2004 by David Dillow <dave@thedillows.org>
4 Based on code written 1998-2000 by Donald Becker <becker@scyld.com> and
5 Linux 2.2.x driver by David P. McLean <davidpmclean@yahoo.com>.
7 This software may be used and distributed according to the terms of
8 the GNU General Public License (GPL), incorporated herein by reference.
9 Drivers based on or derived from this code fall under the GPL and must
10 retain the authorship, copyright and license notice. This file is not
11 a complete program and may only be used when the entire operating
12 system is licensed under the GPL.
14 This software is available on a public web site. It may enable
15 cryptographic capabilities of the 3Com hardware, and may be
16 exported from the United States under License Exception "TSU"
17 pursuant to 15 C.F.R. Section 740.13(e).
19 This work was funded by the National Library of Medicine under
20 the Department of Energy project number 0274DD06D1 and NLM project
21 number Y1-LM-2015-01.
23 This driver is designed for the 3Com 3CR990 Family of cards with the
24 3XP Processor. It has been tested on x86 and sparc64.
26 KNOWN ISSUES:
27 *) The current firmware always strips the VLAN tag off, even if
28 we tell it not to. You should filter VLANs at the switch
29 as a workaround (good practice in any event) until we can
30 get this fixed.
31 *) Cannot DMA Rx packets to a 2 byte aligned address. Also firmware
32 issue. Hopefully 3Com will fix it.
33 *) Waiting for a command response takes 8ms due to non-preemptable
34 polling. Only significant for getting stats and creating
35 SAs, but an ugly wart never the less.
37 TODO:
38 *) Doesn't do IPSEC offloading. Yet. Keep yer pants on, it's coming.
39 *) Add more support for ethtool (especially for NIC stats)
40 *) Allow disabling of RX checksum offloading
41 *) Fix MAC changing to work while the interface is up
42 (Need to put commands on the TX ring, which changes
43 the locking)
44 *) Add in FCS to {rx,tx}_bytes, since the hardware doesn't. See
45 http://oss.sgi.com/cgi-bin/mesg.cgi?a=netdev&i=20031215152211.7003fe8e.rddunlap%40osdl.org
/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
 * Setting to > 1518 effectively disables this feature.
 */
static int rx_copybreak = 200;

/* Should we use MMIO or Port IO?
 * 0: Port IO
 * 1: MMIO
 * 2: Try MMIO, fallback to Port IO
 */
static unsigned int use_mmio = 2;

/* end user-configurable values */

/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
 */
static const int multicast_filter_limit = 32;

/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for compile efficiency.
 * The compiler will convert <unsigned>'%'<2^N> into a bit mask.
 * Making the Tx ring too large decreases the effectiveness of channel
 * bonding and packet priority.
 * There are no ill effects from too-large receive rings.
 *
 * We don't currently use the Hi Tx ring so, don't make it very big.
 *
 * Beware that if we start using the Hi Tx ring, we will need to change
 * typhoon_num_free_tx() and typhoon_tx_complete() to account for that.
 */
#define TXHI_ENTRIES		2
#define TXLO_ENTRIES		128
#define RX_ENTRIES		32
#define COMMAND_ENTRIES		16
#define RESPONSE_ENTRIES	32

/* Ring sizes in bytes; command and response descriptors are fixed-size. */
#define COMMAND_RING_SIZE	(COMMAND_ENTRIES * sizeof(struct cmd_desc))
#define RESPONSE_RING_SIZE	(RESPONSE_ENTRIES * sizeof(struct resp_desc))

/* The 3XP will preload and remove 64 entries from the free buffer
 * list, and we need one entry to keep the ring from wrapping, so
 * to keep this a power of two, we use 128 entries.
 */
#define RXFREE_ENTRIES		128
#define RXENT_ENTRIES		(RXFREE_ENTRIES - 1)

/* Operational parameters that usually are not changed. */

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (2*HZ)

#define PKT_BUF_SZ		1536
#define FIRMWARE_NAME		"3com/typhoon.bin"

/* Prefix all pr_*() output with the module name. */
#define pr_fmt(fmt)		KBUILD_MODNAME " " fmt
105 #include <linux/module.h>
106 #include <linux/kernel.h>
107 #include <linux/sched.h>
108 #include <linux/string.h>
109 #include <linux/timer.h>
110 #include <linux/errno.h>
111 #include <linux/ioport.h>
112 #include <linux/slab.h>
113 #include <linux/interrupt.h>
114 #include <linux/pci.h>
115 #include <linux/netdevice.h>
116 #include <linux/etherdevice.h>
117 #include <linux/skbuff.h>
118 #include <linux/mm.h>
119 #include <linux/init.h>
120 #include <linux/delay.h>
121 #include <linux/ethtool.h>
122 #include <linux/if_vlan.h>
123 #include <linux/crc32.h>
124 #include <linux/bitops.h>
125 #include <asm/processor.h>
126 #include <asm/io.h>
127 #include <asm/uaccess.h>
128 #include <linux/in6.h>
129 #include <linux/dma-mapping.h>
130 #include <linux/firmware.h>
131 #include <generated/utsrelease.h>
133 #include "typhoon.h"
MODULE_AUTHOR("David Dillow <dave@thedillows.org>");
MODULE_VERSION(UTS_RELEASE);
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FIRMWARE_NAME);
MODULE_DESCRIPTION("3Com Typhoon Family (3C990, 3CR990, and variants)");
MODULE_PARM_DESC(rx_copybreak, "Packets smaller than this are copied and "
	       "the buffer given back to the NIC. Default "
	       "is 200.");
MODULE_PARM_DESC(use_mmio, "Use MMIO (1) or PIO(0) to access the NIC. "
	       "Default is to try MMIO and fallback to PIO.");
module_param(rx_copybreak, int, 0);
module_param(use_mmio, int, 0);

/* The 3XP's scatter-gather list for TSO only holds 32 entries; if the
 * kernel can hand us more fragments than that, disable TSO entirely.
 */
#if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32
#warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO
#undef NETIF_F_TSO
#endif

/* Sanity check: the Tx ring must hold at least two worst-case packets. */
#if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS)
#error TX ring too small!
#endif
/* Static per-board identification: marketing name plus capability flags
 * (TYPHOON_* bits below).
 */
struct typhoon_card_info {
	const char *name;
	const int capabilities;
};

/* Capability flag bits for typhoon_card_info.capabilities. */
#define TYPHOON_CRYPTO_NONE		0x00
#define TYPHOON_CRYPTO_DES		0x01
#define TYPHOON_CRYPTO_3DES		0x02
#define TYPHOON_CRYPTO_VARIABLE		0x04
#define TYPHOON_FIBER			0x08
#define TYPHOON_WAKEUP_NEEDS_RESET	0x10
/* Index into typhoon_card_info[] below -- keep the two in sync. */
enum typhoon_cards {
	TYPHOON_TX = 0, TYPHOON_TX95, TYPHOON_TX97, TYPHOON_SVR,
	TYPHOON_SVR95, TYPHOON_SVR97, TYPHOON_TXM, TYPHOON_BSVR,
	TYPHOON_FX95, TYPHOON_FX97, TYPHOON_FX95SVR, TYPHOON_FX97SVR,
	TYPHOON_FXM,
};
/* directly indexed by enum typhoon_cards, above */
static struct typhoon_card_info typhoon_card_info[] __devinitdata = {
	{ "3Com Typhoon (3C990-TX)",
		TYPHOON_CRYPTO_NONE},
	{ "3Com Typhoon (3CR990-TX-95)",
		TYPHOON_CRYPTO_DES},
	{ "3Com Typhoon (3CR990-TX-97)",
		TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES},
	{ "3Com Typhoon (3C990SVR)",
		TYPHOON_CRYPTO_NONE},
	{ "3Com Typhoon (3CR990SVR95)",
		TYPHOON_CRYPTO_DES},
	{ "3Com Typhoon (3CR990SVR97)",
		TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES},
	{ "3Com Typhoon2 (3C990B-TX-M)",
		TYPHOON_CRYPTO_VARIABLE},
	{ "3Com Typhoon2 (3C990BSVR)",
		TYPHOON_CRYPTO_VARIABLE},
	{ "3Com Typhoon (3CR990-FX-95)",
		TYPHOON_CRYPTO_DES | TYPHOON_FIBER},
	{ "3Com Typhoon (3CR990-FX-97)",
		TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER},
	{ "3Com Typhoon (3CR990-FX-95 Server)",
		TYPHOON_CRYPTO_DES | TYPHOON_FIBER},
	{ "3Com Typhoon (3CR990-FX-97 Server)",
		TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER},
	{ "3Com Typhoon2 (3C990B-FX-97)",
		TYPHOON_CRYPTO_VARIABLE | TYPHOON_FIBER},
};
/* Notes on the new subsystem numbering scheme:
 * bits 0-1 indicate crypto capabilities: (0) variable, (1) DES, or (2) 3DES
 * bit 4 indicates if this card has secured firmware (we don't support it)
 * bit 8 indicates if this is a (0) copper or (1) fiber card
 * bits 12-16 indicate card type: (0) client and (1) server
 */
static DEFINE_PCI_DEVICE_TABLE(typhoon_pci_tbl) = {
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_95,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX95 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_97,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX97 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
	  PCI_ANY_ID, 0x1000, 0, 0, TYPHOON_TXM },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
	  PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FXM },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
	  PCI_ANY_ID, 0x2000, 0, 0, TYPHOON_BSVR },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
	  PCI_ANY_ID, 0x1101, 0, 0, TYPHOON_FX95 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
	  PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FX97 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
	  PCI_ANY_ID, 0x2101, 0, 0, TYPHOON_FX95SVR },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
	  PCI_ANY_ID, 0x2102, 0, 0, TYPHOON_FX97SVR },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR95,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR95 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR97,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR97 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, typhoon_pci_tbl);
/* Define the shared memory area
 * Align everything the 3XP will normally be using.
 * We'll need to move/align txHi if we start using that ring.
 */
#define __3xp_aligned	____cacheline_aligned
struct typhoon_shared {
	struct typhoon_interface	iface;
	struct typhoon_indexes		indexes			__3xp_aligned;
	struct tx_desc			txLo[TXLO_ENTRIES]	__3xp_aligned;
	struct rx_desc			rxLo[RX_ENTRIES]	__3xp_aligned;
	struct rx_desc			rxHi[RX_ENTRIES]	__3xp_aligned;
	struct cmd_desc			cmd[COMMAND_ENTRIES]	__3xp_aligned;
	struct resp_desc		resp[RESPONSE_ENTRIES]	__3xp_aligned;
	struct rx_free			rxBuff[RXFREE_ENTRIES]	__3xp_aligned;
	u32				zeroWord;
	struct tx_desc			txHi[TXHI_ENTRIES];
} __attribute__ ((packed));
/* Host-side bookkeeping for one receive buffer: the skb and the bus
 * address it was DMA-mapped at.
 */
struct rxbuff_ent {
	struct sk_buff *skb;
	dma_addr_t	dma_addr;
};
/* Per-adapter state.  Fields are grouped by the cache line they are
 * expected to be hot on (Tx path, Irq/Rx path, then everything else).
 */
struct typhoon {
	/* Tx cache line section */
	struct transmit_ring 	txLoRing	____cacheline_aligned;
	struct pci_dev *	tx_pdev;
	void __iomem		*tx_ioaddr;
	u32			txlo_dma_addr;

	/* Irq/Rx cache line section */
	void __iomem		*ioaddr		____cacheline_aligned;
	struct typhoon_indexes *indexes;
	u8			awaiting_resp;
	u8			duplex;
	u8			speed;
	u8			card_state;	/* enum state_values */
	struct basic_ring	rxLoRing;
	struct pci_dev *	pdev;
	struct net_device *	dev;
	struct napi_struct	napi;
	spinlock_t		state_lock;
	struct vlan_group *	vlgrp;
	struct basic_ring	rxHiRing;
	struct basic_ring	rxBuffRing;
	struct rxbuff_ent	rxbuffers[RXENT_ENTRIES];

	/* general section */
	spinlock_t		command_lock	____cacheline_aligned;
	struct basic_ring	cmdRing;
	struct basic_ring	respRing;
	struct net_device_stats	stats;
	struct net_device_stats	stats_saved;
	struct typhoon_shared *	shared;
	dma_addr_t		shared_dma;
	__le16			xcvr_select;
	__le16			wol_events;
	__le32			offload;

	/* unused stuff (future use) */
	int			capabilities;
	struct transmit_ring 	txHiRing;
};
/* How a caller may wait for a reset/command to complete:
 * busy-poll without sleeping, or sleep between polls.
 */
enum completion_wait_values {
	NoWait = 0, WaitNoSleep, WaitSleep,
};

/* These are the values for the typhoon.card_state variable.
 * These determine where the statistics will come from in get_stats().
 * The sleep image does not support the statistics we need.
 */
enum state_values {
	Sleeping = 0, Running,
};
/* PCI writes are not guaranteed to be posted in order, but outstanding writes
 * cannot pass a read, so this forces current writes to post.
 */
#define typhoon_post_pci_writes(x) \
	do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0)

/* We'll wait up to six seconds for a reset, and half a second normally.
 */
#define TYPHOON_UDELAY			50
#define TYPHOON_RESET_TIMEOUT_SLEEP	(6 * HZ)
#define TYPHOON_RESET_TIMEOUT_NOSLEEP	((6 * 1000000) / TYPHOON_UDELAY)
#define TYPHOON_WAIT_TIMEOUT		((1000000 / 2) / TYPHOON_UDELAY)

/* With TSO compiled in, pull the segment size from the skb; otherwise
 * stub everything out so the TSO paths compile away.
 */
#if defined(NETIF_F_TSO)
#define skb_tso_size(x)		(skb_shinfo(x)->gso_size)
#define TSO_NUM_DESCRIPTORS	2
#define TSO_OFFLOAD_ON		TYPHOON_OFFLOAD_TCP_SEGMENT
#else
#define NETIF_F_TSO 		0
#define skb_tso_size(x) 	0
#define TSO_NUM_DESCRIPTORS	0
#define TSO_OFFLOAD_ON		0
#endif
343 static inline void
344 typhoon_inc_index(u32 *index, const int count, const int num_entries)
346 /* Increment a ring index -- we can use this for all rings execept
347 * the Rx rings, as they use different size descriptors
348 * otherwise, everything is the same size as a cmd_desc
350 *index += count * sizeof(struct cmd_desc);
351 *index %= num_entries * sizeof(struct cmd_desc);
/* Advance a command-ring index by @count entries. */
static inline void
typhoon_inc_cmd_index(u32 *index, const int count)
{
	typhoon_inc_index(index, count, COMMAND_ENTRIES);
}
/* Advance a response-ring index by @count entries. */
static inline void
typhoon_inc_resp_index(u32 *index, const int count)
{
	typhoon_inc_index(index, count, RESPONSE_ENTRIES);
}
/* Advance a free-buffer-ring index by @count entries. */
static inline void
typhoon_inc_rxfree_index(u32 *index, const int count)
{
	typhoon_inc_index(index, count, RXFREE_ENTRIES);
}
/* Advance a Tx-ring index by @count entries. */
static inline void
typhoon_inc_tx_index(u32 *index, const int count)
{
	/* if we start using the Hi Tx ring, this needs updateing */
	typhoon_inc_index(index, count, TXLO_ENTRIES);
}
379 static inline void
380 typhoon_inc_rx_index(u32 *index, const int count)
382 /* sizeof(struct rx_desc) != sizeof(struct cmd_desc) */
383 *index += count * sizeof(struct rx_desc);
384 *index %= RX_ENTRIES * sizeof(struct rx_desc);
387 static int
388 typhoon_reset(void __iomem *ioaddr, int wait_type)
390 int i, err = 0;
391 int timeout;
393 if(wait_type == WaitNoSleep)
394 timeout = TYPHOON_RESET_TIMEOUT_NOSLEEP;
395 else
396 timeout = TYPHOON_RESET_TIMEOUT_SLEEP;
398 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
399 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
401 iowrite32(TYPHOON_RESET_ALL, ioaddr + TYPHOON_REG_SOFT_RESET);
402 typhoon_post_pci_writes(ioaddr);
403 udelay(1);
404 iowrite32(TYPHOON_RESET_NONE, ioaddr + TYPHOON_REG_SOFT_RESET);
406 if(wait_type != NoWait) {
407 for(i = 0; i < timeout; i++) {
408 if(ioread32(ioaddr + TYPHOON_REG_STATUS) ==
409 TYPHOON_STATUS_WAITING_FOR_HOST)
410 goto out;
412 if(wait_type == WaitSleep)
413 schedule_timeout_uninterruptible(1);
414 else
415 udelay(TYPHOON_UDELAY);
418 err = -ETIMEDOUT;
421 out:
422 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
423 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
425 /* The 3XP seems to need a little extra time to complete the load
426 * of the sleep image before we can reliably boot it. Failure to
427 * do this occasionally results in a hung adapter after boot in
428 * typhoon_init_one() while trying to read the MAC address or
429 * putting the card to sleep. 3Com's driver waits 5ms, but
430 * that seems to be overkill. However, if we can sleep, we might
431 * as well give it that much time. Otherwise, we'll give it 500us,
432 * which should be enough (I've see it work well at 100us, but still
433 * saw occasional problems.)
435 if(wait_type == WaitSleep)
436 msleep(5);
437 else
438 udelay(500);
439 return err;
442 static int
443 typhoon_wait_status(void __iomem *ioaddr, u32 wait_value)
445 int i, err = 0;
447 for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
448 if(ioread32(ioaddr + TYPHOON_REG_STATUS) == wait_value)
449 goto out;
450 udelay(TYPHOON_UDELAY);
453 err = -ETIMEDOUT;
455 out:
456 return err;
459 static inline void
460 typhoon_media_status(struct net_device *dev, struct resp_desc *resp)
462 if(resp->parm1 & TYPHOON_MEDIA_STAT_NO_LINK)
463 netif_carrier_off(dev);
464 else
465 netif_carrier_on(dev);
468 static inline void
469 typhoon_hello(struct typhoon *tp)
471 struct basic_ring *ring = &tp->cmdRing;
472 struct cmd_desc *cmd;
474 /* We only get a hello request if we've not sent anything to the
475 * card in a long while. If the lock is held, then we're in the
476 * process of issuing a command, so we don't need to respond.
478 if(spin_trylock(&tp->command_lock)) {
479 cmd = (struct cmd_desc *)(ring->ringBase + ring->lastWrite);
480 typhoon_inc_cmd_index(&ring->lastWrite, 1);
482 INIT_COMMAND_NO_RESPONSE(cmd, TYPHOON_CMD_HELLO_RESP);
483 wmb();
484 iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
485 spin_unlock(&tp->command_lock);
/* Drain the response ring.  Each response the 3XP posted is either
 * copied out to @resp_save (the reply to an in-flight command, marked by
 * a nonzero seqNo), handled in place (media status / hello), or dumped
 * as unexpected.  Returns nonzero once the awaited reply (if any) was
 * captured -- i.e. (resp_save == NULL) on exit.
 */
static int
typhoon_process_response(struct typhoon *tp, int resp_size,
		struct resp_desc *resp_save)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct resp_desc *resp;
	u8 *base = tp->respRing.ringBase;
	int count, len, wrap_len;
	u32 cleared;
	u32 ready;

	cleared = le32_to_cpu(indexes->respCleared);
	ready = le32_to_cpu(indexes->respReady);
	while(cleared != ready) {
		resp = (struct resp_desc *)(base + cleared);
		/* a response may span numDesc+1 descriptors */
		count = resp->numDesc + 1;
		if(resp_save && resp->seqNo) {
			if(count > resp_size) {
				/* reply too big for the caller's buffer */
				resp_save->flags = TYPHOON_RESP_ERROR;
				goto cleanup;
			}

			/* copy out, handling a wrap past the ring end */
			wrap_len = 0;
			len = count * sizeof(*resp);
			if(unlikely(cleared + len > RESPONSE_RING_SIZE)) {
				wrap_len = cleared + len - RESPONSE_RING_SIZE;
				len = RESPONSE_RING_SIZE - cleared;
			}

			memcpy(resp_save, resp, len);
			if(unlikely(wrap_len)) {
				resp_save += len / sizeof(*resp);
				memcpy(resp_save, base, wrap_len);
			}

			/* mark the awaited reply as captured */
			resp_save = NULL;
		} else if(resp->cmd == TYPHOON_CMD_READ_MEDIA_STATUS) {
			typhoon_media_status(tp->dev, resp);
		} else if(resp->cmd == TYPHOON_CMD_HELLO_RESP) {
			typhoon_hello(tp);
		} else {
			netdev_err(tp->dev,
				   "dumping unexpected response 0x%04x:%d:0x%02x:0x%04x:%08x:%08x\n",
				   le16_to_cpu(resp->cmd),
				   resp->numDesc, resp->flags,
				   le16_to_cpu(resp->parm1),
				   le32_to_cpu(resp->parm2),
				   le32_to_cpu(resp->parm3));
		}

cleanup:
		typhoon_inc_resp_index(&cleared, count);
	}

	indexes->respCleared = cpu_to_le32(cleared);
	/* index update must be visible before the caller re-inspects */
	wmb();
	return (resp_save == NULL);
}
548 static inline int
549 typhoon_num_free(int lastWrite, int lastRead, int ringSize)
551 /* this works for all descriptors but rx_desc, as they are a
552 * different size than the cmd_desc -- everyone else is the same
554 lastWrite /= sizeof(struct cmd_desc);
555 lastRead /= sizeof(struct cmd_desc);
556 return (ringSize + lastRead - lastWrite - 1) % ringSize;
559 static inline int
560 typhoon_num_free_cmd(struct typhoon *tp)
562 int lastWrite = tp->cmdRing.lastWrite;
563 int cmdCleared = le32_to_cpu(tp->indexes->cmdCleared);
565 return typhoon_num_free(lastWrite, cmdCleared, COMMAND_ENTRIES);
568 static inline int
569 typhoon_num_free_resp(struct typhoon *tp)
571 int respReady = le32_to_cpu(tp->indexes->respReady);
572 int respCleared = le32_to_cpu(tp->indexes->respCleared);
574 return typhoon_num_free(respReady, respCleared, RESPONSE_ENTRIES);
577 static inline int
578 typhoon_num_free_tx(struct transmit_ring *ring)
580 /* if we start using the Hi Tx ring, this needs updating */
581 return typhoon_num_free(ring->lastWrite, ring->lastRead, TXLO_ENTRIES);
/* Post @num_cmd command descriptors to the 3XP and, if the command wants
 * a response, busy-wait (up to ~0.5s) for it to arrive in @resp (or a
 * local scratch descriptor when the caller passed NULL).  Serialized by
 * tp->command_lock.  Returns 0, -ENOMEM (no ring space), -ETIMEDOUT, or
 * -EIO (card flagged the response as an error).
 */
static int
typhoon_issue_command(struct typhoon *tp, int num_cmd, struct cmd_desc *cmd,
		      int num_resp, struct resp_desc *resp)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct basic_ring *ring = &tp->cmdRing;
	struct resp_desc local_resp;
	int i, err = 0;
	int got_resp;
	int freeCmd, freeResp;
	int len, wrap_len;

	spin_lock(&tp->command_lock);

	freeCmd = typhoon_num_free_cmd(tp);
	freeResp = typhoon_num_free_resp(tp);

	if(freeCmd < num_cmd || freeResp < num_resp) {
		netdev_err(tp->dev, "no descs for cmd, had (needed) %d (%d) cmd, %d (%d) resp\n",
			   freeCmd, num_cmd, freeResp, num_resp);
		err = -ENOMEM;
		goto out;
	}

	if(cmd->flags & TYPHOON_CMD_RESPOND) {
		/* If we're expecting a response, but the caller hasn't given
		 * us a place to put it, we'll provide one.
		 */
		tp->awaiting_resp = 1;
		if(resp == NULL) {
			resp = &local_resp;
			num_resp = 1;
		}
	}

	/* copy the command into the ring, splitting it at a wrap */
	wrap_len = 0;
	len = num_cmd * sizeof(*cmd);
	if(unlikely(ring->lastWrite + len > COMMAND_RING_SIZE)) {
		wrap_len = ring->lastWrite + len - COMMAND_RING_SIZE;
		len = COMMAND_RING_SIZE - ring->lastWrite;
	}

	memcpy(ring->ringBase + ring->lastWrite, cmd, len);
	if(unlikely(wrap_len)) {
		struct cmd_desc *wrap_ptr = cmd;
		wrap_ptr += len / sizeof(*cmd);
		memcpy(ring->ringBase, wrap_ptr, wrap_len);
	}

	typhoon_inc_cmd_index(&ring->lastWrite, num_cmd);

	/* "I feel a presence... another warrior is on the mesa."
	 */
	wmb();
	iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
	typhoon_post_pci_writes(tp->ioaddr);

	if((cmd->flags & TYPHOON_CMD_RESPOND) == 0)
		goto out;

	/* Ugh. We'll be here about 8ms, spinning our thumbs, unable to
	 * preempt or do anything other than take interrupts. So, don't
	 * wait for a response unless you have to.
	 *
	 * I've thought about trying to sleep here, but we're called
	 * from many contexts that don't allow that. Also, given the way
	 * 3Com has implemented irq coalescing, we would likely timeout --
	 * this has been observed in real life!
	 *
	 * The big killer is we have to wait to get stats from the card,
	 * though we could go to a periodic refresh of those if we don't
	 * mind them getting somewhat stale. The rest of the waiting
	 * commands occur during open/close/suspend/resume, so they aren't
	 * time critical. Creating SAs in the future will also have to
	 * wait here.
	 */
	got_resp = 0;
	for(i = 0; i < TYPHOON_WAIT_TIMEOUT && !got_resp; i++) {
		if(indexes->respCleared != indexes->respReady)
			got_resp = typhoon_process_response(tp, num_resp,
								resp);
		udelay(TYPHOON_UDELAY);
	}

	if(!got_resp) {
		err = -ETIMEDOUT;
		goto out;
	}

	/* Collect the error response even if we don't care about the
	 * rest of the response
	 */
	if(resp->flags & TYPHOON_RESP_ERROR)
		err = -EIO;

out:
	if(tp->awaiting_resp) {
		tp->awaiting_resp = 0;
		smp_wmb();

		/* Ugh. If a response was added to the ring between
		 * the call to typhoon_process_response() and the clearing
		 * of tp->awaiting_resp, we could have missed the interrupt
		 * and it could hang in the ring an indeterminate amount of
		 * time. So, check for it, and interrupt ourselves if this
		 * is the case.
		 */
		if(indexes->respCleared != indexes->respReady)
			iowrite32(1, tp->ioaddr + TYPHOON_REG_SELF_INTERRUPT);
	}

	spin_unlock(&tp->command_lock);
	return err;
}
/* VLAN-core callback: remember the new vlan_group and, when VLAN
 * acceleration turns on or off as a result, update the 3XP's offload
 * task mask.  The command is issued with state_lock dropped because it
 * can take milliseconds.
 */
static void
typhoon_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct typhoon *tp = netdev_priv(dev);
	struct cmd_desc xp_cmd;
	int err;

	spin_lock_bh(&tp->state_lock);
	if(!tp->vlgrp != !grp) {
		/* We've either been turned on for the first time, or we've
		 * been turned off. Update the 3XP.
		 */
		if(grp)
			tp->offload |= TYPHOON_OFFLOAD_VLAN;
		else
			tp->offload &= ~TYPHOON_OFFLOAD_VLAN;

		/* If the interface is up, the runtime is running -- and we
		 * must be up for the vlan core to call us.
		 *
		 * Do the command outside of the spin lock, as it is slow.
		 */
		INIT_COMMAND_WITH_RESPONSE(&xp_cmd,
					TYPHOON_CMD_SET_OFFLOAD_TASKS);
		xp_cmd.parm2 = tp->offload;
		xp_cmd.parm3 = tp->offload;
		spin_unlock_bh(&tp->state_lock);
		err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
		if(err < 0)
			netdev_err(tp->dev, "vlan offload error %d\n", -err);
		spin_lock_bh(&tp->state_lock);
	}

	/* now make the change visible */
	tp->vlgrp = grp;
	spin_unlock_bh(&tp->state_lock);
}
737 static inline void
738 typhoon_tso_fill(struct sk_buff *skb, struct transmit_ring *txRing,
739 u32 ring_dma)
741 struct tcpopt_desc *tcpd;
742 u32 tcpd_offset = ring_dma;
744 tcpd = (struct tcpopt_desc *) (txRing->ringBase + txRing->lastWrite);
745 tcpd_offset += txRing->lastWrite;
746 tcpd_offset += offsetof(struct tcpopt_desc, bytesTx);
747 typhoon_inc_tx_index(&txRing->lastWrite, 1);
749 tcpd->flags = TYPHOON_OPT_DESC | TYPHOON_OPT_TCP_SEG;
750 tcpd->numDesc = 1;
751 tcpd->mss_flags = cpu_to_le16(skb_tso_size(skb));
752 tcpd->mss_flags |= TYPHOON_TSO_FIRST | TYPHOON_TSO_LAST;
753 tcpd->respAddrLo = cpu_to_le32(tcpd_offset);
754 tcpd->bytesTx = cpu_to_le32(skb->len);
755 tcpd->status = 0;
/* ndo_start_xmit: build the Tx descriptor chain for @skb on the low
 * priority ring (header descriptor, optional TSO option descriptor, one
 * fragment descriptor per DMA mapping), ring the doorbell, and stop the
 * queue if a worst-case packet would no longer fit.
 */
static netdev_tx_t
typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct typhoon *tp = netdev_priv(dev);
	struct transmit_ring *txRing;
	struct tx_desc *txd, *first_txd;
	dma_addr_t skb_dma;
	int numDesc;

	/* we have two rings to choose from, but we only use txLo for now
	 * If we start using the Hi ring as well, we'll need to update
	 * typhoon_stop_runtime(), typhoon_interrupt(), typhoon_num_free_tx(),
	 * and TXHI_ENTRIES to match, as well as update the TSO code below
	 * to get the right DMA address
	 */
	txRing = &tp->txLoRing;

	/* We need one descriptor for each fragment of the sk_buff, plus the
	 * one for the ->data area of it.
	 *
	 * The docs say a maximum of 16 fragment descriptors per TCP option
	 * descriptor, then make a new packet descriptor and option descriptor
	 * for the next 16 fragments. The engineers say just an option
	 * descriptor is needed. I've tested up to 26 fragments with a single
	 * packet descriptor/option descriptor combo, so I use that for now.
	 *
	 * If problems develop with TSO, check this first.
	 */
	numDesc = skb_shinfo(skb)->nr_frags + 1;
	if (skb_is_gso(skb))
		numDesc++;

	/* When checking for free space in the ring, we need to also
	 * account for the initial Tx descriptor, and we always must leave
	 * at least one descriptor unused in the ring so that it doesn't
	 * wrap and look empty.
	 *
	 * The only time we should loop here is when we hit the race
	 * between marking the queue awake and updating the cleared index.
	 * Just loop and it will appear. This comes from the acenic driver.
	 */
	while(unlikely(typhoon_num_free_tx(txRing) < (numDesc + 2)))
		smp_rmb();

	first_txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
	typhoon_inc_tx_index(&txRing->lastWrite, 1);

	first_txd->flags = TYPHOON_TX_DESC | TYPHOON_DESC_VALID;
	first_txd->numDesc = 0;
	first_txd->len = 0;
	/* NOTE(review): tx_addr here stores the skb pointer itself, not a
	 * DMA address -- presumably for the Tx-complete path to find the
	 * skb; confirm against typhoon_tx_complete().
	 */
	first_txd->tx_addr = (u64)((unsigned long) skb);
	first_txd->processFlags = 0;

	if(skb->ip_summed == CHECKSUM_PARTIAL) {
		/* The 3XP will figure out if this is UDP/TCP */
		first_txd->processFlags |= TYPHOON_TX_PF_TCP_CHKSUM;
		first_txd->processFlags |= TYPHOON_TX_PF_UDP_CHKSUM;
		first_txd->processFlags |= TYPHOON_TX_PF_IP_CHKSUM;
	}

	if(vlan_tx_tag_present(skb)) {
		first_txd->processFlags |=
		    TYPHOON_TX_PF_INSERT_VLAN | TYPHOON_TX_PF_VLAN_PRIORITY;
		first_txd->processFlags |=
		    cpu_to_le32(ntohs(vlan_tx_tag_get(skb)) <<
				TYPHOON_TX_PF_VLAN_TAG_SHIFT);
	}

	if (skb_is_gso(skb)) {
		first_txd->processFlags |= TYPHOON_TX_PF_TCP_SEGMENT;
		first_txd->numDesc++;

		typhoon_tso_fill(skb, txRing, tp->txlo_dma_addr);
	}

	txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
	typhoon_inc_tx_index(&txRing->lastWrite, 1);

	/* No need to worry about padding packet -- the firmware pads
	 * it with zeros to ETH_ZLEN for us.
	 */
	if(skb_shinfo(skb)->nr_frags == 0) {
		/* linear skb: a single fragment descriptor covers it */
		skb_dma = pci_map_single(tp->tx_pdev, skb->data, skb->len,
				       PCI_DMA_TODEVICE);
		txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
		txd->len = cpu_to_le16(skb->len);
		txd->frag.addr = cpu_to_le32(skb_dma);
		txd->frag.addrHi = 0;
		first_txd->numDesc++;
	} else {
		int i, len;

		/* map the linear head, then each page fragment */
		len = skb_headlen(skb);
		skb_dma = pci_map_single(tp->tx_pdev, skb->data, len,
				         PCI_DMA_TODEVICE);
		txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
		txd->len = cpu_to_le16(len);
		txd->frag.addr = cpu_to_le32(skb_dma);
		txd->frag.addrHi = 0;
		first_txd->numDesc++;

		for(i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			void *frag_addr;

			txd = (struct tx_desc *) (txRing->ringBase +
						txRing->lastWrite);
			typhoon_inc_tx_index(&txRing->lastWrite, 1);

			len = frag->size;
			frag_addr = (void *) page_address(frag->page) +
						frag->page_offset;
			skb_dma = pci_map_single(tp->tx_pdev, frag_addr, len,
					 PCI_DMA_TODEVICE);
			txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
			txd->len = cpu_to_le16(len);
			txd->frag.addr = cpu_to_le32(skb_dma);
			txd->frag.addrHi = 0;
			first_txd->numDesc++;
		}
	}

	/* Kick the 3XP
	 */
	wmb();
	iowrite32(txRing->lastWrite, tp->tx_ioaddr + txRing->writeRegister);

	dev->trans_start = jiffies;

	/* If we don't have room to put the worst case packet on the
	 * queue, then we must stop the queue. We need 2 extra
	 * descriptors -- one to prevent ring wrap, and one for the
	 * Tx header.
	 */
	numDesc = MAX_SKB_FRAGS + TSO_NUM_DESCRIPTORS + 1;

	if(typhoon_num_free_tx(txRing) < (numDesc + 2)) {
		netif_stop_queue(dev);

		/* A Tx complete IRQ could have gotten inbetween, making
		 * the ring free again. Only need to recheck here, since
		 * Tx is serialized.
		 */
		if(typhoon_num_free_tx(txRing) >= (numDesc + 2))
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;
}
908 static void
909 typhoon_set_rx_mode(struct net_device *dev)
911 struct typhoon *tp = netdev_priv(dev);
912 struct cmd_desc xp_cmd;
913 u32 mc_filter[2];
914 __le16 filter;
916 filter = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
917 if(dev->flags & IFF_PROMISC) {
918 filter |= TYPHOON_RX_FILTER_PROMISCOUS;
919 } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
920 (dev->flags & IFF_ALLMULTI)) {
921 /* Too many to match, or accept all multicasts. */
922 filter |= TYPHOON_RX_FILTER_ALL_MCAST;
923 } else if (!netdev_mc_empty(dev)) {
924 struct dev_mc_list *mclist;
926 memset(mc_filter, 0, sizeof(mc_filter));
927 netdev_for_each_mc_addr(mclist, dev) {
928 int bit = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3f;
929 mc_filter[bit >> 5] |= 1 << (bit & 0x1f);
932 INIT_COMMAND_NO_RESPONSE(&xp_cmd,
933 TYPHOON_CMD_SET_MULTICAST_HASH);
934 xp_cmd.parm1 = TYPHOON_MCAST_HASH_SET;
935 xp_cmd.parm2 = cpu_to_le32(mc_filter[0]);
936 xp_cmd.parm3 = cpu_to_le32(mc_filter[1]);
937 typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
939 filter |= TYPHOON_RX_FILTER_MCAST_HASH;
942 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
943 xp_cmd.parm1 = filter;
944 typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
/* typhoon_do_get_stats(): issue TYPHOON_CMD_READ_STATS to the 3XP and fold
 * the card's counters, plus the counters saved across the last sleep
 * (tp->stats_saved), into tp->stats.  Also refreshes tp->speed/tp->duplex
 * from the reported link status.
 *
 * Returns 0 on success or the negative error from typhoon_issue_command().
 * NOTE(review): issuing the command polls for up to ~8ms (see file header).
 */
static int
typhoon_do_get_stats(struct typhoon *tp)
{
	struct net_device_stats *stats = &tp->stats;
	struct net_device_stats *saved = &tp->stats_saved;
	struct cmd_desc xp_cmd;
	struct resp_desc xp_resp[7];
	/* the 7 response descriptors are overlaid by the stats layout */
	struct stats_resp *s = (struct stats_resp *) xp_resp;
	int err;

	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_STATS);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 7, xp_resp);
	if(err < 0)
		return err;

	/* 3Com's Linux driver uses txMultipleCollisions as it's
	 * collisions value, but there is some other collision info as well...
	 *
	 * The extra status reported would be a good candidate for
	 * ethtool_ops->get_{strings,stats}()
	 */
	stats->tx_packets = le32_to_cpu(s->txPackets);
	stats->tx_bytes = le64_to_cpu(s->txBytes);
	stats->tx_errors = le32_to_cpu(s->txCarrierLost);
	stats->tx_carrier_errors = le32_to_cpu(s->txCarrierLost);
	stats->collisions = le32_to_cpu(s->txMultipleCollisions);
	stats->rx_packets = le32_to_cpu(s->rxPacketsGood);
	stats->rx_bytes = le64_to_cpu(s->rxBytesGood);
	stats->rx_fifo_errors = le32_to_cpu(s->rxFifoOverruns);
	/* rx_errors aggregates FIFO overruns, bad start-of-stream delimiters
	 * and CRC errors */
	stats->rx_errors = le32_to_cpu(s->rxFifoOverruns) +
			le32_to_cpu(s->BadSSD) + le32_to_cpu(s->rxCrcErrors);
	stats->rx_crc_errors = le32_to_cpu(s->rxCrcErrors);
	stats->rx_length_errors = le32_to_cpu(s->rxOversized);
	tp->speed = (s->linkStatus & TYPHOON_LINK_100MBPS) ?
			SPEED_100 : SPEED_10;
	tp->duplex = (s->linkStatus & TYPHOON_LINK_FULL_DUPLEX) ?
			DUPLEX_FULL : DUPLEX_HALF;

	/* add in the saved statistics
	 */
	stats->tx_packets += saved->tx_packets;
	stats->tx_bytes += saved->tx_bytes;
	stats->tx_errors += saved->tx_errors;
	stats->collisions += saved->collisions;
	stats->rx_packets += saved->rx_packets;
	stats->rx_bytes += saved->rx_bytes;
	stats->rx_fifo_errors += saved->rx_fifo_errors;
	stats->rx_errors += saved->rx_errors;
	stats->rx_crc_errors += saved->rx_crc_errors;
	stats->rx_length_errors += saved->rx_length_errors;

	return 0;
}
1001 static struct net_device_stats *
1002 typhoon_get_stats(struct net_device *dev)
1004 struct typhoon *tp = netdev_priv(dev);
1005 struct net_device_stats *stats = &tp->stats;
1006 struct net_device_stats *saved = &tp->stats_saved;
1008 smp_rmb();
1009 if(tp->card_state == Sleeping)
1010 return saved;
1012 if(typhoon_do_get_stats(tp) < 0) {
1013 netdev_err(dev, "error getting stats\n");
1014 return saved;
1017 return stats;
1020 static int
1021 typhoon_set_mac_address(struct net_device *dev, void *addr)
1023 struct sockaddr *saddr = (struct sockaddr *) addr;
1025 if(netif_running(dev))
1026 return -EBUSY;
1028 memcpy(dev->dev_addr, saddr->sa_data, dev->addr_len);
1029 return 0;
1032 static void
1033 typhoon_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1035 struct typhoon *tp = netdev_priv(dev);
1036 struct pci_dev *pci_dev = tp->pdev;
1037 struct cmd_desc xp_cmd;
1038 struct resp_desc xp_resp[3];
1040 smp_rmb();
1041 if(tp->card_state == Sleeping) {
1042 strcpy(info->fw_version, "Sleep image");
1043 } else {
1044 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
1045 if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
1046 strcpy(info->fw_version, "Unknown runtime");
1047 } else {
1048 u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
1049 snprintf(info->fw_version, 32, "%02x.%03x.%03x",
1050 sleep_ver >> 24, (sleep_ver >> 12) & 0xfff,
1051 sleep_ver & 0xfff);
1055 strcpy(info->driver, KBUILD_MODNAME);
1056 strcpy(info->version, UTS_RELEASE);
1057 strcpy(info->bus_info, pci_name(pci_dev));
1060 static int
1061 typhoon_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1063 struct typhoon *tp = netdev_priv(dev);
1065 cmd->supported = SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
1066 SUPPORTED_Autoneg;
1068 switch (tp->xcvr_select) {
1069 case TYPHOON_XCVR_10HALF:
1070 cmd->advertising = ADVERTISED_10baseT_Half;
1071 break;
1072 case TYPHOON_XCVR_10FULL:
1073 cmd->advertising = ADVERTISED_10baseT_Full;
1074 break;
1075 case TYPHOON_XCVR_100HALF:
1076 cmd->advertising = ADVERTISED_100baseT_Half;
1077 break;
1078 case TYPHOON_XCVR_100FULL:
1079 cmd->advertising = ADVERTISED_100baseT_Full;
1080 break;
1081 case TYPHOON_XCVR_AUTONEG:
1082 cmd->advertising = ADVERTISED_10baseT_Half |
1083 ADVERTISED_10baseT_Full |
1084 ADVERTISED_100baseT_Half |
1085 ADVERTISED_100baseT_Full |
1086 ADVERTISED_Autoneg;
1087 break;
1090 if(tp->capabilities & TYPHOON_FIBER) {
1091 cmd->supported |= SUPPORTED_FIBRE;
1092 cmd->advertising |= ADVERTISED_FIBRE;
1093 cmd->port = PORT_FIBRE;
1094 } else {
1095 cmd->supported |= SUPPORTED_10baseT_Half |
1096 SUPPORTED_10baseT_Full |
1097 SUPPORTED_TP;
1098 cmd->advertising |= ADVERTISED_TP;
1099 cmd->port = PORT_TP;
1102 /* need to get stats to make these link speed/duplex valid */
1103 typhoon_do_get_stats(tp);
1104 cmd->speed = tp->speed;
1105 cmd->duplex = tp->duplex;
1106 cmd->phy_address = 0;
1107 cmd->transceiver = XCVR_INTERNAL;
1108 if(tp->xcvr_select == TYPHOON_XCVR_AUTONEG)
1109 cmd->autoneg = AUTONEG_ENABLE;
1110 else
1111 cmd->autoneg = AUTONEG_DISABLE;
1112 cmd->maxtxpkt = 1;
1113 cmd->maxrxpkt = 1;
1115 return 0;
1118 static int
1119 typhoon_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1121 struct typhoon *tp = netdev_priv(dev);
1122 struct cmd_desc xp_cmd;
1123 __le16 xcvr;
1124 int err;
1126 err = -EINVAL;
1127 if(cmd->autoneg == AUTONEG_ENABLE) {
1128 xcvr = TYPHOON_XCVR_AUTONEG;
1129 } else {
1130 if(cmd->duplex == DUPLEX_HALF) {
1131 if(cmd->speed == SPEED_10)
1132 xcvr = TYPHOON_XCVR_10HALF;
1133 else if(cmd->speed == SPEED_100)
1134 xcvr = TYPHOON_XCVR_100HALF;
1135 else
1136 goto out;
1137 } else if(cmd->duplex == DUPLEX_FULL) {
1138 if(cmd->speed == SPEED_10)
1139 xcvr = TYPHOON_XCVR_10FULL;
1140 else if(cmd->speed == SPEED_100)
1141 xcvr = TYPHOON_XCVR_100FULL;
1142 else
1143 goto out;
1144 } else
1145 goto out;
1148 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT);
1149 xp_cmd.parm1 = xcvr;
1150 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1151 if(err < 0)
1152 goto out;
1154 tp->xcvr_select = xcvr;
1155 if(cmd->autoneg == AUTONEG_ENABLE) {
1156 tp->speed = 0xff; /* invalid */
1157 tp->duplex = 0xff; /* invalid */
1158 } else {
1159 tp->speed = cmd->speed;
1160 tp->duplex = cmd->duplex;
1163 out:
1164 return err;
1167 static void
1168 typhoon_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1170 struct typhoon *tp = netdev_priv(dev);
1172 wol->supported = WAKE_PHY | WAKE_MAGIC;
1173 wol->wolopts = 0;
1174 if(tp->wol_events & TYPHOON_WAKE_LINK_EVENT)
1175 wol->wolopts |= WAKE_PHY;
1176 if(tp->wol_events & TYPHOON_WAKE_MAGIC_PKT)
1177 wol->wolopts |= WAKE_MAGIC;
1178 memset(&wol->sopass, 0, sizeof(wol->sopass));
1181 static int
1182 typhoon_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1184 struct typhoon *tp = netdev_priv(dev);
1186 if(wol->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
1187 return -EINVAL;
1189 tp->wol_events = 0;
1190 if(wol->wolopts & WAKE_PHY)
1191 tp->wol_events |= TYPHOON_WAKE_LINK_EVENT;
1192 if(wol->wolopts & WAKE_MAGIC)
1193 tp->wol_events |= TYPHOON_WAKE_MAGIC_PKT;
1195 return 0;
1198 static u32
1199 typhoon_get_rx_csum(struct net_device *dev)
1201 /* For now, we don't allow turning off RX checksums.
1203 return 1;
1206 static void
1207 typhoon_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
1209 ering->rx_max_pending = RXENT_ENTRIES;
1210 ering->rx_mini_max_pending = 0;
1211 ering->rx_jumbo_max_pending = 0;
1212 ering->tx_max_pending = TXLO_ENTRIES - 1;
1214 ering->rx_pending = RXENT_ENTRIES;
1215 ering->rx_mini_pending = 0;
1216 ering->rx_jumbo_pending = 0;
1217 ering->tx_pending = TXLO_ENTRIES - 1;
/* ethtool entry points.  The generic ethtool_op_* helpers are used where
 * the driver just toggles the stack-side feature flag (tx csum, SG, TSO);
 * the typhoon_* callbacks above talk to the 3XP where needed.
 */
static const struct ethtool_ops typhoon_ethtool_ops = {
	.get_settings		= typhoon_get_settings,
	.set_settings		= typhoon_set_settings,
	.get_drvinfo		= typhoon_get_drvinfo,
	.get_wol		= typhoon_get_wol,
	.set_wol		= typhoon_set_wol,
	.get_link		= ethtool_op_get_link,
	.get_rx_csum		= typhoon_get_rx_csum,
	.set_tx_csum		= ethtool_op_set_tx_csum,
	.set_sg			= ethtool_op_set_sg,
	.set_tso		= ethtool_op_set_tso,
	.get_ringparam		= typhoon_get_ringparam,
};
1234 static int
1235 typhoon_wait_interrupt(void __iomem *ioaddr)
1237 int i, err = 0;
1239 for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
1240 if(ioread32(ioaddr + TYPHOON_REG_INTR_STATUS) &
1241 TYPHOON_INTR_BOOTCMD)
1242 goto out;
1243 udelay(TYPHOON_UDELAY);
1246 err = -ETIMEDOUT;
1248 out:
1249 iowrite32(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS);
1250 return err;
/* byte offset of a member within the shared (card-visible) memory block */
#define shared_offset(x)	offsetof(struct typhoon_shared, x)

/* typhoon_init_interface(): lay out the shared host/card communication
 * area.  Fills the interface descriptor with the bus addresses and sizes
 * of every ring, points the host-side ring bookkeeping at the same memory,
 * and initializes the driver locks and default offload flags.  The card
 * itself learns these addresses later via the boot record.
 */
static void
typhoon_init_interface(struct typhoon *tp)
{
	struct typhoon_interface *iface = &tp->shared->iface;
	dma_addr_t shared_dma;

	memset(tp->shared, 0, sizeof(struct typhoon_shared));

	/* The *Hi members of iface are all init'd to zero by the memset().
	 */
	shared_dma = tp->shared_dma + shared_offset(indexes);
	iface->ringIndex = cpu_to_le32(shared_dma);

	shared_dma = tp->shared_dma + shared_offset(txLo);
	iface->txLoAddr = cpu_to_le32(shared_dma);
	iface->txLoSize = cpu_to_le32(TXLO_ENTRIES * sizeof(struct tx_desc));

	shared_dma = tp->shared_dma + shared_offset(txHi);
	iface->txHiAddr = cpu_to_le32(shared_dma);
	iface->txHiSize = cpu_to_le32(TXHI_ENTRIES * sizeof(struct tx_desc));

	shared_dma = tp->shared_dma + shared_offset(rxBuff);
	iface->rxBuffAddr = cpu_to_le32(shared_dma);
	iface->rxBuffSize = cpu_to_le32(RXFREE_ENTRIES *
					sizeof(struct rx_free));

	shared_dma = tp->shared_dma + shared_offset(rxLo);
	iface->rxLoAddr = cpu_to_le32(shared_dma);
	iface->rxLoSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc));

	shared_dma = tp->shared_dma + shared_offset(rxHi);
	iface->rxHiAddr = cpu_to_le32(shared_dma);
	iface->rxHiSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc));

	shared_dma = tp->shared_dma + shared_offset(cmd);
	iface->cmdAddr = cpu_to_le32(shared_dma);
	iface->cmdSize = cpu_to_le32(COMMAND_RING_SIZE);

	shared_dma = tp->shared_dma + shared_offset(resp);
	iface->respAddr = cpu_to_le32(shared_dma);
	iface->respSize = cpu_to_le32(RESPONSE_RING_SIZE);

	shared_dma = tp->shared_dma + shared_offset(zeroWord);
	iface->zeroAddr = cpu_to_le32(shared_dma);

	/* host-side views of the same rings */
	tp->indexes = &tp->shared->indexes;
	tp->txLoRing.ringBase = (u8 *) tp->shared->txLo;
	tp->txHiRing.ringBase = (u8 *) tp->shared->txHi;
	tp->rxLoRing.ringBase = (u8 *) tp->shared->rxLo;
	tp->rxHiRing.ringBase = (u8 *) tp->shared->rxHi;
	tp->rxBuffRing.ringBase = (u8 *) tp->shared->rxBuff;
	tp->cmdRing.ringBase = (u8 *) tp->shared->cmd;
	tp->respRing.ringBase = (u8 *) tp->shared->resp;

	tp->txLoRing.writeRegister = TYPHOON_REG_TX_LO_READY;
	tp->txHiRing.writeRegister = TYPHOON_REG_TX_HI_READY;

	tp->txlo_dma_addr = le32_to_cpu(iface->txLoAddr);
	tp->card_state = Sleeping;

	tp->offload = TYPHOON_OFFLOAD_IP_CHKSUM | TYPHOON_OFFLOAD_TCP_CHKSUM;
	tp->offload |= TYPHOON_OFFLOAD_UDP_CHKSUM | TSO_OFFLOAD_ON;

	spin_lock_init(&tp->command_lock);
	spin_lock_init(&tp->state_lock);

	/* Force the writes to the shared memory area out before continuing. */
	wmb();
}
1325 static void
1326 typhoon_init_rings(struct typhoon *tp)
1328 memset(tp->indexes, 0, sizeof(struct typhoon_indexes));
1330 tp->txLoRing.lastWrite = 0;
1331 tp->txHiRing.lastWrite = 0;
1332 tp->rxLoRing.lastWrite = 0;
1333 tp->rxHiRing.lastWrite = 0;
1334 tp->rxBuffRing.lastWrite = 0;
1335 tp->cmdRing.lastWrite = 0;
1336 tp->cmdRing.lastWrite = 0;
1338 tp->txLoRing.lastRead = 0;
1339 tp->txHiRing.lastRead = 0;
1342 static const struct firmware *typhoon_fw;
1344 static int
1345 typhoon_request_firmware(struct typhoon *tp)
1347 const struct typhoon_file_header *fHdr;
1348 const struct typhoon_section_header *sHdr;
1349 const u8 *image_data;
1350 u32 numSections;
1351 u32 section_len;
1352 u32 remaining;
1353 int err;
1355 if (typhoon_fw)
1356 return 0;
1358 err = request_firmware(&typhoon_fw, FIRMWARE_NAME, &tp->pdev->dev);
1359 if (err) {
1360 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
1361 FIRMWARE_NAME);
1362 return err;
1365 image_data = (u8 *) typhoon_fw->data;
1366 remaining = typhoon_fw->size;
1367 if (remaining < sizeof(struct typhoon_file_header))
1368 goto invalid_fw;
1370 fHdr = (struct typhoon_file_header *) image_data;
1371 if (memcmp(fHdr->tag, "TYPHOON", 8))
1372 goto invalid_fw;
1374 numSections = le32_to_cpu(fHdr->numSections);
1375 image_data += sizeof(struct typhoon_file_header);
1376 remaining -= sizeof(struct typhoon_file_header);
1378 while (numSections--) {
1379 if (remaining < sizeof(struct typhoon_section_header))
1380 goto invalid_fw;
1382 sHdr = (struct typhoon_section_header *) image_data;
1383 image_data += sizeof(struct typhoon_section_header);
1384 section_len = le32_to_cpu(sHdr->len);
1386 if (remaining < section_len)
1387 goto invalid_fw;
1389 image_data += section_len;
1390 remaining -= section_len;
1393 return 0;
1395 invalid_fw:
1396 netdev_err(tp->dev, "Invalid firmware image\n");
1397 release_firmware(typhoon_fw);
1398 typhoon_fw = NULL;
1399 return -EINVAL;
/* typhoon_download_firmware(): push the (already validated) runtime image
 * to the 3XP, one PAGE_SIZE chunk at a time, through a DMA bounce buffer.
 * The handshake per chunk is: wait for the boot-command interrupt, copy and
 * checksum the chunk, program length/checksum/destination/source registers,
 * then issue TYPHOON_BOOTCMD_SEG_AVAILABLE.  The boot-command interrupt is
 * temporarily enabled/unmasked around the whole download and restored on
 * exit.  Returns 0 or a negative error (-ENOMEM, -ETIMEDOUT).
 */
static int
typhoon_download_firmware(struct typhoon *tp)
{
	void __iomem *ioaddr = tp->ioaddr;
	struct pci_dev *pdev = tp->pdev;
	const struct typhoon_file_header *fHdr;
	const struct typhoon_section_header *sHdr;
	const u8 *image_data;
	void *dpage;
	dma_addr_t dpage_dma;
	__sum16 csum;
	u32 irqEnabled;
	u32 irqMasked;
	u32 numSections;
	u32 section_len;
	u32 len;
	u32 load_addr;
	u32 hmac;
	int i;
	int err;

	image_data = (u8 *) typhoon_fw->data;
	fHdr = (struct typhoon_file_header *) image_data;

	/* Cannot just map the firmware image using pci_map_single() as
	 * the firmware is vmalloc()'d and may not be physically contiguous,
	 * so we allocate some consistent memory to copy the sections into.
	 */
	err = -ENOMEM;
	dpage = pci_alloc_consistent(pdev, PAGE_SIZE, &dpage_dma);
	if(!dpage) {
		netdev_err(tp->dev, "no DMA mem for firmware\n");
		goto err_out;
	}

	/* enable + unmask the boot-command interrupt for the duration */
	irqEnabled = ioread32(ioaddr + TYPHOON_REG_INTR_ENABLE);
	iowrite32(irqEnabled | TYPHOON_INTR_BOOTCMD,
	       ioaddr + TYPHOON_REG_INTR_ENABLE);
	irqMasked = ioread32(ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(irqMasked | TYPHOON_INTR_BOOTCMD,
	       ioaddr + TYPHOON_REG_INTR_MASK);

	err = -ETIMEDOUT;
	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
		netdev_err(tp->dev, "card ready timeout\n");
		goto err_out_irq;
	}

	numSections = le32_to_cpu(fHdr->numSections);
	load_addr = le32_to_cpu(fHdr->startAddr);

	/* hand the card the entry point and the image's HMAC digest */
	iowrite32(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS);
	iowrite32(load_addr, ioaddr + TYPHOON_REG_DOWNLOAD_BOOT_ADDR);
	hmac = le32_to_cpu(fHdr->hmacDigest[0]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_0);
	hmac = le32_to_cpu(fHdr->hmacDigest[1]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_1);
	hmac = le32_to_cpu(fHdr->hmacDigest[2]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_2);
	hmac = le32_to_cpu(fHdr->hmacDigest[3]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_3);
	hmac = le32_to_cpu(fHdr->hmacDigest[4]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_4);
	typhoon_post_pci_writes(ioaddr);
	iowrite32(TYPHOON_BOOTCMD_RUNTIME_IMAGE, ioaddr + TYPHOON_REG_COMMAND);

	image_data += sizeof(struct typhoon_file_header);

	/* The ioread32() in typhoon_wait_interrupt() will force the
	 * last write to the command register to post, so
	 * we don't need a typhoon_post_pci_writes() after it.
	 */
	for(i = 0; i < numSections; i++) {
		sHdr = (struct typhoon_section_header *) image_data;
		image_data += sizeof(struct typhoon_section_header);
		load_addr = le32_to_cpu(sHdr->startAddr);
		section_len = le32_to_cpu(sHdr->len);

		while(section_len) {
			len = min_t(u32, section_len, PAGE_SIZE);

			if(typhoon_wait_interrupt(ioaddr) < 0 ||
			   ioread32(ioaddr + TYPHOON_REG_STATUS) !=
			   TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
				netdev_err(tp->dev, "segment ready timeout\n");
				goto err_out_irq;
			}

			/* Do an pseudo IPv4 checksum on the data -- first
			 * need to convert each u16 to cpu order before
			 * summing. Fortunately, due to the properties of
			 * the checksum, we can do this once, at the end.
			 */
			csum = csum_fold(csum_partial_copy_nocheck(image_data,
								   dpage, len,
								   0));

			iowrite32(len, ioaddr + TYPHOON_REG_BOOT_LENGTH);
			iowrite32(le16_to_cpu((__force __le16)csum),
			       ioaddr + TYPHOON_REG_BOOT_CHECKSUM);
			iowrite32(load_addr,
			       ioaddr + TYPHOON_REG_BOOT_DEST_ADDR);
			iowrite32(0, ioaddr + TYPHOON_REG_BOOT_DATA_HI);
			iowrite32(dpage_dma, ioaddr + TYPHOON_REG_BOOT_DATA_LO);
			typhoon_post_pci_writes(ioaddr);
			iowrite32(TYPHOON_BOOTCMD_SEG_AVAILABLE,
			       ioaddr + TYPHOON_REG_COMMAND);

			image_data += len;
			load_addr += len;
			section_len -= len;
		}
	}

	/* wait for the card to consume the final segment, then tell it
	 * the download is complete and wait for it to be ready to boot
	 */
	if(typhoon_wait_interrupt(ioaddr) < 0 ||
	   ioread32(ioaddr + TYPHOON_REG_STATUS) !=
	   TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
		netdev_err(tp->dev, "final segment ready timeout\n");
		goto err_out_irq;
	}

	iowrite32(TYPHOON_BOOTCMD_DNLD_COMPLETE, ioaddr + TYPHOON_REG_COMMAND);

	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
		netdev_err(tp->dev, "boot ready timeout, status 0x%0x\n",
			   ioread32(ioaddr + TYPHOON_REG_STATUS));
		goto err_out_irq;
	}

	err = 0;

err_out_irq:
	/* restore the caller's interrupt enable/mask state */
	iowrite32(irqMasked, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(irqEnabled, ioaddr + TYPHOON_REG_INTR_ENABLE);

	pci_free_consistent(pdev, PAGE_SIZE, dpage, dpage_dma);

err_out:
	return err;
}
/* typhoon_boot_3XP(): boot the image currently on the card.  Waits for the
 * card to reach initial_status, hands it the bus address of the shared boot
 * record, waits for TYPHOON_STATUS_RUNNING, then clears the doorbell
 * registers and issues the final boot command.
 *
 * Returns 0 on success or -ETIMEDOUT.
 */
static int
typhoon_boot_3XP(struct typhoon *tp, u32 initial_status)
{
	void __iomem *ioaddr = tp->ioaddr;

	if(typhoon_wait_status(ioaddr, initial_status) < 0) {
		netdev_err(tp->dev, "boot ready timeout\n");
		goto out_timeout;
	}

	/* point the card at the boot record (no 64 bit DAC, high half 0) */
	iowrite32(0, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_HI);
	iowrite32(tp->shared_dma, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_LO);
	typhoon_post_pci_writes(ioaddr);
	iowrite32(TYPHOON_BOOTCMD_REG_BOOT_RECORD,
				ioaddr + TYPHOON_REG_COMMAND);

	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_RUNNING) < 0) {
		netdev_err(tp->dev, "boot finish timeout (status 0x%x)\n",
			   ioread32(ioaddr + TYPHOON_REG_STATUS));
		goto out_timeout;
	}

	/* Clear the Transmit and Command ready registers
	 */
	iowrite32(0, ioaddr + TYPHOON_REG_TX_HI_READY);
	iowrite32(0, ioaddr + TYPHOON_REG_CMD_READY);
	iowrite32(0, ioaddr + TYPHOON_REG_TX_LO_READY);
	typhoon_post_pci_writes(ioaddr);
	iowrite32(TYPHOON_BOOTCMD_BOOT, ioaddr + TYPHOON_REG_COMMAND);

	return 0;

out_timeout:
	return -ETIMEDOUT;
}
/* typhoon_clean_tx(): reclaim completed transmit descriptors from txRing up
 * to the card's cleared index (*index).  TYPHOON_TX_DESC entries carry the
 * skb pointer smuggled through the descriptor's tx_addr field and are
 * freed; TYPHOON_FRAG_DESC entries carry a DMA mapping and are unmapped.
 *
 * Returns the new lastRead cursor; the caller stores it (see
 * typhoon_tx_complete()).  Callable from irq context (dev_kfree_skb_irq).
 */
static u32
typhoon_clean_tx(struct typhoon *tp, struct transmit_ring *txRing,
			volatile __le32 * index)
{
	u32 lastRead = txRing->lastRead;
	struct tx_desc *tx;
	dma_addr_t skb_dma;
	int dma_len;
	int type;

	while(lastRead != le32_to_cpu(*index)) {
		tx = (struct tx_desc *) (txRing->ringBase + lastRead);
		type = tx->flags & TYPHOON_TYPE_MASK;

		if(type == TYPHOON_TX_DESC) {
			/* This tx_desc describes a packet.
			 */
			unsigned long ptr = tx->tx_addr;
			struct sk_buff *skb = (struct sk_buff *) ptr;
			dev_kfree_skb_irq(skb);
		} else if(type == TYPHOON_FRAG_DESC) {
			/* This tx_desc describes a memory mapping. Free it.
			 */
			skb_dma = (dma_addr_t) le32_to_cpu(tx->frag.addr);
			dma_len = le16_to_cpu(tx->len);
			pci_unmap_single(tp->pdev, skb_dma, dma_len,
				       PCI_DMA_TODEVICE);
		}

		/* mark the slot free before advancing past it */
		tx->flags = 0;
		typhoon_inc_tx_index(&lastRead, 1);
	}

	return lastRead;
}
1615 static void
1616 typhoon_tx_complete(struct typhoon *tp, struct transmit_ring *txRing,
1617 volatile __le32 * index)
1619 u32 lastRead;
1620 int numDesc = MAX_SKB_FRAGS + 1;
1622 /* This will need changing if we start to use the Hi Tx ring. */
1623 lastRead = typhoon_clean_tx(tp, txRing, index);
1624 if(netif_queue_stopped(tp->dev) && typhoon_num_free(txRing->lastWrite,
1625 lastRead, TXLO_ENTRIES) > (numDesc + 2))
1626 netif_wake_queue(tp->dev);
1628 txRing->lastRead = lastRead;
1629 smp_wmb();
/* typhoon_recycle_rx_skb(): return rx buffer slot idx (still DMA-mapped)
 * to the free-buffer ring so the card can refill it.  If the free ring is
 * full the skb is dropped instead -- the slot will be re-allocated later
 * by typhoon_fill_free_ring().
 */
static void
typhoon_recycle_rx_skb(struct typhoon *tp, u32 idx)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct rxbuff_ent *rxb = &tp->rxbuffers[idx];
	struct basic_ring *ring = &tp->rxBuffRing;
	struct rx_free *r;

	/* ring full when advancing lastWrite would hit the cleared index */
	if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
				le32_to_cpu(indexes->rxBuffCleared)) {
		/* no room in ring, just drop the skb
		 */
		dev_kfree_skb_any(rxb->skb);
		rxb->skb = NULL;
		return;
	}

	r = (struct rx_free *) (ring->ringBase + ring->lastWrite);
	typhoon_inc_rxfree_index(&ring->lastWrite, 1);
	r->virtAddr = idx;
	r->physAddr = cpu_to_le32(rxb->dma_addr);

	/* Tell the card about it */
	wmb();
	indexes->rxBuffReady = cpu_to_le32(ring->lastWrite);
}
1659 static int
1660 typhoon_alloc_rx_skb(struct typhoon *tp, u32 idx)
1662 struct typhoon_indexes *indexes = tp->indexes;
1663 struct rxbuff_ent *rxb = &tp->rxbuffers[idx];
1664 struct basic_ring *ring = &tp->rxBuffRing;
1665 struct rx_free *r;
1666 struct sk_buff *skb;
1667 dma_addr_t dma_addr;
1669 rxb->skb = NULL;
1671 if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
1672 le32_to_cpu(indexes->rxBuffCleared))
1673 return -ENOMEM;
1675 skb = dev_alloc_skb(PKT_BUF_SZ);
1676 if(!skb)
1677 return -ENOMEM;
1679 #if 0
1680 /* Please, 3com, fix the firmware to allow DMA to a unaligned
1681 * address! Pretty please?
1683 skb_reserve(skb, 2);
1684 #endif
1686 skb->dev = tp->dev;
1687 dma_addr = pci_map_single(tp->pdev, skb->data,
1688 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
1690 /* Since no card does 64 bit DAC, the high bits will never
1691 * change from zero.
1693 r = (struct rx_free *) (ring->ringBase + ring->lastWrite);
1694 typhoon_inc_rxfree_index(&ring->lastWrite, 1);
1695 r->virtAddr = idx;
1696 r->physAddr = cpu_to_le32(dma_addr);
1697 rxb->skb = skb;
1698 rxb->dma_addr = dma_addr;
1700 /* Tell the card about it */
1701 wmb();
1702 indexes->rxBuffReady = cpu_to_le32(ring->lastWrite);
1703 return 0;
/* typhoon_rx(): process received packets on one rx ring, from *cleared up
 * to *ready, delivering at most 'budget' packets.  Small packets (below
 * rx_copybreak) are copied into a fresh skb and the original buffer is
 * recycled; larger ones are handed up directly and the slot is refilled.
 * Checksum status from the card is translated to CHECKSUM_UNNECESSARY when
 * both IP and TCP-or-UDP checksums verified good.
 *
 * Returns the number of packets delivered; updates *cleared.
 */
static int
typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile __le32 * ready,
	   volatile __le32 * cleared, int budget)
{
	struct rx_desc *rx;
	struct sk_buff *skb, *new_skb;
	struct rxbuff_ent *rxb;
	dma_addr_t dma_addr;
	u32 local_ready;
	u32 rxaddr;
	int pkt_len;
	u32 idx;
	__le32 csum_bits;
	int received;

	received = 0;
	local_ready = le32_to_cpu(*ready);
	rxaddr = le32_to_cpu(*cleared);
	while(rxaddr != local_ready && budget > 0) {
		rx = (struct rx_desc *) (rxRing->ringBase + rxaddr);
		idx = rx->addr;		/* card echoes back our buffer index */
		rxb = &tp->rxbuffers[idx];
		skb = rxb->skb;
		dma_addr = rxb->dma_addr;

		typhoon_inc_rx_index(&rxaddr, 1);

		if(rx->flags & TYPHOON_RX_ERROR) {
			/* bad frame: give the buffer straight back */
			typhoon_recycle_rx_skb(tp, idx);
			continue;
		}

		pkt_len = le16_to_cpu(rx->frameLen);

		if(pkt_len < rx_copybreak &&
		   (new_skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
			/* copybreak path: copy into a small aligned skb and
			 * keep the (still mapped) original buffer */
			skb_reserve(new_skb, 2);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr,
						    PKT_BUF_SZ,
						    PCI_DMA_FROMDEVICE);
			skb_copy_to_linear_data(new_skb, skb->data, pkt_len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr,
						       PKT_BUF_SZ,
						       PCI_DMA_FROMDEVICE);
			skb_put(new_skb, pkt_len);
			typhoon_recycle_rx_skb(tp, idx);
		} else {
			/* hand the buffer up and refill the slot */
			new_skb = skb;
			skb_put(new_skb, pkt_len);
			pci_unmap_single(tp->pdev, dma_addr, PKT_BUF_SZ,
				       PCI_DMA_FROMDEVICE);
			typhoon_alloc_rx_skb(tp, idx);
		}
		new_skb->protocol = eth_type_trans(new_skb, tp->dev);
		csum_bits = rx->rxStatus & (TYPHOON_RX_IP_CHK_GOOD |
			TYPHOON_RX_UDP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD);
		if(csum_bits ==
		   (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD) ||
		   csum_bits ==
		   (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_UDP_CHK_GOOD)) {
			new_skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else
			new_skb->ip_summed = CHECKSUM_NONE;

		/* state_lock protects tp->vlgrp against concurrent changes */
		spin_lock(&tp->state_lock);
		if(tp->vlgrp != NULL && rx->rxStatus & TYPHOON_RX_VLAN)
			vlan_hwaccel_receive_skb(new_skb, tp->vlgrp,
						 ntohl(rx->vlanTag) & 0xffff);
		else
			netif_receive_skb(new_skb);
		spin_unlock(&tp->state_lock);

		received++;
		budget--;
	}
	*cleared = cpu_to_le32(rxaddr);

	return received;
}
1786 static void
1787 typhoon_fill_free_ring(struct typhoon *tp)
1789 u32 i;
1791 for(i = 0; i < RXENT_ENTRIES; i++) {
1792 struct rxbuff_ent *rxb = &tp->rxbuffers[i];
1793 if(rxb->skb)
1794 continue;
1795 if(typhoon_alloc_rx_skb(tp, i) < 0)
1796 break;
/* typhoon_poll(): NAPI poll handler.  Drains pending command responses,
 * reclaims completed transmits, then services the Hi and Lo rx rings
 * within 'budget'.  If the free-buffer ring ran dry it is refilled here.
 * When less than the full budget was used, polling is completed and the
 * card's interrupts are unmasked again.
 *
 * Returns the number of rx packets processed.
 */
static int
typhoon_poll(struct napi_struct *napi, int budget)
{
	struct typhoon *tp = container_of(napi, struct typhoon, napi);
	struct typhoon_indexes *indexes = tp->indexes;
	int work_done;

	/* order reads of the shared indexes vs. the card's DMA writes */
	rmb();
	if(!tp->awaiting_resp && indexes->respReady != indexes->respCleared)
		typhoon_process_response(tp, 0, NULL);

	if(le32_to_cpu(indexes->txLoCleared) != tp->txLoRing.lastRead)
		typhoon_tx_complete(tp, &tp->txLoRing, &indexes->txLoCleared);

	work_done = 0;

	/* Hi (priority) ring first, then Lo with whatever budget remains */
	if(indexes->rxHiCleared != indexes->rxHiReady) {
		work_done += typhoon_rx(tp, &tp->rxHiRing, &indexes->rxHiReady,
			   		&indexes->rxHiCleared, budget);
	}

	if(indexes->rxLoCleared != indexes->rxLoReady) {
		work_done += typhoon_rx(tp, &tp->rxLoRing, &indexes->rxLoReady,
			   		&indexes->rxLoCleared, budget - work_done);
	}

	if(le32_to_cpu(indexes->rxBuffCleared) == tp->rxBuffRing.lastWrite) {
		/* rxBuff ring is empty, try to fill it. */
		typhoon_fill_free_ring(tp);
	}

	if (work_done < budget) {
		napi_complete(napi);
		/* re-enable interrupts now that we're done polling */
		iowrite32(TYPHOON_INTR_NONE,
				tp->ioaddr + TYPHOON_REG_INTR_MASK);
		typhoon_post_pci_writes(tp->ioaddr);
	}

	return work_done;
}
1841 static irqreturn_t
1842 typhoon_interrupt(int irq, void *dev_instance)
1844 struct net_device *dev = dev_instance;
1845 struct typhoon *tp = netdev_priv(dev);
1846 void __iomem *ioaddr = tp->ioaddr;
1847 u32 intr_status;
1849 intr_status = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
1850 if(!(intr_status & TYPHOON_INTR_HOST_INT))
1851 return IRQ_NONE;
1853 iowrite32(intr_status, ioaddr + TYPHOON_REG_INTR_STATUS);
1855 if (napi_schedule_prep(&tp->napi)) {
1856 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
1857 typhoon_post_pci_writes(ioaddr);
1858 __napi_schedule(&tp->napi);
1859 } else {
1860 netdev_err(dev, "Error, poll already scheduled\n");
1862 return IRQ_HANDLED;
1865 static void
1866 typhoon_free_rx_rings(struct typhoon *tp)
1868 u32 i;
1870 for(i = 0; i < RXENT_ENTRIES; i++) {
1871 struct rxbuff_ent *rxb = &tp->rxbuffers[i];
1872 if(rxb->skb) {
1873 pci_unmap_single(tp->pdev, rxb->dma_addr, PKT_BUF_SZ,
1874 PCI_DMA_FROMDEVICE);
1875 dev_kfree_skb(rxb->skb);
1876 rxb->skb = NULL;
/* typhoon_sleep(): put the 3XP to sleep with the given wake events armed,
 * then move the PCI device into the requested low-power state.  The command
 * order matters: arm wake events, issue GOTO_SLEEP, wait for the card to
 * report TYPHOON_STATUS_SLEEPING, and only then touch PCI power state.
 *
 * Returns 0 or a negative error; on command failure the card is left awake.
 */
static int
typhoon_sleep(struct typhoon *tp, pci_power_t state, __le16 events)
{
	struct pci_dev *pdev = tp->pdev;
	void __iomem *ioaddr = tp->ioaddr;
	struct cmd_desc xp_cmd;
	int err;

	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_ENABLE_WAKE_EVENTS);
	xp_cmd.parm1 = events;
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0) {
		netdev_err(tp->dev, "typhoon_sleep(): wake events cmd err %d\n",
			   err);
		return err;
	}

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_GOTO_SLEEP);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0) {
		netdev_err(tp->dev, "typhoon_sleep(): sleep cmd err %d\n", err);
		return err;
	}

	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_SLEEPING) < 0)
		return -ETIMEDOUT;

	/* Since we cannot monitor the status of the link while sleeping,
	 * tell the world it went away.
	 */
	netif_carrier_off(tp->dev);

	pci_enable_wake(tp->pdev, state, 1);
	pci_disable_device(pdev);
	return pci_set_power_state(pdev, state);
}
/* typhoon_wakeup(): bring the device back to D0 and wake the 3XP.  If the
 * sleep image does not come back ready, or the card is known to need a
 * reset after wakeup, fall back to a full typhoon_reset() using wait_type.
 *
 * Returns 0 on success or the typhoon_reset() error.
 */
static int
typhoon_wakeup(struct typhoon *tp, int wait_type)
{
	struct pci_dev *pdev = tp->pdev;
	void __iomem *ioaddr = tp->ioaddr;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Post 2.x.x versions of the Sleep Image require a reset before
	 * we can download the Runtime Image. But let's not make users of
	 * the old firmware pay for the reset.
	 */
	iowrite32(TYPHOON_BOOTCMD_WAKEUP, ioaddr + TYPHOON_REG_COMMAND);
	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0 ||
			(tp->capabilities & TYPHOON_WAKEUP_NEEDS_RESET))
		return typhoon_reset(ioaddr, wait_type);

	return 0;
}
/* typhoon_start_runtime(): full bring-up of the card's runtime image --
 * reset rings, refill rx buffers, download and boot the firmware, then
 * configure it (max packet size, MAC address, irq coalescing off, xcvr,
 * VLAN ethertype, offloads, rx filter) and enable tx/rx.  On any failure
 * the card is reset and the rings are cleaned so a retry starts fresh.
 *
 * Returns 0 or a negative error.
 */
static int
typhoon_start_runtime(struct typhoon *tp)
{
	struct net_device *dev = tp->dev;
	void __iomem *ioaddr = tp->ioaddr;
	struct cmd_desc xp_cmd;
	int err;

	typhoon_init_rings(tp);
	typhoon_fill_free_ring(tp);

	err = typhoon_download_firmware(tp);
	if(err < 0) {
		netdev_err(tp->dev, "cannot load runtime on 3XP\n");
		goto error_out;
	}

	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
		netdev_err(tp->dev, "cannot boot 3XP\n");
		err = -EIO;
		goto error_out;
	}

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAX_PKT_SIZE);
	xp_cmd.parm1 = cpu_to_le16(PKT_BUF_SZ);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	/* MAC address is passed as two 16/32 bit big-endian chunks */
	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
	xp_cmd.parm1 = cpu_to_le16(ntohs(*(__be16 *)&dev->dev_addr[0]));
	xp_cmd.parm2 = cpu_to_le32(ntohl(*(__be32 *)&dev->dev_addr[2]));
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	/* Disable IRQ coalescing -- we can reenable it when 3Com gives
	 * us some more information on how to control it.
	 */
	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_IRQ_COALESCE_CTRL);
	xp_cmd.parm1 = 0;
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT);
	xp_cmd.parm1 = tp->xcvr_select;
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_VLAN_TYPE_WRITE);
	xp_cmd.parm1 = cpu_to_le16(ETH_P_8021Q);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	/* state_lock guards tp->offload against concurrent updates */
	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_OFFLOAD_TASKS);
	spin_lock_bh(&tp->state_lock);
	xp_cmd.parm2 = tp->offload;
	xp_cmd.parm3 = tp->offload;
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	spin_unlock_bh(&tp->state_lock);
	if(err < 0)
		goto error_out;

	typhoon_set_rx_mode(dev);

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_ENABLE);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_ENABLE);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	tp->card_state = Running;
	smp_wmb();

	iowrite32(TYPHOON_INTR_ENABLE_ALL, ioaddr + TYPHOON_REG_INTR_ENABLE);
	iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_MASK);
	typhoon_post_pci_writes(ioaddr);

	return 0;

error_out:
	typhoon_reset(ioaddr, WaitNoSleep);
	typhoon_free_rx_rings(tp);
	typhoon_init_rings(tp);
	return err;
}
/* typhoon_stop_runtime(): orderly shutdown of the running image.  Disables
 * interrupts and rx, waits (bounded) for in-flight transmits to drain,
 * disables tx, snapshots the statistics into tp->stats_saved, halts the
 * 3XP, and finally resets it.  Any tx descriptors still outstanding after
 * the reset are reclaimed.
 *
 * Returns 0 or -ETIMEDOUT if the reset itself failed.
 */
static int
typhoon_stop_runtime(struct typhoon *tp, int wait_type)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct transmit_ring *txLo = &tp->txLoRing;
	void __iomem *ioaddr = tp->ioaddr;
	struct cmd_desc xp_cmd;
	int i;

	/* Disable interrupts early, since we can't schedule a poll
	 * when called with !netif_running(). This will be posted
	 * when we force the posting of the command.
	 */
	iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_ENABLE);

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_DISABLE);
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

	/* Wait 1/2 sec for any outstanding transmits to occur
	 * We'll cleanup after the reset if this times out.
	 */
	for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
		if(indexes->txLoCleared == cpu_to_le32(txLo->lastWrite))
			break;
		udelay(TYPHOON_UDELAY);
	}

	if(i == TYPHOON_WAIT_TIMEOUT)
		netdev_err(tp->dev, "halt timed out waiting for Tx to complete\n");

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_DISABLE);
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

	/* save the statistics so when we bring the interface up again,
	 * the values reported to userspace are correct.
	 */
	tp->card_state = Sleeping;
	smp_wmb();
	typhoon_do_get_stats(tp);
	memcpy(&tp->stats_saved, &tp->stats, sizeof(struct net_device_stats));

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_HALT);
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_HALTED) < 0)
		netdev_err(tp->dev, "timed out waiting for 3XP to halt\n");

	if(typhoon_reset(ioaddr, wait_type) < 0) {
		netdev_err(tp->dev, "unable to reset 3XP\n");
		return -ETIMEDOUT;
	}

	/* cleanup any outstanding Tx packets */
	if(indexes->txLoCleared != cpu_to_le32(txLo->lastWrite)) {
		indexes->txLoCleared = cpu_to_le32(txLo->lastWrite);
		typhoon_clean_tx(tp, &tp->txLoRing, &indexes->txLoCleared);
	}

	return 0;
}
2094 static void
2095 typhoon_tx_timeout(struct net_device *dev)
2097 struct typhoon *tp = netdev_priv(dev);
2099 if(typhoon_reset(tp->ioaddr, WaitNoSleep) < 0) {
2100 netdev_warn(dev, "could not reset in tx timeout\n");
2101 goto truly_dead;
2104 /* If we ever start using the Hi ring, it will need cleaning too */
2105 typhoon_clean_tx(tp, &tp->txLoRing, &tp->indexes->txLoCleared);
2106 typhoon_free_rx_rings(tp);
2108 if(typhoon_start_runtime(tp) < 0) {
2109 netdev_err(dev, "could not start runtime in tx timeout\n");
2110 goto truly_dead;
2113 netif_wake_queue(dev);
2114 return;
2116 truly_dead:
2117 /* Reset the hardware, and turn off carrier to avoid more timeouts */
2118 typhoon_reset(tp->ioaddr, NoWait);
2119 netif_carrier_off(dev);
2122 static int
2123 typhoon_open(struct net_device *dev)
2125 struct typhoon *tp = netdev_priv(dev);
2126 int err;
2128 err = typhoon_request_firmware(tp);
2129 if (err)
2130 goto out;
2132 err = typhoon_wakeup(tp, WaitSleep);
2133 if(err < 0) {
2134 netdev_err(dev, "unable to wakeup device\n");
2135 goto out_sleep;
2138 err = request_irq(dev->irq, typhoon_interrupt, IRQF_SHARED,
2139 dev->name, dev);
2140 if(err < 0)
2141 goto out_sleep;
2143 napi_enable(&tp->napi);
2145 err = typhoon_start_runtime(tp);
2146 if(err < 0) {
2147 napi_disable(&tp->napi);
2148 goto out_irq;
2151 netif_start_queue(dev);
2152 return 0;
2154 out_irq:
2155 free_irq(dev->irq, dev);
2157 out_sleep:
2158 if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
2159 netdev_err(dev, "unable to reboot into sleep img\n");
2160 typhoon_reset(tp->ioaddr, NoWait);
2161 goto out;
2164 if(typhoon_sleep(tp, PCI_D3hot, 0) < 0)
2165 netdev_err(dev, "unable to go back to sleep\n");
2167 out:
2168 return err;
2171 static int
2172 typhoon_close(struct net_device *dev)
2174 struct typhoon *tp = netdev_priv(dev);
2176 netif_stop_queue(dev);
2177 napi_disable(&tp->napi);
2179 if(typhoon_stop_runtime(tp, WaitSleep) < 0)
2180 netdev_err(dev, "unable to stop runtime\n");
2182 /* Make sure there is no irq handler running on a different CPU. */
2183 free_irq(dev->irq, dev);
2185 typhoon_free_rx_rings(tp);
2186 typhoon_init_rings(tp);
2188 if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0)
2189 netdev_err(dev, "unable to boot sleep image\n");
2191 if(typhoon_sleep(tp, PCI_D3hot, 0) < 0)
2192 netdev_err(dev, "unable to put card to sleep\n");
2194 return 0;
2197 #ifdef CONFIG_PM
2198 static int
2199 typhoon_resume(struct pci_dev *pdev)
2201 struct net_device *dev = pci_get_drvdata(pdev);
2202 struct typhoon *tp = netdev_priv(dev);
2204 /* If we're down, resume when we are upped.
2206 if(!netif_running(dev))
2207 return 0;
2209 if(typhoon_wakeup(tp, WaitNoSleep) < 0) {
2210 netdev_err(dev, "critical: could not wake up in resume\n");
2211 goto reset;
2214 if(typhoon_start_runtime(tp) < 0) {
2215 netdev_err(dev, "critical: could not start runtime in resume\n");
2216 goto reset;
2219 netif_device_attach(dev);
2220 return 0;
2222 reset:
2223 typhoon_reset(tp->ioaddr, NoWait);
2224 return -EBUSY;
/* PM suspend hook: stop the runtime, reboot the sleep image, arm it
 * with the station address and a minimal Rx filter for wake events,
 * then enter the requested low-power state.  Any failure rolls back
 * via typhoon_resume() and returns -EBUSY.
 */
static int
typhoon_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct typhoon *tp = netdev_priv(dev);
	struct cmd_desc xp_cmd;

	/* If we're down, we're already suspended.
	 */
	if(!netif_running(dev))
		return 0;

	/* WAKE_MAGIC cannot be combined with VLANs (the firmware strips
	 * VLAN tags -- see KNOWN ISSUES at the top of the file).
	 */
	spin_lock_bh(&tp->state_lock);
	if(tp->vlgrp && tp->wol_events & TYPHOON_WAKE_MAGIC_PKT) {
		spin_unlock_bh(&tp->state_lock);
		netdev_err(dev, "cannot do WAKE_MAGIC with VLANS\n");
		return -EBUSY;
	}
	spin_unlock_bh(&tp->state_lock);

	netif_device_detach(dev);

	if(typhoon_stop_runtime(tp, WaitNoSleep) < 0) {
		netdev_err(dev, "unable to stop runtime\n");
		goto need_resume;
	}

	typhoon_free_rx_rings(tp);
	typhoon_init_rings(tp);

	/* reboot the sleep image, which services wake events while suspended */
	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
		netdev_err(dev, "unable to boot sleep image\n");
		goto need_resume;
	}

	/* hand the MAC to the sleep image: first 2 bytes in parm1,
	 * remaining 4 in parm2, converted from wire order to card
	 * little-endian order
	 */
	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
	xp_cmd.parm1 = cpu_to_le16(ntohs(*(__be16 *)&dev->dev_addr[0]));
	xp_cmd.parm2 = cpu_to_le32(ntohl(*(__be32 *)&dev->dev_addr[2]));
	if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
		netdev_err(dev, "unable to set mac address in suspend\n");
		goto need_resume;
	}

	/* only directed and broadcast frames are interesting while asleep */
	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
	xp_cmd.parm1 = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
	if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
		netdev_err(dev, "unable to set rx filter in suspend\n");
		goto need_resume;
	}

	if(typhoon_sleep(tp, pci_choose_state(pdev, state), tp->wol_events) < 0) {
		netdev_err(dev, "unable to put card to sleep\n");
		goto need_resume;
	}

	return 0;

need_resume:
	/* best effort to get back to a usable state before failing */
	typhoon_resume(pdev);
	return -EBUSY;
}
2288 #endif
/* Probe whether MMIO access to the card actually works by raising a
 * self-interrupt through BAR 1 and checking that the status bit shows
 * up.  Returns 1 if MMIO is usable, 0 to fall back to port IO.
 */
static int __devinit
typhoon_test_mmio(struct pci_dev *pdev)
{
	void __iomem *ioaddr = pci_iomap(pdev, 1, 128);
	int mode = 0;
	u32 val;

	if(!ioaddr)
		goto out;

	/* only poke the card while it is idle in the sleep image */
	if(ioread32(ioaddr + TYPHOON_REG_STATUS) !=
				TYPHOON_STATUS_WAITING_FOR_HOST)
		goto out_unmap;

	/* mask, ack, and enable all interrupt sources for the test */
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_ENABLE);

	/* Ok, see if we can change our interrupt status register by
	 * sending ourselves an interrupt. If so, then MMIO works.
	 * The 50usec delay is arbitrary -- it could probably be smaller.
	 */
	val = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
	if((val & TYPHOON_INTR_SELF) == 0) {
		iowrite32(1, ioaddr + TYPHOON_REG_SELF_INTERRUPT);
		/* read back, presumably to post the PCI write before the delay */
		ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
		udelay(50);
		val = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
		if(val & TYPHOON_INTR_SELF)
			mode = 1;
	}

	/* restore: ack everything and turn interrupt delivery back off */
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
	iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_ENABLE);
	ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);

out_unmap:
	pci_iounmap(pdev, ioaddr);

out:
	if(!mode)
		pr_info("%s: falling back to port IO\n", pci_name(pdev));
	return mode;
}
/* net_device callbacks; open/stop also manage firmware load and the
 * card's power state (see typhoon_open/typhoon_close).
 */
static const struct net_device_ops typhoon_netdev_ops = {
	.ndo_open		= typhoon_open,
	.ndo_stop		= typhoon_close,
	.ndo_start_xmit		= typhoon_start_tx,
	.ndo_set_multicast_list	= typhoon_set_rx_mode,
	.ndo_tx_timeout		= typhoon_tx_timeout,
	.ndo_get_stats		= typhoon_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= typhoon_set_mac_address,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_vlan_rx_register	= typhoon_vlan_rx_register,
};
2349 static int __devinit
2350 typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2352 struct net_device *dev;
2353 struct typhoon *tp;
2354 int card_id = (int) ent->driver_data;
2355 void __iomem *ioaddr;
2356 void *shared;
2357 dma_addr_t shared_dma;
2358 struct cmd_desc xp_cmd;
2359 struct resp_desc xp_resp[3];
2360 int err = 0;
2361 const char *err_msg;
2363 dev = alloc_etherdev(sizeof(*tp));
2364 if(dev == NULL) {
2365 err_msg = "unable to alloc new net device";
2366 err = -ENOMEM;
2367 goto error_out;
2369 SET_NETDEV_DEV(dev, &pdev->dev);
2371 err = pci_enable_device(pdev);
2372 if(err < 0) {
2373 err_msg = "unable to enable device";
2374 goto error_out_dev;
2377 err = pci_set_mwi(pdev);
2378 if(err < 0) {
2379 err_msg = "unable to set MWI";
2380 goto error_out_disable;
2383 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2384 if(err < 0) {
2385 err_msg = "No usable DMA configuration";
2386 goto error_out_mwi;
2389 /* sanity checks on IO and MMIO BARs
2391 if(!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) {
2392 err_msg = "region #1 not a PCI IO resource, aborting";
2393 err = -ENODEV;
2394 goto error_out_mwi;
2396 if(pci_resource_len(pdev, 0) < 128) {
2397 err_msg = "Invalid PCI IO region size, aborting";
2398 err = -ENODEV;
2399 goto error_out_mwi;
2401 if(!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
2402 err_msg = "region #1 not a PCI MMIO resource, aborting";
2403 err = -ENODEV;
2404 goto error_out_mwi;
2406 if(pci_resource_len(pdev, 1) < 128) {
2407 err_msg = "Invalid PCI MMIO region size, aborting";
2408 err = -ENODEV;
2409 goto error_out_mwi;
2412 err = pci_request_regions(pdev, KBUILD_MODNAME);
2413 if(err < 0) {
2414 err_msg = "could not request regions";
2415 goto error_out_mwi;
2418 /* map our registers
2420 if(use_mmio != 0 && use_mmio != 1)
2421 use_mmio = typhoon_test_mmio(pdev);
2423 ioaddr = pci_iomap(pdev, use_mmio, 128);
2424 if (!ioaddr) {
2425 err_msg = "cannot remap registers, aborting";
2426 err = -EIO;
2427 goto error_out_regions;
2430 /* allocate pci dma space for rx and tx descriptor rings
2432 shared = pci_alloc_consistent(pdev, sizeof(struct typhoon_shared),
2433 &shared_dma);
2434 if(!shared) {
2435 err_msg = "could not allocate DMA memory";
2436 err = -ENOMEM;
2437 goto error_out_remap;
2440 dev->irq = pdev->irq;
2441 tp = netdev_priv(dev);
2442 tp->shared = (struct typhoon_shared *) shared;
2443 tp->shared_dma = shared_dma;
2444 tp->pdev = pdev;
2445 tp->tx_pdev = pdev;
2446 tp->ioaddr = ioaddr;
2447 tp->tx_ioaddr = ioaddr;
2448 tp->dev = dev;
2450 /* Init sequence:
2451 * 1) Reset the adapter to clear any bad juju
2452 * 2) Reload the sleep image
2453 * 3) Boot the sleep image
2454 * 4) Get the hardware address.
2455 * 5) Put the card to sleep.
2457 if (typhoon_reset(ioaddr, WaitSleep) < 0) {
2458 err_msg = "could not reset 3XP";
2459 err = -EIO;
2460 goto error_out_dma;
2463 /* Now that we've reset the 3XP and are sure it's not going to
2464 * write all over memory, enable bus mastering, and save our
2465 * state for resuming after a suspend.
2467 pci_set_master(pdev);
2468 pci_save_state(pdev);
2470 typhoon_init_interface(tp);
2471 typhoon_init_rings(tp);
2473 if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
2474 err_msg = "cannot boot 3XP sleep image";
2475 err = -EIO;
2476 goto error_out_reset;
2479 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_MAC_ADDRESS);
2480 if(typhoon_issue_command(tp, 1, &xp_cmd, 1, xp_resp) < 0) {
2481 err_msg = "cannot read MAC address";
2482 err = -EIO;
2483 goto error_out_reset;
2486 *(__be16 *)&dev->dev_addr[0] = htons(le16_to_cpu(xp_resp[0].parm1));
2487 *(__be32 *)&dev->dev_addr[2] = htonl(le32_to_cpu(xp_resp[0].parm2));
2489 if(!is_valid_ether_addr(dev->dev_addr)) {
2490 err_msg = "Could not obtain valid ethernet address, aborting";
2491 goto error_out_reset;
2494 /* Read the Sleep Image version last, so the response is valid
2495 * later when we print out the version reported.
2497 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
2498 if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
2499 err_msg = "Could not get Sleep Image version";
2500 goto error_out_reset;
2503 tp->capabilities = typhoon_card_info[card_id].capabilities;
2504 tp->xcvr_select = TYPHOON_XCVR_AUTONEG;
2506 /* Typhoon 1.0 Sleep Images return one response descriptor to the
2507 * READ_VERSIONS command. Those versions are OK after waking up
2508 * from sleep without needing a reset. Typhoon 1.1+ Sleep Images
2509 * seem to need a little extra help to get started. Since we don't
2510 * know how to nudge it along, just kick it.
2512 if(xp_resp[0].numDesc != 0)
2513 tp->capabilities |= TYPHOON_WAKEUP_NEEDS_RESET;
2515 if(typhoon_sleep(tp, PCI_D3hot, 0) < 0) {
2516 err_msg = "cannot put adapter to sleep";
2517 err = -EIO;
2518 goto error_out_reset;
2521 /* The chip-specific entries in the device structure. */
2522 dev->netdev_ops = &typhoon_netdev_ops;
2523 netif_napi_add(dev, &tp->napi, typhoon_poll, 16);
2524 dev->watchdog_timeo = TX_TIMEOUT;
2526 SET_ETHTOOL_OPS(dev, &typhoon_ethtool_ops);
2528 /* We can handle scatter gather, up to 16 entries, and
2529 * we can do IP checksumming (only version 4, doh...)
2531 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
2532 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2533 dev->features |= NETIF_F_TSO;
2535 if(register_netdev(dev) < 0) {
2536 err_msg = "unable to register netdev";
2537 goto error_out_reset;
2540 pci_set_drvdata(pdev, dev);
2542 netdev_info(dev, "%s at %s 0x%llx, %pM\n",
2543 typhoon_card_info[card_id].name,
2544 use_mmio ? "MMIO" : "IO",
2545 (unsigned long long)pci_resource_start(pdev, use_mmio),
2546 dev->dev_addr);
2548 /* xp_resp still contains the response to the READ_VERSIONS command.
2549 * For debugging, let the user know what version he has.
2551 if(xp_resp[0].numDesc == 0) {
2552 /* This is the Typhoon 1.0 type Sleep Image, last 16 bits
2553 * of version is Month/Day of build.
2555 u16 monthday = le32_to_cpu(xp_resp[0].parm2) & 0xffff;
2556 netdev_info(dev, "Typhoon 1.0 Sleep Image built %02u/%02u/2000\n",
2557 monthday >> 8, monthday & 0xff);
2558 } else if(xp_resp[0].numDesc == 2) {
2559 /* This is the Typhoon 1.1+ type Sleep Image
2561 u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
2562 u8 *ver_string = (u8 *) &xp_resp[1];
2563 ver_string[25] = 0;
2564 netdev_info(dev, "Typhoon 1.1+ Sleep Image version %02x.%03x.%03x %s\n",
2565 sleep_ver >> 24, (sleep_ver >> 12) & 0xfff,
2566 sleep_ver & 0xfff, ver_string);
2567 } else {
2568 netdev_warn(dev, "Unknown Sleep Image version (%u:%04x)\n",
2569 xp_resp[0].numDesc, le32_to_cpu(xp_resp[0].parm2));
2572 return 0;
2574 error_out_reset:
2575 typhoon_reset(ioaddr, NoWait);
2577 error_out_dma:
2578 pci_free_consistent(pdev, sizeof(struct typhoon_shared),
2579 shared, shared_dma);
2580 error_out_remap:
2581 pci_iounmap(pdev, ioaddr);
2582 error_out_regions:
2583 pci_release_regions(pdev);
2584 error_out_mwi:
2585 pci_clear_mwi(pdev);
2586 error_out_disable:
2587 pci_disable_device(pdev);
2588 error_out_dev:
2589 free_netdev(dev);
2590 error_out:
2591 pr_err("%s: %s\n", pci_name(pdev), err_msg);
2592 return err;
/* Undo typhoon_init_one(): unregister the net device, quiesce the 3XP,
 * release every PCI resource the probe acquired, and free the netdev.
 */
static void __devexit
typhoon_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct typhoon *tp = netdev_priv(dev);

	unregister_netdev(dev);
	/* bring the device to D0 and restore config space before the reset */
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	typhoon_reset(tp->ioaddr, NoWait);
	pci_iounmap(pdev, tp->ioaddr);
	pci_free_consistent(pdev, sizeof(struct typhoon_shared),
			    tp->shared, tp->shared_dma);
	pci_release_regions(pdev);
	pci_clear_mwi(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);
}
/* PCI glue: probe/remove, plus suspend/resume when power management
 * is configured.
 */
static struct pci_driver typhoon_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= typhoon_pci_tbl,
	.probe		= typhoon_init_one,
	.remove		= __devexit_p(typhoon_remove_one),
#ifdef CONFIG_PM
	.suspend	= typhoon_suspend,
	.resume		= typhoon_resume,
#endif
};
/* Module entry point: register the PCI driver. */
static int __init
typhoon_init(void)
{
	return pci_register_driver(&typhoon_driver);
}
/* Module exit: drop the cached firmware image (if one was loaded) and
 * unregister the PCI driver.
 */
static void __exit
typhoon_cleanup(void)
{
	if (typhoon_fw)
		release_firmware(typhoon_fw);
	pci_unregister_driver(&typhoon_driver);
}
/* module load/unload hooks */
module_init(typhoon_init);
module_exit(typhoon_cleanup);