1 /*******************************************************************************
4 Copyright(c) 1999 - 2003 Intel Corporation. All rights reserved.
6 This program is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 2 of the License, or (at your option)
9 any later version.
11 This program is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 more details.
16 You should have received a copy of the GNU General Public License along with
17 this program; if not, write to the Free Software Foundation, Inc., 59
18 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 The full GNU General Public License is included in this distribution in the
21 file called LICENSE.
23 Contact Information:
24 Linux NICS <linux.nics@intel.com>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 *******************************************************************************/
/**********************************************************************
*                                                                     *
* INTEL CORPORATION                                                   *
*                                                                     *
* This software is supplied under the terms of the license included   *
* above. All use of this driver must be in accordance with the terms  *
* of that license.                                                    *
*                                                                     *
* Module Name: e100_main.c                                            *
*                                                                     *
* Abstract: Functions for the driver entry points like load,          *
*           unload, open and close. All board specific calls made     *
*           by the network interface section of the driver.           *
*                                                                     *
* Environment: This file is intended to be specific to the Linux      *
*              operating system.                                      *
*                                                                     *
**********************************************************************/
/* Change Log
 *
 * 2.3.18       07/08/03
 * o Bug fix: read skb->len after freeing skb
 *   [Andrew Morton] akpm@zip.com.au
 * o Bug fix: 82557 (with National PHY) timeout during init
 *   [Adam Kropelin] akropel1@rochester.rr.com
 * o Feature add: allow to change Wake On LAN when EEPROM disabled
 *
 * 2.3.13       05/08/03
 * o Feature remove: /proc/net/PRO_LAN_Adapters support gone completely
 * o Feature remove: IDIAG support (use ethtool -t instead)
 * o Cleanup: fixed spelling mistakes found by community
 * o Feature add: ethtool cable diag test
 * o Feature add: ethtool parameter support (ring size, xsum, flow ctrl)
 * o Cleanup: move e100_asf_enable under CONFIG_PM to avoid warning
 *   [Stephen Rothwell (sfr@canb.auug.org.au)]
 * o Bug fix: don't call any netif_carrier_* until netdev registered.
 *   [Andrew Morton (akpm@digeo.com)]
 * o Cleanup: replace (skb->len - skb->data_len) with skb_headlen(skb)
 *   [jmorris@intercode.com.au]
 * o Bug fix: cleanup of Tx skbs after running ethtool diags
 * o Bug fix: incorrect reporting of ethtool diag overall results
 * o Bug fix: must hold xmit_lock before stopping queue in ethtool
 *   operations that require reset h/w and driver structures.
 * o Bug fix: statistic command failure would stop statistic collection.
 *
 * 2.2.21       02/11/03
 */
#include <linux/config.h>
#include <net/checksum.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include "e100.h"
#include "e100_ucode.h"
#include "e100_config.h"
#include "e100_phy.h"

extern void e100_force_speed_duplex_to_phy(struct e100_private *bdp);

static char e100_gstrings_stats[][ETH_GSTRING_LEN] = {
	"rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
	"tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
	"rx_length_errors", "rx_over_errors", "rx_crc_errors",
	"rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
	"tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
	"tx_heartbeat_errors", "tx_window_errors",
};

#define E100_STATS_LEN	sizeof(e100_gstrings_stats) / ETH_GSTRING_LEN
static int e100_do_ethtool_ioctl(struct net_device *, struct ifreq *);
static void e100_get_speed_duplex_caps(struct e100_private *);
static int e100_ethtool_get_settings(struct net_device *, struct ifreq *);
static int e100_ethtool_set_settings(struct net_device *, struct ifreq *);

static int e100_ethtool_get_drvinfo(struct net_device *, struct ifreq *);
static int e100_ethtool_eeprom(struct net_device *, struct ifreq *);

#define E100_EEPROM_MAGIC 0x1234
static int e100_ethtool_glink(struct net_device *, struct ifreq *);
static int e100_ethtool_gregs(struct net_device *, struct ifreq *);
static int e100_ethtool_nway_rst(struct net_device *, struct ifreq *);
static int e100_ethtool_wol(struct net_device *, struct ifreq *);
#ifdef CONFIG_PM
static unsigned char e100_setup_filter(struct e100_private *bdp);
static void e100_do_wol(struct pci_dev *pcid, struct e100_private *bdp);
#endif
static u16 e100_get_ip_lbytes(struct net_device *dev);
extern void e100_config_wol(struct e100_private *bdp);
extern u32 e100_run_diag(struct net_device *dev, u64 *test_info, u32 flags);
static int e100_ethtool_test(struct net_device *, struct ifreq *);
static int e100_ethtool_gstrings(struct net_device *, struct ifreq *);
static char test_strings[][ETH_GSTRING_LEN] = {
	"Link test (on/offline)",
	"Eeprom test (on/offline)",
	"Self test (offline)",
	"Mac loopback (offline)",
	"Phy loopback (offline)",
	"Cable diagnostic (offline)"
};

static int e100_ethtool_led_blink(struct net_device *, struct ifreq *);

static int e100_mii_ioctl(struct net_device *, struct ifreq *, int);

static unsigned char e100_delayed_exec_non_cu_cmd(struct e100_private *,
						  nxmit_cb_entry_t *);
static void e100_free_nontx_list(struct e100_private *);
static void e100_non_tx_background(unsigned long);
static inline void e100_tx_skb_free(struct e100_private *bdp, tcb_t *tcb);
/* Global Data structures and variables */
char e100_copyright[] __devinitdata = "Copyright (c) 2003 Intel Corporation";
char e100_driver_version[]="2.3.18-k1";
const char *e100_full_driver_name = "Intel(R) PRO/100 Network Driver";
char e100_short_driver_name[] = "e100";
static int e100nics = 0;
static void e100_vlan_rx_register(struct net_device *netdev, struct vlan_group
				  *grp);
static void e100_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
static void e100_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);

#ifdef CONFIG_PM
static int e100_notify_reboot(struct notifier_block *, unsigned long event, void *ptr);
static int e100_suspend(struct pci_dev *pcid, u32 state);
static int e100_resume(struct pci_dev *pcid);
static unsigned char e100_asf_enabled(struct e100_private *bdp);
struct notifier_block e100_notifier_reboot = {
	.notifier_call	= e100_notify_reboot,
	.next		= NULL,
	.priority	= 0
};
#endif
/*********************************************************************/
/*! This is a GCC extension to ANSI C.
 * See the item "Labeled Elements in Initializers" in the section
 * "Extensions to the C Language Family" of the GCC documentation.
 *********************************************************************/
#define E100_PARAM_INIT { [0 ... E100_MAX_NIC] = -1 }

/* All parameters are treated the same, as an integer array of values.
 * This macro just reduces the need to repeat the same declaration code
 * over and over (plus this helps to avoid typo bugs).
 */
#define E100_PARAM(X, S) \
	static const int X[E100_MAX_NIC + 1] = E100_PARAM_INIT; \
	MODULE_PARM(X, "1-" __MODULE_STRING(E100_MAX_NIC) "i"); \
	MODULE_PARM_DESC(X, S);
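
/* Usage sketch (the values here are hypothetical): parameters declared via
 * E100_PARAM are per-NIC arrays, so a comma-separated list assigns one
 * value per adapter at module load time, e.g.:
 *
 *	insmod e100 TxDescriptors=64,128 e100_speed_duplex=4,1
 *
 * Entries left unspecified keep the -1 initializer and fall back to the
 * defaults applied in e100_check_options(). */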
/* ====================================================================== */
static u8 e100_D101M_checksum(struct e100_private *, struct sk_buff *);
static u8 e100_D102_check_checksum(rfd_t *);
static int e100_ioctl(struct net_device *, struct ifreq *, int);
static int e100_change_mtu(struct net_device *, int);
static int e100_xmit_frame(struct sk_buff *, struct net_device *);
static unsigned char e100_init(struct e100_private *);
static int e100_set_mac(struct net_device *, void *);
struct net_device_stats *e100_get_stats(struct net_device *);

static irqreturn_t e100intr(int, void *, struct pt_regs *);
static void e100_print_brd_conf(struct e100_private *);
static void e100_set_multi(struct net_device *);

static u8 e100_pci_setup(struct pci_dev *, struct e100_private *);
static u8 e100_sw_init(struct e100_private *);
static void e100_tco_workaround(struct e100_private *);
static unsigned char e100_alloc_space(struct e100_private *);
static void e100_dealloc_space(struct e100_private *);
static int e100_alloc_tcb_pool(struct e100_private *);
static void e100_setup_tcb_pool(tcb_t *, unsigned int, struct e100_private *);
static void e100_free_tcb_pool(struct e100_private *);
static int e100_alloc_rfd_pool(struct e100_private *);
static void e100_free_rfd_pool(struct e100_private *);

static void e100_rd_eaddr(struct e100_private *);
static void e100_rd_pwa_no(struct e100_private *);
extern u16 e100_eeprom_read(struct e100_private *, u16);
extern void e100_eeprom_write_block(struct e100_private *, u16, u16 *, u16);
extern u16 e100_eeprom_size(struct e100_private *);
u16 e100_eeprom_calculate_chksum(struct e100_private *adapter);

static unsigned char e100_clr_cntrs(struct e100_private *);
static unsigned char e100_load_microcode(struct e100_private *);
static unsigned char e100_setup_iaaddr(struct e100_private *, u8 *);
static unsigned char e100_update_stats(struct e100_private *bdp);

static void e100_start_ru(struct e100_private *);
static void e100_dump_stats_cntrs(struct e100_private *);

static void e100_check_options(int board, struct e100_private *bdp);
static void e100_set_int_option(int *, int, int, int, int, char *);
static void e100_set_bool_option(struct e100_private *bdp, int, u32, int,
				 char *);
unsigned char e100_wait_exec_cmplx(struct e100_private *, u32, u8, u8);
void e100_exec_cmplx(struct e100_private *, u32, u8);
/**
 * e100_get_rx_struct - retrieve cell to hold skb buff from the pool
 * @bdp: adapter's private data struct
 *
 * Returns the new cell to hold sk_buff or %NULL.
 */
static inline struct rx_list_elem *
e100_get_rx_struct(struct e100_private *bdp)
{
	struct rx_list_elem *rx_struct = NULL;

	if (!list_empty(&(bdp->rx_struct_pool))) {
		rx_struct = list_entry(bdp->rx_struct_pool.next,
				       struct rx_list_elem, list_elem);
		list_del(&(rx_struct->list_elem));
	}

	return rx_struct;
}
/**
 * e100_alloc_skb - allocate an skb for the adapter
 * @bdp: adapter's private data struct
 *
 * Allocates an skb with enough room for the rfd and data, and reserves
 * the non-data space.
 * Returns the new cell with sk_buff or %NULL.
 */
static inline struct rx_list_elem *
e100_alloc_skb(struct e100_private *bdp)
{
	struct sk_buff *new_skb;
	u32 skb_size = sizeof (rfd_t);
	struct rx_list_elem *rx_struct;

	new_skb = (struct sk_buff *) dev_alloc_skb(skb_size);
	if (new_skb) {
		/* The IP data should be DWORD aligned. Since the ethernet
		   header is 14 bytes long, we need to reserve 2 extra bytes
		   so that the TCP/IP headers will be DWORD aligned. */
		skb_reserve(new_skb, 2);
		if ((rx_struct = e100_get_rx_struct(bdp)) == NULL)
			goto err;
		rx_struct->skb = new_skb;
		rx_struct->dma_addr = pci_map_single(bdp->pdev, new_skb->data,
						     sizeof (rfd_t),
						     PCI_DMA_FROMDEVICE);
		if (!rx_struct->dma_addr)
			goto err;
		skb_reserve(new_skb, bdp->rfd_size);
		return rx_struct;
	} else {
		return NULL;
	}

err:
	dev_kfree_skb_irq(new_skb);
	return NULL;
}
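
/* Buffer layout note: each receive skb holds the hardware RFD followed by
 * the packet data. The first skb_reserve(new_skb, 2) above shifts everything
 * by two bytes so the IP header will land DWORD aligned; the second
 * skb_reserve(new_skb, bdp->rfd_size) skips past the RFD itself, leaving
 * skb->data pointing at where the received frame will be written. */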
/**
 * e100_add_skb_to_end - add an skb to the end of our rfd list
 * @bdp: adapter's private data struct
 * @rx_struct: rx_list_elem with the new skb
 *
 * Adds a newly allocated skb to the end of our rfd list.
 */
inline void
e100_add_skb_to_end(struct e100_private *bdp, struct rx_list_elem *rx_struct)
{
	rfd_t *rfdn;		/* The new rfd */
	rfd_t *rfd;		/* The old rfd */
	struct rx_list_elem *rx_struct_last;

	(rx_struct->skb)->dev = bdp->device;
	rfdn = RFD_POINTER(rx_struct->skb, bdp);
	rfdn->rfd_header.cb_status = 0;
	rfdn->rfd_header.cb_cmd = __constant_cpu_to_le16(RFD_EL_BIT);
	rfdn->rfd_act_cnt = 0;
	rfdn->rfd_sz = __constant_cpu_to_le16(RFD_DATA_SIZE);

	pci_dma_sync_single(bdp->pdev, rx_struct->dma_addr, bdp->rfd_size,
			    PCI_DMA_TODEVICE);

	if (!list_empty(&(bdp->active_rx_list))) {
		rx_struct_last = list_entry(bdp->active_rx_list.prev,
					    struct rx_list_elem, list_elem);
		rfd = RFD_POINTER(rx_struct_last->skb, bdp);
		pci_dma_sync_single(bdp->pdev, rx_struct_last->dma_addr,
				    4, PCI_DMA_FROMDEVICE);
		put_unaligned(cpu_to_le32(rx_struct->dma_addr),
			      ((u32 *) (&(rfd->rfd_header.cb_lnk_ptr))));

		pci_dma_sync_single(bdp->pdev, rx_struct_last->dma_addr,
				    8, PCI_DMA_TODEVICE);
		rfd->rfd_header.cb_cmd &=
			__constant_cpu_to_le16((u16) ~RFD_EL_BIT);

		pci_dma_sync_single(bdp->pdev, rx_struct_last->dma_addr,
				    4, PCI_DMA_TODEVICE);
	}

	list_add_tail(&(rx_struct->list_elem), &(bdp->active_rx_list));
}
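
/* Ordering note on the routine above: the new RFD is prepared with its EL
 * (end-of-list) bit set before it becomes reachable; the old tail's link
 * pointer is then patched to the new RFD's bus address, and only after
 * that is the old tail's EL bit cleared. Each step is synced to the device
 * separately, so the RU never sees a list without a terminating RFD. */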
static inline void
e100_alloc_skbs(struct e100_private *bdp)
{
	for (; bdp->skb_req > 0; bdp->skb_req--) {
		struct rx_list_elem *rx_struct;

		if ((rx_struct = e100_alloc_skb(bdp)) == NULL)
			return;

		e100_add_skb_to_end(bdp, rx_struct);
	}
}

void e100_tx_srv(struct e100_private *);
u32 e100_rx_srv(struct e100_private *);

void e100_watchdog(struct net_device *);
void e100_refresh_txthld(struct e100_private *);
void e100_manage_adaptive_ifs(struct e100_private *);
void e100_clear_pools(struct e100_private *);
static void e100_clear_structs(struct net_device *);
static inline tcb_t *e100_prepare_xmit_buff(struct e100_private *,
					    struct sk_buff *);
static void e100_set_multi_exec(struct net_device *dev);

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/100 Network Driver");
MODULE_LICENSE("GPL");

E100_PARAM(TxDescriptors, "Number of transmit descriptors");
E100_PARAM(RxDescriptors, "Number of receive descriptors");
E100_PARAM(XsumRX, "Disable or enable Receive Checksum offload");
E100_PARAM(e100_speed_duplex, "Speed and Duplex settings");
E100_PARAM(ucode, "Disable or enable microcode loading");
E100_PARAM(ber, "Value for the BER correction algorithm");
E100_PARAM(flow_control, "Disable or enable Ethernet PAUSE frames processing");
E100_PARAM(IntDelay, "Value for CPU saver's interrupt delay");
E100_PARAM(BundleSmallFr, "Disable or enable interrupt bundling of small frames");
E100_PARAM(BundleMax, "Maximum number for CPU saver's packet bundling");
E100_PARAM(IFS, "Disable or enable the adaptive IFS algorithm");
/**
 * e100_exec_cmd - issue a command
 * @bdp: adapter's private data struct
 * @scb_cmd_low: the command that is to be issued
 *
 * This general routine will issue a command to the e100.
 */
static inline void
e100_exec_cmd(struct e100_private *bdp, u8 cmd_low)
{
	writeb(cmd_low, &(bdp->scb->scb_cmd_low));
	readw(&(bdp->scb->scb_status));	/* flushes last write, read-safe */
}

/**
 * e100_wait_scb - wait for SCB to clear
 * @bdp: adapter's private data struct
 *
 * This routine checks to see if the e100 has accepted a command.
 * It does so by checking the command field in the SCB, which will
 * be zeroed by the e100 upon accepting a command. The loop waits
 * for up to 1 millisecond for command acceptance.
 *
 * Returns:
 *	true if the SCB cleared within 1 millisecond.
 *	false if it didn't clear within 1 millisecond
 */
unsigned char
e100_wait_scb(struct e100_private *bdp)
{
	int i;

	/* loop on the scb for a few times */
	for (i = 0; i < 100; i++) {
		if (!readb(&bdp->scb->scb_cmd_low))
			return true;
		cpu_relax();
	}

	/* it didn't work. do it the slow way using udelay()s */
	for (i = 0; i < E100_MAX_SCB_WAIT; i++) {
		if (!readb(&bdp->scb->scb_cmd_low))
			return true;
		cpu_relax();
		udelay(1);
	}

	return false;
}
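
/* Two-phase poll: the fast cpu_relax() spin above catches commands the
 * device accepts almost immediately, then up to E100_MAX_SCB_WAIT retries
 * spaced by udelay(1) cover the slower cases before giving up. */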
/**
 * e100_wait_exec_simple - issue a command
 * @bdp: adapter's private data struct
 * @scb_cmd_low: the command that is to be issued
 *
 * This general routine will issue a command to the e100 after waiting for
 * the previous command to finish.
 *
 * Returns:
 *	true if the command was issued to the chip successfully
 *	false if the command was not issued to the chip
 */
inline unsigned char
e100_wait_exec_simple(struct e100_private *bdp, u8 scb_cmd_low)
{
	if (!e100_wait_scb(bdp)) {
		printk(KERN_DEBUG "e100: %s: e100_wait_exec_simple: failed\n",
		       bdp->device->name);
#ifdef E100_CU_DEBUG
		printk(KERN_ERR "e100: %s: Last command (%x/%x) "
		       "timeout\n", bdp->device->name,
		       bdp->last_cmd, bdp->last_sub_cmd);
		printk(KERN_ERR "e100: %s: Current simple command (%x) "
		       "can't be executed\n",
		       bdp->device->name, scb_cmd_low);
#endif
		return false;
	}
	e100_exec_cmd(bdp, scb_cmd_low);
#ifdef E100_CU_DEBUG
	bdp->last_cmd = scb_cmd_low;
	bdp->last_sub_cmd = 0;
#endif
	return true;
}

void
e100_exec_cmplx(struct e100_private *bdp, u32 phys_addr, u8 cmd)
{
	writel(phys_addr, &(bdp->scb->scb_gen_ptr));
	readw(&(bdp->scb->scb_status));	/* flushes last write, read-safe */
	e100_exec_cmd(bdp, cmd);
}

unsigned char
e100_wait_exec_cmplx(struct e100_private *bdp, u32 phys_addr, u8 cmd, u8 sub_cmd)
{
	if (!e100_wait_scb(bdp)) {
#ifdef E100_CU_DEBUG
		printk(KERN_ERR "e100: %s: Last command (%x/%x) "
		       "timeout\n", bdp->device->name,
		       bdp->last_cmd, bdp->last_sub_cmd);
		printk(KERN_ERR "e100: %s: Current complex command "
		       "(%x/%x) can't be executed\n",
		       bdp->device->name, cmd, sub_cmd);
#endif
		return false;
	}
	e100_exec_cmplx(bdp, phys_addr, cmd);
#ifdef E100_CU_DEBUG
	bdp->last_cmd = cmd;
	bdp->last_sub_cmd = sub_cmd;
#endif
	return true;
}

inline u8
e100_wait_cus_idle(struct e100_private *bdp)
{
	int i;

	/* loop on the scb for a few times */
	for (i = 0; i < 100; i++) {
		if (((readw(&(bdp->scb->scb_status)) & SCB_CUS_MASK) !=
		     SCB_CUS_ACTIVE)) {
			return true;
		}
		cpu_relax();
	}

	for (i = 0; i < E100_MAX_CU_IDLE_WAIT; i++) {
		if (((readw(&(bdp->scb->scb_status)) & SCB_CUS_MASK) !=
		     SCB_CUS_ACTIVE)) {
			return true;
		}
		cpu_relax();
		udelay(1);
	}

	return false;
}

/**
 * e100_disable_clear_intr - disable and clear/ack interrupts
 * @bdp: adapter's private data struct
 *
 * This routine disables interrupts at the hardware, by setting
 * the M (mask) bit in the adapter's CSR SCB command word.
 * It also clears/acks interrupts.
 */
static inline void
e100_disable_clear_intr(struct e100_private *bdp)
{
	u16 intr_status;
	/* Disable interrupts on our PCI board by setting the mask bit */
	writeb(SCB_INT_MASK, &bdp->scb->scb_cmd_hi);
	intr_status = readw(&bdp->scb->scb_status);
	/* ack and clear intrs */
	writew(intr_status, &bdp->scb->scb_status);
	readw(&bdp->scb->scb_status);
}

/**
 * e100_set_intr_mask - set interrupts
 * @bdp: adapter's private data struct
 *
 * This routine sets interrupts at the hardware, by resetting
 * the M (mask) bit in the adapter's CSR SCB command word.
 */
static inline void
e100_set_intr_mask(struct e100_private *bdp)
{
	writeb(bdp->intr_mask, &bdp->scb->scb_cmd_hi);
	readw(&(bdp->scb->scb_status));	/* flushes last write, read-safe */
}

static inline void
e100_trigger_SWI(struct e100_private *bdp)
{
	/* Trigger interrupt on our PCI board by asserting SWI bit */
	writeb(SCB_SOFT_INT, &bdp->scb->scb_cmd_hi);
	readw(&(bdp->scb->scb_status));	/* flushes last write, read-safe */
}
static int __devinit
e100_found1(struct pci_dev *pcid, const struct pci_device_id *ent)
{
	static int first_time = true;
	struct net_device *dev = NULL;
	struct e100_private *bdp = NULL;
	int rc = 0;
	u16 cal_checksum, read_checksum;

	dev = alloc_etherdev(sizeof (struct e100_private));
	if (dev == NULL) {
		printk(KERN_ERR "e100: Not able to alloc etherdev struct\n");
		rc = -ENODEV;
		goto out;
	}

	SET_MODULE_OWNER(dev);

	if (first_time) {
		first_time = false;
		printk(KERN_NOTICE "%s - version %s\n",
		       e100_full_driver_name, e100_driver_version);
		printk(KERN_NOTICE "%s\n", e100_copyright);
		printk(KERN_NOTICE "\n");
	}

	bdp = dev->priv;
	bdp->pdev = pcid;
	bdp->device = dev;

	pci_set_drvdata(pcid, dev);
	SET_NETDEV_DEV(dev, &pcid->dev);

	if ((rc = e100_alloc_space(bdp)) != 0) {
		goto err_dev;
	}

	bdp->flags = 0;
	bdp->ifs_state = 0;
	bdp->ifs_value = 0;
	bdp->scb = 0;

	init_timer(&bdp->nontx_timer_id);
	bdp->nontx_timer_id.data = (unsigned long) bdp;
	bdp->nontx_timer_id.function = (void *) &e100_non_tx_background;
	INIT_LIST_HEAD(&(bdp->non_tx_cmd_list));
	bdp->non_tx_command_state = E100_NON_TX_IDLE;

	init_timer(&bdp->watchdog_timer);
	bdp->watchdog_timer.data = (unsigned long) dev;
	bdp->watchdog_timer.function = (void *) &e100_watchdog;

	if ((rc = e100_pci_setup(pcid, bdp)) != 0) {
		goto err_dealloc;
	}

	if (((bdp->pdev->device > 0x1030)
	     && (bdp->pdev->device < 0x103F))
	    || ((bdp->pdev->device >= 0x1050)
		&& (bdp->pdev->device <= 0x1057))
	    || (bdp->pdev->device == 0x2449)
	    || (bdp->pdev->device == 0x2459)
	    || (bdp->pdev->device == 0x245D)) {
		bdp->rev_id = D101MA_REV_ID;	/* workaround for ICH3 */
		bdp->flags |= IS_ICH;
	}

	if (bdp->rev_id == 0xff)
		bdp->rev_id = 1;

	if ((u8) bdp->rev_id >= D101A4_REV_ID)
		bdp->flags |= IS_BACHELOR;

	if ((u8) bdp->rev_id >= D102_REV_ID) {
		bdp->flags |= USE_IPCB;
		bdp->rfd_size = 32;
	} else {
		bdp->rfd_size = 16;
	}

	dev->vlan_rx_register = e100_vlan_rx_register;
	dev->vlan_rx_add_vid = e100_vlan_rx_add_vid;
	dev->vlan_rx_kill_vid = e100_vlan_rx_kill_vid;
	dev->irq = pcid->irq;
	dev->open = &e100_open;
	dev->hard_start_xmit = &e100_xmit_frame;
	dev->stop = &e100_close;
	dev->change_mtu = &e100_change_mtu;
	dev->get_stats = &e100_get_stats;
	dev->set_multicast_list = &e100_set_multi;
	dev->set_mac_address = &e100_set_mac;
	dev->do_ioctl = &e100_ioctl;

	if (bdp->flags & USE_IPCB)
		dev->features = NETIF_F_SG | NETIF_F_HW_CSUM |
			NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

	if ((rc = register_netdev(dev)) != 0) {
		goto err_pci;
	}

	e100_check_options(e100nics, bdp);

	if (!e100_init(bdp)) {
		printk(KERN_ERR "e100: Failed to initialize, instance #%d\n",
		       e100nics);
		rc = -ENODEV;
		goto err_unregister_netdev;
	}

	/* Check if checksum is valid */
	cal_checksum = e100_eeprom_calculate_chksum(bdp);
	read_checksum = e100_eeprom_read(bdp, (bdp->eeprom_size - 1));
	if (cal_checksum != read_checksum) {
		printk(KERN_ERR "e100: Corrupted EEPROM on instance #%d\n",
		       e100nics);
		rc = -ENODEV;
		goto err_unregister_netdev;
	}

	e100nics++;

	e100_get_speed_duplex_caps(bdp);

	printk(KERN_NOTICE
	       "e100: %s: %s\n",
	       bdp->device->name, "Intel(R) PRO/100 Network Connection");
	e100_print_brd_conf(bdp);

	bdp->wolsupported = 0;
	bdp->wolopts = 0;
	if (bdp->rev_id >= D101A4_REV_ID)
		bdp->wolsupported = WAKE_PHY | WAKE_MAGIC;
	if (bdp->rev_id >= D101MA_REV_ID)
		bdp->wolsupported |= WAKE_UCAST | WAKE_ARP;

	/* Check if WoL is enabled on EEPROM */
	if (e100_eeprom_read(bdp, EEPROM_ID_WORD) & BIT_5) {
		/* Magic Packet WoL is enabled on device by default */
		/* if EEPROM WoL bit is TRUE */
		bdp->wolopts = WAKE_MAGIC;
	}

	printk(KERN_NOTICE "\n");

	goto out;

err_unregister_netdev:
	unregister_netdev(dev);
err_pci:
	iounmap(bdp->scb);
	pci_release_regions(pcid);
	pci_disable_device(pcid);
err_dealloc:
	e100_dealloc_space(bdp);
err_dev:
	pci_set_drvdata(pcid, NULL);
	kfree(dev);
out:
	return rc;
}
/**
 * e100_clear_structs - free resources
 * @dev: adapter's net_device struct
 *
 * Free all device specific structs, unmap i/o address, etc.
 */
static void __devexit
e100_clear_structs(struct net_device *dev)
{
	struct e100_private *bdp = dev->priv;

	iounmap(bdp->scb);
	pci_release_regions(bdp->pdev);
	pci_disable_device(bdp->pdev);

	e100_dealloc_space(bdp);
	pci_set_drvdata(bdp->pdev, NULL);
	kfree(dev);
}

static void __devexit
e100_remove1(struct pci_dev *pcid)
{
	struct net_device *dev;
	struct e100_private *bdp;

	if (!(dev = (struct net_device *) pci_get_drvdata(pcid)))
		return;

	bdp = dev->priv;

	unregister_netdev(dev);

	e100_sw_reset(bdp, PORT_SELECTIVE_RESET);

	if (bdp->non_tx_command_state != E100_NON_TX_IDLE) {
		del_timer_sync(&bdp->nontx_timer_id);
		e100_free_nontx_list(bdp);
		bdp->non_tx_command_state = E100_NON_TX_IDLE;
	}

	e100_clear_structs(dev);

	--e100nics;
}
static struct pci_device_id e100_id_table[] __devinitdata = {
	{0x8086, 0x1229, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
	{0x8086, 0x2449, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
	{0x8086, 0x1059, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
	{0x8086, 0x1209, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
	{0x8086, 0x1029, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
	{0x8086, 0x1030, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
	{0x8086, 0x1031, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
	{0x8086, 0x1032, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
	{0x8086, 0x1033, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
	{0x8086, 0x1034, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
	{0x8086, 0x1038, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
	{0x8086, 0x1039, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
	{0x8086, 0x103A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
	{0x8086, 0x103B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
	{0x8086, 0x103C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
	{0x8086, 0x103D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
	{0x8086, 0x103E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
	{0x8086, 0x1050, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
	{0x8086, 0x1051, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
	{0x8086, 0x1052, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
	{0x8086, 0x1053, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
	{0x8086, 0x1054, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
	{0x8086, 0x1055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
	{0x8086, 0x2459, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
	{0x8086, 0x245D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
	{0,}			/* This has to be the last entry */
};
MODULE_DEVICE_TABLE(pci, e100_id_table);

static struct pci_driver e100_driver = {
	.name		= "e100",
	.id_table	= e100_id_table,
	.probe		= e100_found1,
	.remove		= __devexit_p(e100_remove1),
#ifdef CONFIG_PM
	.suspend	= e100_suspend,
	.resume		= e100_resume,
#endif
};
static int __init
e100_init_module(void)
{
	int ret;
	ret = pci_module_init(&e100_driver);

	if(ret >= 0) {
#ifdef CONFIG_PM
		register_reboot_notifier(&e100_notifier_reboot);
#endif
	}

	return ret;
}

static void __exit
e100_cleanup_module(void)
{
#ifdef CONFIG_PM
	unregister_reboot_notifier(&e100_notifier_reboot);
#endif

	pci_unregister_driver(&e100_driver);
}

module_init(e100_init_module);
module_exit(e100_cleanup_module);
/**
 * e100_check_options - check command line options
 * @board: board number
 * @bdp: adapter's private data struct
 *
 * This routine does range checking on command-line options.
 */
void __devinit
e100_check_options(int board, struct e100_private *bdp)
{
	if (board >= E100_MAX_NIC) {
		printk(KERN_NOTICE
		       "e100: No configuration available for board #%d\n",
		       board);
		printk(KERN_NOTICE "e100: Using defaults for all values\n");
		board = E100_MAX_NIC;
	}

	e100_set_int_option(&(bdp->params.TxDescriptors), TxDescriptors[board],
			    E100_MIN_TCB, E100_MAX_TCB, E100_DEFAULT_TCB,
			    "TxDescriptor count");

	e100_set_int_option(&(bdp->params.RxDescriptors), RxDescriptors[board],
			    E100_MIN_RFD, E100_MAX_RFD, E100_DEFAULT_RFD,
			    "RxDescriptor count");

	e100_set_int_option(&(bdp->params.e100_speed_duplex),
			    e100_speed_duplex[board], 0, 4,
			    E100_DEFAULT_SPEED_DUPLEX, "speed/duplex mode");

	e100_set_int_option(&(bdp->params.ber), ber[board], 0, ZLOCK_MAX_ERRORS,
			    E100_DEFAULT_BER, "Bit Error Rate count");

	e100_set_bool_option(bdp, XsumRX[board], PRM_XSUMRX, E100_DEFAULT_XSUM,
			     "XsumRX value");

	/* The default ucode value depends on the controller revision */
	if (bdp->rev_id >= D101MA_REV_ID) {
		e100_set_bool_option(bdp, ucode[board], PRM_UCODE,
				     E100_DEFAULT_UCODE, "ucode value");
	} else {
		e100_set_bool_option(bdp, ucode[board], PRM_UCODE, false,
				     "ucode value");
	}

	e100_set_bool_option(bdp, flow_control[board], PRM_FC, E100_DEFAULT_FC,
			     "flow control value");

	e100_set_bool_option(bdp, IFS[board], PRM_IFS, E100_DEFAULT_IFS,
			     "IFS value");

	e100_set_bool_option(bdp, BundleSmallFr[board], PRM_BUNDLE_SMALL,
			     E100_DEFAULT_BUNDLE_SMALL_FR,
			     "CPU saver bundle small frames value");

	e100_set_int_option(&(bdp->params.IntDelay), IntDelay[board], 0x0,
			    0xFFFF, E100_DEFAULT_CPUSAVER_INTERRUPT_DELAY,
			    "CPU saver interrupt delay value");

	e100_set_int_option(&(bdp->params.BundleMax), BundleMax[board], 0x1,
			    0xFFFF, E100_DEFAULT_CPUSAVER_BUNDLE_MAX,
			    "CPU saver bundle max value");
}
/**
 * e100_set_int_option - check and set an integer option
 * @option: a pointer to the relevant option field
 * @val: the value specified
 * @min: the minimum valid value
 * @max: the maximum valid value
 * @default_val: the default value
 * @name: the name of the option
 *
 * This routine does range checking on a command-line option.
 * If the option's value is '-1' use the specified default.
 * Otherwise, if the value is invalid, change it to the default.
 */
void __devinit
e100_set_int_option(int *option, int val, int min, int max, int default_val,
		    char *name)
{
	if (val == -1) {	/* no value specified. use default */
		*option = default_val;

	} else if ((val < min) || (val > max)) {
		printk(KERN_NOTICE
		       "e100: Invalid %s specified (%i). "
		       "Valid range is %i-%i\n",
		       name, val, min, max);
		printk(KERN_NOTICE "e100: Using default %s of %i\n", name,
		       default_val);
		*option = default_val;
	} else {
		printk(KERN_INFO "e100: Using specified %s of %i\n", name, val);
		*option = val;
	}
}

/**
 * e100_set_bool_option - check and set a boolean option
 * @bdp: adapter's private data struct
 * @val: the value specified
 * @mask: the mask for the relevant option
 * @default_val: the default value
 * @name: the name of the option
 *
 * This routine checks a boolean command-line option.
 * If the option's value is '-1' use the specified default.
 * Otherwise, if the value is invalid (not 0 or 1),
 * change it to the default.
 */
void __devinit
e100_set_bool_option(struct e100_private *bdp, int val, u32 mask,
		     int default_val, char *name)
{
	if (val == -1) {
		if (default_val)
			bdp->params.b_params |= mask;

	} else if ((val != true) && (val != false)) {
		printk(KERN_NOTICE
		       "e100: Invalid %s specified (%i). "
		       "Valid values are %i/%i\n",
		       name, val, false, true);
		printk(KERN_NOTICE "e100: Using default %s of %i\n", name,
		       default_val);

		if (default_val)
			bdp->params.b_params |= mask;
	} else {
		printk(KERN_INFO "e100: Using specified %s of %i\n", name, val);
		if (val)
			bdp->params.b_params |= mask;
	}
}
int
e100_open(struct net_device *dev)
{
	struct e100_private *bdp;
	int rc = 0;

	bdp = dev->priv;

	/* setup the tcb pool */
	if (!e100_alloc_tcb_pool(bdp)) {
		rc = -ENOMEM;
		goto err_exit;
	}
	bdp->last_tcb = NULL;

	bdp->tcb_pool.head = 0;
	bdp->tcb_pool.tail = 1;

	e100_setup_tcb_pool((tcb_t *) bdp->tcb_pool.data,
			    bdp->params.TxDescriptors, bdp);

	if (!e100_alloc_rfd_pool(bdp)) {
		rc = -ENOMEM;
		goto err_exit;
	}

	if (!e100_wait_exec_cmplx(bdp, 0, SCB_CUC_LOAD_BASE, 0)) {
		rc = -EAGAIN;
		goto err_exit;
	}

	if (!e100_wait_exec_cmplx(bdp, 0, SCB_RUC_LOAD_BASE, 0)) {
		rc = -EAGAIN;
		goto err_exit;
	}

	mod_timer(&(bdp->watchdog_timer), jiffies + (2 * HZ));

	if (dev->flags & IFF_UP)
		/* Otherwise process may sleep forever */
		netif_wake_queue(dev);
	else
		netif_start_queue(dev);

	e100_start_ru(bdp);
	if ((rc = request_irq(dev->irq, &e100intr, SA_SHIRQ,
			      dev->name, dev)) != 0) {
		del_timer_sync(&bdp->watchdog_timer);
		goto err_exit;
	}
	bdp->intr_mask = 0;
	e100_set_intr_mask(bdp);

	e100_force_config(bdp);

	goto exit;

err_exit:
	e100_clear_pools(bdp);
exit:
	return rc;
}

int
e100_close(struct net_device *dev)
{
	struct e100_private *bdp = dev->priv;

	e100_disable_clear_intr(bdp);
	free_irq(dev->irq, dev);
	bdp->intr_mask = SCB_INT_MASK;
	e100_isolate_driver(bdp);

	netif_carrier_off(bdp->device);
	bdp->cur_line_speed = 0;
	bdp->cur_dplx_mode = 0;
	e100_clear_pools(bdp);

	return 0;
}

static int
e100_change_mtu(struct net_device *dev, int new_mtu)
{
	if ((new_mtu < 68) || (new_mtu > (ETH_DATA_LEN + VLAN_SIZE)))
		return -EINVAL;

	dev->mtu = new_mtu;
	return 0;
}
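
/* Note: e100_xmit_frame below is the hard_start_xmit entry point. The
 * non-zero returns on its busy paths follow the netdevice convention of
 * this kernel generation that the frame was not consumed and will be
 * retried by the stack; notify_stop ensures the queue is stopped before
 * returning, so the retry waits until resources free up. */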
static int
e100_xmit_frame(struct sk_buff *skb, struct net_device *dev)
{
	int rc = 0;
	int notify_stop = false;
	struct e100_private *bdp = dev->priv;

	if (!spin_trylock(&bdp->bd_non_tx_lock)) {
		notify_stop = true;
		rc = 1;
		goto exit2;
	}

	/* tcb list may be empty temporarily during releasing resources */
	if (!TCBS_AVAIL(bdp->tcb_pool) || (bdp->tcb_phys == 0) ||
	    (bdp->non_tx_command_state != E100_NON_TX_IDLE)) {
		notify_stop = true;
		rc = 1;
		goto exit1;
	}

	bdp->drv_stats.net_stats.tx_bytes += skb->len;

	e100_prepare_xmit_buff(bdp, skb);

	dev->trans_start = jiffies;

exit1:
	spin_unlock(&bdp->bd_non_tx_lock);
exit2:
	if (notify_stop) {
		netif_stop_queue(dev);
	}

	return rc;
}
/**
 * e100_get_stats - get driver statistics
 * @dev: adapter's net_device struct
 *
 * This routine is called when the OS wants the adapter's stats returned.
 * It returns the address of the net_device_stats structure for the device.
 * If the statistics are currently being updated, then they might be incorrect
 * for a short while. However, since this cannot actually cause damage, no
 * locking is used.
 */
struct net_device_stats *
e100_get_stats(struct net_device *dev)
{
	struct e100_private *bdp = dev->priv;

	bdp->drv_stats.net_stats.tx_errors =
		bdp->drv_stats.net_stats.tx_carrier_errors +
		bdp->drv_stats.net_stats.tx_aborted_errors;

	bdp->drv_stats.net_stats.rx_errors =
		bdp->drv_stats.net_stats.rx_crc_errors +
		bdp->drv_stats.net_stats.rx_frame_errors +
		bdp->drv_stats.net_stats.rx_length_errors +
		bdp->drv_stats.rcv_cdt_frames;

	return &(bdp->drv_stats.net_stats);
}

/**
 * e100_set_mac - set the MAC address
 * @dev: adapter's net_device struct
 * @addr: the new address
 *
 * This routine sets the ethernet address of the board.
 * Returns:
 *	0  - if successful
 *	-1 - otherwise
 */
static int
e100_set_mac(struct net_device *dev, void *addr)
{
	struct e100_private *bdp;
	int rc = -1;
	struct sockaddr *p_sockaddr = (struct sockaddr *) addr;

	if (!is_valid_ether_addr(p_sockaddr->sa_data))
		return -EADDRNOTAVAIL;
	bdp = dev->priv;

	if (e100_setup_iaaddr(bdp, (u8 *) (p_sockaddr->sa_data))) {
		memcpy(&(dev->dev_addr[0]), p_sockaddr->sa_data, ETH_ALEN);
		rc = 0;
	}

	return rc;
}
static void
e100_set_multi_exec(struct net_device *dev)
{
	struct e100_private *bdp = dev->priv;
	mltcst_cb_t *mcast_buff;
	cb_header_t *cb_hdr;
	struct dev_mc_list *mc_list;
	unsigned int i;
	nxmit_cb_entry_t *cmd = e100_alloc_non_tx_cmd(bdp);

	if (cmd != NULL) {
		mcast_buff = &((cmd->non_tx_cmd)->ntcb.multicast);
		cb_hdr = &((cmd->non_tx_cmd)->ntcb.multicast.mc_cbhdr);
	} else {
		return;
	}

	/* initialize the multicast command */
	cb_hdr->cb_cmd = __constant_cpu_to_le16(CB_MULTICAST);

	/* now fill in the rest of the multicast command */
	*(u16 *) (&(mcast_buff->mc_count)) = cpu_to_le16(dev->mc_count * 6);
	for (i = 0, mc_list = dev->mc_list;
	     (i < dev->mc_count) && (i < MAX_MULTICAST_ADDRS);
	     i++, mc_list = mc_list->next) {
		/* copy into the command */
		memcpy(&(mcast_buff->mc_addr[i * ETH_ALEN]),
		       (u8 *) &(mc_list->dmi_addr), ETH_ALEN);
	}

	if (!e100_exec_non_cu_cmd(bdp, cmd)) {
		printk(KERN_WARNING "e100: %s: Multicast setup failed\n",
		       dev->name);
	}
}

/**
 * e100_set_multi - set multicast status
 * @dev: adapter's net_device struct
 *
 * This routine is called to add or remove multicast addresses, and/or to
 * change the adapter's promiscuous state.
 */
static void
e100_set_multi(struct net_device *dev)
{
	struct e100_private *bdp = dev->priv;
	unsigned char promisc_enbl;
	unsigned char mulcast_enbl;

	promisc_enbl = ((dev->flags & IFF_PROMISC) == IFF_PROMISC);
	mulcast_enbl = ((dev->flags & IFF_ALLMULTI) ||
			(dev->mc_count > MAX_MULTICAST_ADDRS));

	e100_config_promisc(bdp, promisc_enbl);
	e100_config_mulcast_enbl(bdp, mulcast_enbl);

	/* reconfigure the chip if something has changed in its config space */
	e100_config(bdp);

	if (promisc_enbl || mulcast_enbl) {
		return;		/* no need for Multicast Cmd */
	}

	/* get the multicast CB */
	e100_set_multi_exec(dev);
}

static int
e100_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{

	switch (cmd) {

	case SIOCETHTOOL:
		return e100_do_ethtool_ioctl(dev, ifr);
		break;

	case SIOCGMIIPHY:	/* Get address of MII PHY in use. */
	case SIOCGMIIREG:	/* Read MII PHY register. */
	case SIOCSMIIREG:	/* Write to MII PHY register. */
		return e100_mii_ioctl(dev, ifr, cmd);
		break;

	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
/**
 * e100_init - initialize the adapter
 * @bdp: adapter's private data struct
 *
 * This routine is called when this driver is loaded. This is the initialization
 * routine which allocates memory, configures the adapter and determines the
 * system resources.
 *
 * Returns:
 *	true:  if successful
 *	false: otherwise
 */
static unsigned char __devinit
e100_init(struct e100_private *bdp)
{
	u32 st_timeout = 0;
	u32 st_result = 0;
	e100_sw_init(bdp);

	if (!e100_selftest(bdp, &st_timeout, &st_result)) {
		if (st_timeout) {
			printk(KERN_ERR "e100: selftest timeout\n");
		} else {
			printk(KERN_ERR "e100: selftest failed. Results: %x\n",
			       st_result);
		}
		return false;
	}
	else
		printk(KERN_DEBUG "e100: selftest OK.\n");

	/* read the MAC address from the eeprom */
	e100_rd_eaddr(bdp);
	if (!is_valid_ether_addr(bdp->device->dev_addr)) {
		printk(KERN_ERR "e100: Invalid Ethernet address\n");
		return false;
	}
	/* read NIC's part number */
	e100_rd_pwa_no(bdp);

	if (!e100_hw_init(bdp))
		return false;
	/* Interrupts are enabled after device reset */
	e100_disable_clear_intr(bdp);

	return true;
}
/**
 * e100_sw_init - initialize software structs
 * @bdp: adapter's private data struct
 *
 * This routine initializes all software structures. Sets up the
 * circular structures for the RFD's & TCB's. Allocates the per board
 * structure for storing adapter information. The CSR is also memory
 * mapped in this routine.
 *
 * Returns:
 *	true:  if S/W was successfully initialized
 *	false: otherwise
 */
static unsigned char __devinit
e100_sw_init(struct e100_private *bdp)
{
	bdp->next_cu_cmd = START_WAIT;	// init the next cu state

	/*
	 * Set the value for # of good xmits per underrun. The value assigned
	 * here is an intelligent suggested default. Nothing magical about it.
	 */
	bdp->tx_per_underrun = DEFAULT_TX_PER_UNDERRUN;

	/* get the default transmit threshold value */
	bdp->tx_thld = TX_THRSHLD;

	/* get the EEPROM size */
	bdp->eeprom_size = e100_eeprom_size(bdp);

	/* Initialize our spinlocks */
	spin_lock_init(&(bdp->bd_lock));
	spin_lock_init(&(bdp->bd_non_tx_lock));
	spin_lock_init(&(bdp->config_lock));
	spin_lock_init(&(bdp->mdi_access_lock));
	/* Initialize configuration data */
	e100_config_init(bdp);

	return 1;
}

static void __devinit
e100_tco_workaround(struct e100_private *bdp)
{
	int i;

	/* Do software reset */
	e100_sw_reset(bdp, PORT_SOFTWARE_RESET);

	/* Do a dummy LOAD CU BASE command. */
	/* This gets us out of pre-driver to post-driver. */
	e100_exec_cmplx(bdp, 0, SCB_CUC_LOAD_BASE);

	/* Wait 20 msec for reset to take effect */
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(HZ / 50 + 1);

	/* disable interrupts since they are enabled */
	/* after device reset */
	e100_disable_clear_intr(bdp);

	/* Wait for command to be cleared up to 1 sec */
	for (i = 0; i < 100; i++) {
		if (!readb(&bdp->scb->scb_cmd_low))
			break;
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(HZ / 100 + 1);
	}

	/* Wait for TCO request bit in PMDR register to be clear */
	for (i = 0; i < 50; i++) {
		if (!(readb(&bdp->scb->scb_ext.d101m_scb.scb_pmdr) & BIT_1))
			break;
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(HZ / 100 + 1);
	}
}
/**
 * e100_hw_init - initialize the hardware
 * @bdp: adapter's private data struct
 *
 * This routine performs a reset on the adapter, and configures the adapter.
 * This includes configuring the 82557 LAN controller, validating and setting
 * the node address, detecting and configuring the Phy chip on the adapter,
 * and initializing all of the on chip counters.
 *
 * Returns:
 *	true  - If the adapter was initialized
 *	false - If the adapter failed initialization
 */
unsigned char
e100_hw_init(struct e100_private *bdp)
{
	if (!e100_phy_init(bdp))
		goto err;

	e100_sw_reset(bdp, PORT_SELECTIVE_RESET);

	/* Only 82559 or above needs TCO workaround */
	if (bdp->rev_id >= D101MA_REV_ID)
		e100_tco_workaround(bdp);

	/* Load the CU BASE (set to 0, because we use linear mode) */
	if (!e100_wait_exec_cmplx(bdp, 0, SCB_CUC_LOAD_BASE, 0))
		goto err;

	if (!e100_wait_exec_cmplx(bdp, 0, SCB_RUC_LOAD_BASE, 0))
		goto err;

	/* Load interrupt microcode */
	if (e100_load_microcode(bdp)) {
		bdp->flags |= DF_UCODE_LOADED;
	}

	if (!e100_config(bdp))
		goto err;

	if (!e100_setup_iaaddr(bdp, bdp->device->dev_addr))
		goto err;

	/* Clear the internal counters */
	if (!e100_clr_cntrs(bdp))
		goto err;

	/* Change for 82558 enhancement */
	/* If 82558/9 and if the user has enabled flow control, set up the
	 * Flow Control Reg. in the CSR */
	if ((bdp->flags & IS_BACHELOR)
	    && (bdp->params.b_params & PRM_FC)) {
		writeb(DFLT_FC_THLD, &bdp->scb->scb_ext.d101_scb.scb_fc_thld);
		writeb(DFLT_FC_CMD,
		       &bdp->scb->scb_ext.d101_scb.scb_fc_xon_xoff);
	}

	return true;
err:
	printk(KERN_ERR "e100: hw init failed\n");
	return false;
}
/**
 * e100_setup_tcb_pool - setup TCB circular list
 * @head: Pointer to head of the allocated TCBs
 * @qlen: Number of elements in the queue
 * @bdp: adapter's private data struct
 *
 * This routine arranges the contiguously allocated TCBs in a circular list.
 * Also does the one time initialization of the TCBs.
 */
static void
e100_setup_tcb_pool(tcb_t *head, unsigned int qlen, struct e100_private *bdp)
{
	int ele_no;
	tcb_t *pcurr_tcb;	/* point to current tcb */
	u32 next_phys;		/* the next phys addr */
	u16 txcommand = CB_S_BIT | CB_TX_SF_BIT;

	bdp->tx_count = 0;
	if (bdp->flags & USE_IPCB) {
		txcommand |= CB_IPCB_TRANSMIT | CB_CID_DEFAULT;
	} else if (bdp->flags & IS_BACHELOR) {
		txcommand |= CB_TRANSMIT | CB_CID_DEFAULT;
	} else {
		txcommand |= CB_TRANSMIT;
	}

	for (ele_no = 0, next_phys = bdp->tcb_phys, pcurr_tcb = head;
	     ele_no < qlen; ele_no++, pcurr_tcb++) {

		/* set the phys addr for this TCB, next_phys has not incr. yet */
		pcurr_tcb->tcb_phys = next_phys;
		next_phys += sizeof (tcb_t);

		/* set the link to next tcb */
		if (ele_no == (qlen - 1))
			pcurr_tcb->tcb_hdr.cb_lnk_ptr =
				cpu_to_le32(bdp->tcb_phys);
		else
			pcurr_tcb->tcb_hdr.cb_lnk_ptr = cpu_to_le32(next_phys);

		pcurr_tcb->tcb_hdr.cb_status = 0;
		pcurr_tcb->tcb_hdr.cb_cmd = cpu_to_le16(txcommand);
		pcurr_tcb->tcb_cnt = 0;
		pcurr_tcb->tcb_thrshld = bdp->tx_thld;
		if (ele_no < 2) {
			pcurr_tcb->tcb_hdr.cb_status =
				cpu_to_le16(CB_STATUS_COMPLETE);
		}
		pcurr_tcb->tcb_tbd_num = 1;

		if (bdp->flags & IS_BACHELOR) {
			pcurr_tcb->tcb_tbd_ptr =
				__constant_cpu_to_le32(0xFFFFFFFF);
		} else {
			pcurr_tcb->tcb_tbd_ptr =
				cpu_to_le32(pcurr_tcb->tcb_phys + 0x10);
		}

		if (bdp->flags & IS_BACHELOR) {
			pcurr_tcb->tcb_tbd_expand_ptr =
				cpu_to_le32(pcurr_tcb->tcb_phys + 0x20);
		} else {
			pcurr_tcb->tcb_tbd_expand_ptr =
				cpu_to_le32(pcurr_tcb->tcb_phys + 0x10);
		}
		pcurr_tcb->tcb_tbd_dflt_ptr = pcurr_tcb->tcb_tbd_ptr;

		if (bdp->flags & USE_IPCB) {
			pcurr_tcb->tbd_ptr = &(pcurr_tcb->tcbu.tbd_array[1]);
			pcurr_tcb->tcbu.ipcb.ip_activation_high =
				IPCB_IP_ACTIVATION_DEFAULT;
			pcurr_tcb->tcbu.ipcb.vlan = 0;
		} else {
			pcurr_tcb->tbd_ptr = &(pcurr_tcb->tcbu.tbd_array[0]);
		}

		pcurr_tcb->tcb_skb = NULL;
	}

	wmb();
}
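
/* The loop above links the last TCB back to bdp->tcb_phys, making the pool
 * a circular list. The first two TCBs are pre-marked CB_STATUS_COMPLETE,
 * which matches the head = 0 / tail = 1 ring initialization done in
 * e100_open() before this routine is called. */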
/***************************************************************************/
/***************************************************************************/
/*       Memory Management Routines                                        */
/***************************************************************************/

/**
 * e100_alloc_space - allocate private driver data
 * @bdp: adapter's private data struct
 *
 * This routine allocates memory for the driver. Memory allocated is for the
 * selftest and statistics structures.
 *
 * Returns:
 *	0: if the operation was successful
 *	%-ENOMEM: if memory allocation failed
 */
unsigned char __devinit
e100_alloc_space(struct e100_private *bdp)
{
	unsigned long off;

	/* allocate all the dma-able structures in one call:
	 * selftest results, adapter stats, and non-tx cb commands */
	if (!(bdp->dma_able =
	      pci_alloc_consistent(bdp->pdev, sizeof (bd_dma_able_t),
				   &(bdp->dma_able_phys)))) {
		goto err;
	}

	/* now assign the various pointers into the struct we've just allocated */
	off = offsetof(bd_dma_able_t, selftest);

	bdp->selftest = (self_test_t *) (bdp->dma_able + off);
	bdp->selftest_phys = bdp->dma_able_phys + off;

	off = offsetof(bd_dma_able_t, stats_counters);

	bdp->stats_counters = (max_counters_t *) (bdp->dma_able + off);
	bdp->stat_cnt_phys = bdp->dma_able_phys + off;

	return 0;

err:
	printk(KERN_ERR
	       "e100: Failed to allocate memory\n");
	return -ENOMEM;
}
/**
 * e100_alloc_tcb_pool - allocate TCB circular list
 * @bdp: adapter's private data struct
 *
 * This routine allocates memory for the circular list of transmit descriptors.
 *
 * Returns:
 *	0: if allocation has failed.
 *	1: Otherwise.
 */
static int
e100_alloc_tcb_pool(struct e100_private *bdp)
{
	int stcb = sizeof (tcb_t) * bdp->params.TxDescriptors;

	/* allocate space for the TCBs */
	if (!(bdp->tcb_pool.data =
	      pci_alloc_consistent(bdp->pdev, stcb, &bdp->tcb_phys)))
		return 0;

	memset(bdp->tcb_pool.data, 0x00, stcb);

	return 1;
}

void
e100_free_tcb_pool(struct e100_private *bdp)
{
	tcb_t *tcb;
	int i;
	/* Return tx skbs */
	for (i = 0; i < bdp->params.TxDescriptors; i++) {
		tcb = bdp->tcb_pool.data;
		tcb += bdp->tcb_pool.head;
		e100_tx_skb_free(bdp, tcb);
		if (NEXT_TCB_TOUSE(bdp->tcb_pool.head) == bdp->tcb_pool.tail)
			break;
		bdp->tcb_pool.head = NEXT_TCB_TOUSE(bdp->tcb_pool.head);
	}
	pci_free_consistent(bdp->pdev,
			    sizeof (tcb_t) * bdp->params.TxDescriptors,
			    bdp->tcb_pool.data, bdp->tcb_phys);
	bdp->tcb_pool.head = 0;
	bdp->tcb_pool.tail = 1;
	bdp->tcb_phys = 0;
}
static void
e100_dealloc_space(struct e100_private *bdp)
{
	if (bdp->dma_able) {
		pci_free_consistent(bdp->pdev, sizeof (bd_dma_able_t),
				    bdp->dma_able, bdp->dma_able_phys);
	}

	bdp->selftest_phys = 0;
	bdp->stat_cnt_phys = 0;
	bdp->dma_able_phys = 0;
	bdp->dma_able = 0;
}

static void
e100_free_rfd_pool(struct e100_private *bdp)
{
	struct rx_list_elem *rx_struct;

	while (!list_empty(&(bdp->active_rx_list))) {

		rx_struct = list_entry(bdp->active_rx_list.next,
				       struct rx_list_elem, list_elem);
		list_del(&(rx_struct->list_elem));
		pci_unmap_single(bdp->pdev, rx_struct->dma_addr,
				 sizeof (rfd_t), PCI_DMA_TODEVICE);
		dev_kfree_skb(rx_struct->skb);
		kfree(rx_struct);
	}

	while (!list_empty(&(bdp->rx_struct_pool))) {
		rx_struct = list_entry(bdp->rx_struct_pool.next,
				       struct rx_list_elem, list_elem);
		list_del(&(rx_struct->list_elem));
		kfree(rx_struct);
	}
}

/**
 * e100_alloc_rfd_pool - allocate RFDs
 * @bdp: adapter's private data struct
 *
 * Allocates the initial pool of skbs which hold both rfd and data.
 * Returns non-zero if at least one receive buffer made it onto the
 * active rx list.
 */
static int
e100_alloc_rfd_pool(struct e100_private *bdp)
{
	struct rx_list_elem *rx_struct;
	int i;

	INIT_LIST_HEAD(&(bdp->active_rx_list));
	INIT_LIST_HEAD(&(bdp->rx_struct_pool));
	bdp->skb_req = bdp->params.RxDescriptors;
	for (i = 0; i < bdp->skb_req; i++) {
		rx_struct = kmalloc(sizeof (struct rx_list_elem), GFP_ATOMIC);
		list_add(&(rx_struct->list_elem), &(bdp->rx_struct_pool));
	}
	e100_alloc_skbs(bdp);
	return !list_empty(&(bdp->active_rx_list));
}

void
e100_clear_pools(struct e100_private *bdp)
{
	bdp->last_tcb = NULL;
	e100_free_rfd_pool(bdp);
	e100_free_tcb_pool(bdp);
}
/*****************************************************************************/
/*****************************************************************************/
/*      Run Time Functions                                                   */
/*****************************************************************************/

/**
 * e100_watchdog
 * @dev: adapter's net_device struct
 *
 * This routine runs every 2 seconds and updates our statistics and link
 * state, and refreshes the txthld value.
 */
void
e100_watchdog(struct net_device *dev)
{
	struct e100_private *bdp = dev->priv;

#ifdef E100_CU_DEBUG
	if (e100_cu_unknown_state(bdp)) {
		printk(KERN_ERR "e100: %s: CU unknown state in e100_watchdog\n",
		       dev->name);
	}
#endif
	if (!netif_running(dev)) {
		return;
	}

	/* check if link state has changed */
	if (e100_phy_check(bdp)) {
		if (netif_carrier_ok(dev)) {
			printk(KERN_ERR
			       "e100: %s NIC Link is Up %d Mbps %s duplex\n",
			       bdp->device->name, bdp->cur_line_speed,
			       (bdp->cur_dplx_mode == HALF_DUPLEX) ?
			       "Half" : "Full");

			e100_config_fc(bdp);
			e100_config(bdp);

		} else {
			printk(KERN_ERR "e100: %s NIC Link is Down\n",
			       bdp->device->name);
		}
	}

	// toggle the tx queue according to link status
	// this also resolves a race condition between tx & non-cu cmd flows
	if (netif_carrier_ok(dev)) {
		if (netif_running(dev))
			netif_wake_queue(dev);
	} else {
		if (netif_running(dev))
			netif_stop_queue(dev);
		/* When changing to non-autoneg, device may lose  */
		/* link with some switches. e100 will try to      */
		/* recover link by sending command to PHY layer   */
		if (bdp->params.e100_speed_duplex != E100_AUTONEG)
			e100_force_speed_duplex_to_phy(bdp);
	}

	rmb();

	if (e100_update_stats(bdp)) {

		/* Check if a change in the IFS parameter is needed,
		   and configure the device accordingly */
		if (bdp->params.b_params & PRM_IFS)
			e100_manage_adaptive_ifs(bdp);

		/* Now adjust our dynamic tx threshold value */
		e100_refresh_txthld(bdp);

		/* Now if we are on a 557 and we haven't received any frames then we
		 * should issue a multicast command to reset the RU */
		if (bdp->rev_id < D101A4_REV_ID) {
			if (!(bdp->stats_counters->basic_stats.rcv_gd_frames)) {
				e100_set_multi(dev);
			}
		}

		/* Issue command to dump statistics from device. */
		/* Check for command completion on next watchdog timer. */
		e100_dump_stats_cntrs(bdp);

		wmb();
	}

	/* relaunch watchdog timer in 2 sec */
	mod_timer(&(bdp->watchdog_timer), jiffies + (2 * HZ));

	if (list_empty(&bdp->active_rx_list))
		e100_trigger_SWI(bdp);
}
/**
 * e100_manage_adaptive_ifs
 * @bdp: adapter's private data struct
 *
 * This routine manages the adaptive Inter-Frame Spacing algorithm
 * using a state machine.
 */
void
e100_manage_adaptive_ifs(struct e100_private *bdp)
{
	static u16 state_table[9][4] = {	// rows are states
		{2, 0, 0, 0},	// state0 // column0: next state if increasing
		{2, 0, 5, 30},	// state1 // column1: next state if decreasing
		{5, 1, 5, 30},	// state2 // column2: IFS value for 100 mbit
		{5, 3, 0, 0},	// state3 // column3: IFS value for 10 mbit
		{5, 3, 10, 60},	// state4
		{8, 4, 10, 60},	// state5
		{8, 6, 0, 0},	// state6
		{8, 6, 20, 60},	// state7
		{8, 7, 20, 60}	// state8
	};

	u32 transmits =
		le32_to_cpu(bdp->stats_counters->basic_stats.xmt_gd_frames);
	u32 collisions =
		le32_to_cpu(bdp->stats_counters->basic_stats.xmt_ttl_coll);
	u32 state = bdp->ifs_state;
	u32 old_value = bdp->ifs_value;
	int next_col;
	u32 min_transmits;

	if (bdp->cur_dplx_mode == FULL_DUPLEX) {
		bdp->ifs_state = 0;
		bdp->ifs_value = 0;

	} else {		/* Half Duplex */
		/* Set speed specific parameters */
		if (bdp->cur_line_speed == 100) {
			next_col = 2;
			min_transmits = MIN_NUMBER_OF_TRANSMITS_100;

		} else {	/* 10 Mbps */
			next_col = 3;
			min_transmits = MIN_NUMBER_OF_TRANSMITS_10;
		}

		if ((transmits / 32 < collisions)
		    && (transmits > min_transmits)) {
			state = state_table[state][0];	/* increment */

		} else if (transmits < min_transmits) {
			state = state_table[state][1];	/* decrement */
		}

		bdp->ifs_value = state_table[state][next_col];
		bdp->ifs_state = state;
	}

	/* If the IFS value has changed, configure the device */
	if (bdp->ifs_value != old_value) {
		e100_config_ifs(bdp);
		e100_config(bdp);
	}
}
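
/* Worked example of the table above: at 100 Mbit half duplex in state 1,
 * a collision-heavy interval (collisions > transmits / 32, with more than
 * min_transmits good transmits) moves the machine to state_table[1][0] = 2,
 * and the new IFS value becomes state_table[2][2] = 5. */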
/**
 * e100intr - interrupt handler
 * @irq: the IRQ number
 * @dev_inst: the net_device struct
 * @regs: registers (unused)
 *
 * This routine is the ISR for the e100 board. It services
 * the RX & TX queues & starts the RU if it has stopped due
 * to no resources.
 */
irqreturn_t
e100intr(int irq, void *dev_inst, struct pt_regs *regs)
{
	struct net_device *dev;
	struct e100_private *bdp;
	u16 intr_status;

	dev = dev_inst;
	bdp = dev->priv;

	intr_status = readw(&bdp->scb->scb_status);
	/* If not my interrupt, just return */
	if (!(intr_status & SCB_STATUS_ACK_MASK) || (intr_status == 0xffff)) {
		return IRQ_NONE;
	}

	/* disable and ack intr */
	e100_disable_clear_intr(bdp);

	/* the device is closed, don't continue or else bad things may happen. */
	if (!netif_running(dev)) {
		e100_set_intr_mask(bdp);
		return IRQ_NONE;
	}

	/* SWI intr (triggered by watchdog) is signal to allocate new skb buffers */
	if (intr_status & SCB_STATUS_ACK_SWI) {
		e100_alloc_skbs(bdp);
	}

	/* do recv work if any */
	if (intr_status &
	    (SCB_STATUS_ACK_FR | SCB_STATUS_ACK_RNR | SCB_STATUS_ACK_SWI))
		bdp->drv_stats.rx_intr_pkts += e100_rx_srv(bdp);

	/* clean up after tx'ed packets */
	if (intr_status & (SCB_STATUS_ACK_CNA | SCB_STATUS_ACK_CX))
		e100_tx_srv(bdp);

	e100_set_intr_mask(bdp);
	return IRQ_HANDLED;
}

/**
 * e100_tx_skb_free - free TX skbs resources
 * @bdp: adapter's private data struct
 * @tcb: associated tcb of the freed skb
 *
 * This routine frees resources of TX skbs.
 */
static inline void
e100_tx_skb_free(struct e100_private *bdp, tcb_t *tcb)
{
	if (tcb->tcb_skb) {
		int i;
		tbd_t *tbd_arr = tcb->tbd_ptr;
		int frags = skb_shinfo(tcb->tcb_skb)->nr_frags;
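
		/* nr_frags counts only the page fragments; the loop below
		 * runs frags + 1 times so the skb's linear data is unmapped
		 * along with every fragment. */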
		for (i = 0; i <= frags; i++, tbd_arr++) {
			pci_unmap_single(bdp->pdev,
					 le32_to_cpu(tbd_arr->tbd_buf_addr),
					 le16_to_cpu(tbd_arr->tbd_buf_cnt),
					 PCI_DMA_TODEVICE);
		}
		dev_kfree_skb_irq(tcb->tcb_skb);
		tcb->tcb_skb = NULL;
	}
}
1919 * e100_tx_srv - service TX queues
1920 * @bdp: adapter's private data struct
1922 * This routine services the TX queues. It reclaims the TCB's & TBD's & other
1923 * resources used during the transmit of this buffer. It is called from the ISR.
1924 * We don't need a tx_lock since we always access buffers which were already
1925 * prepared.
1927 void
1928 e100_tx_srv(struct e100_private *bdp)
1930 tcb_t *tcb;
1931 int i;
1933 /* go over at most TxDescriptors buffers */
1934 for (i = 0; i < bdp->params.TxDescriptors; i++) {
1935 tcb = bdp->tcb_pool.data;
1936 tcb += bdp->tcb_pool.head;
1938 rmb();
1940 /* if the buffer at 'head' is not complete, break */
1941 if (!(tcb->tcb_hdr.cb_status &
1942 __constant_cpu_to_le16(CB_STATUS_COMPLETE)))
1943 break;
1945 /* service next buffer, clear the out of resource condition */
1946 e100_tx_skb_free(bdp, tcb);
1948 if (netif_running(bdp->device))
1949 netif_wake_queue(bdp->device);
1951 /* if we've caught up with 'tail', break */
1952 if (NEXT_TCB_TOUSE(bdp->tcb_pool.head) == bdp->tcb_pool.tail) {
1953 break;
1956 bdp->tcb_pool.head = NEXT_TCB_TOUSE(bdp->tcb_pool.head);
1961 * e100_rx_srv - service RX queue
1962 * @bdp: adapter's private data struct
1966 * This routine processes the RX interrupt & services the RX queues.
1967 * For each successful RFD, it allocates a new msg block, links that
1968 * into the RFD list, and sends the old msg upstream.
1969 * The new RFD is then put at the end of the free list of RFD's.
1970 * It returns the number of serviced RFDs.
1973 e100_rx_srv(struct e100_private *bdp)
1975 rfd_t *rfd; /* new rfd, received rfd */
1976 int i;
1977 u16 rfd_status;
1978 struct sk_buff *skb;
1979 struct net_device *dev;
1980 unsigned int data_sz;
1981 struct rx_list_elem *rx_struct;
1982 u32 rfd_cnt = 0;
1984 dev = bdp->device;
1986 /* the current rx design is as follows:
1987 * 1. a socket buffer (skb) is used to pass the network packet to the upper layer
1988 * 2. all HW host memory structures (like RFDs, RBDs and data buffers)
1989 * are placed in the skb's data room
1990 * 3. when rx processing is complete, the skb's internal pointers are adjusted
1991 * to exclude everything unrelated (RFD, RBD) from the data area, leaving
1992 * just the received packet itself
1993 * 4. for each skb passed to the upper layer, a new one is allocated in its place.
1994 * 5. if no skbs are left, another attempt to allocate skbs is made in 2 seconds
1995 * (the watchdog triggers an SWI interrupt and the ISR allocates new skbs)
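/* A sketch of the skb data room while the buffer is owned by the
 * hardware (illustration only):
 *
 *   +------------+---------------------------+
 *   | RFD header |    received frame data    |
 *   +------------+---------------------------+
 *   ^-- RFD_POINTER(skb, bdp)
 */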
1997 for (i = 0; i < bdp->params.RxDescriptors; i++) {
1998 if (list_empty(&(bdp->active_rx_list))) {
1999 break;
2002 rx_struct = list_entry(bdp->active_rx_list.next,
2003 struct rx_list_elem, list_elem);
2004 skb = rx_struct->skb;
2006 rfd = RFD_POINTER(skb, bdp); /* locate RFD within skb */
2008 // sync only the RFD header
2009 pci_dma_sync_single(bdp->pdev, rx_struct->dma_addr,
2010 bdp->rfd_size, PCI_DMA_FROMDEVICE);
2011 rfd_status = le16_to_cpu(rfd->rfd_header.cb_status); /* get RFD's status */
2012 if (!(rfd_status & RFD_STATUS_COMPLETE)) /* does not contain data yet - exit */
2013 break;
2015 /* to allow manipulation of the current skb we need to unlink it */
2016 list_del(&(rx_struct->list_elem));
2018 /* do not free & unmap badly received packet.
2019 * move it to the end of skb list for reuse */
2020 if (!(rfd_status & RFD_STATUS_OK)) {
2021 e100_add_skb_to_end(bdp, rx_struct);
2022 continue;
2025 data_sz = min_t(u16, (le16_to_cpu(rfd->rfd_act_cnt) & 0x3fff),
2026 (sizeof (rfd_t) - bdp->rfd_size));
2028 /* now sync all the data */
2029 pci_dma_sync_single(bdp->pdev, rx_struct->dma_addr,
2030 (data_sz + bdp->rfd_size),
2031 PCI_DMA_FROMDEVICE);
2033 pci_unmap_single(bdp->pdev, rx_struct->dma_addr,
2034 sizeof (rfd_t), PCI_DMA_FROMDEVICE);
2036 list_add(&(rx_struct->list_elem), &(bdp->rx_struct_pool));
2038 /* end of dma access to rfd */
2039 bdp->skb_req++; /* incr number of requested skbs */
2040 e100_alloc_skbs(bdp); /* and get them */
2042 /* set packet size, excluding the checksum (last 2 bytes) if present */
2043 if ((bdp->flags & DF_CSUM_OFFLOAD)
2044 && (bdp->rev_id < D102_REV_ID))
2045 skb_put(skb, (int) data_sz - 2);
2046 else
2047 skb_put(skb, (int) data_sz);
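/* On pre-D102 parts the hardware appends the computed checksum to the
 * frame data itself; excluding it from the skb length here lets
 * e100_D101M_checksum() below read it back from skb->tail. */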
2049 /* set the protocol */
2050 skb->protocol = eth_type_trans(skb, dev);
2052 /* set the checksum info */
2053 if (bdp->flags & DF_CSUM_OFFLOAD) {
2054 if (bdp->rev_id >= D102_REV_ID) {
2055 skb->ip_summed = e100_D102_check_checksum(rfd);
2056 } else {
2057 skb->ip_summed = e100_D101M_checksum(bdp, skb);
2059 } else {
2060 skb->ip_summed = CHECKSUM_NONE;
2063 bdp->drv_stats.net_stats.rx_bytes += skb->len;
2065 if(bdp->vlgrp && (rfd_status & CB_STATUS_VLAN)) {
2066 vlan_hwaccel_rx(skb, bdp->vlgrp, be16_to_cpu(rfd->vlanid));
2067 } else {
2068 netif_rx(skb);
2070 dev->last_rx = jiffies;
2072 rfd_cnt++;
2073 } /* end of rfd loop */
2075 /* restart the RU if it has stopped */
2076 if ((readw(&bdp->scb->scb_status) & SCB_RUS_MASK) != SCB_RUS_READY) {
2077 e100_start_ru(bdp);
2080 return rfd_cnt;
2083 void
2084 e100_refresh_txthld(struct e100_private *bdp)
2086 basic_cntr_t *pstat = &(bdp->stats_counters->basic_stats);
2088 /* As long as tx_per_underrun is not 0, we can go about dynamically
2089 * adjusting the xmit threshold. We stop doing that & resort to
2090 * defaults once the adjustments become meaningless. The value is
2091 * adjusted by dumping the error counters & checking the # of xmit
2092 * underrun errors we've had. */
2093 if (bdp->tx_per_underrun) {
2094 /* We are going to use the last values dumped from the dump statistics
2095 * command */
2096 if (le32_to_cpu(pstat->xmt_gd_frames)) {
2097 if (le32_to_cpu(pstat->xmt_uruns)) {
2099 * if we have had more than one underrun per "DEFAULT #
2100 * OF XMITS ALLOWED PER UNDERRUN" good xmits, raise the
2101 * THRESHOLD.
2103 if ((le32_to_cpu(pstat->xmt_gd_frames) /
2104 le32_to_cpu(pstat->xmt_uruns)) <
2105 bdp->tx_per_underrun) {
2106 bdp->tx_thld += 3;
2111 * if we've had fewer than one underrun per the DEFAULT number of
2112 * good xmits allowed, lower the THOLD, but not below its minimum (6)
2114 if (le32_to_cpu(pstat->xmt_gd_frames) >
2115 bdp->tx_per_underrun) {
2116 bdp->tx_thld--;
2118 if (bdp->tx_thld < 6)
2119 bdp->tx_thld = 6;
2124 /* end good xmits */
2126 * If our adjustments are becoming unreasonable, stop adjusting &
2127 * resort to defaults & pray. A THOLD value > 190 means that the
2128 * adapter will wait for 190*8=1520 bytes in the TX FIFO before it
2129 * starts xmit. Since the MTU is 1514, it doesn't make any sense to
2130 * increase it further. */
2131 if (bdp->tx_thld >= 190) {
2132 bdp->tx_per_underrun = 0;
2133 bdp->tx_thld = 189;
2135 } /* end underrun check */
2139 * e100_prepare_xmit_buff - prepare a buffer for transmission
2140 * @bdp: adapter's private data struct
2141 * @skb: skb to send
2143 * This routine prepares a buffer for transmission. It checks
2144 * the message length for the appropriate size. It picks up a
2145 * free tcb from the TCB pool and sets up the corresponding
2146 * TBD's. If the number of fragments is more than the number
2147 * of TBDs per TCB, it copies all the fragments into a coalesce buffer.
2148 * It returns a pointer to the prepared TCB.
2150 static inline tcb_t *
2151 e100_prepare_xmit_buff(struct e100_private *bdp, struct sk_buff *skb)
2153 tcb_t *tcb, *prev_tcb;
2155 tcb = bdp->tcb_pool.data;
2156 tcb += TCB_TO_USE(bdp->tcb_pool);
2158 if (bdp->flags & USE_IPCB) {
2159 tcb->tcbu.ipcb.ip_activation_high = IPCB_IP_ACTIVATION_DEFAULT;
2160 tcb->tcbu.ipcb.ip_schedule &= ~IPCB_TCP_PACKET;
2161 tcb->tcbu.ipcb.ip_schedule &= ~IPCB_TCPUDP_CHECKSUM_ENABLE;
2164 if(bdp->vlgrp && vlan_tx_tag_present(skb)) {
2165 (tcb->tcbu).ipcb.ip_activation_high |= IPCB_INSERTVLAN_ENABLE;
2166 (tcb->tcbu).ipcb.vlan = cpu_to_be16(vlan_tx_tag_get(skb));
2169 tcb->tcb_hdr.cb_status = 0;
2170 tcb->tcb_thrshld = bdp->tx_thld;
2171 tcb->tcb_hdr.cb_cmd |= __constant_cpu_to_le16(CB_S_BIT);
2173 /* Set I (Interrupt) bit on every (TX_FRAME_CNT)th packet */
2174 if (!(++bdp->tx_count % TX_FRAME_CNT))
2175 tcb->tcb_hdr.cb_cmd |= __constant_cpu_to_le16(CB_I_BIT);
2176 else
2177 /* Clear I bit on other packets */
2178 tcb->tcb_hdr.cb_cmd &= ~__constant_cpu_to_le16(CB_I_BIT);
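/* Interrupt mitigation: with the I bit set on only every
 * TX_FRAME_CNT-th TCB, completed frames are reclaimed in batches by
 * e100_tx_srv() rather than with one interrupt per frame. */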
2180 tcb->tcb_skb = skb;
2182 if (skb->ip_summed == CHECKSUM_HW) {
2183 const struct iphdr *ip = skb->nh.iph;
2185 if ((ip->protocol == IPPROTO_TCP) ||
2186 (ip->protocol == IPPROTO_UDP)) {
2188 tcb->tcbu.ipcb.ip_activation_high |=
2189 IPCB_HARDWAREPARSING_ENABLE;
2190 tcb->tcbu.ipcb.ip_schedule |=
2191 IPCB_TCPUDP_CHECKSUM_ENABLE;
2193 if (ip->protocol == IPPROTO_TCP)
2194 tcb->tcbu.ipcb.ip_schedule |= IPCB_TCP_PACKET;
2198 if (!skb_shinfo(skb)->nr_frags) {
2199 (tcb->tbd_ptr)->tbd_buf_addr =
2200 cpu_to_le32(pci_map_single(bdp->pdev, skb->data,
2201 skb->len, PCI_DMA_TODEVICE));
2202 (tcb->tbd_ptr)->tbd_buf_cnt = cpu_to_le16(skb->len);
2203 tcb->tcb_tbd_num = 1;
2204 tcb->tcb_tbd_ptr = tcb->tcb_tbd_dflt_ptr;
2205 } else {
2206 int i;
2207 void *addr;
2208 tbd_t *tbd_arr_ptr = &(tcb->tbd_ptr[1]);
2209 skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
2211 (tcb->tbd_ptr)->tbd_buf_addr =
2212 cpu_to_le32(pci_map_single(bdp->pdev, skb->data,
2213 skb_headlen(skb),
2214 PCI_DMA_TODEVICE));
2215 (tcb->tbd_ptr)->tbd_buf_cnt =
2216 cpu_to_le16(skb_headlen(skb));
2218 for (i = 0; i < skb_shinfo(skb)->nr_frags;
2219 i++, tbd_arr_ptr++, frag++) {
2221 addr = ((void *) page_address(frag->page) +
2222 frag->page_offset);
2224 tbd_arr_ptr->tbd_buf_addr =
2225 cpu_to_le32(pci_map_single(bdp->pdev,
2226 addr, frag->size,
2227 PCI_DMA_TODEVICE));
2228 tbd_arr_ptr->tbd_buf_cnt = cpu_to_le16(frag->size);
2230 tcb->tcb_tbd_num = skb_shinfo(skb)->nr_frags + 1;
2231 tcb->tcb_tbd_ptr = tcb->tcb_tbd_expand_ptr;
2234 /* clear the S-BIT on the previous tcb */
2235 prev_tcb = bdp->tcb_pool.data;
2236 prev_tcb += PREV_TCB_USED(bdp->tcb_pool);
2237 prev_tcb->tcb_hdr.cb_cmd &= __constant_cpu_to_le16((u16) ~CB_S_BIT);
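/* Every new TCB is queued with its S (suspend) bit set, so the CU
 * suspends after transmitting it; clearing the S bit on the previous
 * TCB just before resuming lets the CU chain straight into this one. */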
2239 bdp->tcb_pool.tail = NEXT_TCB_TOUSE(bdp->tcb_pool.tail);
2241 wmb();
2243 e100_start_cu(bdp, tcb);
2245 return tcb;
2248 /* Changed for 82558 enhancement */
2250 * e100_start_cu - start the adapter's CU
2251 * @bdp: adapter's private data struct
2252 * @tcb: TCB to be transmitted
2254 * This routine issues a CU Start or CU Resume command to the 82558/9.
2255 * This routine was added because the prepare_ext_xmit_buff takes advantage
2256 * of the 82558/9's Dynamic TBD chaining feature and has to start the CU as
2257 * soon as the first TBD is ready.
2259 * e100_start_cu must be called while holding the tx_lock !
2262 e100_start_cu(struct e100_private *bdp, tcb_t *tcb)
2264 unsigned long lock_flag;
2265 u8 ret = true;
2267 spin_lock_irqsave(&(bdp->bd_lock), lock_flag);
2268 switch (bdp->next_cu_cmd) {
2269 case RESUME_NO_WAIT:
2270 /* The last cu command was a CU_RESUME. If this is a 558 or newer we don't need to
2271 * wait for the command word to clear; we reach here only on bachelor (82558 and up) parts
2273 e100_exec_cmd(bdp, SCB_CUC_RESUME);
2274 break;
2276 case RESUME_WAIT:
2277 if ((bdp->flags & IS_ICH) &&
2278 (bdp->cur_line_speed == 10) &&
2279 (bdp->cur_dplx_mode == HALF_DUPLEX)) {
2280 e100_wait_exec_simple(bdp, SCB_CUC_NOOP);
2281 udelay(1);
2283 if ((e100_wait_exec_simple(bdp, SCB_CUC_RESUME)) &&
2284 (bdp->flags & IS_BACHELOR) && (!(bdp->flags & IS_ICH))) {
2285 bdp->next_cu_cmd = RESUME_NO_WAIT;
2287 break;
2289 case START_WAIT:
2290 // The last command was a non_tx CU command
2291 if (!e100_wait_cus_idle(bdp))
2292 printk(KERN_DEBUG
2293 "e100: %s: cu_start: timeout waiting for cu\n",
2294 bdp->device->name);
2295 if (!e100_wait_exec_cmplx(bdp, (u32) (tcb->tcb_phys),
2296 SCB_CUC_START, CB_TRANSMIT)) {
2297 printk(KERN_DEBUG
2298 "e100: %s: cu_start: timeout waiting for scb\n",
2299 bdp->device->name);
2300 e100_exec_cmplx(bdp, (u32) (tcb->tcb_phys),
2301 SCB_CUC_START);
2302 ret = false;
2305 bdp->next_cu_cmd = RESUME_WAIT;
2307 break;
2310 /* save the last tcb */
2311 bdp->last_tcb = tcb;
2313 spin_unlock_irqrestore(&(bdp->bd_lock), lock_flag);
2314 return ret;
2317 /* ====================================================================== */
2318 /* hw */
2319 /* ====================================================================== */
2322 * e100_selftest - perform H/W self test
2323 * @bdp: adapter's private data struct
2324 * @st_timeout: address to return timeout value, if fails
2325 * @st_result: address to return selftest result, if fails
2327 * This routine will issue PORT Self-test command to test the e100.
2328 * The self-test will fail if the adapter's master-enable bit is not
2329 * set in the PCI Command Register, or if the adapter is not seated
2330 * in a PCI master-enabled slot. We also disable interrupts when the
2331 * command is completed.
2333 * Returns:
2334 * true: if adapter passes self_test
2335 * false: otherwise
2337 unsigned char
2338 e100_selftest(struct e100_private *bdp, u32 *st_timeout, u32 *st_result)
2340 u32 selftest_cmd;
2342 /* initialize the nic state before running test */
2343 e100_sw_reset(bdp, PORT_SOFTWARE_RESET);
2344 /* Setup the address of the self_test area */
2345 selftest_cmd = bdp->selftest_phys;
2347 /* Setup SELF TEST Command Code in D3 - D0 */
2348 selftest_cmd |= PORT_SELFTEST;
2350 /* Initialize the self-test signature and results DWORDS */
2351 bdp->selftest->st_sign = 0;
2352 bdp->selftest->st_result = 0xffffffff;
2354 /* Do the port command */
2355 writel(selftest_cmd, &bdp->scb->scb_port);
2356 readw(&(bdp->scb->scb_status)); /* flushes last write, read-safe */
2358 /* Wait at least 10 milliseconds for the self-test to complete */
2359 set_current_state(TASK_UNINTERRUPTIBLE);
2360 schedule_timeout(HZ / 100 + 1);
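/* HZ/100 jiffies is ~10 ms; the extra jiffy guards against integer
 * truncation so we never sleep for less than a full tick. */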
2362 /* disable interrupts since they are enabled */
2363 /* after device reset during selftest */
2364 e100_disable_clear_intr(bdp);
2366 /* If the first self-test DWORD is still zero, we've timed out. If the
2367 * second DWORD is not zero then we have an error. */
2368 if ((bdp->selftest->st_sign == 0) || (bdp->selftest->st_result != 0)) {
2370 if (st_timeout)
2371 *st_timeout = !(le32_to_cpu(bdp->selftest->st_sign));
2373 if (st_result)
2374 *st_result = le32_to_cpu(bdp->selftest->st_result);
2376 return false;
2379 return true;
2383 * e100_setup_iaaddr - issue IA setup command
2384 * @bdp: adapter's private data struct
2385 * @eaddr: new ethernet address
2387 * This routine will issue the IA setup command. This command
2388 * will notify the 82557 (e100) of what its individual (node)
2389 * address is. This command will be executed in polled mode.
2391 * Returns:
2392 * true: if the IA setup command was successfully issued and completed
2393 * false: otherwise
2395 unsigned char
2396 e100_setup_iaaddr(struct e100_private *bdp, u8 *eaddr)
2398 unsigned int i;
2399 cb_header_t *ntcb_hdr;
2400 unsigned char res;
2401 nxmit_cb_entry_t *cmd;
2403 if ((cmd = e100_alloc_non_tx_cmd(bdp)) == NULL) {
2404 res = false;
2405 goto exit;
2408 ntcb_hdr = (cb_header_t *) cmd->non_tx_cmd;
2409 ntcb_hdr->cb_cmd = __constant_cpu_to_le16(CB_IA_ADDRESS);
2411 for (i = 0; i < ETH_ALEN; i++) {
2412 (cmd->non_tx_cmd)->ntcb.setup.ia_addr[i] = eaddr[i];
2415 res = e100_exec_non_cu_cmd(bdp, cmd);
2416 if (!res)
2417 printk(KERN_WARNING "e100: %s: IA setup failed\n",
2418 bdp->device->name);
2420 exit:
2421 return res;
2425 * e100_start_ru - start the RU if needed
2426 * @bdp: adapter's private data struct
2428 * This routine checks the status of the 82557's receive unit (RU),
2429 * and starts the RU if it was not already active. However,
2430 * before restarting the RU, the driver gives the RU the buffers
2431 * it freed up during the servicing of the ISR. If there are
2432 * no free buffers to give to the RU (i.e. we have reached a
2433 * no-resource condition), the RU will not be started until the
2434 * next ISR.
2436 void
2437 e100_start_ru(struct e100_private *bdp)
2439 struct rx_list_elem *rx_struct = NULL;
2440 int buffer_found = 0;
2441 struct list_head *entry_ptr;
2443 list_for_each(entry_ptr, &(bdp->active_rx_list)) {
2444 rx_struct =
2445 list_entry(entry_ptr, struct rx_list_elem, list_elem);
2446 pci_dma_sync_single(bdp->pdev, rx_struct->dma_addr,
2447 bdp->rfd_size, PCI_DMA_FROMDEVICE);
2448 if (!((SKB_RFD_STATUS(rx_struct->skb, bdp) &
2449 __constant_cpu_to_le16(RFD_STATUS_COMPLETE)))) {
2450 buffer_found = 1;
2451 break;
2455 /* No available buffers */
2456 if (!buffer_found) {
2457 return;
2460 spin_lock(&bdp->bd_lock);
2462 if (!e100_wait_exec_cmplx(bdp, rx_struct->dma_addr, SCB_RUC_START, 0)) {
2463 printk(KERN_DEBUG
2464 "e100: %s: start_ru: wait_scb failed\n",
2465 bdp->device->name);
2466 e100_exec_cmplx(bdp, rx_struct->dma_addr, SCB_RUC_START);
2468 if (bdp->next_cu_cmd == RESUME_NO_WAIT) {
2469 bdp->next_cu_cmd = RESUME_WAIT;
2471 spin_unlock(&bdp->bd_lock);
2475 * e100_cmd_complete_location
2476 * @bdp: adapter's private data struct
2478 * This routine returns a pointer to the location of the command-complete
2479 * DWord in the dump statistical counters area, according to the statistical
2480 * counters mode (557 - basic, 558 - extended, or 559 - TCO mode).
2481 * See e100_config_init() for the setting of the statistical counters mode.
2483 static u32 *
2484 e100_cmd_complete_location(struct e100_private *bdp)
2486 u32 *cmd_complete;
2487 max_counters_t *stats = bdp->stats_counters;
2489 switch (bdp->stat_mode) {
2490 case E100_EXTENDED_STATS:
2491 cmd_complete =
2492 (u32 *) &(((err_cntr_558_t *) (stats))->cmd_complete);
2493 break;
2495 case E100_TCO_STATS:
2496 cmd_complete =
2497 (u32 *) &(((err_cntr_559_t *) (stats))->cmd_complete);
2498 break;
2500 case E100_BASIC_STATS:
2501 default:
2502 cmd_complete =
2503 (u32 *) &(((err_cntr_557_t *) (stats))->cmd_complete);
2504 break;
2507 return cmd_complete;
2511 * e100_clr_cntrs - clear statistics counters
2512 * @bdp: adapter's private data struct
2514 * This routine will clear the adapter error statistic counters.
2516 * Returns:
2517 * true: if successfully cleared stat counters
2518 * false: otherwise
2520 static unsigned char __devinit
2521 e100_clr_cntrs(struct e100_private *bdp)
2523 volatile u32 *pcmd_complete;
2525 /* clear the dump counter complete word */
2526 pcmd_complete = e100_cmd_complete_location(bdp);
2527 *pcmd_complete = 0;
2528 wmb();
2530 if (!e100_wait_exec_cmplx(bdp, bdp->stat_cnt_phys, SCB_CUC_DUMP_ADDR, 0))
2531 return false;
2533 /* wait 10 microseconds for the command to complete */
2534 udelay(10);
2536 if (!e100_wait_exec_simple(bdp, SCB_CUC_DUMP_RST_STAT))
2537 return false;
2539 if (bdp->next_cu_cmd == RESUME_NO_WAIT) {
2540 bdp->next_cu_cmd = RESUME_WAIT;
2543 return true;
2546 static unsigned char
2547 e100_update_stats(struct e100_private *bdp)
2549 u32 *pcmd_complete;
2550 basic_cntr_t *pstat = &(bdp->stats_counters->basic_stats);
2552 // check if last dump command completed
2553 pcmd_complete = e100_cmd_complete_location(bdp);
2554 if (*pcmd_complete != le32_to_cpu(DUMP_RST_STAT_COMPLETED) &&
2555 *pcmd_complete != le32_to_cpu(DUMP_STAT_COMPLETED)) {
2556 *pcmd_complete = 0;
2557 return false;
2560 /* increment the statistics */
2561 bdp->drv_stats.net_stats.rx_packets +=
2562 le32_to_cpu(pstat->rcv_gd_frames);
2563 bdp->drv_stats.net_stats.tx_packets +=
2564 le32_to_cpu(pstat->xmt_gd_frames);
2565 bdp->drv_stats.net_stats.rx_dropped += le32_to_cpu(pstat->rcv_rsrc_err);
2566 bdp->drv_stats.net_stats.collisions += le32_to_cpu(pstat->xmt_ttl_coll);
2567 bdp->drv_stats.net_stats.rx_length_errors +=
2568 le32_to_cpu(pstat->rcv_shrt_frames);
2569 bdp->drv_stats.net_stats.rx_over_errors +=
2570 le32_to_cpu(pstat->rcv_rsrc_err);
2571 bdp->drv_stats.net_stats.rx_crc_errors +=
2572 le32_to_cpu(pstat->rcv_crc_errs);
2573 bdp->drv_stats.net_stats.rx_frame_errors +=
2574 le32_to_cpu(pstat->rcv_algn_errs);
2575 bdp->drv_stats.net_stats.rx_fifo_errors +=
2576 le32_to_cpu(pstat->rcv_oruns);
2577 bdp->drv_stats.net_stats.tx_aborted_errors +=
2578 le32_to_cpu(pstat->xmt_max_coll);
2579 bdp->drv_stats.net_stats.tx_carrier_errors +=
2580 le32_to_cpu(pstat->xmt_lost_crs);
2581 bdp->drv_stats.net_stats.tx_fifo_errors +=
2582 le32_to_cpu(pstat->xmt_uruns);
2584 bdp->drv_stats.tx_late_col += le32_to_cpu(pstat->xmt_late_coll);
2585 bdp->drv_stats.tx_ok_defrd += le32_to_cpu(pstat->xmt_deferred);
2586 bdp->drv_stats.tx_one_retry += le32_to_cpu(pstat->xmt_sngl_coll);
2587 bdp->drv_stats.tx_mt_one_retry += le32_to_cpu(pstat->xmt_mlt_coll);
2588 bdp->drv_stats.rcv_cdt_frames += le32_to_cpu(pstat->rcv_err_coll);
2590 if (bdp->stat_mode != E100_BASIC_STATS) {
2591 ext_cntr_t *pex_stat = &bdp->stats_counters->extended_stats;
2593 bdp->drv_stats.xmt_fc_pkts +=
2594 le32_to_cpu(pex_stat->xmt_fc_frames);
2595 bdp->drv_stats.rcv_fc_pkts +=
2596 le32_to_cpu(pex_stat->rcv_fc_frames);
2597 bdp->drv_stats.rcv_fc_unsupported +=
2598 le32_to_cpu(pex_stat->rcv_fc_unsupported);
2601 if (bdp->stat_mode == E100_TCO_STATS) {
2602 tco_cntr_t *ptco_stat = &bdp->stats_counters->tco_stats;
2604 bdp->drv_stats.xmt_tco_pkts +=
2605 le16_to_cpu(ptco_stat->xmt_tco_frames);
2606 bdp->drv_stats.rcv_tco_pkts +=
2607 le16_to_cpu(ptco_stat->rcv_tco_frames);
2610 *pcmd_complete = 0;
2611 return true;
2615 * e100_dump_stats_cntrs
2616 * @bdp: adapter's private data struct
2618 * This routine will dump the board statistical counters without waiting
2619 * for the stat dump to complete. Any access to these stats should verify
2620 * that the command has completed.
2622 void
2623 e100_dump_stats_cntrs(struct e100_private *bdp)
2625 unsigned long lock_flag_bd;
2627 spin_lock_irqsave(&(bdp->bd_lock), lock_flag_bd);
2629 /* dump h/w stats counters */
2630 if (e100_wait_exec_simple(bdp, SCB_CUC_DUMP_RST_STAT)) {
2631 if (bdp->next_cu_cmd == RESUME_NO_WAIT) {
2632 bdp->next_cu_cmd = RESUME_WAIT;
2636 spin_unlock_irqrestore(&(bdp->bd_lock), lock_flag_bd);
2640 * e100_exec_non_cu_cmd
2641 * @bdp: adapter's private data struct
2642 * @command: the non-cu command to execute
2644 * This routine will submit a command block to be executed, polling for its completion or deferring it to the delayed-execution path.
2646 unsigned char
2647 e100_exec_non_cu_cmd(struct e100_private *bdp, nxmit_cb_entry_t *command)
2649 cb_header_t *ntcb_hdr;
2650 unsigned long lock_flag;
2651 unsigned long expiration_time;
2652 unsigned char rc = true;
2653 u8 sub_cmd;
2655 ntcb_hdr = (cb_header_t *) command->non_tx_cmd; /* get hdr of non tcb cmd */
2656 sub_cmd = cpu_to_le16(ntcb_hdr->cb_cmd);
2658 /* Set the Command Block to be the last command block */
2659 ntcb_hdr->cb_cmd |= __constant_cpu_to_le16(CB_EL_BIT);
2660 ntcb_hdr->cb_status = 0;
2661 ntcb_hdr->cb_lnk_ptr = 0;
2663 wmb();
2664 if (in_interrupt())
2665 return e100_delayed_exec_non_cu_cmd(bdp, command);
2667 if (netif_running(bdp->device) && netif_carrier_ok(bdp->device))
2668 return e100_delayed_exec_non_cu_cmd(bdp, command);
2670 spin_lock_bh(&(bdp->bd_non_tx_lock));
2672 if (bdp->non_tx_command_state != E100_NON_TX_IDLE) {
2673 goto delayed_exec;
2676 if (bdp->last_tcb) {
2677 rmb();
2678 if ((bdp->last_tcb->tcb_hdr.cb_status &
2679 __constant_cpu_to_le16(CB_STATUS_COMPLETE)) == 0)
2680 goto delayed_exec;
2683 if ((readw(&bdp->scb->scb_status) & SCB_CUS_MASK) == SCB_CUS_ACTIVE) {
2684 goto delayed_exec;
2687 spin_lock_irqsave(&bdp->bd_lock, lock_flag);
2689 if (!e100_wait_exec_cmplx(bdp, command->dma_addr, SCB_CUC_START, sub_cmd)) {
2690 spin_unlock_irqrestore(&(bdp->bd_lock), lock_flag);
2691 rc = false;
2692 goto exit;
2695 bdp->next_cu_cmd = START_WAIT;
2696 spin_unlock_irqrestore(&(bdp->bd_lock), lock_flag);
2698 /* now wait for completion of non-cu CB up to 20 msec */
2699 expiration_time = jiffies + HZ / 50 + 1;
2700 rmb();
2701 while (!(ntcb_hdr->cb_status &
2702 __constant_cpu_to_le16(CB_STATUS_COMPLETE))) {
2704 if (time_before(jiffies, expiration_time)) {
2705 spin_unlock_bh(&(bdp->bd_non_tx_lock));
2706 yield();
2707 spin_lock_bh(&(bdp->bd_non_tx_lock));
2708 } else {
2709 #ifdef E100_CU_DEBUG
2710 printk(KERN_ERR "e100: %s: non-TX command (%x) "
2711 "timeout\n", bdp->device->name, sub_cmd);
2712 #endif
2713 rc = false;
2714 goto exit;
2716 rmb();
2719 exit:
2720 e100_free_non_tx_cmd(bdp, command);
2722 if (netif_running(bdp->device))
2723 netif_wake_queue(bdp->device);
2725 spin_unlock_bh(&(bdp->bd_non_tx_lock));
2726 return rc;
2728 delayed_exec:
2729 spin_unlock_bh(&(bdp->bd_non_tx_lock));
2730 return e100_delayed_exec_non_cu_cmd(bdp, command);
2734 * e100_sw_reset
2735 * @bdp: adapter's private data struct
2736 * @reset_cmd: s/w reset or selective reset
2738 * This routine will issue a software reset to the adapter. It
2739 * will also disable interrupts, as they are enabled after reset.
2741 void
2742 e100_sw_reset(struct e100_private *bdp, u32 reset_cmd)
2744 /* Do a selective reset first to avoid a potential PCI hang */
2745 writel(PORT_SELECTIVE_RESET, &bdp->scb->scb_port);
2746 readw(&(bdp->scb->scb_status)); /* flushes last write, read-safe */
2748 /* wait for the reset to take effect */
2749 udelay(20);
2750 if (reset_cmd == PORT_SOFTWARE_RESET) {
2751 writel(PORT_SOFTWARE_RESET, &bdp->scb->scb_port);
2753 /* wait 20 microseconds for the reset to take effect */
2754 udelay(20);
2757 /* Mask off our interrupt line -- it is unmasked after reset */
2758 e100_disable_clear_intr(bdp);
2759 #ifdef E100_CU_DEBUG
2760 bdp->last_cmd = 0;
2761 bdp->last_sub_cmd = 0;
2762 #endif
2766 * e100_load_microcode - Download microcode to the controller.
2767 * @bdp: adapter's private data struct
2769 * This routine downloads microcode onto the controller. This
2770 * microcode is available for the 82558/9 and 82550. Currently the
2771 * microcode handles interrupt bundling and the TCO workaround.
2773 * Returns:
2774 * true: if successful
2775 * false: otherwise
2777 static unsigned char
2778 e100_load_microcode(struct e100_private *bdp)
2780 static struct {
2781 u8 rev_id;
2782 u32 ucode[UCODE_MAX_DWORDS + 1];
2783 int timer_dword;
2784 int bundle_dword;
2785 int min_size_dword;
2786 } ucode_opts[] = {
2787 { D101A4_REV_ID,
2788 D101_A_RCVBUNDLE_UCODE,
2789 D101_CPUSAVER_TIMER_DWORD,
2790 D101_CPUSAVER_BUNDLE_DWORD,
2791 D101_CPUSAVER_MIN_SIZE_DWORD },
2792 { D101B0_REV_ID,
2793 D101_B0_RCVBUNDLE_UCODE,
2794 D101_CPUSAVER_TIMER_DWORD,
2795 D101_CPUSAVER_BUNDLE_DWORD,
2796 D101_CPUSAVER_MIN_SIZE_DWORD },
2797 { D101MA_REV_ID,
2798 D101M_B_RCVBUNDLE_UCODE,
2799 D101M_CPUSAVER_TIMER_DWORD,
2800 D101M_CPUSAVER_BUNDLE_DWORD,
2801 D101M_CPUSAVER_MIN_SIZE_DWORD },
2802 { D101S_REV_ID,
2803 D101S_RCVBUNDLE_UCODE,
2804 D101S_CPUSAVER_TIMER_DWORD,
2805 D101S_CPUSAVER_BUNDLE_DWORD,
2806 D101S_CPUSAVER_MIN_SIZE_DWORD },
2807 { D102_REV_ID,
2808 D102_B_RCVBUNDLE_UCODE,
2809 D102_B_CPUSAVER_TIMER_DWORD,
2810 D102_B_CPUSAVER_BUNDLE_DWORD,
2811 D102_B_CPUSAVER_MIN_SIZE_DWORD },
2812 { D102C_REV_ID,
2813 D102_C_RCVBUNDLE_UCODE,
2814 D102_C_CPUSAVER_TIMER_DWORD,
2815 D102_C_CPUSAVER_BUNDLE_DWORD,
2816 D102_C_CPUSAVER_MIN_SIZE_DWORD },
2817 { D102E_REV_ID,
2818 D102_E_RCVBUNDLE_UCODE,
2819 D102_E_CPUSAVER_TIMER_DWORD,
2820 D102_E_CPUSAVER_BUNDLE_DWORD,
2821 D102_E_CPUSAVER_MIN_SIZE_DWORD },
2822 { 0, {0}, 0, 0, 0}
2823 }, *opts;
2825 opts = ucode_opts;
2827 /* User turned ucode loading off */
2828 if (!(bdp->params.b_params & PRM_UCODE))
2829 return false;
2831 /* These controllers do not need ucode */
2832 if (bdp->flags & IS_ICH)
2833 return false;
2835 /* Search for ucode match against h/w rev_id */
2836 while (opts->rev_id) {
2837 if (bdp->rev_id == opts->rev_id) {
2838 int i;
2839 u32 *ucode_dword;
2840 load_ucode_cb_t *ucode_cmd_ptr;
2841 nxmit_cb_entry_t *cmd = e100_alloc_non_tx_cmd(bdp);
2843 if (cmd != NULL) {
2844 ucode_cmd_ptr =
2845 (load_ucode_cb_t *) cmd->non_tx_cmd;
2846 ucode_dword = ucode_cmd_ptr->ucode_dword;
2847 } else {
2848 return false;
2851 memcpy(ucode_dword, opts->ucode, sizeof (opts->ucode));
2853 /* Insert user-tunable settings */
2854 ucode_dword[opts->timer_dword] &= 0xFFFF0000;
2855 ucode_dword[opts->timer_dword] |=
2856 (u16) bdp->params.IntDelay;
2857 ucode_dword[opts->bundle_dword] &= 0xFFFF0000;
2858 ucode_dword[opts->bundle_dword] |=
2859 (u16) bdp->params.BundleMax;
2860 ucode_dword[opts->min_size_dword] &= 0xFFFF0000;
2861 ucode_dword[opts->min_size_dword] |=
2862 (bdp->params.b_params & PRM_BUNDLE_SMALL) ?
2863 0xFFFF : 0xFF80;
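/* Each tunable occupies the low 16 bits of a known dword in the
 * image: clear that half-word with the 0xFFFF0000 mask, then OR in
 * the new value. For illustration, an IntDelay of 0x600 would leave
 * the dword's upper half intact and set its low half to 0x0600. */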
2865 for (i = 0; i < UCODE_MAX_DWORDS; i++)
2866 cpu_to_le32s(&(ucode_dword[i]));
2868 ucode_cmd_ptr->load_ucode_cbhdr.cb_cmd =
2869 __constant_cpu_to_le16(CB_LOAD_MICROCODE);
2871 return e100_exec_non_cu_cmd(bdp, cmd);
2873 opts++;
2876 return false;
2879 /***************************************************************************/
2880 /***************************************************************************/
2881 /* EEPROM Functions */
2882 /***************************************************************************/
2884 /* Read PWA (printed wiring assembly) number */
2885 void __devinit
2886 e100_rd_pwa_no(struct e100_private *bdp)
2888 bdp->pwa_no = e100_eeprom_read(bdp, EEPROM_PWA_NO);
2889 bdp->pwa_no <<= 16;
2890 bdp->pwa_no |= e100_eeprom_read(bdp, EEPROM_PWA_NO + 1);
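/* The PWA number spans two consecutive 16-bit EEPROM words; the word
 * at EEPROM_PWA_NO lands in the high half of the 32-bit result. */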
2893 /* Read the permanent ethernet address from the EEPROM. */
2894 void __devinit
2895 e100_rd_eaddr(struct e100_private *bdp)
2897 int i;
2898 u16 eeprom_word;
2900 for (i = 0; i < 6; i += 2) {
2901 eeprom_word =
2902 e100_eeprom_read(bdp,
2903 EEPROM_NODE_ADDRESS_BYTE_0 + (i / 2));
2905 bdp->device->dev_addr[i] =
2906 bdp->perm_node_address[i] = (u8) eeprom_word;
2907 bdp->device->dev_addr[i + 1] =
2908 bdp->perm_node_address[i + 1] = (u8) (eeprom_word >> 8);
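/* Each EEPROM word carries two address octets, low byte first, so
 * three words (i = 0, 2, 4) cover the full 6-byte MAC address. */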
2912 /* Check the D102 RFD flags to see if the checksum passed */
2913 static unsigned char
2914 e100_D102_check_checksum(rfd_t *rfd)
2916 if (((le16_to_cpu(rfd->rfd_header.cb_status)) & RFD_PARSE_BIT)
2917 && (((rfd->rcvparserstatus & CHECKSUM_PROTOCOL_MASK) ==
2918 RFD_TCP_PACKET)
2919 || ((rfd->rcvparserstatus & CHECKSUM_PROTOCOL_MASK) ==
2920 RFD_UDP_PACKET))
2921 && (rfd->checksumstatus & TCPUDP_CHECKSUM_BIT_VALID)
2922 && (rfd->checksumstatus & TCPUDP_CHECKSUM_VALID)) {
2923 return CHECKSUM_UNNECESSARY;
2925 return CHECKSUM_NONE;
2929 * e100_D101M_checksum
2930 * @bdp: adapter's private data struct
2931 * @skb: skb received
2933 * Sets the skb->csum value from the D101 csum found at the end of the Rx frame.
2934 * The D101M sums all words in the frame excluding the ethernet II header (14
2935 * bytes), so if the packet is ethernet II and the protocol is IP, all that is
2936 * needed is to assign this value to skb->csum.
2938 static unsigned char
2939 e100_D101M_checksum(struct e100_private *bdp, struct sk_buff *skb)
2941 unsigned short proto = (skb->protocol);
2943 if (proto == __constant_htons(ETH_P_IP)) {
2945 skb->csum = get_unaligned((u16 *) (skb->tail));
2946 return CHECKSUM_HW;
2948 return CHECKSUM_NONE;
2951 /***************************************************************************/
2952 /***************************************************************************/
2953 /***************************************************************************/
2954 /***************************************************************************/
2955 /* Auxiliary Functions */
2956 /***************************************************************************/
2958 /* Print the board's configuration */
2959 void __devinit
2960 e100_print_brd_conf(struct e100_private *bdp)
2962 /* Print the string if checksum offloading was enabled */
2963 if (bdp->flags & DF_CSUM_OFFLOAD)
2964 printk(KERN_NOTICE " Hardware receive checksums enabled\n");
2965 else {
2966 if (bdp->rev_id >= D101MA_REV_ID)
2967 printk(KERN_NOTICE " Hardware receive checksums disabled\n");
2970 if ((bdp->flags & DF_UCODE_LOADED))
2971 printk(KERN_NOTICE " cpu cycle saver enabled\n");
2975 * e100_pci_setup - setup the adapter's PCI information
2976 * @pcid: adapter's pci_dev struct
2977 * @bdp: adapter's private data struct
2979 * This routine sets up all PCI information for the adapter. It enables the bus
2980 * master bit (some BIOSes don't do this), requests memory and I/O regions, and
2981 * calls ioremap() on the adapter's memory region.
2983 * Returns:
2984 * true: if successful
2985 * false: otherwise
2987 static unsigned char __devinit
2988 e100_pci_setup(struct pci_dev *pcid, struct e100_private *bdp)
2990 struct net_device *dev = bdp->device;
2991 int rc = 0;
2993 if ((rc = pci_enable_device(pcid)) != 0) {
2994 goto err;
2997 /* dev and ven ID have already been checked so it is our device */
2998 pci_read_config_byte(pcid, PCI_REVISION_ID, (u8 *) &(bdp->rev_id));
3000 /* address #0 is a memory region */
3001 dev->mem_start = pci_resource_start(pcid, 0);
3002 dev->mem_end = dev->mem_start + sizeof (scb_t);
3004 /* address #1 is an IO region */
3005 dev->base_addr = pci_resource_start(pcid, 1);
3007 if ((rc = pci_request_regions(pcid, e100_short_driver_name)) != 0) {
3008 goto err_disable;
3011 pci_enable_wake(pcid, 0, 0);
3013 /* if Bus Mastering is off, turn it on! */
3014 pci_set_master(pcid);
3016 /* address #0 is a memory mapping */
3017 bdp->scb = (scb_t *) ioremap_nocache(dev->mem_start, sizeof (scb_t));
3019 if (!bdp->scb) {
3020 printk(KERN_ERR "e100: %s: Failed to map PCI address 0x%lX\n",
3021 dev->name, pci_resource_start(pcid, 0));
3022 rc = -ENOMEM;
3023 goto err_region;
3026 return 0;
3028 err_region:
3029 pci_release_regions(pcid);
3030 err_disable:
3031 pci_disable_device(pcid);
3032 err:
3033 return rc;
3036 void
3037 e100_isolate_driver(struct e100_private *bdp)
3040 /* Check if interface is up */
3041 /* NOTE: Can't use netif_running(bdp->device) because */
3042 /* dev_close clears __LINK_STATE_START before calling */
3043 /* e100_close (aka dev->stop) */
3044 if (bdp->device->flags & IFF_UP) {
3045 e100_disable_clear_intr(bdp);
3046 del_timer_sync(&bdp->watchdog_timer);
3047 netif_carrier_off(bdp->device);
3048 netif_stop_queue(bdp->device);
3049 bdp->last_tcb = NULL;
3051 e100_sw_reset(bdp, PORT_SELECTIVE_RESET);
3054 static void
3055 e100_tcb_add_C_bit(struct e100_private *bdp)
3057 tcb_t *tcb = (tcb_t *) bdp->tcb_pool.data;
3058 int i;
3060 for (i = 0; i < bdp->params.TxDescriptors; i++, tcb++) {
3061 tcb->tcb_hdr.cb_status |= cpu_to_le16(CB_STATUS_COMPLETE);
3066 * Procedure: e100_configure_device
3068 * Description: This routine will configure the device
3070 * Arguments:
3071 * bdp - Ptr to this card's e100_bdconfig structure
3073 * Returns:
3074 * true upon success
3075 * false upon failure
3077 unsigned char
3078 e100_configure_device(struct e100_private *bdp)
3080 /*load CU & RU base */
3081 if (!e100_wait_exec_cmplx(bdp, 0, SCB_CUC_LOAD_BASE, 0))
3082 return false;
3084 if (e100_load_microcode(bdp))
3085 bdp->flags |= DF_UCODE_LOADED;
3087 if (!e100_wait_exec_cmplx(bdp, 0, SCB_RUC_LOAD_BASE, 0))
3088 return false;
3090 /* Issue the load dump counters address command */
3091 if (!e100_wait_exec_cmplx(bdp, bdp->stat_cnt_phys, SCB_CUC_DUMP_ADDR, 0))
3092 return false;
3094 if (!e100_setup_iaaddr(bdp, bdp->device->dev_addr)) {
3095 printk(KERN_ERR "e100: e100_configure_device: "
3096 "setup iaaddr failed\n");
3097 return false;
3100 e100_set_multi_exec(bdp->device);
3102 /* Change for 82558 enhancement */
3103 /* If 82558/9 and if the user has enabled flow control, set up */
3104 /* flow Control Reg. in the CSR */
3105 if ((bdp->flags & IS_BACHELOR)
3106 && (bdp->params.b_params & PRM_FC)) {
3107 writeb(DFLT_FC_THLD,
3108 &bdp->scb->scb_ext.d101_scb.scb_fc_thld);
3109 writeb(DFLT_FC_CMD,
3110 &bdp->scb->scb_ext.d101_scb.scb_fc_xon_xoff);
3113 e100_force_config(bdp);
3115 return true;
3118 void
3119 e100_deisolate_driver(struct e100_private *bdp, u8 full_reset)
3121 u32 cmd = full_reset ? PORT_SOFTWARE_RESET : PORT_SELECTIVE_RESET;
3122 e100_sw_reset(bdp, cmd);
3123 if (cmd == PORT_SOFTWARE_RESET) {
3124 if (!e100_configure_device(bdp))
3125 printk(KERN_ERR "e100: e100_deisolate_driver:"
3126 " device configuration failed\n");
3129 if (netif_running(bdp->device)) {
3131 bdp->next_cu_cmd = START_WAIT;
3132 bdp->last_tcb = NULL;
3134 e100_start_ru(bdp);
3136 /* relaunch watchdog timer in 2 sec */
3137 mod_timer(&(bdp->watchdog_timer), jiffies + (2 * HZ));
3139 // we must clear tcbs since we may have lost a Tx interrupt
3140 // or have unsent frames on the tcb chain
3141 e100_tcb_add_C_bit(bdp);
3142 e100_tx_srv(bdp);
3143 netif_wake_queue(bdp->device);
3144 e100_set_intr_mask(bdp);
3148 static int
3149 e100_do_ethtool_ioctl(struct net_device *dev, struct ifreq *ifr)
3151 struct ethtool_cmd ecmd;
3152 int rc = -EOPNOTSUPP;
3154 if (copy_from_user(&ecmd, ifr->ifr_data, sizeof (ecmd.cmd)))
3155 return -EFAULT;
3157 switch (ecmd.cmd) {
3158 case ETHTOOL_GSET:
3159 rc = e100_ethtool_get_settings(dev, ifr);
3160 break;
3161 case ETHTOOL_SSET:
3162 rc = e100_ethtool_set_settings(dev, ifr);
3163 break;
3164 case ETHTOOL_GDRVINFO:
3165 rc = e100_ethtool_get_drvinfo(dev, ifr);
3166 break;
3167 case ETHTOOL_GREGS:
3168 rc = e100_ethtool_gregs(dev, ifr);
3169 break;
3170 case ETHTOOL_NWAY_RST:
3171 rc = e100_ethtool_nway_rst(dev, ifr);
3172 break;
3173 case ETHTOOL_GLINK:
3174 rc = e100_ethtool_glink(dev, ifr);
3175 break;
3176 case ETHTOOL_GEEPROM:
3177 case ETHTOOL_SEEPROM:
3178 rc = e100_ethtool_eeprom(dev, ifr);
3179 break;
3180 case ETHTOOL_GSTATS: {
3181 struct {
3182 struct ethtool_stats cmd;
3183 uint64_t data[E100_STATS_LEN];
3184 } stats = { {ETHTOOL_GSTATS, E100_STATS_LEN} };
3185 struct e100_private *bdp = dev->priv;
3186 void *addr = ifr->ifr_data;
3187 int i;
3189 for(i = 0; i < E100_STATS_LEN; i++)
3190 stats.data[i] =
3191 ((unsigned long *)&bdp->drv_stats.net_stats)[i];
3192 if(copy_to_user(addr, &stats, sizeof(stats)))
3193 return -EFAULT;
3194 return 0;
3196 case ETHTOOL_GWOL:
3197 case ETHTOOL_SWOL:
3198 rc = e100_ethtool_wol(dev, ifr);
3199 break;
3200 case ETHTOOL_TEST:
3201 rc = e100_ethtool_test(dev, ifr);
3202 break;
3203 case ETHTOOL_GSTRINGS:
3204 rc = e100_ethtool_gstrings(dev,ifr);
3205 break;
3206 case ETHTOOL_PHYS_ID:
3207 rc = e100_ethtool_led_blink(dev,ifr);
3208 break;
3209 #ifdef ETHTOOL_GRINGPARAM
3210 case ETHTOOL_GRINGPARAM: {
3211 struct ethtool_ringparam ering;
3212 struct e100_private *bdp = dev->priv;
3213 memset((void *) &ering, 0, sizeof(ering));
3214 ering.rx_max_pending = E100_MAX_RFD;
3215 ering.tx_max_pending = E100_MAX_TCB;
3216 ering.rx_pending = bdp->params.RxDescriptors;
3217 ering.tx_pending = bdp->params.TxDescriptors;
3218 rc = copy_to_user(ifr->ifr_data, &ering, sizeof(ering))
3219 ? -EFAULT : 0;
3220 return rc;
3222 #endif
3223 #ifdef ETHTOOL_SRINGPARAM
3224 case ETHTOOL_SRINGPARAM: {
3225 struct ethtool_ringparam ering;
3226 struct e100_private *bdp = dev->priv;
3227 if (copy_from_user(&ering, ifr->ifr_data, sizeof(ering)))
3228 return -EFAULT;
3229 if (ering.rx_pending > E100_MAX_RFD
3230 || ering.rx_pending < E100_MIN_RFD)
3231 return -EINVAL;
3232 if (ering.tx_pending > E100_MAX_TCB
3233 || ering.tx_pending < E100_MIN_TCB)
3234 return -EINVAL;
3235 if (netif_running(dev)) {
3236 spin_lock_bh(&dev->xmit_lock);
3237 e100_close(dev);
3238 spin_unlock_bh(&dev->xmit_lock);
3239 /* Use new values to open interface */
3240 bdp->params.RxDescriptors = ering.rx_pending;
3241 bdp->params.TxDescriptors = ering.tx_pending;
3242 e100_hw_init(bdp);
3243 e100_open(dev);
3245 else {
3246 bdp->params.RxDescriptors = ering.rx_pending;
3247 bdp->params.TxDescriptors = ering.tx_pending;
3249 return 0;
3251 #endif
3252 #ifdef ETHTOOL_GPAUSEPARAM
3253 case ETHTOOL_GPAUSEPARAM: {
3254 struct ethtool_pauseparam epause;
3255 struct e100_private *bdp = dev->priv;
3256 memset((void *) &epause, 0, sizeof(epause));
3257 if ((bdp->flags & IS_BACHELOR)
3258 && (bdp->params.b_params & PRM_FC)) {
3259 epause.autoneg = 1;
3260 if (bdp->flags & DF_LINK_FC_CAP) {
3261 epause.rx_pause = 1;
3262 epause.tx_pause = 1;
3264 if (bdp->flags & DF_LINK_FC_TX_ONLY)
3265 epause.tx_pause = 1;
3267 rc = copy_to_user(ifr->ifr_data, &epause, sizeof(epause))
3268 ? -EFAULT : 0;
3269 return rc;
3271 #endif
3272 #ifdef ETHTOOL_SPAUSEPARAM
3273 case ETHTOOL_SPAUSEPARAM: {
3274 struct ethtool_pauseparam epause;
3275 struct e100_private *bdp = dev->priv;
3276 if (!(bdp->flags & IS_BACHELOR))
3277 return -EINVAL;
3278 if (copy_from_user(&epause, ifr->ifr_data, sizeof(epause)))
3279 return -EFAULT;
3280 if (epause.autoneg == 1)
3281 bdp->params.b_params |= PRM_FC;
3282 else
3283 bdp->params.b_params &= ~PRM_FC;
3284 if (netif_running(dev)) {
3285 spin_lock_bh(&dev->xmit_lock);
3286 e100_close(dev);
3287 spin_unlock_bh(&dev->xmit_lock);
3288 e100_hw_init(bdp);
3289 e100_open(dev);
3291 return 0;
3293 #endif
3294 #ifdef ETHTOOL_GRXCSUM
3295 case ETHTOOL_GRXCSUM:
3296 case ETHTOOL_GTXCSUM:
3297 case ETHTOOL_GSG:
3298 { struct ethtool_value eval;
3299 struct e100_private *bdp = dev->priv;
3300 memset((void *) &eval, 0, sizeof(eval));
3301 if ((ecmd.cmd == ETHTOOL_GRXCSUM)
3302 && (bdp->params.b_params & PRM_XSUMRX))
3303 eval.data = 1;
3304 else
3305 eval.data = 0;
3306 rc = copy_to_user(ifr->ifr_data, &eval, sizeof(eval))
3307 ? -EFAULT : 0;
3308 return rc;
3310 #endif
3311 #ifdef ETHTOOL_SRXCSUM
3312 case ETHTOOL_SRXCSUM:
3313 case ETHTOOL_STXCSUM:
3314 case ETHTOOL_SSG:
3315 { struct ethtool_value eval;
3316 struct e100_private *bdp = dev->priv;
3317 if (copy_from_user(&eval, ifr->ifr_data, sizeof(eval)))
3318 return -EFAULT;
3319 if (ecmd.cmd == ETHTOOL_SRXCSUM) {
3320 if (eval.data == 1) {
3321 if (bdp->rev_id >= D101MA_REV_ID)
3322 bdp->params.b_params |= PRM_XSUMRX;
3323 else
3324 return -EINVAL;
3325 } else {
3326 if (bdp->rev_id >= D101MA_REV_ID)
3327 bdp->params.b_params &= ~PRM_XSUMRX;
3328 else
3329 return 0;
3331 } else {
3332 if (eval.data == 1)
3333 return -EINVAL;
3334 else
3335 return 0;
3337 if (netif_running(dev)) {
3338 spin_lock_bh(&dev->xmit_lock);
3339 e100_close(dev);
3340 spin_unlock_bh(&dev->xmit_lock);
3341 e100_hw_init(bdp);
3342 e100_open(dev);
3344 return 0;
3346 #endif
3347 default:
3348 break;
3349 } //switch
3350 return rc;
3353 static int
3354 e100_ethtool_get_settings(struct net_device *dev, struct ifreq *ifr)
3356 struct e100_private *bdp;
3357 struct ethtool_cmd ecmd;
3358 u16 advert = 0;
3360 memset((void *) &ecmd, 0, sizeof (ecmd));
3362 bdp = dev->priv;
3364 ecmd.supported = bdp->speed_duplex_caps;
3366 ecmd.port =
3367 (bdp->speed_duplex_caps & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
3368 ecmd.transceiver = XCVR_INTERNAL;
3369 ecmd.phy_address = bdp->phy_addr;
3371 if (netif_carrier_ok(bdp->device)) {
3372 ecmd.speed = bdp->cur_line_speed;
3373 ecmd.duplex =
3374 (bdp->cur_dplx_mode == HALF_DUPLEX) ? DUPLEX_HALF : DUPLEX_FULL;
3376 else {
3377 ecmd.speed = -1;
3378 ecmd.duplex = -1;
3381 ecmd.advertising = ADVERTISED_TP;
3383 if (bdp->params.e100_speed_duplex == E100_AUTONEG) {
3384 ecmd.autoneg = AUTONEG_ENABLE;
3385 ecmd.advertising |= ADVERTISED_Autoneg;
3386 } else {
3387 ecmd.autoneg = AUTONEG_DISABLE;
3390 if (bdp->speed_duplex_caps & SUPPORTED_MII) {
3391 e100_mdi_read(bdp, MII_ADVERTISE, bdp->phy_addr, &advert);
3393 if (advert & ADVERTISE_10HALF)
3394 ecmd.advertising |= ADVERTISED_10baseT_Half;
3395 if (advert & ADVERTISE_10FULL)
3396 ecmd.advertising |= ADVERTISED_10baseT_Full;
3397 if (advert & ADVERTISE_100HALF)
3398 ecmd.advertising |= ADVERTISED_100baseT_Half;
3399 if (advert & ADVERTISE_100FULL)
3400 ecmd.advertising |= ADVERTISED_100baseT_Full;
3401 } else {
3402 ecmd.autoneg = AUTONEG_DISABLE;
3403 ecmd.advertising &= ~ADVERTISED_Autoneg;
3406 if (copy_to_user(ifr->ifr_data, &ecmd, sizeof (ecmd)))
3407 return -EFAULT;
3409 return 0;
3412 static int
3413 e100_ethtool_set_settings(struct net_device *dev, struct ifreq *ifr)
3415 struct e100_private *bdp;
3416 int e100_new_speed_duplex;
3417 int ethtool_new_speed_duplex;
3418 struct ethtool_cmd ecmd;
3420 bdp = dev->priv;
3421 if (copy_from_user(&ecmd, ifr->ifr_data, sizeof (ecmd))) {
3422 return -EFAULT;
3425 if ((ecmd.autoneg == AUTONEG_ENABLE)
3426 && (bdp->speed_duplex_caps & SUPPORTED_Autoneg)) {
3427 bdp->params.e100_speed_duplex = E100_AUTONEG;
3428 if (netif_running(dev)) {
3429 spin_lock_bh(&dev->xmit_lock);
3430 e100_close(dev);
3431 spin_unlock_bh(&dev->xmit_lock);
3432 e100_hw_init(bdp);
3433 e100_open(dev);
3435 } else {
3436 if (ecmd.speed == SPEED_10) {
3437 if (ecmd.duplex == DUPLEX_HALF) {
3438 e100_new_speed_duplex =
3439 E100_SPEED_10_HALF;
3440 ethtool_new_speed_duplex =
3441 SUPPORTED_10baseT_Half;
3442 } else {
3443 e100_new_speed_duplex =
3444 E100_SPEED_10_FULL;
3445 ethtool_new_speed_duplex =
3446 SUPPORTED_10baseT_Full;
3448 } else {
3449 if (ecmd.duplex == DUPLEX_HALF) {
3450 e100_new_speed_duplex =
3451 E100_SPEED_100_HALF;
3452 ethtool_new_speed_duplex =
3453 SUPPORTED_100baseT_Half;
3454 } else {
3455 e100_new_speed_duplex =
3456 E100_SPEED_100_FULL;
3457 ethtool_new_speed_duplex =
3458 SUPPORTED_100baseT_Full;
3462 if (bdp->speed_duplex_caps & ethtool_new_speed_duplex) {
3463 bdp->params.e100_speed_duplex =
3464 e100_new_speed_duplex;
3465 if (netif_running(dev)) {
3466 spin_lock_bh(&dev->xmit_lock);
3467 e100_close(dev);
3468 spin_unlock_bh(&dev->xmit_lock);
3469 e100_hw_init(bdp);
3470 e100_open(dev);
3472 } else {
3473 return -EOPNOTSUPP;
3477 return 0;
3480 static int
3481 e100_ethtool_glink(struct net_device *dev, struct ifreq *ifr)
3483 struct e100_private *bdp;
3484 struct ethtool_value info;
3486 memset((void *) &info, 0, sizeof (info));
3488 bdp = dev->priv;
3489 info.cmd = ETHTOOL_GLINK;
3491 /* Consider both PHY link and netif_running */
3492 info.data = e100_update_link_state(bdp);
3494 if (copy_to_user(ifr->ifr_data, &info, sizeof (info)))
3495 return -EFAULT;
3497 return 0;
3500 static int
3501 e100_ethtool_test(struct net_device *dev, struct ifreq *ifr)
3503 struct ethtool_test *info;
3504 int rc = -EFAULT;
3506 info = kmalloc(sizeof(*info) + max_test_res * sizeof(u64),
3507 GFP_ATOMIC);
3509 if (!info)
3510 return -ENOMEM;
3512 memset((void *) info, 0, sizeof(*info) +
3513 max_test_res * sizeof(u64));
3515 if (copy_from_user(info, ifr->ifr_data, sizeof(*info)))
3516 goto exit;
3518 info->flags = e100_run_diag(dev, info->data, info->flags);
3520 if (!copy_to_user(ifr->ifr_data, info,
3521 sizeof(*info) + max_test_res * sizeof(u64)))
3522 rc = 0;
3523 exit:
3524 kfree(info);
3525 return rc;
3528 static int
3529 e100_ethtool_gregs(struct net_device *dev, struct ifreq *ifr)
3531 struct e100_private *bdp;
3532 u32 regs_buff[E100_REGS_LEN];
3533 struct ethtool_regs regs = {ETHTOOL_GREGS};
3534 void *addr = ifr->ifr_data;
3535 u16 mdi_reg;
3537 bdp = dev->priv;
3539 if(copy_from_user(&regs, addr, sizeof(regs)))
3540 return -EFAULT;
3542 regs.version = (1 << 24) | bdp->rev_id;
3543 regs_buff[0] = readb(&(bdp->scb->scb_cmd_hi)) << 24 |
3544 readb(&(bdp->scb->scb_cmd_low)) << 16 |
3545 readw(&(bdp->scb->scb_status));
3546 e100_mdi_read(bdp, MII_NCONFIG, bdp->phy_addr, &mdi_reg);
3547 regs_buff[1] = mdi_reg;
3549 if(copy_to_user(addr, &regs, sizeof(regs)))
3550 return -EFAULT;
3552 addr += offsetof(struct ethtool_regs, data);
3553 if(copy_to_user(addr, regs_buff, regs.len))
3554 return -EFAULT;
3556 return 0;
3559 static int
3560 e100_ethtool_nway_rst(struct net_device *dev, struct ifreq *ifr)
3562 struct e100_private *bdp;
3564 bdp = dev->priv;
3566 if ((bdp->speed_duplex_caps & SUPPORTED_Autoneg) &&
3567 (bdp->params.e100_speed_duplex == E100_AUTONEG)) {
3568 if (netif_running(dev)) {
3569 spin_lock_bh(&dev->xmit_lock);
3570 e100_close(dev);
3571 spin_unlock_bh(&dev->xmit_lock);
3572 e100_hw_init(bdp);
3573 e100_open(dev);
3575 } else {
3576 return -EFAULT;
3578 return 0;
3581 static int
3582 e100_ethtool_get_drvinfo(struct net_device *dev, struct ifreq *ifr)
3584 struct e100_private *bdp;
3585 struct ethtool_drvinfo info;
3587 memset((void *) &info, 0, sizeof (info));
3589 bdp = dev->priv;
3591 strncpy(info.driver, e100_short_driver_name, sizeof (info.driver) - 1);
3592 strncpy(info.version, e100_driver_version, sizeof (info.version) - 1);
3593 strncpy(info.fw_version, "N/A",
3594 sizeof (info.fw_version) - 1);
3595 strncpy(info.bus_info, bdp->pdev->slot_name,
3596 sizeof (info.bus_info) - 1);
3597 info.n_stats = E100_STATS_LEN;
3598 info.regdump_len = E100_REGS_LEN * sizeof(u32);
3599 info.eedump_len = (bdp->eeprom_size << 1);
3600 info.testinfo_len = max_test_res;
3601 if (copy_to_user(ifr->ifr_data, &info, sizeof (info)))
3602 return -EFAULT;
3604 return 0;
3607 static int
3608 e100_ethtool_eeprom(struct net_device *dev, struct ifreq *ifr)
3610 struct e100_private *bdp;
3611 struct ethtool_eeprom ecmd;
3612 u16 eeprom_data[256];
3613 u16 *usr_eeprom_ptr;
3614 u16 first_word, last_word;
3615 int i, max_len;
3616 void *ptr;
3617 u8 *eeprom_data_bytes = (u8 *)eeprom_data;
3619 bdp = dev->priv;
3621 if (copy_from_user(&ecmd, ifr->ifr_data, sizeof (ecmd)))
3622 return -EFAULT;
3624 usr_eeprom_ptr =
3625 (u16 *) (ifr->ifr_data + offsetof(struct ethtool_eeprom, data));
3627 max_len = bdp->eeprom_size * 2;
3629 if (ecmd.offset > ecmd.offset + ecmd.len)
3630 return -EINVAL;
3632 if ((ecmd.offset + ecmd.len) > max_len)
3633 ecmd.len = (max_len - ecmd.offset);
3635 first_word = ecmd.offset >> 1;
3636 last_word = (ecmd.offset + ecmd.len - 1) >> 1;
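/* Byte offsets are converted to 16-bit word indices; e.g. offset 3,
 * len 4 touches bytes 3..6, giving first_word = 1 and last_word = 3. */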
3638 if (first_word >= bdp->eeprom_size)
3639 return -EFAULT;
3641 if (ecmd.cmd == ETHTOOL_GEEPROM) {
3642 for(i = 0; i <= (last_word - first_word); i++)
3643 eeprom_data[i] = e100_eeprom_read(bdp, first_word + i);
3645 ecmd.magic = E100_EEPROM_MAGIC;
3647 if (copy_to_user(ifr->ifr_data, &ecmd, sizeof (ecmd)))
3648 return -EFAULT;
3650 if(ecmd.offset & 1)
3651 eeprom_data_bytes++;
3652 if (copy_to_user(usr_eeprom_ptr, eeprom_data_bytes, ecmd.len))
3653 return -EFAULT;
3654 } else {
3655 if (ecmd.magic != E100_EEPROM_MAGIC)
3656 return -EFAULT;
3658 ptr = (void *)eeprom_data;
3659 if(ecmd.offset & 1) {
3660 /* need modification of first changed EEPROM word */
3661 /* only the second byte of the word is being modified */
3662 eeprom_data[0] = e100_eeprom_read(bdp, first_word);
3663 ptr++;
3665 if((ecmd.offset + ecmd.len) & 1) {
3666 /* need modification of last changed EEPROM word */
3667 /* only the first byte of the word is being modified */
3668 eeprom_data[last_word - first_word] =
3669 e100_eeprom_read(bdp, last_word);
3671 if(copy_from_user(ptr, usr_eeprom_ptr, ecmd.len))
3672 return -EFAULT;
3674 e100_eeprom_write_block(bdp, first_word, eeprom_data,
3675 last_word - first_word + 1);
3677 if (copy_to_user(ifr->ifr_data, &ecmd, sizeof (ecmd)))
3678 return -EFAULT;
3680 return 0;
3683 #define E100_BLINK_INTERVAL (HZ/4)
3685 * e100_led_control
3686 * @bdp: adapter's private data struct
3687 * @led_mdi_op: led operation
3689 * Software control over adapter's led. The possible operations are:
3690 * TURN LED OFF, TURN LED ON and RETURN LED CONTROL TO HARDWARE.
3692 static void
3693 e100_led_control(struct e100_private *bdp, u16 led_mdi_op)
3695 e100_mdi_write(bdp, PHY_82555_LED_SWITCH_CONTROL,
3696 bdp->phy_addr, led_mdi_op);
3700 * e100_led_blink_callback
3701 * @data: pointer to adapter's private data struct
3703 * Blink timer callback function. Toggles ON/OFF led status bit and calls
3704 * led hardware access function.
3706 static void
3707 e100_led_blink_callback(unsigned long data)
3709 struct e100_private *bdp = (struct e100_private *) data;
3711 if(bdp->flags & LED_IS_ON) {
3712 bdp->flags &= ~LED_IS_ON;
3713 e100_led_control(bdp, PHY_82555_LED_OFF);
3714 } else {
3715 bdp->flags |= LED_IS_ON;
3716 if (bdp->rev_id >= D101MA_REV_ID)
3717 e100_led_control(bdp, PHY_82555_LED_ON_559);
3718 else
3719 e100_led_control(bdp, PHY_82555_LED_ON_PRE_559);
3722 mod_timer(&bdp->blink_timer, jiffies + E100_BLINK_INTERVAL);
3725 * e100_ethtool_led_blink
3726 * @dev: pointer to adapter's net_device struct
3727 * @ifr: pointer to ioctl request structure
3729 * Blink led ioctl handler. Initiates the blink timer and sleeps until
3730 * the blink period expires. Then it kills the timer and returns. The led
3731 * control is returned back to hardware when the blink timer is killed.
3733 static int
3734 e100_ethtool_led_blink(struct net_device *dev, struct ifreq *ifr)
3736 struct e100_private *bdp;
3737 struct ethtool_value ecmd;
3739 bdp = dev->priv;
3741 if (copy_from_user(&ecmd, ifr->ifr_data, sizeof (ecmd)))
3742 return -EFAULT;
3744 if(!bdp->blink_timer.function) {
3745 init_timer(&bdp->blink_timer);
3746 bdp->blink_timer.function = e100_led_blink_callback;
3747 bdp->blink_timer.data = (unsigned long) bdp;
3750 mod_timer(&bdp->blink_timer, jiffies);
3752 set_current_state(TASK_INTERRUPTIBLE);
3754 if ((!ecmd.data) || (ecmd.data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ)))
3755 ecmd.data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ);
3757 schedule_timeout(ecmd.data * HZ);
3759 del_timer_sync(&bdp->blink_timer);
3761 e100_led_control(bdp, PHY_82555_LED_NORMAL_CONTROL);
3763 return 0;
3766 static inline int __devinit
3767 e100_10BaseT_adapter(struct e100_private *bdp)
3769 return ((bdp->pdev->device == 0x1229) &&
3770 (bdp->pdev->subsystem_vendor == 0x8086) &&
3771 (bdp->pdev->subsystem_device == 0x0003));
3774 static void __devinit
3775 e100_get_speed_duplex_caps(struct e100_private *bdp)
3777 u16 status;
3779 e100_mdi_read(bdp, MII_BMSR, bdp->phy_addr, &status);
3781 bdp->speed_duplex_caps = 0;
3783 bdp->speed_duplex_caps |=
3784 (status & BMSR_ANEGCAPABLE) ? SUPPORTED_Autoneg : 0;
3786 bdp->speed_duplex_caps |=
3787 (status & BMSR_10HALF) ? SUPPORTED_10baseT_Half : 0;
3789 bdp->speed_duplex_caps |=
3790 (status & BMSR_10FULL) ? SUPPORTED_10baseT_Full : 0;
3792 bdp->speed_duplex_caps |=
3793 (status & BMSR_100HALF) ? SUPPORTED_100baseT_Half : 0;
3795 bdp->speed_duplex_caps |=
3796 (status & BMSR_100FULL) ? SUPPORTED_100baseT_Full : 0;
3798 if (IS_NC3133(bdp))
3799 bdp->speed_duplex_caps =
3800 (SUPPORTED_FIBRE | SUPPORTED_100baseT_Full);
3801 else
3802 bdp->speed_duplex_caps |= SUPPORTED_TP;
3804 if ((status == 0xFFFF) && e100_10BaseT_adapter(bdp)) {
3805 bdp->speed_duplex_caps =
3806 (SUPPORTED_10baseT_Half | SUPPORTED_TP);
3807 } else {
3808 bdp->speed_duplex_caps |= SUPPORTED_MII;
3813 #ifdef CONFIG_PM
3814 static unsigned char
3815 e100_setup_filter(struct e100_private *bdp)
3817 cb_header_t *ntcb_hdr;
3818 unsigned char res = false;
3819 nxmit_cb_entry_t *cmd;
3821 if ((cmd = e100_alloc_non_tx_cmd(bdp)) == NULL) {
3822 goto exit;
3825 ntcb_hdr = (cb_header_t *) cmd->non_tx_cmd;
3826 ntcb_hdr->cb_cmd = __constant_cpu_to_le16(CB_LOAD_FILTER);
3828 /* Set EL and FIX bit */
3829 (cmd->non_tx_cmd)->ntcb.filter.filter_data[0] =
3830 __constant_cpu_to_le32(CB_FILTER_EL | CB_FILTER_FIX);
3832 if (bdp->wolopts & WAKE_UCAST) {
3833 (cmd->non_tx_cmd)->ntcb.filter.filter_data[0] |=
3834 __constant_cpu_to_le32(CB_FILTER_IA_MATCH);
3837 if (bdp->wolopts & WAKE_ARP) {
3838 /* Setup ARP bit and lower IP parts */
3839 /* bdp->ip_lbytes contains 2 lower bytes of IP address in network byte order */
3840 (cmd->non_tx_cmd)->ntcb.filter.filter_data[0] |=
3841 cpu_to_le32(CB_FILTER_ARP | bdp->ip_lbytes);
3844 res = e100_exec_non_cu_cmd(bdp, cmd);
3845 if (!res)
3846 printk(KERN_WARNING "e100: %s: Filter setup failed\n",
3847 bdp->device->name);
3849 exit:
3850 return res;
3854 static void
3855 e100_do_wol(struct pci_dev *pcid, struct e100_private *bdp)
3857 e100_config_wol(bdp);
3859 if (e100_config(bdp)) {
3860 if (bdp->wolopts & (WAKE_UCAST | WAKE_ARP))
3861 if (!e100_setup_filter(bdp))
3862 printk(KERN_ERR
3863 "e100: WOL options failed\n");
3864 } else {
3865 printk(KERN_ERR "e100: config WOL failed\n");
3868 #endif
3870 static u16
3871 e100_get_ip_lbytes(struct net_device *dev)
3873 struct in_ifaddr *ifa;
3874 struct in_device *in_dev;
3875 u32 res = 0;
3877 in_dev = (struct in_device *) dev->ip_ptr;
3878 /* Check if any in_device bound to interface */
3879 if (in_dev) {
3880 /* Check if any IP address is bound to interface */
3881 if ((ifa = in_dev->ifa_list) != NULL) {
3882 res = __constant_ntohl(ifa->ifa_address);
3883 res = __constant_htons(res & 0x0000ffff);
3886 return res;
3889 static int
3890 e100_ethtool_wol(struct net_device *dev, struct ifreq *ifr)
3892 struct e100_private *bdp;
3893 struct ethtool_wolinfo wolinfo;
3894 int res = 0;
3896 bdp = dev->priv;
3898 if (copy_from_user(&wolinfo, ifr->ifr_data, sizeof (wolinfo))) {
3899 return -EFAULT;
3902 switch (wolinfo.cmd) {
3903 case ETHTOOL_GWOL:
3904 wolinfo.supported = bdp->wolsupported;
3905 wolinfo.wolopts = bdp->wolopts;
3906 if (copy_to_user(ifr->ifr_data, &wolinfo, sizeof (wolinfo)))
3907 res = -EFAULT;
3908 break;
3909 case ETHTOOL_SWOL:
3910 /* If ALL requests are supported or request is DISABLE wol */
3911 if (((wolinfo.wolopts & bdp->wolsupported) == wolinfo.wolopts)
3912 || (wolinfo.wolopts == 0)) {
3913 bdp->wolopts = wolinfo.wolopts;
3914 } else {
3915 res = -EOPNOTSUPP;
3917 if (wolinfo.wolopts & WAKE_ARP)
3918 bdp->ip_lbytes = e100_get_ip_lbytes(dev);
3919 break;
3920 default:
3921 break;
3923 return res;
static int e100_ethtool_gstrings(struct net_device *dev, struct ifreq *ifr)
{
	struct ethtool_gstrings info;
	char *strings = NULL;
	char *usr_strings;
	int i;

	memset((void *) &info, 0, sizeof(info));

	usr_strings = (char *) (ifr->ifr_data +
				offsetof(struct ethtool_gstrings, data));

	if (copy_from_user(&info, ifr->ifr_data, sizeof (info)))
		return -EFAULT;

	switch (info.string_set) {
	case ETH_SS_TEST: {
		int ret = 0;

		if (info.len > max_test_res)
			info.len = max_test_res;
		strings = kmalloc(info.len * ETH_GSTRING_LEN, GFP_ATOMIC);
		if (!strings)
			return -ENOMEM;
		memset(strings, 0, info.len * ETH_GSTRING_LEN);

		for (i = 0; i < info.len; i++) {
			sprintf(strings + i * ETH_GSTRING_LEN, "%s",
				test_strings[i]);
		}
		if (copy_to_user(ifr->ifr_data, &info, sizeof (info)))
			ret = -EFAULT;
		if (copy_to_user(usr_strings, strings,
				 info.len * ETH_GSTRING_LEN))
			ret = -EFAULT;
		kfree(strings);
		return ret;
	}
	case ETH_SS_STATS: {
		void *addr = ifr->ifr_data;

		info.len = E100_STATS_LEN;
		if (copy_to_user(ifr->ifr_data, &info, sizeof(info)))
			return -EFAULT;
		addr += offsetof(struct ethtool_gstrings, data);
		if (copy_to_user(addr, *e100_gstrings_stats,
				 info.len * ETH_GSTRING_LEN))
			return -EFAULT;
		return 0;
	}
	default:
		return -EOPNOTSUPP;
	}
}
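/**
 * e100_mii_ioctl - handle the SIOCxMIIxxx ioctls
 * @dev: adapter's net_device struct
 * @ifr: user request carrying a struct mii_ioctl_data
 * @cmd: SIOCGMIIPHY, SIOCGMIIREG or SIOCSMIIREG
 *
 * Reads and writes PHY registers on behalf of user space. Writes are
 * only honored for speed/duplex changes through BMCR, which are applied
 * by re-initializing the interface.
 */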
static int
e100_mii_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct e100_private *bdp;
	struct mii_ioctl_data *data_ptr =
		(struct mii_ioctl_data *) &(ifr->ifr_data);

	bdp = dev->priv;

	switch (cmd) {
	case SIOCGMIIPHY:
		data_ptr->phy_id = bdp->phy_addr & 0x1f;
		break;

	case SIOCGMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		e100_mdi_read(bdp, data_ptr->reg_num & 0x1f, bdp->phy_addr,
			      &(data_ptr->val_out));
		break;

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		/* Only writes to BMCR (register 0) that restart
		 * autonegotiation, reset the PHY, or force a new
		 * speed/duplex setting are honored */
		if (data_ptr->reg_num == 0 &&
		    (data_ptr->val_in == (BMCR_ANENABLE | BMCR_ANRESTART)	/* restart cmd */
		     || data_ptr->val_in == (BMCR_RESET)	/* reset cmd */
		     || data_ptr->val_in & (BMCR_SPEED100 | BMCR_FULLDPLX)
		     || data_ptr->val_in == 0)) {
			if (data_ptr->val_in == (BMCR_ANENABLE | BMCR_ANRESTART)
			    || data_ptr->val_in == (BMCR_RESET))
				bdp->params.e100_speed_duplex = E100_AUTONEG;
			else if (data_ptr->val_in == (BMCR_SPEED100 | BMCR_FULLDPLX))
				bdp->params.e100_speed_duplex = E100_SPEED_100_FULL;
			else if (data_ptr->val_in == (BMCR_SPEED100))
				bdp->params.e100_speed_duplex = E100_SPEED_100_HALF;
			else if (data_ptr->val_in == (BMCR_FULLDPLX))
				bdp->params.e100_speed_duplex = E100_SPEED_10_FULL;
			else
				bdp->params.e100_speed_duplex = E100_SPEED_10_HALF;
			if (netif_running(dev)) {
				spin_lock_bh(&dev->xmit_lock);
				e100_close(dev);
				spin_unlock_bh(&dev->xmit_lock);
				e100_hw_init(bdp);
				e100_open(dev);
			}
		} else {
			/* Only speed/duplex changes are allowed */
			return -EINVAL;
		}
		break;

	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
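/**
 * e100_alloc_non_tx_cmd - allocate a non-transmit command element
 * @bdp: adapter's private data struct
 *
 * Allocates a command list element together with the DMA-consistent
 * buffer that holds the command block itself.
 *
 * Returns: the new element, or NULL on allocation failure
 */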
nxmit_cb_entry_t *
e100_alloc_non_tx_cmd(struct e100_private *bdp)
{
	nxmit_cb_entry_t *non_tx_cmd_elem;

	if (!(non_tx_cmd_elem = (nxmit_cb_entry_t *)
	      kmalloc(sizeof (nxmit_cb_entry_t), GFP_ATOMIC))) {
		return NULL;
	}
	non_tx_cmd_elem->non_tx_cmd =
		pci_alloc_consistent(bdp->pdev, sizeof (nxmit_cb_t),
				     &(non_tx_cmd_elem->dma_addr));
	if (non_tx_cmd_elem->non_tx_cmd == NULL) {
		kfree(non_tx_cmd_elem);
		return NULL;
	}
	return non_tx_cmd_elem;
}
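/**
 * e100_free_non_tx_cmd - free a non-transmit command element
 * @bdp: adapter's private data struct
 * @non_tx_cmd_elem: element obtained from e100_alloc_non_tx_cmd
 *
 * Releases both the DMA-consistent command buffer and the list element.
 */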
void
e100_free_non_tx_cmd(struct e100_private *bdp,
		     nxmit_cb_entry_t *non_tx_cmd_elem)
{
	pci_free_consistent(bdp->pdev, sizeof (nxmit_cb_t),
			    non_tx_cmd_elem->non_tx_cmd,
			    non_tx_cmd_elem->dma_addr);
	kfree(non_tx_cmd_elem);
}
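/**
 * e100_free_nontx_list - drain the queue of pending non-transmit commands
 * @bdp: adapter's private data struct
 *
 * Frees every queued command and clears the per-command-type shortcut
 * table.
 */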
static void
e100_free_nontx_list(struct e100_private *bdp)
{
	nxmit_cb_entry_t *command;
	int i;

	while (!list_empty(&bdp->non_tx_cmd_list)) {
		command = list_entry(bdp->non_tx_cmd_list.next,
				     nxmit_cb_entry_t, list_elem);
		list_del(&(command->list_elem));
		e100_free_non_tx_cmd(bdp, command);
	}

	for (i = 0; i < CB_MAX_NONTX_CMD; i++) {
		bdp->same_cmd_entry[i] = NULL;
	}
}
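/**
 * e100_delayed_exec_non_cu_cmd - queue a non-transmit command for execution
 * @bdp: adapter's private data struct
 * @command: command element to queue
 *
 * If a command of the same type is already queued, the new parameters
 * simply overwrite the pending command block; otherwise the element is
 * appended to the queue. The background timer is kicked if it is idle.
 *
 * Returns: true
 */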
static unsigned char
e100_delayed_exec_non_cu_cmd(struct e100_private *bdp,
			     nxmit_cb_entry_t *command)
{
	nxmit_cb_entry_t *same_command;
	cb_header_t *ntcb_hdr;
	u16 cmd;

	ntcb_hdr = (cb_header_t *) command->non_tx_cmd;

	cmd = CB_CMD_MASK & le16_to_cpu(ntcb_hdr->cb_cmd);

	spin_lock_bh(&(bdp->bd_non_tx_lock));

	same_command = bdp->same_cmd_entry[cmd];

	if (same_command != NULL) {
		memcpy((void *) (same_command->non_tx_cmd),
		       (void *) (command->non_tx_cmd), sizeof (nxmit_cb_t));
		e100_free_non_tx_cmd(bdp, command);
	} else {
		list_add_tail(&(command->list_elem), &(bdp->non_tx_cmd_list));
		bdp->same_cmd_entry[cmd] = command;
	}

	if (bdp->non_tx_command_state == E100_NON_TX_IDLE) {
		bdp->non_tx_command_state = E100_WAIT_TX_FINISH;
		mod_timer(&(bdp->nontx_timer_id), jiffies + 1);
	}

	spin_unlock_bh(&(bdp->bd_non_tx_lock));
	return true;
}
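/**
 * e100_non_tx_background - timer routine driving queued non-transmit commands
 * @ptr: adapter's private data struct, cast to unsigned long
 *
 * State machine that waits for the transmit unit to go idle, then issues
 * queued non-transmit commands one at a time, reaping each command on
 * completion or after a one second timeout. Re-arms itself until the
 * queue is empty, then wakes the transmit queue.
 */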
static void
e100_non_tx_background(unsigned long ptr)
{
	struct e100_private *bdp = (struct e100_private *) ptr;
	nxmit_cb_entry_t *active_command;
	int restart = true;
	cb_header_t *non_tx_cmd;
	u8 sub_cmd;

	spin_lock_bh(&(bdp->bd_non_tx_lock));

	switch (bdp->non_tx_command_state) {
	case E100_WAIT_TX_FINISH:
		if (bdp->last_tcb != NULL) {
			rmb();
			if ((bdp->last_tcb->tcb_hdr.cb_status &
			     __constant_cpu_to_le16(CB_STATUS_COMPLETE)) == 0)
				goto exit;
		}
		if ((readw(&bdp->scb->scb_status) & SCB_CUS_MASK) ==
		    SCB_CUS_ACTIVE) {
			goto exit;
		}
		break;

	case E100_WAIT_NON_TX_FINISH:
		active_command = list_entry(bdp->non_tx_cmd_list.next,
					    nxmit_cb_entry_t, list_elem);
		rmb();

		if (((((cb_header_t *) (active_command->non_tx_cmd))->cb_status
		      & __constant_cpu_to_le16(CB_STATUS_COMPLETE)) == 0)
		    && time_before(jiffies, active_command->expiration_time)) {
			goto exit;
		} else {
			non_tx_cmd = (cb_header_t *) active_command->non_tx_cmd;
			sub_cmd = CB_CMD_MASK & le16_to_cpu(non_tx_cmd->cb_cmd);
#ifdef E100_CU_DEBUG
			if (!(non_tx_cmd->cb_status
			      & __constant_cpu_to_le16(CB_STATUS_COMPLETE)))
				printk(KERN_ERR "e100: %s: Queued "
				       "command (%x) timeout\n",
				       bdp->device->name, sub_cmd);
#endif
			list_del(&(active_command->list_elem));
			e100_free_non_tx_cmd(bdp, active_command);
		}
		break;

	default:
		break;
	}			/* switch */

	if (list_empty(&bdp->non_tx_cmd_list)) {
		bdp->non_tx_command_state = E100_NON_TX_IDLE;
		spin_lock_irq(&(bdp->bd_lock));
		bdp->next_cu_cmd = START_WAIT;
		spin_unlock_irq(&(bdp->bd_lock));
		restart = false;
		goto exit;
	} else {
		u16 cmd_type;

		bdp->non_tx_command_state = E100_WAIT_NON_TX_FINISH;
		active_command = list_entry(bdp->non_tx_cmd_list.next,
					    nxmit_cb_entry_t, list_elem);
		sub_cmd = ((cb_header_t *) active_command->non_tx_cmd)->cb_cmd;
		spin_lock_irq(&(bdp->bd_lock));
		e100_wait_exec_cmplx(bdp, active_command->dma_addr,
				     SCB_CUC_START, sub_cmd);
		spin_unlock_irq(&(bdp->bd_lock));
		active_command->expiration_time = jiffies + HZ;
		cmd_type = CB_CMD_MASK &
			le16_to_cpu(((cb_header_t *)
				     (active_command->non_tx_cmd))->cb_cmd);
		bdp->same_cmd_entry[cmd_type] = NULL;
	}

exit:
	if (restart) {
		mod_timer(&(bdp->nontx_timer_id), jiffies + 1);
	} else {
		if (netif_running(bdp->device))
			netif_wake_queue(bdp->device);
	}
	spin_unlock_bh(&(bdp->bd_non_tx_lock));
}
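/**
 * e100_vlan_rx_register - enable or disable VLAN tag insert/strip
 * @netdev: adapter's net_device struct
 * @grp: VLAN group registered by the 802.1q layer, or NULL
 *
 * Reconfigures the device with VLAN tag handling on when a group is
 * registered and off when it is unregistered.
 */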
static void
e100_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
{
	struct e100_private *bdp = netdev->priv;

	e100_disable_clear_intr(bdp);
	bdp->vlgrp = grp;

	if (grp) {
		/* enable VLAN tag insert/strip */
		e100_config_vlan_drop(bdp, true);
	} else {
		/* disable VLAN tag insert/strip */
		e100_config_vlan_drop(bdp, false);
	}

	e100_config(bdp);
	e100_set_intr_mask(bdp);
}
static void
e100_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	/* We don't do VLAN filtering */
	return;
}

static void
e100_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct e100_private *bdp = netdev->priv;

	if (bdp->vlgrp)
		bdp->vlgrp->vlan_devices[vid] = NULL;
	/* We don't do VLAN filtering */
	return;
}
#ifdef CONFIG_PM
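/**
 * e100_notify_reboot - reboot notifier callback
 * @nb: our notifier_block
 * @event: system power transition event
 * @p: unused
 *
 * On shutdown, halt or power-off, suspends every adapter bound to this
 * driver so that Wake On LAN is armed before the system goes down.
 */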
static int
e100_notify_reboot(struct notifier_block *nb, unsigned long event, void *p)
{
	struct pci_dev *pdev = NULL;

	switch (event) {
	case SYS_DOWN:
	case SYS_HALT:
	case SYS_POWER_OFF:
		while ((pdev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, pdev)) != NULL) {
			if (pci_dev_driver(pdev) == &e100_driver) {
				/* Suspend only if a net_device is attached */
				if (pci_get_drvdata(pdev))
					e100_suspend(pdev, 3);
			}
		}
	}
	return NOTIFY_DONE;
}
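/**
 * e100_suspend - power management suspend routine
 * @pcid: adapter's pci_dev struct
 * @state: power state requested by the PM core
 *
 * Isolates the driver from the hardware and applies the WOL
 * configuration. If WOL or ASF is active the device goes to D3 with PME
 * enabled; otherwise it is simply disabled and powered down.
 */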
static int
e100_suspend(struct pci_dev *pcid, u32 state)
{
	struct net_device *netdev = pci_get_drvdata(pcid);
	struct e100_private *bdp = netdev->priv;

	e100_isolate_driver(bdp);
	pci_save_state(pcid, bdp->pci_state);

	/* Enable or disable WoL */
	e100_do_wol(pcid, bdp);

	/* If WOL or ASF is enabled, arm PME and go to D3 */
	if (bdp->wolopts || e100_asf_enabled(bdp)) {
		pci_enable_wake(pcid, 3, 1);	/* Enable PME for power state D3 */
		pci_set_power_state(pcid, 3);	/* Set power state to D3 */
	} else {
		/* Disable bus mastering */
		pci_disable_device(pcid);
		pci_set_power_state(pcid, state);
	}
	return 0;
}
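/**
 * e100_resume - power management resume routine
 * @pcid: adapter's pci_dev struct
 *
 * Restores full power and PCI configuration, then fully re-initializes
 * the adapter since it may have lost its context in D3.
 */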
static int
e100_resume(struct pci_dev *pcid)
{
	struct net_device *netdev = pci_get_drvdata(pcid);
	struct e100_private *bdp = netdev->priv;

	pci_set_power_state(pcid, 0);
	pci_enable_wake(pcid, 0, 0);	/* Clear PME status and disable PME */
	pci_restore_state(pcid, bdp->pci_state);

	/* Do a full device reset as well, since the device was in D3 */
	e100_deisolate_driver(bdp, true);

	return 0;
}
/**
 * e100_asf_enabled - checks if ASF is configured on the current adapter
 *	by reading registers 0xD and 0x90 in the EEPROM
 * @bdp: adapter's private data struct
 *
 * Returns: true if ASF is enabled
 */
static unsigned char
e100_asf_enabled(struct e100_private *bdp)
{
	u16 asf_reg;
	u16 smbus_addr_reg;

	if ((bdp->pdev->device >= 0x1050) && (bdp->pdev->device <= 0x1055)) {
		asf_reg = e100_eeprom_read(bdp, EEPROM_CONFIG_ASF);
		if ((asf_reg & EEPROM_FLAG_ASF)
		    && !(asf_reg & EEPROM_FLAG_GCL)) {
			smbus_addr_reg =
				e100_eeprom_read(bdp, EEPROM_SMBUS_ADDR);
			if ((smbus_addr_reg & 0xFF) != 0xFE)
				return true;
		}
	}
	return false;
}
#endif /* CONFIG_PM */
#ifdef E100_CU_DEBUG
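/**
 * e100_cu_unknown_state - debug check of the command unit state
 * @bdp: adapter's private data struct
 *
 * Returns: true if the command unit is active while executing an
 *	unknown command
 */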
unsigned char
e100_cu_unknown_state(struct e100_private *bdp)
{
	u8 scb_cmd_low;
	u16 scb_status;

	scb_cmd_low = bdp->scb->scb_cmd_low;
	scb_status = le16_to_cpu(bdp->scb->scb_status);
	/* If the CU is active and executing an unknown command */
	if (scb_status & SCB_CUS_ACTIVE && scb_cmd_low & SCB_CUC_UNKNOWN)
		return true;
	else
		return false;
}
#endif