Allow coexistence of N build and AC build.
[tomato.git] / release / src-rt-6.x / linux / linux-2.6 / drivers / net / tulip / dmfe.c
blob4ed67ff0e81ef9b8329ba922e9015e0f582645e3
1 /*
2 A Davicom DM9102/DM9102A/DM9102A+DM9801/DM9102A+DM9802 NIC fast
3 ethernet driver for Linux.
4 Copyright (C) 1997 Sten Wang
6 This program is free software; you can redistribute it and/or
7 modify it under the terms of the GNU General Public License
8 as published by the Free Software Foundation; either version 2
9 of the License, or (at your option) any later version.
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 DAVICOM Web-Site: www.davicom.com.tw
18 Author: Sten Wang, 886-3-5798797-8517, E-mail: sten_wang@davicom.com.tw
19 Maintainer: Tobias Ringstrom <tori@unhappy.mine.nu>
21 (C)Copyright 1997-1998 DAVICOM Semiconductor,Inc. All Rights Reserved.
23 Marcelo Tosatti <marcelo@conectiva.com.br> :
24 Made it compile in 2.3 (device to net_device)
26 Alan Cox <alan@redhat.com> :
27 Cleaned up for kernel merge.
28 Removed the back compatibility support
29 Reformatted, fixing spelling etc as I went
30 Removed IRQ 0-15 assumption
32 Jeff Garzik <jgarzik@pobox.com> :
33 Updated to use new PCI driver API.
34 Resource usage cleanups.
35 Report driver version to user.
37 Tobias Ringstrom <tori@unhappy.mine.nu> :
38 Cleaned up and added SMP safety. Thanks go to Jeff Garzik,
39 Andrew Morton and Frank Davis for the SMP safety fixes.
41 Vojtech Pavlik <vojtech@suse.cz> :
42 Cleaned up pointer arithmetics.
43 Fixed a lot of 64bit issues.
44 Cleaned up printk()s a bit.
45 Fixed some obvious big endian problems.
47 Tobias Ringstrom <tori@unhappy.mine.nu> :
48 Use time_after for jiffies calculation. Added ethtool
49 support. Updated PCI resource allocation. Do not
50 forget to unmap PCI mapped skbs.
52 Alan Cox <alan@redhat.com>
53 Added new PCI identifiers provided by Clear Zhang at ALi
54 for their 1563 ethernet device.
56 TODO
58 Check on 64 bit boxes.
59 Check and fix on big endian boxes.
61 Test and make sure PCI latency is now correct for all cases.
64 #define DRV_NAME "dmfe"
65 #define DRV_VERSION "1.36.4"
66 #define DRV_RELDATE "2002-01-17"
68 #include <linux/module.h>
69 #include <linux/kernel.h>
70 #include <linux/string.h>
71 #include <linux/timer.h>
72 #include <linux/ptrace.h>
73 #include <linux/errno.h>
74 #include <linux/ioport.h>
75 #include <linux/slab.h>
76 #include <linux/interrupt.h>
77 #include <linux/pci.h>
78 #include <linux/dma-mapping.h>
79 #include <linux/init.h>
80 #include <linux/netdevice.h>
81 #include <linux/etherdevice.h>
82 #include <linux/ethtool.h>
83 #include <linux/skbuff.h>
84 #include <linux/delay.h>
85 #include <linux/spinlock.h>
86 #include <linux/crc32.h>
87 #include <linux/bitops.h>
89 #include <asm/processor.h>
90 #include <asm/io.h>
91 #include <asm/dma.h>
92 #include <asm/uaccess.h>
93 #include <asm/irq.h>
/* Board/System/Debug information/definition ---------------- */

/* PCI IDs, packed as (device << 16) | vendor (see CHK_IO_SIZE below) */
#define PCI_DM9132_ID   0x91321282      /* Davicom DM9132 ID */
#define PCI_DM9102_ID   0x91021282      /* Davicom DM9102 ID */
#define PCI_DM9100_ID   0x91001282      /* Davicom DM9100 ID */
#define PCI_DM9009_ID   0x90091282      /* Davicom DM9009 ID */

#define DM9102_IO_SIZE  0x80
#define DM9102A_IO_SIZE 0x100
#define TX_MAX_SEND_CNT 0x1             /* Maximum tx packet per time */
#define TX_DESC_CNT     0x10            /* Allocated Tx descriptors */
#define RX_DESC_CNT     0x20            /* Allocated Rx descriptors */
#define TX_FREE_DESC_CNT (TX_DESC_CNT - 2)      /* Max TX packet count */
#define TX_WAKE_DESC_CNT (TX_DESC_CNT - 3)      /* TX wakeup count */
#define DESC_ALL_CNT    (TX_DESC_CNT + RX_DESC_CNT)
#define TX_BUF_ALLOC    0x600
#define RX_ALLOC_SIZE   0x620
#define DM910X_RESET    1
#define CR0_DEFAULT     0x00E00000      /* TX & RX burst mode */
#define CR6_DEFAULT     0x00080000      /* HD */
#define CR7_DEFAULT     0x180c1
#define CR15_DEFAULT    0x06            /* TxJabber RxWatchdog */
#define TDES0_ERR_MASK  0x4302          /* TXJT, LC, EC, FUE */
#define MAX_PACKET_SIZE 1514
#define DMFE_MAX_MULTICAST 14
#define RX_COPY_SIZE    100
#define MAX_CHECK_PACKET 0x8000
#define DM9801_NOISE_FLOOR 8
#define DM9802_NOISE_FLOOR 5

/* Wake-on-LAN event selection bits (programmed into the chip) */
#define DMFE_WOL_LINKCHANGE     0x20000000
#define DMFE_WOL_SAMPLEPACKET   0x10000000
#define DMFE_WOL_MAGICPACKET    0x08000000

/* Media mode codes used by media_mode/op_mode and the "mode" module param */
#define DMFE_10MHF      0
#define DMFE_100MHF     1
#define DMFE_10MFD      4
#define DMFE_100MFD     5
#define DMFE_AUTO       8
#define DMFE_1M_HPNA    0x10

/* CR6 TX threshold encodings */
#define DMFE_TXTH_72    0x400000        /* TX TH 72 byte */
#define DMFE_TXTH_96    0x404000        /* TX TH 96 byte */
#define DMFE_TXTH_128   0x0000          /* TX TH 128 byte */
#define DMFE_TXTH_256   0x4000          /* TX TH 256 byte */
#define DMFE_TXTH_512   0x8000          /* TX TH 512 byte */
#define DMFE_TXTH_1K    0xC000          /* TX TH 1K byte */

#define DMFE_TIMER_WUT  (jiffies + HZ * 1)      /* timer wakeup time : 1 second */
#define DMFE_TX_TIMEOUT ((3*HZ)/2)      /* tx packet time-out time 1.5 s" */
#define DMFE_TX_KICK    (HZ/2)          /* tx packet Kick-out time 0.5 s" */

/* Debug trace: prints when module-level dmfe_debug or the call-site flag is set */
#define DMFE_DBUG(dbug_now, msg, value)                 \
	do {                                            \
		if (dmfe_debug || (dbug_now))           \
			printk(KERN_ERR DRV_NAME ": %s %lx\n",\
			       (msg), (long) (value));  \
	} while (0)

#define SHOW_MEDIA_TYPE(mode) \
	printk (KERN_INFO DRV_NAME ": Change Speed to %sMhz %s duplex\n" , \
		(mode & 1) ? "100":"10", (mode & 4) ? "full":"half");

/* CR9 definition: SROM/MII */
#define CR9_SROM_READ   0x4800
#define CR9_SRCS        0x1
#define CR9_SRCLK       0x2
#define CR9_CRDOUT      0x8
#define SROM_DATA_0     0x0
#define SROM_DATA_1     0x4
#define PHY_DATA_1      0x20000
#define PHY_DATA_0      0x00000
#define MDCLKH          0x10000

#define PHY_POWER_DOWN  0x800

#define SROM_V41_CODE   0x14

/* NOTE(review): multi-statement macro not wrapped in do{}while(0);
   safe at current call sites but fragile under un-braced if/else */
#define SROM_CLK_WRITE(data, ioaddr) \
	outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr); \
	udelay(5); \
	outl(data|CR9_SROM_READ|CR9_SRCS|CR9_SRCLK,ioaddr); \
	udelay(5); \
	outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr); \
	udelay(5);

/* DM9132 always has the larger window; other chips from revision 0x02000030 up */
#define __CHK_IO_SIZE(pci_id, dev_rev) \
 (( ((pci_id)==PCI_DM9132_ID) || ((dev_rev) >= 0x02000030) ) ? \
	DM9102A_IO_SIZE: DM9102_IO_SIZE)

#define CHK_IO_SIZE(pci_dev, dev_rev) \
	(__CHK_IO_SIZE(((pci_dev)->device << 16) | (pci_dev)->vendor, dev_rev))

/* Sten Check */
#define DEVICE net_device
193 /* Structure/enum declaration ------------------------------- */
/* Hardware TX descriptor: first four words are read by the chip (little
 * endian); the trailing pointers are driver bookkeeping only. 32-byte
 * aligned so the chip-visible part never straddles a cache line. */
struct tx_desc {
	__le32 tdes0, tdes1, tdes2, tdes3; /* Data for the card */
	char *tx_buf_ptr;		/* Data for us */
	struct tx_desc *next_tx_desc;	/* ring link (driver only) */
} __attribute__(( aligned(32) ));
/* Hardware RX descriptor: layout mirrors tx_desc — chip-owned words
 * first, then driver-only skb and ring-link pointers. */
struct rx_desc {
	__le32 rdes0, rdes1, rdes2, rdes3; /* Data for the card */
	struct sk_buff *rx_skb_ptr;	/* Data for us */
	struct rx_desc *next_rx_desc;	/* ring link (driver only) */
} __attribute__(( aligned(32) ));
206 struct dmfe_board_info {
207 u32 chip_id; /* Chip vendor/Device ID */
208 u32 chip_revision; /* Chip revision */
209 struct DEVICE *next_dev; /* next device */
210 struct pci_dev *pdev; /* PCI device */
211 spinlock_t lock;
213 long ioaddr; /* I/O base address */
214 u32 cr0_data;
215 u32 cr5_data;
216 u32 cr6_data;
217 u32 cr7_data;
218 u32 cr15_data;
220 /* pointer for memory physical address */
221 dma_addr_t buf_pool_dma_ptr; /* Tx buffer pool memory */
222 dma_addr_t buf_pool_dma_start; /* Tx buffer pool align dword */
223 dma_addr_t desc_pool_dma_ptr; /* descriptor pool memory */
224 dma_addr_t first_tx_desc_dma;
225 dma_addr_t first_rx_desc_dma;
227 /* descriptor pointer */
228 unsigned char *buf_pool_ptr; /* Tx buffer pool memory */
229 unsigned char *buf_pool_start; /* Tx buffer pool align dword */
230 unsigned char *desc_pool_ptr; /* descriptor pool memory */
231 struct tx_desc *first_tx_desc;
232 struct tx_desc *tx_insert_ptr;
233 struct tx_desc *tx_remove_ptr;
234 struct rx_desc *first_rx_desc;
235 struct rx_desc *rx_insert_ptr;
236 struct rx_desc *rx_ready_ptr; /* packet come pointer */
237 unsigned long tx_packet_cnt; /* transmitted packet count */
238 unsigned long tx_queue_cnt; /* wait to send packet count */
239 unsigned long rx_avail_cnt; /* available rx descriptor count */
240 unsigned long interval_rx_cnt; /* rx packet count a callback time */
242 u16 HPNA_command; /* For HPNA register 16 */
243 u16 HPNA_timer; /* For HPNA remote device check */
244 u16 dbug_cnt;
245 u16 NIC_capability; /* NIC media capability */
246 u16 PHY_reg4; /* Saved Phyxcer register 4 value */
248 u8 HPNA_present; /* 0:none, 1:DM9801, 2:DM9802 */
249 u8 chip_type; /* Keep DM9102A chip type */
250 u8 media_mode; /* user specify media mode */
251 u8 op_mode; /* real work media mode */
252 u8 phy_addr;
253 u8 wait_reset; /* Hardware failed, need to reset */
254 u8 dm910x_chk_mode; /* Operating mode check */
255 u8 first_in_callback; /* Flag to record state */
256 u8 wol_mode; /* user WOL settings */
257 struct timer_list timer;
259 /* System defined statistic counter */
260 struct net_device_stats stats;
262 /* Driver defined statistic counter */
263 unsigned long tx_fifo_underrun;
264 unsigned long tx_loss_carrier;
265 unsigned long tx_no_carrier;
266 unsigned long tx_late_collision;
267 unsigned long tx_excessive_collision;
268 unsigned long tx_jabber_timeout;
269 unsigned long reset_count;
270 unsigned long reset_cr8;
271 unsigned long reset_fatal;
272 unsigned long reset_TXtimeout;
274 /* NIC SROM data */
275 unsigned char srom[128];
/* I/O-port offsets of the DM910x control registers (CR0..CR15); each
 * register occupies 8 bytes of I/O space. (Closing brace restored.) */
enum dmfe_offsets {
	DCR0 = 0x00, DCR1 = 0x08, DCR2 = 0x10, DCR3 = 0x18, DCR4 = 0x20,
	DCR5 = 0x28, DCR6 = 0x30, DCR7 = 0x38, DCR8 = 0x40, DCR9 = 0x48,
	DCR10 = 0x50, DCR11 = 0x58, DCR12 = 0x60, DCR13 = 0x68, DCR14 = 0x70,
	DCR15 = 0x78
};
/* Bit definitions for operation-mode register CR6. (Closing brace restored.) */
enum dmfe_CR6_bits {
	CR6_RXSC = 0x2, CR6_PBF = 0x8, CR6_PM = 0x40, CR6_PAM = 0x80,
	CR6_FDM = 0x200, CR6_TXSC = 0x2000, CR6_STI = 0x100000,
	CR6_SFT = 0x200000, CR6_RXA = 0x40000000, CR6_NO_PURGE = 0x20000000
};
/* Global variable declaration ----------------------------- */
static int __devinitdata printed_version;	/* print banner only once */
static char version[] __devinitdata =
	KERN_INFO DRV_NAME ": Davicom DM9xxx net driver, version "
	DRV_VERSION " (" DRV_RELDATE ")\n";

static int dmfe_debug;				/* copied from "debug" param at init */
static unsigned char dmfe_media_mode = DMFE_AUTO; /* copied from "mode" param */
static u32 dmfe_cr6_user_set;			/* copied from "cr6set" param */

/* For module input parameter */
static int debug;
static u32 cr6set;
static unsigned char mode = 8;			/* 8 == DMFE_AUTO */
static u8 chkmode = 1;
static u8 HPNA_mode;		/* Default: Low Power/High Speed */
static u8 HPNA_rx_cmd;		/* Default: Disable Rx remote command */
static u8 HPNA_tx_cmd;		/* Default: Don't issue remote command */
static u8 HPNA_NoiseFloor;	/* Default: HPNA NoiseFloor */
static u8 SF_mode;		/* Special Function: 1:VLAN, 2:RX Flow Control
				   4: TX pause packet */
/* function declaration ------------------------------------- */
/* Forward declarations for the driver's internal routines; bodies below. */
static int dmfe_open(struct DEVICE *);
static int dmfe_start_xmit(struct sk_buff *, struct DEVICE *);
static int dmfe_stop(struct DEVICE *);
static struct net_device_stats * dmfe_get_stats(struct DEVICE *);
static void dmfe_set_filter_mode(struct DEVICE *);
static const struct ethtool_ops netdev_ethtool_ops;
static u16 read_srom_word(long ,int);
static irqreturn_t dmfe_interrupt(int , void *);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void poll_dmfe (struct net_device *dev);
#endif
static void dmfe_descriptor_init(struct dmfe_board_info *, unsigned long);
static void allocate_rx_buffer(struct dmfe_board_info *);
static void update_cr6(u32, unsigned long);
static void send_filter_frame(struct DEVICE * ,int);
static void dm9132_id_table(struct DEVICE * ,int);
static u16 phy_read(unsigned long, u8, u8, u32);
static void phy_write(unsigned long, u8, u8, u16, u32);
static void phy_write_1bit(unsigned long, u32);
static u16 phy_read_1bit(unsigned long);
static u8 dmfe_sense_speed(struct dmfe_board_info *);
static void dmfe_process_mode(struct dmfe_board_info *);
static void dmfe_timer(unsigned long);
static inline u32 cal_CRC(unsigned char *, unsigned int, u8);
static void dmfe_rx_packet(struct DEVICE *, struct dmfe_board_info *);
static void dmfe_free_tx_pkt(struct DEVICE *, struct dmfe_board_info *);
static void dmfe_reuse_skb(struct dmfe_board_info *, struct sk_buff *);
static void dmfe_dynamic_reset(struct DEVICE *);
static void dmfe_free_rxbuffer(struct dmfe_board_info *);
static void dmfe_init_dm910x(struct DEVICE *);
static void dmfe_parse_srom(struct dmfe_board_info *);
static void dmfe_program_DM9801(struct dmfe_board_info *, int);
static void dmfe_program_DM9802(struct dmfe_board_info *);
static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * );
static void dmfe_set_phyxcer(struct dmfe_board_info *);
351 /* DM910X network board routine ---------------------------- */
354 * Search DM910X board ,allocate space and register it
357 static int __devinit dmfe_init_one (struct pci_dev *pdev,
358 const struct pci_device_id *ent)
360 struct dmfe_board_info *db; /* board information structure */
361 struct net_device *dev;
362 u32 dev_rev, pci_pmr;
363 int i, err;
365 DMFE_DBUG(0, "dmfe_init_one()", 0);
367 if (!printed_version++)
368 printk(version);
370 /* Init network device */
371 dev = alloc_etherdev(sizeof(*db));
372 if (dev == NULL)
373 return -ENOMEM;
374 SET_MODULE_OWNER(dev);
375 SET_NETDEV_DEV(dev, &pdev->dev);
377 if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
378 printk(KERN_WARNING DRV_NAME
379 ": 32-bit PCI DMA not available.\n");
380 err = -ENODEV;
381 goto err_out_free;
384 /* Enable Master/IO access, Disable memory access */
385 err = pci_enable_device(pdev);
386 if (err)
387 goto err_out_free;
389 if (!pci_resource_start(pdev, 0)) {
390 printk(KERN_ERR DRV_NAME ": I/O base is zero\n");
391 err = -ENODEV;
392 goto err_out_disable;
395 /* Read Chip revision */
396 pci_read_config_dword(pdev, PCI_REVISION_ID, &dev_rev);
398 if (pci_resource_len(pdev, 0) < (CHK_IO_SIZE(pdev, dev_rev)) ) {
399 printk(KERN_ERR DRV_NAME ": Allocated I/O size too small\n");
400 err = -ENODEV;
401 goto err_out_disable;
404 #if 0 /* pci_{enable_device,set_master} sets minimum latency for us now */
406 /* Set Latency Timer 80h */
407 /* FIXME: setting values > 32 breaks some SiS 559x stuff.
408 Need a PCI quirk.. */
410 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x80);
411 #endif
413 if (pci_request_regions(pdev, DRV_NAME)) {
414 printk(KERN_ERR DRV_NAME ": Failed to request PCI regions\n");
415 err = -ENODEV;
416 goto err_out_disable;
419 /* Init system & device */
420 db = netdev_priv(dev);
422 /* Allocate Tx/Rx descriptor memory */
423 db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) *
424 DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr);
426 db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC *
427 TX_DESC_CNT + 4, &db->buf_pool_dma_ptr);
429 db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr;
430 db->first_tx_desc_dma = db->desc_pool_dma_ptr;
431 db->buf_pool_start = db->buf_pool_ptr;
432 db->buf_pool_dma_start = db->buf_pool_dma_ptr;
434 db->chip_id = ent->driver_data;
435 db->ioaddr = pci_resource_start(pdev, 0);
436 db->chip_revision = dev_rev;
437 db->wol_mode = 0;
439 db->pdev = pdev;
441 dev->base_addr = db->ioaddr;
442 dev->irq = pdev->irq;
443 pci_set_drvdata(pdev, dev);
444 dev->open = &dmfe_open;
445 dev->hard_start_xmit = &dmfe_start_xmit;
446 dev->stop = &dmfe_stop;
447 dev->get_stats = &dmfe_get_stats;
448 dev->set_multicast_list = &dmfe_set_filter_mode;
449 #ifdef CONFIG_NET_POLL_CONTROLLER
450 dev->poll_controller = &poll_dmfe;
451 #endif
452 dev->ethtool_ops = &netdev_ethtool_ops;
453 netif_carrier_off(dev);
454 spin_lock_init(&db->lock);
456 pci_read_config_dword(pdev, 0x50, &pci_pmr);
457 pci_pmr &= 0x70000;
458 if ( (pci_pmr == 0x10000) && (dev_rev == 0x02000031) )
459 db->chip_type = 1; /* DM9102A E3 */
460 else
461 db->chip_type = 0;
463 /* read 64 word srom data */
464 for (i = 0; i < 64; i++)
465 ((__le16 *) db->srom)[i] =
466 cpu_to_le16(read_srom_word(db->ioaddr, i));
468 /* Set Node address */
469 for (i = 0; i < 6; i++)
470 dev->dev_addr[i] = db->srom[20 + i];
472 err = register_netdev (dev);
473 if (err)
474 goto err_out_res;
476 printk(KERN_INFO "%s: Davicom DM%04lx at pci%s,",
477 dev->name,
478 ent->driver_data >> 16,
479 pci_name(pdev));
480 for (i = 0; i < 6; i++)
481 printk("%c%02x", i ? ':' : ' ', dev->dev_addr[i]);
482 printk(", irq %d.\n", dev->irq);
484 pci_set_master(pdev);
486 return 0;
488 err_out_res:
489 pci_release_regions(pdev);
490 err_out_disable:
491 pci_disable_device(pdev);
492 err_out_free:
493 pci_set_drvdata(pdev, NULL);
494 free_netdev(dev);
496 return err;
500 static void __devexit dmfe_remove_one (struct pci_dev *pdev)
502 struct net_device *dev = pci_get_drvdata(pdev);
503 struct dmfe_board_info *db = netdev_priv(dev);
505 DMFE_DBUG(0, "dmfe_remove_one()", 0);
507 if (dev) {
509 unregister_netdev(dev);
511 pci_free_consistent(db->pdev, sizeof(struct tx_desc) *
512 DESC_ALL_CNT + 0x20, db->desc_pool_ptr,
513 db->desc_pool_dma_ptr);
514 pci_free_consistent(db->pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
515 db->buf_pool_ptr, db->buf_pool_dma_ptr);
516 pci_release_regions(pdev);
517 free_netdev(dev); /* free board information */
519 pci_set_drvdata(pdev, NULL);
522 DMFE_DBUG(0, "dmfe_remove_one() exit", 0);
527 * Open the interface.
528 * The interface is opened whenever "ifconfig" actives it.
531 static int dmfe_open(struct DEVICE *dev)
533 int ret;
534 struct dmfe_board_info *db = netdev_priv(dev);
536 DMFE_DBUG(0, "dmfe_open", 0);
538 ret = request_irq(dev->irq, &dmfe_interrupt,
539 IRQF_SHARED, dev->name, dev);
540 if (ret)
541 return ret;
543 /* system variable init */
544 db->cr6_data = CR6_DEFAULT | dmfe_cr6_user_set;
545 db->tx_packet_cnt = 0;
546 db->tx_queue_cnt = 0;
547 db->rx_avail_cnt = 0;
548 db->wait_reset = 0;
550 db->first_in_callback = 0;
551 db->NIC_capability = 0xf; /* All capability*/
552 db->PHY_reg4 = 0x1e0;
554 /* CR6 operation mode decision */
555 if ( !chkmode || (db->chip_id == PCI_DM9132_ID) ||
556 (db->chip_revision >= 0x02000030) ) {
557 db->cr6_data |= DMFE_TXTH_256;
558 db->cr0_data = CR0_DEFAULT;
559 db->dm910x_chk_mode=4; /* Enter the normal mode */
560 } else {
561 db->cr6_data |= CR6_SFT; /* Store & Forward mode */
562 db->cr0_data = 0;
563 db->dm910x_chk_mode = 1; /* Enter the check mode */
566 /* Initilize DM910X board */
567 dmfe_init_dm910x(dev);
569 /* Active System Interface */
570 netif_wake_queue(dev);
572 /* set and active a timer process */
573 init_timer(&db->timer);
574 db->timer.expires = DMFE_TIMER_WUT + HZ * 2;
575 db->timer.data = (unsigned long)dev;
576 db->timer.function = &dmfe_timer;
577 add_timer(&db->timer);
579 return 0;
/* Initialize DM910X board
 * Reset DM910X board
 * Initialize TX/Rx descriptor chain structure
 * Send the set-up frame
 * Enable Tx/Rx machine
 */
590 static void dmfe_init_dm910x(struct DEVICE *dev)
592 struct dmfe_board_info *db = netdev_priv(dev);
593 unsigned long ioaddr = db->ioaddr;
595 DMFE_DBUG(0, "dmfe_init_dm910x()", 0);
597 /* Reset DM910x MAC controller */
598 outl(DM910X_RESET, ioaddr + DCR0); /* RESET MAC */
599 udelay(100);
600 outl(db->cr0_data, ioaddr + DCR0);
601 udelay(5);
603 /* Phy addr : DM910(A)2/DM9132/9801, phy address = 1 */
604 db->phy_addr = 1;
606 /* Parser SROM and media mode */
607 dmfe_parse_srom(db);
608 db->media_mode = dmfe_media_mode;
610 /* RESET Phyxcer Chip by GPR port bit 7 */
611 outl(0x180, ioaddr + DCR12); /* Let bit 7 output port */
612 if (db->chip_id == PCI_DM9009_ID) {
613 outl(0x80, ioaddr + DCR12); /* Issue RESET signal */
614 mdelay(300); /* Delay 300 ms */
616 outl(0x0, ioaddr + DCR12); /* Clear RESET signal */
618 /* Process Phyxcer Media Mode */
619 if ( !(db->media_mode & 0x10) ) /* Force 1M mode */
620 dmfe_set_phyxcer(db);
622 /* Media Mode Process */
623 if ( !(db->media_mode & DMFE_AUTO) )
624 db->op_mode = db->media_mode; /* Force Mode */
626 /* Initiliaze Transmit/Receive decriptor and CR3/4 */
627 dmfe_descriptor_init(db, ioaddr);
629 /* Init CR6 to program DM910x operation */
630 update_cr6(db->cr6_data, ioaddr);
632 /* Send setup frame */
633 if (db->chip_id == PCI_DM9132_ID)
634 dm9132_id_table(dev, dev->mc_count); /* DM9132 */
635 else
636 send_filter_frame(dev, dev->mc_count); /* DM9102/DM9102A */
638 /* Init CR7, interrupt active bit */
639 db->cr7_data = CR7_DEFAULT;
640 outl(db->cr7_data, ioaddr + DCR7);
642 /* Init CR15, Tx jabber and Rx watchdog timer */
643 outl(db->cr15_data, ioaddr + DCR15);
645 /* Enable DM910X Tx/Rx function */
646 db->cr6_data |= CR6_RXSC | CR6_TXSC | 0x40000;
647 update_cr6(db->cr6_data, ioaddr);
652 * Hardware start transmission.
653 * Send a packet to media from the upper layer.
656 static int dmfe_start_xmit(struct sk_buff *skb, struct DEVICE *dev)
658 struct dmfe_board_info *db = netdev_priv(dev);
659 struct tx_desc *txptr;
660 unsigned long flags;
662 DMFE_DBUG(0, "dmfe_start_xmit", 0);
664 /* Resource flag check */
665 netif_stop_queue(dev);
667 /* Too large packet check */
668 if (skb->len > MAX_PACKET_SIZE) {
669 printk(KERN_ERR DRV_NAME ": big packet = %d\n", (u16)skb->len);
670 dev_kfree_skb(skb);
671 return 0;
674 spin_lock_irqsave(&db->lock, flags);
676 /* No Tx resource check, it never happen nromally */
677 if (db->tx_queue_cnt >= TX_FREE_DESC_CNT) {
678 spin_unlock_irqrestore(&db->lock, flags);
679 printk(KERN_ERR DRV_NAME ": No Tx resource %ld\n",
680 db->tx_queue_cnt);
681 return 1;
684 /* Disable NIC interrupt */
685 outl(0, dev->base_addr + DCR7);
687 /* transmit this packet */
688 txptr = db->tx_insert_ptr;
689 skb_copy_from_linear_data(skb, txptr->tx_buf_ptr, skb->len);
690 txptr->tdes1 = cpu_to_le32(0xe1000000 | skb->len);
692 /* Point to next transmit free descriptor */
693 db->tx_insert_ptr = txptr->next_tx_desc;
695 /* Transmit Packet Process */
696 if ( (!db->tx_queue_cnt) && (db->tx_packet_cnt < TX_MAX_SEND_CNT) ) {
697 txptr->tdes0 = cpu_to_le32(0x80000000); /* Set owner bit */
698 db->tx_packet_cnt++; /* Ready to send */
699 outl(0x1, dev->base_addr + DCR1); /* Issue Tx polling */
700 dev->trans_start = jiffies; /* saved time stamp */
701 } else {
702 db->tx_queue_cnt++; /* queue TX packet */
703 outl(0x1, dev->base_addr + DCR1); /* Issue Tx polling */
706 /* Tx resource check */
707 if ( db->tx_queue_cnt < TX_FREE_DESC_CNT )
708 netif_wake_queue(dev);
710 /* Restore CR7 to enable interrupt */
711 spin_unlock_irqrestore(&db->lock, flags);
712 outl(db->cr7_data, dev->base_addr + DCR7);
714 /* free this SKB */
715 dev_kfree_skb(skb);
717 return 0;
722 * Stop the interface.
723 * The interface is stopped when it is brought.
726 static int dmfe_stop(struct DEVICE *dev)
728 struct dmfe_board_info *db = netdev_priv(dev);
729 unsigned long ioaddr = dev->base_addr;
731 DMFE_DBUG(0, "dmfe_stop", 0);
733 /* disable system */
734 netif_stop_queue(dev);
736 /* deleted timer */
737 del_timer_sync(&db->timer);
739 /* Reset & stop DM910X board */
740 outl(DM910X_RESET, ioaddr + DCR0);
741 udelay(5);
742 phy_write(db->ioaddr, db->phy_addr, 0, 0x8000, db->chip_id);
744 /* free interrupt */
745 free_irq(dev->irq, dev);
747 /* free allocated rx buffer */
748 dmfe_free_rxbuffer(db);
750 #if 0
751 /* show statistic counter */
752 printk(DRV_NAME ": FU:%lx EC:%lx LC:%lx NC:%lx"
753 " LOC:%lx TXJT:%lx RESET:%lx RCR8:%lx FAL:%lx TT:%lx\n",
754 db->tx_fifo_underrun, db->tx_excessive_collision,
755 db->tx_late_collision, db->tx_no_carrier, db->tx_loss_carrier,
756 db->tx_jabber_timeout, db->reset_count, db->reset_cr8,
757 db->reset_fatal, db->reset_TXtimeout);
758 #endif
760 return 0;
765 * DM9102 insterrupt handler
766 * receive the packet to upper layer, free the transmitted packet
769 static irqreturn_t dmfe_interrupt(int irq, void *dev_id)
771 struct DEVICE *dev = dev_id;
772 struct dmfe_board_info *db = netdev_priv(dev);
773 unsigned long ioaddr = dev->base_addr;
774 unsigned long flags;
776 DMFE_DBUG(0, "dmfe_interrupt()", 0);
778 spin_lock_irqsave(&db->lock, flags);
780 /* Got DM910X status */
781 db->cr5_data = inl(ioaddr + DCR5);
782 outl(db->cr5_data, ioaddr + DCR5);
783 if ( !(db->cr5_data & 0xc1) ) {
784 spin_unlock_irqrestore(&db->lock, flags);
785 return IRQ_HANDLED;
788 /* Disable all interrupt in CR7 to solve the interrupt edge problem */
789 outl(0, ioaddr + DCR7);
791 /* Check system status */
792 if (db->cr5_data & 0x2000) {
793 /* system bus error happen */
794 DMFE_DBUG(1, "System bus error happen. CR5=", db->cr5_data);
795 db->reset_fatal++;
796 db->wait_reset = 1; /* Need to RESET */
797 spin_unlock_irqrestore(&db->lock, flags);
798 return IRQ_HANDLED;
801 /* Received the coming packet */
802 if ( (db->cr5_data & 0x40) && db->rx_avail_cnt )
803 dmfe_rx_packet(dev, db);
805 /* reallocate rx descriptor buffer */
806 if (db->rx_avail_cnt<RX_DESC_CNT)
807 allocate_rx_buffer(db);
809 /* Free the transmitted descriptor */
810 if ( db->cr5_data & 0x01)
811 dmfe_free_tx_pkt(dev, db);
813 /* Mode Check */
814 if (db->dm910x_chk_mode & 0x2) {
815 db->dm910x_chk_mode = 0x4;
816 db->cr6_data |= 0x100;
817 update_cr6(db->cr6_data, db->ioaddr);
820 /* Restore CR7 to enable interrupt mask */
821 outl(db->cr7_data, ioaddr + DCR7);
823 spin_unlock_irqrestore(&db->lock, flags);
824 return IRQ_HANDLED;
828 #ifdef CONFIG_NET_POLL_CONTROLLER
830 * Polling 'interrupt' - used by things like netconsole to send skbs
831 * without having to re-enable interrupts. It's not called while
832 * the interrupt routine is executing.
835 static void poll_dmfe (struct net_device *dev)
837 /* disable_irq here is not very nice, but with the lockless
838 interrupt handler we have no other choice. */
839 disable_irq(dev->irq);
840 dmfe_interrupt (dev->irq, dev);
841 enable_irq(dev->irq);
843 #endif
846 * Free TX resource after TX complete
849 static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info * db)
851 struct tx_desc *txptr;
852 unsigned long ioaddr = dev->base_addr;
853 u32 tdes0;
855 txptr = db->tx_remove_ptr;
856 while(db->tx_packet_cnt) {
857 tdes0 = le32_to_cpu(txptr->tdes0);
858 /* printk(DRV_NAME ": tdes0=%x\n", tdes0); */
859 if (tdes0 & 0x80000000)
860 break;
862 /* A packet sent completed */
863 db->tx_packet_cnt--;
864 db->stats.tx_packets++;
866 /* Transmit statistic counter */
867 if ( tdes0 != 0x7fffffff ) {
868 /* printk(DRV_NAME ": tdes0=%x\n", tdes0); */
869 db->stats.collisions += (tdes0 >> 3) & 0xf;
870 db->stats.tx_bytes += le32_to_cpu(txptr->tdes1) & 0x7ff;
871 if (tdes0 & TDES0_ERR_MASK) {
872 db->stats.tx_errors++;
874 if (tdes0 & 0x0002) { /* UnderRun */
875 db->tx_fifo_underrun++;
876 if ( !(db->cr6_data & CR6_SFT) ) {
877 db->cr6_data = db->cr6_data | CR6_SFT;
878 update_cr6(db->cr6_data, db->ioaddr);
881 if (tdes0 & 0x0100)
882 db->tx_excessive_collision++;
883 if (tdes0 & 0x0200)
884 db->tx_late_collision++;
885 if (tdes0 & 0x0400)
886 db->tx_no_carrier++;
887 if (tdes0 & 0x0800)
888 db->tx_loss_carrier++;
889 if (tdes0 & 0x4000)
890 db->tx_jabber_timeout++;
894 txptr = txptr->next_tx_desc;
895 }/* End of while */
897 /* Update TX remove pointer to next */
898 db->tx_remove_ptr = txptr;
900 /* Send the Tx packet in queue */
901 if ( (db->tx_packet_cnt < TX_MAX_SEND_CNT) && db->tx_queue_cnt ) {
902 txptr->tdes0 = cpu_to_le32(0x80000000); /* Set owner bit */
903 db->tx_packet_cnt++; /* Ready to send */
904 db->tx_queue_cnt--;
905 outl(0x1, ioaddr + DCR1); /* Issue Tx polling */
906 dev->trans_start = jiffies; /* saved time stamp */
909 /* Resource available check */
910 if ( db->tx_queue_cnt < TX_WAKE_DESC_CNT )
911 netif_wake_queue(dev); /* Active upper layer, send again */
916 * Calculate the CRC valude of the Rx packet
917 * flag = 1 : return the reverse CRC (for the received packet CRC)
918 * 0 : return the normal CRC (for Hash Table index)
921 static inline u32 cal_CRC(unsigned char * Data, unsigned int Len, u8 flag)
923 u32 crc = crc32(~0, Data, Len);
924 if (flag) crc = ~crc;
925 return crc;
930 * Receive the come packet and pass to upper layer
933 static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db)
935 struct rx_desc *rxptr;
936 struct sk_buff *skb, *newskb;
937 int rxlen;
938 u32 rdes0;
940 rxptr = db->rx_ready_ptr;
942 while(db->rx_avail_cnt) {
943 rdes0 = le32_to_cpu(rxptr->rdes0);
944 if (rdes0 & 0x80000000) /* packet owner check */
945 break;
947 db->rx_avail_cnt--;
948 db->interval_rx_cnt++;
950 pci_unmap_single(db->pdev, le32_to_cpu(rxptr->rdes2),
951 RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
953 if ( (rdes0 & 0x300) != 0x300) {
954 /* A packet without First/Last flag */
955 /* reuse this SKB */
956 DMFE_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
957 dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
958 } else {
959 /* A packet with First/Last flag */
960 rxlen = ( (rdes0 >> 16) & 0x3fff) - 4;
962 /* error summary bit check */
963 if (rdes0 & 0x8000) {
964 /* This is a error packet */
965 //printk(DRV_NAME ": rdes0: %lx\n", rdes0);
966 db->stats.rx_errors++;
967 if (rdes0 & 1)
968 db->stats.rx_fifo_errors++;
969 if (rdes0 & 2)
970 db->stats.rx_crc_errors++;
971 if (rdes0 & 0x80)
972 db->stats.rx_length_errors++;
975 if ( !(rdes0 & 0x8000) ||
976 ((db->cr6_data & CR6_PM) && (rxlen>6)) ) {
977 skb = rxptr->rx_skb_ptr;
979 /* Received Packet CRC check need or not */
980 if ( (db->dm910x_chk_mode & 1) &&
981 (cal_CRC(skb->data, rxlen, 1) !=
982 (*(u32 *) (skb->data+rxlen) ))) { /* FIXME (?) */
983 /* Found a error received packet */
984 dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
985 db->dm910x_chk_mode = 3;
986 } else {
987 /* Good packet, send to upper layer */
988 /* Shorst packet used new SKB */
989 if ((rxlen < RX_COPY_SIZE) &&
990 ((newskb = dev_alloc_skb(rxlen + 2))
991 != NULL)) {
993 skb = newskb;
994 /* size less than COPY_SIZE, allocate a rxlen SKB */
995 skb_reserve(skb, 2); /* 16byte align */
996 skb_copy_from_linear_data(rxptr->rx_skb_ptr,
997 skb_put(skb, rxlen),
998 rxlen);
999 dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
1000 } else
1001 skb_put(skb, rxlen);
1003 skb->protocol = eth_type_trans(skb, dev);
1004 netif_rx(skb);
1005 dev->last_rx = jiffies;
1006 db->stats.rx_packets++;
1007 db->stats.rx_bytes += rxlen;
1009 } else {
1010 /* Reuse SKB buffer when the packet is error */
1011 DMFE_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
1012 dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
1016 rxptr = rxptr->next_rx_desc;
1019 db->rx_ready_ptr = rxptr;
1024 * Get statistics from driver.
1027 static struct net_device_stats * dmfe_get_stats(struct DEVICE *dev)
1029 struct dmfe_board_info *db = netdev_priv(dev);
1031 DMFE_DBUG(0, "dmfe_get_stats", 0);
1032 return &db->stats;
1037 * Set DM910X multicast address
1040 static void dmfe_set_filter_mode(struct DEVICE * dev)
1042 struct dmfe_board_info *db = netdev_priv(dev);
1043 unsigned long flags;
1045 DMFE_DBUG(0, "dmfe_set_filter_mode()", 0);
1046 spin_lock_irqsave(&db->lock, flags);
1048 if (dev->flags & IFF_PROMISC) {
1049 DMFE_DBUG(0, "Enable PROM Mode", 0);
1050 db->cr6_data |= CR6_PM | CR6_PBF;
1051 update_cr6(db->cr6_data, db->ioaddr);
1052 spin_unlock_irqrestore(&db->lock, flags);
1053 return;
1056 if (dev->flags & IFF_ALLMULTI || dev->mc_count > DMFE_MAX_MULTICAST) {
1057 DMFE_DBUG(0, "Pass all multicast address", dev->mc_count);
1058 db->cr6_data &= ~(CR6_PM | CR6_PBF);
1059 db->cr6_data |= CR6_PAM;
1060 spin_unlock_irqrestore(&db->lock, flags);
1061 return;
1064 DMFE_DBUG(0, "Set multicast address", dev->mc_count);
1065 if (db->chip_id == PCI_DM9132_ID)
1066 dm9132_id_table(dev, dev->mc_count); /* DM9132 */
1067 else
1068 send_filter_frame(dev, dev->mc_count); /* DM9102/DM9102A */
1069 spin_unlock_irqrestore(&db->lock, flags);
1073 * Ethtool interace
1076 static void dmfe_ethtool_get_drvinfo(struct net_device *dev,
1077 struct ethtool_drvinfo *info)
1079 struct dmfe_board_info *np = netdev_priv(dev);
1081 strcpy(info->driver, DRV_NAME);
1082 strcpy(info->version, DRV_VERSION);
1083 if (np->pdev)
1084 strcpy(info->bus_info, pci_name(np->pdev));
1085 else
1086 sprintf(info->bus_info, "EISA 0x%lx %d",
1087 dev->base_addr, dev->irq);
1090 static int dmfe_ethtool_set_wol(struct net_device *dev,
1091 struct ethtool_wolinfo *wolinfo)
1093 struct dmfe_board_info *db = netdev_priv(dev);
1095 if (wolinfo->wolopts & (WAKE_UCAST | WAKE_MCAST | WAKE_BCAST |
1096 WAKE_ARP | WAKE_MAGICSECURE))
1097 return -EOPNOTSUPP;
1099 db->wol_mode = wolinfo->wolopts;
1100 return 0;
1103 static void dmfe_ethtool_get_wol(struct net_device *dev,
1104 struct ethtool_wolinfo *wolinfo)
1106 struct dmfe_board_info *db = netdev_priv(dev);
1108 wolinfo->supported = WAKE_PHY | WAKE_MAGIC;
1109 wolinfo->wolopts = db->wol_mode;
1110 return;
/* Ethtool entry points exported by this driver */
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo = dmfe_ethtool_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.set_wol = dmfe_ethtool_set_wol,
	.get_wol = dmfe_ethtool_get_wol,
};
/*
 *	A periodic timer routine
 *	Dynamic media sense, allocate Rx buffer...
 *
 *	Housekeeping performed on every expiry (runs under db->lock):
 *	- one-shot PHY autoneg restart on the first callback (DM9102 only)
 *	- operating-mode check and dynamic chip reset on system error or
 *	  Tx timeout
 *	- link sensing via CR12 (cross-checked against the PHY) and media
 *	  type switching
 *	- HPNA remote command polling
 *	The timer re-arms itself before returning on every path.
 */

static void dmfe_timer(unsigned long data)
{
	u32 tmp_cr8;
	unsigned char tmp_cr12;
	struct DEVICE *dev = (struct DEVICE *) data;
	struct dmfe_board_info *db = netdev_priv(dev);
	unsigned long flags;

	int link_ok, link_ok_phy;

	DMFE_DBUG(0, "dmfe_timer()", 0);
	spin_lock_irqsave(&db->lock, flags);

	/* Media mode process when Link OK before enter this route */
	if (db->first_in_callback == 0) {
		db->first_in_callback = 1;
		if (db->chip_type && (db->chip_id==PCI_DM9102_ID)) {
			/* Briefly drop to the internal transceiver
			 * (CR6 bit18=0), restart autonegotiation
			 * (BMCR 0x1000), then re-select MII and come back
			 * in 2 extra seconds to let autoneg settle. */
			db->cr6_data &= ~0x40000;
			update_cr6(db->cr6_data, db->ioaddr);
			phy_write(db->ioaddr,
				  db->phy_addr, 0, 0x1000, db->chip_id);
			db->cr6_data |= 0x40000;
			update_cr6(db->cr6_data, db->ioaddr);
			db->timer.expires = DMFE_TIMER_WUT + HZ * 2;
			add_timer(&db->timer);
			spin_unlock_irqrestore(&db->lock, flags);
			return;
		}
	}

	/* Operating Mode Check */
	if ( (db->dm910x_chk_mode & 0x1) &&
		(db->stats.rx_packets > MAX_CHECK_PACKET) )
		db->dm910x_chk_mode = 0x4;

	/* Dynamic reset DM910X : system error or transmit time-out */
	tmp_cr8 = inl(db->ioaddr + DCR8);
	/* CR8 non-zero with no packets received in the interval is treated
	 * as a chip error condition and schedules a dynamic reset. */
	if ( (db->interval_rx_cnt==0) && (tmp_cr8) ) {
		db->reset_cr8++;
		db->wait_reset = 1;
	}
	db->interval_rx_cnt = 0;

	/* TX polling kick monitor */
	if ( db->tx_packet_cnt &&
	     time_after(jiffies, dev->trans_start + DMFE_TX_KICK) ) {
		outl(0x1, dev->base_addr + DCR1);	/* Tx polling again */

		/* TX Timeout */
		if ( time_after(jiffies, dev->trans_start + DMFE_TX_TIMEOUT) ) {
			db->reset_TXtimeout++;
			db->wait_reset = 1;
			printk(KERN_WARNING "%s: Tx timeout - resetting\n",
			       dev->name);
		}
	}

	if (db->wait_reset) {
		/* Perform the scheduled reset and re-arm the timer */
		DMFE_DBUG(0, "Dynamic Reset device", db->tx_packet_cnt);
		db->reset_count++;
		dmfe_dynamic_reset(dev);
		db->first_in_callback = 0;
		db->timer.expires = DMFE_TIMER_WUT;
		add_timer(&db->timer);
		spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	/* Link status check, Dynamic media type change */
	if (db->chip_id == PCI_DM9132_ID)
		tmp_cr12 = inb(db->ioaddr + DCR9 + 3);	/* DM9132 */
	else
		tmp_cr12 = inb(db->ioaddr + DCR12);	/* DM9102/DM9102A */

	if ( ((db->chip_id == PCI_DM9102_ID) &&
		(db->chip_revision == 0x02000030)) ||
		((db->chip_id == PCI_DM9132_ID) &&
		(db->chip_revision == 0x02000010)) ) {
		/* DM9102A Chip: bit1 set means link FAILED */
		if (tmp_cr12 & 2)
			link_ok = 0;
		else
			link_ok = 1;
	}
	else
		/*0x43 is used instead of 0x3 because bit 6 should represent
			link status of external PHY */
		link_ok = (tmp_cr12 & 0x43) ? 1 : 0;

	/* If chip reports that link is failed it could be because external
		PHY link status pin is not connected correctly to chip
		To be sure ask PHY too.
	*/

	/* need a dummy read because of PHY's register latch*/
	phy_read (db->ioaddr, db->phy_addr, 1, db->chip_id);
	link_ok_phy = (phy_read (db->ioaddr,
		       db->phy_addr, 1, db->chip_id) & 0x4) ? 1 : 0;

	if (link_ok_phy != link_ok) {
		/* Disagreement: believe whichever source says "up" */
		DMFE_DBUG (0, "PHY and chip report different link status", 0);
		link_ok = link_ok | link_ok_phy;
	}

	if ( !link_ok && netif_carrier_ok(dev)) {
		/* Link Failed */
		DMFE_DBUG(0, "Link Failed", tmp_cr12);
		netif_carrier_off(dev);

		/* For Force 10/100M Half/Full mode: Enable Auto-Nego mode */
		/* AUTO or force 1M Homerun/Longrun don't need */
		if ( !(db->media_mode & 0x38) )
			phy_write(db->ioaddr, db->phy_addr,
				  0, 0x1000, db->chip_id);

		/* AUTO mode, if INT phyxcer link failed, select EXT device */
		if (db->media_mode & DMFE_AUTO) {
			/* 10/100M link failed, used 1M Home-Net */
			db->cr6_data|=0x00040000;	/* bit18=1, MII */
			db->cr6_data&=~0x00000200;	/* bit9=0, HD mode */
			update_cr6(db->cr6_data, db->ioaddr);
		}
	} else if (!netif_carrier_ok(dev)) {

		DMFE_DBUG(0, "Link link OK", tmp_cr12);

		/* Auto Sense Speed */
		if ( !(db->media_mode & DMFE_AUTO) || !dmfe_sense_speed(db)) {
			netif_carrier_on(dev);
			SHOW_MEDIA_TYPE(db->op_mode);
		}

		dmfe_process_mode(db);
	}

	/* HPNA remote command check */
	if (db->HPNA_command & 0xf00) {
		db->HPNA_timer--;
		if (!db->HPNA_timer)
			dmfe_HPNA_remote_cmd_chk(db);
	}

	/* Timer active again */
	db->timer.expires = DMFE_TIMER_WUT;
	add_timer(&db->timer);
	spin_unlock_irqrestore(&db->lock, flags);
}
/*
 *	Dynamic reset the DM910X board
 *	Stop DM910X board
 *	Free Tx/Rx allocated memory
 *	Reset DM910X board
 *	Re-initialize DM910X board
 */

static void dmfe_dynamic_reset(struct DEVICE *dev)
{
	struct dmfe_board_info *db = netdev_priv(dev);

	DMFE_DBUG(0, "dmfe_dynamic_reset()", 0);

	/* Stop MAC controller */
	db->cr6_data &= ~(CR6_RXSC | CR6_TXSC);	/* Disable Tx/Rx */
	update_cr6(db->cr6_data, dev->base_addr);
	outl(0, dev->base_addr + DCR7);		/* Disable Interrupt */
	/* Write CR5 back to itself — presumably clears the latched
	 * interrupt status bits (write-1-to-clear); confirm vs datasheet */
	outl(inl(dev->base_addr + DCR5), dev->base_addr + DCR5);

	/* Disable upper layer interface */
	netif_stop_queue(dev);

	/* Free Rx Allocate buffer */
	dmfe_free_rxbuffer(db);

	/* system variable init */
	db->tx_packet_cnt = 0;
	db->tx_queue_cnt = 0;
	db->rx_avail_cnt = 0;
	netif_carrier_off(dev);
	db->wait_reset = 0;

	/* Re-initialize DM910X board */
	dmfe_init_dm910x(dev);

	/* Restart upper layer interface */
	netif_wake_queue(dev);
}
1319 * free all allocated rx buffer
1322 static void dmfe_free_rxbuffer(struct dmfe_board_info * db)
1324 DMFE_DBUG(0, "dmfe_free_rxbuffer()", 0);
1326 /* free allocated rx buffer */
1327 while (db->rx_avail_cnt) {
1328 dev_kfree_skb(db->rx_ready_ptr->rx_skb_ptr);
1329 db->rx_ready_ptr = db->rx_ready_ptr->next_rx_desc;
1330 db->rx_avail_cnt--;
/*
 *	Reuse the SK buffer
 *
 *	Re-attach an skb to the next insert-position Rx descriptor and hand
 *	the descriptor back to the NIC.  Only proceeds if the descriptor is
 *	not already owned by the hardware.
 */

static void dmfe_reuse_skb(struct dmfe_board_info *db, struct sk_buff * skb)
{
	struct rx_desc *rxptr = db->rx_insert_ptr;

	/* 0x80000000 in rdes0 is the hardware OWN bit */
	if (!(rxptr->rdes0 & cpu_to_le32(0x80000000))) {
		rxptr->rx_skb_ptr = skb;
		rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev,
			    skb->data, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
		/* Make the buffer address visible before granting ownership
		 * to the NIC */
		wmb();
		rxptr->rdes0 = cpu_to_le32(0x80000000);
		db->rx_avail_cnt++;
		db->rx_insert_ptr = rxptr->next_rx_desc;
	} else
		DMFE_DBUG(0, "SK Buffer reuse method error", db->rx_avail_cnt);
}
/*
 *	Initialize transmit/Receive descriptor
 *	Using Chain structure, and allocate Tx/Rx buffer
 *
 *	The Tx and Rx descriptor rings live back-to-back in one DMA area:
 *	the Rx ring starts immediately after TX_DESC_CNT Tx descriptors.
 *	Both rings are chained circularly (last tdes3/rdes3 points back to
 *	the first descriptor's DMA address).
 */

static void dmfe_descriptor_init(struct dmfe_board_info *db, unsigned long ioaddr)
{
	struct tx_desc *tmp_tx;
	struct rx_desc *tmp_rx;
	unsigned char *tmp_buf;
	dma_addr_t tmp_tx_dma, tmp_rx_dma;
	dma_addr_t tmp_buf_dma;
	int i;

	DMFE_DBUG(0, "dmfe_descriptor_init()", 0);

	/* tx descriptor start pointer */
	db->tx_insert_ptr = db->first_tx_desc;
	db->tx_remove_ptr = db->first_tx_desc;
	outl(db->first_tx_desc_dma, ioaddr + DCR4);	/* TX DESC address */

	/* rx descriptor start pointer: placed right after the Tx ring in
	 * the same DMA allocation (both CPU and bus addresses offset) */
	db->first_rx_desc = (void *)db->first_tx_desc +
			sizeof(struct tx_desc) * TX_DESC_CNT;

	db->first_rx_desc_dma = db->first_tx_desc_dma +
			sizeof(struct tx_desc) * TX_DESC_CNT;
	db->rx_insert_ptr = db->first_rx_desc;
	db->rx_ready_ptr = db->first_rx_desc;
	outl(db->first_rx_desc_dma, ioaddr + DCR3);	/* RX DESC address */

	/* Init Transmit chain: each descriptor gets its own slice of the
	 * pre-allocated buffer pool and links to the next descriptor */
	tmp_buf = db->buf_pool_start;
	tmp_buf_dma = db->buf_pool_dma_start;
	tmp_tx_dma = db->first_tx_desc_dma;
	for (tmp_tx = db->first_tx_desc, i = 0; i < TX_DESC_CNT; i++, tmp_tx++) {
		tmp_tx->tx_buf_ptr = tmp_buf;
		tmp_tx->tdes0 = cpu_to_le32(0);
		tmp_tx->tdes1 = cpu_to_le32(0x81000000);	/* IC, chain */
		tmp_tx->tdes2 = cpu_to_le32(tmp_buf_dma);
		tmp_tx_dma += sizeof(struct tx_desc);
		tmp_tx->tdes3 = cpu_to_le32(tmp_tx_dma);
		tmp_tx->next_tx_desc = tmp_tx + 1;
		tmp_buf = tmp_buf + TX_BUF_ALLOC;
		tmp_buf_dma = tmp_buf_dma + TX_BUF_ALLOC;
	}
	/* Close the ring: last descriptor chains back to the first */
	(--tmp_tx)->tdes3 = cpu_to_le32(db->first_tx_desc_dma);
	tmp_tx->next_tx_desc = db->first_tx_desc;

	/* Init Receive descriptor chain */
	tmp_rx_dma=db->first_rx_desc_dma;
	for (tmp_rx = db->first_rx_desc, i = 0; i < RX_DESC_CNT; i++, tmp_rx++) {
		tmp_rx->rdes0 = cpu_to_le32(0);
		tmp_rx->rdes1 = cpu_to_le32(0x01000600);
		tmp_rx_dma += sizeof(struct rx_desc);
		tmp_rx->rdes3 = cpu_to_le32(tmp_rx_dma);
		tmp_rx->next_rx_desc = tmp_rx + 1;
	}
	/* Close the Rx ring as well */
	(--tmp_rx)->rdes3 = cpu_to_le32(db->first_rx_desc_dma);
	tmp_rx->next_rx_desc = db->first_rx_desc;

	/* pre-allocate Rx buffer */
	allocate_rx_buffer(db);
}
1423 * Update CR6 value
1424 * Firstly stop DM910X , then written value and start
1427 static void update_cr6(u32 cr6_data, unsigned long ioaddr)
1429 u32 cr6_tmp;
1431 cr6_tmp = cr6_data & ~0x2002; /* stop Tx/Rx */
1432 outl(cr6_tmp, ioaddr + DCR6);
1433 udelay(5);
1434 outl(cr6_data, ioaddr + DCR6);
1435 udelay(5);
/*
 *	Send a setup frame for DM9132
 *	This setup frame initializes the DM910X address filter mode:
 *	the station address and a 64-bit multicast hash table are written
 *	directly into the chip's ID table registers at base + 0xc0.
 */

static void dm9132_id_table(struct DEVICE *dev, int mc_cnt)
{
	struct dev_mc_list *mcptr;
	u16 * addrptr;
	unsigned long ioaddr = dev->base_addr+0xc0;		/* ID Table */
	u32 hash_val;
	u16 i, hash_table[4];

	DMFE_DBUG(0, "dm9132_id_table()", 0);

	/* Node address: three 16-bit words, one register every 4 bytes */
	addrptr = (u16 *) dev->dev_addr;
	outw(addrptr[0], ioaddr);
	ioaddr += 4;
	outw(addrptr[1], ioaddr);
	ioaddr += 4;
	outw(addrptr[2], ioaddr);
	ioaddr += 4;

	/* Clear Hash Table */
	for (i = 0; i < 4; i++)
		hash_table[i] = 0x0;

	/* broadcast address: always hashes to bit 63 */
	hash_table[3] = 0x8000;

	/* the multicast address in Hash Table : 64 bits */
	for (mcptr = dev->mc_list, i = 0; i < mc_cnt; i++, mcptr = mcptr->next) {
		/* CRC of the MAC address selects one of 64 hash bits */
		hash_val = cal_CRC( (char *) mcptr->dmi_addr, 6, 0) & 0x3f;
		hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);
	}

	/* Write the hash table to MAC MD table */
	for (i = 0; i < 4; i++, ioaddr += 4)
		outw(hash_table[i], ioaddr);
}
/*
 *	Send a setup frame for DM9102/DM9102A
 *	This setup frame initializes the DM910X address filter mode.
 *
 *	Builds a 16-entry perfect-filter setup frame in the next Tx buffer:
 *	station address, broadcast, up to mc_cnt multicast addresses, and
 *	broadcast padding for the unused slots.  The frame is transmitted
 *	immediately if the Tx ring is idle, otherwise queued.
 */

static void send_filter_frame(struct DEVICE *dev, int mc_cnt)
{
	struct dmfe_board_info *db = netdev_priv(dev);
	struct dev_mc_list *mcptr;
	struct tx_desc *txptr;
	u16 * addrptr;
	u32 * suptr;
	int i;

	DMFE_DBUG(0, "send_filter_frame()", 0);

	txptr = db->tx_insert_ptr;
	suptr = (u32 *) txptr->tx_buf_ptr;

	/* Node address: each 16-bit half occupies one 32-bit setup word */
	addrptr = (u16 *) dev->dev_addr;
	*suptr++ = addrptr[0];
	*suptr++ = addrptr[1];
	*suptr++ = addrptr[2];

	/* broadcast address */
	*suptr++ = 0xffff;
	*suptr++ = 0xffff;
	*suptr++ = 0xffff;

	/* fit the multicast address */
	for (mcptr = dev->mc_list, i = 0; i < mc_cnt; i++, mcptr = mcptr->next) {
		addrptr = (u16 *) mcptr->dmi_addr;
		*suptr++ = addrptr[0];
		*suptr++ = addrptr[1];
		*suptr++ = addrptr[2];
	}

	/* Pad the remaining filter slots with the broadcast address */
	for (; i<14; i++) {
		*suptr++ = 0xffff;
		*suptr++ = 0xffff;
		*suptr++ = 0xffff;
	}

	/* prepare the setup frame */
	db->tx_insert_ptr = txptr->next_tx_desc;
	/* 0x890000c0: setup-frame control bits in tdes1 — presumably
	 * setup packet + chain + 192-byte length; confirm vs datasheet */
	txptr->tdes1 = cpu_to_le32(0x890000c0);

	/* Resource Check and Send the setup packet */
	if (!db->tx_packet_cnt) {
		/* Resource Empty: give the descriptor to the NIC and kick
		 * transmit polling */
		db->tx_packet_cnt++;
		txptr->tdes0 = cpu_to_le32(0x80000000);
		update_cr6(db->cr6_data | 0x2000, dev->base_addr);
		outl(0x1, dev->base_addr + DCR1);	/* Issue Tx polling */
		update_cr6(db->cr6_data, dev->base_addr);
		dev->trans_start = jiffies;
	} else
		db->tx_queue_cnt++;	/* Put in TX queue */
}
/*
 *	Allocate rx buffer,
 *	Allocate as many Rx buffers as possible (up to RX_DESC_CNT),
 *	attaching a freshly allocated skb to each free descriptor and
 *	handing the descriptor to the NIC.  Stops early on allocation
 *	failure; the ring simply runs with fewer buffers in that case.
 */

static void allocate_rx_buffer(struct dmfe_board_info *db)
{
	struct rx_desc *rxptr;
	struct sk_buff *skb;

	rxptr = db->rx_insert_ptr;

	while(db->rx_avail_cnt < RX_DESC_CNT) {
		if ( ( skb = dev_alloc_skb(RX_ALLOC_SIZE) ) == NULL )
			break;		/* out of memory — try again later */
		rxptr->rx_skb_ptr = skb; /* FIXME (?) */
		rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->data,
				    RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
		/* Publish the buffer address before setting the OWN bit */
		wmb();
		rxptr->rdes0 = cpu_to_le32(0x80000000);
		rxptr = rxptr->next_rx_desc;
		db->rx_avail_cnt++;
	}

	db->rx_insert_ptr = rxptr;
}
/*
 *	Read one word data from the serial ROM
 *
 *	Bit-bangs the serial EEPROM protocol through CR9: chip select,
 *	read opcode (110b), 6-bit word offset, then clocks in 16 data
 *	bits MSB first.
 */

static u16 read_srom_word(long ioaddr, int offset)
{
	int i;
	u16 srom_data = 0;
	long cr9_ioaddr = ioaddr + DCR9;

	/* Assert chip select for the serial ROM */
	outl(CR9_SROM_READ, cr9_ioaddr);
	outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);

	/* Send the Read Command 110b */
	SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr);
	SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr);
	SROM_CLK_WRITE(SROM_DATA_0, cr9_ioaddr);

	/* Send the offset, MSB first (6 address bits) */
	for (i = 5; i >= 0; i--) {
		srom_data = (offset & (1 << i)) ? SROM_DATA_1 : SROM_DATA_0;
		SROM_CLK_WRITE(srom_data, cr9_ioaddr);
	}

	outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);

	/* Clock in 16 data bits, MSB first */
	for (i = 16; i > 0; i--) {
		outl(CR9_SROM_READ | CR9_SRCS | CR9_SRCLK, cr9_ioaddr);
		udelay(5);
		srom_data = (srom_data << 1) |
				((inl(cr9_ioaddr) & CR9_CRDOUT) ? 1 : 0);
		outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
		udelay(5);
	}

	/* Deassert chip select */
	outl(CR9_SROM_READ, cr9_ioaddr);
	return srom_data;
}
/*
 *	Auto sense the media mode
 *
 *	Reads the PHY status and, when link is up with autonegotiation
 *	complete, decodes the negotiated speed/duplex into db->op_mode.
 *	Returns 0 on success, 1 (ErrFlag) when the link is down or the
 *	reported mode is unrecognized.
 */

static u8 dmfe_sense_speed(struct dmfe_board_info * db)
{
	u8 ErrFlag = 0;
	u16 phy_mode;

	/* CR6 bit18=0, select 10/100M */
	update_cr6( (db->cr6_data & ~0x40000), db->ioaddr);

	/* Read BMSR twice — status bits are latched, so the first read
	 * returns stale state */
	phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
	phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);

	/* 0x24: link up AND autonegotiation complete */
	if ( (phy_mode & 0x24) == 0x24 ) {
		if (db->chip_id == PCI_DM9132_ID)	/* DM9132 */
			phy_mode = phy_read(db->ioaddr,
				    db->phy_addr, 7, db->chip_id) & 0xf000;
		else 				/* DM9102/DM9102A */
			phy_mode = phy_read(db->ioaddr,
				    db->phy_addr, 17, db->chip_id) & 0xf000;
		/* printk(DRV_NAME ": Phy_mode %x ",phy_mode); */
		switch (phy_mode) {
		case 0x1000: db->op_mode = DMFE_10MHF; break;
		case 0x2000: db->op_mode = DMFE_10MFD; break;
		case 0x4000: db->op_mode = DMFE_100MHF; break;
		case 0x8000: db->op_mode = DMFE_100MFD; break;
		default: db->op_mode = DMFE_10MHF;
			ErrFlag = 1;
			break;
		}
	} else {
		db->op_mode = DMFE_10MHF;
		DMFE_DBUG(0, "Link Failed :", phy_mode);
		ErrFlag = 1;
	}

	return ErrFlag;
}
/*
 *	Set 10/100 phyxcer capability
 *	AUTO mode : phyxcer register4 is NIC capability
 *	Force mode: phyxcer register4 is the force media
 */

static void dmfe_set_phyxcer(struct dmfe_board_info *db)
{
	u16 phy_reg;

	/* Select 10/100M phyxcer (CR6 bit18=0) */
	db->cr6_data &= ~0x40000;
	update_cr6(db->cr6_data, db->ioaddr);

	/* DM9009 Chip: Phyxcer reg18 bit12=0 */
	if (db->chip_id == PCI_DM9009_ID) {
		phy_reg = phy_read(db->ioaddr,
				   db->phy_addr, 18, db->chip_id) & ~0x1000;

		phy_write(db->ioaddr,
			  db->phy_addr, 18, phy_reg, db->chip_id);
	}

	/* Phyxcer capability setting: clear the advertisement bits
	 * (0x01e0) in register 4, then fill in the desired ones */
	phy_reg = phy_read(db->ioaddr, db->phy_addr, 4, db->chip_id) & ~0x01e0;

	if (db->media_mode & DMFE_AUTO) {
		/* AUTO Mode: advertise everything the NIC supports */
		phy_reg |= db->PHY_reg4;
	} else {
		/* Force Mode: advertise only the forced media type */
		switch(db->media_mode) {
		case DMFE_10MHF: phy_reg |= 0x20; break;
		case DMFE_10MFD: phy_reg |= 0x40; break;
		case DMFE_100MHF: phy_reg |= 0x80; break;
		case DMFE_100MFD: phy_reg |= 0x100; break;
		}
		/* DM9009 supports 10M only — mask out 100M bits */
		if (db->chip_id == PCI_DM9009_ID) phy_reg &= 0x61;
	}

	/* Write new capability to Phyxcer Reg4; if no valid media bit
	 * survived, fall back to full auto-negotiation */
	if ( !(phy_reg & 0x01e0)) {
		phy_reg|=db->PHY_reg4;
		db->media_mode|=DMFE_AUTO;
	}
	phy_write(db->ioaddr, db->phy_addr, 4, phy_reg, db->chip_id);

	/* Restart Auto-Negotiation (different BMCR value per chip type) */
	if ( db->chip_type && (db->chip_id == PCI_DM9102_ID) )
		phy_write(db->ioaddr, db->phy_addr, 0, 0x1800, db->chip_id);
	if ( !db->chip_type )
		phy_write(db->ioaddr, db->phy_addr, 0, 0x1200, db->chip_id);
}
/*
 *	Process op-mode
 *	AUTO mode : PHY controller in Auto-negotiation Mode
 *	Force mode: PHY controller in force mode with HUB
 *			N-way force capability with SWITCH
 */

static void dmfe_process_mode(struct dmfe_board_info *db)
{
	u16 phy_reg;

	/* Full Duplex Mode Check */
	if (db->op_mode & 0x4)
		db->cr6_data |= CR6_FDM;	/* Set Full Duplex Bit */
	else
		db->cr6_data &= ~CR6_FDM;	/* Clear Full Duplex Bit */

	/* Transceiver Selection */
	if (db->op_mode & 0x10)		/* 1M HomePNA */
		db->cr6_data |= 0x40000;	/* External MII select */
	else
		db->cr6_data &= ~0x40000;	/* Internal 10/100 transceiver */

	update_cr6(db->cr6_data, db->ioaddr);

	/* 10/100M phyxcer force mode need */
	if ( !(db->media_mode & 0x18)) {
		/* Force Mode */
		phy_reg = phy_read(db->ioaddr, db->phy_addr, 6, db->chip_id);
		if ( !(phy_reg & 0x1) ) {
			/* partner without N-Way capability: force BMCR
			 * to the matching speed/duplex directly */
			phy_reg = 0x0;
			switch(db->op_mode) {
			case DMFE_10MHF: phy_reg = 0x0; break;
			case DMFE_10MFD: phy_reg = 0x100; break;
			case DMFE_100MHF: phy_reg = 0x2000; break;
			case DMFE_100MFD: phy_reg = 0x2100; break;
			}
			phy_write(db->ioaddr,
				  db->phy_addr, 0, phy_reg, db->chip_id);
			/* DM9102 (new type) needs a settle delay and a
			 * second write */
			if ( db->chip_type && (db->chip_id == PCI_DM9102_ID) )
				mdelay(20);
			phy_write(db->ioaddr,
				  db->phy_addr, 0, phy_reg, db->chip_id);
		}
	}
}
/*
 *	Write a word to Phy register
 *
 *	DM9132: the PHY registers are memory-mapped at iobase+0x80 and can
 *	be written directly.  DM9102/DM9102A: bit-bang a full MII management
 *	write frame (preamble, start, opcode, PHY address, register address,
 *	turnaround, 16 data bits) through CR9.
 */

static void phy_write(unsigned long iobase, u8 phy_addr, u8 offset,
		      u16 phy_data, u32 chip_id)
{
	u16 i;
	unsigned long ioaddr;

	if (chip_id == PCI_DM9132_ID) {
		/* DM9132: direct register access */
		ioaddr = iobase + 0x80 + offset * 4;
		outw(phy_data, ioaddr);
	} else {
		/* DM9102/DM9102A Chip */
		ioaddr = iobase + DCR9;

		/* Send synchronization clocks (preamble) to Phy controller.
		 * NOTE(review): comment said 33 but the loop emits 35 —
		 * ≥32 is what MII requires, so both are sufficient. */
		for (i = 0; i < 35; i++)
			phy_write_1bit(ioaddr, PHY_DATA_1);

		/* Send start command(01) to Phy */
		phy_write_1bit(ioaddr, PHY_DATA_0);
		phy_write_1bit(ioaddr, PHY_DATA_1);

		/* Send write command(01) to Phy */
		phy_write_1bit(ioaddr, PHY_DATA_0);
		phy_write_1bit(ioaddr, PHY_DATA_1);

		/* Send Phy address, MSB first (5 bits) */
		for (i = 0x10; i > 0; i = i >> 1)
			phy_write_1bit(ioaddr,
				       phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);

		/* Send register address, MSB first (5 bits) */
		for (i = 0x10; i > 0; i = i >> 1)
			phy_write_1bit(ioaddr,
				       offset & i ? PHY_DATA_1 : PHY_DATA_0);

		/* write transition (turnaround: 10) */
		phy_write_1bit(ioaddr, PHY_DATA_1);
		phy_write_1bit(ioaddr, PHY_DATA_0);

		/* Write a word data to PHY controller, MSB first */
		for ( i = 0x8000; i > 0; i >>= 1)
			phy_write_1bit(ioaddr,
				       phy_data & i ? PHY_DATA_1 : PHY_DATA_0);
	}
}
/*
 *	Read a word data from phy register
 *
 *	DM9132: the PHY registers are memory-mapped at iobase+0x80 and can
 *	be read directly.  DM9102/DM9102A: bit-bang a full MII management
 *	read frame through CR9 and clock in the 16 data bits.
 */

static u16 phy_read(unsigned long iobase, u8 phy_addr, u8 offset, u32 chip_id)
{
	int i;
	u16 phy_data;
	unsigned long ioaddr;

	if (chip_id == PCI_DM9132_ID) {
		/* DM9132 Chip: direct register access */
		ioaddr = iobase + 0x80 + offset * 4;
		phy_data = inw(ioaddr);
	} else {
		/* DM9102/DM9102A Chip */
		ioaddr = iobase + DCR9;

		/* Send synchronization clocks (preamble) to Phy controller */
		for (i = 0; i < 35; i++)
			phy_write_1bit(ioaddr, PHY_DATA_1);

		/* Send start command(01) to Phy */
		phy_write_1bit(ioaddr, PHY_DATA_0);
		phy_write_1bit(ioaddr, PHY_DATA_1);

		/* Send read command(10) to Phy */
		phy_write_1bit(ioaddr, PHY_DATA_1);
		phy_write_1bit(ioaddr, PHY_DATA_0);

		/* Send Phy address, MSB first (5 bits) */
		for (i = 0x10; i > 0; i = i >> 1)
			phy_write_1bit(ioaddr,
				       phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);

		/* Send register address, MSB first (5 bits) */
		for (i = 0x10; i > 0; i = i >> 1)
			phy_write_1bit(ioaddr,
				       offset & i ? PHY_DATA_1 : PHY_DATA_0);

		/* Skip transition state (turnaround bit) */
		phy_read_1bit(ioaddr);

		/* read 16bit data, MSB first */
		for (phy_data = 0, i = 0; i < 16; i++) {
			phy_data <<= 1;
			phy_data |= phy_read_1bit(ioaddr);
		}
	}

	return phy_data;
}
1864 * Write one bit data to Phy Controller
1867 static void phy_write_1bit(unsigned long ioaddr, u32 phy_data)
1869 outl(phy_data, ioaddr); /* MII Clock Low */
1870 udelay(1);
1871 outl(phy_data | MDCLKH, ioaddr); /* MII Clock High */
1872 udelay(1);
1873 outl(phy_data, ioaddr); /* MII Clock Low */
1874 udelay(1);
1879 * Read one bit phy data from PHY controller
1882 static u16 phy_read_1bit(unsigned long ioaddr)
1884 u16 phy_data;
1886 outl(0x50000, ioaddr);
1887 udelay(1);
1888 phy_data = ( inl(ioaddr) >> 19 ) & 0x1;
1889 outl(0x40000, ioaddr);
1890 udelay(1);
1892 return phy_data;
/*
 *	Parse SROM and media mode
 *
 *	Decodes the serial ROM contents (media capability, forced media
 *	mode, special-function bits for CR15), builds the HPNA command
 *	word from the module parameters, and probes for an attached
 *	DM9801 (HomeRun) or DM9802 (LongRun) companion PHY.
 */

static void dmfe_parse_srom(struct dmfe_board_info * db)
{
	char * srom = db->srom;
	int dmfe_mode, tmp_reg;

	DMFE_DBUG(0, "dmfe_parse_srom() ", 0);

	/* Init CR15 */
	db->cr15_data = CR15_DEFAULT;

	/* Check SROM Version */
	if ( ( (int) srom[18] & 0xff) == SROM_V41_CODE) {
		/* SROM V4.01 */
		/* Get NIC support media mode.
		 * NOTE(review): these pointer-cast reads assume the SROM
		 * buffer is suitably aligned for 16/32-bit access —
		 * confirm on strict-alignment architectures. */
		db->NIC_capability = le16_to_cpup((__le16 *)srom + 34/2);
		db->PHY_reg4 = 0;
		/* Map each capability bit to its MII reg4 advertisement bit */
		for (tmp_reg = 1; tmp_reg < 0x10; tmp_reg <<= 1) {
			switch( db->NIC_capability & tmp_reg ) {
			case 0x1: db->PHY_reg4 |= 0x0020; break;
			case 0x2: db->PHY_reg4 |= 0x0040; break;
			case 0x4: db->PHY_reg4 |= 0x0080; break;
			case 0x8: db->PHY_reg4 |= 0x0100; break;
			}
		}

		/* Media Mode Force or not check */
		dmfe_mode = le32_to_cpup((__le32 *)srom + 34/4) &
				le32_to_cpup((__le32 *)srom + 36/4);
		switch(dmfe_mode) {
		case 0x4: dmfe_media_mode = DMFE_100MHF; break;	/* 100MHF */
		case 0x2: dmfe_media_mode = DMFE_10MFD; break;	/* 10MFD */
		case 0x8: dmfe_media_mode = DMFE_100MFD; break;	/* 100MFD */
		case 0x100:
		case 0x200: dmfe_media_mode = DMFE_1M_HPNA; break;/* HomePNA */
		}

		/* Special Function setting */
		/* VLAN function */
		if ( (SF_mode & 0x1) || (srom[43] & 0x80) )
			db->cr15_data |= 0x40;

		/* Flow Control */
		if ( (SF_mode & 0x2) || (srom[40] & 0x1) )
			db->cr15_data |= 0x400;

		/* TX pause packet */
		if ( (SF_mode & 0x4) || (srom[40] & 0xe) )
			db->cr15_data |= 0x9800;
	}

	/* Parse HPNA parameter */
	db->HPNA_command = 1;

	/* Accept remote command or not */
	if (HPNA_rx_cmd == 0)
		db->HPNA_command |= 0x8000;

	/* Issue remote command & operation mode */
	if (HPNA_tx_cmd == 1)
		switch(HPNA_mode) {	/* Issue Remote Command */
		case 0: db->HPNA_command |= 0x0904; break;
		case 1: db->HPNA_command |= 0x0a00; break;
		case 2: db->HPNA_command |= 0x0506; break;
		case 3: db->HPNA_command |= 0x0602; break;
		}
	else
		switch(HPNA_mode) {	/* Don't Issue */
		case 0: db->HPNA_command |= 0x0004; break;
		case 1: db->HPNA_command |= 0x0000; break;
		case 2: db->HPNA_command |= 0x0006; break;
		case 3: db->HPNA_command |= 0x0002; break;
		}

	/* Check DM9801 or DM9802 present or not */
	db->HPNA_present = 0;
	/* Select external MII (CR6 bit18=1) so the companion PHY answers */
	update_cr6(db->cr6_data|0x40000, db->ioaddr);
	tmp_reg = phy_read(db->ioaddr, db->phy_addr, 3, db->chip_id);
	if ( ( tmp_reg & 0xfff0 ) == 0xb900 ) {
		/* DM9801 or DM9802 present */
		db->HPNA_timer = 8;
		if ( phy_read(db->ioaddr, db->phy_addr, 31, db->chip_id) == 0x4404) {
			/* DM9801 HomeRun */
			db->HPNA_present = 1;
			dmfe_program_DM9801(db, tmp_reg);
		} else {
			/* DM9802 LongRun */
			db->HPNA_present = 2;
			dmfe_program_DM9802(db);
		}
	}
}
/*
 *	Init HomeRun DM9801
 *
 *	Programs the HPNA command register (16) and the noise-floor
 *	calibration registers (17/25); the register layout differs by
 *	DM9801 silicon revision (HPNA_rev from PHY reg 3).
 */

static void dmfe_program_DM9801(struct dmfe_board_info * db, int HPNA_rev)
{
	uint reg17, reg25;

	/* Use the chip default noise floor unless the module parameter
	 * overrides it */
	if ( !HPNA_NoiseFloor ) HPNA_NoiseFloor = DM9801_NOISE_FLOOR;
	switch(HPNA_rev) {
	case 0xb900: /* DM9801 E3 */
		db->HPNA_command |= 0x1000;
		reg25 = phy_read(db->ioaddr, db->phy_addr, 24, db->chip_id);
		reg25 = ( (reg25 + HPNA_NoiseFloor) & 0xff) | 0xf000;
		reg17 = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
		break;
	case 0xb901: /* DM9801 E4 */
		reg25 = phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
		reg25 = (reg25 & 0xff00) + HPNA_NoiseFloor;
		reg17 = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
		reg17 = (reg17 & 0xfff0) + HPNA_NoiseFloor + 3;
		break;
	case 0xb902: /* DM9801 E5 */
	case 0xb903: /* DM9801 E6 */
	default:
		db->HPNA_command |= 0x1000;
		reg25 = phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
		reg25 = (reg25 & 0xff00) + HPNA_NoiseFloor - 5;
		reg17 = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
		reg17 = (reg17 & 0xfff0) + HPNA_NoiseFloor;
		break;
	}
	phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id);
	phy_write(db->ioaddr, db->phy_addr, 17, reg17, db->chip_id);
	phy_write(db->ioaddr, db->phy_addr, 25, reg25, db->chip_id);
}
2033 * Init HomeRun DM9802
2036 static void dmfe_program_DM9802(struct dmfe_board_info * db)
2038 uint phy_reg;
2040 if ( !HPNA_NoiseFloor ) HPNA_NoiseFloor = DM9802_NOISE_FLOOR;
2041 phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id);
2042 phy_reg = phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
2043 phy_reg = ( phy_reg & 0xff00) + HPNA_NoiseFloor;
2044 phy_write(db->ioaddr, db->phy_addr, 25, phy_reg, db->chip_id);
2049 * Check remote HPNA power and speed status. If not correct,
2050 * issue command again.
2053 static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * db)
2055 uint phy_reg;
2057 /* Got remote device status */
2058 phy_reg = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id) & 0x60;
2059 switch(phy_reg) {
2060 case 0x00: phy_reg = 0x0a00;break; /* LP/LS */
2061 case 0x20: phy_reg = 0x0900;break; /* LP/HS */
2062 case 0x40: phy_reg = 0x0600;break; /* HP/LS */
2063 case 0x60: phy_reg = 0x0500;break; /* HP/HS */
2066 /* Check remote device status match our setting ot not */
2067 if ( phy_reg != (db->HPNA_command & 0x0f00) ) {
2068 phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command,
2069 db->chip_id);
2070 db->HPNA_timer=8;
2071 } else
2072 db->HPNA_timer=600; /* Match, every 10 minutes, check */
/* PCI IDs handled by this driver; driver_data carries the chip type */
static struct pci_device_id dmfe_pci_tbl[] = {
	{ 0x1282, 0x9132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9132_ID },
	{ 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9102_ID },
	{ 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9100_ID },
	{ 0x1282, 0x9009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9009_ID },
	{ 0, }	/* terminator */
};
MODULE_DEVICE_TABLE(pci, dmfe_pci_tbl);
2087 #ifdef CONFIG_PM
2088 static int dmfe_suspend(struct pci_dev *pci_dev, pm_message_t state)
2090 struct net_device *dev = pci_get_drvdata(pci_dev);
2091 struct dmfe_board_info *db = netdev_priv(dev);
2092 u32 tmp;
2094 /* Disable upper layer interface */
2095 netif_device_detach(dev);
2097 /* Disable Tx/Rx */
2098 db->cr6_data &= ~(CR6_RXSC | CR6_TXSC);
2099 update_cr6(db->cr6_data, dev->base_addr);
2101 /* Disable Interrupt */
2102 outl(0, dev->base_addr + DCR7);
2103 outl(inl (dev->base_addr + DCR5), dev->base_addr + DCR5);
2105 /* Fre RX buffers */
2106 dmfe_free_rxbuffer(db);
2108 /* Enable WOL */
2109 pci_read_config_dword(pci_dev, 0x40, &tmp);
2110 tmp &= ~(DMFE_WOL_LINKCHANGE|DMFE_WOL_MAGICPACKET);
2112 if (db->wol_mode & WAKE_PHY)
2113 tmp |= DMFE_WOL_LINKCHANGE;
2114 if (db->wol_mode & WAKE_MAGIC)
2115 tmp |= DMFE_WOL_MAGICPACKET;
2117 pci_write_config_dword(pci_dev, 0x40, tmp);
2119 pci_enable_wake(pci_dev, PCI_D3hot, 1);
2120 pci_enable_wake(pci_dev, PCI_D3cold, 1);
2122 /* Power down device*/
2123 pci_set_power_state(pci_dev, pci_choose_state (pci_dev,state));
2124 pci_save_state(pci_dev);
2126 return 0;
2129 static int dmfe_resume(struct pci_dev *pci_dev)
2131 struct net_device *dev = pci_get_drvdata(pci_dev);
2132 u32 tmp;
2134 pci_restore_state(pci_dev);
2135 pci_set_power_state(pci_dev, PCI_D0);
2137 /* Re-initilize DM910X board */
2138 dmfe_init_dm910x(dev);
2140 /* Disable WOL */
2141 pci_read_config_dword(pci_dev, 0x40, &tmp);
2143 tmp &= ~(DMFE_WOL_LINKCHANGE | DMFE_WOL_MAGICPACKET);
2144 pci_write_config_dword(pci_dev, 0x40, tmp);
2146 pci_enable_wake(pci_dev, PCI_D3hot, 0);
2147 pci_enable_wake(pci_dev, PCI_D3cold, 0);
2149 /* Restart upper layer interface */
2150 netif_device_attach(dev);
2152 return 0;
2154 #else
2155 #define dmfe_suspend NULL
2156 #define dmfe_resume NULL
2157 #endif
/* PCI driver glue: probe/remove plus (optional) power-management hooks */
static struct pci_driver dmfe_driver = {
	.name		= "dmfe",
	.id_table	= dmfe_pci_tbl,
	.probe		= dmfe_init_one,
	.remove		= __devexit_p(dmfe_remove_one),
	.suspend	= dmfe_suspend,
	.resume		= dmfe_resume
};
/* Module identification */
MODULE_AUTHOR("Sten Wang, sten_wang@davicom.com.tw");
MODULE_DESCRIPTION("Davicom DM910X fast ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/* Module parameters (load-time only: sysfs permissions are 0) */
module_param(debug, int, 0);
module_param(mode, byte, 0);
module_param(cr6set, int, 0);
module_param(chkmode, byte, 0);
module_param(HPNA_mode, byte, 0);
module_param(HPNA_rx_cmd, byte, 0);
module_param(HPNA_tx_cmd, byte, 0);
module_param(HPNA_NoiseFloor, byte, 0);
module_param(SF_mode, byte, 0);
MODULE_PARM_DESC(debug, "Davicom DM9xxx enable debugging (0-1)");
MODULE_PARM_DESC(mode, "Davicom DM9xxx: "
		"Bit 0: 10/100Mbps, bit 2: duplex, bit 8: HomePNA");

MODULE_PARM_DESC(SF_mode, "Davicom DM9xxx special function "
		"(bit 0: VLAN, bit 1 Flow Control, bit 2: TX pause packet)");
2189 /* Description:
2190 * when user used insmod to add module, system invoked init_module()
2191 * to initilize and register.
2194 static int __init dmfe_init_module(void)
2196 int rc;
2198 printk(version);
2199 printed_version = 1;
2201 DMFE_DBUG(0, "init_module() ", debug);
2203 if (debug)
2204 dmfe_debug = debug; /* set debug flag */
2205 if (cr6set)
2206 dmfe_cr6_user_set = cr6set;
2208 switch(mode) {
2209 case DMFE_10MHF:
2210 case DMFE_100MHF:
2211 case DMFE_10MFD:
2212 case DMFE_100MFD:
2213 case DMFE_1M_HPNA:
2214 dmfe_media_mode = mode;
2215 break;
2216 default:dmfe_media_mode = DMFE_AUTO;
2217 break;
2220 if (HPNA_mode > 4)
2221 HPNA_mode = 0; /* Default: LP/HS */
2222 if (HPNA_rx_cmd > 1)
2223 HPNA_rx_cmd = 0; /* Default: Ignored remote cmd */
2224 if (HPNA_tx_cmd > 1)
2225 HPNA_tx_cmd = 0; /* Default: Don't issue remote cmd */
2226 if (HPNA_NoiseFloor > 15)
2227 HPNA_NoiseFloor = 0;
2229 rc = pci_register_driver(&dmfe_driver);
2230 if (rc < 0)
2231 return rc;
2233 return 0;
/*
 *	Module exit point (rmmod): unregister the PCI driver so all
 *	registered services are torn down.
 */

static void __exit dmfe_cleanup_module(void)
{
	DMFE_DBUG(0, "dmfe_clean_module() ", debug);
	pci_unregister_driver(&dmfe_driver);
}
2249 module_init(dmfe_init_module);
2250 module_exit(dmfe_cleanup_module);