dmfe: add support for suspend/resume
[linux-2.6/kvm.git] / drivers / net / tulip / dmfe.c
blobe3a077977e4ccea7d48ced4b7cfb84f361a2ccf5
1 /*
2 A Davicom DM9102/DM9102A/DM9102A+DM9801/DM9102A+DM9802 NIC fast
3 ethernet driver for Linux.
4 Copyright (C) 1997 Sten Wang
6 This program is free software; you can redistribute it and/or
7 modify it under the terms of the GNU General Public License
8 as published by the Free Software Foundation; either version 2
9 of the License, or (at your option) any later version.
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 DAVICOM Web-Site: www.davicom.com.tw
18 Author: Sten Wang, 886-3-5798797-8517, E-mail: sten_wang@davicom.com.tw
19 Maintainer: Tobias Ringstrom <tori@unhappy.mine.nu>
21 (C)Copyright 1997-1998 DAVICOM Semiconductor,Inc. All Rights Reserved.
23 Marcelo Tosatti <marcelo@conectiva.com.br> :
24 Made it compile in 2.3 (device to net_device)
26 Alan Cox <alan@redhat.com> :
27 Cleaned up for kernel merge.
28 Removed the back compatibility support
29 Reformatted, fixing spelling etc as I went
30 Removed IRQ 0-15 assumption
32 Jeff Garzik <jgarzik@pobox.com> :
33 Updated to use new PCI driver API.
34 Resource usage cleanups.
35 Report driver version to user.
37 Tobias Ringstrom <tori@unhappy.mine.nu> :
38 Cleaned up and added SMP safety. Thanks go to Jeff Garzik,
39 Andrew Morton and Frank Davis for the SMP safety fixes.
41 Vojtech Pavlik <vojtech@suse.cz> :
42 Cleaned up pointer arithmetics.
43 Fixed a lot of 64bit issues.
44 Cleaned up printk()s a bit.
45 Fixed some obvious big endian problems.
47 Tobias Ringstrom <tori@unhappy.mine.nu> :
48 Use time_after for jiffies calculation. Added ethtool
49 support. Updated PCI resource allocation. Do not
50 forget to unmap PCI mapped skbs.
52 Alan Cox <alan@redhat.com>
53 Added new PCI identifiers provided by Clear Zhang at ALi
54 for their 1563 ethernet device.
56 TODO
58 Check on 64 bit boxes.
59 Check and fix on big endian boxes.
61 Test and make sure PCI latency is now correct for all cases.
64 #define DRV_NAME "dmfe"
65 #define DRV_VERSION "1.36.4"
66 #define DRV_RELDATE "2002-01-17"
68 #include <linux/module.h>
69 #include <linux/kernel.h>
70 #include <linux/string.h>
71 #include <linux/timer.h>
72 #include <linux/ptrace.h>
73 #include <linux/errno.h>
74 #include <linux/ioport.h>
75 #include <linux/slab.h>
76 #include <linux/interrupt.h>
77 #include <linux/pci.h>
78 #include <linux/dma-mapping.h>
79 #include <linux/init.h>
80 #include <linux/netdevice.h>
81 #include <linux/etherdevice.h>
82 #include <linux/ethtool.h>
83 #include <linux/skbuff.h>
84 #include <linux/delay.h>
85 #include <linux/spinlock.h>
86 #include <linux/crc32.h>
87 #include <linux/bitops.h>
89 #include <asm/processor.h>
90 #include <asm/io.h>
91 #include <asm/dma.h>
92 #include <asm/uaccess.h>
93 #include <asm/irq.h>
96 /* Board/System/Debug information/definition ---------------- */
97 #define PCI_DM9132_ID 0x91321282 /* Davicom DM9132 ID */
98 #define PCI_DM9102_ID 0x91021282 /* Davicom DM9102 ID */
99 #define PCI_DM9100_ID 0x91001282 /* Davicom DM9100 ID */
100 #define PCI_DM9009_ID 0x90091282 /* Davicom DM9009 ID */
102 #define DM9102_IO_SIZE 0x80
103 #define DM9102A_IO_SIZE 0x100
104 #define TX_MAX_SEND_CNT 0x1 /* Maximum tx packet per time */
105 #define TX_DESC_CNT 0x10 /* Allocated Tx descriptors */
106 #define RX_DESC_CNT 0x20 /* Allocated Rx descriptors */
107 #define TX_FREE_DESC_CNT (TX_DESC_CNT - 2) /* Max TX packet count */
108 #define TX_WAKE_DESC_CNT (TX_DESC_CNT - 3) /* TX wakeup count */
109 #define DESC_ALL_CNT (TX_DESC_CNT + RX_DESC_CNT)
110 #define TX_BUF_ALLOC 0x600
111 #define RX_ALLOC_SIZE 0x620
112 #define DM910X_RESET 1
113 #define CR0_DEFAULT 0x00E00000 /* TX & RX burst mode */
114 #define CR6_DEFAULT 0x00080000 /* HD */
115 #define CR7_DEFAULT 0x180c1
116 #define CR15_DEFAULT 0x06 /* TxJabber RxWatchdog */
117 #define TDES0_ERR_MASK 0x4302 /* TXJT, LC, EC, FUE */
118 #define MAX_PACKET_SIZE 1514
119 #define DMFE_MAX_MULTICAST 14
120 #define RX_COPY_SIZE 100
121 #define MAX_CHECK_PACKET 0x8000
122 #define DM9801_NOISE_FLOOR 8
123 #define DM9802_NOISE_FLOOR 5
125 #define DMFE_10MHF 0
126 #define DMFE_100MHF 1
127 #define DMFE_10MFD 4
128 #define DMFE_100MFD 5
129 #define DMFE_AUTO 8
130 #define DMFE_1M_HPNA 0x10
132 #define DMFE_TXTH_72 0x400000 /* TX TH 72 byte */
133 #define DMFE_TXTH_96 0x404000 /* TX TH 96 byte */
134 #define DMFE_TXTH_128 0x0000 /* TX TH 128 byte */
135 #define DMFE_TXTH_256 0x4000 /* TX TH 256 byte */
136 #define DMFE_TXTH_512 0x8000 /* TX TH 512 byte */
137 #define DMFE_TXTH_1K 0xC000 /* TX TH 1K byte */
139 #define DMFE_TIMER_WUT (jiffies + HZ * 1)/* timer wakeup time : 1 second */
140 #define DMFE_TX_TIMEOUT ((3*HZ)/2) /* tx packet time-out time 1.5 s" */
141 #define DMFE_TX_KICK (HZ/2) /* tx packet Kick-out time 0.5 s" */
/* Debug printout helper: logs "<msg> <value>" at KERN_ERR when the
 * module-wide dmfe_debug flag is set, or when the per-call dbug_now
 * argument forces output.  do/while(0)-wrapped, so it is safe as a
 * single statement in any context. */
#define DMFE_DBUG(dbug_now, msg, value) \
	do { \
		if (dmfe_debug || (dbug_now)) \
			printk(KERN_ERR DRV_NAME ": %s %lx\n",\
			       (msg), (long) (value)); \
	} while (0)

/* Announce a media-speed change; mode bit 0 selects 100/10 Mbps and
 * bit 2 full/half duplex (matches the DMFE_*MHF/MFD mode values).
 * NOTE(review): this macro's expansion ends with ';' and is not
 * do/while(0)-wrapped, so a call site inside an unbraced if/else
 * would misparse -- verify call sites before relying on it there. */
#define SHOW_MEDIA_TYPE(mode) \
	printk (KERN_INFO DRV_NAME ": Change Speed to %sMhz %s duplex\n" , \
		(mode & 1) ? "100":"10", (mode & 4) ? "full":"half");
155 /* CR9 definition: SROM/MII */
156 #define CR9_SROM_READ 0x4800
157 #define CR9_SRCS 0x1
158 #define CR9_SRCLK 0x2
159 #define CR9_CRDOUT 0x8
160 #define SROM_DATA_0 0x0
161 #define SROM_DATA_1 0x4
162 #define PHY_DATA_1 0x20000
163 #define PHY_DATA_0 0x00000
164 #define MDCLKH 0x10000
166 #define PHY_POWER_DOWN 0x800
168 #define SROM_V41_CODE 0x14
/*
 * Clock one SROM address/data bit out through CR9: drive the data bit
 * with chip-select asserted, pulse SRCLK high then low, allowing 5us
 * of settling time around each edge.
 *
 * Fix: the original expansion was three bare statements, which breaks
 * silently when used in an unbraced if/else body; wrap the body in
 * do { } while (0) so it behaves as a single statement, and
 * parenthesize the data argument against operator-precedence surprises.
 */
#define SROM_CLK_WRITE(data, ioaddr) \
	do { \
		outl((data)|CR9_SROM_READ|CR9_SRCS,ioaddr); \
		udelay(5); \
		outl((data)|CR9_SROM_READ|CR9_SRCS|CR9_SRCLK,ioaddr); \
		udelay(5); \
		outl((data)|CR9_SROM_READ|CR9_SRCS,ioaddr); \
		udelay(5); \
	} while (0)
/* Select the expected PCI I/O window size: the DM9132, and DM9102A
 * silicon at revision >= 0x02000030, decode DM9102A_IO_SIZE (0x100)
 * bytes; older chips only DM9102_IO_SIZE (0x80).  pci_id is laid out
 * as (device << 16) | vendor, matching the PCI_DM9xxx_ID constants
 * defined above. */
#define __CHK_IO_SIZE(pci_id, dev_rev) \
	(( ((pci_id)==PCI_DM9132_ID) || ((dev_rev) >= 0x02000030) ) ? \
	DM9102A_IO_SIZE: DM9102_IO_SIZE)

/* Convenience wrapper taking a struct pci_dev* instead of a packed id. */
#define CHK_IO_SIZE(pci_dev, dev_rev) \
	(__CHK_IO_SIZE(((pci_dev)->device << 16) | (pci_dev)->vendor, dev_rev))
185 /* Sten Check */
186 #define DEVICE net_device
188 /* Structure/enum declaration ------------------------------- */
/* One hardware Tx descriptor.  The four little-endian words are read
 * and written by the DM910x DMA engine; the trailing pointers are
 * driver-private (the ring is chained in software as a linked list).
 * 32-byte aligned for the chip's descriptor fetches. */
struct tx_desc {
	__le32 tdes0, tdes1, tdes2, tdes3; /* Data for the card */
	char *tx_buf_ptr;               /* Data for us: bounce buffer */
	struct tx_desc *next_tx_desc;   /* software ring linkage */
} __attribute__(( aligned(32) ));
/* One hardware Rx descriptor; same split as tx_desc: four
 * little-endian words owned by the chip, followed by driver-private
 * bookkeeping (the skb mapped into rdes2 and the ring linkage). */
struct rx_desc {
	__le32 rdes0, rdes1, rdes2, rdes3; /* Data for the card */
	struct sk_buff *rx_skb_ptr;     /* Data for us: skb backing rdes2 */
	struct rx_desc *next_rx_desc;   /* software ring linkage */
} __attribute__(( aligned(32) ));
/* Per-adapter private state, stored in the net_device private area
 * (see alloc_etherdev(sizeof(*db)) in dmfe_init_one). */
struct dmfe_board_info {
	u32 chip_id;			/* Chip vendor/Device ID */
	u32 chip_revision;		/* Chip revision */
	struct DEVICE *next_dev;	/* next device */
	struct pci_dev *pdev;		/* PCI device */
	spinlock_t lock;		/* guards register access + ring state */

	long ioaddr;			/* I/O base address */
	u32 cr0_data;			/* shadow copies of CR registers */
	u32 cr5_data;
	u32 cr6_data;
	u32 cr7_data;
	u32 cr15_data;

	/* pointer for memory physical address */
	dma_addr_t buf_pool_dma_ptr;	/* Tx buffer pool memory */
	dma_addr_t buf_pool_dma_start;	/* Tx buffer pool align dword */
	dma_addr_t desc_pool_dma_ptr;	/* descriptor pool memory */
	dma_addr_t first_tx_desc_dma;
	dma_addr_t first_rx_desc_dma;

	/* descriptor pointer */
	unsigned char *buf_pool_ptr;	/* Tx buffer pool memory */
	unsigned char *buf_pool_start;	/* Tx buffer pool align dword */
	unsigned char *desc_pool_ptr;	/* descriptor pool memory */
	struct tx_desc *first_tx_desc;
	struct tx_desc *tx_insert_ptr;	/* next free Tx descriptor */
	struct tx_desc *tx_remove_ptr;	/* oldest in-flight Tx descriptor */
	struct rx_desc *first_rx_desc;
	struct rx_desc *rx_insert_ptr;	/* next Rx slot to refill */
	struct rx_desc *rx_ready_ptr;	/* packet come pointer */
	unsigned long tx_packet_cnt;	/* transmitted packet count */
	unsigned long tx_queue_cnt;	/* wait to send packet count */
	unsigned long rx_avail_cnt;	/* available rx descriptor count */
	unsigned long interval_rx_cnt;	/* rx packet count a callback time */

	u16 HPNA_command;		/* For HPNA register 16 */
	u16 HPNA_timer;			/* For HPNA remote device check */
	u16 dbug_cnt;
	u16 NIC_capability;		/* NIC media capability */
	u16 PHY_reg4;			/* Saved Phyxcer register 4 value */

	u8 HPNA_present;		/* 0:none, 1:DM9801, 2:DM9802 */
	u8 chip_type;			/* Keep DM9102A chip type */
	u8 media_mode;			/* user specify media mode */
	u8 op_mode;			/* real work media mode */
	u8 phy_addr;
	u8 wait_reset;			/* Hardware failed, need to reset */
	u8 dm910x_chk_mode;		/* Operating mode check */
	u8 first_in_callback;		/* Flag to record state */
	struct timer_list timer;	/* 1s maintenance timer (dmfe_timer) */

	/* System defined statistic counter */
	struct net_device_stats stats;

	/* Driver defined statistic counter */
	unsigned long tx_fifo_underrun;
	unsigned long tx_loss_carrier;
	unsigned long tx_no_carrier;
	unsigned long tx_late_collision;
	unsigned long tx_excessive_collision;
	unsigned long tx_jabber_timeout;
	unsigned long reset_count;
	unsigned long reset_cr8;
	unsigned long reset_fatal;
	unsigned long reset_TXtimeout;

	/* NIC SROM data */
	unsigned char srom[128];
/* DM910x control/status register (CR0..CR15) offsets from the I/O
 * base; registers are spaced 8 bytes apart. */
enum dmfe_offsets {
	DCR0 = 0x00, DCR1 = 0x08, DCR2 = 0x10, DCR3 = 0x18, DCR4 = 0x20,
	DCR5 = 0x28, DCR6 = 0x30, DCR7 = 0x38, DCR8 = 0x40, DCR9 = 0x48,
	DCR10 = 0x50, DCR11 = 0x58, DCR12 = 0x60, DCR13 = 0x68, DCR14 = 0x70,
	DCR15 = 0x78
/* CR6 (operating mode register) bit flags used by this driver. */
enum dmfe_CR6_bits {
	CR6_RXSC = 0x2, CR6_PBF = 0x8, CR6_PM = 0x40, CR6_PAM = 0x80,
	CR6_FDM = 0x200, CR6_TXSC = 0x2000, CR6_STI = 0x100000,
	CR6_SFT = 0x200000, CR6_RXA = 0x40000000, CR6_NO_PURGE = 0x20000000
285 /* Global variable declaration ----------------------------- */
static int __devinitdata printed_version;	/* print banner only once */
static char version[] __devinitdata =
	KERN_INFO DRV_NAME ": Davicom DM9xxx net driver, version "
	DRV_VERSION " (" DRV_RELDATE ")\n";

static int dmfe_debug;				/* enables DMFE_DBUG output */
static unsigned char dmfe_media_mode = DMFE_AUTO;
static u32 dmfe_cr6_user_set;			/* user CR6 override (cr6set) */

/* For module input parameter */
static int debug;
static u32 cr6set;
static unsigned char mode = 8;			/* 8 == DMFE_AUTO */
static u8 chkmode = 1;
static u8 HPNA_mode;		/* Default: Low Power/High Speed */
static u8 HPNA_rx_cmd;		/* Default: Disable Rx remote command */
static u8 HPNA_tx_cmd;		/* Default: Don't issue remote command */
static u8 HPNA_NoiseFloor;	/* Default: HPNA NoiseFloor */
static u8 SF_mode;		/* Special Function: 1:VLAN, 2:RX Flow Control
				   4: TX pause packet */
308 /* function declaration ------------------------------------- */
/* Forward declarations for the driver's internal functions; all are
 * file-local.  (struct DEVICE is #defined to struct net_device.) */
static int dmfe_open(struct DEVICE *);
static int dmfe_start_xmit(struct sk_buff *, struct DEVICE *);
static int dmfe_stop(struct DEVICE *);
static struct net_device_stats * dmfe_get_stats(struct DEVICE *);
static void dmfe_set_filter_mode(struct DEVICE *);
static const struct ethtool_ops netdev_ethtool_ops;
static u16 read_srom_word(long ,int);
static irqreturn_t dmfe_interrupt(int , void *);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void poll_dmfe (struct net_device *dev);
#endif
static void dmfe_descriptor_init(struct dmfe_board_info *, unsigned long);
static void allocate_rx_buffer(struct dmfe_board_info *);
static void update_cr6(u32, unsigned long);
static void send_filter_frame(struct DEVICE * ,int);
static void dm9132_id_table(struct DEVICE * ,int);
static u16 phy_read(unsigned long, u8, u8, u32);
static void phy_write(unsigned long, u8, u8, u16, u32);
static void phy_write_1bit(unsigned long, u32);
static u16 phy_read_1bit(unsigned long);
static u8 dmfe_sense_speed(struct dmfe_board_info *);
static void dmfe_process_mode(struct dmfe_board_info *);
static void dmfe_timer(unsigned long);
static inline u32 cal_CRC(unsigned char *, unsigned int, u8);
static void dmfe_rx_packet(struct DEVICE *, struct dmfe_board_info *);
static void dmfe_free_tx_pkt(struct DEVICE *, struct dmfe_board_info *);
static void dmfe_reuse_skb(struct dmfe_board_info *, struct sk_buff *);
static void dmfe_dynamic_reset(struct DEVICE *);
static void dmfe_free_rxbuffer(struct dmfe_board_info *);
static void dmfe_init_dm910x(struct DEVICE *);
static void dmfe_parse_srom(struct dmfe_board_info *);
static void dmfe_program_DM9801(struct dmfe_board_info *, int);
static void dmfe_program_DM9802(struct dmfe_board_info *);
static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * );
static void dmfe_set_phyxcer(struct dmfe_board_info *);
345 /* DM910X network board routine ---------------------------- */
348 * Search DM910X board ,allocate space and register it
351 static int __devinit dmfe_init_one (struct pci_dev *pdev,
352 const struct pci_device_id *ent)
354 struct dmfe_board_info *db; /* board information structure */
355 struct net_device *dev;
356 u32 dev_rev, pci_pmr;
357 int i, err;
359 DMFE_DBUG(0, "dmfe_init_one()", 0);
361 if (!printed_version++)
362 printk(version);
364 /* Init network device */
365 dev = alloc_etherdev(sizeof(*db));
366 if (dev == NULL)
367 return -ENOMEM;
368 SET_MODULE_OWNER(dev);
369 SET_NETDEV_DEV(dev, &pdev->dev);
371 if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
372 printk(KERN_WARNING DRV_NAME
373 ": 32-bit PCI DMA not available.\n");
374 err = -ENODEV;
375 goto err_out_free;
378 /* Enable Master/IO access, Disable memory access */
379 err = pci_enable_device(pdev);
380 if (err)
381 goto err_out_free;
383 if (!pci_resource_start(pdev, 0)) {
384 printk(KERN_ERR DRV_NAME ": I/O base is zero\n");
385 err = -ENODEV;
386 goto err_out_disable;
389 /* Read Chip revision */
390 pci_read_config_dword(pdev, PCI_REVISION_ID, &dev_rev);
392 if (pci_resource_len(pdev, 0) < (CHK_IO_SIZE(pdev, dev_rev)) ) {
393 printk(KERN_ERR DRV_NAME ": Allocated I/O size too small\n");
394 err = -ENODEV;
395 goto err_out_disable;
398 #if 0 /* pci_{enable_device,set_master} sets minimum latency for us now */
400 /* Set Latency Timer 80h */
401 /* FIXME: setting values > 32 breaks some SiS 559x stuff.
402 Need a PCI quirk.. */
404 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x80);
405 #endif
407 if (pci_request_regions(pdev, DRV_NAME)) {
408 printk(KERN_ERR DRV_NAME ": Failed to request PCI regions\n");
409 err = -ENODEV;
410 goto err_out_disable;
413 /* Init system & device */
414 db = netdev_priv(dev);
416 /* Allocate Tx/Rx descriptor memory */
417 db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) *
418 DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr);
420 db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC *
421 TX_DESC_CNT + 4, &db->buf_pool_dma_ptr);
423 db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr;
424 db->first_tx_desc_dma = db->desc_pool_dma_ptr;
425 db->buf_pool_start = db->buf_pool_ptr;
426 db->buf_pool_dma_start = db->buf_pool_dma_ptr;
428 db->chip_id = ent->driver_data;
429 db->ioaddr = pci_resource_start(pdev, 0);
430 db->chip_revision = dev_rev;
432 db->pdev = pdev;
434 dev->base_addr = db->ioaddr;
435 dev->irq = pdev->irq;
436 pci_set_drvdata(pdev, dev);
437 dev->open = &dmfe_open;
438 dev->hard_start_xmit = &dmfe_start_xmit;
439 dev->stop = &dmfe_stop;
440 dev->get_stats = &dmfe_get_stats;
441 dev->set_multicast_list = &dmfe_set_filter_mode;
442 #ifdef CONFIG_NET_POLL_CONTROLLER
443 dev->poll_controller = &poll_dmfe;
444 #endif
445 dev->ethtool_ops = &netdev_ethtool_ops;
446 netif_carrier_off(dev);
447 spin_lock_init(&db->lock);
449 pci_read_config_dword(pdev, 0x50, &pci_pmr);
450 pci_pmr &= 0x70000;
451 if ( (pci_pmr == 0x10000) && (dev_rev == 0x02000031) )
452 db->chip_type = 1; /* DM9102A E3 */
453 else
454 db->chip_type = 0;
456 /* read 64 word srom data */
457 for (i = 0; i < 64; i++)
458 ((__le16 *) db->srom)[i] =
459 cpu_to_le16(read_srom_word(db->ioaddr, i));
461 /* Set Node address */
462 for (i = 0; i < 6; i++)
463 dev->dev_addr[i] = db->srom[20 + i];
465 err = register_netdev (dev);
466 if (err)
467 goto err_out_res;
469 printk(KERN_INFO "%s: Davicom DM%04lx at pci%s,",
470 dev->name,
471 ent->driver_data >> 16,
472 pci_name(pdev));
473 for (i = 0; i < 6; i++)
474 printk("%c%02x", i ? ':' : ' ', dev->dev_addr[i]);
475 printk(", irq %d.\n", dev->irq);
477 pci_set_master(pdev);
479 return 0;
481 err_out_res:
482 pci_release_regions(pdev);
483 err_out_disable:
484 pci_disable_device(pdev);
485 err_out_free:
486 pci_set_drvdata(pdev, NULL);
487 free_netdev(dev);
489 return err;
493 static void __devexit dmfe_remove_one (struct pci_dev *pdev)
495 struct net_device *dev = pci_get_drvdata(pdev);
496 struct dmfe_board_info *db = netdev_priv(dev);
498 DMFE_DBUG(0, "dmfe_remove_one()", 0);
500 if (dev) {
502 unregister_netdev(dev);
504 pci_free_consistent(db->pdev, sizeof(struct tx_desc) *
505 DESC_ALL_CNT + 0x20, db->desc_pool_ptr,
506 db->desc_pool_dma_ptr);
507 pci_free_consistent(db->pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
508 db->buf_pool_ptr, db->buf_pool_dma_ptr);
509 pci_release_regions(pdev);
510 free_netdev(dev); /* free board information */
512 pci_set_drvdata(pdev, NULL);
515 DMFE_DBUG(0, "dmfe_remove_one() exit", 0);
520 * Open the interface.
521 * The interface is opened whenever "ifconfig" actives it.
/*
 * Open the interface ("ifconfig up"): request the shared IRQ, reset
 * the software state, choose the CR0/CR6 operating mode from the chip
 * id/revision, initialise the hardware, and start the 1s maintenance
 * timer (first expiry delayed by an extra 2s).
 * Returns 0, or the negative errno from request_irq().
 */
static int dmfe_open(struct DEVICE *dev)
	int ret;
	struct dmfe_board_info *db = netdev_priv(dev);

	DMFE_DBUG(0, "dmfe_open", 0);

	/* IRQ is shared; dev is the cookie handed back to the handler */
	ret = request_irq(dev->irq, &dmfe_interrupt,
			  IRQF_SHARED, dev->name, dev);
	if (ret)
		return ret;

	/* system variable init */
	db->cr6_data = CR6_DEFAULT | dmfe_cr6_user_set;
	db->tx_packet_cnt = 0;
	db->tx_queue_cnt = 0;
	db->rx_avail_cnt = 0;
	db->wait_reset = 0;

	db->first_in_callback = 0;
	db->NIC_capability = 0xf;	/* All capability*/
	db->PHY_reg4 = 0x1e0;

	/* CR6 operation mode decision: DM9132 and newer DM9102A silicon
	 * run normally (chk_mode 4); older parts use store-and-forward
	 * plus the software CRC check mode (chk_mode 1). */
	if ( !chkmode || (db->chip_id == PCI_DM9132_ID) ||
		(db->chip_revision >= 0x02000030) ) {
       		db->cr6_data |= DMFE_TXTH_256;
		db->cr0_data = CR0_DEFAULT;
		db->dm910x_chk_mode=4;		/* Enter the normal mode */
 	} else {
		db->cr6_data |= CR6_SFT;	/* Store & Forward mode */
		db->cr0_data = 0;
		db->dm910x_chk_mode = 1;	/* Enter the check mode */

	/* Initilize DM910X board */
	dmfe_init_dm910x(dev);

	/* Active System Interface */
	netif_wake_queue(dev);

	/* set and active a timer process */
	init_timer(&db->timer);
	db->timer.expires = DMFE_TIMER_WUT + HZ * 2;
	db->timer.data = (unsigned long)dev;
	db->timer.function = &dmfe_timer;
	add_timer(&db->timer);

	return 0;
576 /* Initilize DM910X board
577 * Reset DM910X board
578 * Initilize TX/Rx descriptor chain structure
579 * Send the set-up frame
580 * Enable Tx/Rx machine
/* Full hardware bring-up; the register/delay sequence below is
 * order-sensitive and must not be rearranged. */
static void dmfe_init_dm910x(struct DEVICE *dev)
	struct dmfe_board_info *db = netdev_priv(dev);
	unsigned long ioaddr = db->ioaddr;

	DMFE_DBUG(0, "dmfe_init_dm910x()", 0);

	/* Reset DM910x MAC controller */
	outl(DM910X_RESET, ioaddr + DCR0);	/* RESET MAC */
	udelay(100);
	outl(db->cr0_data, ioaddr + DCR0);
	udelay(5);

	/* Phy addr : DM910(A)2/DM9132/9801, phy address = 1 */
	db->phy_addr = 1;

	/* Parser SROM and media mode */
	dmfe_parse_srom(db);
	db->media_mode = dmfe_media_mode;

	/* RESET Phyxcer Chip by GPR port bit 7 */
	outl(0x180, ioaddr + DCR12);		/* Let bit 7 output port */
	if (db->chip_id == PCI_DM9009_ID) {
		outl(0x80, ioaddr + DCR12);	/* Issue RESET signal */
		mdelay(300);			/* Delay 300 ms */
	outl(0x0, ioaddr + DCR12);	/* Clear RESET signal */

	/* Process Phyxcer Media Mode */
	if ( !(db->media_mode & 0x10) )	/* Force 1M mode */
		dmfe_set_phyxcer(db);

	/* Media Mode Process */
	if ( !(db->media_mode & DMFE_AUTO) )
		db->op_mode = db->media_mode; 	/* Force Mode */

	/* Initiliaze Transmit/Receive decriptor and CR3/4 */
	dmfe_descriptor_init(db, ioaddr);

	/* Init CR6 to program DM910x operation */
	update_cr6(db->cr6_data, ioaddr);

	/* Send setup frame to program the address filter */
	if (db->chip_id == PCI_DM9132_ID)
		dm9132_id_table(dev, dev->mc_count);	/* DM9132 */
	else
		send_filter_frame(dev, dev->mc_count);	/* DM9102/DM9102A */

	/* Init CR7, interrupt active bit */
	db->cr7_data = CR7_DEFAULT;
	outl(db->cr7_data, ioaddr + DCR7);

	/* Init CR15, Tx jabber and Rx watchdog timer */
	outl(db->cr15_data, ioaddr + DCR15);

	/* Enable DM910X Tx/Rx function */
	db->cr6_data |= CR6_RXSC | CR6_TXSC | 0x40000;
	update_cr6(db->cr6_data, ioaddr);
645 * Hardware start transmission.
646 * Send a packet to media from the upper layer.
/*
 * Queue one packet for transmission.  The payload is copied into the
 * descriptor's pre-allocated DMA bounce buffer, so the skb is always
 * freed before returning.  Returns 0 on accept (or silent drop of an
 * oversize frame) and 1 when no descriptor is free.
 */
static int dmfe_start_xmit(struct sk_buff *skb, struct DEVICE *dev)
	struct dmfe_board_info *db = netdev_priv(dev);
	struct tx_desc *txptr;
	unsigned long flags;

	DMFE_DBUG(0, "dmfe_start_xmit", 0);

	/* Resource flag check: stop the queue now, re-woken further
	 * down once we know descriptors remain */
	netif_stop_queue(dev);

	/* Too large packet check */
	if (skb->len > MAX_PACKET_SIZE) {
		printk(KERN_ERR DRV_NAME ": big packet = %d\n", (u16)skb->len);
		dev_kfree_skb(skb);
		return 0;

	spin_lock_irqsave(&db->lock, flags);

	/* No Tx resource check, it never happen nromally */
	if (db->tx_queue_cnt >= TX_FREE_DESC_CNT) {
		spin_unlock_irqrestore(&db->lock, flags);
		printk(KERN_ERR DRV_NAME ": No Tx resource %ld\n",
		       db->tx_queue_cnt);
		return 1;

	/* Disable NIC interrupt while manipulating the Tx ring */
	outl(0, dev->base_addr + DCR7);

	/* transmit this packet: copy into the bounce buffer and set
	 * the length/control word */
	txptr = db->tx_insert_ptr;
	skb_copy_from_linear_data(skb, txptr->tx_buf_ptr, skb->len);
	txptr->tdes1 = cpu_to_le32(0xe1000000 | skb->len);

	/* Point to next transmit free descriptor */
	db->tx_insert_ptr = txptr->next_tx_desc;

	/* Transmit Packet Process: hand to hardware at once if within
	 * the in-flight budget (TX_MAX_SEND_CNT), else leave it queued */
	if ( (!db->tx_queue_cnt) && (db->tx_packet_cnt < TX_MAX_SEND_CNT) ) {
		txptr->tdes0 = cpu_to_le32(0x80000000);	/* Set owner bit */
		db->tx_packet_cnt++;			/* Ready to send */
		outl(0x1, dev->base_addr + DCR1);	/* Issue Tx polling */
		dev->trans_start = jiffies;		/* saved time stamp */
	} else {
		db->tx_queue_cnt++;			/* queue TX packet */
		outl(0x1, dev->base_addr + DCR1);	/* Issue Tx polling */

	/* Tx resource check */
	if ( db->tx_queue_cnt < TX_FREE_DESC_CNT )
		netif_wake_queue(dev);

	/* Restore CR7 to enable interrupt */
	spin_unlock_irqrestore(&db->lock, flags);
	outl(db->cr7_data, dev->base_addr + DCR7);

	/* free this SKB: contents already copied to the bounce buffer */
	dev_kfree_skb(skb);

	return 0;
715 * Stop the interface.
716 * The interface is stopped when it is brought.
/*
 * Bring the interface down: stop the queue and maintenance timer,
 * reset the MAC, write the PHY, release the IRQ and free all Rx skbs.
 * Always returns 0.
 */
static int dmfe_stop(struct DEVICE *dev)
	struct dmfe_board_info *db = netdev_priv(dev);
	unsigned long ioaddr = dev->base_addr;

	DMFE_DBUG(0, "dmfe_stop", 0);

	/* disable system */
	netif_stop_queue(dev);

	/* deleted timer */
	del_timer_sync(&db->timer);

	/* Reset & stop DM910X board */
	outl(DM910X_RESET, ioaddr + DCR0);
	udelay(5);
	/* writes MII register 0 = 0x8000 -- presumably a PHY reset
	 * (BMCR bit 15); confirm against the PHY datasheet */
	phy_write(db->ioaddr, db->phy_addr, 0, 0x8000, db->chip_id);

	/* free interrupt */
	free_irq(dev->irq, dev);

	/* free allocated rx buffer */
	dmfe_free_rxbuffer(db);

#if 0
	/* show statistic counter */
	printk(DRV_NAME ": FU:%lx EC:%lx LC:%lx NC:%lx"
		" LOC:%lx TXJT:%lx RESET:%lx RCR8:%lx FAL:%lx TT:%lx\n",
		db->tx_fifo_underrun, db->tx_excessive_collision,
		db->tx_late_collision, db->tx_no_carrier, db->tx_loss_carrier,
		db->tx_jabber_timeout, db->reset_count, db->reset_cr8,
		db->reset_fatal, db->reset_TXtimeout);
#endif

	return 0;
758 * DM9102 insterrupt handler
759 * receive the packet to upper layer, free the transmitted packet
/*
 * Interrupt handler: acknowledge CR5 status, hand received frames to
 * the stack, refill the Rx ring, reclaim completed Tx descriptors,
 * and flag a deferred reset on fatal bus errors.
 */
static irqreturn_t dmfe_interrupt(int irq, void *dev_id)
	struct DEVICE *dev = dev_id;
	struct dmfe_board_info *db = netdev_priv(dev);
	unsigned long ioaddr = dev->base_addr;
	unsigned long flags;

	DMFE_DBUG(0, "dmfe_interrupt()", 0);

	spin_lock_irqsave(&db->lock, flags);

	/* Got DM910X status: writing the value back acknowledges the
	 * asserted interrupt causes */
	db->cr5_data = inl(ioaddr + DCR5);
	outl(db->cr5_data, ioaddr + DCR5);
	if ( !(db->cr5_data & 0xc1) ) {
		/* no serviced cause bits set.  NOTE(review): returning
		 * IRQ_HANDLED here (not IRQ_NONE) hides spurious shared
		 * interrupts from the kernel -- confirm before changing */
		spin_unlock_irqrestore(&db->lock, flags);
		return IRQ_HANDLED;

	/* Disable all interrupt in CR7 to solve the interrupt edge problem */
	outl(0, ioaddr + DCR7);

	/* Check system status */
	if (db->cr5_data & 0x2000) {
		/* system bus error happen: defer the full reset to the
		 * maintenance timer via wait_reset */
		DMFE_DBUG(1, "System bus error happen. CR5=", db->cr5_data);
		db->reset_fatal++;
		db->wait_reset = 1;	/* Need to RESET */
		spin_unlock_irqrestore(&db->lock, flags);
		return IRQ_HANDLED;

	 /* Received the coming packet */
	if ( (db->cr5_data & 0x40) && db->rx_avail_cnt )
		dmfe_rx_packet(dev, db);

	/* reallocate rx descriptor buffer */
	if (db->rx_avail_cnt<RX_DESC_CNT)
		allocate_rx_buffer(db);

	/* Free the transmitted descriptor */
	if ( db->cr5_data & 0x01)
		dmfe_free_tx_pkt(dev, db);

	/* Mode Check: leave the software CRC-check mode once triggered */
	if (db->dm910x_chk_mode & 0x2) {
		db->dm910x_chk_mode = 0x4;
		db->cr6_data |= 0x100;
		update_cr6(db->cr6_data, db->ioaddr);

	/* Restore CR7 to enable interrupt mask */
	outl(db->cr7_data, ioaddr + DCR7);

	spin_unlock_irqrestore(&db->lock, flags);
	return IRQ_HANDLED;
821 #ifdef CONFIG_NET_POLL_CONTROLLER
823 * Polling 'interrupt' - used by things like netconsole to send skbs
824 * without having to re-enable interrupts. It's not called while
825 * the interrupt routine is executing.
/* Netpoll hook: emulate an interrupt with the device IRQ masked off. */
static void poll_dmfe (struct net_device *dev)
	/* disable_irq here is not very nice, but with the lockless
	   interrupt handler we have no other choice. */
	disable_irq(dev->irq);
	dmfe_interrupt (dev->irq, dev);
	enable_irq(dev->irq);
836 #endif
839 * Free TX resource after TX complete
/*
 * Reclaim completed Tx descriptors: account statistics, kick the next
 * queued packet to the hardware, and wake the transmit queue when
 * enough descriptors are free.  Called from dmfe_interrupt() with
 * db->lock held.
 */
static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info * db)
	struct tx_desc *txptr;
	unsigned long ioaddr = dev->base_addr;
	u32 tdes0;

	txptr = db->tx_remove_ptr;
	while(db->tx_packet_cnt) {
		tdes0 = le32_to_cpu(txptr->tdes0);
		/* printk(DRV_NAME ": tdes0=%x\n", tdes0); */
		if (tdes0 & 0x80000000)
			break;	/* descriptor still owned by hardware */

		/* A packet sent completed */
		db->tx_packet_cnt--;
		db->stats.tx_packets++;

		/* Transmit statistic counter */
		if ( tdes0 != 0x7fffffff ) {
			/* printk(DRV_NAME ": tdes0=%x\n", tdes0); */
			db->stats.collisions += (tdes0 >> 3) & 0xf;
			db->stats.tx_bytes += le32_to_cpu(txptr->tdes1) & 0x7ff;
			if (tdes0 & TDES0_ERR_MASK) {
				db->stats.tx_errors++;

				if (tdes0 & 0x0002) {	/* UnderRun */
					db->tx_fifo_underrun++;
					if ( !(db->cr6_data & CR6_SFT) ) {
						/* switch to store-and-forward
						 * after a FIFO underrun */
						db->cr6_data = db->cr6_data | CR6_SFT;
						update_cr6(db->cr6_data, db->ioaddr);

				if (tdes0 & 0x0100)
					db->tx_excessive_collision++;
				if (tdes0 & 0x0200)
					db->tx_late_collision++;
				if (tdes0 & 0x0400)
					db->tx_no_carrier++;
				if (tdes0 & 0x0800)
					db->tx_loss_carrier++;
				if (tdes0 & 0x4000)
					db->tx_jabber_timeout++;

		txptr = txptr->next_tx_desc;
	}/* End of while */

	/* Update TX remove pointer to next */
	db->tx_remove_ptr = txptr;

	/* Send the Tx packet in queue */
	if ( (db->tx_packet_cnt < TX_MAX_SEND_CNT) && db->tx_queue_cnt ) {
		txptr->tdes0 = cpu_to_le32(0x80000000);	/* Set owner bit */
		db->tx_packet_cnt++;			/* Ready to send */
		db->tx_queue_cnt--;
		outl(0x1, ioaddr + DCR1);		/* Issue Tx polling */
		dev->trans_start = jiffies;		/* saved time stamp */

	/* Resource available check */
	if ( db->tx_queue_cnt < TX_WAKE_DESC_CNT )
		netif_wake_queue(dev);	/* Active upper layer, send again */
909 * Calculate the CRC valude of the Rx packet
910 * flag = 1 : return the reverse CRC (for the received packet CRC)
911 * 0 : return the normal CRC (for Hash Table index)
914 static inline u32 cal_CRC(unsigned char * Data, unsigned int Len, u8 flag)
916 u32 crc = crc32(~0, Data, Len);
917 if (flag) crc = ~crc;
918 return crc;
923 * Receive the come packet and pass to upper layer
/*
 * Drain received frames from the Rx ring and hand them to the stack.
 * Frames shorter than RX_COPY_SIZE are copied into a small fresh skb
 * so the full-size Rx buffer can be recycled immediately; error
 * frames and fragments lacking both First/Last flags are recycled via
 * dmfe_reuse_skb().  Called with db->lock held.
 */
static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db)
	struct rx_desc *rxptr;
	struct sk_buff *skb, *newskb;
	int rxlen;
	u32 rdes0;

	rxptr = db->rx_ready_ptr;

	while(db->rx_avail_cnt) {
		rdes0 = le32_to_cpu(rxptr->rdes0);
		if (rdes0 & 0x80000000)	/* packet owner check */
			break;

		db->rx_avail_cnt--;
		db->interval_rx_cnt++;

		pci_unmap_single(db->pdev, le32_to_cpu(rxptr->rdes2),
				 RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE);

		if ( (rdes0 & 0x300) != 0x300) {
			/* A packet without First/Last flag */
			/* reuse this SKB */
			DMFE_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
			dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
		} else {
			/* A packet with First/Last flag */
			rxlen = ( (rdes0 >> 16) & 0x3fff) - 4;	/* minus FCS */

			/* error summary bit check */
			if (rdes0 & 0x8000) {
				/* This is a error packet */
				//printk(DRV_NAME ": rdes0: %lx\n", rdes0);
				db->stats.rx_errors++;
				if (rdes0 & 1)
					db->stats.rx_fifo_errors++;
				if (rdes0 & 2)
					db->stats.rx_crc_errors++;
				if (rdes0 & 0x80)
					db->stats.rx_length_errors++;

			/* deliver when error-free; in promiscuous mode also
			 * deliver errored frames of plausible length */
			if ( !(rdes0 & 0x8000) ||
				((db->cr6_data & CR6_PM) && (rxlen>6)) ) {
				skb = rxptr->rx_skb_ptr;

				/* Received Packet CRC check need or not */
				if ( (db->dm910x_chk_mode & 1) &&
					(cal_CRC(skb->data, rxlen, 1) !=
					(*(u32 *) (skb->data+rxlen) ))) {	/* FIXME (?) */
					/* Found a error received packet */
					dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
					db->dm910x_chk_mode = 3;
				} else {
					/* Good packet, send to upper layer */
					/* Shorst packet used new SKB */
					if ((rxlen < RX_COPY_SIZE) &&
						((newskb = dev_alloc_skb(rxlen + 2))
						!= NULL)) {

						skb = newskb;
						/* size less than COPY_SIZE, allocate a rxlen SKB */
						skb_reserve(skb, 2); /* 16byte align */
						skb_copy_from_linear_data(rxptr->rx_skb_ptr,
							  skb_put(skb, rxlen),
									  rxlen);
						dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
					} else
						skb_put(skb, rxlen);

					skb->protocol = eth_type_trans(skb, dev);
					netif_rx(skb);
					dev->last_rx = jiffies;
					db->stats.rx_packets++;
					db->stats.rx_bytes += rxlen;

			} else {
				/* Reuse SKB buffer when the packet is error */
				DMFE_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
				dmfe_reuse_skb(db, rxptr->rx_skb_ptr);

		rxptr = rxptr->next_rx_desc;

	db->rx_ready_ptr = rxptr;
1017 * Get statistics from driver.
1020 static struct net_device_stats * dmfe_get_stats(struct DEVICE *dev)
1022 struct dmfe_board_info *db = netdev_priv(dev);
1024 DMFE_DBUG(0, "dmfe_get_stats", 0);
1025 return &db->stats;
1030 * Set DM910X multicast address
/*
 * Apply the interface's filtering flags: promiscuous, all-multicast,
 * or reprogram the address filter with up to DMFE_MAX_MULTICAST
 * multicast entries.
 */
static void dmfe_set_filter_mode(struct DEVICE * dev)
	struct dmfe_board_info *db = netdev_priv(dev);
	unsigned long flags;

	DMFE_DBUG(0, "dmfe_set_filter_mode()", 0);
	spin_lock_irqsave(&db->lock, flags);

	if (dev->flags & IFF_PROMISC) {
		DMFE_DBUG(0, "Enable PROM Mode", 0);
		db->cr6_data |= CR6_PM | CR6_PBF;
		update_cr6(db->cr6_data, db->ioaddr);
		spin_unlock_irqrestore(&db->lock, flags);
		return;

	if (dev->flags & IFF_ALLMULTI || dev->mc_count > DMFE_MAX_MULTICAST) {
		DMFE_DBUG(0, "Pass all multicast address", dev->mc_count);
		db->cr6_data &= ~(CR6_PM | CR6_PBF);
		db->cr6_data |= CR6_PAM;
		/* NOTE(review): unlike the promiscuous branch, cr6 is not
		 * written to the chip here -- presumably picked up by a
		 * later update_cr6() call; confirm this is intentional */
		spin_unlock_irqrestore(&db->lock, flags);
		return;

	DMFE_DBUG(0, "Set multicast address", dev->mc_count);
	if (db->chip_id == PCI_DM9132_ID)
		dm9132_id_table(dev, dev->mc_count);	/* DM9132 */
	else
		send_filter_frame(dev, dev->mc_count); 	/* DM9102/DM9102A */
	spin_unlock_irqrestore(&db->lock, flags);
1065 static void netdev_get_drvinfo(struct net_device *dev,
1066 struct ethtool_drvinfo *info)
1068 struct dmfe_board_info *np = netdev_priv(dev);
1070 strcpy(info->driver, DRV_NAME);
1071 strcpy(info->version, DRV_VERSION);
1072 if (np->pdev)
1073 strcpy(info->bus_info, pci_name(np->pdev));
1074 else
1075 sprintf(info->bus_info, "EISA 0x%lx %d",
1076 dev->base_addr, dev->irq);
/* ethtool operations implemented by this driver */
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.get_link               = ethtool_op_get_link,
/*
 *	A periodic timer routine.
 *	Dynamic media sense, dynamic chip reset on error/Tx timeout,
 *	HPNA remote command polling. Re-arms itself (DMFE_TIMER_WUT)
 *	on every path; all work is done under db->lock.
 */
static void dmfe_timer(unsigned long data)
{
	u32 tmp_cr8;
	unsigned char tmp_cr12;
	struct DEVICE *dev = (struct DEVICE *) data;
	struct dmfe_board_info *db = netdev_priv(dev);
	unsigned long flags;
	int link_ok, link_ok_phy;

	DMFE_DBUG(0, "dmfe_timer()", 0);
	spin_lock_irqsave(&db->lock, flags);

	/* Media mode process when Link OK before enter this route.
	 * First invocation on DM9102 (chip_type set): bounce the PHY
	 * through a reset (BMCR 0x1000) with the MII select bit in CR6
	 * toggled, then come back in 2 extra seconds. */
	if (db->first_in_callback == 0) {
		db->first_in_callback = 1;
		if (db->chip_type && (db->chip_id==PCI_DM9102_ID)) {
			db->cr6_data &= ~0x40000;
			update_cr6(db->cr6_data, db->ioaddr);
			phy_write(db->ioaddr,
				  db->phy_addr, 0, 0x1000, db->chip_id);
			db->cr6_data |= 0x40000;
			update_cr6(db->cr6_data, db->ioaddr);
			db->timer.expires = DMFE_TIMER_WUT + HZ * 2;
			add_timer(&db->timer);
			spin_unlock_irqrestore(&db->lock, flags);
			return;
		}
	}

	/* Operating Mode Check: leave the diagnostic check mode once
	 * enough packets have been received */
	if ( (db->dm910x_chk_mode & 0x1) &&
		(db->stats.rx_packets > MAX_CHECK_PACKET) )
		db->dm910x_chk_mode = 0x4;

	/* Dynamic reset DM910X : system error or transmit time-out.
	 * CR8 non-zero with no rx activity in the last interval is
	 * treated as a chip error. */
	tmp_cr8 = inl(db->ioaddr + DCR8);
	if ( (db->interval_rx_cnt==0) && (tmp_cr8) ) {
		db->reset_cr8++;
		db->wait_reset = 1;
	}
	db->interval_rx_cnt = 0;

	/* TX polling kick monitor: packets pending but no Tx progress
	 * for DMFE_TX_KICK jiffies -> poke the Tx poll demand register */
	if ( db->tx_packet_cnt &&
	     time_after(jiffies, dev->trans_start + DMFE_TX_KICK) ) {
		outl(0x1, dev->base_addr + DCR1);	/* Tx polling again */

		/* TX Timeout: still stuck after DMFE_TX_TIMEOUT, schedule
		 * a full dynamic reset below */
		if ( time_after(jiffies, dev->trans_start + DMFE_TX_TIMEOUT) ) {
			db->reset_TXtimeout++;
			db->wait_reset = 1;
			printk(KERN_WARNING "%s: Tx timeout - resetting\n",
			       dev->name);
		}
	}

	if (db->wait_reset) {
		DMFE_DBUG(0, "Dynamic Reset device", db->tx_packet_cnt);
		db->reset_count++;
		dmfe_dynamic_reset(dev);
		db->first_in_callback = 0;
		db->timer.expires = DMFE_TIMER_WUT;
		add_timer(&db->timer);
		spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	/* Link status check, Dynamic media type change */
	if (db->chip_id == PCI_DM9132_ID)
		tmp_cr12 = inb(db->ioaddr + DCR9 + 3);	/* DM9132 */
	else
		tmp_cr12 = inb(db->ioaddr + DCR12);	/* DM9102/DM9102A */

	if ( ((db->chip_id == PCI_DM9102_ID) &&
		(db->chip_revision == 0x02000030)) ||
		((db->chip_id == PCI_DM9132_ID) &&
		(db->chip_revision == 0x02000010)) ) {
		/* DM9102A Chip: bit 1 set means link failed */
		if (tmp_cr12 & 2)
			link_ok = 0;
		else
			link_ok = 1;
	}
	else
		/* 0x43 is used instead of 0x3 because bit 6 should represent
		   link status of external PHY */
		link_ok = (tmp_cr12 & 0x43) ? 1 : 0;

	/* If chip reports that link is failed it could be because external
	   PHY link status pin is not connected correctly to chip.
	   To be sure ask PHY too. */

	/* need a dummy read because of PHY's register latch */
	phy_read (db->ioaddr, db->phy_addr, 1, db->chip_id);
	link_ok_phy = (phy_read (db->ioaddr,
				 db->phy_addr, 1, db->chip_id) & 0x4) ? 1 : 0;

	/* Believe a link if either source reports one */
	if (link_ok_phy != link_ok) {
		DMFE_DBUG (0, "PHY and chip report different link status", 0);
		link_ok = link_ok | link_ok_phy;
	}

	if ( !link_ok && netif_carrier_ok(dev)) {
		/* Link Failed */
		DMFE_DBUG(0, "Link Failed", tmp_cr12);
		netif_carrier_off(dev);

		/* For Force 10/100M Half/Full mode: Enable Auto-Nego mode */
		/* AUTO or force 1M Homerun/Longrun don't need */
		if ( !(db->media_mode & 0x38) )
			phy_write(db->ioaddr, db->phy_addr,
				  0, 0x1000, db->chip_id);

		/* AUTO mode, if INT phyxcer link failed, select EXT device */
		if (db->media_mode & DMFE_AUTO) {
			/* 10/100M link failed, used 1M Home-Net */
			db->cr6_data|=0x00040000;	/* bit18=1, MII */
			db->cr6_data&=~0x00000200;	/* bit9=0, HD mode */
			update_cr6(db->cr6_data, db->ioaddr);
		}
	} else if (!netif_carrier_ok(dev)) {

		DMFE_DBUG(0, "Link link OK", tmp_cr12);

		/* Auto Sense Speed: in AUTO mode only announce carrier
		 * once speed sensing succeeds */
		if ( !(db->media_mode & DMFE_AUTO) || !dmfe_sense_speed(db)) {
			netif_carrier_on(dev);
			SHOW_MEDIA_TYPE(db->op_mode);
		}

		dmfe_process_mode(db);
	}

	/* HPNA remote command check */
	if (db->HPNA_command & 0xf00) {
		db->HPNA_timer--;
		if (!db->HPNA_timer)
			dmfe_HPNA_remote_cmd_chk(db);
	}

	/* Timer active again */
	db->timer.expires = DMFE_TIMER_WUT;
	add_timer(&db->timer);
	spin_unlock_irqrestore(&db->lock, flags);
}
/*
 *	Dynamic reset the DM910X board:
 *	Stop DM910X board,
 *	Free Tx/Rx allocated memory,
 *	Reset DM910X board,
 *	Re-initialize DM910X board.
 *	Caller holds db->lock (invoked from the timer path).
 */
static void dmfe_dynamic_reset(struct DEVICE *dev)
{
	struct dmfe_board_info *db = netdev_priv(dev);

	DMFE_DBUG(0, "dmfe_dynamic_reset()", 0);

	/* Stop MAC controller */
	db->cr6_data &= ~(CR6_RXSC | CR6_TXSC);	/* Disable Tx/Rx */
	update_cr6(db->cr6_data, dev->base_addr);
	outl(0, dev->base_addr + DCR7);		/* Disable Interrupt */
	/* Ack any pending interrupt status by writing it back */
	outl(inl(dev->base_addr + DCR5), dev->base_addr + DCR5);

	/* Disable upper layer interface */
	netif_stop_queue(dev);

	/* Free Rx Allocate buffer */
	dmfe_free_rxbuffer(db);

	/* system variable init */
	db->tx_packet_cnt = 0;
	db->tx_queue_cnt = 0;
	db->rx_avail_cnt = 0;
	netif_carrier_off(dev);
	db->wait_reset = 0;

	/* Re-initialize DM910X board */
	dmfe_init_dm910x(dev);

	/* Restart upper layer interface */
	netif_wake_queue(dev);
}
1282 * free all allocated rx buffer
1285 static void dmfe_free_rxbuffer(struct dmfe_board_info * db)
1287 DMFE_DBUG(0, "dmfe_free_rxbuffer()", 0);
1289 /* free allocated rx buffer */
1290 while (db->rx_avail_cnt) {
1291 dev_kfree_skb(db->rx_ready_ptr->rx_skb_ptr);
1292 db->rx_ready_ptr = db->rx_ready_ptr->next_rx_desc;
1293 db->rx_avail_cnt--;
/*
 *	Reuse the SK buffer: hand an skb (whose packet was bad) back to
 *	the chip at the current insert position instead of freeing it.
 */
static void dmfe_reuse_skb(struct dmfe_board_info *db, struct sk_buff * skb)
{
	struct rx_desc *rxptr = db->rx_insert_ptr;

	/* Only if the descriptor is not still owned by the chip */
	if (!(rxptr->rdes0 & cpu_to_le32(0x80000000))) {
		rxptr->rx_skb_ptr = skb;
		rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev,
			    skb->data, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
		/* Barrier: buffer address must be visible before
		 * ownership is given back to the chip via rdes0 */
		wmb();
		rxptr->rdes0 = cpu_to_le32(0x80000000);
		db->rx_avail_cnt++;
		db->rx_insert_ptr = rxptr->next_rx_desc;
	} else
		DMFE_DBUG(0, "SK Buffer reuse method error", db->rx_avail_cnt);
}
/*
 *	Initialize transmit/receive descriptors.
 *	Tx and Rx rings live in one DMA block: Rx descriptors follow the
 *	TX_DESC_CNT Tx descriptors. Both rings are chained circularly
 *	(tdes3/rdes3 point at the next descriptor's DMA address) and the
 *	base addresses are programmed into DCR4/DCR3.
 */
static void dmfe_descriptor_init(struct dmfe_board_info *db, unsigned long ioaddr)
{
	struct tx_desc *tmp_tx;
	struct rx_desc *tmp_rx;
	unsigned char *tmp_buf;
	dma_addr_t tmp_tx_dma, tmp_rx_dma;
	dma_addr_t tmp_buf_dma;
	int i;

	DMFE_DBUG(0, "dmfe_descriptor_init()", 0);

	/* tx descriptor start pointer */
	db->tx_insert_ptr = db->first_tx_desc;
	db->tx_remove_ptr = db->first_tx_desc;
	outl(db->first_tx_desc_dma, ioaddr + DCR4);	/* TX DESC address */

	/* rx descriptor start pointer: directly after the Tx ring in
	 * both virtual and DMA address space */
	db->first_rx_desc = (void *)db->first_tx_desc +
			sizeof(struct tx_desc) * TX_DESC_CNT;

	db->first_rx_desc_dma = db->first_tx_desc_dma +
			sizeof(struct tx_desc) * TX_DESC_CNT;
	db->rx_insert_ptr = db->first_rx_desc;
	db->rx_ready_ptr = db->first_rx_desc;
	outl(db->first_rx_desc_dma, ioaddr + DCR3);	/* RX DESC address */

	/* Init Transmit chain: each descriptor gets a TX_BUF_ALLOC slice
	 * of the pre-allocated buffer pool */
	tmp_buf = db->buf_pool_start;
	tmp_buf_dma = db->buf_pool_dma_start;
	tmp_tx_dma = db->first_tx_desc_dma;
	for (tmp_tx = db->first_tx_desc, i = 0; i < TX_DESC_CNT; i++, tmp_tx++) {
		tmp_tx->tx_buf_ptr = tmp_buf;
		tmp_tx->tdes0 = cpu_to_le32(0);
		tmp_tx->tdes1 = cpu_to_le32(0x81000000);	/* IC, chain */
		tmp_tx->tdes2 = cpu_to_le32(tmp_buf_dma);
		tmp_tx_dma += sizeof(struct tx_desc);
		tmp_tx->tdes3 = cpu_to_le32(tmp_tx_dma);
		tmp_tx->next_tx_desc = tmp_tx + 1;
		tmp_buf = tmp_buf + TX_BUF_ALLOC;
		tmp_buf_dma = tmp_buf_dma + TX_BUF_ALLOC;
	}
	/* Close the ring: last descriptor chains back to the first */
	(--tmp_tx)->tdes3 = cpu_to_le32(db->first_tx_desc_dma);
	tmp_tx->next_tx_desc = db->first_tx_desc;

	/* Init Receive descriptor chain (buffers attached below) */
	tmp_rx_dma=db->first_rx_desc_dma;
	for (tmp_rx = db->first_rx_desc, i = 0; i < RX_DESC_CNT; i++, tmp_rx++) {
		tmp_rx->rdes0 = cpu_to_le32(0);
		tmp_rx->rdes1 = cpu_to_le32(0x01000600);
		tmp_rx_dma += sizeof(struct rx_desc);
		tmp_rx->rdes3 = cpu_to_le32(tmp_rx_dma);
		tmp_rx->next_rx_desc = tmp_rx + 1;
	}
	/* Close the Rx ring as well */
	(--tmp_rx)->rdes3 = cpu_to_le32(db->first_rx_desc_dma);
	tmp_rx->next_rx_desc = db->first_rx_desc;

	/* pre-allocate Rx buffer */
	allocate_rx_buffer(db);
}
1386 * Update CR6 value
1387 * Firstly stop DM910X , then written value and start
1390 static void update_cr6(u32 cr6_data, unsigned long ioaddr)
1392 u32 cr6_tmp;
1394 cr6_tmp = cr6_data & ~0x2002; /* stop Tx/Rx */
1395 outl(cr6_tmp, ioaddr + DCR6);
1396 udelay(5);
1397 outl(cr6_data, ioaddr + DCR6);
1398 udelay(5);
/*
 *	Send a setup frame for DM9132.
 *	Programs the DM910X address filter: unicast address as three
 *	16-bit words, then a 64-bit multicast hash table, written to
 *	the chip's ID table region at base + 0xc0 (one 16-bit word
 *	every 4 bytes of I/O space).
 */
static void dm9132_id_table(struct DEVICE *dev, int mc_cnt)
{
	struct dev_mc_list *mcptr;
	u16 * addrptr;
	unsigned long ioaddr = dev->base_addr+0xc0;	/* ID Table */
	u32 hash_val;
	u16 i, hash_table[4];

	DMFE_DBUG(0, "dm9132_id_table()", 0);

	/* Node address */
	addrptr = (u16 *) dev->dev_addr;
	outw(addrptr[0], ioaddr);
	ioaddr += 4;
	outw(addrptr[1], ioaddr);
	ioaddr += 4;
	outw(addrptr[2], ioaddr);
	ioaddr += 4;

	/* Clear Hash Table */
	for (i = 0; i < 4; i++)
		hash_table[i] = 0x0;

	/* broadcast address hashes to bit 63 */
	hash_table[3] = 0x8000;

	/* the multicast address in Hash Table : 64 bits */
	for (mcptr = dev->mc_list, i = 0; i < mc_cnt; i++, mcptr = mcptr->next) {
		/* CRC of the MAC selects one of 64 hash bits */
		hash_val = cal_CRC( (char *) mcptr->dmi_addr, 6, 0) & 0x3f;
		hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);
	}

	/* Write the hash table to MAC MD table */
	for (i = 0; i < 4; i++, ioaddr += 4)
		outw(hash_table[i], ioaddr);
}
/*
 *	Send a setup frame for DM9102/DM9102A.
 *	Builds the address-filter setup frame in the next Tx buffer:
 *	own address, broadcast, up to mc_cnt multicast addresses, and
 *	broadcast padding up to 14 perfect-filter slots. The frame is
 *	transmitted immediately if the Tx ring is idle, otherwise it is
 *	left queued for the Tx completion path.
 *	Caller holds db->lock (called from dmfe_set_filter_mode).
 */
static void send_filter_frame(struct DEVICE *dev, int mc_cnt)
{
	struct dmfe_board_info *db = netdev_priv(dev);
	struct dev_mc_list *mcptr;
	struct tx_desc *txptr;
	u16 * addrptr;
	u32 * suptr;
	int i;

	DMFE_DBUG(0, "send_filter_frame()", 0);

	txptr = db->tx_insert_ptr;
	suptr = (u32 *) txptr->tx_buf_ptr;

	/* Node address: one 16-bit half per 32-bit setup-frame word */
	addrptr = (u16 *) dev->dev_addr;
	*suptr++ = addrptr[0];
	*suptr++ = addrptr[1];
	*suptr++ = addrptr[2];

	/* broadcast address */
	*suptr++ = 0xffff;
	*suptr++ = 0xffff;
	*suptr++ = 0xffff;

	/* fit the multicast address */
	for (mcptr = dev->mc_list, i = 0; i < mc_cnt; i++, mcptr = mcptr->next) {
		addrptr = (u16 *) mcptr->dmi_addr;
		*suptr++ = addrptr[0];
		*suptr++ = addrptr[1];
		*suptr++ = addrptr[2];
	}

	/* pad the remaining filter slots with broadcast */
	for (; i<14; i++) {
		*suptr++ = 0xffff;
		*suptr++ = 0xffff;
		*suptr++ = 0xffff;
	}

	/* prepare the setup frame */
	db->tx_insert_ptr = txptr->next_tx_desc;
	txptr->tdes1 = cpu_to_le32(0x890000c0);

	/* Resource Check and Send the setup packet */
	if (!db->tx_packet_cnt) {
		/* Tx idle: give the descriptor to the chip and kick Tx */
		db->tx_packet_cnt++;
		txptr->tdes0 = cpu_to_le32(0x80000000);
		update_cr6(db->cr6_data | 0x2000, dev->base_addr);
		outl(0x1, dev->base_addr + DCR1);	/* Issue Tx polling */
		update_cr6(db->cr6_data, dev->base_addr);
		dev->trans_start = jiffies;
	} else
		db->tx_queue_cnt++;	/* Put in TX queue */
}
/*
 *	Allocate Rx buffers: fill as many free Rx descriptors as
 *	possible with fresh skbs and hand them to the chip. Stops
 *	early (without error) if skb allocation fails.
 */
static void allocate_rx_buffer(struct dmfe_board_info *db)
{
	struct rx_desc *rxptr;
	struct sk_buff *skb;

	rxptr = db->rx_insert_ptr;

	while(db->rx_avail_cnt < RX_DESC_CNT) {
		if ( ( skb = dev_alloc_skb(RX_ALLOC_SIZE) ) == NULL )
			break;
		rxptr->rx_skb_ptr = skb; /* FIXME (?) */
		rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->data,
				    RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
		/* Barrier: buffer address must be written before the
		 * own bit is handed back to the chip */
		wmb();
		rxptr->rdes0 = cpu_to_le32(0x80000000);
		rxptr = rxptr->next_rx_desc;
		db->rx_avail_cnt++;
	}

	db->rx_insert_ptr = rxptr;
}
/*
 *	Read one 16-bit word from the serial ROM (bit-banged via CR9):
 *	select the chip, clock out the read opcode (110b) and the 6-bit
 *	offset, then clock in 16 data bits MSB first.
 */
static u16 read_srom_word(long ioaddr, int offset)
{
	int i;
	u16 srom_data = 0;
	long cr9_ioaddr = ioaddr + DCR9;

	outl(CR9_SROM_READ, cr9_ioaddr);
	outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);	/* chip select */

	/* Send the Read Command 110b */
	SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr);
	SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr);
	SROM_CLK_WRITE(SROM_DATA_0, cr9_ioaddr);

	/* Send the offset, MSB first */
	for (i = 5; i >= 0; i--) {
		srom_data = (offset & (1 << i)) ? SROM_DATA_1 : SROM_DATA_0;
		SROM_CLK_WRITE(srom_data, cr9_ioaddr);
	}

	outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);

	/* Clock in 16 data bits, MSB first */
	for (i = 16; i > 0; i--) {
		outl(CR9_SROM_READ | CR9_SRCS | CR9_SRCLK, cr9_ioaddr);
		udelay(5);
		srom_data = (srom_data << 1) |
				((inl(cr9_ioaddr) & CR9_CRDOUT) ? 1 : 0);
		outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
		udelay(5);
	}

	outl(CR9_SROM_READ, cr9_ioaddr);	/* deselect */
	return srom_data;
}
/*
 *	Auto sense the media mode.
 *	Reads PHY status (BMSR, twice because of the register latch) and,
 *	when link + autoneg-complete (0x24) are up, decodes the negotiated
 *	speed/duplex into db->op_mode.
 *	Returns 0 on success, 1 when link is down or the mode is unknown.
 */
static u8 dmfe_sense_speed(struct dmfe_board_info * db)
{
	u8 ErrFlag = 0;
	u16 phy_mode;

	/* CR6 bit18=0, select 10/100M */
	update_cr6( (db->cr6_data & ~0x40000), db->ioaddr);

	/* read twice: status bits are latched in the PHY */
	phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
	phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);

	if ( (phy_mode & 0x24) == 0x24 ) {
		/* Link up and autoneg complete: fetch the resolved mode
		 * from the chip-specific PHY status register */
		if (db->chip_id == PCI_DM9132_ID)	/* DM9132 */
			phy_mode = phy_read(db->ioaddr,
				    db->phy_addr, 7, db->chip_id) & 0xf000;
		else				/* DM9102/DM9102A */
			phy_mode = phy_read(db->ioaddr,
				    db->phy_addr, 17, db->chip_id) & 0xf000;
		/* printk(DRV_NAME ": Phy_mode %x ",phy_mode); */
		switch (phy_mode) {
		case 0x1000: db->op_mode = DMFE_10MHF; break;
		case 0x2000: db->op_mode = DMFE_10MFD; break;
		case 0x4000: db->op_mode = DMFE_100MHF; break;
		case 0x8000: db->op_mode = DMFE_100MFD; break;
		default: db->op_mode = DMFE_10MHF;
			ErrFlag = 1;
			break;
		}
	} else {
		db->op_mode = DMFE_10MHF;
		DMFE_DBUG(0, "Link Failed :", phy_mode);
		ErrFlag = 1;
	}

	return ErrFlag;
}
/*
 *	Set 10/100 phyxcer capability.
 *	AUTO mode : phyxcer register4 is NIC capability.
 *	Force mode: phyxcer register4 is the force media.
 *	Falls back to AUTO if the requested force mode yields no
 *	advertised capability, then restarts auto-negotiation.
 */
static void dmfe_set_phyxcer(struct dmfe_board_info *db)
{
	u16 phy_reg;

	/* Select 10/100M phyxcer */
	db->cr6_data &= ~0x40000;
	update_cr6(db->cr6_data, db->ioaddr);

	/* DM9009 Chip: Phyxcer reg18 bit12=0 */
	if (db->chip_id == PCI_DM9009_ID) {
		phy_reg = phy_read(db->ioaddr,
				   db->phy_addr, 18, db->chip_id) & ~0x1000;

		phy_write(db->ioaddr,
			  db->phy_addr, 18, phy_reg, db->chip_id);
	}

	/* Phyxcer capability setting: clear the 10/100 HD/FD bits */
	phy_reg = phy_read(db->ioaddr, db->phy_addr, 4, db->chip_id) & ~0x01e0;

	if (db->media_mode & DMFE_AUTO) {
		/* AUTO Mode: advertise everything the NIC supports */
		phy_reg |= db->PHY_reg4;
	} else {
		/* Force Mode: advertise exactly one capability */
		switch(db->media_mode) {
		case DMFE_10MHF: phy_reg |= 0x20; break;
		case DMFE_10MFD: phy_reg |= 0x40; break;
		case DMFE_100MHF: phy_reg |= 0x80; break;
		case DMFE_100MFD: phy_reg |= 0x100; break;
		}
		/* DM9009 only does 10M */
		if (db->chip_id == PCI_DM9009_ID) phy_reg &= 0x61;
	}

	/* Write new capability to Phyxcer Reg4; if nothing would be
	 * advertised, fall back to full NIC capability + AUTO */
	if ( !(phy_reg & 0x01e0)) {
		phy_reg|=db->PHY_reg4;
		db->media_mode|=DMFE_AUTO;
	}
	phy_write(db->ioaddr, db->phy_addr, 4, phy_reg, db->chip_id);

	/* Restart Auto-Negotiation */
	if ( db->chip_type && (db->chip_id == PCI_DM9102_ID) )
		phy_write(db->ioaddr, db->phy_addr, 0, 0x1800, db->chip_id);
	if ( !db->chip_type )
		phy_write(db->ioaddr, db->phy_addr, 0, 0x1200, db->chip_id);
}
/*
 *	Process op-mode.
 *	AUTO mode : PHY controller in Auto-negotiation Mode.
 *	Force mode: PHY controller in force mode with HUB,
 *		N-way force capability with SWITCH.
 *	Applies db->op_mode to CR6 (duplex, transceiver select) and, in
 *	force mode against a non-N-way partner, forces the PHY directly.
 */
static void dmfe_process_mode(struct dmfe_board_info *db)
{
	u16 phy_reg;

	/* Full Duplex Mode Check */
	if (db->op_mode & 0x4)
		db->cr6_data |= CR6_FDM;	/* Set Full Duplex Bit */
	else
		db->cr6_data &= ~CR6_FDM;	/* Clear Full Duplex Bit */

	/* Transceiver Selection */
	if (db->op_mode & 0x10)		/* 1M HomePNA */
		db->cr6_data |= 0x40000;	/* External MII select */
	else
		db->cr6_data &= ~0x40000;	/* Internal 10/100 transceiver */

	update_cr6(db->cr6_data, db->ioaddr);

	/* 10/100M phyxcer force mode need */
	if ( !(db->media_mode & 0x18)) {
		/* Force Mode */
		phy_reg = phy_read(db->ioaddr, db->phy_addr, 6, db->chip_id);
		if ( !(phy_reg & 0x1) ) {
			/* partner without N-Way capability:
			 * force BMCR speed/duplex directly */
			phy_reg = 0x0;
			switch(db->op_mode) {
			case DMFE_10MHF: phy_reg = 0x0; break;
			case DMFE_10MFD: phy_reg = 0x100; break;
			case DMFE_100MHF: phy_reg = 0x2000; break;
			case DMFE_100MFD: phy_reg = 0x2100; break;
			}
			/* NOTE(review): the write is issued twice (with a
			 * 20ms pause on DM9102) — presumably a chip quirk
			 * workaround; confirm before "simplifying" */
			phy_write(db->ioaddr,
				  db->phy_addr, 0, phy_reg, db->chip_id);
			if ( db->chip_type && (db->chip_id == PCI_DM9102_ID) )
				mdelay(20);
			phy_write(db->ioaddr,
				  db->phy_addr, 0, phy_reg, db->chip_id);
		}
	}
}
/*
 *	Write a word to a PHY register.
 *	DM9132 maps PHY registers directly into I/O space; the DM9102
 *	family requires bit-banging the MII management frame through CR9:
 *	preamble, start (01), write opcode (01), 5-bit PHY address,
 *	5-bit register address, turnaround (10), then 16 data bits.
 */
static void phy_write(unsigned long iobase, u8 phy_addr, u8 offset,
		      u16 phy_data, u32 chip_id)
{
	u16 i;
	unsigned long ioaddr;

	if (chip_id == PCI_DM9132_ID) {
		/* DM9132: memory-mapped PHY registers, 4 bytes apart */
		ioaddr = iobase + 0x80 + offset * 4;
		outw(phy_data, ioaddr);
	} else {
		/* DM9102/DM9102A Chip */
		ioaddr = iobase + DCR9;

		/* Send 33 synchronization clock to Phy controller */
		for (i = 0; i < 35; i++)
			phy_write_1bit(ioaddr, PHY_DATA_1);

		/* Send start command(01) to Phy */
		phy_write_1bit(ioaddr, PHY_DATA_0);
		phy_write_1bit(ioaddr, PHY_DATA_1);

		/* Send write command(01) to Phy */
		phy_write_1bit(ioaddr, PHY_DATA_0);
		phy_write_1bit(ioaddr, PHY_DATA_1);

		/* Send Phy address, MSB first */
		for (i = 0x10; i > 0; i = i >> 1)
			phy_write_1bit(ioaddr,
				       phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);

		/* Send register address, MSB first */
		for (i = 0x10; i > 0; i = i >> 1)
			phy_write_1bit(ioaddr,
				       offset & i ? PHY_DATA_1 : PHY_DATA_0);

		/* turnaround bits before the data word */
		phy_write_1bit(ioaddr, PHY_DATA_1);
		phy_write_1bit(ioaddr, PHY_DATA_0);

		/* Write a word data to PHY controller, MSB first */
		for ( i = 0x8000; i > 0; i >>= 1)
			phy_write_1bit(ioaddr,
				       phy_data & i ? PHY_DATA_1 : PHY_DATA_0);
	}
}
/*
 *	Read a word from a PHY register.
 *	DM9132 reads the register directly from I/O space; the DM9102
 *	family bit-bangs the MII read frame through CR9: preamble,
 *	start (01), read opcode (10), addresses, turnaround, then
 *	clocks in 16 data bits.
 */
static u16 phy_read(unsigned long iobase, u8 phy_addr, u8 offset, u32 chip_id)
{
	int i;
	u16 phy_data;
	unsigned long ioaddr;

	if (chip_id == PCI_DM9132_ID) {
		/* DM9132 Chip */
		ioaddr = iobase + 0x80 + offset * 4;
		phy_data = inw(ioaddr);
	} else {
		/* DM9102/DM9102A Chip */
		ioaddr = iobase + DCR9;

		/* Send 33 synchronization clock to Phy controller */
		for (i = 0; i < 35; i++)
			phy_write_1bit(ioaddr, PHY_DATA_1);

		/* Send start command(01) to Phy */
		phy_write_1bit(ioaddr, PHY_DATA_0);
		phy_write_1bit(ioaddr, PHY_DATA_1);

		/* Send read command(10) to Phy */
		phy_write_1bit(ioaddr, PHY_DATA_1);
		phy_write_1bit(ioaddr, PHY_DATA_0);

		/* Send Phy address, MSB first */
		for (i = 0x10; i > 0; i = i >> 1)
			phy_write_1bit(ioaddr,
				       phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);

		/* Send register address, MSB first */
		for (i = 0x10; i > 0; i = i >> 1)
			phy_write_1bit(ioaddr,
				       offset & i ? PHY_DATA_1 : PHY_DATA_0);

		/* Skip transition state */
		phy_read_1bit(ioaddr);

		/* read 16bit data, MSB first */
		for (phy_data = 0, i = 0; i < 16; i++) {
			phy_data <<= 1;
			phy_data |= phy_read_1bit(ioaddr);
		}
	}

	return phy_data;
}
/*
 *	Write one bit to the PHY controller: present the data with the
 *	MII clock low, pulse the clock high, then return it low. 1us
 *	hold time around each edge.
 */
static void phy_write_1bit(unsigned long ioaddr, u32 phy_data)
{
	outl(phy_data, ioaddr);			/* MII Clock Low */
	udelay(1);
	outl(phy_data | MDCLKH, ioaddr);	/* MII Clock High */
	udelay(1);
	outl(phy_data, ioaddr);			/* MII Clock Low */
	udelay(1);
}
/*
 *	Read one bit from the PHY controller: raise the MII clock,
 *	sample the data-out bit (bit 19 of CR9), then drop the clock.
 *	Returns 0 or 1.
 */
static u16 phy_read_1bit(unsigned long ioaddr)
{
	u16 phy_data;

	outl(0x50000, ioaddr);		/* clock high */
	udelay(1);
	phy_data = ( inl(ioaddr) >> 19 ) & 0x1;
	outl(0x40000, ioaddr);		/* clock low */
	udelay(1);

	return phy_data;
}
/*
 *	Parse the SROM contents and set up media mode.
 *	For V4.01 SROMs: extracts NIC capability into PHY_reg4, any
 *	forced media mode, and special-function bits (VLAN, flow
 *	control, TX pause) into cr15_data. Then builds the HPNA command
 *	word from the module parameters and probes for a DM9801/DM9802
 *	HomePNA companion chip on the external MII.
 */
static void dmfe_parse_srom(struct dmfe_board_info * db)
{
	char * srom = db->srom;
	int dmfe_mode, tmp_reg;

	DMFE_DBUG(0, "dmfe_parse_srom() ", 0);

	/* Init CR15 */
	db->cr15_data = CR15_DEFAULT;

	/* Check SROM Version */
	if ( ( (int) srom[18] & 0xff) == SROM_V41_CODE) {
		/* SROM V4.01 */
		/* Get NIC support media mode */
		db->NIC_capability = le16_to_cpup((__le16 *)srom + 34/2);
		db->PHY_reg4 = 0;
		/* map capability bits to MII reg4 advertisement bits */
		for (tmp_reg = 1; tmp_reg < 0x10; tmp_reg <<= 1) {
			switch( db->NIC_capability & tmp_reg ) {
			case 0x1: db->PHY_reg4 |= 0x0020; break;
			case 0x2: db->PHY_reg4 |= 0x0040; break;
			case 0x4: db->PHY_reg4 |= 0x0080; break;
			case 0x8: db->PHY_reg4 |= 0x0100; break;
			}
		}

		/* Media Mode Force or not check */
		dmfe_mode = le32_to_cpup((__le32 *)srom + 34/4) &
				le32_to_cpup((__le32 *)srom + 36/4);
		switch(dmfe_mode) {
		case 0x4: dmfe_media_mode = DMFE_100MHF; break;	/* 100MHF */
		case 0x2: dmfe_media_mode = DMFE_10MFD; break;	/* 10MFD */
		case 0x8: dmfe_media_mode = DMFE_100MFD; break;	/* 100MFD */
		case 0x100:
		case 0x200: dmfe_media_mode = DMFE_1M_HPNA; break;/* HomePNA */
		}

		/* Special Function setting */
		/* VLAN function */
		if ( (SF_mode & 0x1) || (srom[43] & 0x80) )
			db->cr15_data |= 0x40;

		/* Flow Control */
		if ( (SF_mode & 0x2) || (srom[40] & 0x1) )
			db->cr15_data |= 0x400;

		/* TX pause packet */
		if ( (SF_mode & 0x4) || (srom[40] & 0xe) )
			db->cr15_data |= 0x9800;
	}

	/* Parse HPNA parameter */
	db->HPNA_command = 1;

	/* Accept remote command or not */
	if (HPNA_rx_cmd == 0)
		db->HPNA_command |= 0x8000;

	/* Issue remote command & operation mode */
	if (HPNA_tx_cmd == 1)
		switch(HPNA_mode) {	/* Issue Remote Command */
		case 0: db->HPNA_command |= 0x0904; break;
		case 1: db->HPNA_command |= 0x0a00; break;
		case 2: db->HPNA_command |= 0x0506; break;
		case 3: db->HPNA_command |= 0x0602; break;
		}
	else
		switch(HPNA_mode) {	/* Don't Issue */
		case 0: db->HPNA_command |= 0x0004; break;
		case 1: db->HPNA_command |= 0x0000; break;
		case 2: db->HPNA_command |= 0x0006; break;
		case 3: db->HPNA_command |= 0x0002; break;
		}

	/* Check DM9801 or DM9802 present or not */
	db->HPNA_present = 0;
	update_cr6(db->cr6_data|0x40000, db->ioaddr);
	tmp_reg = phy_read(db->ioaddr, db->phy_addr, 3, db->chip_id);
	if ( ( tmp_reg & 0xfff0 ) == 0xb900 ) {
		/* DM9801 or DM9802 present */
		db->HPNA_timer = 8;
		if ( phy_read(db->ioaddr, db->phy_addr, 31, db->chip_id) == 0x4404) {
			/* DM9801 HomeRun */
			db->HPNA_present = 1;
			dmfe_program_DM9801(db, tmp_reg);
		} else {
			/* DM9802 LongRun */
			db->HPNA_present = 2;
			dmfe_program_DM9802(db);
		}
	}
}
/*
 *	Init HomeRun DM9801.
 *	Adjusts PHY registers 17/25 by the configured noise floor,
 *	with per-revision differences (HPNA_rev is the PHY ID read
 *	earlier; low nibble selects the silicon revision).
 */
static void dmfe_program_DM9801(struct dmfe_board_info * db, int HPNA_rev)
{
	uint reg17, reg25;

	if ( !HPNA_NoiseFloor ) HPNA_NoiseFloor = DM9801_NOISE_FLOOR;
	switch(HPNA_rev) {
	case 0xb900: /* DM9801 E3 */
		db->HPNA_command |= 0x1000;
		/* NOTE(review): E3 reads register 24 here while later
		 * revisions read 25 — looks deliberate; confirm against
		 * the DM9801 datasheet before changing */
		reg25 = phy_read(db->ioaddr, db->phy_addr, 24, db->chip_id);
		reg25 = ( (reg25 + HPNA_NoiseFloor) & 0xff) | 0xf000;
		reg17 = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
		break;
	case 0xb901: /* DM9801 E4 */
		reg25 = phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
		reg25 = (reg25 & 0xff00) + HPNA_NoiseFloor;
		reg17 = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
		reg17 = (reg17 & 0xfff0) + HPNA_NoiseFloor + 3;
		break;
	case 0xb902: /* DM9801 E5 */
	case 0xb903: /* DM9801 E6 */
	default:
		db->HPNA_command |= 0x1000;
		reg25 = phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
		reg25 = (reg25 & 0xff00) + HPNA_NoiseFloor - 5;
		reg17 = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
		reg17 = (reg17 & 0xfff0) + HPNA_NoiseFloor;
		break;
	}
	phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id);
	phy_write(db->ioaddr, db->phy_addr, 17, reg17, db->chip_id);
	phy_write(db->ioaddr, db->phy_addr, 25, reg25, db->chip_id);
}
/*
 *	Init LongRun DM9802: program the HPNA command word into PHY
 *	register 16, then set the noise floor in the low byte of
 *	register 25.
 */
static void dmfe_program_DM9802(struct dmfe_board_info * db)
{
	uint phy_reg;

	if ( !HPNA_NoiseFloor ) HPNA_NoiseFloor = DM9802_NOISE_FLOOR;
	phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id);
	phy_reg = phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
	phy_reg = ( phy_reg & 0xff00) + HPNA_NoiseFloor;
	phy_write(db->ioaddr, db->phy_addr, 25, phy_reg, db->chip_id);
}
/*
 *	Check remote HPNA power and speed status. If it does not match
 *	our configured command, issue the command again and re-check in
 *	8 timer ticks; otherwise back off to a 10-minute check interval.
 */
static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * db)
{
	uint phy_reg;

	/* Got remote device status (bits 5-6 of PHY register 17) */
	phy_reg = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id) & 0x60;
	switch(phy_reg) {
	case 0x00: phy_reg = 0x0a00;break; /* LP/LS */
	case 0x20: phy_reg = 0x0900;break; /* LP/HS */
	case 0x40: phy_reg = 0x0600;break; /* HP/LS */
	case 0x60: phy_reg = 0x0500;break; /* HP/HS */
	}

	/* Check whether the remote device status matches our setting */
	if ( phy_reg != (db->HPNA_command & 0x0f00) ) {
		phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command,
			  db->chip_id);
		db->HPNA_timer=8;
	} else
		db->HPNA_timer=600;	/* Match, every 10 minutes, check */
}
/* PCI IDs this driver binds to (vendor 0x1282 = Davicom); the
 * driver_data field carries the internal chip id used to key
 * chip-specific behaviour throughout the driver. */
static struct pci_device_id dmfe_pci_tbl[] = {
	{ 0x1282, 0x9132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9132_ID },
	{ 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9102_ID },
	{ 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9100_ID },
	{ 0x1282, 0x9009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9009_ID },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, dmfe_pci_tbl);
#ifdef CONFIG_PM
/*
 *	Suspend: detach the interface, stop Tx/Rx, mask and ack
 *	interrupts, release Rx buffers, then save PCI config space and
 *	power the device down.
 *
 *	Fix: pci_save_state() must run while the device is still in D0 —
 *	config space may not be accessible in a low-power state — so it
 *	is called BEFORE pci_set_power_state(), not after.
 */
static int dmfe_suspend(struct pci_dev *pci_dev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pci_dev);
	struct dmfe_board_info *db = netdev_priv(dev);

	/* Disable upper layer interface */
	netif_device_detach(dev);

	/* Disable Tx/Rx */
	db->cr6_data &= ~(CR6_RXSC | CR6_TXSC);
	update_cr6(db->cr6_data, dev->base_addr);

	/* Disable Interrupt and ack any pending status */
	outl(0, dev->base_addr + DCR7);
	outl(inl (dev->base_addr + DCR5), dev->base_addr + DCR5);

	/* Free RX buffers */
	dmfe_free_rxbuffer(db);

	/* Save config state while still in D0, then power down */
	pci_save_state(pci_dev);
	pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));

	return 0;
}

/*
 *	Resume: return the device to D0, restore its config space,
 *	re-initialize the chip and reattach the interface.
 *
 *	Fix: the device must be in D0 before pci_restore_state() writes
 *	its config space, so the power-state change comes first.
 */
static int dmfe_resume(struct pci_dev *pci_dev)
{
	struct net_device *dev = pci_get_drvdata(pci_dev);

	/* Back to full power before touching config space */
	pci_set_power_state(pci_dev, PCI_D0);
	pci_restore_state(pci_dev);

	/* Re-initialize DM910X board */
	dmfe_init_dm910x(dev);

	/* Restart upper layer interface */
	netif_device_attach(dev);

	return 0;
}
#else
#define dmfe_suspend NULL
#define dmfe_resume NULL
#endif
/* PCI driver glue. The suspend/resume hooks compile to NULL when
 * CONFIG_PM is not set (see the #ifdef block above). */
static struct pci_driver dmfe_driver = {
	.name		= "dmfe",
	.id_table	= dmfe_pci_tbl,
	.probe		= dmfe_init_one,
	.remove		= __devexit_p(dmfe_remove_one),
	.suspend	= dmfe_suspend,
	.resume		= dmfe_resume
};
/* Module metadata exposed via modinfo */
MODULE_AUTHOR("Sten Wang, sten_wang@davicom.com.tw");
MODULE_DESCRIPTION("Davicom DM910X fast ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/* Load-time tunables (perm 0: not shown in sysfs). Values are
 * validated/clamped in dmfe_init_module(). */
module_param(debug, int, 0);
module_param(mode, byte, 0);
module_param(cr6set, int, 0);
module_param(chkmode, byte, 0);
module_param(HPNA_mode, byte, 0);
module_param(HPNA_rx_cmd, byte, 0);
module_param(HPNA_tx_cmd, byte, 0);
module_param(HPNA_NoiseFloor, byte, 0);
module_param(SF_mode, byte, 0);
MODULE_PARM_DESC(debug, "Davicom DM9xxx enable debugging (0-1)");
MODULE_PARM_DESC(mode, "Davicom DM9xxx: "
		"Bit 0: 10/100Mbps, bit 2: duplex, bit 8: HomePNA");

MODULE_PARM_DESC(SF_mode, "Davicom DM9xxx special function "
		"(bit 0: VLAN, bit 1 Flow Control, bit 2: TX pause packet)");
2127 /* Description:
2128 * when user used insmod to add module, system invoked init_module()
2129 * to initilize and register.
2132 static int __init dmfe_init_module(void)
2134 int rc;
2136 printk(version);
2137 printed_version = 1;
2139 DMFE_DBUG(0, "init_module() ", debug);
2141 if (debug)
2142 dmfe_debug = debug; /* set debug flag */
2143 if (cr6set)
2144 dmfe_cr6_user_set = cr6set;
2146 switch(mode) {
2147 case DMFE_10MHF:
2148 case DMFE_100MHF:
2149 case DMFE_10MFD:
2150 case DMFE_100MFD:
2151 case DMFE_1M_HPNA:
2152 dmfe_media_mode = mode;
2153 break;
2154 default:dmfe_media_mode = DMFE_AUTO;
2155 break;
2158 if (HPNA_mode > 4)
2159 HPNA_mode = 0; /* Default: LP/HS */
2160 if (HPNA_rx_cmd > 1)
2161 HPNA_rx_cmd = 0; /* Default: Ignored remote cmd */
2162 if (HPNA_tx_cmd > 1)
2163 HPNA_tx_cmd = 0; /* Default: Don't issue remote cmd */
2164 if (HPNA_NoiseFloor > 15)
2165 HPNA_NoiseFloor = 0;
2167 rc = pci_register_driver(&dmfe_driver);
2168 if (rc < 0)
2169 return rc;
2171 return 0;
/*
 *	Module unload entry point: unregister the PCI driver; the PCI
 *	core then invokes dmfe_remove_one() for every bound device.
 */
static void __exit dmfe_cleanup_module(void)
{
	DMFE_DBUG(0, "dmfe_clean_module() ", debug);
	pci_unregister_driver(&dmfe_driver);
}
/* Module entry/exit points */
module_init(dmfe_init_module);
module_exit(dmfe_cleanup_module);