1 /*
2 Written 1998-2000 by Donald Becker.
4 This software may be used and distributed according to the terms of
5 the GNU General Public License (GPL), incorporated herein by reference.
6 Drivers based on or derived from this code fall under the GPL and must
7 retain the authorship, copyright and license notice. This file is not
8 a complete program and may only be used when the entire operating
9 system is licensed under the GPL.
11 The author may be reached as becker@scyld.com, or C/O
12 Scyld Computing Corporation
13 410 Severn Ave., Suite 210
14 Annapolis MD 21403
16 Support information and updates available at
17 http://www.scyld.com/network/pci-skeleton.html
19 Linux kernel updates:
21 Version 2.51, Nov 17, 2001 (jgarzik):
22 - Add ethtool support
23 - Replace some MII-related magic numbers with constants
27 #define DRV_NAME "fealnx"
28 #define DRV_VERSION "2.52"
29 #define DRV_RELDATE "Sep-11-2006"
31 static int debug; /* 1-> print debug message */
32 static int max_interrupt_work = 20;
34 /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). */
35 static int multicast_filter_limit = 32;
37 /* Set the copy breakpoint for the copy-only-tiny-frames scheme. */
38 /* Setting to > 1518 effectively disables this feature. */
39 static int rx_copybreak;
41 /* Used to pass the media type, etc. */
42 /* Both 'options[]' and 'full_duplex[]' should exist for driver */
43 /* interoperability. */
44 /* The media type is usually passed in 'options[]'. */
45 #define MAX_UNITS 8 /* More are supported, limit only on options */
46 static int options[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 };
47 static int full_duplex[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 };
49 /* Operational parameters that are set at compile time. */
50 /* Keep the ring sizes a power of two for compile efficiency. */
51 /* The compiler will convert <unsigned>'%'<2^N> into a bit mask. */
52 /* Making the Tx ring too large decreases the effectiveness of channel */
53 /* bonding and packet priority. */
54 /* There are no ill effects from too-large receive rings. */
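/* Illustrative example (not in the original): with an unsigned index and a */
/* ring size of 16, "i % 16" compiles to "i & 15".  Note the sizes chosen */
/* below (6 and 12) are not powers of two, so that optimization does not */
/* apply to them. */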
55 // 88-12-9 modify,
56 // #define TX_RING_SIZE 16
57 // #define RX_RING_SIZE 32
58 #define TX_RING_SIZE 6
59 #define RX_RING_SIZE 12
60 #define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct fealnx_desc)
61 #define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct fealnx_desc)
63 /* Operational parameters that usually are not changed. */
64 /* Time in jiffies before concluding the transmitter is hung. */
65 #define TX_TIMEOUT (2*HZ)
67 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer. */
70 /* Include files, designed to support most kernel versions 2.0.0 and later. */
71 #include <linux/module.h>
72 #include <linux/kernel.h>
73 #include <linux/string.h>
74 #include <linux/timer.h>
75 #include <linux/errno.h>
76 #include <linux/ioport.h>
77 #include <linux/interrupt.h>
78 #include <linux/pci.h>
79 #include <linux/netdevice.h>
80 #include <linux/etherdevice.h>
81 #include <linux/skbuff.h>
82 #include <linux/init.h>
83 #include <linux/mii.h>
84 #include <linux/ethtool.h>
85 #include <linux/crc32.h>
86 #include <linux/delay.h>
87 #include <linux/bitops.h>
89 #include <asm/processor.h> /* Processor type for cache alignment. */
90 #include <asm/io.h>
91 #include <asm/uaccess.h>
92 #include <asm/byteorder.h>
94 /* These identify the driver base version and may not be removed. */
95 static const char version[] __devinitconst =
96 KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE "\n";
99 /* This driver was written to use PCI memory space; however, some x86 systems
100 work only with I/O space accesses. */
101 #ifndef __alpha__
102 #define USE_IO_OPS
103 #endif
105 /* Kernel compatibility defines, some common to David Hinds' PCMCIA package. */
106 /* This is only in the support-all-kernels source code. */
108 #define RUN_AT(x) (jiffies + (x))
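/* Illustrative: RUN_AT(3 * HZ) means "three seconds from now" in jiffies;
 * netdev_open() uses it to arm the media-check timer. */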
110 MODULE_AUTHOR("Myson or whoever");
111 MODULE_DESCRIPTION("Myson MTD-8xx 100/10M Ethernet PCI Adapter Driver");
112 MODULE_LICENSE("GPL");
113 module_param(max_interrupt_work, int, 0);
114 module_param(debug, int, 0);
115 module_param(rx_copybreak, int, 0);
116 module_param(multicast_filter_limit, int, 0);
117 module_param_array(options, int, NULL, 0);
118 module_param_array(full_duplex, int, NULL, 0);
119 MODULE_PARM_DESC(max_interrupt_work, "fealnx maximum events handled per interrupt");
120 MODULE_PARM_DESC(debug, "fealnx enable debugging (0-1)");
121 MODULE_PARM_DESC(rx_copybreak, "fealnx copy breakpoint for copy-only-tiny-frames");
122 MODULE_PARM_DESC(multicast_filter_limit, "fealnx maximum number of filtered multicast addresses");
123 MODULE_PARM_DESC(options, "fealnx: Bits 0-3: media type, bit 17: full duplex");
124 MODULE_PARM_DESC(full_duplex, "fealnx full duplex setting(s) (1)");
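/* Example usage (illustrative, not from the original source):
 *	modprobe fealnx debug=1 full_duplex=1,1
 * enables debug output and forces the first two adapters to full duplex. */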
126 enum {
127 MIN_REGION_SIZE = 136,
130 /* A chip capabilities table, matching the entries in fealnx_pci_tbl[] below. */
131 enum chip_capability_flags {
132 HAS_MII_XCVR,
133 HAS_CHIP_XCVR,
136 /* 89/6/13 add, */
137 /* for different PHY */
138 enum phy_type_flags {
139 MysonPHY = 1,
140 AhdocPHY = 2,
141 SeeqPHY = 3,
142 MarvellPHY = 4,
143 Myson981 = 5,
144 LevelOnePHY = 6,
145 OtherPHY = 10,
148 struct chip_info {
149 char *chip_name;
150 int flags;
153 static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
154 { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
155 { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
156 { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
159 /* Offsets to the Command and Status Registers. */
160 enum fealnx_offsets {
161 PAR0 = 0x0, /* physical address 0-3 */
162 PAR1 = 0x04, /* physical address 4-5 */
163 MAR0 = 0x08, /* multicast address 0-3 */
164 MAR1 = 0x0C, /* multicast address 4-7 */
165 FAR0 = 0x10, /* flow-control address 0-3 */
166 FAR1 = 0x14, /* flow-control address 4-5 */
167 TCRRCR = 0x18, /* receive & transmit configuration */
168 BCR = 0x1C, /* bus command */
169 TXPDR = 0x20, /* transmit polling demand */
170 RXPDR = 0x24, /* receive polling demand */
171 RXCWP = 0x28, /* receive current word pointer */
172 TXLBA = 0x2C, /* transmit list base address */
173 RXLBA = 0x30, /* receive list base address */
174 ISR = 0x34, /* interrupt status */
175 IMR = 0x38, /* interrupt mask */
176 FTH = 0x3C, /* flow control high/low threshold */
177 MANAGEMENT = 0x40, /* bootrom/eeprom and mii management */
178 TALLY = 0x44, /* tally counters for crc and mpa */
179 TSR = 0x48, /* tally counter for transmit status */
180 BMCRSR = 0x4c, /* basic mode control and status */
181 PHYIDENTIFIER = 0x50, /* phy identifier */
182 ANARANLPAR = 0x54, /* auto-negotiation advertisement and link
183 partner ability */
184 ANEROCR = 0x58, /* auto-negotiation expansion and pci conf. */
185 BPREMRPSR = 0x5c, /* bypass & receive error mask and phy status */
188 /* Bits in the interrupt status/enable registers. */
189 /* The bits in the Intr Status/Enable registers, mostly interrupt sources. */
190 enum intr_status_bits {
191 RFCON = 0x00020000, /* receive flow control xon packet */
192 RFCOFF = 0x00010000, /* receive flow control xoff packet */
193 LSCStatus = 0x00008000, /* link status change */
194 ANCStatus = 0x00004000, /* autonegotiation completed */
195 FBE = 0x00002000, /* fatal bus error */
196 FBEMask = 0x00001800, /* mask bit12-11 */
197 ParityErr = 0x00000000, /* parity error */
198 TargetErr = 0x00001000, /* target abort */
199 MasterErr = 0x00000800, /* master error */
200 TUNF = 0x00000400, /* transmit underflow */
201 ROVF = 0x00000200, /* receive overflow */
202 ETI = 0x00000100, /* transmit early int */
203 ERI = 0x00000080, /* receive early int */
204 CNTOVF = 0x00000040, /* counter overflow */
205 RBU = 0x00000020, /* receive buffer unavailable */
206 TBU = 0x00000010, /* transmit buffer unavailable */
207 TI = 0x00000008, /* transmit interrupt */
208 RI = 0x00000004, /* receive interrupt */
209 RxErr = 0x00000002, /* receive error */
212 /* Bits in the NetworkConfig register, W for writing, R for reading */
213 /* FIXME: some names are invented by me. Marked with (name?) */
214 /* If you have docs and know bit names, please fix 'em */
215 enum rx_mode_bits {
216 CR_W_ENH = 0x02000000, /* enhanced mode (name?) */
217 CR_W_FD = 0x00100000, /* full duplex */
218 CR_W_PS10 = 0x00080000, /* 10 mbit */
219 CR_W_TXEN = 0x00040000, /* tx enable (name?) */
220 CR_W_PS1000 = 0x00010000, /* 1000 mbit */
221 /* CR_W_RXBURSTMASK= 0x00000e00, I'm unsure about this */
222 CR_W_RXMODEMASK = 0x000000e0,
223 CR_W_PROM = 0x00000080, /* promiscuous mode */
224 CR_W_AB = 0x00000040, /* accept broadcast */
225 CR_W_AM = 0x00000020, /* accept multicast */
226 CR_W_ARP = 0x00000008, /* receive runt pkt */
227 CR_W_ALP = 0x00000004, /* receive long pkt */
228 CR_W_SEP = 0x00000002, /* receive error pkt */
229 CR_W_RXEN = 0x00000001, /* rx enable (unicast?) (name?) */
231 CR_R_TXSTOP = 0x04000000, /* tx stopped (name?) */
232 CR_R_FD = 0x00100000, /* full duplex detected */
233 CR_R_PS10 = 0x00080000, /* 10 mbit detected */
234 CR_R_RXSTOP = 0x00008000, /* rx stopped (name?) */
237 /* The Tulip Rx and Tx buffer descriptors. */
238 struct fealnx_desc {
239 s32 status;
240 s32 control;
241 u32 buffer;
242 u32 next_desc;
243 struct fealnx_desc *next_desc_logical;
244 struct sk_buff *skbuff;
245 u32 reserved1;
246 u32 reserved2;
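/* Sketch of the chaining used by init_ring() below (illustrative): the chip
 * follows next_desc (a bus/DMA address) while the driver walks
 * next_desc_logical (a kernel virtual pointer), so ring traversal needs no
 * address translation at runtime:
 *
 *	ring[i].next_desc = ring_dma + (i + 1) * sizeof(struct fealnx_desc);
 *	ring[i].next_desc_logical = &ring[i + 1];
 */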
249 /* Bits in network_desc.status */
250 enum rx_desc_status_bits {
251 RXOWN = 0x80000000, /* own bit */
252 FLNGMASK = 0x0fff0000, /* frame length */
253 FLNGShift = 16,
254 MARSTATUS = 0x00004000, /* multicast address received */
255 BARSTATUS = 0x00002000, /* broadcast address received */
256 PHYSTATUS = 0x00001000, /* physical address received */
257 RXFSD = 0x00000800, /* first descriptor */
258 RXLSD = 0x00000400, /* last descriptor */
259 ErrorSummary = 0x80, /* error summary */
260 RUNT = 0x40, /* runt packet received */
261 LONG = 0x20, /* long packet received */
262 FAE = 0x10, /* frame align error */
263 CRC = 0x08, /* crc error */
264 RXER = 0x04, /* receive error */
267 enum rx_desc_control_bits {
268 RXIC = 0x00800000, /* interrupt control */
269 RBSShift = 0,
272 enum tx_desc_status_bits {
273 TXOWN = 0x80000000, /* own bit */
274 JABTO = 0x00004000, /* jabber timeout */
275 CSL = 0x00002000, /* carrier sense lost */
276 LC = 0x00001000, /* late collision */
277 EC = 0x00000800, /* excessive collision */
278 UDF = 0x00000400, /* fifo underflow */
279 DFR = 0x00000200, /* deferred */
280 HF = 0x00000100, /* heartbeat fail */
281 NCRMask = 0x000000ff, /* collision retry count */
282 NCRShift = 0,
285 enum tx_desc_control_bits {
286 TXIC = 0x80000000, /* interrupt control */
287 ETIControl = 0x40000000, /* early transmit interrupt */
288 TXLD = 0x20000000, /* last descriptor */
289 TXFD = 0x10000000, /* first descriptor */
290 CRCEnable = 0x08000000, /* crc control */
291 PADEnable = 0x04000000, /* padding control */
292 RetryTxLC = 0x02000000, /* retry late collision */
293 PKTSMask = 0x3ff800, /* packet size bit21-11 */
294 PKTSShift = 11,
295 TBSMask = 0x000007ff, /* transmit buffer bit 10-0 */
296 TBSShift = 0,
299 /* BootROM/EEPROM/MII Management Register */
300 #define MASK_MIIR_MII_READ 0x00000000
301 #define MASK_MIIR_MII_WRITE 0x00000008
302 #define MASK_MIIR_MII_MDO 0x00000004
303 #define MASK_MIIR_MII_MDI 0x00000002
304 #define MASK_MIIR_MII_MDC 0x00000001
306 /* ST+OP+PHYAD+REGAD+TA */
307 #define OP_READ 0x6000 /* ST:01+OP:10+PHYAD+REGAD+TA:Z0 */
308 #define OP_WRITE 0x5002 /* ST:01+OP:01+PHYAD+REGAD+TA:10 */
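/* Worked example (illustrative): m80x_send_cmd_to_phy() below assembles the
 * 16-bit management frame as "opcode | (phyad << 7) | (regad << 2)" and
 * shifts it out MSB first, so reading register 1 of the PHY at address 1
 * sends 0x6000 | (1 << 7) | (1 << 2) = 0x6084. */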
310 /* ------------------------------------------------------------------------- */
311 /* Constants for Myson PHY */
312 /* ------------------------------------------------------------------------- */
313 #define MysonPHYID 0xd0000302
314 /* 89-7-27 add, (begin) */
315 #define MysonPHYID0 0x0302
316 #define StatusRegister 18
317 #define SPEED100 0x0400 // bit10
318 #define FULLMODE 0x0800 // bit11
319 /* 89-7-27 add, (end) */
321 /* ------------------------------------------------------------------------- */
322 /* Constants for Seeq 80225 PHY */
323 /* ------------------------------------------------------------------------- */
324 #define SeeqPHYID0 0x0016
326 #define MIIRegister18 18
327 #define SPD_DET_100 0x80
328 #define DPLX_DET_FULL 0x40
330 /* ------------------------------------------------------------------------- */
331 /* Constants for Ahdoc 101 PHY */
332 /* ------------------------------------------------------------------------- */
333 #define AhdocPHYID0 0x0022
335 #define DiagnosticReg 18
336 #define DPLX_FULL 0x0800
337 #define Speed_100 0x0400
339 /* 89/6/13 add, */
340 /* -------------------------------------------------------------------------- */
341 /* Constants */
342 /* -------------------------------------------------------------------------- */
343 #define MarvellPHYID0 0x0141
344 #define LevelOnePHYID0 0x0013
346 #define MII1000BaseTControlReg 9
347 #define MII1000BaseTStatusReg 10
348 #define SpecificReg 17
350 /* for 1000BaseT Control Register */
351 #define PHYAbletoPerform1000FullDuplex 0x0200
352 #define PHYAbletoPerform1000HalfDuplex 0x0100
353 #define PHY1000AbilityMask 0x300
355 // for phy specific status register, marvell phy.
356 #define SpeedMask 0x0c000
357 #define Speed_1000M 0x08000
358 #define Speed_100M 0x4000
359 #define Speed_10M 0
360 #define Full_Duplex 0x2000
362 // 89/12/29 add, for phy specific status register, levelone phy, (begin)
363 #define LXT1000_100M 0x08000
364 #define LXT1000_1000M 0x0c000
365 #define LXT1000_Full 0x200
366 // 89/12/29 add, for phy specific status register, levelone phy, (end)
368 /* for 3-in-1 case, BMCRSR register */
369 #define LinkIsUp2 0x00040000
371 /* for PHY */
372 #define LinkIsUp 0x0004
375 struct netdev_private {
376 /* Descriptor rings first for alignment. */
377 struct fealnx_desc *rx_ring;
378 struct fealnx_desc *tx_ring;
380 dma_addr_t rx_ring_dma;
381 dma_addr_t tx_ring_dma;
383 spinlock_t lock;
385 struct net_device_stats stats;
387 /* Media monitoring timer. */
388 struct timer_list timer;
390 /* Reset timer */
391 struct timer_list reset_timer;
392 int reset_timer_armed;
393 unsigned long crvalue_sv;
394 unsigned long imrvalue_sv;
396 /* Frequently used values: keep some adjacent for cache effect. */
397 int flags;
398 struct pci_dev *pci_dev;
399 unsigned long crvalue;
400 unsigned long bcrvalue;
401 unsigned long imrvalue;
402 struct fealnx_desc *cur_rx;
403 struct fealnx_desc *lack_rxbuf;
404 int really_rx_count;
405 struct fealnx_desc *cur_tx;
406 struct fealnx_desc *cur_tx_copy;
407 int really_tx_count;
408 int free_tx_count;
409 unsigned int rx_buf_sz; /* Based on MTU+slack. */
411 /* These values keep track of the transceiver/media in use. */
412 unsigned int linkok;
413 unsigned int line_speed;
414 unsigned int duplexmode;
415 unsigned int default_port:4; /* Last dev->if_port value. */
416 unsigned int PHYType;
418 /* MII transceiver section. */
419 int mii_cnt; /* MII device addresses. */
420 unsigned char phys[2]; /* MII device addresses. */
421 struct mii_if_info mii;
422 void __iomem *mem;
426 static int mdio_read(struct net_device *dev, int phy_id, int location);
427 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
428 static int netdev_open(struct net_device *dev);
429 static void getlinktype(struct net_device *dev);
430 static void getlinkstatus(struct net_device *dev);
431 static void netdev_timer(unsigned long data);
432 static void reset_timer(unsigned long data);
433 static void fealnx_tx_timeout(struct net_device *dev);
434 static void init_ring(struct net_device *dev);
435 static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
436 static irqreturn_t intr_handler(int irq, void *dev_instance);
437 static int netdev_rx(struct net_device *dev);
438 static void set_rx_mode(struct net_device *dev);
439 static void __set_rx_mode(struct net_device *dev);
440 static struct net_device_stats *get_stats(struct net_device *dev);
441 static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
442 static const struct ethtool_ops netdev_ethtool_ops;
443 static int netdev_close(struct net_device *dev);
444 static void reset_rx_descriptors(struct net_device *dev);
445 static void reset_tx_descriptors(struct net_device *dev);
447 static void stop_nic_rx(void __iomem *ioaddr, long crvalue)
449 int delay = 0x1000;
450 iowrite32(crvalue & ~(CR_W_RXEN), ioaddr + TCRRCR);
451 while (--delay) {
452 if ( (ioread32(ioaddr + TCRRCR) & CR_R_RXSTOP) == CR_R_RXSTOP)
453 break;
458 static void stop_nic_rxtx(void __iomem *ioaddr, long crvalue)
460 int delay = 0x1000;
461 iowrite32(crvalue & ~(CR_W_RXEN+CR_W_TXEN), ioaddr + TCRRCR);
462 while (--delay) {
463 if ( (ioread32(ioaddr + TCRRCR) & (CR_R_RXSTOP+CR_R_TXSTOP))
464 == (CR_R_RXSTOP+CR_R_TXSTOP) )
465 break;
469 static const struct net_device_ops netdev_ops = {
470 .ndo_open = netdev_open,
471 .ndo_stop = netdev_close,
472 .ndo_start_xmit = start_tx,
473 .ndo_get_stats = get_stats,
474 .ndo_set_multicast_list = set_rx_mode,
475 .ndo_do_ioctl = mii_ioctl,
476 .ndo_tx_timeout = fealnx_tx_timeout,
477 .ndo_change_mtu = eth_change_mtu,
478 .ndo_set_mac_address = eth_mac_addr,
479 .ndo_validate_addr = eth_validate_addr,
482 static int __devinit fealnx_init_one(struct pci_dev *pdev,
483 const struct pci_device_id *ent)
485 struct netdev_private *np;
486 int i, option, err, irq;
487 static int card_idx = -1;
488 char boardname[12];
489 void __iomem *ioaddr;
490 unsigned long len;
491 unsigned int chip_id = ent->driver_data;
492 struct net_device *dev;
493 void *ring_space;
494 dma_addr_t ring_dma;
495 #ifdef USE_IO_OPS
496 int bar = 0;
497 #else
498 int bar = 1;
499 #endif
501 /* when built into the kernel, we only print version if device is found */
502 #ifndef MODULE
503 static int printed_version;
504 if (!printed_version++)
505 printk(version);
506 #endif
508 card_idx++;
509 sprintf(boardname, "fealnx%d", card_idx);
511 option = card_idx < MAX_UNITS ? options[card_idx] : 0;
513 i = pci_enable_device(pdev);
514 if (i) return i;
515 pci_set_master(pdev);
517 len = pci_resource_len(pdev, bar);
518 if (len < MIN_REGION_SIZE) {
519 dev_err(&pdev->dev,
520 "region size %ld too small, aborting\n", len);
521 return -ENODEV;
524 i = pci_request_regions(pdev, boardname);
525 if (i)
526 return i;
528 irq = pdev->irq;
530 ioaddr = pci_iomap(pdev, bar, len);
531 if (!ioaddr) {
532 err = -ENOMEM;
533 goto err_out_res;
536 dev = alloc_etherdev(sizeof(struct netdev_private));
537 if (!dev) {
538 err = -ENOMEM;
539 goto err_out_unmap;
541 SET_NETDEV_DEV(dev, &pdev->dev);
543 /* read ethernet id */
544 for (i = 0; i < 6; ++i)
545 dev->dev_addr[i] = ioread8(ioaddr + PAR0 + i);
547 /* Reset the chip to erase previous misconfiguration. */
548 iowrite32(0x00000001, ioaddr + BCR);
550 dev->base_addr = (unsigned long)ioaddr;
551 dev->irq = irq;
553 /* Make certain the descriptor lists are aligned. */
554 np = netdev_priv(dev);
555 np->mem = ioaddr;
556 spin_lock_init(&np->lock);
557 np->pci_dev = pdev;
558 np->flags = skel_netdrv_tbl[chip_id].flags;
559 pci_set_drvdata(pdev, dev);
560 np->mii.dev = dev;
561 np->mii.mdio_read = mdio_read;
562 np->mii.mdio_write = mdio_write;
563 np->mii.phy_id_mask = 0x1f;
564 np->mii.reg_num_mask = 0x1f;
566 ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
567 if (!ring_space) {
568 err = -ENOMEM;
569 goto err_out_free_dev;
571 np->rx_ring = (struct fealnx_desc *)ring_space;
572 np->rx_ring_dma = ring_dma;
574 ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
575 if (!ring_space) {
576 err = -ENOMEM;
577 goto err_out_free_rx;
579 np->tx_ring = (struct fealnx_desc *)ring_space;
580 np->tx_ring_dma = ring_dma;
582 /* find the connected MII xcvrs */
583 if (np->flags == HAS_MII_XCVR) {
584 int phy, phy_idx = 0;
586 for (phy = 1; phy < 32 && phy_idx < ARRAY_SIZE(np->phys);
587 phy++) {
588 int mii_status = mdio_read(dev, phy, 1);
590 if (mii_status != 0xffff && mii_status != 0x0000) {
591 np->phys[phy_idx++] = phy;
592 dev_info(&pdev->dev,
593 "MII PHY found at address %d, status "
594 "0x%4.4x.\n", phy, mii_status);
595 /* get phy type */
597 unsigned int data;
599 data = mdio_read(dev, np->phys[0], 2);
600 if (data == SeeqPHYID0)
601 np->PHYType = SeeqPHY;
602 else if (data == AhdocPHYID0)
603 np->PHYType = AhdocPHY;
604 else if (data == MarvellPHYID0)
605 np->PHYType = MarvellPHY;
606 else if (data == MysonPHYID0)
607 np->PHYType = Myson981;
608 else if (data == LevelOnePHYID0)
609 np->PHYType = LevelOnePHY;
610 else
611 np->PHYType = OtherPHY;
616 np->mii_cnt = phy_idx;
617 if (phy_idx == 0)
618 dev_warn(&pdev->dev,
619 "MII PHY not found -- this device may "
620 "not operate correctly.\n");
621 } else {
622 np->phys[0] = 32;
623 /* 89/6/23 add, (begin) */
624 /* get phy type */
625 if (ioread32(ioaddr + PHYIDENTIFIER) == MysonPHYID)
626 np->PHYType = MysonPHY;
627 else
628 np->PHYType = OtherPHY;
630 np->mii.phy_id = np->phys[0];
632 if (dev->mem_start)
633 option = dev->mem_start;
635 /* The lower four bits are the media type. */
636 if (option > 0) {
637 if (option & 0x200)
638 np->mii.full_duplex = 1;
639 np->default_port = option & 15;
642 if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
643 np->mii.full_duplex = full_duplex[card_idx];
645 if (np->mii.full_duplex) {
646 dev_info(&pdev->dev, "Media type forced to Full Duplex.\n");
647 /* 89/6/13 add, (begin) */
648 // if (np->PHYType==MarvellPHY)
649 if ((np->PHYType == MarvellPHY) || (np->PHYType == LevelOnePHY)) {
650 unsigned int data;
652 data = mdio_read(dev, np->phys[0], 9);
653 data = (data & 0xfcff) | 0x0200;
654 mdio_write(dev, np->phys[0], 9, data);
656 /* 89/6/13 add, (end) */
657 if (np->flags == HAS_MII_XCVR)
658 mdio_write(dev, np->phys[0], MII_ADVERTISE, ADVERTISE_FULL);
659 else
660 iowrite32(ADVERTISE_FULL, ioaddr + ANARANLPAR);
661 np->mii.force_media = 1;
664 dev->netdev_ops = &netdev_ops;
665 dev->ethtool_ops = &netdev_ethtool_ops;
666 dev->watchdog_timeo = TX_TIMEOUT;
668 err = register_netdev(dev);
669 if (err)
670 goto err_out_free_tx;
672 printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
673 dev->name, skel_netdrv_tbl[chip_id].chip_name, ioaddr,
674 dev->dev_addr, irq);
676 return 0;
678 err_out_free_tx:
679 pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
680 err_out_free_rx:
681 pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
682 err_out_free_dev:
683 free_netdev(dev);
684 err_out_unmap:
685 pci_iounmap(pdev, ioaddr);
686 err_out_res:
687 pci_release_regions(pdev);
688 return err;
692 static void __devexit fealnx_remove_one(struct pci_dev *pdev)
694 struct net_device *dev = pci_get_drvdata(pdev);
696 if (dev) {
697 struct netdev_private *np = netdev_priv(dev);
699 pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring,
700 np->tx_ring_dma);
701 pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring,
702 np->rx_ring_dma);
703 unregister_netdev(dev);
704 pci_iounmap(pdev, np->mem);
705 free_netdev(dev);
706 pci_release_regions(pdev);
707 pci_set_drvdata(pdev, NULL);
708 } else
709 printk(KERN_ERR "fealnx: remove for unknown device\n");
713 static ulong m80x_send_cmd_to_phy(void __iomem *miiport, int opcode, int phyad, int regad)
715 ulong miir;
716 int i;
717 unsigned int mask, data;
719 /* enable MII output */
720 miir = (ulong) ioread32(miiport);
721 miir &= 0xfffffff0;
723 miir |= MASK_MIIR_MII_WRITE + MASK_MIIR_MII_MDO;
725 /* send 32 1's preamble */
726 for (i = 0; i < 32; i++) {
727 /* low MDC; MDO is already high (miir) */
728 miir &= ~MASK_MIIR_MII_MDC;
729 iowrite32(miir, miiport);
731 /* high MDC */
732 miir |= MASK_MIIR_MII_MDC;
733 iowrite32(miir, miiport);
736 /* calculate ST+OP+PHYAD+REGAD+TA */
737 data = opcode | (phyad << 7) | (regad << 2);
739 /* shift the frame out */
740 mask = 0x8000;
741 while (mask) {
742 /* low MDC, prepare MDO */
743 miir &= ~(MASK_MIIR_MII_MDC + MASK_MIIR_MII_MDO);
744 if (mask & data)
745 miir |= MASK_MIIR_MII_MDO;
747 iowrite32(miir, miiport);
748 /* high MDC */
749 miir |= MASK_MIIR_MII_MDC;
750 iowrite32(miir, miiport);
751 udelay(30);
753 /* next */
754 mask >>= 1;
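/* for a read, release MDO one bit before the end so the PHY can
 * drive the bus during the high-Z half of the turnaround (TA:Z0) */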
755 if (mask == 0x2 && opcode == OP_READ)
756 miir &= ~MASK_MIIR_MII_WRITE;
758 return miir;
762 static int mdio_read(struct net_device *dev, int phyad, int regad)
764 struct netdev_private *np = netdev_priv(dev);
765 void __iomem *miiport = np->mem + MANAGEMENT;
766 ulong miir;
767 unsigned int mask, data;
769 miir = m80x_send_cmd_to_phy(miiport, OP_READ, phyad, regad);
771 /* read data */
772 mask = 0x8000;
773 data = 0;
774 while (mask) {
775 /* low MDC */
776 miir &= ~MASK_MIIR_MII_MDC;
777 iowrite32(miir, miiport);
779 /* read MDI */
780 miir = ioread32(miiport);
781 if (miir & MASK_MIIR_MII_MDI)
782 data |= mask;
784 /* high MDC, and wait */
785 miir |= MASK_MIIR_MII_MDC;
786 iowrite32(miir, miiport);
787 udelay(30);
789 /* next */
790 mask >>= 1;
793 /* low MDC */
794 miir &= ~MASK_MIIR_MII_MDC;
795 iowrite32(miir, miiport);
797 return data & 0xffff;
801 static void mdio_write(struct net_device *dev, int phyad, int regad, int data)
803 struct netdev_private *np = netdev_priv(dev);
804 void __iomem *miiport = np->mem + MANAGEMENT;
805 ulong miir;
806 unsigned int mask;
808 miir = m80x_send_cmd_to_phy(miiport, OP_WRITE, phyad, regad);
810 /* write data */
811 mask = 0x8000;
812 while (mask) {
813 /* low MDC, prepare MDO */
814 miir &= ~(MASK_MIIR_MII_MDC + MASK_MIIR_MII_MDO);
815 if (mask & data)
816 miir |= MASK_MIIR_MII_MDO;
817 iowrite32(miir, miiport);
819 /* high MDC */
820 miir |= MASK_MIIR_MII_MDC;
821 iowrite32(miir, miiport);
823 /* next */
824 mask >>= 1;
827 /* low MDC */
828 miir &= ~MASK_MIIR_MII_MDC;
829 iowrite32(miir, miiport);
833 static int netdev_open(struct net_device *dev)
835 struct netdev_private *np = netdev_priv(dev);
836 void __iomem *ioaddr = np->mem;
837 int i;
839 iowrite32(0x00000001, ioaddr + BCR); /* Reset */
841 if (request_irq(dev->irq, intr_handler, IRQF_SHARED, dev->name, dev))
842 return -EAGAIN;
844 for (i = 0; i < 3; i++)
845 iowrite16(((unsigned short*)dev->dev_addr)[i],
846 ioaddr + PAR0 + i*2);
848 init_ring(dev);
850 iowrite32(np->rx_ring_dma, ioaddr + RXLBA);
851 iowrite32(np->tx_ring_dma, ioaddr + TXLBA);
853 /* Initialize other registers. */
854 /* Configure the PCI bus bursts and FIFO thresholds.
855 486: Set 8 longword burst.
856 586: no burst limit.
857 Burst length 5:3
858 0 0 0 1
859 0 0 1 4
860 0 1 0 8
861 0 1 1 16
862 1 0 0 32
863 1 0 1 64
864 1 1 0 128
865 1 1 1 256
866 Wait the specified 50 PCI cycles after a reset by initializing
867 Tx and Rx queues and the address filter list.
868 FIXME (Ueimor): optimistic for alpha + posted writes ? */
870 np->bcrvalue = 0x10; /* little-endian, 8 burst length */
871 #ifdef __BIG_ENDIAN
872 np->bcrvalue |= 0x04; /* big-endian */
873 #endif
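/* Decoding the value above (per the burst-length table): 0x10 puts 010 in
 * bits 5:3, i.e. 8-longword bursts, and the 0x04 added for big-endian
 * builds selects big-endian descriptor byte order. */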
875 #if defined(__i386__) && !defined(MODULE)
876 if (boot_cpu_data.x86 <= 4)
877 np->crvalue = 0xa00;
878 else
879 #endif
880 np->crvalue = 0xe00; /* rx 128 burst length */
883 // 89/12/29 add,
884 // 90/1/16 modify,
885 // np->imrvalue=FBE|TUNF|CNTOVF|RBU|TI|RI;
886 np->imrvalue = TUNF | CNTOVF | RBU | TI | RI;
887 if (np->pci_dev->device == 0x891) {
888 np->bcrvalue |= 0x200; /* set PROG bit */
889 np->crvalue |= CR_W_ENH; /* set enhanced bit */
890 np->imrvalue |= ETI;
892 iowrite32(np->bcrvalue, ioaddr + BCR);
894 if (dev->if_port == 0)
895 dev->if_port = np->default_port;
897 iowrite32(0, ioaddr + RXPDR);
898 // 89/9/1 modify,
899 // np->crvalue = 0x00e40001; /* tx store and forward, tx/rx enable */
900 np->crvalue |= 0x00e40001; /* tx store and forward, tx/rx enable */
901 np->mii.full_duplex = np->mii.force_media;
902 getlinkstatus(dev);
903 if (np->linkok)
904 getlinktype(dev);
905 __set_rx_mode(dev);
907 netif_start_queue(dev);
909 /* Clear and Enable interrupts by setting the interrupt mask. */
910 iowrite32(FBE | TUNF | CNTOVF | RBU | TI | RI, ioaddr + ISR);
911 iowrite32(np->imrvalue, ioaddr + IMR);
913 if (debug)
914 printk(KERN_DEBUG "%s: Done netdev_open().\n", dev->name);
916 /* Set the timer to check for link beat. */
917 init_timer(&np->timer);
918 np->timer.expires = RUN_AT(3 * HZ);
919 np->timer.data = (unsigned long) dev;
920 np->timer.function = &netdev_timer;
922 /* timer handler */
923 add_timer(&np->timer);
925 init_timer(&np->reset_timer);
926 np->reset_timer.data = (unsigned long) dev;
927 np->reset_timer.function = &reset_timer;
928 np->reset_timer_armed = 0;
930 return 0;
934 static void getlinkstatus(struct net_device *dev)
935 /* function: Read the MII Status Register to get the link status. */
936 /* input : dev... pointer to the adapter block. */
937 /* output : none. */
939 struct netdev_private *np = netdev_priv(dev);
940 unsigned int i, DelayTime = 0x1000;
942 np->linkok = 0;
944 if (np->PHYType == MysonPHY) {
945 for (i = 0; i < DelayTime; ++i) {
946 if (ioread32(np->mem + BMCRSR) & LinkIsUp2) {
947 np->linkok = 1;
948 return;
950 udelay(100);
952 } else {
953 for (i = 0; i < DelayTime; ++i) {
954 if (mdio_read(dev, np->phys[0], MII_BMSR) & BMSR_LSTATUS) {
955 np->linkok = 1;
956 return;
958 udelay(100);
964 static void getlinktype(struct net_device *dev)
966 struct netdev_private *np = netdev_priv(dev);
968 if (np->PHYType == MysonPHY) { /* 3-in-1 case */
969 if (ioread32(np->mem + TCRRCR) & CR_R_FD)
970 np->duplexmode = 2; /* full duplex */
971 else
972 np->duplexmode = 1; /* half duplex */
973 if (ioread32(np->mem + TCRRCR) & CR_R_PS10)
974 np->line_speed = 1; /* 10M */
975 else
976 np->line_speed = 2; /* 100M */
977 } else {
978 if (np->PHYType == SeeqPHY) { /* this PHY is SEEQ 80225 */
979 unsigned int data;
981 data = mdio_read(dev, np->phys[0], MIIRegister18);
982 if (data & SPD_DET_100)
983 np->line_speed = 2; /* 100M */
984 else
985 np->line_speed = 1; /* 10M */
986 if (data & DPLX_DET_FULL)
987 np->duplexmode = 2; /* full duplex mode */
988 else
989 np->duplexmode = 1; /* half duplex mode */
990 } else if (np->PHYType == AhdocPHY) {
991 unsigned int data;
993 data = mdio_read(dev, np->phys[0], DiagnosticReg);
994 if (data & Speed_100)
995 np->line_speed = 2; /* 100M */
996 else
997 np->line_speed = 1; /* 10M */
998 if (data & DPLX_FULL)
999 np->duplexmode = 2; /* full duplex mode */
1000 else
1001 np->duplexmode = 1; /* half duplex mode */
1003 /* 89/6/13 add, (begin) */
1004 else if (np->PHYType == MarvellPHY) {
1005 unsigned int data;
1007 data = mdio_read(dev, np->phys[0], SpecificReg);
1008 if (data & Full_Duplex)
1009 np->duplexmode = 2; /* full duplex mode */
1010 else
1011 np->duplexmode = 1; /* half duplex mode */
1012 data &= SpeedMask;
1013 if (data == Speed_1000M)
1014 np->line_speed = 3; /* 1000M */
1015 else if (data == Speed_100M)
1016 np->line_speed = 2; /* 100M */
1017 else
1018 np->line_speed = 1; /* 10M */
1020 /* 89/6/13 add, (end) */
1021 /* 89/7/27 add, (begin) */
1022 else if (np->PHYType == Myson981) {
1023 unsigned int data;
1025 data = mdio_read(dev, np->phys[0], StatusRegister);
1027 if (data & SPEED100)
1028 np->line_speed = 2;
1029 else
1030 np->line_speed = 1;
1032 if (data & FULLMODE)
1033 np->duplexmode = 2;
1034 else
1035 np->duplexmode = 1;
1037 /* 89/7/27 add, (end) */
1038 /* 89/12/29 add */
1039 else if (np->PHYType == LevelOnePHY) {
1040 unsigned int data;
1042 data = mdio_read(dev, np->phys[0], SpecificReg);
1043 if (data & LXT1000_Full)
1044 np->duplexmode = 2; /* full duplex mode */
1045 else
1046 np->duplexmode = 1; /* half duplex mode */
1047 data &= SpeedMask;
1048 if (data == LXT1000_1000M)
1049 np->line_speed = 3; /* 1000M */
1050 else if (data == LXT1000_100M)
1051 np->line_speed = 2; /* 100M */
1052 else
1053 np->line_speed = 1; /* 10M */
1055 np->crvalue &= (~CR_W_PS10) & (~CR_W_FD) & (~CR_W_PS1000);
1056 if (np->line_speed == 1)
1057 np->crvalue |= CR_W_PS10;
1058 else if (np->line_speed == 3)
1059 np->crvalue |= CR_W_PS1000;
1060 if (np->duplexmode == 2)
1061 np->crvalue |= CR_W_FD;
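/* Summary of the mapping above: line_speed 1 (10M) sets CR_W_PS10,
 * line_speed 3 (1000M) sets CR_W_PS1000, line_speed 2 (100M) sets neither,
 * and duplexmode 2 (full) adds CR_W_FD. */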
1066 /* Take lock before calling this */
1067 static void allocate_rx_buffers(struct net_device *dev)
1069 struct netdev_private *np = netdev_priv(dev);
1071 /* allocate skb for rx buffers */
1072 while (np->really_rx_count != RX_RING_SIZE) {
1073 struct sk_buff *skb;
1075 skb = dev_alloc_skb(np->rx_buf_sz);
1076 if (skb == NULL)
1077 break; /* Better luck next round. */
1079 while (np->lack_rxbuf->skbuff)
1080 np->lack_rxbuf = np->lack_rxbuf->next_desc_logical;
1082 skb->dev = dev; /* Mark as being used by this device. */
1083 np->lack_rxbuf->skbuff = skb;
1084 np->lack_rxbuf->buffer = pci_map_single(np->pci_dev, skb->data,
1085 np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1086 np->lack_rxbuf->status = RXOWN;
1087 ++np->really_rx_count;
1092 static void netdev_timer(unsigned long data)
1094 struct net_device *dev = (struct net_device *) data;
1095 struct netdev_private *np = netdev_priv(dev);
1096 void __iomem *ioaddr = np->mem;
1097 int old_crvalue = np->crvalue;
1098 unsigned int old_linkok = np->linkok;
1099 unsigned long flags;
1101 if (debug)
1102 printk(KERN_DEBUG "%s: Media selection timer tick, status %8.8x "
1103 "config %8.8x.\n", dev->name, ioread32(ioaddr + ISR),
1104 ioread32(ioaddr + TCRRCR));
1106 spin_lock_irqsave(&np->lock, flags);
1108 if (np->flags == HAS_MII_XCVR) {
1109 getlinkstatus(dev);
1110 if ((old_linkok == 0) && (np->linkok == 1)) { /* we need to detect the media type again */
1111 getlinktype(dev);
1112 if (np->crvalue != old_crvalue) {
1113 stop_nic_rxtx(ioaddr, np->crvalue);
1114 iowrite32(np->crvalue, ioaddr + TCRRCR);
1119 allocate_rx_buffers(dev);
1121 spin_unlock_irqrestore(&np->lock, flags);
1123 np->timer.expires = RUN_AT(10 * HZ);
1124 add_timer(&np->timer);
1128 /* Take lock before calling */
1129 /* Reset chip and disable rx, tx and interrupts */
1130 static void reset_and_disable_rxtx(struct net_device *dev)
1132 struct netdev_private *np = netdev_priv(dev);
1133 void __iomem *ioaddr = np->mem;
1134 int delay=51;
1136 /* Reset the chip's Tx and Rx processes. */
1137 stop_nic_rxtx(ioaddr, 0);
1139 /* Disable interrupts by clearing the interrupt mask. */
1140 iowrite32(0, ioaddr + IMR);
1142 /* Reset the chip to erase previous misconfiguration. */
1143 iowrite32(0x00000001, ioaddr + BCR);
1145 /* Ueimor: wait for 50 PCI cycles (and flush posted writes btw).
1146 We surely wait too long (address+data phase). Who cares? */
1147 while (--delay) {
1148 ioread32(ioaddr + BCR);
1149 rmb();
1154 /* Take lock before calling */
1155 /* Restore chip after reset */
1156 static void enable_rxtx(struct net_device *dev)
1158 struct netdev_private *np = netdev_priv(dev);
1159 void __iomem *ioaddr = np->mem;
1161 reset_rx_descriptors(dev);
1163 iowrite32(np->tx_ring_dma + ((char*)np->cur_tx - (char*)np->tx_ring),
1164 ioaddr + TXLBA);
1165 iowrite32(np->rx_ring_dma + ((char*)np->cur_rx - (char*)np->rx_ring),
1166 ioaddr + RXLBA);
1168 iowrite32(np->bcrvalue, ioaddr + BCR);
1170 iowrite32(0, ioaddr + RXPDR);
1171 __set_rx_mode(dev); /* changes np->crvalue, writes it into TCRRCR */
1173 /* Clear and Enable interrupts by setting the interrupt mask. */
1174 iowrite32(FBE | TUNF | CNTOVF | RBU | TI | RI, ioaddr + ISR);
1175 iowrite32(np->imrvalue, ioaddr + IMR);
1177 iowrite32(0, ioaddr + TXPDR);
1181 static void reset_timer(unsigned long data)
1183 struct net_device *dev = (struct net_device *) data;
1184 struct netdev_private *np = netdev_priv(dev);
1185 unsigned long flags;
1187 printk(KERN_WARNING "%s: resetting tx and rx machinery\n", dev->name);
1189 spin_lock_irqsave(&np->lock, flags);
1190 np->crvalue = np->crvalue_sv;
1191 np->imrvalue = np->imrvalue_sv;
1193 reset_and_disable_rxtx(dev);
1194 /* works for me without this:
1195 reset_tx_descriptors(dev); */
1196 enable_rxtx(dev);
1197 netif_start_queue(dev); /* FIXME: or netif_wake_queue(dev); ? */
1199 np->reset_timer_armed = 0;
1201 spin_unlock_irqrestore(&np->lock, flags);
1205 static void fealnx_tx_timeout(struct net_device *dev)
1207 struct netdev_private *np = netdev_priv(dev);
1208 void __iomem *ioaddr = np->mem;
1209 unsigned long flags;
1210 int i;
1212 printk(KERN_WARNING
1213 "%s: Transmit timed out, status %8.8x, resetting...\n",
1214 dev->name, ioread32(ioaddr + ISR));
1217 printk(KERN_DEBUG " Rx ring %p: ", np->rx_ring);
1218 for (i = 0; i < RX_RING_SIZE; i++)
1219 printk(KERN_CONT " %8.8x",
1220 (unsigned int) np->rx_ring[i].status);
1221 printk(KERN_CONT "\n");
1222 printk(KERN_DEBUG " Tx ring %p: ", np->tx_ring);
1223 for (i = 0; i < TX_RING_SIZE; i++)
1224 printk(KERN_CONT " %4.4x", np->tx_ring[i].status);
1225 printk(KERN_CONT "\n");
1228 spin_lock_irqsave(&np->lock, flags);
1230 reset_and_disable_rxtx(dev);
1231 reset_tx_descriptors(dev);
1232 enable_rxtx(dev);
1234 spin_unlock_irqrestore(&np->lock, flags);
1236 dev->trans_start = jiffies; /* prevent tx timeout */
1237 np->stats.tx_errors++;
1238 netif_wake_queue(dev); /* or .._start_.. ?? */
1242 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
1243 static void init_ring(struct net_device *dev)
1245 struct netdev_private *np = netdev_priv(dev);
1246 int i;
1248 /* initialize rx variables */
1249 np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
1250 np->cur_rx = &np->rx_ring[0];
1251 np->lack_rxbuf = np->rx_ring;
1252 np->really_rx_count = 0;
1254 /* initial rx descriptors. */
1255 for (i = 0; i < RX_RING_SIZE; i++) {
1256 np->rx_ring[i].status = 0;
1257 np->rx_ring[i].control = np->rx_buf_sz << RBSShift;
1258 np->rx_ring[i].next_desc = np->rx_ring_dma +
1259 (i + 1)*sizeof(struct fealnx_desc);
1260 np->rx_ring[i].next_desc_logical = &np->rx_ring[i + 1];
1261 np->rx_ring[i].skbuff = NULL;
1264 /* for the last rx descriptor */
1265 np->rx_ring[i - 1].next_desc = np->rx_ring_dma;
1266 np->rx_ring[i - 1].next_desc_logical = np->rx_ring;
1268 /* allocate skb for rx buffers */
1269 for (i = 0; i < RX_RING_SIZE; i++) {
1270 struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
1272 if (skb == NULL) {
1273 np->lack_rxbuf = &np->rx_ring[i];
1274 break;
1277 ++np->really_rx_count;
1278 np->rx_ring[i].skbuff = skb;
1279 skb->dev = dev; /* Mark as being used by this device. */
1280 np->rx_ring[i].buffer = pci_map_single(np->pci_dev, skb->data,
1281 np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1282 np->rx_ring[i].status = RXOWN;
1283 np->rx_ring[i].control |= RXIC;
1286 /* initialize tx variables */
1287 np->cur_tx = &np->tx_ring[0];
1288 np->cur_tx_copy = &np->tx_ring[0];
1289 np->really_tx_count = 0;
1290 np->free_tx_count = TX_RING_SIZE;
1292 for (i = 0; i < TX_RING_SIZE; i++) {
1293 np->tx_ring[i].status = 0;
1294 /* do we need np->tx_ring[i].control = XXX; ?? */
1295 np->tx_ring[i].next_desc = np->tx_ring_dma +
1296 (i + 1)*sizeof(struct fealnx_desc);
1297 np->tx_ring[i].next_desc_logical = &np->tx_ring[i + 1];
1298 np->tx_ring[i].skbuff = NULL;
1301 /* for the last tx descriptor */
1302 np->tx_ring[i - 1].next_desc = np->tx_ring_dma;
1303 np->tx_ring[i - 1].next_desc_logical = &np->tx_ring[0];
1307 static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
1309 struct netdev_private *np = netdev_priv(dev);
1310 unsigned long flags;
1312 spin_lock_irqsave(&np->lock, flags);
1314 np->cur_tx_copy->skbuff = skb;
1316 #define one_buffer
1317 #define BPT 1022
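/* Compile-time choice: with one_buffer (selected above) every skb is sent
 * from a single descriptor; with two_buffer, frames longer than BPT bytes
 * are split across two chained descriptors (the first without TXLD, the
 * second without TXFD). */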
1318 #if defined(one_buffer)
1319 np->cur_tx_copy->buffer = pci_map_single(np->pci_dev, skb->data,
1320 skb->len, PCI_DMA_TODEVICE);
1321 np->cur_tx_copy->control = TXIC | TXLD | TXFD | CRCEnable | PADEnable;
1322 np->cur_tx_copy->control |= (skb->len << PKTSShift); /* pkt size */
1323 np->cur_tx_copy->control |= (skb->len << TBSShift); /* buffer size */
1324 // 89/12/29 add,
1325 if (np->pci_dev->device == 0x891)
1326 np->cur_tx_copy->control |= ETIControl | RetryTxLC;
1327 np->cur_tx_copy->status = TXOWN;
1328 np->cur_tx_copy = np->cur_tx_copy->next_desc_logical;
1329 --np->free_tx_count;
1330 #elif defined(two_buffer)
1331 if (skb->len > BPT) {
1332 struct fealnx_desc *next;
1334 /* for the first descriptor */
1335 np->cur_tx_copy->buffer = pci_map_single(np->pci_dev, skb->data,
1336 BPT, PCI_DMA_TODEVICE);
1337 np->cur_tx_copy->control = TXIC | TXFD | CRCEnable | PADEnable;
1338 np->cur_tx_copy->control |= (skb->len << PKTSShift); /* pkt size */
1339 np->cur_tx_copy->control |= (BPT << TBSShift); /* buffer size */
1341 /* for the last descriptor */
1342 next = np->cur_tx_copy->next_desc_logical;
1343 next->skbuff = skb;
1344 next->control = TXIC | TXLD | CRCEnable | PADEnable;
1345 next->control |= (skb->len << PKTSShift); /* pkt size */
1346 next->control |= ((skb->len - BPT) << TBSShift); /* buf size */
1347 // 89/12/29 add,
1348 if (np->pci_dev->device == 0x891)
1349 np->cur_tx_copy->control |= ETIControl | RetryTxLC;
1350 next->buffer = pci_map_single(np->pci_dev, skb->data + BPT,
1351 skb->len - BPT, PCI_DMA_TODEVICE);
1353 next->status = TXOWN;
1354 np->cur_tx_copy->status = TXOWN;
1356 np->cur_tx_copy = next->next_desc_logical;
1357 np->free_tx_count -= 2;
1358 } else {
1359 np->cur_tx_copy->buffer = pci_map_single(np->pci_dev, skb->data,
1360 skb->len, PCI_DMA_TODEVICE);
1361 np->cur_tx_copy->control = TXIC | TXLD | TXFD | CRCEnable | PADEnable;
1362 np->cur_tx_copy->control |= (skb->len << PKTSShift); /* pkt size */
1363 np->cur_tx_copy->control |= (skb->len << TBSShift); /* buffer size */
1364 // 89/12/29 add,
1365 if (np->pci_dev->device == 0x891)
1366 np->cur_tx_copy->control |= ETIControl | RetryTxLC;
1367 np->cur_tx_copy->status = TXOWN;
1368 np->cur_tx_copy = np->cur_tx_copy->next_desc_logical;
1369 --np->free_tx_count;
1371 #endif
1373 if (np->free_tx_count < 2)
1374 netif_stop_queue(dev);
1375 ++np->really_tx_count;
1376 iowrite32(0, np->mem + TXPDR);
1378 spin_unlock_irqrestore(&np->lock, flags);
1379 return NETDEV_TX_OK;
1383 /* Take lock before calling */
1384 /* Chip probably hosed tx ring. Clean up. */
1385 static void reset_tx_descriptors(struct net_device *dev)
1387 struct netdev_private *np = netdev_priv(dev);
1388 struct fealnx_desc *cur;
1389 int i;
1391 /* initialize tx variables */
1392 np->cur_tx = &np->tx_ring[0];
1393 np->cur_tx_copy = &np->tx_ring[0];
1394 np->really_tx_count = 0;
1395 np->free_tx_count = TX_RING_SIZE;
1397 for (i = 0; i < TX_RING_SIZE; i++) {
1398 cur = &np->tx_ring[i];
1399 if (cur->skbuff) {
1400 pci_unmap_single(np->pci_dev, cur->buffer,
1401 cur->skbuff->len, PCI_DMA_TODEVICE);
1402 dev_kfree_skb_any(cur->skbuff);
1403 cur->skbuff = NULL;
1405 cur->status = 0;
1406 cur->control = 0; /* needed? */
1407 /* probably not needed. We do it for purely paranoid reasons */
1408 cur->next_desc = np->tx_ring_dma +
1409 (i + 1)*sizeof(struct fealnx_desc);
1410 cur->next_desc_logical = &np->tx_ring[i + 1];
1412 /* for the last tx descriptor */
1413 np->tx_ring[TX_RING_SIZE - 1].next_desc = np->tx_ring_dma;
1414 np->tx_ring[TX_RING_SIZE - 1].next_desc_logical = &np->tx_ring[0];
1418 /* Take lock and stop rx before calling this */
1419 static void reset_rx_descriptors(struct net_device *dev)
1421 struct netdev_private *np = netdev_priv(dev);
1422 struct fealnx_desc *cur = np->cur_rx;
1423 int i;
1425 allocate_rx_buffers(dev);
1427 for (i = 0; i < RX_RING_SIZE; i++) {
1428 if (cur->skbuff)
1429 cur->status = RXOWN;
1430 cur = cur->next_desc_logical;
1433 iowrite32(np->rx_ring_dma + ((char*)np->cur_rx - (char*)np->rx_ring),
1434 np->mem + RXLBA);
1438 /* The interrupt handler does all of the Rx thread work and cleans up
1439 after the Tx thread. */
1440 static irqreturn_t intr_handler(int irq, void *dev_instance)
1442 struct net_device *dev = (struct net_device *) dev_instance;
1443 struct netdev_private *np = netdev_priv(dev);
1444 void __iomem *ioaddr = np->mem;
1445 long boguscnt = max_interrupt_work;
1446 unsigned int num_tx = 0;
1447 int handled = 0;
1449 spin_lock(&np->lock);
1451 iowrite32(0, ioaddr + IMR);
1453 do {
1454 u32 intr_status = ioread32(ioaddr + ISR);
1456 /* Acknowledge all of the current interrupt sources ASAP. */
1457 iowrite32(intr_status, ioaddr + ISR);
1459 if (debug)
1460 printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n", dev->name,
1461 intr_status);
1463 if (!(intr_status & np->imrvalue))
1464 break;
1466 handled = 1;
1468 // 90/1/16 delete,
1470 // if (intr_status & FBE)
1471 // { /* fatal error */
1472 // stop_nic_tx(ioaddr, 0);
1473 // stop_nic_rx(ioaddr, 0);
1474 // break;
1475 // };
1477 if (intr_status & TUNF)
1478 iowrite32(0, ioaddr + TXPDR);
1480 if (intr_status & CNTOVF) {
1481 /* missed pkts */
1482 np->stats.rx_missed_errors += ioread32(ioaddr + TALLY) & 0x7fff;
1484 /* crc error */
1485 np->stats.rx_crc_errors +=
1486 (ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16;
1489 if (intr_status & (RI | RBU)) {
1490 if (intr_status & RI)
1491 netdev_rx(dev);
1492 else {
1493 stop_nic_rx(ioaddr, np->crvalue);
1494 reset_rx_descriptors(dev);
1495 iowrite32(np->crvalue, ioaddr + TCRRCR);
1499 while (np->really_tx_count) {
1500 long tx_status = np->cur_tx->status;
1501 long tx_control = np->cur_tx->control;
1503 if (!(tx_control & TXLD)) { /* this pkt is combined by two tx descriptors */
1504 struct fealnx_desc *next;
1506 next = np->cur_tx->next_desc_logical;
1507 tx_status = next->status;
1508 tx_control = next->control;
1511 if (tx_status & TXOWN)
1512 break;
1514 if (!(np->crvalue & CR_W_ENH)) {
1515 if (tx_status & (CSL | LC | EC | UDF | HF)) {
1516 np->stats.tx_errors++;
1517 if (tx_status & EC)
1518 np->stats.tx_aborted_errors++;
1519 if (tx_status & CSL)
1520 np->stats.tx_carrier_errors++;
1521 if (tx_status & LC)
1522 np->stats.tx_window_errors++;
1523 if (tx_status & UDF)
1524 np->stats.tx_fifo_errors++;
1525 if ((tx_status & HF) && np->mii.full_duplex == 0)
1526 np->stats.tx_heartbeat_errors++;
1528 } else {
1529 np->stats.tx_bytes +=
1530 ((tx_control & PKTSMask) >> PKTSShift);
1532 np->stats.collisions +=
1533 ((tx_status & NCRMask) >> NCRShift);
1534 np->stats.tx_packets++;
1536 } else {
1537 np->stats.tx_bytes +=
1538 ((tx_control & PKTSMask) >> PKTSShift);
1539 np->stats.tx_packets++;
1542 /* Free the original skb. */
1543 pci_unmap_single(np->pci_dev, np->cur_tx->buffer,
1544 np->cur_tx->skbuff->len, PCI_DMA_TODEVICE);
1545 dev_kfree_skb_irq(np->cur_tx->skbuff);
1546 np->cur_tx->skbuff = NULL;
1547 --np->really_tx_count;
1548 if (np->cur_tx->control & TXLD) {
1549 np->cur_tx = np->cur_tx->next_desc_logical;
1550 ++np->free_tx_count;
1551 } else {
1552 np->cur_tx = np->cur_tx->next_desc_logical;
1553 np->cur_tx = np->cur_tx->next_desc_logical;
1554 np->free_tx_count += 2;
1556 num_tx++;
1557 } /* end of for loop */
1559 if (num_tx && np->free_tx_count >= 2)
1560 netif_wake_queue(dev);
1562 /* read transmit status for enhanced mode only */
1563 if (np->crvalue & CR_W_ENH) {
1564 long data;
1566 data = ioread32(ioaddr + TSR);
1567 np->stats.tx_errors += (data & 0xff000000) >> 24;
1568 np->stats.tx_aborted_errors += (data & 0xff000000) >> 24;
1569 np->stats.tx_window_errors += (data & 0x00ff0000) >> 16;
1570 np->stats.collisions += (data & 0x0000ffff);
1573 if (--boguscnt < 0) {
1574 printk(KERN_WARNING "%s: Too much work at interrupt, "
1575 "status=0x%4.4x.\n", dev->name, intr_status);
1576 if (!np->reset_timer_armed) {
1577 np->reset_timer_armed = 1;
1578 np->reset_timer.expires = RUN_AT(HZ/2);
1579 add_timer(&np->reset_timer);
1580 stop_nic_rxtx(ioaddr, 0);
1581 netif_stop_queue(dev);
1582 /* or netif_tx_disable(dev); ?? */
1583 /* Prevent other paths from enabling tx,rx,intrs */
1584 np->crvalue_sv = np->crvalue;
1585 np->imrvalue_sv = np->imrvalue;
1586 np->crvalue &= ~(CR_W_TXEN | CR_W_RXEN); /* or simply = 0? */
1587 np->imrvalue = 0;
1590 break;
1592 } while (1);
1594 /* read the tally counters */
1595 /* missed pkts */
1596 np->stats.rx_missed_errors += ioread32(ioaddr + TALLY) & 0x7fff;
1598 /* crc error */
1599 np->stats.rx_crc_errors += (ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16;
1601 if (debug)
1602 printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
1603 dev->name, ioread32(ioaddr + ISR));
1605 iowrite32(np->imrvalue, ioaddr + IMR);
1607 spin_unlock(&np->lock);
1609 return IRQ_RETVAL(handled);
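/*
 * A minimal sketch (hypothetical helper, not part of this driver) of how the
 * TALLY register is consumed above and in get_stats(): one 32-bit read
 * yields both counters, CRC errors in bits 30:16 and missed packets in
 * bits 14:0.
 */
static inline void fealnx_read_tally(void __iomem *ioaddr,
				     struct net_device_stats *stats)
{
	u32 tally = ioread32(ioaddr + TALLY);

	stats->rx_missed_errors += tally & 0x7fff;	/* bits 14:0 */
	stats->rx_crc_errors += (tally >> 16) & 0x7fff;	/* bits 30:16 */
}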
1613 /* This routine is logically part of the interrupt handler, but separated
1614 for clarity and better register allocation. */
1615 static int netdev_rx(struct net_device *dev)
1617 struct netdev_private *np = netdev_priv(dev);
1618 void __iomem *ioaddr = np->mem;
1620 /* If EOP is set on the next entry, it's a new packet. Send it up. */
1621 while (!(np->cur_rx->status & RXOWN) && np->cur_rx->skbuff) {
1622 s32 rx_status = np->cur_rx->status;
1624 if (np->really_rx_count == 0)
1625 break;
1627 if (debug)
1628 printk(KERN_DEBUG " netdev_rx() status was %8.8x.\n", rx_status);
1630 if ((!((rx_status & RXFSD) && (rx_status & RXLSD))) ||
1631 (rx_status & ErrorSummary)) {
1632 if (rx_status & ErrorSummary) { /* there was a fatal error */
1633 if (debug)
1634 printk(KERN_DEBUG
1635 "%s: Receive error, Rx status %8.8x.\n",
1636 dev->name, rx_status);
1638 np->stats.rx_errors++; /* end of a packet. */
1639 if (rx_status & (LONG | RUNT))
1640 np->stats.rx_length_errors++;
1641 if (rx_status & RXER)
1642 np->stats.rx_frame_errors++;
1643 if (rx_status & CRC)
1644 np->stats.rx_crc_errors++;
1645 } else {
1646 int need_to_reset = 0;
1647 int desno = 0;
1649 if (rx_status & RXFSD) { /* this pkt is too long, over one rx buffer */
1650 struct fealnx_desc *cur;
1652 /* check whether this packet was received completely */
1653 cur = np->cur_rx;
1654 while (desno <= np->really_rx_count) {
1655 ++desno;
1656 if ((!(cur->status & RXOWN)) &&
1657 (cur->status & RXLSD))
1658 break;
1659 /* goto next rx descriptor */
1660 cur = cur->next_desc_logical;
1662 if (desno > np->really_rx_count)
1663 need_to_reset = 1;
1664 } else /* RXLSD not found, something is wrong */
1665 need_to_reset = 1;
1667 if (need_to_reset == 0) {
1668 int i;
1670 np->stats.rx_length_errors++;
1672 /* free all rx descriptors related this long pkt */
1673 for (i = 0; i < desno; ++i) {
1674 if (!np->cur_rx->skbuff) {
1675 printk(KERN_DEBUG
1676 "%s: I'm scared\n", dev->name);
1677 break;
1679 np->cur_rx->status = RXOWN;
1680 np->cur_rx = np->cur_rx->next_desc_logical;
1682 continue;
1683 } else { /* rx error, need to reset this chip */
1684 stop_nic_rx(ioaddr, np->crvalue);
1685 reset_rx_descriptors(dev);
1686 iowrite32(np->crvalue, ioaddr + TCRRCR);
1688 break; /* exit the while loop */
1690 } else { /* this received pkt is ok */
1692 struct sk_buff *skb;
1693 /* Omit the four octet CRC from the length. */
1694 short pkt_len = ((rx_status & FLNGMASK) >> FLNGShift) - 4;
1696 #ifndef final_version
1697 if (debug)
1698 printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d"
1699 " status %x.\n", pkt_len, rx_status);
1700 #endif
1702 /* Check if the packet is long enough to accept without copying
1703 to a minimally-sized skbuff. */
1704 if (pkt_len < rx_copybreak &&
1705 (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
1706 skb_reserve(skb, 2); /* 16 byte align the IP header */
1707 pci_dma_sync_single_for_cpu(np->pci_dev,
1708 np->cur_rx->buffer,
1709 np->rx_buf_sz,
1710 PCI_DMA_FROMDEVICE);
1711 /* Call copy + cksum if available. */
1713 #if ! defined(__alpha__)
1714 skb_copy_to_linear_data(skb,
1715 np->cur_rx->skbuff->data, pkt_len);
1716 skb_put(skb, pkt_len);
1717 #else
1718 memcpy(skb_put(skb, pkt_len),
1719 np->cur_rx->skbuff->data, pkt_len);
1720 #endif
1721 pci_dma_sync_single_for_device(np->pci_dev,
1722 np->cur_rx->buffer,
1723 np->rx_buf_sz,
1724 PCI_DMA_FROMDEVICE);
1725 } else {
1726 pci_unmap_single(np->pci_dev,
1727 np->cur_rx->buffer,
1728 np->rx_buf_sz,
1729 PCI_DMA_FROMDEVICE);
1730 skb_put(skb = np->cur_rx->skbuff, pkt_len);
1731 np->cur_rx->skbuff = NULL;
1732 --np->really_rx_count;
1734 skb->protocol = eth_type_trans(skb, dev);
1735 netif_rx(skb);
1736 np->stats.rx_packets++;
1737 np->stats.rx_bytes += pkt_len;
1740 np->cur_rx = np->cur_rx->next_desc_logical;
1741 } /* end of while loop */
1743 /* allocate skb for rx buffers */
1744 allocate_rx_buffers(dev);
1746 return 0;
1750 static struct net_device_stats *get_stats(struct net_device *dev)
1752 struct netdev_private *np = netdev_priv(dev);
1753 void __iomem *ioaddr = np->mem;
1755 /* The chip only needs to report frames it silently dropped. */
1756 if (netif_running(dev)) {
1757 np->stats.rx_missed_errors += ioread32(ioaddr + TALLY) & 0x7fff;
1758 np->stats.rx_crc_errors += (ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16;
1761 return &np->stats;
1765 /* for dev->set_multicast_list */
1766 static void set_rx_mode(struct net_device *dev)
1768 spinlock_t *lp = &((struct netdev_private *)netdev_priv(dev))->lock;
1769 unsigned long flags;
1770 spin_lock_irqsave(lp, flags);
1771 __set_rx_mode(dev);
1772 spin_unlock_irqrestore(lp, flags);
1776 /* Take lock before calling */
1777 static void __set_rx_mode(struct net_device *dev)
1779 struct netdev_private *np = netdev_priv(dev);
1780 void __iomem *ioaddr = np->mem;
1781 u32 mc_filter[2]; /* Multicast hash filter */
1782 u32 rx_mode;
1784 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1785 memset(mc_filter, 0xff, sizeof(mc_filter));
1786 rx_mode = CR_W_PROM | CR_W_AB | CR_W_AM;
1787 } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
1788 (dev->flags & IFF_ALLMULTI)) {
1789 /* Too many to match, or accept all multicasts. */
1790 memset(mc_filter, 0xff, sizeof(mc_filter));
1791 rx_mode = CR_W_AB | CR_W_AM;
1792 } else {
1793 struct netdev_hw_addr *ha;
1795 memset(mc_filter, 0, sizeof(mc_filter));
1796 netdev_for_each_mc_addr(ha, dev) {
1797 unsigned int bit;
1798 bit = (ether_crc(ETH_ALEN, ha->addr) >> 26) ^ 0x3F;
1799 mc_filter[bit >> 5] |= (1 << bit);
1801 rx_mode = CR_W_AB | CR_W_AM;
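/* Worked example (illustrative): a CRC of 0xf0000000 gives
 * (crc >> 26) = 0x3c, and 0x3c ^ 0x3f = 0x03, so bit 3 of mc_filter[0]
 * (register MAR0) is set. */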
1804 stop_nic_rxtx(ioaddr, np->crvalue);
1806 iowrite32(mc_filter[0], ioaddr + MAR0);
1807 iowrite32(mc_filter[1], ioaddr + MAR1);
1808 np->crvalue &= ~CR_W_RXMODEMASK;
1809 np->crvalue |= rx_mode;
1810 iowrite32(np->crvalue, ioaddr + TCRRCR);
1813 static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1815 struct netdev_private *np = netdev_priv(dev);
1817 strcpy(info->driver, DRV_NAME);
1818 strcpy(info->version, DRV_VERSION);
1819 strcpy(info->bus_info, pci_name(np->pci_dev));
1822 static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1824 struct netdev_private *np = netdev_priv(dev);
1825 int rc;
1827 spin_lock_irq(&np->lock);
1828 rc = mii_ethtool_gset(&np->mii, cmd);
1829 spin_unlock_irq(&np->lock);
1831 return rc;
1834 static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1836 struct netdev_private *np = netdev_priv(dev);
1837 int rc;
1839 spin_lock_irq(&np->lock);
1840 rc = mii_ethtool_sset(&np->mii, cmd);
1841 spin_unlock_irq(&np->lock);
1843 return rc;
1846 static int netdev_nway_reset(struct net_device *dev)
1848 struct netdev_private *np = netdev_priv(dev);
1849 return mii_nway_restart(&np->mii);
1852 static u32 netdev_get_link(struct net_device *dev)
1854 struct netdev_private *np = netdev_priv(dev);
1855 return mii_link_ok(&np->mii);
1858 static u32 netdev_get_msglevel(struct net_device *dev)
1860 return debug;
1863 static void netdev_set_msglevel(struct net_device *dev, u32 value)
1865 debug = value;
1868 static const struct ethtool_ops netdev_ethtool_ops = {
1869 .get_drvinfo = netdev_get_drvinfo,
1870 .get_settings = netdev_get_settings,
1871 .set_settings = netdev_set_settings,
1872 .nway_reset = netdev_nway_reset,
1873 .get_link = netdev_get_link,
1874 .get_msglevel = netdev_get_msglevel,
1875 .set_msglevel = netdev_set_msglevel,
1878 static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1880 struct netdev_private *np = netdev_priv(dev);
1881 int rc;
1883 if (!netif_running(dev))
1884 return -EINVAL;
1886 spin_lock_irq(&np->lock);
1887 rc = generic_mii_ioctl(&np->mii, if_mii(rq), cmd, NULL);
1888 spin_unlock_irq(&np->lock);
1890 return rc;
1894 static int netdev_close(struct net_device *dev)
1896 struct netdev_private *np = netdev_priv(dev);
1897 void __iomem *ioaddr = np->mem;
1898 int i;
1900 netif_stop_queue(dev);
1902 /* Disable interrupts by clearing the interrupt mask. */
1903 iowrite32(0x0000, ioaddr + IMR);
1905 /* Stop the chip's Tx and Rx processes. */
1906 stop_nic_rxtx(ioaddr, 0);
1908 del_timer_sync(&np->timer);
1909 del_timer_sync(&np->reset_timer);
1911 free_irq(dev->irq, dev);
1913 /* Free all the skbuffs in the Rx queue. */
1914 for (i = 0; i < RX_RING_SIZE; i++) {
1915 struct sk_buff *skb = np->rx_ring[i].skbuff;
1917 np->rx_ring[i].status = 0;
1918 if (skb) {
1919 pci_unmap_single(np->pci_dev, np->rx_ring[i].buffer,
1920 np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1921 dev_kfree_skb(skb);
1922 np->rx_ring[i].skbuff = NULL;
1926 for (i = 0; i < TX_RING_SIZE; i++) {
1927 struct sk_buff *skb = np->tx_ring[i].skbuff;
1929 if (skb) {
1930 pci_unmap_single(np->pci_dev, np->tx_ring[i].buffer,
1931 skb->len, PCI_DMA_TODEVICE);
1932 dev_kfree_skb(skb);
1933 np->tx_ring[i].skbuff = NULL;
1937 return 0;
1940 static DEFINE_PCI_DEVICE_TABLE(fealnx_pci_tbl) = {
1941 {0x1516, 0x0800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
1942 {0x1516, 0x0803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},
1943 {0x1516, 0x0891, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2},
1944 {} /* terminate list */
1946 MODULE_DEVICE_TABLE(pci, fealnx_pci_tbl);
1949 static struct pci_driver fealnx_driver = {
1950 .name = "fealnx",
1951 .id_table = fealnx_pci_tbl,
1952 .probe = fealnx_init_one,
1953 .remove = __devexit_p(fealnx_remove_one),
1956 static int __init fealnx_init(void)
1958 /* when a module, this is printed whether or not devices are found in probe */
1959 #ifdef MODULE
1960 printk(version);
1961 #endif
1963 return pci_register_driver(&fealnx_driver);
1966 static void __exit fealnx_exit(void)
1968 pci_unregister_driver(&fealnx_driver);
1971 module_init(fealnx_init);
1972 module_exit(fealnx_exit);