[NET]: Make /proc/net per network namespace
[linux-2.6/kmemtrace.git] / drivers / net / tokenring / olympic.c
blobc323101a895b520a94341d667ec041e7648c7c7d
1 /*
2 * olympic.c (c) 1999 Peter De Schrijver All Rights Reserved
3 * 1999/2000 Mike Phillips (mikep@linuxtr.net)
5 * Linux driver for IBM PCI tokenring cards based on the Pit/Pit-Phy/Olympic
6 * chipset.
8 * Base Driver Skeleton:
9 * Written 1993-94 by Donald Becker.
11 * Copyright 1993 United States Government as represented by the
12 * Director, National Security Agency.
14 * Thanks to Erik De Cock, Adrian Bridgett and Frank Fiene for their
15 * assistance and perseverance with the testing of this driver.
17 * This software may be used and distributed according to the terms
18 * of the GNU General Public License, incorporated herein by reference.
20 * 4/27/99 - Alpha Release 0.1.0
21 * First release to the public
23 * 6/8/99 - Official Release 0.2.0
24 * Merged into the kernel code
25 * 8/18/99 - Updated driver for 2.3.13 kernel to use new pci
26 * resource. Driver also reports the card name returned by
27 * the pci resource.
28 * 1/11/00 - Added spinlocks for smp
29 * 2/23/00 - Updated to dev_kfree_irq
30 * 3/10/00 - Fixed FDX enable which triggered other bugs also
31 * squashed.
32 * 5/20/00 - Changes to handle Olympic on LinuxPPC. Endian changes.
33 * The odd thing about the changes is that the fix for
34 * endian issues with the big-endian data in the arb, asb...
35 * was to always swab() the bytes, no matter what CPU.
36 * That's because the read[wl]() functions always swap the
37 * bytes on the way in on PPC.
38 * Fixing the hardware descriptors was another matter,
39 * because they weren't going through read[wl](), there all
40 * the results had to be in memory in le32 values. kdaaker
42 * 12/23/00 - Added minimal Cardbus support (Thanks Donald).
44 * 03/09/01 - Add new pci api, dev_base_lock, general clean up.
46 * 03/27/01 - Add new dma pci (Thanks to Kyle Lucke) and alloc_trdev
47 * Change proc_fs behaviour, now one entry per adapter.
49 * 04/09/01 - Couple of bug fixes to the dma unmaps and ejecting the
50 * adapter when live does not take the system down with it.
52 * 06/02/01 - Clean up, copy skb for small packets
54 * 06/22/01 - Add EISR error handling routines
56 * 07/19/01 - Improve bad LAA reporting, strip out freemem
57 * into a separate function, its called from 3
58 * different places now.
59 * 02/09/02 - Replaced sleep_on.
60 * 03/01/02 - Replace access to several registers from 32 bit to
61 * 16 bit. Fixes alignment errors on PPC 64 bit machines.
62 * Thanks to Al Trautman for this one.
63 * 03/10/02 - Fix BUG in arb_cmd. Bug was there all along but was
64 * silently ignored until the error checking code
65 * went into version 1.0.0
66 * 06/04/02 - Add correct start up sequence for the cardbus adapters.
67 * Required for strict compliance with pci power mgmt specs.
68 * To Do:
70 * Wake on lan
72 * If Problems do Occur
73 * Most problems can be rectified by either closing and opening the interface
74 * (ifconfig down and up) or rmmod and insmod'ing the driver (a bit difficult
75 * if compiled into the kernel).
78 /* Change OLYMPIC_DEBUG to 1 to get verbose, and I mean really verbose, messages */
80 #define OLYMPIC_DEBUG 0
83 #include <linux/module.h>
84 #include <linux/kernel.h>
85 #include <linux/errno.h>
86 #include <linux/timer.h>
87 #include <linux/in.h>
88 #include <linux/ioport.h>
89 #include <linux/string.h>
90 #include <linux/proc_fs.h>
91 #include <linux/ptrace.h>
92 #include <linux/skbuff.h>
93 #include <linux/interrupt.h>
94 #include <linux/delay.h>
95 #include <linux/netdevice.h>
96 #include <linux/trdevice.h>
97 #include <linux/stddef.h>
98 #include <linux/init.h>
99 #include <linux/pci.h>
100 #include <linux/spinlock.h>
101 #include <linux/bitops.h>
102 #include <linux/jiffies.h>
104 #include <net/checksum.h>
105 #include <net/net_namespace.h>
107 #include <asm/io.h>
108 #include <asm/system.h>
110 #include "olympic.h"
112 /* I've got to put some intelligence into the version number so that Peter and I know
113 * which version of the code somebody has got.
114 * Version Number = a.b.c.d where a.b.c is the level of code and d is the latest author.
115 * So 0.0.1.pds = Peter, 0.0.1.mlp = Mike
117 * Official releases will only have an a.b.c version number format.
120 static char version[] __devinitdata =
121 "Olympic.c v1.0.5 6/04/02 - Peter De Schrijver & Mike Phillips" ;
123 static char *open_maj_error[] = {"No error", "Lobe Media Test", "Physical Insertion",
124 "Address Verification", "Neighbor Notification (Ring Poll)",
125 "Request Parameters","FDX Registration Request",
126 "FDX Duplicate Address Check", "Station registration Query Wait",
127 "Unknown stage"};
129 static char *open_min_error[] = {"No error", "Function Failure", "Signal Lost", "Wire Fault",
130 "Ring Speed Mismatch", "Timeout","Ring Failure","Ring Beaconing",
131 "Duplicate Node Address","Request Parameters","Remove Received",
132 "Reserved", "Reserved", "No Monitor Detected for RPL",
133 "Monitor Contention failer for RPL", "FDX Protocol Error"};
135 /* Module paramters */
137 MODULE_AUTHOR("Mike Phillips <mikep@linuxtr.net>") ;
138 MODULE_DESCRIPTION("Olympic PCI/Cardbus Chipset Driver") ;
140 /* Ring Speed 0,4,16,100
141 * 0 = Autosense
142 * 4,16 = Selected speed only, no autosense
143 * This allows the card to be the first on the ring
144 * and become the active monitor.
145 * 100 = Nothing at present, 100mbps is autodetected
146 * if FDX is turned on. May be implemented in the future to
147 * fail if 100mpbs is not detected.
149 * WARNING: Some hubs will allow you to insert
150 * at the wrong speed
153 static int ringspeed[OLYMPIC_MAX_ADAPTERS] = {0,} ;
154 module_param_array(ringspeed, int, NULL, 0);
156 /* Packet buffer size */
158 static int pkt_buf_sz[OLYMPIC_MAX_ADAPTERS] = {0,} ;
159 module_param_array(pkt_buf_sz, int, NULL, 0) ;
161 /* Message Level */
163 static int message_level[OLYMPIC_MAX_ADAPTERS] = {0,} ;
164 module_param_array(message_level, int, NULL, 0) ;
166 /* Change network_monitor to receive mac frames through the arb channel.
167 * Will also create a /proc/net/olympic_tr%d entry, where %d is the tr
168 * device, i.e. tr0, tr1 etc.
169 * Intended to be used to create a ring-error reporting network module
170 * i.e. it will give you the source address of beaconers on the ring
172 static int network_monitor[OLYMPIC_MAX_ADAPTERS] = {0,};
173 module_param_array(network_monitor, int, NULL, 0);
175 static struct pci_device_id olympic_pci_tbl[] = {
176 {PCI_VENDOR_ID_IBM,PCI_DEVICE_ID_IBM_TR_WAKE,PCI_ANY_ID,PCI_ANY_ID,},
177 { } /* Terminating Entry */
179 MODULE_DEVICE_TABLE(pci,olympic_pci_tbl) ;
182 static int olympic_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
183 static int olympic_init(struct net_device *dev);
184 static int olympic_open(struct net_device *dev);
185 static int olympic_xmit(struct sk_buff *skb, struct net_device *dev);
186 static int olympic_close(struct net_device *dev);
187 static void olympic_set_rx_mode(struct net_device *dev);
188 static void olympic_freemem(struct net_device *dev) ;
189 static irqreturn_t olympic_interrupt(int irq, void *dev_id);
190 static struct net_device_stats * olympic_get_stats(struct net_device *dev);
191 static int olympic_set_mac_address(struct net_device *dev, void *addr) ;
192 static void olympic_arb_cmd(struct net_device *dev);
193 static int olympic_change_mtu(struct net_device *dev, int mtu);
194 static void olympic_srb_bh(struct net_device *dev) ;
195 static void olympic_asb_bh(struct net_device *dev) ;
196 static int olympic_proc_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data) ;
198 static int __devinit olympic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
200 struct net_device *dev ;
201 struct olympic_private *olympic_priv;
202 static int card_no = -1 ;
203 int i ;
205 card_no++ ;
207 if ((i = pci_enable_device(pdev))) {
208 return i ;
211 pci_set_master(pdev);
213 if ((i = pci_request_regions(pdev,"olympic"))) {
214 goto op_disable_dev;
217 dev = alloc_trdev(sizeof(struct olympic_private)) ;
218 if (!dev) {
219 i = -ENOMEM;
220 goto op_release_dev;
223 olympic_priv = dev->priv ;
225 spin_lock_init(&olympic_priv->olympic_lock) ;
227 init_waitqueue_head(&olympic_priv->srb_wait);
228 init_waitqueue_head(&olympic_priv->trb_wait);
229 #if OLYMPIC_DEBUG
230 printk(KERN_INFO "pci_device: %p, dev:%p, dev->priv: %p\n", pdev, dev, dev->priv);
231 #endif
232 dev->irq=pdev->irq;
233 dev->base_addr=pci_resource_start(pdev, 0);
234 olympic_priv->olympic_card_name = pci_name(pdev);
235 olympic_priv->pdev = pdev;
236 olympic_priv->olympic_mmio = ioremap(pci_resource_start(pdev,1),256);
237 olympic_priv->olympic_lap = ioremap(pci_resource_start(pdev,2),2048);
238 if (!olympic_priv->olympic_mmio || !olympic_priv->olympic_lap) {
239 goto op_free_iomap;
242 if ((pkt_buf_sz[card_no] < 100) || (pkt_buf_sz[card_no] > 18000) )
243 olympic_priv->pkt_buf_sz = PKT_BUF_SZ ;
244 else
245 olympic_priv->pkt_buf_sz = pkt_buf_sz[card_no] ;
247 dev->mtu = olympic_priv->pkt_buf_sz - TR_HLEN ;
248 olympic_priv->olympic_ring_speed = ringspeed[card_no] ;
249 olympic_priv->olympic_message_level = message_level[card_no] ;
250 olympic_priv->olympic_network_monitor = network_monitor[card_no];
252 if ((i = olympic_init(dev))) {
253 goto op_free_iomap;
256 dev->open=&olympic_open;
257 dev->hard_start_xmit=&olympic_xmit;
258 dev->change_mtu=&olympic_change_mtu;
259 dev->stop=&olympic_close;
260 dev->do_ioctl=NULL;
261 dev->set_multicast_list=&olympic_set_rx_mode;
262 dev->get_stats=&olympic_get_stats ;
263 dev->set_mac_address=&olympic_set_mac_address ;
264 SET_MODULE_OWNER(dev) ;
265 SET_NETDEV_DEV(dev, &pdev->dev);
267 pci_set_drvdata(pdev,dev) ;
268 register_netdev(dev) ;
269 printk("Olympic: %s registered as: %s\n",olympic_priv->olympic_card_name,dev->name);
270 if (olympic_priv->olympic_network_monitor) { /* Must go after register_netdev as we need the device name */
271 char proc_name[20] ;
272 strcpy(proc_name,"olympic_") ;
273 strcat(proc_name,dev->name) ;
274 create_proc_read_entry(proc_name,0,init_net.proc_net,olympic_proc_info,(void *)dev) ;
275 printk("Olympic: Network Monitor information: /proc/%s\n",proc_name);
277 return 0 ;
279 op_free_iomap:
280 if (olympic_priv->olympic_mmio)
281 iounmap(olympic_priv->olympic_mmio);
282 if (olympic_priv->olympic_lap)
283 iounmap(olympic_priv->olympic_lap);
285 free_netdev(dev);
286 op_release_dev:
287 pci_release_regions(pdev);
289 op_disable_dev:
290 pci_disable_device(pdev);
291 return i;
294 static int __devinit olympic_init(struct net_device *dev)
296 struct olympic_private *olympic_priv;
297 u8 __iomem *olympic_mmio, *init_srb,*adapter_addr;
298 unsigned long t;
299 unsigned int uaa_addr;
301 olympic_priv=(struct olympic_private *)dev->priv;
302 olympic_mmio=olympic_priv->olympic_mmio;
304 printk("%s \n", version);
305 printk("%s. I/O at %hx, MMIO at %p, LAP at %p, using irq %d\n", olympic_priv->olympic_card_name, (unsigned int) dev->base_addr,olympic_priv->olympic_mmio, olympic_priv->olympic_lap, dev->irq);
307 writel(readl(olympic_mmio+BCTL) | BCTL_SOFTRESET,olympic_mmio+BCTL);
308 t=jiffies;
309 while((readl(olympic_mmio+BCTL)) & BCTL_SOFTRESET) {
310 schedule();
311 if(time_after(jiffies, t + 40*HZ)) {
312 printk(KERN_ERR "IBM PCI tokenring card not responding.\n");
313 return -ENODEV;
318 /* Needed for cardbus */
319 if(!(readl(olympic_mmio+BCTL) & BCTL_MODE_INDICATOR)) {
320 writel(readl(olympic_priv->olympic_mmio+FERMASK)|FERMASK_INT_BIT, olympic_mmio+FERMASK);
323 #if OLYMPIC_DEBUG
324 printk("BCTL: %x\n",readl(olympic_mmio+BCTL));
325 printk("GPR: %x\n",readw(olympic_mmio+GPR));
326 printk("SISRMASK: %x\n",readl(olympic_mmio+SISR_MASK));
327 #endif
328 /* Aaaahhh, You have got to be real careful setting GPR, the card
329 holds the previous values from flash memory, including autosense
330 and ring speed */
332 writel(readl(olympic_mmio+BCTL)|BCTL_MIMREB,olympic_mmio+BCTL);
334 if (olympic_priv->olympic_ring_speed == 0) { /* Autosense */
335 writew(readw(olympic_mmio+GPR)|GPR_AUTOSENSE,olympic_mmio+GPR);
336 if (olympic_priv->olympic_message_level)
337 printk(KERN_INFO "%s: Ringspeed autosense mode on\n",olympic_priv->olympic_card_name);
338 } else if (olympic_priv->olympic_ring_speed == 16) {
339 if (olympic_priv->olympic_message_level)
340 printk(KERN_INFO "%s: Trying to open at 16 Mbps as requested\n", olympic_priv->olympic_card_name);
341 writew(GPR_16MBPS, olympic_mmio+GPR);
342 } else if (olympic_priv->olympic_ring_speed == 4) {
343 if (olympic_priv->olympic_message_level)
344 printk(KERN_INFO "%s: Trying to open at 4 Mbps as requested\n", olympic_priv->olympic_card_name) ;
345 writew(0, olympic_mmio+GPR);
348 writew(readw(olympic_mmio+GPR)|GPR_NEPTUNE_BF,olympic_mmio+GPR);
350 #if OLYMPIC_DEBUG
351 printk("GPR = %x\n",readw(olympic_mmio + GPR) ) ;
352 #endif
353 /* Solo has been paused to meet the Cardbus power
354 * specs if the adapter is cardbus. Check to
355 * see its been paused and then restart solo. The
356 * adapter should set the pause bit within 1 second.
359 if(!(readl(olympic_mmio+BCTL) & BCTL_MODE_INDICATOR)) {
360 t=jiffies;
361 while (!readl(olympic_mmio+CLKCTL) & CLKCTL_PAUSE) {
362 schedule() ;
363 if(time_after(jiffies, t + 2*HZ)) {
364 printk(KERN_ERR "IBM Cardbus tokenring adapter not responsing.\n") ;
365 return -ENODEV;
368 writel(readl(olympic_mmio+CLKCTL) & ~CLKCTL_PAUSE, olympic_mmio+CLKCTL) ;
371 /* start solo init */
372 writel((1<<15),olympic_mmio+SISR_MASK_SUM);
374 t=jiffies;
375 while(!((readl(olympic_mmio+SISR_RR)) & SISR_SRB_REPLY)) {
376 schedule();
377 if(time_after(jiffies, t + 15*HZ)) {
378 printk(KERN_ERR "IBM PCI tokenring card not responding.\n");
379 return -ENODEV;
383 writel(readw(olympic_mmio+LAPWWO),olympic_mmio+LAPA);
385 #if OLYMPIC_DEBUG
386 printk("LAPWWO: %x, LAPA: %x\n",readl(olympic_mmio+LAPWWO), readl(olympic_mmio+LAPA));
387 #endif
389 init_srb=olympic_priv->olympic_lap + ((readw(olympic_mmio+LAPWWO)) & (~0xf800));
391 #if OLYMPIC_DEBUG
393 int i;
394 printk("init_srb(%p): ",init_srb);
395 for(i=0;i<20;i++)
396 printk("%x ",readb(init_srb+i));
397 printk("\n");
399 #endif
400 if(readw(init_srb+6)) {
401 printk(KERN_INFO "tokenring card initialization failed. errorcode : %x\n",readw(init_srb+6));
402 return -ENODEV;
405 if (olympic_priv->olympic_message_level) {
406 if ( readb(init_srb +2) & 0x40) {
407 printk(KERN_INFO "Olympic: Adapter is FDX capable.\n") ;
408 } else {
409 printk(KERN_INFO "Olympic: Adapter cannot do FDX.\n");
413 uaa_addr=swab16(readw(init_srb+8));
415 #if OLYMPIC_DEBUG
416 printk("UAA resides at %x\n",uaa_addr);
417 #endif
419 writel(uaa_addr,olympic_mmio+LAPA);
420 adapter_addr=olympic_priv->olympic_lap + (uaa_addr & (~0xf800));
422 #if OLYMPIC_DEBUG
423 printk("adapter address: %02x:%02x:%02x:%02x:%02x:%02x\n",
424 readb(adapter_addr), readb(adapter_addr+1),readb(adapter_addr+2),
425 readb(adapter_addr+3),readb(adapter_addr+4),readb(adapter_addr+5));
426 #endif
428 memcpy_fromio(&dev->dev_addr[0], adapter_addr,6);
430 olympic_priv->olympic_addr_table_addr = swab16(readw(init_srb + 12));
431 olympic_priv->olympic_parms_addr = swab16(readw(init_srb + 14));
433 return 0;
437 static int olympic_open(struct net_device *dev)
439 struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv;
440 u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio,*init_srb;
441 unsigned long flags, t;
442 int i, open_finished = 1 ;
443 u8 resp, err;
445 DECLARE_WAITQUEUE(wait,current) ;
447 olympic_init(dev);
449 if(request_irq(dev->irq, &olympic_interrupt, IRQF_SHARED , "olympic", dev)) {
450 return -EAGAIN;
453 #if OLYMPIC_DEBUG
454 printk("BMCTL: %x\n",readl(olympic_mmio+BMCTL_SUM));
455 printk("pending ints: %x\n",readl(olympic_mmio+SISR_RR));
456 #endif
458 writel(SISR_MI,olympic_mmio+SISR_MASK_SUM);
460 writel(SISR_MI | SISR_SRB_REPLY, olympic_mmio+SISR_MASK); /* more ints later, doesn't stop arb cmd interrupt */
462 writel(LISR_LIE,olympic_mmio+LISR); /* more ints later */
464 /* adapter is closed, so SRB is pointed to by LAPWWO */
466 writel(readw(olympic_mmio+LAPWWO),olympic_mmio+LAPA);
467 init_srb=olympic_priv->olympic_lap + ((readw(olympic_mmio+LAPWWO)) & (~0xf800));
469 #if OLYMPIC_DEBUG
470 printk("LAPWWO: %x, LAPA: %x\n",readw(olympic_mmio+LAPWWO), readl(olympic_mmio+LAPA));
471 printk("SISR Mask = %04x\n", readl(olympic_mmio+SISR_MASK));
472 printk("Before the open command \n");
473 #endif
474 do {
475 memset_io(init_srb,0,SRB_COMMAND_SIZE);
477 writeb(SRB_OPEN_ADAPTER,init_srb) ; /* open */
478 writeb(OLYMPIC_CLEAR_RET_CODE,init_srb+2);
480 /* If Network Monitor, instruct card to copy MAC frames through the ARB */
481 if (olympic_priv->olympic_network_monitor)
482 writew(swab16(OPEN_ADAPTER_ENABLE_FDX | OPEN_ADAPTER_PASS_ADC_MAC | OPEN_ADAPTER_PASS_ATT_MAC | OPEN_ADAPTER_PASS_BEACON), init_srb+8);
483 else
484 writew(swab16(OPEN_ADAPTER_ENABLE_FDX), init_srb+8);
486 /* Test OR of first 3 bytes as its totally possible for
487 * someone to set the first 2 bytes to be zero, although this
488 * is an error, the first byte must have bit 6 set to 1 */
490 if (olympic_priv->olympic_laa[0] | olympic_priv->olympic_laa[1] | olympic_priv->olympic_laa[2]) {
491 writeb(olympic_priv->olympic_laa[0],init_srb+12);
492 writeb(olympic_priv->olympic_laa[1],init_srb+13);
493 writeb(olympic_priv->olympic_laa[2],init_srb+14);
494 writeb(olympic_priv->olympic_laa[3],init_srb+15);
495 writeb(olympic_priv->olympic_laa[4],init_srb+16);
496 writeb(olympic_priv->olympic_laa[5],init_srb+17);
497 memcpy(dev->dev_addr,olympic_priv->olympic_laa,dev->addr_len) ;
499 writeb(1,init_srb+30);
501 spin_lock_irqsave(&olympic_priv->olympic_lock,flags);
502 olympic_priv->srb_queued=1;
504 writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);
505 spin_unlock_irqrestore(&olympic_priv->olympic_lock,flags);
507 t = jiffies ;
509 add_wait_queue(&olympic_priv->srb_wait,&wait) ;
510 set_current_state(TASK_INTERRUPTIBLE) ;
512 while(olympic_priv->srb_queued) {
513 schedule() ;
514 if(signal_pending(current)) {
515 printk(KERN_WARNING "%s: Signal received in open.\n",
516 dev->name);
517 printk(KERN_WARNING "SISR=%x LISR=%x\n",
518 readl(olympic_mmio+SISR),
519 readl(olympic_mmio+LISR));
520 olympic_priv->srb_queued=0;
521 break;
523 if (time_after(jiffies, t + 10*HZ)) {
524 printk(KERN_WARNING "%s: SRB timed out. \n",dev->name) ;
525 olympic_priv->srb_queued=0;
526 break ;
528 set_current_state(TASK_INTERRUPTIBLE) ;
530 remove_wait_queue(&olympic_priv->srb_wait,&wait) ;
531 set_current_state(TASK_RUNNING) ;
532 olympic_priv->srb_queued = 0 ;
533 #if OLYMPIC_DEBUG
534 printk("init_srb(%p): ",init_srb);
535 for(i=0;i<20;i++)
536 printk("%02x ",readb(init_srb+i));
537 printk("\n");
538 #endif
540 /* If we get the same return response as we set, the interrupt wasn't raised and the open
541 * timed out.
544 switch (resp = readb(init_srb+2)) {
545 case OLYMPIC_CLEAR_RET_CODE:
546 printk(KERN_WARNING "%s: Adapter Open time out or error.\n", dev->name) ;
547 goto out;
548 case 0:
549 open_finished = 1;
550 break;
551 case 0x07:
552 if (!olympic_priv->olympic_ring_speed && open_finished) { /* Autosense , first time around */
553 printk(KERN_WARNING "%s: Retrying at different ring speed \n", dev->name);
554 open_finished = 0 ;
555 continue;
558 err = readb(init_srb+7);
560 if (!olympic_priv->olympic_ring_speed && ((err & 0x0f) == 0x0d)) {
561 printk(KERN_WARNING "%s: Tried to autosense ring speed with no monitors present\n",dev->name);
562 printk(KERN_WARNING "%s: Please try again with a specified ring speed \n",dev->name);
563 } else {
564 printk(KERN_WARNING "%s: %s - %s\n", dev->name,
565 open_maj_error[(err & 0xf0) >> 4],
566 open_min_error[(err & 0x0f)]);
568 goto out;
570 case 0x32:
571 printk(KERN_WARNING "%s: Invalid LAA: %02x:%02x:%02x:%02x:%02x:%02x\n",
572 dev->name,
573 olympic_priv->olympic_laa[0],
574 olympic_priv->olympic_laa[1],
575 olympic_priv->olympic_laa[2],
576 olympic_priv->olympic_laa[3],
577 olympic_priv->olympic_laa[4],
578 olympic_priv->olympic_laa[5]) ;
579 goto out;
581 default:
582 printk(KERN_WARNING "%s: Bad OPEN response: %x\n", dev->name, resp);
583 goto out;
586 } while (!(open_finished)) ; /* Will only loop if ring speed mismatch re-open attempted && autosense is on */
588 if (readb(init_srb+18) & (1<<3))
589 if (olympic_priv->olympic_message_level)
590 printk(KERN_INFO "%s: Opened in FDX Mode\n",dev->name);
592 if (readb(init_srb+18) & (1<<1))
593 olympic_priv->olympic_ring_speed = 100 ;
594 else if (readb(init_srb+18) & 1)
595 olympic_priv->olympic_ring_speed = 16 ;
596 else
597 olympic_priv->olympic_ring_speed = 4 ;
599 if (olympic_priv->olympic_message_level)
600 printk(KERN_INFO "%s: Opened in %d Mbps mode\n",dev->name, olympic_priv->olympic_ring_speed);
602 olympic_priv->asb = swab16(readw(init_srb+8));
603 olympic_priv->srb = swab16(readw(init_srb+10));
604 olympic_priv->arb = swab16(readw(init_srb+12));
605 olympic_priv->trb = swab16(readw(init_srb+16));
607 olympic_priv->olympic_receive_options = 0x01 ;
608 olympic_priv->olympic_copy_all_options = 0 ;
610 /* setup rx ring */
612 writel((3<<16),olympic_mmio+BMCTL_RWM); /* Ensure end of frame generated interrupts */
614 writel(BMCTL_RX_DIS|3,olympic_mmio+BMCTL_RWM); /* Yes, this the enables RX channel */
616 for(i=0;i<OLYMPIC_RX_RING_SIZE;i++) {
618 struct sk_buff *skb;
620 skb=dev_alloc_skb(olympic_priv->pkt_buf_sz);
621 if(skb == NULL)
622 break;
624 skb->dev = dev;
626 olympic_priv->olympic_rx_ring[i].buffer = cpu_to_le32(pci_map_single(olympic_priv->pdev,
627 skb->data,olympic_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE)) ;
628 olympic_priv->olympic_rx_ring[i].res_length = cpu_to_le32(olympic_priv->pkt_buf_sz);
629 olympic_priv->rx_ring_skb[i]=skb;
632 if (i==0) {
633 printk(KERN_WARNING "%s: Not enough memory to allocate rx buffers. Adapter disabled\n",dev->name);
634 goto out;
637 olympic_priv->rx_ring_dma_addr = pci_map_single(olympic_priv->pdev,olympic_priv->olympic_rx_ring,
638 sizeof(struct olympic_rx_desc) * OLYMPIC_RX_RING_SIZE, PCI_DMA_TODEVICE);
639 writel(olympic_priv->rx_ring_dma_addr, olympic_mmio+RXDESCQ);
640 writel(olympic_priv->rx_ring_dma_addr, olympic_mmio+RXCDA);
641 writew(i, olympic_mmio+RXDESCQCNT);
643 olympic_priv->rx_status_ring_dma_addr = pci_map_single(olympic_priv->pdev, olympic_priv->olympic_rx_status_ring,
644 sizeof(struct olympic_rx_status) * OLYMPIC_RX_RING_SIZE, PCI_DMA_FROMDEVICE);
645 writel(olympic_priv->rx_status_ring_dma_addr, olympic_mmio+RXSTATQ);
646 writel(olympic_priv->rx_status_ring_dma_addr, olympic_mmio+RXCSA);
648 olympic_priv->rx_ring_last_received = OLYMPIC_RX_RING_SIZE - 1; /* last processed rx status */
649 olympic_priv->rx_status_last_received = OLYMPIC_RX_RING_SIZE - 1;
651 writew(i, olympic_mmio+RXSTATQCNT);
653 #if OLYMPIC_DEBUG
654 printk("# of rx buffers: %d, RXENQ: %x\n",i, readw(olympic_mmio+RXENQ));
655 printk("RXCSA: %x, rx_status_ring[0]: %p\n",readl(olympic_mmio+RXCSA),&olympic_priv->olympic_rx_status_ring[0]);
656 printk(" stat_ring[1]: %p, stat_ring[2]: %p, stat_ring[3]: %p\n", &(olympic_priv->olympic_rx_status_ring[1]), &(olympic_priv->olympic_rx_status_ring[2]), &(olympic_priv->olympic_rx_status_ring[3]) );
657 printk(" stat_ring[4]: %p, stat_ring[5]: %p, stat_ring[6]: %p\n", &(olympic_priv->olympic_rx_status_ring[4]), &(olympic_priv->olympic_rx_status_ring[5]), &(olympic_priv->olympic_rx_status_ring[6]) );
658 printk(" stat_ring[7]: %p\n", &(olympic_priv->olympic_rx_status_ring[7]) );
660 printk("RXCDA: %x, rx_ring[0]: %p\n",readl(olympic_mmio+RXCDA),&olympic_priv->olympic_rx_ring[0]);
661 printk("Rx_ring_dma_addr = %08x, rx_status_dma_addr = %08x\n",
662 olympic_priv->rx_ring_dma_addr,olympic_priv->rx_status_ring_dma_addr) ;
663 #endif
665 writew((((readw(olympic_mmio+RXENQ)) & 0x8000) ^ 0x8000) | i,olympic_mmio+RXENQ);
667 #if OLYMPIC_DEBUG
668 printk("# of rx buffers: %d, RXENQ: %x\n",i, readw(olympic_mmio+RXENQ));
669 printk("RXCSA: %x, rx_ring[0]: %p\n",readl(olympic_mmio+RXCSA),&olympic_priv->olympic_rx_status_ring[0]);
670 printk("RXCDA: %x, rx_ring[0]: %p\n",readl(olympic_mmio+RXCDA),&olympic_priv->olympic_rx_ring[0]);
671 #endif
673 writel(SISR_RX_STATUS | SISR_RX_NOBUF,olympic_mmio+SISR_MASK_SUM);
675 /* setup tx ring */
677 writel(BMCTL_TX1_DIS,olympic_mmio+BMCTL_RWM); /* Yes, this enables TX channel 1 */
678 for(i=0;i<OLYMPIC_TX_RING_SIZE;i++)
679 olympic_priv->olympic_tx_ring[i].buffer=0xdeadbeef;
681 olympic_priv->free_tx_ring_entries=OLYMPIC_TX_RING_SIZE;
682 olympic_priv->tx_ring_dma_addr = pci_map_single(olympic_priv->pdev,olympic_priv->olympic_tx_ring,
683 sizeof(struct olympic_tx_desc) * OLYMPIC_TX_RING_SIZE,PCI_DMA_TODEVICE) ;
684 writel(olympic_priv->tx_ring_dma_addr, olympic_mmio+TXDESCQ_1);
685 writel(olympic_priv->tx_ring_dma_addr, olympic_mmio+TXCDA_1);
686 writew(OLYMPIC_TX_RING_SIZE, olympic_mmio+TXDESCQCNT_1);
688 olympic_priv->tx_status_ring_dma_addr = pci_map_single(olympic_priv->pdev, olympic_priv->olympic_tx_status_ring,
689 sizeof(struct olympic_tx_status) * OLYMPIC_TX_RING_SIZE, PCI_DMA_FROMDEVICE);
690 writel(olympic_priv->tx_status_ring_dma_addr,olympic_mmio+TXSTATQ_1);
691 writel(olympic_priv->tx_status_ring_dma_addr,olympic_mmio+TXCSA_1);
692 writew(OLYMPIC_TX_RING_SIZE,olympic_mmio+TXSTATQCNT_1);
694 olympic_priv->tx_ring_free=0; /* next entry in tx ring to use */
695 olympic_priv->tx_ring_last_status=OLYMPIC_TX_RING_SIZE-1; /* last processed tx status */
697 writel(0xffffffff, olympic_mmio+EISR_RWM) ; /* clean the eisr */
698 writel(0,olympic_mmio+EISR) ;
699 writel(EISR_MASK_OPTIONS,olympic_mmio+EISR_MASK) ; /* enables most of the TX error interrupts */
700 writel(SISR_TX1_EOF | SISR_ADAPTER_CHECK | SISR_ARB_CMD | SISR_TRB_REPLY | SISR_ASB_FREE | SISR_ERR,olympic_mmio+SISR_MASK_SUM);
702 #if OLYMPIC_DEBUG
703 printk("BMCTL: %x\n",readl(olympic_mmio+BMCTL_SUM));
704 printk("SISR MASK: %x\n",readl(olympic_mmio+SISR_MASK));
705 #endif
707 if (olympic_priv->olympic_network_monitor) {
708 u8 __iomem *oat ;
709 u8 __iomem *opt ;
710 oat = (olympic_priv->olympic_lap + olympic_priv->olympic_addr_table_addr) ;
711 opt = (olympic_priv->olympic_lap + olympic_priv->olympic_parms_addr) ;
713 printk("%s: Node Address: %02x:%02x:%02x:%02x:%02x:%02x\n",dev->name,
714 readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)),
715 readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+1),
716 readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+2),
717 readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+3),
718 readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+4),
719 readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+5));
720 printk("%s: Functional Address: %02x:%02x:%02x:%02x\n",dev->name,
721 readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)),
722 readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+1),
723 readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+2),
724 readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+3));
725 printk("%s: NAUN Address: %02x:%02x:%02x:%02x:%02x:%02x\n",dev->name,
726 readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)),
727 readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+1),
728 readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+2),
729 readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+3),
730 readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+4),
731 readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+5));
734 netif_start_queue(dev);
735 return 0;
737 out:
738 free_irq(dev->irq, dev);
739 return -EIO;
743 * When we enter the rx routine we do not know how many frames have been
744 * queued on the rx channel. Therefore we start at the next rx status
745 * position and travel around the receive ring until we have completed
746 * all the frames.
748 * This means that we may process the frame before we receive the end
749 * of frame interrupt. This is why we always test the status instead
750 * of blindly processing the next frame.
752 * We also remove the last 4 bytes from the packet as well, these are
753 * just token ring trailer info and upset protocols that don't check
754 * their own length, i.e. SNA.
757 static void olympic_rx(struct net_device *dev)
759 struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv;
760 u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio;
761 struct olympic_rx_status *rx_status;
762 struct olympic_rx_desc *rx_desc ;
763 int rx_ring_last_received,length, buffer_cnt, cpy_length, frag_len;
764 struct sk_buff *skb, *skb2;
765 int i;
767 rx_status=&(olympic_priv->olympic_rx_status_ring[(olympic_priv->rx_status_last_received + 1) & (OLYMPIC_RX_RING_SIZE - 1)]) ;
769 while (rx_status->status_buffercnt) {
770 u32 l_status_buffercnt;
772 olympic_priv->rx_status_last_received++ ;
773 olympic_priv->rx_status_last_received &= (OLYMPIC_RX_RING_SIZE -1);
774 #if OLYMPIC_DEBUG
775 printk("rx status: %x rx len: %x \n", le32_to_cpu(rx_status->status_buffercnt), le32_to_cpu(rx_status->fragmentcnt_framelen));
776 #endif
777 length = le32_to_cpu(rx_status->fragmentcnt_framelen) & 0xffff;
778 buffer_cnt = le32_to_cpu(rx_status->status_buffercnt) & 0xffff;
779 i = buffer_cnt ; /* Need buffer_cnt later for rxenq update */
780 frag_len = le32_to_cpu(rx_status->fragmentcnt_framelen) >> 16;
782 #if OLYMPIC_DEBUG
783 printk("length: %x, frag_len: %x, buffer_cnt: %x\n", length, frag_len, buffer_cnt);
784 #endif
785 l_status_buffercnt = le32_to_cpu(rx_status->status_buffercnt);
786 if(l_status_buffercnt & 0xC0000000) {
787 if (l_status_buffercnt & 0x3B000000) {
788 if (olympic_priv->olympic_message_level) {
789 if (l_status_buffercnt & (1<<29)) /* Rx Frame Truncated */
790 printk(KERN_WARNING "%s: Rx Frame Truncated \n",dev->name);
791 if (l_status_buffercnt & (1<<28)) /*Rx receive overrun */
792 printk(KERN_WARNING "%s: Rx Frame Receive overrun \n",dev->name);
793 if (l_status_buffercnt & (1<<27)) /* No receive buffers */
794 printk(KERN_WARNING "%s: No receive buffers \n",dev->name);
795 if (l_status_buffercnt & (1<<25)) /* Receive frame error detect */
796 printk(KERN_WARNING "%s: Receive frame error detect \n",dev->name);
797 if (l_status_buffercnt & (1<<24)) /* Received Error Detect */
798 printk(KERN_WARNING "%s: Received Error Detect \n",dev->name);
800 olympic_priv->rx_ring_last_received += i ;
801 olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1) ;
802 olympic_priv->olympic_stats.rx_errors++;
803 } else {
805 if (buffer_cnt == 1) {
806 skb = dev_alloc_skb(max_t(int, olympic_priv->pkt_buf_sz,length)) ;
807 } else {
808 skb = dev_alloc_skb(length) ;
811 if (skb == NULL) {
812 printk(KERN_WARNING "%s: Not enough memory to copy packet to upper layers. \n",dev->name) ;
813 olympic_priv->olympic_stats.rx_dropped++ ;
814 /* Update counters even though we don't transfer the frame */
815 olympic_priv->rx_ring_last_received += i ;
816 olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1) ;
817 } else {
818 /* Optimise based upon number of buffers used.
819 If only one buffer is used we can simply swap the buffers around.
820 If more than one then we must use the new buffer and copy the information
821 first. Ideally all frames would be in a single buffer, this can be tuned by
822 altering the buffer size. If the length of the packet is less than
823 1500 bytes we're going to copy it over anyway to stop packets getting
824 dropped from sockets with buffers smaller than our pkt_buf_sz. */
826 if (buffer_cnt==1) {
827 olympic_priv->rx_ring_last_received++ ;
828 olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1);
829 rx_ring_last_received = olympic_priv->rx_ring_last_received ;
830 if (length > 1500) {
831 skb2=olympic_priv->rx_ring_skb[rx_ring_last_received] ;
832 /* unmap buffer */
833 pci_unmap_single(olympic_priv->pdev,
834 le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
835 olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
836 skb_put(skb2,length-4);
837 skb2->protocol = tr_type_trans(skb2,dev);
838 olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer =
839 cpu_to_le32(pci_map_single(olympic_priv->pdev, skb->data,
840 olympic_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE));
841 olympic_priv->olympic_rx_ring[rx_ring_last_received].res_length =
842 cpu_to_le32(olympic_priv->pkt_buf_sz);
843 olympic_priv->rx_ring_skb[rx_ring_last_received] = skb ;
844 netif_rx(skb2) ;
845 } else {
846 pci_dma_sync_single_for_cpu(olympic_priv->pdev,
847 le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
848 olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
849 skb_copy_from_linear_data(olympic_priv->rx_ring_skb[rx_ring_last_received],
850 skb_put(skb,length - 4),
851 length - 4);
852 pci_dma_sync_single_for_device(olympic_priv->pdev,
853 le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
854 olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
855 skb->protocol = tr_type_trans(skb,dev) ;
856 netif_rx(skb) ;
858 } else {
859 do { /* Walk the buffers */
860 olympic_priv->rx_ring_last_received++ ;
861 olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1);
862 rx_ring_last_received = olympic_priv->rx_ring_last_received ;
863 pci_dma_sync_single_for_cpu(olympic_priv->pdev,
864 le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
865 olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
866 rx_desc = &(olympic_priv->olympic_rx_ring[rx_ring_last_received]);
867 cpy_length = (i == 1 ? frag_len : le32_to_cpu(rx_desc->res_length));
868 skb_copy_from_linear_data(olympic_priv->rx_ring_skb[rx_ring_last_received],
869 skb_put(skb, cpy_length),
870 cpy_length);
871 pci_dma_sync_single_for_device(olympic_priv->pdev,
872 le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
873 olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
874 } while (--i) ;
875 skb_trim(skb,skb->len-4) ;
876 skb->protocol = tr_type_trans(skb,dev);
877 netif_rx(skb) ;
879 dev->last_rx = jiffies ;
880 olympic_priv->olympic_stats.rx_packets++ ;
881 olympic_priv->olympic_stats.rx_bytes += length ;
882 } /* if skb == null */
883 } /* If status & 0x3b */
885 } else { /*if buffercnt & 0xC */
886 olympic_priv->rx_ring_last_received += i ;
887 olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE - 1) ;
890 rx_status->fragmentcnt_framelen = 0 ;
891 rx_status->status_buffercnt = 0 ;
892 rx_status = &(olympic_priv->olympic_rx_status_ring[(olympic_priv->rx_status_last_received+1) & (OLYMPIC_RX_RING_SIZE -1) ]);
894 writew((((readw(olympic_mmio+RXENQ)) & 0x8000) ^ 0x8000) | buffer_cnt , olympic_mmio+RXENQ);
895 } /* while */
899 static void olympic_freemem(struct net_device *dev)
901 struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv;
902 int i;
904 for(i=0;i<OLYMPIC_RX_RING_SIZE;i++) {
905 if (olympic_priv->rx_ring_skb[olympic_priv->rx_status_last_received] != NULL) {
906 dev_kfree_skb_irq(olympic_priv->rx_ring_skb[olympic_priv->rx_status_last_received]);
907 olympic_priv->rx_ring_skb[olympic_priv->rx_status_last_received] = NULL;
909 if (olympic_priv->olympic_rx_ring[olympic_priv->rx_status_last_received].buffer != 0xdeadbeef) {
910 pci_unmap_single(olympic_priv->pdev,
911 le32_to_cpu(olympic_priv->olympic_rx_ring[olympic_priv->rx_status_last_received].buffer),
912 olympic_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE);
914 olympic_priv->rx_status_last_received++;
915 olympic_priv->rx_status_last_received&=OLYMPIC_RX_RING_SIZE-1;
917 /* unmap rings */
918 pci_unmap_single(olympic_priv->pdev, olympic_priv->rx_status_ring_dma_addr,
919 sizeof(struct olympic_rx_status) * OLYMPIC_RX_RING_SIZE, PCI_DMA_FROMDEVICE);
920 pci_unmap_single(olympic_priv->pdev, olympic_priv->rx_ring_dma_addr,
921 sizeof(struct olympic_rx_desc) * OLYMPIC_RX_RING_SIZE, PCI_DMA_TODEVICE);
923 pci_unmap_single(olympic_priv->pdev, olympic_priv->tx_status_ring_dma_addr,
924 sizeof(struct olympic_tx_status) * OLYMPIC_TX_RING_SIZE, PCI_DMA_FROMDEVICE);
925 pci_unmap_single(olympic_priv->pdev, olympic_priv->tx_ring_dma_addr,
926 sizeof(struct olympic_tx_desc) * OLYMPIC_TX_RING_SIZE, PCI_DMA_TODEVICE);
928 return ;
931 static irqreturn_t olympic_interrupt(int irq, void *dev_id)
933 struct net_device *dev= (struct net_device *)dev_id;
934 struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv;
935 u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio;
936 u32 sisr;
937 u8 __iomem *adapter_check_area ;
940 * Read sisr but don't reset it yet.
941 * The indication bit may have been set but the interrupt latch
942 * bit may not be set, so we'd lose the interrupt later.
944 sisr=readl(olympic_mmio+SISR) ;
945 if (!(sisr & SISR_MI)) /* Interrupt isn't for us */
946 return IRQ_NONE;
947 sisr=readl(olympic_mmio+SISR_RR) ; /* Read & Reset sisr */
949 spin_lock(&olympic_priv->olympic_lock);
951 /* Hotswap gives us this on removal */
952 if (sisr == 0xffffffff) {
953 printk(KERN_WARNING "%s: Hotswap adapter removal.\n",dev->name) ;
954 spin_unlock(&olympic_priv->olympic_lock) ;
955 return IRQ_NONE;
958 if (sisr & (SISR_SRB_REPLY | SISR_TX1_EOF | SISR_RX_STATUS | SISR_ADAPTER_CHECK |
959 SISR_ASB_FREE | SISR_ARB_CMD | SISR_TRB_REPLY | SISR_RX_NOBUF | SISR_ERR)) {
961 /* If we ever get this the adapter is seriously dead. Only a reset is going to
962 * bring it back to life. We're talking pci bus errors and such like :( */
963 if((sisr & SISR_ERR) && (readl(olympic_mmio+EISR) & EISR_MASK_OPTIONS)) {
964 printk(KERN_ERR "Olympic: EISR Error, EISR=%08x\n",readl(olympic_mmio+EISR)) ;
965 printk(KERN_ERR "The adapter must be reset to clear this condition.\n") ;
966 printk(KERN_ERR "Please report this error to the driver maintainer and/\n") ;
967 printk(KERN_ERR "or the linux-tr mailing list.\n") ;
968 wake_up_interruptible(&olympic_priv->srb_wait);
969 spin_unlock(&olympic_priv->olympic_lock) ;
970 return IRQ_HANDLED;
971 } /* SISR_ERR */
973 if(sisr & SISR_SRB_REPLY) {
974 if(olympic_priv->srb_queued==1) {
975 wake_up_interruptible(&olympic_priv->srb_wait);
976 } else if (olympic_priv->srb_queued==2) {
977 olympic_srb_bh(dev) ;
979 olympic_priv->srb_queued=0;
980 } /* SISR_SRB_REPLY */
982 /* We shouldn't ever miss the Tx interrupt, but the you never know, hence the loop to ensure
983 we get all tx completions. */
984 if (sisr & SISR_TX1_EOF) {
985 while(olympic_priv->olympic_tx_status_ring[(olympic_priv->tx_ring_last_status + 1) & (OLYMPIC_TX_RING_SIZE-1)].status) {
986 olympic_priv->tx_ring_last_status++;
987 olympic_priv->tx_ring_last_status &= (OLYMPIC_TX_RING_SIZE-1);
988 olympic_priv->free_tx_ring_entries++;
989 olympic_priv->olympic_stats.tx_bytes += olympic_priv->tx_ring_skb[olympic_priv->tx_ring_last_status]->len;
990 olympic_priv->olympic_stats.tx_packets++ ;
991 pci_unmap_single(olympic_priv->pdev,
992 le32_to_cpu(olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_last_status].buffer),
993 olympic_priv->tx_ring_skb[olympic_priv->tx_ring_last_status]->len,PCI_DMA_TODEVICE);
994 dev_kfree_skb_irq(olympic_priv->tx_ring_skb[olympic_priv->tx_ring_last_status]);
995 olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_last_status].buffer=0xdeadbeef;
996 olympic_priv->olympic_tx_status_ring[olympic_priv->tx_ring_last_status].status=0;
998 netif_wake_queue(dev);
999 } /* SISR_TX1_EOF */
1001 if (sisr & SISR_RX_STATUS) {
1002 olympic_rx(dev);
1003 } /* SISR_RX_STATUS */
1005 if (sisr & SISR_ADAPTER_CHECK) {
1006 netif_stop_queue(dev);
1007 printk(KERN_WARNING "%s: Adapter Check Interrupt Raised, 8 bytes of information follow:\n", dev->name);
1008 writel(readl(olympic_mmio+LAPWWC),olympic_mmio+LAPA);
1009 adapter_check_area = olympic_priv->olympic_lap + ((readl(olympic_mmio+LAPWWC)) & (~0xf800)) ;
1010 printk(KERN_WARNING "%s: Bytes %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",dev->name, readb(adapter_check_area+0), readb(adapter_check_area+1), readb(adapter_check_area+2), readb(adapter_check_area+3), readb(adapter_check_area+4), readb(adapter_check_area+5), readb(adapter_check_area+6), readb(adapter_check_area+7)) ;
1011 spin_unlock(&olympic_priv->olympic_lock) ;
1012 return IRQ_HANDLED;
1013 } /* SISR_ADAPTER_CHECK */
1015 if (sisr & SISR_ASB_FREE) {
1016 /* Wake up anything that is waiting for the asb response */
1017 if (olympic_priv->asb_queued) {
1018 olympic_asb_bh(dev) ;
1020 } /* SISR_ASB_FREE */
1022 if (sisr & SISR_ARB_CMD) {
1023 olympic_arb_cmd(dev) ;
1024 } /* SISR_ARB_CMD */
1026 if (sisr & SISR_TRB_REPLY) {
1027 /* Wake up anything that is waiting for the trb response */
1028 if (olympic_priv->trb_queued) {
1029 wake_up_interruptible(&olympic_priv->trb_wait);
1031 olympic_priv->trb_queued = 0 ;
1032 } /* SISR_TRB_REPLY */
1034 if (sisr & SISR_RX_NOBUF) {
1035 /* According to the documentation, we don't have to do anything, but trapping it keeps it out of
1036 /var/log/messages. */
1037 } /* SISR_RX_NOBUF */
1038 } else {
1039 printk(KERN_WARNING "%s: Unexpected interrupt: %x\n",dev->name, sisr);
1040 printk(KERN_WARNING "%s: SISR_MASK: %x\n",dev->name, readl(olympic_mmio+SISR_MASK)) ;
1041 } /* One if the interrupts we want */
1042 writel(SISR_MI,olympic_mmio+SISR_MASK_SUM);
1044 spin_unlock(&olympic_priv->olympic_lock) ;
1045 return IRQ_HANDLED;
1048 static int olympic_xmit(struct sk_buff *skb, struct net_device *dev)
1050 struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv;
1051 u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio;
1052 unsigned long flags ;
1054 spin_lock_irqsave(&olympic_priv->olympic_lock, flags);
1056 netif_stop_queue(dev);
1058 if(olympic_priv->free_tx_ring_entries) {
1059 olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_free].buffer =
1060 cpu_to_le32(pci_map_single(olympic_priv->pdev, skb->data, skb->len,PCI_DMA_TODEVICE));
1061 olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_free].status_length = cpu_to_le32(skb->len | (0x80000000));
1062 olympic_priv->tx_ring_skb[olympic_priv->tx_ring_free]=skb;
1063 olympic_priv->free_tx_ring_entries--;
1065 olympic_priv->tx_ring_free++;
1066 olympic_priv->tx_ring_free &= (OLYMPIC_TX_RING_SIZE-1);
1067 writew((((readw(olympic_mmio+TXENQ_1)) & 0x8000) ^ 0x8000) | 1,olympic_mmio+TXENQ_1);
1068 netif_wake_queue(dev);
1069 spin_unlock_irqrestore(&olympic_priv->olympic_lock,flags);
1070 return 0;
1071 } else {
1072 spin_unlock_irqrestore(&olympic_priv->olympic_lock,flags);
1073 return 1;
1079 static int olympic_close(struct net_device *dev)
1081 struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv;
1082 u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio,*srb;
1083 unsigned long t,flags;
1085 DECLARE_WAITQUEUE(wait,current) ;
1087 netif_stop_queue(dev);
1089 writel(olympic_priv->srb,olympic_mmio+LAPA);
1090 srb=olympic_priv->olympic_lap + (olympic_priv->srb & (~0xf800));
1092 writeb(SRB_CLOSE_ADAPTER,srb+0);
1093 writeb(0,srb+1);
1094 writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);
1096 add_wait_queue(&olympic_priv->srb_wait,&wait) ;
1097 set_current_state(TASK_INTERRUPTIBLE) ;
1099 spin_lock_irqsave(&olympic_priv->olympic_lock,flags);
1100 olympic_priv->srb_queued=1;
1102 writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);
1103 spin_unlock_irqrestore(&olympic_priv->olympic_lock,flags);
1105 while(olympic_priv->srb_queued) {
1107 t = schedule_timeout_interruptible(60*HZ);
1109 if(signal_pending(current)) {
1110 printk(KERN_WARNING "%s: SRB timed out.\n",dev->name);
1111 printk(KERN_WARNING "SISR=%x MISR=%x\n",readl(olympic_mmio+SISR),readl(olympic_mmio+LISR));
1112 olympic_priv->srb_queued=0;
1113 break;
1116 if (t == 0) {
1117 printk(KERN_WARNING "%s: SRB timed out. May not be fatal. \n",dev->name) ;
1119 olympic_priv->srb_queued=0;
1121 remove_wait_queue(&olympic_priv->srb_wait,&wait) ;
1123 olympic_priv->rx_status_last_received++;
1124 olympic_priv->rx_status_last_received&=OLYMPIC_RX_RING_SIZE-1;
1126 olympic_freemem(dev) ;
1128 /* reset tx/rx fifo's and busmaster logic */
1130 writel(readl(olympic_mmio+BCTL)|(3<<13),olympic_mmio+BCTL);
1131 udelay(1);
1132 writel(readl(olympic_mmio+BCTL)&~(3<<13),olympic_mmio+BCTL);
1134 #if OLYMPIC_DEBUG
1136 int i ;
1137 printk("srb(%p): ",srb);
1138 for(i=0;i<4;i++)
1139 printk("%x ",readb(srb+i));
1140 printk("\n");
1142 #endif
1143 free_irq(dev->irq,dev);
1145 return 0;
1149 static void olympic_set_rx_mode(struct net_device *dev)
1151 struct olympic_private *olympic_priv = (struct olympic_private *) dev->priv ;
1152 u8 __iomem *olympic_mmio = olympic_priv->olympic_mmio ;
1153 u8 options = 0;
1154 u8 __iomem *srb;
1155 struct dev_mc_list *dmi ;
1156 unsigned char dev_mc_address[4] ;
1157 int i ;
1159 writel(olympic_priv->srb,olympic_mmio+LAPA);
1160 srb=olympic_priv->olympic_lap + (olympic_priv->srb & (~0xf800));
1161 options = olympic_priv->olympic_copy_all_options;
1163 if (dev->flags&IFF_PROMISC)
1164 options |= 0x61 ;
1165 else
1166 options &= ~0x61 ;
1168 /* Only issue the srb if there is a change in options */
1170 if ((options ^ olympic_priv->olympic_copy_all_options)) {
1172 /* Now to issue the srb command to alter the copy.all.options */
1174 writeb(SRB_MODIFY_RECEIVE_OPTIONS,srb);
1175 writeb(0,srb+1);
1176 writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);
1177 writeb(0,srb+3);
1178 writeb(olympic_priv->olympic_receive_options,srb+4);
1179 writeb(options,srb+5);
1181 olympic_priv->srb_queued=2; /* Can't sleep, use srb_bh */
1183 writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);
1185 olympic_priv->olympic_copy_all_options = options ;
1187 return ;
1190 /* Set the functional addresses we need for multicast */
1192 dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ;
1194 for (i=0,dmi=dev->mc_list;i < dev->mc_count; i++,dmi = dmi->next) {
1195 dev_mc_address[0] |= dmi->dmi_addr[2] ;
1196 dev_mc_address[1] |= dmi->dmi_addr[3] ;
1197 dev_mc_address[2] |= dmi->dmi_addr[4] ;
1198 dev_mc_address[3] |= dmi->dmi_addr[5] ;
1201 writeb(SRB_SET_FUNC_ADDRESS,srb+0);
1202 writeb(0,srb+1);
1203 writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);
1204 writeb(0,srb+3);
1205 writeb(0,srb+4);
1206 writeb(0,srb+5);
1207 writeb(dev_mc_address[0],srb+6);
1208 writeb(dev_mc_address[1],srb+7);
1209 writeb(dev_mc_address[2],srb+8);
1210 writeb(dev_mc_address[3],srb+9);
1212 olympic_priv->srb_queued = 2 ;
1213 writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);
1217 static void olympic_srb_bh(struct net_device *dev)
1219 struct olympic_private *olympic_priv = (struct olympic_private *) dev->priv ;
1220 u8 __iomem *olympic_mmio = olympic_priv->olympic_mmio ;
1221 u8 __iomem *srb;
1223 writel(olympic_priv->srb,olympic_mmio+LAPA);
1224 srb=olympic_priv->olympic_lap + (olympic_priv->srb & (~0xf800));
1226 switch (readb(srb)) {
1228 /* SRB_MODIFY_RECEIVE_OPTIONS i.e. set_multicast_list options (promiscuous)
1229 * At some point we should do something if we get an error, such as
1230 * resetting the IFF_PROMISC flag in dev
1233 case SRB_MODIFY_RECEIVE_OPTIONS:
1234 switch (readb(srb+2)) {
1235 case 0x01:
1236 printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name) ;
1237 break ;
1238 case 0x04:
1239 printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name);
1240 break ;
1241 default:
1242 if (olympic_priv->olympic_message_level)
1243 printk(KERN_WARNING "%s: Receive Options Modified to %x,%x\n",dev->name,olympic_priv->olympic_copy_all_options, olympic_priv->olympic_receive_options) ;
1244 break ;
1245 } /* switch srb[2] */
1246 break ;
1248 /* SRB_SET_GROUP_ADDRESS - Multicast group setting
1251 case SRB_SET_GROUP_ADDRESS:
1252 switch (readb(srb+2)) {
1253 case 0x00:
1254 break ;
1255 case 0x01:
1256 printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ;
1257 break ;
1258 case 0x04:
1259 printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name);
1260 break ;
1261 case 0x3c:
1262 printk(KERN_WARNING "%s: Group/Functional address indicator bits not set correctly\n",dev->name) ;
1263 break ;
1264 case 0x3e: /* If we ever implement individual multicast addresses, will need to deal with this */
1265 printk(KERN_WARNING "%s: Group address registers full\n",dev->name) ;
1266 break ;
1267 case 0x55:
1268 printk(KERN_INFO "%s: Group Address already set.\n",dev->name) ;
1269 break ;
1270 default:
1271 break ;
1272 } /* switch srb[2] */
1273 break ;
1275 /* SRB_RESET_GROUP_ADDRESS - Remove a multicast address from group list
1278 case SRB_RESET_GROUP_ADDRESS:
1279 switch (readb(srb+2)) {
1280 case 0x00:
1281 break ;
1282 case 0x01:
1283 printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ;
1284 break ;
1285 case 0x04:
1286 printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
1287 break ;
1288 case 0x39: /* Must deal with this if individual multicast addresses used */
1289 printk(KERN_INFO "%s: Group address not found \n",dev->name);
1290 break ;
1291 default:
1292 break ;
1293 } /* switch srb[2] */
1294 break ;
1297 /* SRB_SET_FUNC_ADDRESS - Called by the set_rx_mode
1300 case SRB_SET_FUNC_ADDRESS:
1301 switch (readb(srb+2)) {
1302 case 0x00:
1303 if (olympic_priv->olympic_message_level)
1304 printk(KERN_INFO "%s: Functional Address Mask Set \n",dev->name) ;
1305 break ;
1306 case 0x01:
1307 printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ;
1308 break ;
1309 case 0x04:
1310 printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
1311 break ;
1312 default:
1313 break ;
1314 } /* switch srb[2] */
1315 break ;
1317 /* SRB_READ_LOG - Read and reset the adapter error counters
1320 case SRB_READ_LOG:
1321 switch (readb(srb+2)) {
1322 case 0x00:
1323 if (olympic_priv->olympic_message_level)
1324 printk(KERN_INFO "%s: Read Log issued\n",dev->name) ;
1325 break ;
1326 case 0x01:
1327 printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ;
1328 break ;
1329 case 0x04:
1330 printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
1331 break ;
1333 } /* switch srb[2] */
1334 break ;
1336 /* SRB_READ_SR_COUNTERS - Read and reset the source routing bridge related counters */
1338 case SRB_READ_SR_COUNTERS:
1339 switch (readb(srb+2)) {
1340 case 0x00:
1341 if (olympic_priv->olympic_message_level)
1342 printk(KERN_INFO "%s: Read Source Routing Counters issued\n",dev->name) ;
1343 break ;
1344 case 0x01:
1345 printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ;
1346 break ;
1347 case 0x04:
1348 printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
1349 break ;
1350 default:
1351 break ;
1352 } /* switch srb[2] */
1353 break ;
1355 default:
1356 printk(KERN_WARNING "%s: Unrecognized srb bh return value.\n",dev->name);
1357 break ;
1358 } /* switch srb[0] */
1362 static struct net_device_stats * olympic_get_stats(struct net_device *dev)
1364 struct olympic_private *olympic_priv ;
1365 olympic_priv=(struct olympic_private *) dev->priv;
1366 return (struct net_device_stats *) &olympic_priv->olympic_stats;
1369 static int olympic_set_mac_address (struct net_device *dev, void *addr)
1371 struct sockaddr *saddr = addr ;
1372 struct olympic_private *olympic_priv = (struct olympic_private *)dev->priv ;
1374 if (netif_running(dev)) {
1375 printk(KERN_WARNING "%s: Cannot set mac/laa address while card is open\n", dev->name) ;
1376 return -EIO ;
1379 memcpy(olympic_priv->olympic_laa, saddr->sa_data,dev->addr_len) ;
1381 if (olympic_priv->olympic_message_level) {
1382 printk(KERN_INFO "%s: MAC/LAA Set to = %x.%x.%x.%x.%x.%x\n",dev->name, olympic_priv->olympic_laa[0],
1383 olympic_priv->olympic_laa[1], olympic_priv->olympic_laa[2],
1384 olympic_priv->olympic_laa[3], olympic_priv->olympic_laa[4],
1385 olympic_priv->olympic_laa[5]);
1388 return 0 ;
1391 static void olympic_arb_cmd(struct net_device *dev)
1393 struct olympic_private *olympic_priv = (struct olympic_private *) dev->priv;
1394 u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio;
1395 u8 __iomem *arb_block, *asb_block, *srb ;
1396 u8 header_len ;
1397 u16 frame_len, buffer_len ;
1398 struct sk_buff *mac_frame ;
1399 u8 __iomem *buf_ptr ;
1400 u8 __iomem *frame_data ;
1401 u16 buff_off ;
1402 u16 lan_status = 0, lan_status_diff ; /* Initialize to stop compiler warning */
1403 u8 fdx_prot_error ;
1404 u16 next_ptr;
1406 arb_block = (olympic_priv->olympic_lap + olympic_priv->arb) ;
1407 asb_block = (olympic_priv->olympic_lap + olympic_priv->asb) ;
1408 srb = (olympic_priv->olympic_lap + olympic_priv->srb) ;
1410 if (readb(arb_block+0) == ARB_RECEIVE_DATA) { /* Receive.data, MAC frames */
1412 header_len = readb(arb_block+8) ; /* 802.5 Token-Ring Header Length */
1413 frame_len = swab16(readw(arb_block + 10)) ;
1415 buff_off = swab16(readw(arb_block + 6)) ;
1417 buf_ptr = olympic_priv->olympic_lap + buff_off ;
1419 #if OLYMPIC_DEBUG
1421 int i;
1422 frame_data = buf_ptr+offsetof(struct mac_receive_buffer,frame_data) ;
1424 for (i=0 ; i < 14 ; i++) {
1425 printk("Loc %d = %02x\n",i,readb(frame_data + i));
1428 printk("next %04x, fs %02x, len %04x \n",readw(buf_ptr+offsetof(struct mac_receive_buffer,next)), readb(buf_ptr+offsetof(struct mac_receive_buffer,frame_status)), readw(buf_ptr+offsetof(struct mac_receive_buffer,buffer_length)));
1430 #endif
1431 mac_frame = dev_alloc_skb(frame_len) ;
1432 if (!mac_frame) {
1433 printk(KERN_WARNING "%s: Memory squeeze, dropping frame.\n", dev->name);
1434 goto drop_frame;
1437 /* Walk the buffer chain, creating the frame */
1439 do {
1440 frame_data = buf_ptr+offsetof(struct mac_receive_buffer,frame_data) ;
1441 buffer_len = swab16(readw(buf_ptr+offsetof(struct mac_receive_buffer,buffer_length)));
1442 memcpy_fromio(skb_put(mac_frame, buffer_len), frame_data , buffer_len ) ;
1443 next_ptr=readw(buf_ptr+offsetof(struct mac_receive_buffer,next));
1444 } while (next_ptr && (buf_ptr=olympic_priv->olympic_lap + ntohs(next_ptr)));
1446 mac_frame->protocol = tr_type_trans(mac_frame, dev);
1448 if (olympic_priv->olympic_network_monitor) {
1449 struct trh_hdr *mac_hdr ;
1450 printk(KERN_WARNING "%s: Received MAC Frame, details: \n",dev->name) ;
1451 mac_hdr = tr_hdr(mac_frame);
1452 printk(KERN_WARNING "%s: MAC Frame Dest. Addr: %02x:%02x:%02x:%02x:%02x:%02x \n", dev->name , mac_hdr->daddr[0], mac_hdr->daddr[1], mac_hdr->daddr[2], mac_hdr->daddr[3], mac_hdr->daddr[4], mac_hdr->daddr[5]) ;
1453 printk(KERN_WARNING "%s: MAC Frame Srce. Addr: %02x:%02x:%02x:%02x:%02x:%02x \n", dev->name , mac_hdr->saddr[0], mac_hdr->saddr[1], mac_hdr->saddr[2], mac_hdr->saddr[3], mac_hdr->saddr[4], mac_hdr->saddr[5]) ;
1455 netif_rx(mac_frame);
1456 dev->last_rx = jiffies;
1458 drop_frame:
1459 /* Now tell the card we have dealt with the received frame */
1461 /* Set LISR Bit 1 */
1462 writel(LISR_ARB_FREE,olympic_priv->olympic_mmio + LISR_SUM);
1464 /* Is the ASB free ? */
1466 if (readb(asb_block + 2) != 0xff) {
1467 olympic_priv->asb_queued = 1 ;
1468 writel(LISR_ASB_FREE_REQ,olympic_priv->olympic_mmio+LISR_SUM);
1469 return ;
1470 /* Drop out and wait for the bottom half to be run */
1473 writeb(ASB_RECEIVE_DATA,asb_block); /* Receive data */
1474 writeb(OLYMPIC_CLEAR_RET_CODE,asb_block+2); /* Necessary ?? */
1475 writeb(readb(arb_block+6),asb_block+6); /* Must send the address back to the adapter */
1476 writeb(readb(arb_block+7),asb_block+7); /* To let it know we have dealt with the data */
1478 writel(LISR_ASB_REPLY | LISR_ASB_FREE_REQ,olympic_priv->olympic_mmio+LISR_SUM);
1480 olympic_priv->asb_queued = 2 ;
1482 return ;
1484 } else if (readb(arb_block) == ARB_LAN_CHANGE_STATUS) { /* Lan.change.status */
1485 lan_status = swab16(readw(arb_block+6));
1486 fdx_prot_error = readb(arb_block+8) ;
1488 /* Issue ARB Free */
1489 writel(LISR_ARB_FREE,olympic_priv->olympic_mmio+LISR_SUM);
1491 lan_status_diff = olympic_priv->olympic_lan_status ^ lan_status ;
1493 if (lan_status_diff & (LSC_LWF | LSC_ARW | LSC_FPE | LSC_RR) ) {
1494 if (lan_status_diff & LSC_LWF)
1495 printk(KERN_WARNING "%s: Short circuit detected on the lobe\n",dev->name);
1496 if (lan_status_diff & LSC_ARW)
1497 printk(KERN_WARNING "%s: Auto removal error\n",dev->name);
1498 if (lan_status_diff & LSC_FPE)
1499 printk(KERN_WARNING "%s: FDX Protocol Error\n",dev->name);
1500 if (lan_status_diff & LSC_RR)
1501 printk(KERN_WARNING "%s: Force remove MAC frame received\n",dev->name);
1503 /* Adapter has been closed by the hardware */
1505 /* reset tx/rx fifo's and busmaster logic */
1507 writel(readl(olympic_mmio+BCTL)|(3<<13),olympic_mmio+BCTL);
1508 udelay(1);
1509 writel(readl(olympic_mmio+BCTL)&~(3<<13),olympic_mmio+BCTL);
1510 netif_stop_queue(dev);
1511 olympic_priv->srb = readw(olympic_priv->olympic_lap + LAPWWO) ;
1512 printk(KERN_WARNING "%s: Adapter has been closed \n", dev->name) ;
1513 } /* If serious error */
1515 if (olympic_priv->olympic_message_level) {
1516 if (lan_status_diff & LSC_SIG_LOSS)
1517 printk(KERN_WARNING "%s: No receive signal detected \n", dev->name) ;
1518 if (lan_status_diff & LSC_HARD_ERR)
1519 printk(KERN_INFO "%s: Beaconing \n",dev->name);
1520 if (lan_status_diff & LSC_SOFT_ERR)
1521 printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame \n",dev->name);
1522 if (lan_status_diff & LSC_TRAN_BCN)
1523 printk(KERN_INFO "%s: We are tranmitting the beacon, aaah\n",dev->name);
1524 if (lan_status_diff & LSC_SS)
1525 printk(KERN_INFO "%s: Single Station on the ring \n", dev->name);
1526 if (lan_status_diff & LSC_RING_REC)
1527 printk(KERN_INFO "%s: Ring recovery ongoing\n",dev->name);
1528 if (lan_status_diff & LSC_FDX_MODE)
1529 printk(KERN_INFO "%s: Operating in FDX mode\n",dev->name);
1532 if (lan_status_diff & LSC_CO) {
1534 if (olympic_priv->olympic_message_level)
1535 printk(KERN_INFO "%s: Counter Overflow \n", dev->name);
1537 /* Issue READ.LOG command */
1539 writeb(SRB_READ_LOG, srb);
1540 writeb(0,srb+1);
1541 writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);
1542 writeb(0,srb+3);
1543 writeb(0,srb+4);
1544 writeb(0,srb+5);
1546 olympic_priv->srb_queued=2; /* Can't sleep, use srb_bh */
1548 writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);
1552 if (lan_status_diff & LSC_SR_CO) {
1554 if (olympic_priv->olympic_message_level)
1555 printk(KERN_INFO "%s: Source routing counters overflow\n", dev->name);
1557 /* Issue a READ.SR.COUNTERS */
1559 writeb(SRB_READ_SR_COUNTERS,srb);
1560 writeb(0,srb+1);
1561 writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);
1562 writeb(0,srb+3);
1564 olympic_priv->srb_queued=2; /* Can't sleep, use srb_bh */
1566 writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);
1570 olympic_priv->olympic_lan_status = lan_status ;
1572 } /* Lan.change.status */
1573 else
1574 printk(KERN_WARNING "%s: Unknown arb command \n", dev->name);
1577 static void olympic_asb_bh(struct net_device *dev)
1579 struct olympic_private *olympic_priv = (struct olympic_private *) dev->priv ;
1580 u8 __iomem *arb_block, *asb_block ;
1582 arb_block = (olympic_priv->olympic_lap + olympic_priv->arb) ;
1583 asb_block = (olympic_priv->olympic_lap + olympic_priv->asb) ;
1585 if (olympic_priv->asb_queued == 1) { /* Dropped through the first time */
1587 writeb(ASB_RECEIVE_DATA,asb_block); /* Receive data */
1588 writeb(OLYMPIC_CLEAR_RET_CODE,asb_block+2); /* Necessary ?? */
1589 writeb(readb(arb_block+6),asb_block+6); /* Must send the address back to the adapter */
1590 writeb(readb(arb_block+7),asb_block+7); /* To let it know we have dealt with the data */
1592 writel(LISR_ASB_REPLY | LISR_ASB_FREE_REQ,olympic_priv->olympic_mmio+LISR_SUM);
1593 olympic_priv->asb_queued = 2 ;
1595 return ;
1598 if (olympic_priv->asb_queued == 2) {
1599 switch (readb(asb_block+2)) {
1600 case 0x01:
1601 printk(KERN_WARNING "%s: Unrecognized command code \n", dev->name);
1602 break ;
1603 case 0x26:
1604 printk(KERN_WARNING "%s: Unrecognized buffer address \n", dev->name);
1605 break ;
1606 case 0xFF:
1607 /* Valid response, everything should be ok again */
1608 break ;
1609 default:
1610 printk(KERN_WARNING "%s: Invalid return code in asb\n",dev->name);
1611 break ;
1614 olympic_priv->asb_queued = 0 ;
1617 static int olympic_change_mtu(struct net_device *dev, int mtu)
1619 struct olympic_private *olympic_priv = (struct olympic_private *) dev->priv;
1620 u16 max_mtu ;
1622 if (olympic_priv->olympic_ring_speed == 4)
1623 max_mtu = 4500 ;
1624 else
1625 max_mtu = 18000 ;
1627 if (mtu > max_mtu)
1628 return -EINVAL ;
1629 if (mtu < 100)
1630 return -EINVAL ;
1632 dev->mtu = mtu ;
1633 olympic_priv->pkt_buf_sz = mtu + TR_HLEN ;
1635 return 0 ;
1638 static int olympic_proc_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data)
1640 struct net_device *dev = (struct net_device *)data ;
1641 struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv;
1642 u8 __iomem *oat = (olympic_priv->olympic_lap + olympic_priv->olympic_addr_table_addr) ;
1643 u8 __iomem *opt = (olympic_priv->olympic_lap + olympic_priv->olympic_parms_addr) ;
1644 int size = 0 ;
1645 int len=0;
1646 off_t begin=0;
1647 off_t pos=0;
1649 size = sprintf(buffer,
1650 "IBM Pit/Pit-Phy/Olympic Chipset Token Ring Adapter %s\n",dev->name);
1651 size += sprintf(buffer+size, "\n%6s: Adapter Address : Node Address : Functional Addr\n",
1652 dev->name);
1654 size += sprintf(buffer+size, "%6s: %02x:%02x:%02x:%02x:%02x:%02x : %02x:%02x:%02x:%02x:%02x:%02x : %02x:%02x:%02x:%02x\n",
1655 dev->name,
1656 dev->dev_addr[0],
1657 dev->dev_addr[1],
1658 dev->dev_addr[2],
1659 dev->dev_addr[3],
1660 dev->dev_addr[4],
1661 dev->dev_addr[5],
1662 readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)),
1663 readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+1),
1664 readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+2),
1665 readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+3),
1666 readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+4),
1667 readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+5),
1668 readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)),
1669 readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+1),
1670 readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+2),
1671 readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+3));
1673 size += sprintf(buffer+size, "\n%6s: Token Ring Parameters Table:\n", dev->name);
1675 size += sprintf(buffer+size, "%6s: Physical Addr : Up Node Address : Poll Address : AccPri : Auth Src : Att Code :\n",
1676 dev->name) ;
1678 size += sprintf(buffer+size, "%6s: %02x:%02x:%02x:%02x : %02x:%02x:%02x:%02x:%02x:%02x : %02x:%02x:%02x:%02x:%02x:%02x : %04x : %04x : %04x :\n",
1679 dev->name,
1680 readb(opt+offsetof(struct olympic_parameters_table, phys_addr)),
1681 readb(opt+offsetof(struct olympic_parameters_table, phys_addr)+1),
1682 readb(opt+offsetof(struct olympic_parameters_table, phys_addr)+2),
1683 readb(opt+offsetof(struct olympic_parameters_table, phys_addr)+3),
1684 readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)),
1685 readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+1),
1686 readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+2),
1687 readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+3),
1688 readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+4),
1689 readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+5),
1690 readb(opt+offsetof(struct olympic_parameters_table, poll_addr)),
1691 readb(opt+offsetof(struct olympic_parameters_table, poll_addr)+1),
1692 readb(opt+offsetof(struct olympic_parameters_table, poll_addr)+2),
1693 readb(opt+offsetof(struct olympic_parameters_table, poll_addr)+3),
1694 readb(opt+offsetof(struct olympic_parameters_table, poll_addr)+4),
1695 readb(opt+offsetof(struct olympic_parameters_table, poll_addr)+5),
1696 swab16(readw(opt+offsetof(struct olympic_parameters_table, acc_priority))),
1697 swab16(readw(opt+offsetof(struct olympic_parameters_table, auth_source_class))),
1698 swab16(readw(opt+offsetof(struct olympic_parameters_table, att_code))));
1700 size += sprintf(buffer+size, "%6s: Source Address : Bcn T : Maj. V : Lan St : Lcl Rg : Mon Err : Frame Correl : \n",
1701 dev->name) ;
1703 size += sprintf(buffer+size, "%6s: %02x:%02x:%02x:%02x:%02x:%02x : %04x : %04x : %04x : %04x : %04x : %04x : \n",
1704 dev->name,
1705 readb(opt+offsetof(struct olympic_parameters_table, source_addr)),
1706 readb(opt+offsetof(struct olympic_parameters_table, source_addr)+1),
1707 readb(opt+offsetof(struct olympic_parameters_table, source_addr)+2),
1708 readb(opt+offsetof(struct olympic_parameters_table, source_addr)+3),
1709 readb(opt+offsetof(struct olympic_parameters_table, source_addr)+4),
1710 readb(opt+offsetof(struct olympic_parameters_table, source_addr)+5),
1711 swab16(readw(opt+offsetof(struct olympic_parameters_table, beacon_type))),
1712 swab16(readw(opt+offsetof(struct olympic_parameters_table, major_vector))),
1713 swab16(readw(opt+offsetof(struct olympic_parameters_table, lan_status))),
1714 swab16(readw(opt+offsetof(struct olympic_parameters_table, local_ring))),
1715 swab16(readw(opt+offsetof(struct olympic_parameters_table, mon_error))),
1716 swab16(readw(opt+offsetof(struct olympic_parameters_table, frame_correl))));
1718 size += sprintf(buffer+size, "%6s: Beacon Details : Tx : Rx : NAUN Node Address : NAUN Node Phys : \n",
1719 dev->name) ;
1721 size += sprintf(buffer+size, "%6s: : %02x : %02x : %02x:%02x:%02x:%02x:%02x:%02x : %02x:%02x:%02x:%02x : \n",
1722 dev->name,
1723 swab16(readw(opt+offsetof(struct olympic_parameters_table, beacon_transmit))),
1724 swab16(readw(opt+offsetof(struct olympic_parameters_table, beacon_receive))),
1725 readb(opt+offsetof(struct olympic_parameters_table, beacon_naun)),
1726 readb(opt+offsetof(struct olympic_parameters_table, beacon_naun)+1),
1727 readb(opt+offsetof(struct olympic_parameters_table, beacon_naun)+2),
1728 readb(opt+offsetof(struct olympic_parameters_table, beacon_naun)+3),
1729 readb(opt+offsetof(struct olympic_parameters_table, beacon_naun)+4),
1730 readb(opt+offsetof(struct olympic_parameters_table, beacon_naun)+5),
1731 readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)),
1732 readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)+1),
1733 readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)+2),
1734 readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)+3));
1736 len=size;
1737 pos=begin+size;
1738 if (pos<offset) {
1739 len=0;
1740 begin=pos;
1742 *start=buffer+(offset-begin); /* Start of wanted data */
1743 len-=(offset-begin); /* Start slop */
1744 if(len>length)
1745 len=length; /* Ending slop */
1746 return len;
1749 static void __devexit olympic_remove_one(struct pci_dev *pdev)
1751 struct net_device *dev = pci_get_drvdata(pdev) ;
1752 struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv;
1754 if (olympic_priv->olympic_network_monitor) {
1755 char proc_name[20] ;
1756 strcpy(proc_name,"olympic_") ;
1757 strcat(proc_name,dev->name) ;
1758 remove_proc_entry(proc_name,init_net.proc_net);
1760 unregister_netdev(dev) ;
1761 iounmap(olympic_priv->olympic_mmio) ;
1762 iounmap(olympic_priv->olympic_lap) ;
1763 pci_release_regions(pdev) ;
1764 pci_set_drvdata(pdev,NULL) ;
1765 free_netdev(dev) ;
/* PCI driver glue: binds olympic_probe/olympic_remove_one to the IDs in
 * olympic_pci_tbl.  .remove is wrapped in __devexit_p() so it compiles
 * to NULL when hotplug removal support is configured out. */
static struct pci_driver olympic_driver = {
	.name		= "olympic",
	.id_table	= olympic_pci_tbl,
	.probe		= olympic_probe,
	.remove		= __devexit_p(olympic_remove_one),
1775 static int __init olympic_pci_init(void)
1777 return pci_register_driver(&olympic_driver) ;
1780 static void __exit olympic_pci_cleanup(void)
1782 pci_unregister_driver(&olympic_driver) ;
/* Hook the init/exit functions into the module loader. */
module_init(olympic_pci_init) ;
module_exit(olympic_pci_cleanup) ;

MODULE_LICENSE("GPL");