/*
	drivers/net/tulip/interrupt.c

	Copyright 2000,2001  The Linux Kernel Team
	Written/copyright 1994-2001 by Donald Becker.

	This software may be used and distributed according to the terms
	of the GNU General Public License, incorporated herein by reference.

	Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html}
	for more information on this driver.

	Please submit bugs to http://bugzilla.kernel.org/ .
*/
#include <linux/pci.h>
#include "tulip.h"
#include <linux/etherdevice.h>
int tulip_rx_copybreak;
unsigned int tulip_max_interrupt_work;
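
/* Both knobs are assigned at init time elsewhere in the driver (from the
   rx_copybreak and max_interrupt_work module parameters in mainline trees):
   tulip_rx_copybreak is the copy-vs-remap size threshold used in the
   receive paths below, and tulip_max_interrupt_work bounds how many events
   a single invocation of tulip_interrupt() may process. */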

#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
#define MIT_SIZE 15
#define MIT_TABLE 15 /* We use 0 or max */

static unsigned int mit_table[MIT_SIZE+1] =
{
	/* CSR11 21143 hardware Mitigation Control Interrupt
	   We use only RX mitigation; other techniques are used for
	   TX intr. mitigation.

	   31    Cycle Size (timer control)
	   30:27 TX timer in 16 * Cycle size
	   26:24 TX No pkts before Int.
	   23:20 RX timer in Cycle size
	   19:17 RX No pkts before Int.
	   16    Continuous Mode (CM)
	*/

	0x0,            /* IM disabled */
	0x80150000,     /* RX time = 1, RX pkts = 2, CM = 1 */
	0x80150000,
	0x80270000,
	0x80370000,
	0x80490000,
	0x80590000,
	0x80690000,
	0x807B0000,
	0x808B0000,
	0x809D0000,
	0x80AD0000,
	0x80BD0000,
	0x80CF0000,
	0x80DF0000,
//	0x80FF0000      /* RX time = 16, RX pkts = 7, CM = 1 */
	0x80F10000      /* RX time = 16, RX pkts = 0, CM = 1 */
};
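
/* Worked decode of the second entry against the layout above:
   0x80150000 has bit 31 set (cycle size), bits 23:20 = 0x1 (RX timer =
   1 cycle), bits 19:17 = 0x2 (2 RX pkts before interrupt) and bit 16
   set (continuous mode) -- i.e. "RX time = 1, RX pkts = 2, CM = 1". */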
#endif

int tulip_refill_rx(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	int entry;
	int refilled = 0;

	/* Refill the Rx ring buffers. */
	for (; tp->cur_rx - tp->dirty_rx > 0; tp->dirty_rx++) {
		entry = tp->dirty_rx % RX_RING_SIZE;
		if (tp->rx_buffers[entry].skb == NULL) {
			struct sk_buff *skb;
			dma_addr_t mapping;

			skb = tp->rx_buffers[entry].skb = dev_alloc_skb(PKT_BUF_SZ);
			if (skb == NULL)
				break;

			mapping = pci_map_single(tp->pdev, skb->data, PKT_BUF_SZ,
						 PCI_DMA_FROMDEVICE);
			tp->rx_buffers[entry].mapping = mapping;

			skb->dev = dev;	/* Mark as being used by this device. */
			tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping);
			refilled++;
		}
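		/* Writing DescOwned hands the descriptor back to the chip, so
		   for a freshly refilled entry it must come after the buffer1
		   pointer store above. */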
		tp->rx_ring[entry].status = cpu_to_le32(DescOwned);
	}
	if (tp->chip_id == LC82C168) {
		if (((ioread32(tp->base_addr + CSR5) >> 17) & 0x07) == 4) {
			/* Rx stopped due to out of buffers,
			 * restart it
			 */
			iowrite32(0x01, tp->base_addr + CSR2);
		}
	}
	return refilled;
}

#ifdef CONFIG_TULIP_NAPI

void oom_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct tulip_private *tp = netdev_priv(dev);
	netif_rx_schedule(dev, &tp->napi);
}
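
/* Companion to the oom: exit path in tulip_poll() below: when skb
   allocation fails, polling stops with RX interrupts still masked and
   this one-jiffy timer simply re-schedules NAPI to retry the refill. */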

int tulip_poll(struct napi_struct *napi, int budget)
{
	struct tulip_private *tp = container_of(napi, struct tulip_private, napi);
	struct net_device *dev = tp->dev;
	int entry = tp->cur_rx % RX_RING_SIZE;
	int work_done = 0;
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
	int received = 0;
#endif

#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION

	/* that one buffer is needed for mit activation; or might be a
	   bug in the ring buffer code; check later -- JHS */

	if (budget >= RX_RING_SIZE)
		budget--;
#endif

	if (tulip_debug > 4)
		printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
		       tp->rx_ring[entry].status);

	do {
		if (ioread32(tp->base_addr + CSR5) == 0xffffffff) {
			printk(KERN_DEBUG " In tulip_poll(), hardware disappeared.\n");
			break;
		}
		/* Acknowledge current RX interrupt sources. */
		iowrite32((RxIntr | RxNoBuf), tp->base_addr + CSR5);

		/* If we own the next entry, it is a new packet. Send it up. */
		while (!(tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
			s32 status = le32_to_cpu(tp->rx_ring[entry].status);

			if (tp->dirty_rx + RX_RING_SIZE == tp->cur_rx)
				break;

			if (tulip_debug > 5)
				printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
				       dev->name, entry, status);

			if (++work_done >= budget)
				goto not_done;

			if ((status & 0x38008300) != 0x0300) {
				if ((status & 0x38000300) != 0x0300) {
					/* Ignore earlier buffers. */
					if ((status & 0xffff) != 0x7fff) {
						if (tulip_debug > 1)
							printk(KERN_WARNING "%s: Oversized Ethernet frame "
							       "spanned multiple buffers, status %8.8x!\n",
							       dev->name, status);
						tp->stats.rx_length_errors++;
					}
				} else if (status & RxDescFatalErr) {
					/* There was a fatal error. */
					if (tulip_debug > 2)
						printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
						       dev->name, status);
					tp->stats.rx_errors++; /* end of a packet.*/
					if (status & 0x0890) tp->stats.rx_length_errors++;
					if (status & 0x0004) tp->stats.rx_frame_errors++;
					if (status & 0x0002) tp->stats.rx_crc_errors++;
					if (status & 0x0001) tp->stats.rx_fifo_errors++;
				}
			} else {
				/* Omit the four octet CRC from the length. */
				short pkt_len = ((status >> 16) & 0x7ff) - 4;
				struct sk_buff *skb;

#ifndef final_version
				if (pkt_len > 1518) {
					printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
					       dev->name, pkt_len, pkt_len);
					pkt_len = 1518;
					tp->stats.rx_length_errors++;
				}
#endif
				/* Check if the packet is long enough to accept without copying
				   to a minimally-sized skbuff. */
				if (pkt_len < tulip_rx_copybreak
				    && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
					skb_reserve(skb, 2);	/* 16 byte align the IP header */
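					/* The sync_for_cpu/sync_for_device pair below
					   brackets a CPU copy out of a streaming DMA
					   buffer that stays mapped: ownership passes to
					   the CPU for the copy, then back to the device
					   so the ring buffer can be reused in place. */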
					pci_dma_sync_single_for_cpu(tp->pdev,
								    tp->rx_buffers[entry].mapping,
								    pkt_len, PCI_DMA_FROMDEVICE);
#if ! defined(__alpha__)
					skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
								pkt_len);
					skb_put(skb, pkt_len);
#else
					memcpy(skb_put(skb, pkt_len),
					       tp->rx_buffers[entry].skb->data,
					       pkt_len);
#endif
					pci_dma_sync_single_for_device(tp->pdev,
								       tp->rx_buffers[entry].mapping,
								       pkt_len, PCI_DMA_FROMDEVICE);
				} else {	/* Pass up the skb already on the Rx ring. */
					char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
							     pkt_len);

#ifndef final_version
					if (tp->rx_buffers[entry].mapping !=
					    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
						printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
						       "do not match in tulip_rx: %08x vs. %08llx %p / %p.\n",
						       dev->name,
						       le32_to_cpu(tp->rx_ring[entry].buffer1),
						       (unsigned long long)tp->rx_buffers[entry].mapping,
						       skb->head, temp);
					}
#endif

					pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
							 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

					tp->rx_buffers[entry].skb = NULL;
					tp->rx_buffers[entry].mapping = 0;
				}
				skb->protocol = eth_type_trans(skb, dev);
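
				/* In NAPI poll context the skb can be handed straight
				   to the stack via netif_receive_skb(); the non-NAPI
				   path below uses netif_rx() instead, which queues to
				   the softirq backlog since it runs in hard-irq
				   context. */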
				netif_receive_skb(skb);

				dev->last_rx = jiffies;
				tp->stats.rx_packets++;
				tp->stats.rx_bytes += pkt_len;
			}
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
			received++;
#endif

			entry = (++tp->cur_rx) % RX_RING_SIZE;
			if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/4)
				tulip_refill_rx(dev);
		}

		/* New ack strategy... irq does not ack Rx any longer
		   hopefully this helps */

		/* Really bad things can happen here... If a new packet arrives
		 * and an irq arrives (tx or just due to an occasionally unset
		 * mask), it will be acked by the irq handler, but the new thread
		 * is not scheduled. It is a major hole in the design.
		 * No idea how to fix this if "playing with fire" will fail
		 * tomorrow (night 011029). If it will not fail, we won
		 * finally: amount of IO did not increase at all. */
	} while ((ioread32(tp->base_addr + CSR5) & RxIntr));

#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION

	/* We use this simplistic scheme for IM. It's proven by
	   real life installations. We could have IM enabled
	   continuously but this would cause unnecessary latency.
	   Unfortunately we can't use all the NET_RX_* feedback here.
	   That would turn on IM for devices that are not contributing
	   to backlog congestion, with unnecessary latency.

	   We monitor the device RX-ring and have:

	   HW Interrupt Mitigation either ON or OFF.

	   ON:  More than 1 pkt received (per intr.) OR we are dropping
	   OFF: Only 1 pkt received

	   Note: we only use the min and max (0, 15) settings from mit_table */

	if (tp->flags & HAS_INTR_MITIGATION) {
		if (received > 1) {
			if (!tp->mit_on) {
				tp->mit_on = 1;
				iowrite32(mit_table[MIT_TABLE], tp->base_addr + CSR11);
			}
		} else {
			if (tp->mit_on) {
				tp->mit_on = 0;
				iowrite32(0, tp->base_addr + CSR11);
			}
		}
	}

#endif /* CONFIG_TULIP_NAPI_HW_MITIGATION */

	tulip_refill_rx(dev);

	/* If RX ring is not full we are out of memory. */
	if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
		goto oom;

	/* Remove us from polling list and enable RX intr. */

	netif_rx_complete(dev, napi);
	iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7);

	/* The last op happens after poll completion. Which means the following:
	 * 1. it can race with disabling irqs in irq handler
	 * 2. it can race with disabling/enabling irqs in other poll threads
	 * 3. if an irq raised after the beginning of the loop, it will be
	 *    immediately triggered here.
	 *
	 * Summarizing: the logic results in some redundant irqs both
	 * due to races in masking and due to too late acking of already
	 * processed irqs. But it must not result in losing events.
	 */

	return work_done;

not_done:
	if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/2 ||
	    tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
		tulip_refill_rx(dev);

	if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
		goto oom;

	return work_done;

oom:	/* Executed with RX ints disabled */

	/* Start timer, stop polling, but do not enable rx interrupts. */
	mod_timer(&tp->oom_timer, jiffies+1);

	/* Think: timer_pending() was an explicit signature of bug.
	 * Timer can be pending now but fired and completed
	 * before we did netif_rx_complete(). See? We would lose it. */

	/* remove ourselves from the polling list */
	netif_rx_complete(dev, napi);

	return work_done;
}

#else /* CONFIG_TULIP_NAPI */

static int tulip_rx(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	int entry = tp->cur_rx % RX_RING_SIZE;
	int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
	int received = 0;

	if (tulip_debug > 4)
		printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
		       tp->rx_ring[entry].status);
	/* If we own the next entry, it is a new packet. Send it up. */
	while (!(tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
		s32 status = le32_to_cpu(tp->rx_ring[entry].status);

		if (tulip_debug > 5)
			printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
			       dev->name, entry, status);
		if (--rx_work_limit < 0)
			break;
		if ((status & 0x38008300) != 0x0300) {
			if ((status & 0x38000300) != 0x0300) {
				/* Ignore earlier buffers. */
				if ((status & 0xffff) != 0x7fff) {
					if (tulip_debug > 1)
						printk(KERN_WARNING "%s: Oversized Ethernet frame "
						       "spanned multiple buffers, status %8.8x!\n",
						       dev->name, status);
					tp->stats.rx_length_errors++;
				}
			} else if (status & RxDescFatalErr) {
				/* There was a fatal error. */
				if (tulip_debug > 2)
					printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
					       dev->name, status);
				tp->stats.rx_errors++; /* end of a packet.*/
				if (status & 0x0890) tp->stats.rx_length_errors++;
				if (status & 0x0004) tp->stats.rx_frame_errors++;
				if (status & 0x0002) tp->stats.rx_crc_errors++;
				if (status & 0x0001) tp->stats.rx_fifo_errors++;
			}
		} else {
			/* Omit the four octet CRC from the length. */
			short pkt_len = ((status >> 16) & 0x7ff) - 4;
			struct sk_buff *skb;

#ifndef final_version
			if (pkt_len > 1518) {
				printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
				       dev->name, pkt_len, pkt_len);
				pkt_len = 1518;
				tp->stats.rx_length_errors++;
			}
#endif

			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
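			/* Copying small packets below the copybreak threshold
			   lets the original full-size buffer stay mapped on the
			   ring; larger packets are passed up in place and the
			   ring slot is refilled later. */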
			if (pkt_len < tulip_rx_copybreak
			    && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single_for_cpu(tp->pdev,
							    tp->rx_buffers[entry].mapping,
							    pkt_len, PCI_DMA_FROMDEVICE);
#if ! defined(__alpha__)
				skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
							pkt_len);
				skb_put(skb, pkt_len);
#else
				memcpy(skb_put(skb, pkt_len),
				       tp->rx_buffers[entry].skb->data,
				       pkt_len);
#endif
				pci_dma_sync_single_for_device(tp->pdev,
							       tp->rx_buffers[entry].mapping,
							       pkt_len, PCI_DMA_FROMDEVICE);
			} else {	/* Pass up the skb already on the Rx ring. */
				char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
						     pkt_len);

#ifndef final_version
				if (tp->rx_buffers[entry].mapping !=
				    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
					printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
					       "do not match in tulip_rx: %08x vs. %Lx %p / %p.\n",
					       dev->name,
					       le32_to_cpu(tp->rx_ring[entry].buffer1),
					       (long long)tp->rx_buffers[entry].mapping,
					       skb->head, temp);
				}
#endif

				pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
						 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

				tp->rx_buffers[entry].skb = NULL;
				tp->rx_buffers[entry].mapping = 0;
			}
			skb->protocol = eth_type_trans(skb, dev);

			netif_rx(skb);

			dev->last_rx = jiffies;
			tp->stats.rx_packets++;
			tp->stats.rx_bytes += pkt_len;
		}
		received++;
		entry = (++tp->cur_rx) % RX_RING_SIZE;
	}
	return received;
}

#endif  /* CONFIG_TULIP_NAPI */
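
/* phy_interrupt() services boards whose PHY reports link changes via
   CSR12 (compiled only for __hppa__); it returns 1 when a link-change
   event was acked and handled, so tulip_interrupt() can count the IRQ
   as handled even when CSR5 shows nothing. */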
static inline unsigned int phy_interrupt (struct net_device *dev)
{
#ifdef __hppa__
	struct tulip_private *tp = netdev_priv(dev);
	int csr12 = ioread32(tp->base_addr + CSR12) & 0xff;

	if (csr12 != tp->csr12_shadow) {
		/* ack interrupt */
		iowrite32(csr12 | 0x02, tp->base_addr + CSR12);
		tp->csr12_shadow = csr12;
		/* do link change stuff */
		spin_lock(&tp->lock);
		tulip_check_duplex(dev);
		spin_unlock(&tp->lock);
		/* clear irq ack bit */
		iowrite32(csr12 & ~0x02, tp->base_addr + CSR12);

		return 1;
	}
#endif

	return 0;
}

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
irqreturn_t tulip_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;
	int csr5;
	int missed;
	int rx = 0;
	int tx = 0;
	int oi = 0;
	int maxrx = RX_RING_SIZE;
	int maxtx = TX_RING_SIZE;
	int maxoi = TX_RING_SIZE;
#ifdef CONFIG_TULIP_NAPI
	int rxd = 0;
#else
	int entry;
#endif
	unsigned int work_count = tulip_max_interrupt_work;
	unsigned int handled = 0;

	/* Let's see whether the interrupt really is for us */
	csr5 = ioread32(ioaddr + CSR5);

	if (tp->flags & HAS_PHY_IRQ)
		handled = phy_interrupt (dev);

	if ((csr5 & (NormalIntr|AbnormalIntr)) == 0)
		return IRQ_RETVAL(handled);

	tp->nir++;

	do {

#ifdef CONFIG_TULIP_NAPI

		if (!rxd && (csr5 & (RxIntr | RxNoBuf))) {
			rxd++;
			/* Mask RX intrs and add the device to poll list. */
			iowrite32(tulip_tbl[tp->chip_id].valid_intrs&~RxPollInt, ioaddr + CSR7);
			netif_rx_schedule(dev, &tp->napi);

			if (!(csr5&~(AbnormalIntr|NormalIntr|RxPollInt|TPLnkPass)))
				break;
		}

		/* Acknowledge the interrupt sources we handle here ASAP
		   the poll function does Rx and RxNoBuf acking */
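		/* The 0x0001ff3f mask acks NormalIntr, AbnormalIntr and every
		   individual source except bits 6 and 7 (RxIntr and RxNoBuf
		   per the CSR5 bit layout), which stay pending for
		   tulip_poll() to acknowledge. */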
		iowrite32(csr5 & 0x0001ff3f, ioaddr + CSR5);

#else
		/* Acknowledge all of the current interrupt sources ASAP. */
		iowrite32(csr5 & 0x0001ffff, ioaddr + CSR5);

		if (csr5 & (RxIntr | RxNoBuf)) {
			rx += tulip_rx(dev);
			tulip_refill_rx(dev);
		}

#endif /*  CONFIG_TULIP_NAPI */

		if (tulip_debug > 4)
			printk(KERN_DEBUG "%s: interrupt  csr5=%#8.8x new csr5=%#8.8x.\n",
			       dev->name, csr5, ioread32(ioaddr + CSR5));

		if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) {
			unsigned int dirty_tx;

			spin_lock(&tp->lock);

			for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
			     dirty_tx++) {
				int entry = dirty_tx % TX_RING_SIZE;
				int status = le32_to_cpu(tp->tx_ring[entry].status);
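
				/* A negative status means bit 31 (DescOwned) is
				   still set: the chip has not finished transmitting
				   from this descriptor yet. */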
				if (status < 0)
					break;	/* It still has not been Txed */

				/* Check for Rx filter setup frames. */
				if (tp->tx_buffers[entry].skb == NULL) {
					/* test because dummy frames not mapped */
					if (tp->tx_buffers[entry].mapping)
						pci_unmap_single(tp->pdev,
								 tp->tx_buffers[entry].mapping,
								 sizeof(tp->setup_frame),
								 PCI_DMA_TODEVICE);
					continue;
				}

				if (status & 0x8000) {
					/* There was a major error, log it. */
#ifndef final_version
					if (tulip_debug > 1)
						printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
						       dev->name, status);
#endif
					tp->stats.tx_errors++;
					if (status & 0x4104) tp->stats.tx_aborted_errors++;
					if (status & 0x0C00) tp->stats.tx_carrier_errors++;
					if (status & 0x0200) tp->stats.tx_window_errors++;
					if (status & 0x0002) tp->stats.tx_fifo_errors++;
					if ((status & 0x0080) && tp->full_duplex == 0)
						tp->stats.tx_heartbeat_errors++;
				} else {
					tp->stats.tx_bytes +=
						tp->tx_buffers[entry].skb->len;
					tp->stats.collisions += (status >> 3) & 15;
					tp->stats.tx_packets++;
				}

				pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
						 tp->tx_buffers[entry].skb->len,
						 PCI_DMA_TODEVICE);

				/* Free the original skb. */
				dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
				tp->tx_buffers[entry].skb = NULL;
				tp->tx_buffers[entry].mapping = 0;
				tx++;
			}

#ifndef final_version
			if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
				printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d.\n",
				       dev->name, dirty_tx, tp->cur_tx);
				dirty_tx += TX_RING_SIZE;
			}
#endif

			if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
				netif_wake_queue(dev);

			tp->dirty_tx = dirty_tx;
			if (csr5 & TxDied) {
				if (tulip_debug > 2)
					printk(KERN_WARNING "%s: The transmitter stopped."
					       "  CSR5 is %x, CSR6 %x, new CSR6 %x.\n",
					       dev->name, csr5, ioread32(ioaddr + CSR6), tp->csr6);
				tulip_restart_rxtx(tp);
			}
			spin_unlock(&tp->lock);
		}

		/* Log errors. */
		if (csr5 & AbnormalIntr) {	/* Abnormal error summary bit. */
			if (csr5 == 0xffffffff)
				break;
			if (csr5 & TxJabber) tp->stats.tx_errors++;
			if (csr5 & TxFIFOUnderflow) {
				if ((tp->csr6 & 0xC000) != 0xC000)
					tp->csr6 += 0x4000;	/* Bump up the Tx threshold */
				else
					tp->csr6 |= 0x00200000;  /* Store-n-forward. */
				/* Restart the transmit process. */
				tulip_restart_rxtx(tp);
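				/* A write of any value to CSR1 is a transmit poll
				   demand (per the 21x4x docs), prodding the freshly
				   restarted Tx engine to rescan the ring. */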
				iowrite32(0, ioaddr + CSR1);
			}
			if (csr5 & (RxDied | RxNoBuf)) {
				if (tp->flags & COMET_MAC_ADDR) {
					iowrite32(tp->mc_filter[0], ioaddr + 0xAC);
					iowrite32(tp->mc_filter[1], ioaddr + 0xB0);
				}
			}
			if (csr5 & RxDied) {	/* Missed a Rx frame. */
				tp->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
				tp->stats.rx_errors++;
				tulip_start_rxtx(tp);
			}
			/*
			 * NB: t21142_lnk_change() does a del_timer_sync(), so be careful if this
			 * call is ever done under the spinlock
			 */
			if (csr5 & (TPLnkPass | TPLnkFail | 0x08000000)) {
				if (tp->link_change)
					(tp->link_change)(dev, csr5);
			}
			if (csr5 & SystemError) {
				int error = (csr5 >> 23) & 7;
				/* oops, we hit a PCI error.  The code produced corresponds
				 * to the reason:
				 *  0 - parity error
				 *  1 - master abort
				 *  2 - target abort
				 * Note that on parity error, we should do a software reset
				 * of the chip to get it back into a sane state (according
				 * to the 21142/3 docs that is).
				 *   -- rmk
				 */
				printk(KERN_ERR "%s: (%lu) System Error occurred (%d)\n",
				       dev->name, tp->nir, error);
			}
			/* Clear all error sources, including undocumented ones! */
			iowrite32(0x0800f7ba, ioaddr + CSR5);
			oi++;
		}
		if (csr5 & TimerInt) {

			if (tulip_debug > 2)
				printk(KERN_ERR "%s: Re-enabling interrupts, %8.8x.\n",
				       dev->name, csr5);
			iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
			tp->ttimer = 0;
			oi++;
		}
		if (tx > maxtx || rx > maxrx || oi > maxoi) {
			if (tulip_debug > 1)
				printk(KERN_WARNING "%s: Too much work during an interrupt, "
				       "csr5=0x%8.8x. (%lu) (%d,%d,%d)\n", dev->name, csr5, tp->nir, tx, rx, oi);

			/* Acknowledge all interrupt sources. */
			iowrite32(0x8001ffff, ioaddr + CSR5);
			if (tp->flags & HAS_INTR_MITIGATION) {
				/* Josip Loncaric at ICASE did extensive experimentation
				   to develop a good interrupt mitigation setting. */
				iowrite32(0x8b240000, ioaddr + CSR11);
			} else if (tp->chip_id == LC82C168) {
				/* the LC82C168 doesn't have a hw timer. */
				iowrite32(0x00, ioaddr + CSR7);
				mod_timer(&tp->timer, RUN_AT(HZ/50));
			} else {
				/* Mask all interrupting sources, set timer to
				   re-enable. */
				iowrite32(((~csr5) & 0x0001ebef) | AbnormalIntr | TimerInt, ioaddr + CSR7);
				iowrite32(0x0012, ioaddr + CSR11);
			}
			break;
		}

		work_count--;
		if (work_count == 0)
			break;

		csr5 = ioread32(ioaddr + CSR5);

#ifdef CONFIG_TULIP_NAPI
		if (rxd)
			csr5 &= ~RxPollInt;
	} while ((csr5 & (TxNoBuf |
			  TxDied |
			  TxIntr |
			  TimerInt |
			  /* Abnormal intr. */
			  RxDied |
			  TxFIFOUnderflow |
			  TxJabber |
			  TPLnkFail |
			  SystemError)) != 0);
#else
	} while ((csr5 & (NormalIntr|AbnormalIntr)) != 0);

	tulip_refill_rx(dev);

	/* check if the card is in suspend mode */
	entry = tp->dirty_rx % RX_RING_SIZE;
	if (tp->rx_buffers[entry].skb == NULL) {
		if (tulip_debug > 1)
			printk(KERN_WARNING "%s: in rx suspend mode: (%lu) (tp->cur_rx = %u, ttimer = %d, rx = %d) go/stay in suspend mode\n",
			       dev->name, tp->nir, tp->cur_rx, tp->ttimer, rx);
		if (tp->chip_id == LC82C168) {
			iowrite32(0x00, ioaddr + CSR7);
			mod_timer(&tp->timer, RUN_AT(HZ/50));
		} else {
			if (tp->ttimer == 0 || (ioread32(ioaddr + CSR11) & 0xffff) == 0) {
				if (tulip_debug > 1)
					printk(KERN_WARNING "%s: in rx suspend mode: (%lu) set timer\n",
					       dev->name, tp->nir);
				iowrite32(tulip_tbl[tp->chip_id].valid_intrs | TimerInt,
					  ioaddr + CSR7);
				iowrite32(TimerInt, ioaddr + CSR5);
				iowrite32(12, ioaddr + CSR11);
				tp->ttimer = 1;
			}
		}
	}
#endif /* CONFIG_TULIP_NAPI */
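
	/* CSR8 is the missed-frames counter: bits 15:0 count frames the
	   chip could not receive, and the code below treats bit 16 as the
	   counter-overflow flag, crediting the 0x10000 maximum when set. */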
	if ((missed = ioread32(ioaddr + CSR8) & 0x1ffff)) {
		tp->stats.rx_dropped += missed & 0x10000 ? 0x10000 : missed;
	}

	if (tulip_debug > 4)
		printk(KERN_DEBUG "%s: exiting interrupt, csr5=%#4.4x.\n",
		       dev->name, ioread32(ioaddr + CSR5));

	return IRQ_HANDLED;
}