2 drivers/net/tulip/interrupt.c
4 Maintained by Valerie Henson <val_henson@linux.intel.com>
5 Copyright 2000,2001 The Linux Kernel Team
6 Written/copyright 1994-2001 by Donald Becker.
8 This software may be used and distributed according to the terms
9 of the GNU General Public License, incorporated herein by reference.
11 Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html}
12 for more information on this driver, or visit the project
13 Web page at http://sourceforge.net/projects/tulip/
17 #include <linux/pci.h>
19 #include <linux/etherdevice.h>
/* Module-wide tunables.  Defined here without initializers; presumably set
 * as module parameters from tulip_core.c -- TODO confirm against the rest
 * of the driver.
 *
 * tulip_rx_copybreak: packets shorter than this are copied into a freshly
 * allocated skb instead of handing the ring buffer up the stack (see the
 * pkt_len < tulip_rx_copybreak test in tulip_rx()/tulip_poll()). */
21 int tulip_rx_copybreak
;
/* Upper bound on how much work tulip_interrupt() performs per invocation
 * (read into work_count in tulip_interrupt()). */
22 unsigned int tulip_max_interrupt_work
;
24 #ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
26 #define MIT_TABLE 15 /* We use 0 or max */
/* Interrupt-mitigation values written to CSR11 by tulip_poll().  Indexed
 * 0..MIT_SIZE; per the MIT_TABLE comment above, only entry 0 (mitigation
 * disabled) and entry MIT_TABLE (maximum) are actually used.
 * NOTE(review): this extract is garbled -- the initializer braces and the
 * intermediate table entries are missing; restore from the pristine file
 * before editing. */
28 static unsigned int mit_table
[MIT_SIZE
+1] =
30 /* CRS11 21143 hardware Mitigation Control Interrupt
31 We use only RX mitigation; we use other techniques for
34 31 Cycle Size (timer control)
35 30:27 TX timer in 16 * Cycle size
36 26:24 TX No pkts before Int.
37 23:20 RX timer in Cycle size
38 19:17 RX No pkts before Int.
39 16 Continues Mode (CM)
42 0x0, /* IM disabled */
43 0x80150000, /* RX time = 1, RX pkts = 2, CM = 1 */
57 // 0x80FF0000 /* RX time = 16, RX pkts = 7, CM = 1 */
58 0x80F10000 /* RX time = 16, RX pkts = 0, CM = 1 */
/* tulip_refill_rx() - repopulate empty Rx ring slots between dirty_rx and
 * cur_rx: allocate an skb per empty slot, DMA-map it (pci_map_single,
 * FROMDEVICE -- the direction argument is in a missing line; confirm), point
 * the descriptor's buffer1 at the mapping, and hand the descriptor back to
 * the chip via DescOwned.  For the LC82C168 it additionally kicks Rx via a
 * CSR2 write when CSR5 reports "Rx stopped, no buffers".
 * Return value: the function is declared int but the return statement is
 * not visible in this extract -- presumably the count of refilled slots;
 * TODO confirm.
 * NOTE(review): this extract is garbled -- statements are split across
 * lines and several original lines (local declarations of entry/skb/mapping,
 * braces, the allocation-failure branch) are missing.  Do not hand-edit;
 * restore from the pristine file. */
63 int tulip_refill_rx(struct net_device
*dev
)
65 struct tulip_private
*tp
= netdev_priv(dev
);
69 /* Refill the Rx ring buffers. */
70 for (; tp
->cur_rx
- tp
->dirty_rx
> 0; tp
->dirty_rx
++) {
71 entry
= tp
->dirty_rx
% RX_RING_SIZE
;
/* Only slots whose skb was consumed (set to NULL by the rx path) need work. */
72 if (tp
->rx_buffers
[entry
].skb
== NULL
) {
76 skb
= tp
->rx_buffers
[entry
].skb
= dev_alloc_skb(PKT_BUF_SZ
);
80 mapping
= pci_map_single(tp
->pdev
, skb
->data
, PKT_BUF_SZ
,
82 tp
->rx_buffers
[entry
].mapping
= mapping
;
84 skb
->dev
= dev
; /* Mark as being used by this device. */
85 tp
->rx_ring
[entry
].buffer1
= cpu_to_le32(mapping
);
/* Give the descriptor back to the NIC last, after buffer1 is valid. */
88 tp
->rx_ring
[entry
].status
= cpu_to_le32(DescOwned
);
90 if(tp
->chip_id
== LC82C168
) {
/* CSR5 bits 19:17 == 4 means the Rx process stopped for lack of buffers;
 * restart Rx polling by writing CSR2 (Rx poll demand). */
91 if(((ioread32(tp
->base_addr
+ CSR5
)>>17)&0x07) == 4) {
92 /* Rx stopped due to out of buffers,
95 iowrite32(0x01, tp
->base_addr
+ CSR2
);
101 #ifdef CONFIG_TULIP_NAPI
/* oom_timer() - timer callback armed by tulip_poll()'s oom path when skb
 * allocation failed; simply re-schedules NAPI polling so the refill can be
 * retried.  'data' carries the struct net_device pointer (old-style timer
 * API). */
103 void oom_timer(unsigned long data
)
105 struct net_device
*dev
= (struct net_device
*)data
;
106 netif_rx_schedule(dev
);
/* tulip_poll() - NAPI poll callback (old two-argument *budget/dev->quota
 * API).  Walks the Rx ring while descriptors are host-owned, delivers good
 * frames via netif_receive_skb(), counts errors, refills the ring, applies
 * optional hardware interrupt mitigation (CSR11), and on completion removes
 * the device from the poll list and re-enables Rx interrupts (CSR7).  If the
 * ring cannot be refilled (skb allocation failure) it jumps to the oom label,
 * arms oom_timer, and stays off the poll list with Rx interrupts masked.
 * NOTE(review): this extract is garbled -- statements are split across lines
 * and many original lines (braces, 'return' statements, 'else' branches,
 * several locals such as skb/received/rxd) are missing.  Restore from the
 * pristine file before editing. */
109 int tulip_poll(struct net_device
*dev
, int *budget
)
111 struct tulip_private
*tp
= netdev_priv(dev
);
112 int entry
= tp
->cur_rx
% RX_RING_SIZE
;
113 int rx_work_limit
= *budget
;
116 if (!netif_running(dev
))
/* Never do more work than the device's current quota allows. */
119 if (rx_work_limit
> dev
->quota
)
120 rx_work_limit
= dev
->quota
;
122 #ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
124 /* that one buffer is needed for mit activation; or might be a
125 bug in the ring buffer code; check later -- JHS*/
127 if (rx_work_limit
>=RX_RING_SIZE
) rx_work_limit
--;
131 printk(KERN_DEBUG
" In tulip_rx(), entry %d %8.8x.\n", entry
,
132 tp
->rx_ring
[entry
].status
);
/* All-ones CSR5 means the chip no longer responds (hot-unplug etc.). */
135 if (ioread32(tp
->base_addr
+ CSR5
) == 0xffffffff) {
136 printk(KERN_DEBUG
" In tulip_poll(), hardware disappeared.\n");
139 /* Acknowledge current RX interrupt sources. */
140 iowrite32((RxIntr
| RxNoBuf
), tp
->base_addr
+ CSR5
);
143 /* If we own the next entry, it is a new packet. Send it up. */
144 while ( ! (tp
->rx_ring
[entry
].status
& cpu_to_le32(DescOwned
))) {
145 s32 status
= le32_to_cpu(tp
->rx_ring
[entry
].status
);
/* Ring completely consumed -- stop before running past dirty_rx. */
148 if (tp
->dirty_rx
+ RX_RING_SIZE
== tp
->cur_rx
)
152 printk(KERN_DEBUG
"%s: In tulip_rx(), entry %d %8.8x.\n",
153 dev
->name
, entry
, status
);
154 if (--rx_work_limit
< 0)
/* Error / multi-buffer frame handling; 0x0300 marks first+last descriptor. */
157 if ((status
& 0x38008300) != 0x0300) {
158 if ((status
& 0x38000300) != 0x0300) {
159 /* Ignore earlier buffers. */
160 if ((status
& 0xffff) != 0x7fff) {
162 printk(KERN_WARNING
"%s: Oversized Ethernet frame "
163 "spanned multiple buffers, status %8.8x!\n",
165 tp
->stats
.rx_length_errors
++;
167 } else if (status
& RxDescFatalErr
) {
168 /* There was a fatal error. */
170 printk(KERN_DEBUG
"%s: Receive error, Rx status %8.8x.\n",
172 tp
->stats
.rx_errors
++; /* end of a packet.*/
173 if (status
& 0x0890) tp
->stats
.rx_length_errors
++;
174 if (status
& 0x0004) tp
->stats
.rx_frame_errors
++;
175 if (status
& 0x0002) tp
->stats
.rx_crc_errors
++;
176 if (status
& 0x0001) tp
->stats
.rx_fifo_errors
++;
179 /* Omit the four octet CRC from the length. */
180 short pkt_len
= ((status
>> 16) & 0x7ff) - 4;
183 #ifndef final_version
184 if (pkt_len
> 1518) {
185 printk(KERN_WARNING
"%s: Bogus packet size of %d (%#x).\n",
186 dev
->name
, pkt_len
, pkt_len
);
188 tp
->stats
.rx_length_errors
++;
191 /* Check if the packet is long enough to accept without copying
192 to a minimally-sized skbuff. */
193 if (pkt_len
< tulip_rx_copybreak
194 && (skb
= dev_alloc_skb(pkt_len
+ 2)) != NULL
) {
196 skb_reserve(skb
, 2); /* 16 byte align the IP header */
197 pci_dma_sync_single_for_cpu(tp
->pdev
,
198 tp
->rx_buffers
[entry
].mapping
,
199 pkt_len
, PCI_DMA_FROMDEVICE
);
200 #if ! defined(__alpha__)
201 eth_copy_and_sum(skb
, tp
->rx_buffers
[entry
].skb
->data
,
203 skb_put(skb
, pkt_len
);
205 memcpy(skb_put(skb
, pkt_len
),
206 tp
->rx_buffers
[entry
].skb
->data
,
/* Hand the buffer back to the device after the CPU-side copy. */
209 pci_dma_sync_single_for_device(tp
->pdev
,
210 tp
->rx_buffers
[entry
].mapping
,
211 pkt_len
, PCI_DMA_FROMDEVICE
);
212 } else { /* Pass up the skb already on the Rx ring. */
213 char *temp
= skb_put(skb
= tp
->rx_buffers
[entry
].skb
,
216 #ifndef final_version
/* Sanity check: the descriptor and our bookkeeping must agree. */
217 if (tp
->rx_buffers
[entry
].mapping
!=
218 le32_to_cpu(tp
->rx_ring
[entry
].buffer1
)) {
219 printk(KERN_ERR
"%s: Internal fault: The skbuff addresses "
220 "do not match in tulip_rx: %08x vs. %08llx %p / %p.\n",
222 le32_to_cpu(tp
->rx_ring
[entry
].buffer1
),
223 (unsigned long long)tp
->rx_buffers
[entry
].mapping
,
228 pci_unmap_single(tp
->pdev
, tp
->rx_buffers
[entry
].mapping
,
229 PKT_BUF_SZ
, PCI_DMA_FROMDEVICE
);
/* Slot now empty; tulip_refill_rx() will re-stock it. */
231 tp
->rx_buffers
[entry
].skb
= NULL
;
232 tp
->rx_buffers
[entry
].mapping
= 0;
234 skb
->protocol
= eth_type_trans(skb
, dev
);
236 netif_receive_skb(skb
);
238 dev
->last_rx
= jiffies
;
239 tp
->stats
.rx_packets
++;
240 tp
->stats
.rx_bytes
+= pkt_len
;
244 entry
= (++tp
->cur_rx
) % RX_RING_SIZE
;
/* Refill eagerly once a quarter of the ring has been consumed. */
245 if (tp
->cur_rx
- tp
->dirty_rx
> RX_RING_SIZE
/4)
246 tulip_refill_rx(dev
);
250 /* New ack strategy... irq does not ack Rx any longer
251 hopefully this helps */
253 /* Really bad things can happen here... If new packet arrives
254 * and an irq arrives (tx or just due to occasionally unset
255 * mask), it will be acked by irq handler, but new thread
256 * is not scheduled. It is major hole in design.
257 * No idea how to fix this if "playing with fire" will fail
258 * tomorrow (night 011029). If it will not fail, we won
259 * finally: amount of IO did not increase at all. */
260 } while ((ioread32(tp
->base_addr
+ CSR5
) & RxIntr
));
264 #ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
266 /* We use this simplistic scheme for IM. It's proven by
267 real life installations. We can have IM enabled
268 continuously but this would cause unnecessary latency.
269 Unfortunately we can't use all the NET_RX_* feedback here.
270 This would turn on IM for devices that are not contributing
271 to backlog congestion with unnecessary latency.
273 We monitor the device RX-ring and have:
275 HW Interrupt Mitigation either ON or OFF.
277 ON: More than 1 pkt received (per intr.) OR we are dropping
278 OFF: Only 1 pkt received
280 Note. We only use min and max (0, 15) settings from mit_table */
283 if( tp
->flags
& HAS_INTR_MITIGATION
) {
287 iowrite32(mit_table
[MIT_TABLE
], tp
->base_addr
+ CSR11
);
293 iowrite32(0, tp
->base_addr
+ CSR11
);
298 #endif /* CONFIG_TULIP_NAPI_HW_MITIGATION */
300 dev
->quota
-= received
;
303 tulip_refill_rx(dev
);
305 /* If RX ring is not full we are out of memory. */
306 if (tp
->rx_buffers
[tp
->dirty_rx
% RX_RING_SIZE
].skb
== NULL
) goto oom
;
308 /* Remove us from polling list and enable RX intr. */
310 netif_rx_complete(dev
);
311 iowrite32(tulip_tbl
[tp
->chip_id
].valid_intrs
, tp
->base_addr
+CSR7
);
313 /* The last op happens after poll completion. Which means the following:
314 * 1. it can race with disabling irqs in irq handler
315 * 2. it can race with dise/enabling irqs in other poll threads
316 * 3. if an irq raised after beginning loop, it will be immediately
319 * Summarizing: the logic results in some redundant irqs both
320 * due to races in masking and due to too late acking of already
321 * processed irqs. But it must not result in losing events.
329 received
= dev
->quota
; /* Not to happen */
331 dev
->quota
-= received
;
334 if (tp
->cur_rx
- tp
->dirty_rx
> RX_RING_SIZE
/2 ||
335 tp
->rx_buffers
[tp
->dirty_rx
% RX_RING_SIZE
].skb
== NULL
)
336 tulip_refill_rx(dev
);
338 if (tp
->rx_buffers
[tp
->dirty_rx
% RX_RING_SIZE
].skb
== NULL
) goto oom
;
343 oom
: /* Executed with RX ints disabled */
346 /* Start timer, stop polling, but do not enable rx interrupts. */
347 mod_timer(&tp
->oom_timer
, jiffies
+1);
349 /* Think: timer_pending() was an explicit signature of bug.
350 * Timer can be pending now but fired and completed
351 * before we did netif_rx_complete(). See? We would lose it. */
353 /* remove ourselves from the polling list */
354 netif_rx_complete(dev
);
359 #else /* CONFIG_TULIP_NAPI */
/* tulip_rx() - non-NAPI receive path; same descriptor walk as tulip_poll()
 * but bounded by the number of free ring slots (dirty_rx + RX_RING_SIZE -
 * cur_rx) instead of a NAPI budget, and delivering frames with the non-NAPI
 * receive call (netif_rx -- the call site is in a missing line; confirm).
 * Presumably invoked from tulip_interrupt(); the call is inside the
 * CONFIG_TULIP_NAPI #else region whose line is missing from this extract.
 * Return value: declared int; the return statement (presumably the count of
 * received frames) is not visible here -- TODO confirm.
 * NOTE(review): this extract is garbled -- statements are split across
 * lines and many original lines (braces, locals such as skb/received,
 * 'else' branches) are missing.  Restore from the pristine file before
 * editing. */
361 static int tulip_rx(struct net_device
*dev
)
363 struct tulip_private
*tp
= netdev_priv(dev
);
364 int entry
= tp
->cur_rx
% RX_RING_SIZE
;
/* Work limit = number of ring slots the host still owns. */
365 int rx_work_limit
= tp
->dirty_rx
+ RX_RING_SIZE
- tp
->cur_rx
;
369 printk(KERN_DEBUG
" In tulip_rx(), entry %d %8.8x.\n", entry
,
370 tp
->rx_ring
[entry
].status
);
371 /* If we own the next entry, it is a new packet. Send it up. */
372 while ( ! (tp
->rx_ring
[entry
].status
& cpu_to_le32(DescOwned
))) {
373 s32 status
= le32_to_cpu(tp
->rx_ring
[entry
].status
);
376 printk(KERN_DEBUG
"%s: In tulip_rx(), entry %d %8.8x.\n",
377 dev
->name
, entry
, status
);
378 if (--rx_work_limit
< 0)
/* Error / multi-buffer frame handling; 0x0300 marks first+last descriptor. */
380 if ((status
& 0x38008300) != 0x0300) {
381 if ((status
& 0x38000300) != 0x0300) {
382 /* Ignore earlier buffers. */
383 if ((status
& 0xffff) != 0x7fff) {
385 printk(KERN_WARNING
"%s: Oversized Ethernet frame "
386 "spanned multiple buffers, status %8.8x!\n",
388 tp
->stats
.rx_length_errors
++;
390 } else if (status
& RxDescFatalErr
) {
391 /* There was a fatal error. */
393 printk(KERN_DEBUG
"%s: Receive error, Rx status %8.8x.\n",
395 tp
->stats
.rx_errors
++; /* end of a packet.*/
396 if (status
& 0x0890) tp
->stats
.rx_length_errors
++;
397 if (status
& 0x0004) tp
->stats
.rx_frame_errors
++;
398 if (status
& 0x0002) tp
->stats
.rx_crc_errors
++;
399 if (status
& 0x0001) tp
->stats
.rx_fifo_errors
++;
402 /* Omit the four octet CRC from the length. */
403 short pkt_len
= ((status
>> 16) & 0x7ff) - 4;
406 #ifndef final_version
407 if (pkt_len
> 1518) {
408 printk(KERN_WARNING
"%s: Bogus packet size of %d (%#x).\n",
409 dev
->name
, pkt_len
, pkt_len
);
411 tp
->stats
.rx_length_errors
++;
415 /* Check if the packet is long enough to accept without copying
416 to a minimally-sized skbuff. */
417 if (pkt_len
< tulip_rx_copybreak
418 && (skb
= dev_alloc_skb(pkt_len
+ 2)) != NULL
) {
420 skb_reserve(skb
, 2); /* 16 byte align the IP header */
421 pci_dma_sync_single_for_cpu(tp
->pdev
,
422 tp
->rx_buffers
[entry
].mapping
,
423 pkt_len
, PCI_DMA_FROMDEVICE
);
424 #if ! defined(__alpha__)
425 eth_copy_and_sum(skb
, tp
->rx_buffers
[entry
].skb
->data
,
427 skb_put(skb
, pkt_len
);
429 memcpy(skb_put(skb
, pkt_len
),
430 tp
->rx_buffers
[entry
].skb
->data
,
/* Hand the buffer back to the device after the CPU-side copy. */
433 pci_dma_sync_single_for_device(tp
->pdev
,
434 tp
->rx_buffers
[entry
].mapping
,
435 pkt_len
, PCI_DMA_FROMDEVICE
);
436 } else { /* Pass up the skb already on the Rx ring. */
437 char *temp
= skb_put(skb
= tp
->rx_buffers
[entry
].skb
,
440 #ifndef final_version
/* Sanity check: the descriptor and our bookkeeping must agree. */
441 if (tp
->rx_buffers
[entry
].mapping
!=
442 le32_to_cpu(tp
->rx_ring
[entry
].buffer1
)) {
443 printk(KERN_ERR
"%s: Internal fault: The skbuff addresses "
444 "do not match in tulip_rx: %08x vs. %Lx %p / %p.\n",
446 le32_to_cpu(tp
->rx_ring
[entry
].buffer1
),
447 (long long)tp
->rx_buffers
[entry
].mapping
,
452 pci_unmap_single(tp
->pdev
, tp
->rx_buffers
[entry
].mapping
,
453 PKT_BUF_SZ
, PCI_DMA_FROMDEVICE
);
/* Slot now empty; tulip_refill_rx() will re-stock it. */
455 tp
->rx_buffers
[entry
].skb
= NULL
;
456 tp
->rx_buffers
[entry
].mapping
= 0;
458 skb
->protocol
= eth_type_trans(skb
, dev
);
462 dev
->last_rx
= jiffies
;
463 tp
->stats
.rx_packets
++;
464 tp
->stats
.rx_bytes
+= pkt_len
;
467 entry
= (++tp
->cur_rx
) % RX_RING_SIZE
;
471 #endif /* CONFIG_TULIP_NAPI */
/* phy_interrupt() - service a PHY-originated interrupt (chips with
 * HAS_PHY_IRQ; called from tulip_interrupt()).  Reads the low byte of
 * CSR12, and if it differs from the cached csr12_shadow: acks the PHY irq
 * (bit 0x02 in CSR12), updates the shadow, and re-evaluates duplex under
 * tp->lock, then clears the ack bit again.
 * Return value: declared unsigned int; the return statements (presumably
 * 1 = handled / 0 = not) are in missing lines -- TODO confirm.
 * NOTE(review): garbled extract; braces and returns missing. */
473 static inline unsigned int phy_interrupt (struct net_device
*dev
)
476 struct tulip_private
*tp
= netdev_priv(dev
);
477 int csr12
= ioread32(tp
->base_addr
+ CSR12
) & 0xff;
/* Only act when the PHY status byte changed since last time. */
479 if (csr12
!= tp
->csr12_shadow
) {
481 iowrite32(csr12
| 0x02, tp
->base_addr
+ CSR12
);
482 tp
->csr12_shadow
= csr12
;
483 /* do link change stuff */
484 spin_lock(&tp
->lock
);
485 tulip_check_duplex(dev
);
486 spin_unlock(&tp
->lock
);
487 /* clear irq ack bit */
488 iowrite32(csr12
& ~0x02, tp
->base_addr
+ CSR12
);
497 /* The interrupt handler does all of the Rx thread work and cleans up
498 after the Tx thread. */
/* tulip_interrupt() - main ISR.  Reads CSR5, optionally dispatches PHY
 * interrupts, bails out (IRQ_RETVAL) when neither NormalIntr nor
 * AbnormalIntr is set.  Under CONFIG_TULIP_NAPI it masks Rx interrupts and
 * schedules the poll routine; otherwise it acks all sources and handles Rx
 * inline.  It then reclaims completed Tx descriptors under tp->lock,
 * handles abnormal-summary conditions (Tx jabber/FIFO underflow, Rx
 * died/no-buffer, link change, SytemError [sic, the enum is spelled that
 * way], TimerInt), enforces the per-IRQ work budget (tulip_max_interrupt_work
 * via work_count, and tx/rx/oi counters whose increments are in missing
 * lines), and finally accumulates the missed-frame counter from CSR8.
 * NOTE(review): this extract is garbled -- statements are split across
 * lines and many original lines (braces, the do/while loop head, counter
 * updates, returns, and the non-NAPI tulip_rx() call) are missing.  Restore
 * from the pristine file before editing. */
499 irqreturn_t
tulip_interrupt(int irq
, void *dev_instance
)
501 struct net_device
*dev
= (struct net_device
*)dev_instance
;
502 struct tulip_private
*tp
= netdev_priv(dev
);
503 void __iomem
*ioaddr
= tp
->base_addr
;
/* Per-invocation work ceilings for the rx/tx/other counters below. */
509 int maxrx
= RX_RING_SIZE
;
510 int maxtx
= TX_RING_SIZE
;
511 int maxoi
= TX_RING_SIZE
;
512 #ifdef CONFIG_TULIP_NAPI
517 unsigned int work_count
= tulip_max_interrupt_work
;
518 unsigned int handled
= 0;
520 /* Let's see whether the interrupt really is for us */
521 csr5
= ioread32(ioaddr
+ CSR5
);
523 if (tp
->flags
& HAS_PHY_IRQ
)
524 handled
= phy_interrupt (dev
);
/* Shared IRQ line and nothing pending for us: report via IRQ_RETVAL. */
526 if ((csr5
& (NormalIntr
|AbnormalIntr
)) == 0)
527 return IRQ_RETVAL(handled
);
533 #ifdef CONFIG_TULIP_NAPI
535 if (!rxd
&& (csr5
& (RxIntr
| RxNoBuf
))) {
537 /* Mask RX intrs and add the device to poll list. */
538 iowrite32(tulip_tbl
[tp
->chip_id
].valid_intrs
&~RxPollInt
, ioaddr
+ CSR7
);
539 netif_rx_schedule(dev
);
541 if (!(csr5
&~(AbnormalIntr
|NormalIntr
|RxPollInt
|TPLnkPass
)))
545 /* Acknowledge the interrupt sources we handle here ASAP
546 the poll function does Rx and RxNoBuf acking */
548 iowrite32(csr5
& 0x0001ff3f, ioaddr
+ CSR5
);
551 /* Acknowledge all of the current interrupt sources ASAP. */
552 iowrite32(csr5
& 0x0001ffff, ioaddr
+ CSR5
);
555 if (csr5
& (RxIntr
| RxNoBuf
)) {
557 tulip_refill_rx(dev
);
560 #endif /* CONFIG_TULIP_NAPI */
563 printk(KERN_DEBUG
"%s: interrupt csr5=%#8.8x new csr5=%#8.8x.\n",
564 dev
->name
, csr5
, ioread32(ioaddr
+ CSR5
));
/* --- Tx reclaim: walk completed descriptors under the device lock. --- */
567 if (csr5
& (TxNoBuf
| TxDied
| TxIntr
| TimerInt
)) {
568 unsigned int dirty_tx
;
570 spin_lock(&tp
->lock
);
572 for (dirty_tx
= tp
->dirty_tx
; tp
->cur_tx
- dirty_tx
> 0;
574 int entry
= dirty_tx
% TX_RING_SIZE
;
575 int status
= le32_to_cpu(tp
->tx_ring
[entry
].status
);
578 break; /* It still has not been Txed */
580 /* Check for Rx filter setup frames. */
581 if (tp
->tx_buffers
[entry
].skb
== NULL
) {
582 /* test because dummy frames not mapped */
583 if (tp
->tx_buffers
[entry
].mapping
)
584 pci_unmap_single(tp
->pdev
,
585 tp
->tx_buffers
[entry
].mapping
,
586 sizeof(tp
->setup_frame
),
/* Bit 15 of the Tx descriptor status is the error-summary bit. */
591 if (status
& 0x8000) {
592 /* There was an major error, log it. */
593 #ifndef final_version
595 printk(KERN_DEBUG
"%s: Transmit error, Tx status %8.8x.\n",
598 tp
->stats
.tx_errors
++;
599 if (status
& 0x4104) tp
->stats
.tx_aborted_errors
++;
600 if (status
& 0x0C00) tp
->stats
.tx_carrier_errors
++;
601 if (status
& 0x0200) tp
->stats
.tx_window_errors
++;
602 if (status
& 0x0002) tp
->stats
.tx_fifo_errors
++;
603 if ((status
& 0x0080) && tp
->full_duplex
== 0)
604 tp
->stats
.tx_heartbeat_errors
++;
606 tp
->stats
.tx_bytes
+=
607 tp
->tx_buffers
[entry
].skb
->len
;
608 tp
->stats
.collisions
+= (status
>> 3) & 15;
609 tp
->stats
.tx_packets
++;
612 pci_unmap_single(tp
->pdev
, tp
->tx_buffers
[entry
].mapping
,
613 tp
->tx_buffers
[entry
].skb
->len
,
616 /* Free the original skb. */
617 dev_kfree_skb_irq(tp
->tx_buffers
[entry
].skb
);
618 tp
->tx_buffers
[entry
].skb
= NULL
;
619 tp
->tx_buffers
[entry
].mapping
= 0;
623 #ifndef final_version
624 if (tp
->cur_tx
- dirty_tx
> TX_RING_SIZE
) {
625 printk(KERN_ERR
"%s: Out-of-sync dirty pointer, %d vs. %d.\n",
626 dev
->name
, dirty_tx
, tp
->cur_tx
);
627 dirty_tx
+= TX_RING_SIZE
;
/* Wake the queue once at least two Tx slots are free again. */
631 if (tp
->cur_tx
- dirty_tx
< TX_RING_SIZE
- 2)
632 netif_wake_queue(dev
);
634 tp
->dirty_tx
= dirty_tx
;
637 printk(KERN_WARNING
"%s: The transmitter stopped."
638 " CSR5 is %x, CSR6 %x, new CSR6 %x.\n",
639 dev
->name
, csr5
, ioread32(ioaddr
+ CSR6
), tp
->csr6
);
640 tulip_restart_rxtx(tp
);
642 spin_unlock(&tp
->lock
);
/* --- Abnormal-summary handling. --- */
646 if (csr5
& AbnormalIntr
) { /* Abnormal error summary bit. */
647 if (csr5
== 0xffffffff)
649 if (csr5
& TxJabber
) tp
->stats
.tx_errors
++;
650 if (csr5
& TxFIFOUnderflow
) {
651 if ((tp
->csr6
& 0xC000) != 0xC000)
652 tp
->csr6
+= 0x4000; /* Bump up the Tx threshold */
654 tp
->csr6
|= 0x00200000; /* Store-n-forward. */
655 /* Restart the transmit process. */
656 tulip_restart_rxtx(tp
);
657 iowrite32(0, ioaddr
+ CSR1
);
659 if (csr5
& (RxDied
| RxNoBuf
)) {
660 if (tp
->flags
& COMET_MAC_ADDR
) {
/* Comet chips keep the multicast filter at 0xAC/0xB0; rewrite it. */
661 iowrite32(tp
->mc_filter
[0], ioaddr
+ 0xAC);
662 iowrite32(tp
->mc_filter
[1], ioaddr
+ 0xB0);
665 if (csr5
& RxDied
) { /* Missed a Rx frame. */
666 tp
->stats
.rx_missed_errors
+= ioread32(ioaddr
+ CSR8
) & 0xffff;
667 tp
->stats
.rx_errors
++;
668 tulip_start_rxtx(tp
);
671 * NB: t21142_lnk_change() does a del_timer_sync(), so be careful if this
672 * call is ever done under the spinlock
674 if (csr5
& (TPLnkPass
| TPLnkFail
| 0x08000000)) {
676 (tp
->link_change
)(dev
, csr5
);
678 if (csr5
& SytemError
) {
679 int error
= (csr5
>> 23) & 7;
680 /* oops, we hit a PCI error. The code produced corresponds
685 * Note that on parity error, we should do a software reset
686 * of the chip to get it back into a sane state (according
687 * to the 21142/3 docs that is).
690 printk(KERN_ERR
"%s: (%lu) System Error occurred (%d)\n",
691 dev
->name
, tp
->nir
, error
);
693 /* Clear all error sources, included undocumented ones! */
694 iowrite32(0x0800f7ba, ioaddr
+ CSR5
);
697 if (csr5
& TimerInt
) {
700 printk(KERN_ERR
"%s: Re-enabling interrupts, %8.8x.\n",
702 iowrite32(tulip_tbl
[tp
->chip_id
].valid_intrs
, ioaddr
+ CSR7
);
/* --- Work-budget enforcement: tx/rx/oi counters vs. the max* caps. --- */
706 if (tx
> maxtx
|| rx
> maxrx
|| oi
> maxoi
) {
708 printk(KERN_WARNING
"%s: Too much work during an interrupt, "
709 "csr5=0x%8.8x. (%lu) (%d,%d,%d)\n", dev
->name
, csr5
, tp
->nir
, tx
, rx
, oi
);
711 /* Acknowledge all interrupt sources. */
712 iowrite32(0x8001ffff, ioaddr
+ CSR5
);
713 if (tp
->flags
& HAS_INTR_MITIGATION
) {
714 /* Josip Loncaric at ICASE did extensive experimentation
715 to develop a good interrupt mitigation setting.*/
716 iowrite32(0x8b240000, ioaddr
+ CSR11
);
717 } else if (tp
->chip_id
== LC82C168
) {
718 /* the LC82C168 doesn't have a hw timer.*/
719 iowrite32(0x00, ioaddr
+ CSR7
);
720 mod_timer(&tp
->timer
, RUN_AT(HZ
/50));
722 /* Mask all interrupting sources, set timer to
724 iowrite32(((~csr5
) & 0x0001ebef) | AbnormalIntr
| TimerInt
, ioaddr
+ CSR7
);
725 iowrite32(0x0012, ioaddr
+ CSR11
);
/* Re-read CSR5 for the loop condition below. */
734 csr5
= ioread32(ioaddr
+ CSR5
);
736 #ifdef CONFIG_TULIP_NAPI
739 } while ((csr5
& (TxNoBuf
|
750 } while ((csr5
& (NormalIntr
|AbnormalIntr
)) != 0);
752 tulip_refill_rx(dev
);
754 /* check if the card is in suspend mode */
755 entry
= tp
->dirty_rx
% RX_RING_SIZE
;
756 if (tp
->rx_buffers
[entry
].skb
== NULL
) {
758 printk(KERN_WARNING
"%s: in rx suspend mode: (%lu) (tp->cur_rx = %u, ttimer = %d, rx = %d) go/stay in suspend mode\n", dev
->name
, tp
->nir
, tp
->cur_rx
, tp
->ttimer
, rx
);
759 if (tp
->chip_id
== LC82C168
) {
760 iowrite32(0x00, ioaddr
+ CSR7
);
761 mod_timer(&tp
->timer
, RUN_AT(HZ
/50));
763 if (tp
->ttimer
== 0 || (ioread32(ioaddr
+ CSR11
) & 0xffff) == 0) {
765 printk(KERN_WARNING
"%s: in rx suspend mode: (%lu) set timer\n", dev
->name
, tp
->nir
);
766 iowrite32(tulip_tbl
[tp
->chip_id
].valid_intrs
| TimerInt
,
768 iowrite32(TimerInt
, ioaddr
+ CSR5
);
769 iowrite32(12, ioaddr
+ CSR11
);
774 #endif /* CONFIG_TULIP_NAPI */
/* Fold the hardware missed-frame counter (CSR8) into rx_dropped;
 * bit 16 set means the 16-bit counter overflowed. */
776 if ((missed
= ioread32(ioaddr
+ CSR8
) & 0x1ffff)) {
777 tp
->stats
.rx_dropped
+= missed
& 0x10000 ? 0x10000 : missed
;
781 printk(KERN_DEBUG
"%s: exiting interrupt, csr5=%#4.4x.\n",
782 dev
->name
, ioread32(ioaddr
+ CSR5
));