/*********************************************************************
 *
 * Filename:      w83977af_ir.c
 * Description:   FIR driver for the Winbond W83977AF Super I/O chip
 * Status:        Experimental.
 * Author:        Paul VanderSpek
 * Created at:    Wed Nov  4 11:46:16 1998
 * Modified at:   Fri Jan 28 12:10:59 2000
 * Modified by:   Dag Brattli <dagb@cs.uit.no>
 *
 *     Copyright (c) 1998-2000 Dag Brattli <dagb@cs.uit.no>
 *     Copyright (c) 1998-1999 Rebel.com
 *
 *     This program is free software; you can redistribute it and/or
 *     modify it under the terms of the GNU General Public License as
 *     published by the Free Software Foundation; either version 2 of
 *     the License, or (at your option) any later version.
 *
 *     Neither Paul VanderSpek nor Rebel.com admit liability nor provide
 *     warranty for any of this software. This material is provided "AS-IS".
 *
 *     If you find bugs in this file, it is very likely that the same bug
 *     will also be in pc87108.c, since the implementations are quite
 *     similar.
 *
 *     Notice that all functions that need to access the chip in _any_
 *     way must save the BSR register on entry and restore it on exit.
 *     It is _very_ important to follow this policy!
 *
 *         bank = inb(iobase+BSR);
 *
 *         do_your_stuff_here();
 *
 *         outb(bank, iobase+BSR);
 *
 ********************************************************************/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/rtnetlink.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>

#include <asm/byteorder.h>

#include <net/irda/irda.h>
#include <net/irda/wrapper.h>
#include <net/irda/irda_device.h>

#include "w83977af_ir.h"
#ifdef CONFIG_ARCH_NETWINDER             /* Adjust to NetWinder differences */
#undef  CONFIG_NETWINDER_TX_DMA_PROBLEMS /* Not needed */
#define CONFIG_NETWINDER_RX_DMA_PROBLEMS /* Must have this one! */
#endif

#define CONFIG_USE_W977_PNP              /* Currently needed */
#define PIO_MAX_SPEED 115200
static char *driver_name = "w83977af_ir";
static int qos_mtt_bits = 0x07;          /* 1 ms or more */

#define CHIP_IO_EXTENT 8
static unsigned int io[] = { 0x180, ~0, ~0, ~0 };
#ifdef CONFIG_ARCH_NETWINDER             /* Adjust to NetWinder differences */
static unsigned int irq[] = { 6, 0, 0, 0 };
#else
static unsigned int irq[] = { 11, 0, 0, 0 };
#endif
static unsigned int dma[] = { 1, 0, 0, 0 };
static unsigned int efbase[] = { W977_EFIO_BASE, W977_EFIO2_BASE };
static unsigned int efio = W977_EFIO_BASE;

static struct w83977af_ir *dev_self[] = { NULL, NULL, NULL, NULL };
static int w83977af_open(int i, unsigned int iobase, unsigned int irq,
			 unsigned int dma);
static int w83977af_close(struct w83977af_ir *self);
static int w83977af_probe(int iobase, int irq, int dma);
static int w83977af_dma_receive(struct w83977af_ir *self);
static int w83977af_dma_receive_complete(struct w83977af_ir *self);
static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb,
				      struct net_device *dev);
static int w83977af_pio_write(int iobase, __u8 *buf, int len, int fifo_size);
static void w83977af_dma_write(struct w83977af_ir *self, int iobase);
static void w83977af_change_speed(struct w83977af_ir *self, __u32 speed);
static int w83977af_is_receiving(struct w83977af_ir *self);

static int w83977af_net_open(struct net_device *dev);
static int w83977af_net_close(struct net_device *dev);
static int w83977af_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
/*
 * Function w83977af_init ()
 *
 *    Initialize chip. Just try to find out how many chips we are dealing with
 */
static int __init w83977af_init(void)
	IRDA_DEBUG(0, "%s()\n", __func__);

	for (i = 0; i < ARRAY_SIZE(dev_self) && io[i] < 2000; i++) {
		if (w83977af_open(i, io[i], irq[i], dma[i]) == 0)
/*
 * Function w83977af_cleanup ()
 *
 *    Close all configured chips
 */
static void __exit w83977af_cleanup(void)
	IRDA_DEBUG(4, "%s()\n", __func__);

	for (i = 0; i < ARRAY_SIZE(dev_self); i++) {
		w83977af_close(dev_self[i]);
static const struct net_device_ops w83977_netdev_ops = {
	.ndo_open       = w83977af_net_open,
	.ndo_stop       = w83977af_net_close,
	.ndo_start_xmit = w83977af_hard_xmit,
	.ndo_do_ioctl   = w83977af_net_ioctl,
};
/*
 * Function w83977af_open (iobase, irq)
 *
 *    Open driver instance
 */
static int w83977af_open(int i, unsigned int iobase, unsigned int irq,
			 unsigned int dma)
	struct net_device *dev;
	struct w83977af_ir *self;

	IRDA_DEBUG(0, "%s()\n", __func__);
	/* Lock the port that we need */
	if (!request_region(iobase, CHIP_IO_EXTENT, driver_name)) {
		IRDA_DEBUG(0, "%s(), can't get iobase of 0x%03x\n",
			   __func__, iobase);

	if (w83977af_probe(iobase, irq, dma) == -1) {
	/*
	 *  Allocate new instance of the driver
	 */
	dev = alloc_irdadev(sizeof(struct w83977af_ir));
		printk(KERN_ERR "IrDA: Can't allocate memory for "
		       "IrDA control block!\n");

	self = netdev_priv(dev);
	spin_lock_init(&self->lock);
	self->io.fir_base  = iobase;
	self->io.fir_ext   = CHIP_IO_EXTENT;
	self->io.fifo_size = 32;
	/* Initialize QoS for this device */
	irda_init_max_qos_capabilies(&self->qos);

	/* The only value we must override is the baud rate */
	/* FIXME: The HP HDLS-1100 does not support 1152000! */
	self->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600|
		IR_115200|IR_576000|IR_1152000|(IR_4000000 << 8);
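	/*
	 * Note: baud_rate.bits is a 16-bit field; the SIR/MIR rates above sit
	 * in the low byte, and IR_4000000 is presumably shifted into the high
	 * byte so that the 4 Mbps FIR capability is advertised as well.
	 */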
	/* The HP HDLS-1100 needs 1 ms according to the specs */
	self->qos.min_turn_time.bits = qos_mtt_bits;
	irda_qos_bits_to_value(&self->qos);
	/* Max DMA buffer size needed = (data_size + 6) * (window_size) + 6; */
	self->rx_buff.truesize = 14384;
	self->tx_buff.truesize = 4000;
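	/*
	 * 14384 presumably comes from the formula above with a 2048-byte data
	 * size and a window of 7 frames: (2048 + 6) * 7 + 6 = 14384.
	 */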
	/* Allocate memory if needed */
	self->rx_buff.head = dma_alloc_coherent(NULL, self->rx_buff.truesize,
						&self->rx_buff_dma, GFP_KERNEL);
	if (self->rx_buff.head == NULL) {

	memset(self->rx_buff.head, 0, self->rx_buff.truesize);

	self->tx_buff.head = dma_alloc_coherent(NULL, self->tx_buff.truesize,
						&self->tx_buff_dma, GFP_KERNEL);
	if (self->tx_buff.head == NULL) {

	memset(self->tx_buff.head, 0, self->tx_buff.truesize);
	self->rx_buff.in_frame = FALSE;
	self->rx_buff.state = OUTSIDE_FRAME;
	self->tx_buff.data = self->tx_buff.head;
	self->rx_buff.data = self->rx_buff.head;
	dev->netdev_ops = &w83977_netdev_ops;

	err = register_netdev(dev);
		IRDA_ERROR("%s(), register_netdevice() failed!\n", __func__);
	IRDA_MESSAGE("IrDA: Registered device %s\n", dev->name);
	/* Need to store self somewhere */

	dma_free_coherent(NULL, self->tx_buff.truesize,
			  self->tx_buff.head, self->tx_buff_dma);
	dma_free_coherent(NULL, self->rx_buff.truesize,
			  self->rx_buff.head, self->rx_buff_dma);
	release_region(iobase, CHIP_IO_EXTENT);
/*
 * Function w83977af_close (self)
 *
 *    Close driver instance
 */
static int w83977af_close(struct w83977af_ir *self)
	IRDA_DEBUG(0, "%s()\n", __func__);

	iobase = self->io.fir_base;
#ifdef CONFIG_USE_W977_PNP
	/* Enter PnP configuration mode */
	w977_efm_enter(efio);

	w977_select_device(W977_DEVICE_IR, efio);

	/* Deactivate device */
	w977_write_reg(0x30, 0x00, efio);
#endif /* CONFIG_USE_W977_PNP */
	/* Remove netdevice */
	unregister_netdev(self->netdev);

	/* Release the PORT that this driver is using */
	IRDA_DEBUG(0, "%s(), Releasing Region %03x\n",
		   __func__, self->io.fir_base);
	release_region(self->io.fir_base, self->io.fir_ext);
	if (self->tx_buff.head)
		dma_free_coherent(NULL, self->tx_buff.truesize,
				  self->tx_buff.head, self->tx_buff_dma);

	if (self->rx_buff.head)
		dma_free_coherent(NULL, self->rx_buff.truesize,
				  self->rx_buff.head, self->rx_buff_dma);

	free_netdev(self->netdev);
static int w83977af_probe(int iobase, int irq, int dma)
	for (i = 0; i < 2; i++) {
		IRDA_DEBUG(0, "%s()\n", __func__);
#ifdef CONFIG_USE_W977_PNP
		/* Enter PnP configuration mode */
		w977_efm_enter(efbase[i]);

		w977_select_device(W977_DEVICE_IR, efbase[i]);

		/* Configure PnP port, IRQ, and DMA channel */
		w977_write_reg(0x60, (iobase >> 8) & 0xff, efbase[i]);
		w977_write_reg(0x61, (iobase) & 0xff, efbase[i]);

		w977_write_reg(0x70, irq, efbase[i]);
#ifdef CONFIG_ARCH_NETWINDER
		/* Netwinder uses 1 higher than Linux */
		w977_write_reg(0x74, dma+1, efbase[i]);
#else
		w977_write_reg(0x74, dma, efbase[i]);
#endif /* CONFIG_ARCH_NETWINDER */
		w977_write_reg(0x75, 0x04, efbase[i]); /* Disable Tx DMA */

		/* Set append hardware CRC, enable IR bank selection */
		w977_write_reg(0xf0, APEDCRC|ENBNKSEL, efbase[i]);

		/* Activate device */
		w977_write_reg(0x30, 0x01, efbase[i]);

		w977_efm_exit(efbase[i]);
#endif /* CONFIG_USE_W977_PNP */
		/* Disable Advanced mode */
		switch_bank(iobase, SET2);
		outb(iobase+2, 0x00);

		/* Turn on UART (global) interrupts */
		switch_bank(iobase, SET0);
		outb(HCR_EN_IRQ, iobase+HCR);

		/* Switch to advanced mode */
		switch_bank(iobase, SET2);
		outb(inb(iobase+ADCR1) | ADCR1_ADV_SL, iobase+ADCR1);

		/* Set default IR-mode */
		switch_bank(iobase, SET0);
		outb(HCR_SIR, iobase+HCR);
		/* Read the Advanced IR ID */
		switch_bank(iobase, SET3);
		version = inb(iobase+AUID);

		if (0x10 == (version & 0xf0)) {
			/* Set FIFO size to 32 */
			switch_bank(iobase, SET2);
			outb(ADCR2_RXFS32|ADCR2_TXFS32, iobase+ADCR2);

			/* Set FIFO threshold to TX17, RX16 */
			switch_bank(iobase, SET0);
			outb(UFR_RXTL|UFR_TXTL|UFR_TXF_RST|UFR_RXF_RST|
			     UFR_EN_FIFO, iobase+UFR);

			/* Receiver frame length */
			switch_bank(iobase, SET4);
			outb(2048 & 0xff, iobase+6);
			outb((2048 >> 8) & 0x1f, iobase+7);
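			/*
			 * The maximum receiver frame length (2048 bytes) is
			 * split across two registers of this set: the low
			 * byte goes to offset 6 and the remaining high bits
			 * (masked to 5 bits) to offset 7.
			 */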
			/*
			 * Init HP HSDL-1100 transceiver.
			 *
			 * Set IRX_MSL since we have 2 receive paths, IRRX
			 * and IRRXH. Clear IRSL0D since we want IRSL0 to
			 * be an input pin used for IRRXH.
			 *
			 *   IRRX  pin 37 connected to receiver
			 *   IRTX  pin 38 connected to transmitter
			 *   FIRRX pin 39 connected to receiver (IRSL0)
			 *   CIRRX pin 40 connected to pin 37
			 */
			switch_bank(iobase, SET7);
			outb(0x40, iobase+7);
			IRDA_MESSAGE("W83977AF (IR) driver loaded. "
				     "Version: 0x%02x\n", version);

		/* Try next extended function register address */
		IRDA_DEBUG(0, "%s(), Wrong chip version", __func__);
static void w83977af_change_speed(struct w83977af_ir *self, __u32 speed)
	int ir_mode = HCR_SIR;

	iobase = self->io.fir_base;

	/* Update accounting for new speed */
	self->io.speed = speed;

	/* Save current bank */
	set = inb(iobase+SSR);

	/* Disable interrupts */
	switch_bank(iobase, SET0);

	switch_bank(iobase, SET2);
	outb(0x00, iobase+ABHL);
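	/*
	 * The values written to ABLL in the speed cases below are presumably
	 * the SIR baud divisors, i.e. 115200 / speed: 0x0c (12) for 9600,
	 * 0x06 for 19200, 0x03 for 38400, 0x02 for 57600 and 0x01 for 115200,
	 * with ABHL already cleared above.
	 */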
	case 9600:   outb(0x0c, iobase+ABLL); break;
	case 19200:  outb(0x06, iobase+ABLL); break;
	case 38400:  outb(0x03, iobase+ABLL); break;
	case 57600:  outb(0x02, iobase+ABLL); break;
	case 115200: outb(0x01, iobase+ABLL); break;
		ir_mode = HCR_MIR_576;
		IRDA_DEBUG(0, "%s(), handling baud of 576000\n", __func__);
		ir_mode = HCR_MIR_1152;
		IRDA_DEBUG(0, "%s(), handling baud of 1152000\n", __func__);
		IRDA_DEBUG(0, "%s(), handling baud of 4000000\n", __func__);
		IRDA_DEBUG(0, "%s(), unknown baud rate of %d\n", __func__, speed);
	switch_bank(iobase, SET0);
	outb(ir_mode, iobase+HCR);

	/* set FIFO size to 32 */
	switch_bank(iobase, SET2);
	outb(ADCR2_RXFS32|ADCR2_TXFS32, iobase+ADCR2);

	/* set FIFO threshold to TX17, RX16 */
	switch_bank(iobase, SET0);
	outb(0x00, iobase+UFR);        /* Reset */
	outb(UFR_EN_FIFO, iobase+UFR); /* First we must enable FIFO */
	outb(0xa7, iobase+UFR);

	netif_wake_queue(self->netdev);
	/* Enable some interrupts so we can receive frames */
	switch_bank(iobase, SET0);
	if (speed > PIO_MAX_SPEED) {
		outb(ICR_EFSFI, iobase+ICR);
		w83977af_dma_receive(self);
		outb(ICR_ERBRI, iobase+ICR);

	outb(set, iobase+SSR);
/*
 * Function w83977af_hard_xmit (skb, dev)
 *
 *    Sets up a DMA transfer to send the current frame.
 */
static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb,
				      struct net_device *dev)
	struct w83977af_ir *self;

	self = netdev_priv(dev);

	iobase = self->io.fir_base;

	IRDA_DEBUG(4, "%s(%ld), skb->len=%d\n", __func__, jiffies,
		   (int)skb->len);
	/* Lock transmit buffer */
	netif_stop_queue(dev);

	/* Check if we need to change the speed */
	speed = irda_get_next_speed(skb);
	if ((speed != self->io.speed) && (speed != -1)) {
		/* Check for empty frame */
			w83977af_change_speed(self, speed);
		self->new_speed = speed;
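		/*
		 * Note: for frames that carry data the speed change is only
		 * recorded in new_speed here; the switch itself is deferred
		 * until the transmit completes (see w83977af_dma_xmit_complete()
		 * and the SIR interrupt handler, which both check new_speed).
		 */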
	/* Save current set */
	set = inb(iobase+SSR);

	/* Decide if we should use PIO or DMA transfer */
	if (self->io.speed > PIO_MAX_SPEED) {
		self->tx_buff.data = self->tx_buff.head;
		skb_copy_from_linear_data(skb, self->tx_buff.data, skb->len);
		self->tx_buff.len = skb->len;

		mtt = irda_get_mtt(skb);
		IRDA_DEBUG(4, "%s(%ld), mtt=%d\n", __func__, jiffies, mtt);

			/* Enable DMA interrupt */
			switch_bank(iobase, SET0);
			outb(ICR_EDMAI, iobase+ICR);
			w83977af_dma_write(self, iobase);
		self->tx_buff.data = self->tx_buff.head;
		self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data,
						   self->tx_buff.truesize);
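		/*
		 * At SIR speeds the frame is not sent raw: async_wrap_skb()
		 * wraps it in the IrDA async framing (BOF/EOF delimiters,
		 * byte stuffing and CRC) inside tx_buff, and the transmit
		 * threshold interrupt enabled just below drives
		 * w83977af_pio_write() to push the wrapped data to the FIFO.
		 */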
		/* Add interrupt on tx low level (will fire immediately) */
		switch_bank(iobase, SET0);
		outb(ICR_ETXTHI, iobase+ICR);

	/* Restore set register */
	outb(set, iobase+SSR);
/*
 * Function w83977af_dma_write (self, iobase)
 *
 *    Send frame using DMA
 */
static void w83977af_dma_write(struct w83977af_ir *self, int iobase)
#ifdef CONFIG_NETWINDER_TX_DMA_PROBLEMS

	IRDA_DEBUG(4, "%s(), len=%d\n", __func__, self->tx_buff.len);
	/* Save current set */
	set = inb(iobase+SSR);

	switch_bank(iobase, SET0);
	outb(inb(iobase+HCR) & ~HCR_EN_DMA, iobase+HCR);

	/* Choose transmit DMA channel */
	switch_bank(iobase, SET2);
	outb(ADCR1_D_CHSW|/*ADCR1_DMA_F|*/ADCR1_ADV_SL, iobase+ADCR1);
#ifdef CONFIG_NETWINDER_TX_DMA_PROBLEMS
	spin_lock_irqsave(&self->lock, flags);

	disable_dma(self->io.dma);
	clear_dma_ff(self->io.dma);
	set_dma_mode(self->io.dma, DMA_MODE_READ);
	set_dma_addr(self->io.dma, self->tx_buff_dma);
	set_dma_count(self->io.dma, self->tx_buff.len);
	irda_setup_dma(self->io.dma, self->tx_buff_dma, self->tx_buff.len,
	self->io.direction = IO_XMIT;
	switch_bank(iobase, SET0);
#ifdef CONFIG_NETWINDER_TX_DMA_PROBLEMS
	hcr = inb(iobase+HCR);
	outb(hcr | HCR_EN_DMA, iobase+HCR);
	enable_dma(self->io.dma);
	spin_unlock_irqrestore(&self->lock, flags);
#else
	outb(inb(iobase+HCR) | HCR_EN_DMA | HCR_TX_WT, iobase+HCR);
#endif

	/* Restore set register */
	outb(set, iobase+SSR);
/*
 * Function w83977af_pio_write (iobase, buf, len, fifo_size)
 */
static int w83977af_pio_write(int iobase, __u8 *buf, int len, int fifo_size)
	IRDA_DEBUG(4, "%s()\n", __func__);

	/* Save current bank */
	set = inb(iobase+SSR);

	switch_bank(iobase, SET0);
	if (!(inb_p(iobase+USR) & USR_TSRE)) {
			   "%s(), warning, FIFO not empty yet!\n", __func__);

		IRDA_DEBUG(4, "%s(), %d bytes left in tx fifo\n",
			   __func__, fifo_size);

	/* Fill FIFO with current frame */
	while ((fifo_size-- > 0) && (actual < len)) {
		/* Transmit next byte */
		outb(buf[actual++], iobase+TBR);

	IRDA_DEBUG(4, "%s(), fifo_size %d ; %d sent of %d\n",
		   __func__, fifo_size, actual, len);

	outb(set, iobase+SSR);
/*
 * Function w83977af_dma_xmit_complete (self)
 *
 *    The transfer of a frame is finished. So do the necessary things
 */
static void w83977af_dma_xmit_complete(struct w83977af_ir *self)
	IRDA_DEBUG(4, "%s(%ld)\n", __func__, jiffies);

	IRDA_ASSERT(self != NULL, return;);

	iobase = self->io.fir_base;

	/* Save current set */
	set = inb(iobase+SSR);

	switch_bank(iobase, SET0);
	outb(inb(iobase+HCR) & ~HCR_EN_DMA, iobase+HCR);

	/* Check for underrun! */
	if (inb(iobase+AUDR) & AUDR_UNDR) {
		IRDA_DEBUG(0, "%s(), Transmit underrun!\n", __func__);

		self->netdev->stats.tx_errors++;
		self->netdev->stats.tx_fifo_errors++;

		/* Clear bit, by writing 1 to it */
		outb(AUDR_UNDR, iobase+AUDR);
		self->netdev->stats.tx_packets++;

	if (self->new_speed) {
		w83977af_change_speed(self, self->new_speed);

	/* Unlock tx_buff and request another frame */
	/* Tell the network layer that we want more frames */
	netif_wake_queue(self->netdev);

	outb(set, iobase+SSR);
/*
 * Function w83977af_dma_receive (self)
 *
 *    Get ready for receiving a frame. The device will initiate a DMA
 *    if it starts to receive a frame.
 */
static int w83977af_dma_receive(struct w83977af_ir *self)
#ifdef CONFIG_NETWINDER_RX_DMA_PROBLEMS

	IRDA_ASSERT(self != NULL, return -1;);

	IRDA_DEBUG(4, "%s\n", __func__);

	iobase = self->io.fir_base;

	/* Save current set */
	set = inb(iobase+SSR);

	switch_bank(iobase, SET0);
	outb(inb(iobase+HCR) & ~HCR_EN_DMA, iobase+HCR);

	/* Choose DMA Rx, DMA Fairness, and Advanced mode */
	switch_bank(iobase, SET2);
	outb((inb(iobase+ADCR1) & ~ADCR1_D_CHSW)/*|ADCR1_DMA_F*/|ADCR1_ADV_SL,
	     iobase+ADCR1);

	self->io.direction = IO_RECV;
	self->rx_buff.data = self->rx_buff.head;
#ifdef CONFIG_NETWINDER_RX_DMA_PROBLEMS
	spin_lock_irqsave(&self->lock, flags);

	disable_dma(self->io.dma);
	clear_dma_ff(self->io.dma);
	set_dma_mode(self->io.dma, DMA_MODE_READ);
	set_dma_addr(self->io.dma, self->rx_buff_dma);
	set_dma_count(self->io.dma, self->rx_buff.truesize);
	irda_setup_dma(self->io.dma, self->rx_buff_dma, self->rx_buff.truesize,
	/*
	 * Reset Rx FIFO. This will also flush the ST_FIFO, it's very
	 * important that we don't reset the Tx FIFO since it might not
	 * be finished transmitting yet
	 */
	switch_bank(iobase, SET0);
	outb(UFR_RXTL|UFR_TXTL|UFR_RXF_RST|UFR_EN_FIFO, iobase+UFR);
	self->st_fifo.len = self->st_fifo.tail = self->st_fifo.head = 0;

	switch_bank(iobase, SET0);
#ifdef CONFIG_NETWINDER_RX_DMA_PROBLEMS
	hcr = inb(iobase+HCR);
	outb(hcr | HCR_EN_DMA, iobase+HCR);
	enable_dma(self->io.dma);
	spin_unlock_irqrestore(&self->lock, flags);
#else
	outb(inb(iobase+HCR) | HCR_EN_DMA, iobase+HCR);
#endif
	outb(set, iobase+SSR);
/*
 * Function w83977af_receive_complete (self)
 *
 *    Finished with receiving a frame
 */
static int w83977af_dma_receive_complete(struct w83977af_ir *self)
	struct st_fifo *st_fifo;

	IRDA_DEBUG(4, "%s\n", __func__);

	st_fifo = &self->st_fifo;

	iobase = self->io.fir_base;

	/* Save current set */
	set = inb(iobase+SSR);

	iobase = self->io.fir_base;
	/* Read status FIFO */
	switch_bank(iobase, SET5);
	while ((status = inb(iobase+FS_FO)) & FS_FO_FSFDR) {
		st_fifo->entries[st_fifo->tail].status = status;

		st_fifo->entries[st_fifo->tail].len = inb(iobase+RFLFL);
		st_fifo->entries[st_fifo->tail].len |= inb(iobase+RFLFH) << 8;
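		/*
		 * Each status-FIFO entry pairs the frame status byte read
		 * from FS_FO with a frame length assembled from the RFLFL
		 * (low byte) and RFLFH (high byte) registers; the entries
		 * are drained and checked for errors in the loop below.
		 */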
	while (st_fifo->len) {
		/* Get first entry */
		status = st_fifo->entries[st_fifo->head].status;
		len = st_fifo->entries[st_fifo->head].len;
		/* Check for errors */
		if (status & FS_FO_ERR_MSK) {
			if (status & FS_FO_LST_FR) {
				/* Add number of lost frames to stats */
				self->netdev->stats.rx_errors += len;
				self->netdev->stats.rx_errors++;

				self->rx_buff.data += len;

				if (status & FS_FO_MX_LEX)
					self->netdev->stats.rx_length_errors++;

				if (status & FS_FO_PHY_ERR)
					self->netdev->stats.rx_frame_errors++;

				if (status & FS_FO_CRC_ERR)
					self->netdev->stats.rx_crc_errors++;

			/* The errors below can be reported in both cases */
			if (status & FS_FO_RX_OV)
				self->netdev->stats.rx_fifo_errors++;

			if (status & FS_FO_FSF_OV)
				self->netdev->stats.rx_fifo_errors++;
			/* Check if we have transferred all data to memory */
			switch_bank(iobase, SET0);
			if (inb(iobase+USR) & USR_RDR) {
				udelay(80); /* Should be enough!? */

			skb = dev_alloc_skb(len+1);
				       "%s(), memory squeeze, dropping frame.\n", __func__);
				/* Restore set register */
				outb(set, iobase+SSR);
			/* Align to 20 bytes */

			/* Copy frame without CRC */
			if (self->io.speed < 4000000) {
				skb_copy_to_linear_data(skb,
				skb_copy_to_linear_data(skb,

			/* Move to next frame */
			self->rx_buff.data += len;
			self->netdev->stats.rx_packets++;

			skb->dev = self->netdev;
			skb_reset_mac_header(skb);
			skb->protocol = htons(ETH_P_IRDA);

	/* Restore set register */
	outb(set, iobase+SSR);
/*
 * Function w83977af_pio_receive (self)
 *
 *    Receive all data in receiver FIFO
 */
static void w83977af_pio_receive(struct w83977af_ir *self)
	IRDA_DEBUG(4, "%s()\n", __func__);

	IRDA_ASSERT(self != NULL, return;);

	iobase = self->io.fir_base;

	/* Receive all characters in Rx FIFO */
	do {
		byte = inb(iobase+RBR);
		async_unwrap_char(self->netdev, &self->netdev->stats,
				  &self->rx_buff, byte);
	} while (inb(iobase+USR) & USR_RDR); /* Data available */
/*
 * Function w83977af_sir_interrupt (self, eir)
 *
 *    Handle SIR interrupt
 */
static __u8 w83977af_sir_interrupt(struct w83977af_ir *self, int isr)
	IRDA_DEBUG(4, "%s(), isr=%#x\n", __func__, isr);

	iobase = self->io.fir_base;
	/* Transmit FIFO low on data */
	if (isr & ISR_TXTH_I) {
		/* Write data left in transmit buffer */
		actual = w83977af_pio_write(self->io.fir_base,
					    self->tx_buff.data,
					    self->tx_buff.len,
					    self->io.fifo_size);
		self->tx_buff.data += actual;
		self->tx_buff.len  -= actual;

		self->io.direction = IO_XMIT;

		/* Check if finished */
		if (self->tx_buff.len > 0) {
			new_icr |= ICR_ETXTHI;

			set = inb(iobase+SSR);
			switch_bank(iobase, SET0);
			outb(AUDR_SFEND, iobase+AUDR);
			outb(set, iobase+SSR);

			self->netdev->stats.tx_packets++;

			/* Feed me more packets */
			netif_wake_queue(self->netdev);
			new_icr |= ICR_ETBREI;
	/* Check if transmission has completed */
	if (isr & ISR_TXEMP_I) {
		/* Check if we need to change the speed? */
		if (self->new_speed) {
				   "%s(), Changing speed!\n", __func__);
			w83977af_change_speed(self, self->new_speed);

		/* Turn around and get ready to receive some data */
		self->io.direction = IO_RECV;
		new_icr |= ICR_ERBRI;

	/* Rx FIFO threshold or timeout */
	if (isr & ISR_RXTH_I) {
		w83977af_pio_receive(self);

		new_icr |= ICR_ERBRI;
/*
 * Function w83977af_fir_interrupt (self, eir)
 *
 *    Handle MIR/FIR interrupt
 */
static __u8 w83977af_fir_interrupt(struct w83977af_ir *self, int isr)
	iobase = self->io.fir_base;
	set = inb(iobase+SSR);

	/* End of frame detected in FIFO */
	if (isr & (ISR_FEND_I|ISR_FSF_I)) {
		if (w83977af_dma_receive_complete(self)) {
			/* Wait for next status FIFO interrupt */
			new_icr |= ICR_EFSFI;

			/* DMA not finished yet */

			/* Set timer value, resolution 1 ms */
			switch_bank(iobase, SET4);
			outb(0x01, iobase+TMRL); /* 1 ms */
			outb(0x00, iobase+TMRH);

			outb(IR_MSL_EN_TMR, iobase+IR_MSL);

			new_icr |= ICR_ETMRI;
	/* Timer finished */
	if (isr & ISR_TMR_I) {
		switch_bank(iobase, SET4);
		outb(0, iobase+IR_MSL);

		/* Clear timer event */
		/* switch_bank(iobase, SET0); */
		/* outb(ASCR_CTE, iobase+ASCR); */

		/* Check if this is a TX timer interrupt */
		if (self->io.direction == IO_XMIT) {
			w83977af_dma_write(self, iobase);

			new_icr |= ICR_EDMAI;

			/* Check if DMA has now finished */
			w83977af_dma_receive_complete(self);

			new_icr |= ICR_EFSFI;

	/* Finished with DMA */
	if (isr & ISR_DMA_I) {
		w83977af_dma_xmit_complete(self);

		/* Check if there are more frames to be transmitted */
		/* if (irda_device_txqueue_empty(self)) { */

		/*
		 * Prepare for receive
		 *
		 * ** Netwinder Tx DMA likes that we do this anyway **
		 */
		w83977af_dma_receive(self);
		new_icr = ICR_EFSFI;

	outb(set, iobase+SSR);
/*
 * Function w83977af_interrupt (irq, dev_id, regs)
 *
 *    An interrupt from the chip has arrived. Time to do some work
 */
static irqreturn_t w83977af_interrupt(int irq, void *dev_id)
	struct net_device *dev = dev_id;
	struct w83977af_ir *self;

	self = netdev_priv(dev);

	iobase = self->io.fir_base;

	/* Save current bank */
	set = inb(iobase+SSR);
	switch_bank(iobase, SET0);

	icr = inb(iobase+ICR);
	isr = inb(iobase+ISR) & icr; /* Mask out the interesting ones */

	outb(0, iobase+ICR); /* Disable interrupts */
	/* Dispatch interrupt handler for the current speed */
	if (self->io.speed > PIO_MAX_SPEED)
		icr = w83977af_fir_interrupt(self, isr);
	else
		icr = w83977af_sir_interrupt(self, isr);

	outb(icr, iobase+ICR); /* Restore (new) interrupts */
	outb(set, iobase+SSR); /* Restore bank register */
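	/*
	 * IRQ_RETVAL(isr) evaluates to IRQ_HANDLED when isr is non-zero, so
	 * the interrupt is only claimed as ours if one of the sources we had
	 * enabled in ICR actually fired.
	 */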
	return IRQ_RETVAL(isr);
/*
 * Function w83977af_is_receiving (self)
 *
 *    Return TRUE if we are currently receiving a frame
 */
static int w83977af_is_receiving(struct w83977af_ir *self)
	IRDA_ASSERT(self != NULL, return FALSE;);

	if (self->io.speed > 115200) {
		iobase = self->io.fir_base;

		/* Check if rx FIFO is not empty */
		set = inb(iobase+SSR);
		switch_bank(iobase, SET2);
		if ((inb(iobase+RXFDTH) & 0x3f) != 0) {
			/* We are receiving something */

		outb(set, iobase+SSR);

	status = (self->rx_buff.state != OUTSIDE_FRAME);
/*
 * Function w83977af_net_open (dev)
 */
static int w83977af_net_open(struct net_device *dev)
	struct w83977af_ir *self;

	IRDA_DEBUG(0, "%s()\n", __func__);

	IRDA_ASSERT(dev != NULL, return -1;);
	self = netdev_priv(dev);

	IRDA_ASSERT(self != NULL, return 0;);

	iobase = self->io.fir_base;

	if (request_irq(self->io.irq, w83977af_interrupt, 0, dev->name,

	/*
	 * Always allocate the DMA channel after the IRQ,
	 * and clean up on failure.
	 */
	if (request_dma(self->io.dma, dev->name)) {
		free_irq(self->io.irq, self);

	/* Save current set */
	set = inb(iobase+SSR);

	/* Enable some interrupts so we can receive frames again */
	switch_bank(iobase, SET0);
	if (self->io.speed > 115200) {
		outb(ICR_EFSFI, iobase+ICR);
		w83977af_dma_receive(self);
		outb(ICR_ERBRI, iobase+ICR);

	/* Restore bank register */
	outb(set, iobase+SSR);

	/* Ready to play! */
	netif_start_queue(dev);

	/* Give self a hardware name */
	sprintf(hwname, "w83977af @ 0x%03x", self->io.fir_base);

	/*
	 * Open new IrLAP layer instance, now that everything should be
	 * initialized properly
	 */
	self->irlap = irlap_open(dev, &self->qos, hwname);
/*
 * Function w83977af_net_close (dev)
 */
static int w83977af_net_close(struct net_device *dev)
	struct w83977af_ir *self;

	IRDA_DEBUG(0, "%s()\n", __func__);

	IRDA_ASSERT(dev != NULL, return -1;);
	self = netdev_priv(dev);

	IRDA_ASSERT(self != NULL, return 0;);

	iobase = self->io.fir_base;

	netif_stop_queue(dev);

	/* Stop and remove instance of IrLAP */
	irlap_close(self->irlap);

	disable_dma(self->io.dma);

	/* Save current set */
	set = inb(iobase+SSR);

	/* Disable interrupts */
	switch_bank(iobase, SET0);
	outb(0, iobase+ICR);

	free_irq(self->io.irq, dev);
	free_dma(self->io.dma);

	/* Restore bank register */
	outb(set, iobase+SSR);
/*
 * Function w83977af_net_ioctl (dev, rq, cmd)
 *
 *    Process IOCTL commands for this device
 */
static int w83977af_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
	struct if_irda_req *irq = (struct if_irda_req *) rq;
	struct w83977af_ir *self;
	unsigned long flags;

	IRDA_ASSERT(dev != NULL, return -1;);

	self = netdev_priv(dev);

	IRDA_ASSERT(self != NULL, return -1;);

	IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __func__, dev->name, cmd);

	spin_lock_irqsave(&self->lock, flags);

	case SIOCSBANDWIDTH: /* Set bandwidth */
		if (!capable(CAP_NET_ADMIN)) {
		w83977af_change_speed(self, irq->ifr_baudrate);
	case SIOCSMEDIABUSY: /* Set media busy */
		if (!capable(CAP_NET_ADMIN)) {
		irda_device_set_media_busy(self->netdev, TRUE);
	case SIOCGRECEIVING: /* Check if we are receiving right now */
		irq->ifr_receiving = w83977af_is_receiving(self);

	spin_unlock_irqrestore(&self->lock, flags);
MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>");
MODULE_DESCRIPTION("Winbond W83977AF IrDA Device Driver");
MODULE_LICENSE("GPL");

module_param(qos_mtt_bits, int, 0);
MODULE_PARM_DESC(qos_mtt_bits, "Minimum Turn Time");
module_param_array(io, int, NULL, 0);
MODULE_PARM_DESC(io, "Base I/O addresses");
module_param_array(irq, int, NULL, 0);
MODULE_PARM_DESC(irq, "IRQ lines");

/*
 * Function init_module (void)
 */
module_init(w83977af_init);

/*
 * Function cleanup_module (void)
 */
module_exit(w83977af_cleanup);