/*
 * Blackfin Infra-red Driver
 *
 * Copyright 2006-2009 Analog Devices Inc.
 *
 * Enter bugs at http://blackfin.uclinux.org/
 *
 * Licensed under the GPL-2 or later.
 *
 */
#ifdef CONFIG_SIR_BFIN_DMA
/* RX DMA runs as a 2D autobuffer: XCNT bytes per row, YCNT rows per page. */
#define DMA_SIR_RX_XCNT		10
#define DMA_SIR_RX_YCNT		(PAGE_SIZE / DMA_SIR_RX_XCNT)
/* Flush timer period for draining a partially filled DMA row. */
#define DMA_SIR_RX_FLUSH_JIFS	(HZ * 4 / 250)
#endif

/* PIO mode cannot keep up above 57600; DMA mode supports 115200. */
#ifdef CONFIG_SIR_BFIN_PIO
static int max_rate = 57600;
#else /* CONFIG_SIR_BFIN_DMA */
static int max_rate = 115200;
#endif
25 static void turnaround_delay(unsigned long last_jif
, int mtt
)
29 mtt
= mtt
< 10000 ? 10000 : mtt
;
30 ticks
= 1 + mtt
/ (USEC_PER_SEC
/ HZ
);
31 schedule_timeout_uninterruptible(ticks
);
34 static void __devinit
bfin_sir_init_ports(struct bfin_sir_port
*sp
, struct platform_device
*pdev
)
39 for (i
= 0; i
< pdev
->num_resources
; i
++) {
40 res
= &pdev
->resource
[i
];
43 sp
->membase
= (void __iomem
*)res
->start
;
49 sp
->rx_dma_channel
= res
->start
;
50 sp
->tx_dma_channel
= res
->end
;
58 #ifdef CONFIG_SIR_BFIN_DMA
60 init_timer(&(sp
->rx_dma_timer
));
64 static void bfin_sir_stop_tx(struct bfin_sir_port
*port
)
66 #ifdef CONFIG_SIR_BFIN_DMA
67 disable_dma(port
->tx_dma_channel
);
70 while (!(SIR_UART_GET_LSR(port
) & THRE
)) {
75 SIR_UART_STOP_TX(port
);
/* Enable the UART transmitter. */
static void bfin_sir_enable_tx(struct bfin_sir_port *port)
{
	SIR_UART_ENABLE_TX(port);
}
/* Disable the UART receiver. */
static void bfin_sir_stop_rx(struct bfin_sir_port *port)
{
	SIR_UART_STOP_RX(port);
}
/* Enable the UART receiver. */
static void bfin_sir_enable_rx(struct bfin_sir_port *port)
{
	SIR_UART_ENABLE_RX(port);
}
93 static int bfin_sir_set_speed(struct bfin_sir_port
*port
, int speed
)
97 unsigned short val
, lsr
, lcr
;
111 * IRDA is not affected by anomaly 05000230, so there is no
112 * need to tweak the divisor like he UART driver (which will
113 * slightly speed up the baud rate on us).
115 quot
= (port
->clk
+ (8 * speed
)) / (16 * speed
);
119 lsr
= SIR_UART_GET_LSR(port
);
120 } while (!(lsr
& TEMT
) && count
--);
122 /* The useconds for 1 bits to transmit */
123 utime
= 1000000 / speed
+ 1;
125 /* Clear UCEN bit to reset the UART state machine
126 * and control registers
128 val
= SIR_UART_GET_GCTL(port
);
130 SIR_UART_PUT_GCTL(port
, val
);
132 /* Set DLAB in LCR to Access THR RBR IER */
133 SIR_UART_SET_DLAB(port
);
136 SIR_UART_PUT_DLL(port
, quot
& 0xFF);
137 SIR_UART_PUT_DLH(port
, (quot
>> 8) & 0xFF);
140 /* Clear DLAB in LCR */
141 SIR_UART_CLEAR_DLAB(port
);
144 SIR_UART_PUT_LCR(port
, lcr
);
146 val
= SIR_UART_GET_GCTL(port
);
148 SIR_UART_PUT_GCTL(port
, val
);
153 printk(KERN_WARNING
"bfin_sir: Invalid speed %d\n", speed
);
157 val
= SIR_UART_GET_GCTL(port
);
158 /* If not add the 'RPOLC', we can't catch the receive interrupt.
159 * It's related with the HW layout and the IR transiver.
162 SIR_UART_PUT_GCTL(port
, val
);
166 static int bfin_sir_is_receiving(struct net_device
*dev
)
168 struct bfin_sir_self
*self
= netdev_priv(dev
);
169 struct bfin_sir_port
*port
= self
->sir_port
;
171 if (!(SIR_UART_GET_IER(port
) & ERBFI
))
173 return self
->rx_buff
.state
!= OUTSIDE_FRAME
;
176 #ifdef CONFIG_SIR_BFIN_PIO
177 static void bfin_sir_tx_chars(struct net_device
*dev
)
180 struct bfin_sir_self
*self
= netdev_priv(dev
);
181 struct bfin_sir_port
*port
= self
->sir_port
;
183 if (self
->tx_buff
.len
!= 0) {
184 chr
= *(self
->tx_buff
.data
);
185 SIR_UART_PUT_CHAR(port
, chr
);
186 self
->tx_buff
.data
++;
189 self
->stats
.tx_packets
++;
190 self
->stats
.tx_bytes
+= self
->tx_buff
.data
- self
->tx_buff
.head
;
191 if (self
->newspeed
) {
192 bfin_sir_set_speed(port
, self
->newspeed
);
193 self
->speed
= self
->newspeed
;
196 bfin_sir_stop_tx(port
);
197 bfin_sir_enable_rx(port
);
199 netif_wake_queue(dev
);
203 static void bfin_sir_rx_chars(struct net_device
*dev
)
205 struct bfin_sir_self
*self
= netdev_priv(dev
);
206 struct bfin_sir_port
*port
= self
->sir_port
;
209 SIR_UART_CLEAR_LSR(port
);
210 ch
= SIR_UART_GET_CHAR(port
);
211 async_unwrap_char(dev
, &self
->stats
, &self
->rx_buff
, ch
);
212 dev
->last_rx
= jiffies
;
215 static irqreturn_t
bfin_sir_rx_int(int irq
, void *dev_id
)
217 struct net_device
*dev
= dev_id
;
218 struct bfin_sir_self
*self
= netdev_priv(dev
);
219 struct bfin_sir_port
*port
= self
->sir_port
;
221 spin_lock(&self
->lock
);
222 while ((SIR_UART_GET_LSR(port
) & DR
))
223 bfin_sir_rx_chars(dev
);
224 spin_unlock(&self
->lock
);
229 static irqreturn_t
bfin_sir_tx_int(int irq
, void *dev_id
)
231 struct net_device
*dev
= dev_id
;
232 struct bfin_sir_self
*self
= netdev_priv(dev
);
233 struct bfin_sir_port
*port
= self
->sir_port
;
235 spin_lock(&self
->lock
);
236 if (SIR_UART_GET_LSR(port
) & THRE
)
237 bfin_sir_tx_chars(dev
);
238 spin_unlock(&self
->lock
);
242 #endif /* CONFIG_SIR_BFIN_PIO */
244 #ifdef CONFIG_SIR_BFIN_DMA
245 static void bfin_sir_dma_tx_chars(struct net_device
*dev
)
247 struct bfin_sir_self
*self
= netdev_priv(dev
);
248 struct bfin_sir_port
*port
= self
->sir_port
;
254 if (self
->tx_buff
.len
== 0) {
255 self
->stats
.tx_packets
++;
256 if (self
->newspeed
) {
257 bfin_sir_set_speed(port
, self
->newspeed
);
258 self
->speed
= self
->newspeed
;
261 bfin_sir_enable_rx(port
);
263 netif_wake_queue(dev
);
267 blackfin_dcache_flush_range((unsigned long)(self
->tx_buff
.data
),
268 (unsigned long)(self
->tx_buff
.data
+self
->tx_buff
.len
));
269 set_dma_config(port
->tx_dma_channel
,
270 set_bfin_dma_config(DIR_READ
, DMA_FLOW_STOP
,
271 INTR_ON_BUF
, DIMENSION_LINEAR
, DATA_SIZE_8
,
273 set_dma_start_addr(port
->tx_dma_channel
,
274 (unsigned long)(self
->tx_buff
.data
));
275 set_dma_x_count(port
->tx_dma_channel
, self
->tx_buff
.len
);
276 set_dma_x_modify(port
->tx_dma_channel
, 1);
277 enable_dma(port
->tx_dma_channel
);
280 static irqreturn_t
bfin_sir_dma_tx_int(int irq
, void *dev_id
)
282 struct net_device
*dev
= dev_id
;
283 struct bfin_sir_self
*self
= netdev_priv(dev
);
284 struct bfin_sir_port
*port
= self
->sir_port
;
286 spin_lock(&self
->lock
);
287 if (!(get_dma_curr_irqstat(port
->tx_dma_channel
) & DMA_RUN
)) {
288 clear_dma_irqstat(port
->tx_dma_channel
);
289 bfin_sir_stop_tx(port
);
291 self
->stats
.tx_packets
++;
292 self
->stats
.tx_bytes
+= self
->tx_buff
.len
;
293 self
->tx_buff
.len
= 0;
294 if (self
->newspeed
) {
295 bfin_sir_set_speed(port
, self
->newspeed
);
296 self
->speed
= self
->newspeed
;
299 bfin_sir_enable_rx(port
);
301 netif_wake_queue(dev
);
304 spin_unlock(&self
->lock
);
309 static void bfin_sir_dma_rx_chars(struct net_device
*dev
)
311 struct bfin_sir_self
*self
= netdev_priv(dev
);
312 struct bfin_sir_port
*port
= self
->sir_port
;
315 SIR_UART_CLEAR_LSR(port
);
317 for (i
= port
->rx_dma_buf
.head
; i
< port
->rx_dma_buf
.tail
; i
++)
318 async_unwrap_char(dev
, &self
->stats
, &self
->rx_buff
, port
->rx_dma_buf
.buf
[i
]);
321 void bfin_sir_rx_dma_timeout(struct net_device
*dev
)
323 struct bfin_sir_self
*self
= netdev_priv(dev
);
324 struct bfin_sir_port
*port
= self
->sir_port
;
328 spin_lock_irqsave(&self
->lock
, flags
);
329 x_pos
= DMA_SIR_RX_XCNT
- get_dma_curr_xcount(port
->rx_dma_channel
);
330 if (x_pos
== DMA_SIR_RX_XCNT
)
333 pos
= port
->rx_dma_nrows
* DMA_SIR_RX_XCNT
+ x_pos
;
335 if (pos
> port
->rx_dma_buf
.tail
) {
336 port
->rx_dma_buf
.tail
= pos
;
337 bfin_sir_dma_rx_chars(dev
);
338 port
->rx_dma_buf
.head
= port
->rx_dma_buf
.tail
;
340 spin_unlock_irqrestore(&self
->lock
, flags
);
343 static irqreturn_t
bfin_sir_dma_rx_int(int irq
, void *dev_id
)
345 struct net_device
*dev
= dev_id
;
346 struct bfin_sir_self
*self
= netdev_priv(dev
);
347 struct bfin_sir_port
*port
= self
->sir_port
;
348 unsigned short irqstat
;
350 spin_lock(&self
->lock
);
352 port
->rx_dma_nrows
++;
353 port
->rx_dma_buf
.tail
= DMA_SIR_RX_XCNT
* port
->rx_dma_nrows
;
354 bfin_sir_dma_rx_chars(dev
);
355 if (port
->rx_dma_nrows
>= DMA_SIR_RX_YCNT
) {
356 port
->rx_dma_nrows
= 0;
357 port
->rx_dma_buf
.tail
= 0;
359 port
->rx_dma_buf
.head
= port
->rx_dma_buf
.tail
;
361 irqstat
= get_dma_curr_irqstat(port
->rx_dma_channel
);
362 clear_dma_irqstat(port
->rx_dma_channel
);
363 spin_unlock(&self
->lock
);
365 mod_timer(&port
->rx_dma_timer
, jiffies
+ DMA_SIR_RX_FLUSH_JIFS
);
368 #endif /* CONFIG_SIR_BFIN_DMA */
370 static int bfin_sir_startup(struct bfin_sir_port
*port
, struct net_device
*dev
)
372 #ifdef CONFIG_SIR_BFIN_DMA
373 dma_addr_t dma_handle
;
374 #endif /* CONFIG_SIR_BFIN_DMA */
376 if (request_dma(port
->rx_dma_channel
, "BFIN_UART_RX") < 0) {
377 dev_warn(&dev
->dev
, "Unable to attach SIR RX DMA channel\n");
381 if (request_dma(port
->tx_dma_channel
, "BFIN_UART_TX") < 0) {
382 dev_warn(&dev
->dev
, "Unable to attach SIR TX DMA channel\n");
383 free_dma(port
->rx_dma_channel
);
387 #ifdef CONFIG_SIR_BFIN_DMA
389 set_dma_callback(port
->rx_dma_channel
, bfin_sir_dma_rx_int
, dev
);
390 set_dma_callback(port
->tx_dma_channel
, bfin_sir_dma_tx_int
, dev
);
392 port
->rx_dma_buf
.buf
= (unsigned char *)dma_alloc_coherent(NULL
, PAGE_SIZE
, &dma_handle
, GFP_DMA
);
393 port
->rx_dma_buf
.head
= 0;
394 port
->rx_dma_buf
.tail
= 0;
395 port
->rx_dma_nrows
= 0;
397 set_dma_config(port
->rx_dma_channel
,
398 set_bfin_dma_config(DIR_WRITE
, DMA_FLOW_AUTO
,
399 INTR_ON_ROW
, DIMENSION_2D
,
400 DATA_SIZE_8
, DMA_SYNC_RESTART
));
401 set_dma_x_count(port
->rx_dma_channel
, DMA_SIR_RX_XCNT
);
402 set_dma_x_modify(port
->rx_dma_channel
, 1);
403 set_dma_y_count(port
->rx_dma_channel
, DMA_SIR_RX_YCNT
);
404 set_dma_y_modify(port
->rx_dma_channel
, 1);
405 set_dma_start_addr(port
->rx_dma_channel
, (unsigned long)port
->rx_dma_buf
.buf
);
406 enable_dma(port
->rx_dma_channel
);
408 port
->rx_dma_timer
.data
= (unsigned long)(dev
);
409 port
->rx_dma_timer
.function
= (void *)bfin_sir_rx_dma_timeout
;
413 if (request_irq(port
->irq
, bfin_sir_rx_int
, IRQF_DISABLED
, "BFIN_SIR_RX", dev
)) {
414 dev_warn(&dev
->dev
, "Unable to attach SIR RX interrupt\n");
418 if (request_irq(port
->irq
+1, bfin_sir_tx_int
, IRQF_DISABLED
, "BFIN_SIR_TX", dev
)) {
419 dev_warn(&dev
->dev
, "Unable to attach SIR TX interrupt\n");
420 free_irq(port
->irq
, dev
);
428 static void bfin_sir_shutdown(struct bfin_sir_port
*port
, struct net_device
*dev
)
432 bfin_sir_stop_rx(port
);
433 SIR_UART_DISABLE_INTS(port
);
435 val
= SIR_UART_GET_GCTL(port
);
436 val
&= ~(UCEN
| IREN
| RPOLC
);
437 SIR_UART_PUT_GCTL(port
, val
);
439 #ifdef CONFIG_SIR_BFIN_DMA
440 disable_dma(port
->tx_dma_channel
);
441 disable_dma(port
->rx_dma_channel
);
442 del_timer(&(port
->rx_dma_timer
));
443 dma_free_coherent(NULL
, PAGE_SIZE
, port
->rx_dma_buf
.buf
, 0);
445 free_irq(port
->irq
+1, dev
);
446 free_irq(port
->irq
, dev
);
448 free_dma(port
->tx_dma_channel
);
449 free_dma(port
->rx_dma_channel
);
453 static int bfin_sir_suspend(struct platform_device
*pdev
, pm_message_t state
)
455 struct bfin_sir_port
*sir_port
;
456 struct net_device
*dev
;
457 struct bfin_sir_self
*self
;
459 sir_port
= platform_get_drvdata(pdev
);
464 self
= netdev_priv(dev
);
466 flush_work(&self
->work
);
467 bfin_sir_shutdown(self
->sir_port
, dev
);
468 netif_device_detach(dev
);
473 static int bfin_sir_resume(struct platform_device
*pdev
)
475 struct bfin_sir_port
*sir_port
;
476 struct net_device
*dev
;
477 struct bfin_sir_self
*self
;
478 struct bfin_sir_port
*port
;
480 sir_port
= platform_get_drvdata(pdev
);
485 self
= netdev_priv(dev
);
486 port
= self
->sir_port
;
488 if (self
->newspeed
) {
489 self
->speed
= self
->newspeed
;
492 bfin_sir_startup(port
, dev
);
493 bfin_sir_set_speed(port
, 9600);
494 bfin_sir_enable_rx(port
);
495 netif_device_attach(dev
);
/* NOTE(review): these stubs presumably sit in the #else branch of an
 * #ifdef CONFIG_PM that is not visible in the damaged source — confirm. */
#define bfin_sir_suspend NULL
#define bfin_sir_resume NULL
504 static void bfin_sir_send_work(struct work_struct
*work
)
506 struct bfin_sir_self
*self
= container_of(work
, struct bfin_sir_self
, work
);
507 struct net_device
*dev
= self
->sir_port
->dev
;
508 struct bfin_sir_port
*port
= self
->sir_port
;
512 while (bfin_sir_is_receiving(dev
) && --tx_cnt
)
513 turnaround_delay(dev
->last_rx
, self
->mtt
);
515 bfin_sir_stop_rx(port
);
517 /* To avoid losting RX interrupt, we reset IR function before
518 * sending data. We also can set the speed, which will
519 * reset all the UART.
521 val
= SIR_UART_GET_GCTL(port
);
522 val
&= ~(IREN
| RPOLC
);
523 SIR_UART_PUT_GCTL(port
, val
);
526 SIR_UART_PUT_GCTL(port
, val
);
528 /* bfin_sir_set_speed(port, self->speed); */
530 #ifdef CONFIG_SIR_BFIN_DMA
531 bfin_sir_dma_tx_chars(dev
);
533 bfin_sir_enable_tx(port
);
534 dev
->trans_start
= jiffies
;
537 static int bfin_sir_hard_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
539 struct bfin_sir_self
*self
= netdev_priv(dev
);
540 int speed
= irda_get_next_speed(skb
);
542 netif_stop_queue(dev
);
544 self
->mtt
= irda_get_mtt(skb
);
546 if (speed
!= self
->speed
&& speed
!= -1)
547 self
->newspeed
= speed
;
549 self
->tx_buff
.data
= self
->tx_buff
.head
;
551 self
->tx_buff
.len
= 0;
553 self
->tx_buff
.len
= async_wrap_skb(skb
, self
->tx_buff
.data
, self
->tx_buff
.truesize
);
555 schedule_work(&self
->work
);
561 static int bfin_sir_ioctl(struct net_device
*dev
, struct ifreq
*ifreq
, int cmd
)
563 struct if_irda_req
*rq
= (struct if_irda_req
*)ifreq
;
564 struct bfin_sir_self
*self
= netdev_priv(dev
);
565 struct bfin_sir_port
*port
= self
->sir_port
;
570 if (capable(CAP_NET_ADMIN
)) {
572 ret
= bfin_sir_set_speed(port
, rq
->ifr_baudrate
);
573 bfin_sir_enable_rx(port
);
575 dev_warn(&dev
->dev
, "SIOCSBANDWIDTH: !netif_running\n");
583 if (capable(CAP_NET_ADMIN
)) {
584 irda_device_set_media_busy(dev
, TRUE
);
590 rq
->ifr_receiving
= bfin_sir_is_receiving(dev
);
601 static struct net_device_stats
*bfin_sir_stats(struct net_device
*dev
)
603 struct bfin_sir_self
*self
= netdev_priv(dev
);
608 static int bfin_sir_open(struct net_device
*dev
)
610 struct bfin_sir_self
*self
= netdev_priv(dev
);
611 struct bfin_sir_port
*port
= self
->sir_port
;
617 spin_lock_init(&self
->lock
);
619 err
= bfin_sir_startup(port
, dev
);
623 bfin_sir_set_speed(port
, 9600);
625 self
->irlap
= irlap_open(dev
, &self
->qos
, DRIVER_NAME
);
629 INIT_WORK(&self
->work
, bfin_sir_send_work
);
632 * Now enable the interrupt then start the queue
635 bfin_sir_enable_rx(port
);
637 netif_start_queue(dev
);
643 bfin_sir_shutdown(port
, dev
);
648 static int bfin_sir_stop(struct net_device
*dev
)
650 struct bfin_sir_self
*self
= netdev_priv(dev
);
652 flush_work(&self
->work
);
653 bfin_sir_shutdown(self
->sir_port
, dev
);
656 dev_kfree_skb(self
->rxskb
);
662 irlap_close(self
->irlap
);
666 netif_stop_queue(dev
);
672 static int bfin_sir_init_iobuf(iobuff_t
*io
, int size
)
674 io
->head
= kmalloc(size
, GFP_KERNEL
);
678 io
->in_frame
= FALSE
;
679 io
->state
= OUTSIDE_FRAME
;
684 static const struct net_device_ops bfin_sir_ndo
= {
685 .ndo_open
= bfin_sir_open
,
686 .ndo_stop
= bfin_sir_stop
,
687 .ndo_start_xmit
= bfin_sir_hard_xmit
,
688 .ndo_do_ioctl
= bfin_sir_ioctl
,
689 .ndo_get_stats
= bfin_sir_stats
,
692 static int __devinit
bfin_sir_probe(struct platform_device
*pdev
)
694 struct net_device
*dev
;
695 struct bfin_sir_self
*self
;
696 unsigned int baudrate_mask
;
697 struct bfin_sir_port
*sir_port
;
700 if (pdev
->id
>= 0 && pdev
->id
< ARRAY_SIZE(per
) && \
701 per
[pdev
->id
][3] == pdev
->id
) {
702 err
= peripheral_request_list(per
[pdev
->id
], DRIVER_NAME
);
706 dev_err(&pdev
->dev
, "Invalid pdev id, please check board file\n");
711 sir_port
= kmalloc(sizeof(*sir_port
), GFP_KERNEL
);
715 bfin_sir_init_ports(sir_port
, pdev
);
717 dev
= alloc_irdadev(sizeof(*self
));
721 self
= netdev_priv(dev
);
722 self
->dev
= &pdev
->dev
;
723 self
->sir_port
= sir_port
;
726 err
= bfin_sir_init_iobuf(&self
->rx_buff
, IRDA_SKB_MAX_MTU
);
729 err
= bfin_sir_init_iobuf(&self
->tx_buff
, IRDA_SIR_MAX_FRAME
);
733 dev
->netdev_ops
= &bfin_sir_ndo
;
734 dev
->irq
= sir_port
->irq
;
736 irda_init_max_qos_capabilies(&self
->qos
);
738 baudrate_mask
= IR_9600
;
742 baudrate_mask
|= IR_115200
;
744 baudrate_mask
|= IR_57600
;
746 baudrate_mask
|= IR_38400
;
748 baudrate_mask
|= IR_19200
;
752 dev_warn(&pdev
->dev
, "Invalid maximum baud rate, using 9600\n");
755 self
->qos
.baud_rate
.bits
&= baudrate_mask
;
757 self
->qos
.min_turn_time
.bits
= 1; /* 10 ms or more */
759 irda_qos_bits_to_value(&self
->qos
);
761 err
= register_netdev(dev
);
764 kfree(self
->tx_buff
.head
);
766 kfree(self
->rx_buff
.head
);
772 peripheral_free_list(per
[pdev
->id
]);
774 platform_set_drvdata(pdev
, sir_port
);
779 static int __devexit
bfin_sir_remove(struct platform_device
*pdev
)
781 struct bfin_sir_port
*sir_port
;
782 struct net_device
*dev
= NULL
;
783 struct bfin_sir_self
*self
;
785 sir_port
= platform_get_drvdata(pdev
);
789 self
= netdev_priv(dev
);
790 unregister_netdev(dev
);
791 kfree(self
->tx_buff
.head
);
792 kfree(self
->rx_buff
.head
);
795 platform_set_drvdata(pdev
, NULL
);
800 static struct platform_driver bfin_ir_driver
= {
801 .probe
= bfin_sir_probe
,
802 .remove
= __devexit_p(bfin_sir_remove
),
803 .suspend
= bfin_sir_suspend
,
804 .resume
= bfin_sir_resume
,
810 static int __init
bfin_sir_init(void)
812 return platform_driver_register(&bfin_ir_driver
);
815 static void __exit
bfin_sir_exit(void)
817 platform_driver_unregister(&bfin_ir_driver
);
820 module_init(bfin_sir_init
);
821 module_exit(bfin_sir_exit
);
823 module_param(max_rate
, int, 0);
824 MODULE_PARM_DESC(max_rate
, "Maximum baud rate (115200, 57600, 38400, 19200, 9600)");
826 MODULE_AUTHOR("Graf Yang <graf.yang@analog.com>");
827 MODULE_DESCRIPTION("Blackfin IrDA driver");
828 MODULE_LICENSE("GPL");