4 * Copyright (C) 2010 Renesas Solutions Corp.
5 * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
8 * Copyright (C) 2009 Renesas Solutions Corp.
9 * Copyright 2006-2009 Analog Devices Inc.
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
19 * This driver is very simple.
20 * So, it doesn't have below support now
22 * - DMA transfer support
26 #include <linux/interrupt.h>
27 #include <linux/module.h>
28 #include <linux/platform_device.h>
29 #include <linux/pm_runtime.h>
30 #include <linux/clk.h>
31 #include <net/irda/wrapper.h>
32 #include <net/irda/irda_device.h>
#define DRIVER_NAME "sh_irda"

/*
 * IrDA buffer RAM size differs between SoC variants.
 * NOTE(review): the #else/#endif below were missing in the mangled source;
 * restored so the conditional is balanced.
 */
#if defined(CONFIG_ARCH_SH7367) || defined(CONFIG_ARCH_SH7377)
#define __IRDARAM_LEN	0x13FF
#else
#define __IRDARAM_LEN	0x1039
#endif

/* Register map (byte offsets from membase) */
#define IRTMR		0x1F00 /* Transfer mode */
#define IRCFR		0x1F02 /* Configuration */
#define IRCTR		0x1F04 /* IR control */
#define IRTFLR		0x1F20 /* Transmit frame length */
#define IRTCTR		0x1F22 /* Transmit control */
#define IRRFLR		0x1F40 /* Receive frame length */
#define IRRCTR		0x1F42 /* Receive control */
#define SIRISR		0x1F60 /* SIR-UART mode interrupt source */
#define SIRIMR		0x1F62 /* SIR-UART mode interrupt mask */
#define SIRICR		0x1F64 /* SIR-UART mode interrupt clear */
#define SIRBCR		0x1F68 /* SIR-UART mode baud rate count */
#define MFIRISR		0x1F70 /* MIR/FIR mode interrupt source */
#define MFIRIMR		0x1F72 /* MIR/FIR mode interrupt mask */
#define MFIRICR		0x1F74 /* MIR/FIR mode interrupt clear */
#define CRCCTR		0x1F80 /* CRC engine control */
#define CRCIR		0x1F86 /* CRC engine input data */
#define CRCCR		0x1F8A /* CRC engine calculation */
#define CRCOR		0x1F8E /* CRC engine output data */
#define FIFOCP		0x1FC0 /* FIFO current pointer */
#define FIFOFP		0x1FC2 /* FIFO follow pointer */
#define FIFORSMSK	0x1FC4 /* FIFO receive status mask */
#define FIFORSOR	0x1FC6 /* FIFO receive status OR */
#define FIFOSEL		0x1FC8 /* FIFO select */
#define FIFORS		0x1FCA /* FIFO receive status */
#define FIFORFL		0x1FCC /* FIFO receive frame length */
#define FIFORAMCP	0x1FCE /* FIFO RAM current pointer */
#define FIFORAMFP	0x1FD0 /* FIFO RAM follow pointer */
#define BIFCTL		0x1FD2 /* BUS interface control */
#define IRDARAM		0x0000 /* IrDA buffer RAM */
#define IRDARAM_LEN	__IRDARAM_LEN /* - 8/16/32 (read-only for 32) */

/* IRTMR */
#define TMD_MASK	(0x3 << 14) /* Transfer Mode */
#define TMD_SIR		(0x0 << 14)
#define TMD_MIR		(0x3 << 14)
#define TMD_FIR		(0x2 << 14)

#define FIFORIM		(1 << 8) /* FIFO receive interrupt mask */
#define MIM		(1 << 4) /* MIR/FIR Interrupt Mask */
#define SIM		(1 << 0) /* SIR Interrupt Mask */
#define xIM_MASK	(FIFORIM | MIM | SIM)

/* IRCFR */
#define RTO_SHIFT	8 /* shift for Receive Timeout */
#define RTO		(0x3 << RTO_SHIFT)

/* IRTCTR */
#define ARMOD		(1 << 15) /* Auto-Receive Mode */
#define TE		(1 <<  0) /* Transmit Enable */

/* IRRFLR */
#define RFL_MASK	(0x1FFF) /* mask for Receive Frame Length */

/* IRRCTR */
#define RE		(1 <<  0) /* Receive Enable */

/*
 * SIRISR,  SIRIMR,  SIRICR,
 * MFIRISR, MFIRIMR, MFIRICR
 */
#define FRE		(1 << 15) /* Frame Receive End */
#define TROV		(1 << 11) /* Transfer Area Overflow */
#define xIR_9		(1 <<  9)
#define TOT		xIR_9     /* for SIR     Timeout */
#define ABTD		xIR_9     /* for MIR/FIR Abort Detection */
#define xIR_8		(1 <<  8)
#define FER		xIR_8     /* for SIR     Framing Error */
#define CRCER		xIR_8     /* for MIR/FIR CRC error */
#define FTE		(1 <<  7) /* Frame Transmit End */
#define xIR_MASK	(FRE | TROV | xIR_9 | xIR_8 | FTE)

/* SIRBCR */
#define BRC_MASK	(0x3F) /* mask for Baud Rate Count */

/* CRCCTR */
#define CRC_RST		(1 << 15) /* CRC Engine Reset */
#define CRC_CT_MASK	0x0FFF    /* mask for CRC Engine Input Data Count */

/* CRCIR */
#define CRC_IN_MASK	0x0FFF    /* mask for CRC Engine Input Data */
123 /************************************************************************
129 ************************************************************************/
138 struct sh_irda_xir_func
{
139 int (*xir_fre
) (struct sh_irda_self
*self
);
140 int (*xir_trov
) (struct sh_irda_self
*self
);
141 int (*xir_9
) (struct sh_irda_self
*self
);
142 int (*xir_8
) (struct sh_irda_self
*self
);
143 int (*xir_fte
) (struct sh_irda_self
*self
);
146 struct sh_irda_self
{
147 void __iomem
*membase
;
149 struct platform_device
*pdev
;
151 struct net_device
*ndev
;
153 struct irlap_cb
*irlap
;
159 enum sh_irda_mode mode
;
162 struct sh_irda_xir_func
*xir_func
;
165 /************************************************************************
171 ************************************************************************/
172 static void sh_irda_write(struct sh_irda_self
*self
, u32 offset
, u16 data
)
176 spin_lock_irqsave(&self
->lock
, flags
);
177 iowrite16(data
, self
->membase
+ offset
);
178 spin_unlock_irqrestore(&self
->lock
, flags
);
181 static u16
sh_irda_read(struct sh_irda_self
*self
, u32 offset
)
186 spin_lock_irqsave(&self
->lock
, flags
);
187 ret
= ioread16(self
->membase
+ offset
);
188 spin_unlock_irqrestore(&self
->lock
, flags
);
193 static void sh_irda_update_bits(struct sh_irda_self
*self
, u32 offset
,
199 spin_lock_irqsave(&self
->lock
, flags
);
200 old
= ioread16(self
->membase
+ offset
);
201 new = (old
& ~mask
) | data
;
203 iowrite16(data
, self
->membase
+ offset
);
204 spin_unlock_irqrestore(&self
->lock
, flags
);
207 /************************************************************************
213 ************************************************************************/
214 /*=====================================
218 *=====================================*/
219 static void sh_irda_rcv_ctrl(struct sh_irda_self
*self
, int enable
)
221 struct device
*dev
= &self
->ndev
->dev
;
223 sh_irda_update_bits(self
, IRRCTR
, RE
, enable
? RE
: 0);
224 dev_dbg(dev
, "recv %s\n", enable
? "enable" : "disable");
227 static int sh_irda_set_timeout(struct sh_irda_self
*self
, int interval
)
229 struct device
*dev
= &self
->ndev
->dev
;
231 if (SH_IRDA_SIR
!= self
->mode
)
234 if (interval
< 0 || interval
> 2) {
235 dev_err(dev
, "unsupported timeout interval\n");
239 sh_irda_update_bits(self
, IRCFR
, RTO
, interval
<< RTO_SHIFT
);
243 static int sh_irda_set_baudrate(struct sh_irda_self
*self
, int baudrate
)
245 struct device
*dev
= &self
->ndev
->dev
;
251 if (SH_IRDA_SIR
!= self
->mode
) {
252 dev_err(dev
, "it is not SIR mode\n");
257 * Baud rate (bits/s) =
258 * (48 MHz / 26) / (baud rate counter value + 1) x 16
260 val
= (48000000 / 26 / 16 / baudrate
) - 1;
261 dev_dbg(dev
, "baudrate = %d, val = 0x%02x\n", baudrate
, val
);
263 sh_irda_update_bits(self
, SIRBCR
, BRC_MASK
, val
);
268 static int sh_irda_get_rcv_length(struct sh_irda_self
*self
)
270 return RFL_MASK
& sh_irda_read(self
, IRRFLR
);
273 /*=====================================
277 *=====================================*/
278 static int sh_irda_xir_fre(struct sh_irda_self
*self
)
280 struct device
*dev
= &self
->ndev
->dev
;
281 dev_err(dev
, "none mode: frame recv\n");
285 static int sh_irda_xir_trov(struct sh_irda_self
*self
)
287 struct device
*dev
= &self
->ndev
->dev
;
288 dev_err(dev
, "none mode: buffer ram over\n");
292 static int sh_irda_xir_9(struct sh_irda_self
*self
)
294 struct device
*dev
= &self
->ndev
->dev
;
295 dev_err(dev
, "none mode: time over\n");
299 static int sh_irda_xir_8(struct sh_irda_self
*self
)
301 struct device
*dev
= &self
->ndev
->dev
;
302 dev_err(dev
, "none mode: framing error\n");
306 static int sh_irda_xir_fte(struct sh_irda_self
*self
)
308 struct device
*dev
= &self
->ndev
->dev
;
309 dev_err(dev
, "none mode: frame transmit end\n");
313 static struct sh_irda_xir_func sh_irda_xir_func
= {
314 .xir_fre
= sh_irda_xir_fre
,
315 .xir_trov
= sh_irda_xir_trov
,
316 .xir_9
= sh_irda_xir_9
,
317 .xir_8
= sh_irda_xir_8
,
318 .xir_fte
= sh_irda_xir_fte
,
321 /*=====================================
325 * MIR/FIR are not supported now
326 *=====================================*/
327 static struct sh_irda_xir_func sh_irda_mfir_func
= {
328 .xir_fre
= sh_irda_xir_fre
,
329 .xir_trov
= sh_irda_xir_trov
,
330 .xir_9
= sh_irda_xir_9
,
331 .xir_8
= sh_irda_xir_8
,
332 .xir_fte
= sh_irda_xir_fte
,
335 /*=====================================
339 *=====================================*/
340 static int sh_irda_sir_fre(struct sh_irda_self
*self
)
342 struct device
*dev
= &self
->ndev
->dev
;
344 u8
*data
= (u8
*)&data16
;
345 int len
= sh_irda_get_rcv_length(self
);
348 if (len
> IRDARAM_LEN
)
351 dev_dbg(dev
, "frame recv length = %d\n", len
);
353 for (i
= 0; i
< len
; i
++) {
356 data16
= sh_irda_read(self
, IRDARAM
+ i
);
358 async_unwrap_char(self
->ndev
, &self
->ndev
->stats
,
359 &self
->rx_buff
, data
[j
]);
361 self
->ndev
->last_rx
= jiffies
;
363 sh_irda_rcv_ctrl(self
, 1);
368 static int sh_irda_sir_trov(struct sh_irda_self
*self
)
370 struct device
*dev
= &self
->ndev
->dev
;
372 dev_err(dev
, "buffer ram over\n");
373 sh_irda_rcv_ctrl(self
, 1);
377 static int sh_irda_sir_tot(struct sh_irda_self
*self
)
379 struct device
*dev
= &self
->ndev
->dev
;
381 dev_err(dev
, "time over\n");
382 sh_irda_set_baudrate(self
, 9600);
383 sh_irda_rcv_ctrl(self
, 1);
387 static int sh_irda_sir_fer(struct sh_irda_self
*self
)
389 struct device
*dev
= &self
->ndev
->dev
;
391 dev_err(dev
, "framing error\n");
392 sh_irda_rcv_ctrl(self
, 1);
396 static int sh_irda_sir_fte(struct sh_irda_self
*self
)
398 struct device
*dev
= &self
->ndev
->dev
;
400 dev_dbg(dev
, "frame transmit end\n");
401 netif_wake_queue(self
->ndev
);
406 static struct sh_irda_xir_func sh_irda_sir_func
= {
407 .xir_fre
= sh_irda_sir_fre
,
408 .xir_trov
= sh_irda_sir_trov
,
409 .xir_9
= sh_irda_sir_tot
,
410 .xir_8
= sh_irda_sir_fer
,
411 .xir_fte
= sh_irda_sir_fte
,
414 static void sh_irda_set_mode(struct sh_irda_self
*self
, enum sh_irda_mode mode
)
416 struct device
*dev
= &self
->ndev
->dev
;
417 struct sh_irda_xir_func
*func
;
425 func
= &sh_irda_sir_func
;
430 func
= &sh_irda_mfir_func
;
435 func
= &sh_irda_mfir_func
;
440 func
= &sh_irda_xir_func
;
445 self
->xir_func
= func
;
446 sh_irda_update_bits(self
, IRTMR
, TMD_MASK
, data
);
448 dev_dbg(dev
, "switch to %s mode", name
);
451 /************************************************************************
457 ************************************************************************/
458 static void sh_irda_set_irq_mask(struct sh_irda_self
*self
)
464 sh_irda_update_bits(self
, IRTMR
, xIM_MASK
, xIM_MASK
);
465 sh_irda_update_bits(self
, SIRIMR
, xIR_MASK
, xIR_MASK
);
466 sh_irda_update_bits(self
, MFIRIMR
, xIR_MASK
, xIR_MASK
);
469 sh_irda_update_bits(self
, SIRICR
, xIR_MASK
, xIR_MASK
);
470 sh_irda_update_bits(self
, MFIRICR
, xIR_MASK
, xIR_MASK
);
472 switch (self
->mode
) {
490 sh_irda_update_bits(self
, IRTMR
, tmr_hole
, 0);
491 sh_irda_update_bits(self
, xir_reg
, xIR_MASK
, 0);
495 static irqreturn_t
sh_irda_irq(int irq
, void *dev_id
)
497 struct sh_irda_self
*self
= dev_id
;
498 struct sh_irda_xir_func
*func
= self
->xir_func
;
499 u16 isr
= sh_irda_read(self
, SIRISR
);
502 sh_irda_write(self
, SIRICR
, isr
);
507 func
->xir_trov(self
);
518 /************************************************************************
524 ************************************************************************/
525 static void sh_irda_crc_reset(struct sh_irda_self
*self
)
527 sh_irda_write(self
, CRCCTR
, CRC_RST
);
530 static void sh_irda_crc_add(struct sh_irda_self
*self
, u16 data
)
532 sh_irda_write(self
, CRCIR
, data
& CRC_IN_MASK
);
535 static u16
sh_irda_crc_cnt(struct sh_irda_self
*self
)
537 return CRC_CT_MASK
& sh_irda_read(self
, CRCCTR
);
540 static u16
sh_irda_crc_out(struct sh_irda_self
*self
)
542 return sh_irda_read(self
, CRCOR
);
545 static int sh_irda_crc_init(struct sh_irda_self
*self
)
547 struct device
*dev
= &self
->ndev
->dev
;
551 sh_irda_crc_reset(self
);
553 sh_irda_crc_add(self
, 0xCC);
554 sh_irda_crc_add(self
, 0xF5);
555 sh_irda_crc_add(self
, 0xF1);
556 sh_irda_crc_add(self
, 0xA7);
558 val
= sh_irda_crc_cnt(self
);
560 dev_err(dev
, "CRC count error %x\n", val
);
564 val
= sh_irda_crc_out(self
);
566 dev_err(dev
, "CRC result error%x\n", val
);
574 sh_irda_crc_reset(self
);
578 /************************************************************************
584 ************************************************************************/
585 static void sh_irda_remove_iobuf(struct sh_irda_self
*self
)
587 kfree(self
->rx_buff
.head
);
589 self
->tx_buff
.head
= NULL
;
590 self
->tx_buff
.data
= NULL
;
591 self
->rx_buff
.head
= NULL
;
592 self
->rx_buff
.data
= NULL
;
595 static int sh_irda_init_iobuf(struct sh_irda_self
*self
, int rxsize
, int txsize
)
597 if (self
->rx_buff
.head
||
598 self
->tx_buff
.head
) {
599 dev_err(&self
->ndev
->dev
, "iobuff has already existed.");
604 self
->rx_buff
.head
= kmalloc(rxsize
, GFP_KERNEL
);
605 if (!self
->rx_buff
.head
)
608 self
->rx_buff
.truesize
= rxsize
;
609 self
->rx_buff
.in_frame
= FALSE
;
610 self
->rx_buff
.state
= OUTSIDE_FRAME
;
611 self
->rx_buff
.data
= self
->rx_buff
.head
;
614 self
->tx_buff
.head
= self
->membase
+ IRDARAM
;
615 self
->tx_buff
.truesize
= IRDARAM_LEN
;
620 /************************************************************************
623 net_device_ops function
626 ************************************************************************/
627 static int sh_irda_hard_xmit(struct sk_buff
*skb
, struct net_device
*ndev
)
629 struct sh_irda_self
*self
= netdev_priv(ndev
);
630 struct device
*dev
= &self
->ndev
->dev
;
631 int speed
= irda_get_next_speed(skb
);
634 dev_dbg(dev
, "hard xmit\n");
636 netif_stop_queue(ndev
);
637 sh_irda_rcv_ctrl(self
, 0);
639 ret
= sh_irda_set_baudrate(self
, speed
);
641 goto sh_irda_hard_xmit_end
;
643 self
->tx_buff
.len
= 0;
647 spin_lock_irqsave(&self
->lock
, flags
);
648 self
->tx_buff
.len
= async_wrap_skb(skb
,
650 self
->tx_buff
.truesize
);
651 spin_unlock_irqrestore(&self
->lock
, flags
);
653 if (self
->tx_buff
.len
> self
->tx_buff
.truesize
)
654 self
->tx_buff
.len
= self
->tx_buff
.truesize
;
656 sh_irda_write(self
, IRTFLR
, self
->tx_buff
.len
);
657 sh_irda_write(self
, IRTCTR
, ARMOD
| TE
);
659 goto sh_irda_hard_xmit_end
;
665 sh_irda_hard_xmit_end
:
666 sh_irda_set_baudrate(self
, 9600);
667 netif_wake_queue(self
->ndev
);
668 sh_irda_rcv_ctrl(self
, 1);
/*
 * .ndo_do_ioctl stub.
 * This function is needed for irda framework.
 * But nothing to do now
 */
static int sh_irda_ioctl(struct net_device *ndev, struct ifreq *ifreq, int cmd)
{
	return 0;
}
686 static struct net_device_stats
*sh_irda_stats(struct net_device
*ndev
)
688 struct sh_irda_self
*self
= netdev_priv(ndev
);
690 return &self
->ndev
->stats
;
693 static int sh_irda_open(struct net_device
*ndev
)
695 struct sh_irda_self
*self
= netdev_priv(ndev
);
698 pm_runtime_get_sync(&self
->pdev
->dev
);
699 err
= sh_irda_crc_init(self
);
703 sh_irda_set_mode(self
, SH_IRDA_SIR
);
704 sh_irda_set_timeout(self
, 2);
705 sh_irda_set_baudrate(self
, 9600);
707 self
->irlap
= irlap_open(ndev
, &self
->qos
, DRIVER_NAME
);
713 netif_start_queue(ndev
);
714 sh_irda_rcv_ctrl(self
, 1);
715 sh_irda_set_irq_mask(self
);
717 dev_info(&ndev
->dev
, "opened\n");
722 pm_runtime_put_sync(&self
->pdev
->dev
);
727 static int sh_irda_stop(struct net_device
*ndev
)
729 struct sh_irda_self
*self
= netdev_priv(ndev
);
733 irlap_close(self
->irlap
);
737 netif_stop_queue(ndev
);
738 pm_runtime_put_sync(&self
->pdev
->dev
);
740 dev_info(&ndev
->dev
, "stoped\n");
745 static const struct net_device_ops sh_irda_ndo
= {
746 .ndo_open
= sh_irda_open
,
747 .ndo_stop
= sh_irda_stop
,
748 .ndo_start_xmit
= sh_irda_hard_xmit
,
749 .ndo_do_ioctl
= sh_irda_ioctl
,
750 .ndo_get_stats
= sh_irda_stats
,
753 /************************************************************************
756 platform_driver function
759 ************************************************************************/
760 static int __devinit
sh_irda_probe(struct platform_device
*pdev
)
762 struct net_device
*ndev
;
763 struct sh_irda_self
*self
;
764 struct resource
*res
;
768 res
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
769 irq
= platform_get_irq(pdev
, 0);
770 if (!res
|| irq
< 0) {
771 dev_err(&pdev
->dev
, "Not enough platform resources.\n");
775 ndev
= alloc_irdadev(sizeof(*self
));
779 self
= netdev_priv(ndev
);
780 self
->membase
= ioremap_nocache(res
->start
, resource_size(res
));
781 if (!self
->membase
) {
783 dev_err(&pdev
->dev
, "Unable to ioremap.\n");
787 err
= sh_irda_init_iobuf(self
, IRDA_SKB_MAX_MTU
, IRDA_SIR_MAX_FRAME
);
792 pm_runtime_enable(&pdev
->dev
);
794 irda_init_max_qos_capabilies(&self
->qos
);
796 ndev
->netdev_ops
= &sh_irda_ndo
;
800 self
->qos
.baud_rate
.bits
&= IR_9600
; /* FIXME */
801 self
->qos
.min_turn_time
.bits
= 1; /* 10 ms or more */
802 spin_lock_init(&self
->lock
);
804 irda_qos_bits_to_value(&self
->qos
);
806 err
= register_netdev(ndev
);
810 platform_set_drvdata(pdev
, ndev
);
812 if (request_irq(irq
, sh_irda_irq
, IRQF_DISABLED
, "sh_irda", self
)) {
813 dev_warn(&pdev
->dev
, "Unable to attach sh_irda interrupt\n");
817 dev_info(&pdev
->dev
, "SuperH IrDA probed\n");
822 pm_runtime_disable(&pdev
->dev
);
823 sh_irda_remove_iobuf(self
);
825 iounmap(self
->membase
);
832 static int __devexit
sh_irda_remove(struct platform_device
*pdev
)
834 struct net_device
*ndev
= platform_get_drvdata(pdev
);
835 struct sh_irda_self
*self
= netdev_priv(ndev
);
840 unregister_netdev(ndev
);
841 pm_runtime_disable(&pdev
->dev
);
842 sh_irda_remove_iobuf(self
);
843 iounmap(self
->membase
);
845 platform_set_drvdata(pdev
, NULL
);
850 static int sh_irda_runtime_nop(struct device
*dev
)
852 /* Runtime PM callback shared between ->runtime_suspend()
853 * and ->runtime_resume(). Simply returns success.
855 * This driver re-initializes all registers after
856 * pm_runtime_get_sync() anyway so there is no need
857 * to save and restore registers here.
862 static const struct dev_pm_ops sh_irda_pm_ops
= {
863 .runtime_suspend
= sh_irda_runtime_nop
,
864 .runtime_resume
= sh_irda_runtime_nop
,
867 static struct platform_driver sh_irda_driver
= {
868 .probe
= sh_irda_probe
,
869 .remove
= __devexit_p(sh_irda_remove
),
872 .pm
= &sh_irda_pm_ops
,
876 module_platform_driver(sh_irda_driver
);
878 MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>");
879 MODULE_DESCRIPTION("SuperH IrDA driver");
880 MODULE_LICENSE("GPL");