/*
 * SuperH on-chip serial module support.  (SCI with no FIFO / with FIFO)
 *
 * Copyright (C) 2002 - 2011  Paul Mundt
 * Modified to support SH7720 SCIF. Markus Brunner, Mark Jonas (Jul 2007).
 *
 * based off of the old drivers/char/sh-sci.c by:
 *
 *   Copyright (C) 1999, 2000  Niibe Yutaka
 *   Copyright (C) 2000  Sugioka Toshinobu
 *   Modified to support multiple serial ports. Stuart Menefy (May 2000).
 *   Modified to support SecureEdge. David McCullough (2002)
 *   Modified to support SH7300 SCIF. Takashi Kusuda (Jun 2003).
 *   Removed SH7300 support (Jul 2007).
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#if defined(CONFIG_SERIAL_SH_SCI_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
#define SUPPORT_SYSRQ
#endif
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial.h>
#include <linux/major.h>
#include <linux/string.h>
#include <linux/sysrq.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/console.h>
#include <linux/platform_device.h>
#include <linux/serial_sci.h>
#include <linux/notifier.h>
#include <linux/pm_runtime.h>
#include <linux/cpufreq.h>
#include <linux/clk.h>
#include <linux/ctype.h>
#include <linux/err.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include <asm/sh_bios.h>

#include "sh-sci.h"
struct sci_port {
	struct uart_port	port;

	/* Platform configuration */
	struct plat_sci_port	*cfg;

	/* Break timer */
	struct timer_list	break_timer;
	int			break_flag;

	/* Interface and function clocks */
	struct clk		*iclk;
	struct clk		*fclk;

	char			*irqstr[SCIx_NR_IRQS];

	struct dma_chan			*chan_tx;
	struct dma_chan			*chan_rx;

#ifdef CONFIG_SERIAL_SH_SCI_DMA
	struct dma_async_tx_descriptor	*desc_tx;
	struct dma_async_tx_descriptor	*desc_rx[2];
	dma_cookie_t			cookie_tx;
	dma_cookie_t			cookie_rx[2];
	dma_cookie_t			active_rx;
	struct scatterlist		sg_tx;
	unsigned int			sg_len_tx;
	struct scatterlist		sg_rx[2];
	size_t				buf_len_rx;
	struct sh_dmae_slave		param_tx;
	struct sh_dmae_slave		param_rx;
	struct work_struct		work_tx;
	struct work_struct		work_rx;
	struct timer_list		rx_timer;
	unsigned int			rx_timeout;
#endif

	struct notifier_block		freq_transition;
};
/* Function prototypes */
static void sci_start_tx(struct uart_port *port);
static void sci_stop_tx(struct uart_port *port);
static void sci_start_rx(struct uart_port *port);

#define SCI_NPORTS CONFIG_SERIAL_SH_SCI_NR_UARTS

static struct sci_port sci_ports[SCI_NPORTS];
static struct uart_driver sci_uart_driver;

static inline struct sci_port *
to_sci_port(struct uart_port *uart)
{
	return container_of(uart, struct sci_port, port);
}
struct plat_sci_reg {
	u8 offset, size;
};

/* Helper for invalidating specific entries of an inherited map. */
#define sci_reg_invalid	{ .offset = 0, .size = 0 }
static struct plat_sci_reg sci_regmap[SCIx_NR_REGTYPES][SCIx_NR_REGS] = {
	[SCIx_PROBE_REGTYPE] = {
		[0 ... SCIx_NR_REGS - 1] = sci_reg_invalid,
	},

	/*
	 * Common SCI definitions, dependent on the port's regshift
	 * value.
	 */
	[SCIx_SCI_REGTYPE] = {
		[SCSMR]		= { 0x00,  8 },
		[SCBRR]		= { 0x01,  8 },
		[SCSCR]		= { 0x02,  8 },
		[SCxTDR]	= { 0x03,  8 },
		[SCxSR]		= { 0x04,  8 },
		[SCxRDR]	= { 0x05,  8 },
		[SCFCR]		= sci_reg_invalid,
		[SCFDR]		= sci_reg_invalid,
		[SCTFDR]	= sci_reg_invalid,
		[SCRFDR]	= sci_reg_invalid,
		[SCSPTR]	= sci_reg_invalid,
		[SCLSR]		= sci_reg_invalid,
	},

	/*
	 * Common definitions for legacy IrDA ports, dependent on
	 * regshift value.
	 */
	[SCIx_IRDA_REGTYPE] = {
		[SCSMR]		= { 0x00,  8 },
		[SCBRR]		= { 0x01,  8 },
		[SCSCR]		= { 0x02,  8 },
		[SCxTDR]	= { 0x03,  8 },
		[SCxSR]		= { 0x04,  8 },
		[SCxRDR]	= { 0x05,  8 },
		[SCFCR]		= { 0x06,  8 },
		[SCFDR]		= { 0x07, 16 },
		[SCTFDR]	= sci_reg_invalid,
		[SCRFDR]	= sci_reg_invalid,
		[SCSPTR]	= sci_reg_invalid,
		[SCLSR]		= sci_reg_invalid,
	},

	/*
	 * Common SCIFA definitions.
	 */
	[SCIx_SCIFA_REGTYPE] = {
		[SCSMR]		= { 0x00, 16 },
		[SCBRR]		= { 0x04,  8 },
		[SCSCR]		= { 0x08, 16 },
		[SCxTDR]	= { 0x20,  8 },
		[SCxSR]		= { 0x14, 16 },
		[SCxRDR]	= { 0x24,  8 },
		[SCFCR]		= { 0x18, 16 },
		[SCFDR]		= { 0x1c, 16 },
		[SCTFDR]	= sci_reg_invalid,
		[SCRFDR]	= sci_reg_invalid,
		[SCSPTR]	= sci_reg_invalid,
		[SCLSR]		= sci_reg_invalid,
	},

	/*
	 * Common SCIFB definitions.
	 */
	[SCIx_SCIFB_REGTYPE] = {
		[SCSMR]		= { 0x00, 16 },
		[SCBRR]		= { 0x04,  8 },
		[SCSCR]		= { 0x08, 16 },
		[SCxTDR]	= { 0x40,  8 },
		[SCxSR]		= { 0x14, 16 },
		[SCxRDR]	= { 0x60,  8 },
		[SCFCR]		= { 0x18, 16 },
		[SCFDR]		= { 0x1c, 16 },
		[SCTFDR]	= sci_reg_invalid,
		[SCRFDR]	= sci_reg_invalid,
		[SCSPTR]	= sci_reg_invalid,
		[SCLSR]		= sci_reg_invalid,
	},

	/*
	 * Common SH-3 SCIF definitions.
	 */
	[SCIx_SH3_SCIF_REGTYPE] = {
		[SCSMR]		= { 0x00,  8 },
		[SCBRR]		= { 0x02,  8 },
		[SCSCR]		= { 0x04,  8 },
		[SCxTDR]	= { 0x06,  8 },
		[SCxSR]		= { 0x08, 16 },
		[SCxRDR]	= { 0x0a,  8 },
		[SCFCR]		= { 0x0c,  8 },
		[SCFDR]		= { 0x0e, 16 },
		[SCTFDR]	= sci_reg_invalid,
		[SCRFDR]	= sci_reg_invalid,
		[SCSPTR]	= sci_reg_invalid,
		[SCLSR]		= sci_reg_invalid,
	},

	/*
	 * Common SH-4(A) SCIF(B) definitions.
	 */
	[SCIx_SH4_SCIF_REGTYPE] = {
		[SCSMR]		= { 0x00, 16 },
		[SCBRR]		= { 0x04,  8 },
		[SCSCR]		= { 0x08, 16 },
		[SCxTDR]	= { 0x0c,  8 },
		[SCxSR]		= { 0x10, 16 },
		[SCxRDR]	= { 0x14,  8 },
		[SCFCR]		= { 0x18, 16 },
		[SCFDR]		= { 0x1c, 16 },
		[SCTFDR]	= sci_reg_invalid,
		[SCRFDR]	= sci_reg_invalid,
		[SCSPTR]	= { 0x20, 16 },
		[SCLSR]		= { 0x24, 16 },
	},

	/*
	 * Common SH-4(A) SCIF(B) definitions for ports without an SCSPTR
	 * register.
	 */
	[SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE] = {
		[SCSMR]		= { 0x00, 16 },
		[SCBRR]		= { 0x04,  8 },
		[SCSCR]		= { 0x08, 16 },
		[SCxTDR]	= { 0x0c,  8 },
		[SCxSR]		= { 0x10, 16 },
		[SCxRDR]	= { 0x14,  8 },
		[SCFCR]		= { 0x18, 16 },
		[SCFDR]		= { 0x1c, 16 },
		[SCTFDR]	= sci_reg_invalid,
		[SCRFDR]	= sci_reg_invalid,
		[SCSPTR]	= sci_reg_invalid,
		[SCLSR]		= { 0x24, 16 },
	},

	/*
	 * Common SH-4(A) SCIF(B) definitions for ports with FIFO data
	 * count registers.
	 */
	[SCIx_SH4_SCIF_FIFODATA_REGTYPE] = {
		[SCSMR]		= { 0x00, 16 },
		[SCBRR]		= { 0x04,  8 },
		[SCSCR]		= { 0x08, 16 },
		[SCxTDR]	= { 0x0c,  8 },
		[SCxSR]		= { 0x10, 16 },
		[SCxRDR]	= { 0x14,  8 },
		[SCFCR]		= { 0x18, 16 },
		[SCFDR]		= { 0x1c, 16 },
		[SCTFDR]	= { 0x1c, 16 },	/* aliased to SCFDR */
		[SCRFDR]	= { 0x20, 16 },
		[SCSPTR]	= { 0x24, 16 },
		[SCLSR]		= { 0x28, 16 },
	},

	/*
	 * SH7705-style SCIF(B) ports, lacking both SCSPTR and SCLSR
	 * registers.
	 */
	[SCIx_SH7705_SCIF_REGTYPE] = {
		[SCSMR]		= { 0x00, 16 },
		[SCBRR]		= { 0x04,  8 },
		[SCSCR]		= { 0x08, 16 },
		[SCxTDR]	= { 0x20,  8 },
		[SCxSR]		= { 0x14, 16 },
		[SCxRDR]	= { 0x24,  8 },
		[SCFCR]		= { 0x18, 16 },
		[SCFDR]		= { 0x1c, 16 },
		[SCTFDR]	= sci_reg_invalid,
		[SCRFDR]	= sci_reg_invalid,
		[SCSPTR]	= sci_reg_invalid,
		[SCLSR]		= sci_reg_invalid,
	},
};
#define sci_getreg(up, offset) (sci_regmap[to_sci_port(up)->cfg->regtype] + offset)

/*
 * The "offset" here is rather misleading, in that it refers to an enum
 * value relative to the port mapping rather than the fixed offset
 * itself, which needs to be manually retrieved from the platform's
 * register map for the given port.
 */
static unsigned int sci_serial_in(struct uart_port *p, int offset)
{
	struct plat_sci_reg *reg = sci_getreg(p, offset);

	if (reg->size == 8)
		return ioread8(p->membase + (reg->offset << p->regshift));
	else if (reg->size == 16)
		return ioread16(p->membase + (reg->offset << p->regshift));
	else
		WARN(1, "Invalid register access\n");

	return 0;
}
static void sci_serial_out(struct uart_port *p, int offset, int value)
{
	struct plat_sci_reg *reg = sci_getreg(p, offset);

	if (reg->size == 8)
		iowrite8(value, p->membase + (reg->offset << p->regshift));
	else if (reg->size == 16)
		iowrite16(value, p->membase + (reg->offset << p->regshift));
	else
		WARN(1, "Invalid register access\n");
}

#define sci_in(up, offset)		(up->serial_in(up, offset))
#define sci_out(up, offset, value)	(up->serial_out(up, offset, value))
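/*
 * Worked example of the lookup above (illustrative only, not part of the
 * original source): for a port using SCIx_SH4_SCIF_REGTYPE with regshift
 * of 0, sci_in(port, SCFDR) goes through sci_getreg(), finds { 0x1c, 16 }
 * in sci_regmap[], and performs a 16-bit read at port->membase + 0x1c. An
 * entry left as sci_reg_invalid has size 0 and instead trips the WARN()
 * in sci_serial_in()/sci_serial_out().
 */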
static int sci_probe_regmap(struct plat_sci_port *cfg)
{
	switch (cfg->type) {
	case PORT_SCI:
		cfg->regtype = SCIx_SCI_REGTYPE;
		break;
	case PORT_IRDA:
		cfg->regtype = SCIx_IRDA_REGTYPE;
		break;
	case PORT_SCIFA:
		cfg->regtype = SCIx_SCIFA_REGTYPE;
		break;
	case PORT_SCIFB:
		cfg->regtype = SCIx_SCIFB_REGTYPE;
		break;
	case PORT_SCIF:
		/*
		 * The SH-4 is a bit of a misnomer here, although that's
		 * where this particular port layout originated. This
		 * configuration (or some slight variation thereof)
		 * remains the dominant model for all SCIFs.
		 */
		cfg->regtype = SCIx_SH4_SCIF_REGTYPE;
		break;
	default:
		printk(KERN_ERR "Can't probe register map for given port\n");
		return -EINVAL;
	}

	return 0;
}
static void sci_port_enable(struct sci_port *sci_port)
{
	if (!sci_port->port.dev)
		return;

	pm_runtime_get_sync(sci_port->port.dev);

	clk_enable(sci_port->iclk);
	sci_port->port.uartclk = clk_get_rate(sci_port->iclk);
	clk_enable(sci_port->fclk);
}

static void sci_port_disable(struct sci_port *sci_port)
{
	if (!sci_port->port.dev)
		return;

	clk_disable(sci_port->fclk);
	clk_disable(sci_port->iclk);

	pm_runtime_put_sync(sci_port->port.dev);
}
#if defined(CONFIG_CONSOLE_POLL) || defined(CONFIG_SERIAL_SH_SCI_CONSOLE)

#ifdef CONFIG_CONSOLE_POLL
static int sci_poll_get_char(struct uart_port *port)
{
	unsigned short status;
	int c;

	do {
		status = sci_in(port, SCxSR);
		if (status & SCxSR_ERRORS(port)) {
			sci_out(port, SCxSR, SCxSR_ERROR_CLEAR(port));
			continue;
		}
		break;
	} while (1);

	if (!(status & SCxSR_RDxF(port)))
		return NO_POLL_CHAR;

	c = sci_in(port, SCxRDR);

	/* Dummy read */
	sci_in(port, SCxSR);
	sci_out(port, SCxSR, SCxSR_RDxF_CLEAR(port));

	return c;
}
#endif /* CONFIG_CONSOLE_POLL */

static void sci_poll_put_char(struct uart_port *port, unsigned char c)
{
	unsigned short status;

	do {
		status = sci_in(port, SCxSR);
	} while (!(status & SCxSR_TDxE(port)));

	sci_out(port, SCxTDR, c);
	sci_out(port, SCxSR, SCxSR_TDxE_CLEAR(port) & ~SCxSR_TEND(port));
}
#endif /* CONFIG_CONSOLE_POLL || CONFIG_SERIAL_SH_SCI_CONSOLE */
static void sci_init_pins(struct uart_port *port, unsigned int cflag)
{
	struct sci_port *s = to_sci_port(port);
	struct plat_sci_reg *reg = sci_regmap[s->cfg->regtype] + SCSPTR;

	/*
	 * Use port-specific handler if provided.
	 */
	if (s->cfg->ops && s->cfg->ops->init_pins) {
		s->cfg->ops->init_pins(port, cflag);
		return;
	}

	/*
	 * For the generic path SCSPTR is necessary. Bail out if that's
	 * unavailable, too.
	 */
	if (!reg->size)
		return;

	if (!(cflag & CRTSCTS))
		sci_out(port, SCSPTR, 0x0080); /* Set RTS = 1 */
}
static int sci_txfill(struct uart_port *port)
{
	struct plat_sci_reg *reg;

	reg = sci_getreg(port, SCTFDR);
	if (reg->size)
		return sci_in(port, SCTFDR) & 0xff;

	reg = sci_getreg(port, SCFDR);
	if (reg->size)
		return sci_in(port, SCFDR) >> 8;

	return !(sci_in(port, SCxSR) & SCI_TDRE);
}

static int sci_txroom(struct uart_port *port)
{
	return port->fifosize - sci_txfill(port);
}

static int sci_rxfill(struct uart_port *port)
{
	struct plat_sci_reg *reg;

	reg = sci_getreg(port, SCRFDR);
	if (reg->size)
		return sci_in(port, SCRFDR) & 0xff;

	reg = sci_getreg(port, SCFDR);
	if (reg->size)
		return sci_in(port, SCFDR) & ((port->fifosize << 1) - 1);

	return (sci_in(port, SCxSR) & SCxSR_RDxF(port)) != 0;
}
/*
 * SCI helper for checking the state of the muxed port/RXD pins.
 */
static inline int sci_rxd_in(struct uart_port *port)
{
	struct sci_port *s = to_sci_port(port);

	if (s->cfg->port_reg <= 0)
		return 1;

	return !!__raw_readb(s->cfg->port_reg);
}

/* ********************************************************************** *
 *                   the interrupt related routines                      *
 * ********************************************************************** */
static void sci_transmit_chars(struct uart_port *port)
{
	struct circ_buf *xmit = &port->state->xmit;
	unsigned int stopped = uart_tx_stopped(port);
	unsigned short status;
	unsigned short ctrl;
	int count;

	status = sci_in(port, SCxSR);
	if (!(status & SCxSR_TDxE(port))) {
		ctrl = sci_in(port, SCSCR);
		if (uart_circ_empty(xmit))
			ctrl &= ~SCSCR_TIE;
		else
			ctrl |= SCSCR_TIE;
		sci_out(port, SCSCR, ctrl);
		return;
	}

	count = sci_txroom(port);

	do {
		unsigned char c;

		if (port->x_char) {
			c = port->x_char;
			port->x_char = 0;
		} else if (!uart_circ_empty(xmit) && !stopped) {
			c = xmit->buf[xmit->tail];
			xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
		} else {
			break;
		}

		sci_out(port, SCxTDR, c);

		port->icount.tx++;
	} while (--count > 0);

	sci_out(port, SCxSR, SCxSR_TDxE_CLEAR(port));

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(port);
	if (uart_circ_empty(xmit)) {
		sci_stop_tx(port);
	} else {
		ctrl = sci_in(port, SCSCR);

		if (port->type != PORT_SCI) {
			sci_in(port, SCxSR); /* Dummy read */
			sci_out(port, SCxSR, SCxSR_TDxE_CLEAR(port));
		}

		ctrl |= SCSCR_TIE;
		sci_out(port, SCSCR, ctrl);
	}
}
/* On SH3, SCIF may read end-of-break as a space->mark char */
#define STEPFN(c)  ({int __c = (c); (((__c-1)|(__c)) == -1); })
static void sci_receive_chars(struct uart_port *port)
{
	struct sci_port *sci_port = to_sci_port(port);
	struct tty_struct *tty = port->state->port.tty;
	int i, count, copied = 0;
	unsigned short status;
	unsigned char flag;

	status = sci_in(port, SCxSR);
	if (!(status & SCxSR_RDxF(port)))
		return;

	while (1) {
		/* Don't copy more bytes than there is room for in the buffer */
		count = tty_buffer_request_room(tty, sci_rxfill(port));

		/* If for any reason we can't copy more data, we're done! */
		if (count == 0)
			break;

		if (port->type == PORT_SCI) {
			char c = sci_in(port, SCxRDR);
			if (uart_handle_sysrq_char(port, c) ||
			    sci_port->break_flag)
				count = 0;
			else
				tty_insert_flip_char(tty, c, TTY_NORMAL);
		} else {
			for (i = 0; i < count; i++) {
				char c = sci_in(port, SCxRDR);
				status = sci_in(port, SCxSR);
#if defined(CONFIG_CPU_SH3)
				/* Skip "chars" during break */
				if (sci_port->break_flag) {
					if ((c == 0) &&
					    (status & SCxSR_FER(port))) {
						count--; i--;
						continue;
					}

					/* Nonzero => end-of-break */
					dev_dbg(port->dev, "debounce<%02x>\n", c);
					sci_port->break_flag = 0;

					if (STEPFN(c)) {
						count--; i--;
						continue;
					}
				}
#endif /* CONFIG_CPU_SH3 */
				if (uart_handle_sysrq_char(port, c)) {
					count--; i--;
					continue;
				}

				/* Store data and status */
				if (status & SCxSR_FER(port)) {
					flag = TTY_FRAME;
					dev_notice(port->dev, "frame error\n");
				} else if (status & SCxSR_PER(port)) {
					flag = TTY_PARITY;
					dev_notice(port->dev, "parity error\n");
				} else
					flag = TTY_NORMAL;

				tty_insert_flip_char(tty, c, flag);
			}
		}

		sci_in(port, SCxSR); /* dummy read */
		sci_out(port, SCxSR, SCxSR_RDxF_CLEAR(port));

		copied += count;
		port->icount.rx += count;
	}

	if (copied) {
		/* Tell the rest of the system the news. New characters! */
		tty_flip_buffer_push(tty);
	} else {
		sci_in(port, SCxSR); /* dummy read */
		sci_out(port, SCxSR, SCxSR_RDxF_CLEAR(port));
	}
}
#define SCI_BREAK_JIFFIES (HZ/20)

/*
 * The sci generates interrupts during the break,
 * 1 per millisecond or so during the break period, for 9600 baud.
 * So don't bother disabling interrupts.
 * But don't want more than 1 break event.
 * Use a kernel timer to periodically poll the rx line until
 * the break is finished.
 */
static inline void sci_schedule_break_timer(struct sci_port *port)
{
	mod_timer(&port->break_timer, jiffies + SCI_BREAK_JIFFIES);
}

/* Ensure that two consecutive samples find the break over. */
static void sci_break_timer(unsigned long data)
{
	struct sci_port *port = (struct sci_port *)data;

	sci_port_enable(port);

	if (sci_rxd_in(&port->port) == 0) {
		port->break_flag = 1;
		sci_schedule_break_timer(port);
	} else if (port->break_flag == 1) {
		/* break is over. */
		port->break_flag = 2;
		sci_schedule_break_timer(port);
	} else
		port->break_flag = 0;

	sci_port_disable(port);
}
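/*
 * Illustrative timing note (not from the original source): SCI_BREAK_JIFFIES
 * is HZ/20, so the RX line is sampled roughly every 50ms. break_flag steps
 * from 0 to 1 while the line stays low, then from 1 to 2 for one extra
 * confirming sample after the line returns high, and finally back to 0, so a
 * break is only considered over once two consecutive samples agree.
 */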
static int sci_handle_errors(struct uart_port *port)
{
	int copied = 0;
	unsigned short status = sci_in(port, SCxSR);
	struct tty_struct *tty = port->state->port.tty;
	struct sci_port *s = to_sci_port(port);

	/*
	 * Handle overruns, if supported.
	 */
	if (s->cfg->overrun_bit != SCIx_NOT_SUPPORTED) {
		if (status & (1 << s->cfg->overrun_bit)) {
			/* overrun error */
			if (tty_insert_flip_char(tty, 0, TTY_OVERRUN))
				copied++;

			dev_notice(port->dev, "overrun error\n");
		}
	}

	if (status & SCxSR_FER(port)) {
		if (sci_rxd_in(port) == 0) {
			/* Notify of BREAK */
			struct sci_port *sci_port = to_sci_port(port);

			if (!sci_port->break_flag) {
				sci_port->break_flag = 1;
				sci_schedule_break_timer(sci_port);

				/* Do sysrq handling. */
				if (uart_handle_break(port))
					return 0;

				dev_dbg(port->dev, "BREAK detected\n");

				if (tty_insert_flip_char(tty, 0, TTY_BREAK))
					copied++;
			}
		} else {
			/* frame error */
			if (tty_insert_flip_char(tty, 0, TTY_FRAME))
				copied++;

			dev_notice(port->dev, "frame error\n");
		}
	}

	if (status & SCxSR_PER(port)) {
		/* parity error */
		if (tty_insert_flip_char(tty, 0, TTY_PARITY))
			copied++;

		dev_notice(port->dev, "parity error\n");
	}

	if (copied)
		tty_flip_buffer_push(tty);

	return copied;
}
static int sci_handle_fifo_overrun(struct uart_port *port)
{
	struct tty_struct *tty = port->state->port.tty;
	struct sci_port *s = to_sci_port(port);
	struct plat_sci_reg *reg;
	int copied = 0;

	reg = sci_getreg(port, SCLSR);
	if (!reg->size)
		return 0;

	if ((sci_in(port, SCLSR) & (1 << s->cfg->overrun_bit))) {
		sci_out(port, SCLSR, 0);

		tty_insert_flip_char(tty, 0, TTY_OVERRUN);
		tty_flip_buffer_push(tty);

		dev_notice(port->dev, "overrun error\n");
		copied++;
	}

	return copied;
}
static int sci_handle_breaks(struct uart_port *port)
{
	int copied = 0;
	unsigned short status = sci_in(port, SCxSR);
	struct tty_struct *tty = port->state->port.tty;
	struct sci_port *s = to_sci_port(port);

	if (uart_handle_break(port))
		return 0;

	if (!s->break_flag && status & SCxSR_BRK(port)) {
#if defined(CONFIG_CPU_SH3)
		/* Debounce break */
		s->break_flag = 1;
#endif
		/* Notify of BREAK */
		if (tty_insert_flip_char(tty, 0, TTY_BREAK))
			copied++;

		dev_dbg(port->dev, "BREAK detected\n");
	}

	if (copied)
		tty_flip_buffer_push(tty);

	copied += sci_handle_fifo_overrun(port);

	return copied;
}
static irqreturn_t sci_rx_interrupt(int irq, void *ptr)
{
#ifdef CONFIG_SERIAL_SH_SCI_DMA
	struct uart_port *port = ptr;
	struct sci_port *s = to_sci_port(port);

	if (s->chan_rx) {
		u16 scr = sci_in(port, SCSCR);
		u16 ssr = sci_in(port, SCxSR);

		/* Disable future Rx interrupts */
		if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
			disable_irq_nosync(irq);
			scr |= 0x4000;
		} else {
			scr &= ~SCSCR_RIE;
		}
		sci_out(port, SCSCR, scr);
		/* Clear current interrupt */
		sci_out(port, SCxSR, ssr & ~(1 | SCxSR_RDxF(port)));
		dev_dbg(port->dev, "Rx IRQ %lu: setup t-out in %u jiffies\n",
			jiffies, s->rx_timeout);
		mod_timer(&s->rx_timer, jiffies + s->rx_timeout);

		return IRQ_HANDLED;
	}
#endif

	/* I think sci_receive_chars has to be called irrespective
	 * of whether the I_IXOFF is set, otherwise, how is the interrupt
	 * to be disabled?
	 */
	sci_receive_chars(ptr);

	return IRQ_HANDLED;
}
static irqreturn_t sci_tx_interrupt(int irq, void *ptr)
{
	struct uart_port *port = ptr;
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	sci_transmit_chars(port);
	spin_unlock_irqrestore(&port->lock, flags);

	return IRQ_HANDLED;
}

static irqreturn_t sci_er_interrupt(int irq, void *ptr)
{
	struct uart_port *port = ptr;

	/* Handle errors */
	if (port->type == PORT_SCI) {
		if (sci_handle_errors(port)) {
			/* discard character in rx buffer */
			sci_in(port, SCxSR);
			sci_out(port, SCxSR, SCxSR_RDxF_CLEAR(port));
		}
	} else {
		sci_handle_fifo_overrun(port);
		sci_rx_interrupt(irq, ptr);
	}

	sci_out(port, SCxSR, SCxSR_ERROR_CLEAR(port));

	/* Kick the transmission */
	sci_tx_interrupt(irq, ptr);

	return IRQ_HANDLED;
}

static irqreturn_t sci_br_interrupt(int irq, void *ptr)
{
	struct uart_port *port = ptr;

	/* Handle BREAKs */
	sci_handle_breaks(port);
	sci_out(port, SCxSR, SCxSR_BREAK_CLEAR(port));

	return IRQ_HANDLED;
}
static inline unsigned long port_rx_irq_mask(struct uart_port *port)
{
	/*
	 * Not all ports (such as SCIFA) will support REIE. Rather than
	 * special-casing the port type, we check the port initialization
	 * IRQ enable mask to see whether the IRQ is desired at all. If
	 * it's unset, it's logically inferred that there's no point in
	 * testing for it.
	 */
	return SCSCR_RIE | (to_sci_port(port)->cfg->scscr & SCSCR_REIE);
}
static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr)
{
	unsigned short ssr_status, scr_status, err_enabled;
	struct uart_port *port = ptr;
	struct sci_port *s = to_sci_port(port);
	irqreturn_t ret = IRQ_NONE;

	ssr_status = sci_in(port, SCxSR);
	scr_status = sci_in(port, SCSCR);
	err_enabled = scr_status & port_rx_irq_mask(port);

	/* Tx Interrupt */
	if ((ssr_status & SCxSR_TDxE(port)) && (scr_status & SCSCR_TIE) &&
	    !s->chan_tx)
		ret = sci_tx_interrupt(irq, ptr);

	/*
	 * Rx Interrupt: if we're using DMA, the DMA controller clears RDF /
	 * DR flags
	 */
	if (((ssr_status & SCxSR_RDxF(port)) || s->chan_rx) &&
	    (scr_status & SCSCR_RIE))
		ret = sci_rx_interrupt(irq, ptr);

	/* Error Interrupt */
	if ((ssr_status & SCxSR_ERRORS(port)) && err_enabled)
		ret = sci_er_interrupt(irq, ptr);

	/* Break Interrupt */
	if ((ssr_status & SCxSR_BRK(port)) && err_enabled)
		ret = sci_br_interrupt(irq, ptr);

	return ret;
}
/*
 * Here we define a transition notifier so that we can update all of our
 * ports' baud rate when the peripheral clock changes.
 */
static int sci_notifier(struct notifier_block *self,
			unsigned long phase, void *p)
{
	struct sci_port *sci_port;
	unsigned long flags;

	sci_port = container_of(self, struct sci_port, freq_transition);

	if ((phase == CPUFREQ_POSTCHANGE) ||
	    (phase == CPUFREQ_RESUMECHANGE)) {
		struct uart_port *port = &sci_port->port;

		spin_lock_irqsave(&port->lock, flags);
		port->uartclk = clk_get_rate(sci_port->iclk);
		spin_unlock_irqrestore(&port->lock, flags);
	}

	return NOTIFY_OK;
}
static struct sci_irq_desc {
	const char	*desc;
	irq_handler_t	handler;
} sci_irq_desc[] = {
	/*
	 * Split out handlers, the default case.
	 */
	[SCIx_ERI_IRQ] = {
		.desc = "rx err",
		.handler = sci_er_interrupt,
	},

	[SCIx_RXI_IRQ] = {
		.desc = "rx full",
		.handler = sci_rx_interrupt,
	},

	[SCIx_TXI_IRQ] = {
		.desc = "tx empty",
		.handler = sci_tx_interrupt,
	},

	[SCIx_BRI_IRQ] = {
		.desc = "break",
		.handler = sci_br_interrupt,
	},

	/*
	 * Special muxed handler.
	 */
	[SCIx_MUX_IRQ] = {
		.desc = "mux",
		.handler = sci_mpxed_interrupt,
	},
};
static int sci_request_irq(struct sci_port *port)
{
	struct uart_port *up = &port->port;
	int i, j, ret = 0;

	for (i = j = 0; i < SCIx_NR_IRQS; i++, j++) {
		struct sci_irq_desc *desc;
		unsigned int irq;

		if (SCIx_IRQ_IS_MUXED(port)) {
			i = SCIx_MUX_IRQ;
			irq = up->irq;
		} else
			irq = port->cfg->irqs[i];

		desc = sci_irq_desc + i;
		port->irqstr[j] = kasprintf(GFP_KERNEL, "%s:%s",
					    dev_name(up->dev), desc->desc);
		if (!port->irqstr[j]) {
			dev_err(up->dev, "Failed to allocate %s IRQ string\n",
				desc->desc);
			goto out_nomem;
		}

		ret = request_irq(irq, desc->handler, up->irqflags,
				  port->irqstr[j], port);
		if (unlikely(ret)) {
			dev_err(up->dev, "Can't allocate %s IRQ\n", desc->desc);
			goto out_noirq;
		}
	}

	return 0;

out_noirq:
	while (--i >= 0)
		free_irq(port->cfg->irqs[i], port);

out_nomem:
	while (--j >= 0)
		kfree(port->irqstr[j]);

	return ret;
}
static void sci_free_irq(struct sci_port *port)
{
	int i;

	/*
	 * Intentionally in reverse order so we iterate over the muxed
	 * IRQ first.
	 */
	for (i = 0; i < SCIx_NR_IRQS; i++) {
		free_irq(port->cfg->irqs[i], port);
		kfree(port->irqstr[i]);

		if (SCIx_IRQ_IS_MUXED(port)) {
			/* If there's only one IRQ, we're done. */
			return;
		}
	}
}
static unsigned int sci_tx_empty(struct uart_port *port)
{
	unsigned short status = sci_in(port, SCxSR);
	unsigned short in_tx_fifo = sci_txfill(port);

	return (status & SCxSR_TEND(port)) && !in_tx_fifo ? TIOCSER_TEMT : 0;
}

static void sci_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
	/* This routine is used for setting signals of: DTR, DCD, CTS/RTS */
	/* We use SCIF's hardware for CTS/RTS, so don't need any for that. */
	/* If you have signals for DTR and DCD, please implement here. */
}

static unsigned int sci_get_mctrl(struct uart_port *port)
{
	/* This routine is used for getting signals of: DTR, DCD, DSR, RI,
	   and CTS/RTS */

	return TIOCM_DTR | TIOCM_RTS | TIOCM_DSR;
}
#ifdef CONFIG_SERIAL_SH_SCI_DMA
static void sci_dma_tx_complete(void *arg)
{
	struct sci_port *s = arg;
	struct uart_port *port = &s->port;
	struct circ_buf *xmit = &port->state->xmit;
	unsigned long flags;

	dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);

	spin_lock_irqsave(&port->lock, flags);

	xmit->tail += sg_dma_len(&s->sg_tx);
	xmit->tail &= UART_XMIT_SIZE - 1;

	port->icount.tx += sg_dma_len(&s->sg_tx);

	async_tx_ack(s->desc_tx);
	s->cookie_tx = -EINVAL;
	s->desc_tx = NULL;

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(port);

	if (!uart_circ_empty(xmit)) {
		schedule_work(&s->work_tx);
	} else if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
		u16 ctrl = sci_in(port, SCSCR);
		sci_out(port, SCSCR, ctrl & ~SCSCR_TIE);
	}

	spin_unlock_irqrestore(&port->lock, flags);
}
/* Locking: called with port lock held */
static int sci_dma_rx_push(struct sci_port *s, struct tty_struct *tty,
			   size_t count)
{
	struct uart_port *port = &s->port;
	int i, active, room;

	room = tty_buffer_request_room(tty, count);

	if (s->active_rx == s->cookie_rx[0]) {
		active = 0;
	} else if (s->active_rx == s->cookie_rx[1]) {
		active = 1;
	} else {
		dev_err(port->dev, "cookie %d not found!\n", s->active_rx);
		return 0;
	}

	if (room < count)
		dev_warn(port->dev, "Rx overrun: dropping %u bytes\n",
			 count - room);
	if (!room)
		return room;

	for (i = 0; i < room; i++)
		tty_insert_flip_char(tty, ((u8 *)sg_virt(&s->sg_rx[active]))[i],
				     TTY_NORMAL);

	port->icount.rx += room;

	return room;
}
static void sci_dma_rx_complete(void *arg)
{
	struct sci_port *s = arg;
	struct uart_port *port = &s->port;
	struct tty_struct *tty = port->state->port.tty;
	unsigned long flags;
	int count;

	dev_dbg(port->dev, "%s(%d) active #%d\n", __func__, port->line, s->active_rx);

	spin_lock_irqsave(&port->lock, flags);

	count = sci_dma_rx_push(s, tty, s->buf_len_rx);

	mod_timer(&s->rx_timer, jiffies + s->rx_timeout);

	spin_unlock_irqrestore(&port->lock, flags);

	if (count)
		tty_flip_buffer_push(tty);

	schedule_work(&s->work_rx);
}
static void sci_rx_dma_release(struct sci_port *s, bool enable_pio)
{
	struct dma_chan *chan = s->chan_rx;
	struct uart_port *port = &s->port;

	s->chan_rx = NULL;
	s->cookie_rx[0] = s->cookie_rx[1] = -EINVAL;
	dma_release_channel(chan);
	if (sg_dma_address(&s->sg_rx[0]))
		dma_free_coherent(port->dev, s->buf_len_rx * 2,
				  sg_virt(&s->sg_rx[0]), sg_dma_address(&s->sg_rx[0]));
	if (enable_pio)
		sci_start_rx(port);
}

static void sci_tx_dma_release(struct sci_port *s, bool enable_pio)
{
	struct dma_chan *chan = s->chan_tx;
	struct uart_port *port = &s->port;

	s->chan_tx = NULL;
	s->cookie_tx = -EINVAL;
	dma_release_channel(chan);
	if (enable_pio)
		sci_start_tx(port);
}
static void sci_submit_rx(struct sci_port *s)
{
	struct dma_chan *chan = s->chan_rx;
	int i;

	for (i = 0; i < 2; i++) {
		struct scatterlist *sg = &s->sg_rx[i];
		struct dma_async_tx_descriptor *desc;

		desc = chan->device->device_prep_slave_sg(chan,
			sg, 1, DMA_FROM_DEVICE, DMA_PREP_INTERRUPT);

		if (desc) {
			s->desc_rx[i] = desc;
			desc->callback = sci_dma_rx_complete;
			desc->callback_param = s;
			s->cookie_rx[i] = desc->tx_submit(desc);
		}

		if (!desc || s->cookie_rx[i] < 0) {
			if (i) {
				async_tx_ack(s->desc_rx[0]);
				s->cookie_rx[0] = -EINVAL;
			}
			if (desc) {
				async_tx_ack(desc);
				s->cookie_rx[i] = -EINVAL;
			}
			dev_warn(s->port.dev,
				 "failed to re-start DMA, using PIO\n");
			sci_rx_dma_release(s, true);
			return;
		}
		dev_dbg(s->port.dev, "%s(): cookie %d to #%d\n", __func__,
			s->cookie_rx[i], i);
	}

	s->active_rx = s->cookie_rx[0];

	dma_async_issue_pending(chan);
}
static void work_fn_rx(struct work_struct *work)
{
	struct sci_port *s = container_of(work, struct sci_port, work_rx);
	struct uart_port *port = &s->port;
	struct dma_async_tx_descriptor *desc;
	int new;

	if (s->active_rx == s->cookie_rx[0]) {
		new = 0;
	} else if (s->active_rx == s->cookie_rx[1]) {
		new = 1;
	} else {
		dev_err(port->dev, "cookie %d not found!\n", s->active_rx);
		return;
	}
	desc = s->desc_rx[new];

	if (dma_async_is_tx_complete(s->chan_rx, s->active_rx, NULL, NULL) !=
	    DMA_SUCCESS) {
		/* Handle incomplete DMA receive */
		struct tty_struct *tty = port->state->port.tty;
		struct dma_chan *chan = s->chan_rx;
		struct sh_desc *sh_desc = container_of(desc, struct sh_desc,
						       async_tx);
		unsigned long flags;
		int count;

		chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
		dev_dbg(port->dev, "Read %u bytes with cookie %d\n",
			sh_desc->partial, sh_desc->cookie);

		spin_lock_irqsave(&port->lock, flags);
		count = sci_dma_rx_push(s, tty, sh_desc->partial);
		spin_unlock_irqrestore(&port->lock, flags);

		if (count)
			tty_flip_buffer_push(tty);

		sci_submit_rx(s);

		return;
	}

	s->cookie_rx[new] = desc->tx_submit(desc);
	if (s->cookie_rx[new] < 0) {
		dev_warn(port->dev, "Failed submitting Rx DMA descriptor\n");
		sci_rx_dma_release(s, true);
		return;
	}

	s->active_rx = s->cookie_rx[!new];

	dev_dbg(port->dev, "%s: cookie %d #%d, new active #%d\n", __func__,
		s->cookie_rx[new], new, s->active_rx);
}
static void work_fn_tx(struct work_struct *work)
{
	struct sci_port *s = container_of(work, struct sci_port, work_tx);
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan = s->chan_tx;
	struct uart_port *port = &s->port;
	struct circ_buf *xmit = &port->state->xmit;
	struct scatterlist *sg = &s->sg_tx;

	/*
	 * Port xmit buffer is already mapped, and it is one page... Just adjust
	 * offsets and lengths. Since it is a circular buffer, we have to
	 * transmit till the end, and then the rest. Take the port lock to get a
	 * consistent xmit buffer state.
	 */
	spin_lock_irq(&port->lock);
	sg->offset = xmit->tail & (UART_XMIT_SIZE - 1);
	sg_dma_address(sg) = (sg_dma_address(sg) & ~(UART_XMIT_SIZE - 1)) +
		sg->offset;
	sg_dma_len(sg) = min((int)CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE),
		CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE));
	spin_unlock_irq(&port->lock);

	BUG_ON(!sg_dma_len(sg));
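	/*
	 * Illustrative example of the slicing above (not from the original
	 * source): with UART_XMIT_SIZE = 4096, head = 100 and tail = 4000,
	 * CIRC_CNT() reports 196 pending bytes but CIRC_CNT_TO_END() only 96,
	 * so this pass sends the 96 bytes up to the end of the page and the
	 * wrapped remainder is picked up by the next work_fn_tx() run.
	 */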
	desc = chan->device->device_prep_slave_sg(chan,
			sg, s->sg_len_tx, DMA_TO_DEVICE,
			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		/* switch to PIO */
		sci_tx_dma_release(s, true);
		return;
	}

	dma_sync_sg_for_device(port->dev, sg, 1, DMA_TO_DEVICE);

	spin_lock_irq(&port->lock);
	s->desc_tx = desc;
	desc->callback = sci_dma_tx_complete;
	desc->callback_param = s;
	spin_unlock_irq(&port->lock);
	s->cookie_tx = desc->tx_submit(desc);
	if (s->cookie_tx < 0) {
		dev_warn(port->dev, "Failed submitting Tx DMA descriptor\n");
		/* switch to PIO */
		sci_tx_dma_release(s, true);
		return;
	}

	dev_dbg(port->dev, "%s: %p: %d...%d, cookie %d\n", __func__,
		xmit->buf, xmit->tail, xmit->head, s->cookie_tx);

	dma_async_issue_pending(chan);
}
static void sci_start_tx(struct uart_port *port)
{
	struct sci_port *s = to_sci_port(port);
	unsigned short ctrl;

#ifdef CONFIG_SERIAL_SH_SCI_DMA
	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
		u16 new, scr = sci_in(port, SCSCR);
		if (s->chan_tx)
			new = scr | 0x8000;
		else
			new = scr & ~0x8000;
		if (new != scr)
			sci_out(port, SCSCR, new);
	}

	if (s->chan_tx && !uart_circ_empty(&s->port.state->xmit) &&
	    s->cookie_tx < 0)
		schedule_work(&s->work_tx);
#endif

	if (!s->chan_tx || port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
		/* Set TIE (Transmit Interrupt Enable) bit in SCSCR */
		ctrl = sci_in(port, SCSCR);
		sci_out(port, SCSCR, ctrl | SCSCR_TIE);
	}
}
*port
)
1385 unsigned short ctrl
;
1387 /* Clear TIE (Transmit Interrupt Enable) bit in SCSCR */
1388 ctrl
= sci_in(port
, SCSCR
);
1390 if (port
->type
== PORT_SCIFA
|| port
->type
== PORT_SCIFB
)
1395 sci_out(port
, SCSCR
, ctrl
);
1398 static void sci_start_rx(struct uart_port
*port
)
1400 unsigned short ctrl
;
1402 ctrl
= sci_in(port
, SCSCR
) | port_rx_irq_mask(port
);
1404 if (port
->type
== PORT_SCIFA
|| port
->type
== PORT_SCIFB
)
1407 sci_out(port
, SCSCR
, ctrl
);
1410 static void sci_stop_rx(struct uart_port
*port
)
1412 unsigned short ctrl
;
1414 ctrl
= sci_in(port
, SCSCR
);
1416 if (port
->type
== PORT_SCIFA
|| port
->type
== PORT_SCIFB
)
1419 ctrl
&= ~port_rx_irq_mask(port
);
1421 sci_out(port
, SCSCR
, ctrl
);
1424 static void sci_enable_ms(struct uart_port
*port
)
1426 /* Nothing here yet .. */
1429 static void sci_break_ctl(struct uart_port
*port
, int break_state
)
1431 /* Nothing here yet .. */
#ifdef CONFIG_SERIAL_SH_SCI_DMA
static bool filter(struct dma_chan *chan, void *slave)
{
	struct sh_dmae_slave *param = slave;

	dev_dbg(chan->device->dev, "%s: slave ID %d\n", __func__,
		param->slave_id);

	if (param->dma_dev == chan->device->dev) {
		chan->private = param;
		return true;
	}

	return false;
}
static void rx_timer_fn(unsigned long arg)
{
	struct sci_port *s = (struct sci_port *)arg;
	struct uart_port *port = &s->port;
	u16 scr = sci_in(port, SCSCR);

	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
		scr &= ~0x4000;
		enable_irq(s->cfg->irqs[1]);
	}
	sci_out(port, SCSCR, scr | SCSCR_RIE);
	dev_dbg(port->dev, "DMA Rx timed out\n");
	schedule_work(&s->work_rx);
}
static void sci_request_dma(struct uart_port *port)
{
	struct sci_port *s = to_sci_port(port);
	struct sh_dmae_slave *param;
	struct dma_chan *chan;
	dma_cap_mask_t mask;
	int nent;

	dev_dbg(port->dev, "%s: port %d DMA %p\n", __func__,
		port->line, s->cfg->dma_dev);

	if (!s->cfg->dma_dev)
		return;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	param = &s->param_tx;

	/* Slave ID, e.g., SHDMA_SLAVE_SCIF0_TX */
	param->slave_id = s->cfg->dma_slave_tx;
	param->dma_dev = s->cfg->dma_dev;

	s->cookie_tx = -EINVAL;
	chan = dma_request_channel(mask, filter, param);
	dev_dbg(port->dev, "%s: TX: got channel %p\n", __func__, chan);
	if (chan) {
		s->chan_tx = chan;
		sg_init_table(&s->sg_tx, 1);
		/* UART circular tx buffer is an aligned page. */
		BUG_ON((int)port->state->xmit.buf & ~PAGE_MASK);
		sg_set_page(&s->sg_tx, virt_to_page(port->state->xmit.buf),
			    UART_XMIT_SIZE, (int)port->state->xmit.buf & ~PAGE_MASK);
		nent = dma_map_sg(port->dev, &s->sg_tx, 1, DMA_TO_DEVICE);
		if (!nent)
			sci_tx_dma_release(s, false);
		else
			dev_dbg(port->dev, "%s: mapped %d@%p to %x\n", __func__,
				sg_dma_len(&s->sg_tx),
				port->state->xmit.buf, sg_dma_address(&s->sg_tx));

		s->sg_len_tx = nent;

		INIT_WORK(&s->work_tx, work_fn_tx);
	}

	param = &s->param_rx;

	/* Slave ID, e.g., SHDMA_SLAVE_SCIF0_RX */
	param->slave_id = s->cfg->dma_slave_rx;
	param->dma_dev = s->cfg->dma_dev;

	chan = dma_request_channel(mask, filter, param);
	dev_dbg(port->dev, "%s: RX: got channel %p\n", __func__, chan);
	if (chan) {
		dma_addr_t dma[2];
		void *buf[2];
		int i;

		s->chan_rx = chan;

		s->buf_len_rx = 2 * max(16, (int)port->fifosize);
		buf[0] = dma_alloc_coherent(port->dev, s->buf_len_rx * 2,
					    &dma[0], GFP_KERNEL);

		if (!buf[0]) {
			dev_warn(port->dev,
				 "failed to allocate dma buffer, using PIO\n");
			sci_rx_dma_release(s, true);
			return;
		}

		buf[1] = buf[0] + s->buf_len_rx;
		dma[1] = dma[0] + s->buf_len_rx;

		for (i = 0; i < 2; i++) {
			struct scatterlist *sg = &s->sg_rx[i];

			sg_init_table(sg, 1);
			sg_set_page(sg, virt_to_page(buf[i]), s->buf_len_rx,
				    (int)buf[i] & ~PAGE_MASK);
			sg_dma_address(sg) = dma[i];
		}

		INIT_WORK(&s->work_rx, work_fn_rx);
		setup_timer(&s->rx_timer, rx_timer_fn, (unsigned long)s);

		sci_submit_rx(s);
	}
}
static void sci_free_dma(struct uart_port *port)
{
	struct sci_port *s = to_sci_port(port);

	if (!s->cfg->dma_dev)
		return;

	if (s->chan_tx)
		sci_tx_dma_release(s, false);
	if (s->chan_rx)
		sci_rx_dma_release(s, false);
}
#else
static inline void sci_request_dma(struct uart_port *port)
{
}

static inline void sci_free_dma(struct uart_port *port)
{
}
#endif
*port
)
1580 struct sci_port
*s
= to_sci_port(port
);
1583 dev_dbg(port
->dev
, "%s(%d)\n", __func__
, port
->line
);
1587 ret
= sci_request_irq(s
);
1588 if (unlikely(ret
< 0))
1591 sci_request_dma(port
);
1599 static void sci_shutdown(struct uart_port
*port
)
1601 struct sci_port
*s
= to_sci_port(port
);
1603 dev_dbg(port
->dev
, "%s(%d)\n", __func__
, port
->line
);
1611 sci_port_disable(s
);
static unsigned int sci_scbrr_calc(unsigned int algo_id, unsigned int bps,
				   unsigned long freq)
{
	switch (algo_id) {
	case SCBRR_ALGO_1:
		return ((freq + 16 * bps) / (16 * bps) - 1);
	case SCBRR_ALGO_2:
		return ((freq + 16 * bps) / (32 * bps) - 1);
	case SCBRR_ALGO_3:
		return (((freq * 2) + 16 * bps) / (16 * bps) - 1);
	case SCBRR_ALGO_4:
		return (((freq * 2) + 16 * bps) / (32 * bps) - 1);
	case SCBRR_ALGO_5:
		return (((freq * 1000 / 32) / bps) - 1);
	}

	/* Warn, but use a safe default */
	WARN_ON(1);

	return ((freq + 16 * bps) / (32 * bps) - 1);
}
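/*
 * Worked example (illustrative numbers only, not from the original source):
 * with SCBRR_ALGO_2 and a 48 MHz peripheral clock, a request for 115200 bps
 * gives (48000000 + 16 * 115200) / (32 * 115200) - 1 = 12; programming 12
 * into SCBRR then yields roughly 115385 bps, i.e. about 0.16% error.
 */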
static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
			    struct ktermios *old)
{
	struct sci_port *s = to_sci_port(port);
	unsigned int status, baud, smr_val, max_baud;
	int t = -1;
	u16 scfcr = 0;

	/*
	 * earlyprintk comes here early on with port->uartclk set to zero.
	 * the clock framework is not up and running at this point so here
	 * we assume that 115200 is the maximum baud rate. please note that
	 * the baud rate is not programmed during earlyprintk - it is assumed
	 * that the previous boot loader has enabled required clocks and
	 * setup the baud rate generator hardware for us already.
	 */
	max_baud = port->uartclk ? port->uartclk / 16 : 115200;
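	/*
	 * For example (illustrative numbers only): a 66.67 MHz uartclk gives a
	 * max_baud ceiling of roughly 4.17 Mbps here, while the earlyprintk
	 * case (uartclk == 0) is capped at the assumed 115200.
	 */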
	baud = uart_get_baud_rate(port, termios, old, 0, max_baud);
	if (likely(baud && port->uartclk))
		t = sci_scbrr_calc(s->cfg->scbrr_algo_id, baud, port->uartclk);

	sci_port_enable(s);

	do {
		status = sci_in(port, SCxSR);
	} while (!(status & SCxSR_TEND(port)));

	sci_out(port, SCSCR, 0x00);	/* TE=0, RE=0, CKE1=0 */

	if (port->type != PORT_SCI)
		sci_out(port, SCFCR, scfcr | SCFCR_RFRST | SCFCR_TFRST);

	smr_val = sci_in(port, SCSMR) & 3;

	if ((termios->c_cflag & CSIZE) == CS7)
		smr_val |= 0x40;
	if (termios->c_cflag & PARENB)
		smr_val |= 0x20;
	if (termios->c_cflag & PARODD)
		smr_val |= 0x30;
	if (termios->c_cflag & CSTOPB)
		smr_val |= 0x08;

	uart_update_timeout(port, termios->c_cflag, baud);

	sci_out(port, SCSMR, smr_val);

	dev_dbg(port->dev, "%s: SMR %x, t %x, SCSCR %x\n", __func__, smr_val, t,
		s->cfg->scscr);

	if (t > 0) {
		if (t >= 256) {
			sci_out(port, SCSMR, (sci_in(port, SCSMR) & ~3) | 1);
			t >>= 2;
		} else
			sci_out(port, SCSMR, sci_in(port, SCSMR) & ~3);

		sci_out(port, SCBRR, t);
		udelay((1000000+(baud-1)) / baud); /* Wait one bit interval */
	}

	sci_init_pins(port, termios->c_cflag);
	sci_out(port, SCFCR, scfcr | ((termios->c_cflag & CRTSCTS) ? SCFCR_MCE : 0));

	sci_out(port, SCSCR, s->cfg->scscr);

#ifdef CONFIG_SERIAL_SH_SCI_DMA
	/*
	 * Calculate delay for 1.5 DMA buffers: see
	 * drivers/serial/serial_core.c::uart_update_timeout(). With 10 bits
	 * (CS8), 250Hz, 115200 baud and 64 bytes FIFO, the above function
	 * calculates 1 jiffie for the data plus 5 jiffies for the "slop(e)."
	 * Then below we calculate 3 jiffies (12ms) for 1.5 DMA buffers (3 FIFO
	 * sizes), but it has been found experimentally that this is not
	 * enough: the driver too often needlessly runs on a DMA timeout. 20ms
	 * as a minimum seems to work perfectly.
	 */
	if (s->chan_rx) {
		s->rx_timeout = (port->timeout - HZ / 50) * s->buf_len_rx * 3 /
			port->fifosize / 2;
		dev_dbg(port->dev,
			"DMA Rx t-out %ums, tty t-out %u jiffies\n",
			s->rx_timeout * 1000 / HZ, port->timeout);
		if (s->rx_timeout < msecs_to_jiffies(20))
			s->rx_timeout = msecs_to_jiffies(20);
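		/*
		 * Plugging in the numbers from the comment above (illustrative):
		 * with port->timeout = 6 jiffies at HZ = 250, a 64-byte FIFO and
		 * buf_len_rx = 128, (6 - 5) * 128 * 3 / 64 / 2 = 3 jiffies (12ms),
		 * which is then raised to the 20ms floor applied just above.
		 */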
	}
#endif

	if ((termios->c_cflag & CREAD) != 0)
		sci_start_rx(port);

	sci_port_disable(s);
}
static const char *sci_type(struct uart_port *port)
{
	switch (port->type) {
	case PORT_IRDA:		return "irda";
	case PORT_SCI:		return "sci";
	case PORT_SCIF:		return "scif";
	case PORT_SCIFA:	return "scifa";
	case PORT_SCIFB:	return "scifb";
	}

	return NULL;
}
static inline unsigned long sci_port_size(struct uart_port *port)
{
	/*
	 * Pick an arbitrary size that encapsulates all of the base
	 * registers by default. This can be optimized later, or derived
	 * from platform resource data at such a time that ports begin to
	 * behave more erratically.
	 */
	return 64;
}
static int sci_remap_port(struct uart_port *port)
{
	unsigned long size = sci_port_size(port);

	/*
	 * Nothing to do if there's already an established membase.
	 */
	if (port->membase)
		return 0;

	if (port->flags & UPF_IOREMAP) {
		port->membase = ioremap_nocache(port->mapbase, size);
		if (unlikely(!port->membase)) {
			dev_err(port->dev, "can't remap port#%d\n", port->line);
			return -ENXIO;
		}
	} else {
		/*
		 * For the simple (and majority of) cases where we don't
		 * need to do any remapping, just cast the cookie
		 * directly.
		 */
		port->membase = (void __iomem *)port->mapbase;
	}

	return 0;
}
static void sci_release_port(struct uart_port *port)
{
	if (port->flags & UPF_IOREMAP) {
		iounmap(port->membase);
		port->membase = NULL;
	}

	release_mem_region(port->mapbase, sci_port_size(port));
}
static int sci_request_port(struct uart_port *port)
{
	unsigned long size = sci_port_size(port);
	struct resource *res;
	int ret;

	res = request_mem_region(port->mapbase, size, dev_name(port->dev));
	if (unlikely(res == NULL))
		return -EBUSY;

	ret = sci_remap_port(port);
	if (unlikely(ret != 0)) {
		release_resource(res);
		return ret;
	}

	return 0;
}
static void sci_config_port(struct uart_port *port, int flags)
{
	if (flags & UART_CONFIG_TYPE) {
		struct sci_port *sport = to_sci_port(port);

		port->type = sport->cfg->type;
		sci_request_port(port);
	}
}
static int sci_verify_port(struct uart_port *port, struct serial_struct *ser)
{
	struct sci_port *s = to_sci_port(port);

	if (ser->irq != s->cfg->irqs[SCIx_TXI_IRQ] || ser->irq > nr_irqs)
		return -EINVAL;
	if (ser->baud_base < 2400)
		/* No paper tape reader for Mitch.. */
		return -EINVAL;

	return 0;
}
static struct uart_ops sci_uart_ops = {
	.tx_empty	= sci_tx_empty,
	.set_mctrl	= sci_set_mctrl,
	.get_mctrl	= sci_get_mctrl,
	.start_tx	= sci_start_tx,
	.stop_tx	= sci_stop_tx,
	.stop_rx	= sci_stop_rx,
	.enable_ms	= sci_enable_ms,
	.break_ctl	= sci_break_ctl,
	.startup	= sci_startup,
	.shutdown	= sci_shutdown,
	.set_termios	= sci_set_termios,
	.type		= sci_type,
	.release_port	= sci_release_port,
	.request_port	= sci_request_port,
	.config_port	= sci_config_port,
	.verify_port	= sci_verify_port,
#ifdef CONFIG_CONSOLE_POLL
	.poll_get_char	= sci_poll_get_char,
	.poll_put_char	= sci_poll_put_char,
#endif
};
static int __devinit sci_init_single(struct platform_device *dev,
				     struct sci_port *sci_port,
				     unsigned int index,
				     struct plat_sci_port *p)
{
	struct uart_port *port = &sci_port->port;
	int ret;

	port->ops	= &sci_uart_ops;
	port->iotype	= UPIO_MEM;
	port->line	= index;

	switch (p->type) {
	case PORT_SCIFB:
		port->fifosize = 256;
		break;
	case PORT_SCIFA:
		port->fifosize = 64;
		break;
	case PORT_SCIF:
		port->fifosize = 16;
		break;
	default:
		port->fifosize = 1;
		break;
	}

	if (p->regtype == SCIx_PROBE_REGTYPE) {
		ret = sci_probe_regmap(p);
		if (unlikely(ret))
			return ret;
	}

	if (dev) {
		sci_port->iclk = clk_get(&dev->dev, "sci_ick");
		if (IS_ERR(sci_port->iclk)) {
			sci_port->iclk = clk_get(&dev->dev, "peripheral_clk");
			if (IS_ERR(sci_port->iclk)) {
				dev_err(&dev->dev, "can't get iclk\n");
				return PTR_ERR(sci_port->iclk);
			}
		}

		/*
		 * The function clock is optional, ignore it if we can't
		 * find it.
		 */
		sci_port->fclk = clk_get(&dev->dev, "sci_fck");
		if (IS_ERR(sci_port->fclk))
			sci_port->fclk = NULL;

		port->dev = &dev->dev;

		pm_runtime_enable(&dev->dev);
	}

	sci_port->break_timer.data = (unsigned long)sci_port;
	sci_port->break_timer.function = sci_break_timer;
	init_timer(&sci_port->break_timer);

	/*
	 * Establish some sensible defaults for the error detection.
	 */
	if (!p->error_mask)
		p->error_mask = (p->type == PORT_SCI) ?
			SCI_DEFAULT_ERROR_MASK : SCIF_DEFAULT_ERROR_MASK;

	/*
	 * Establish sensible defaults for the overrun detection, unless
	 * the part has explicitly disabled support for it.
	 */
	if (p->overrun_bit != SCIx_NOT_SUPPORTED) {
		if (p->type == PORT_SCI)
			p->overrun_bit = 5;
		else if (p->scbrr_algo_id == SCBRR_ALGO_4)
			p->overrun_bit = 9;
		else
			p->overrun_bit = 0;

		/*
		 * Make the error mask inclusive of overrun detection, if
		 * supported.
		 */
		p->error_mask |= (1 << p->overrun_bit);
	}

	sci_port->cfg	= p;

	port->mapbase	= p->mapbase;
	port->type	= p->type;
	port->flags	= p->flags;
	port->regshift	= p->regshift;

	/*
	 * The UART port needs an IRQ value, so we peg this to the RX IRQ
	 * for the multi-IRQ ports, which is where we are primarily
	 * concerned with the shutdown path synchronization.
	 *
	 * For the muxed case there's nothing more to do.
	 */
	port->irq		= p->irqs[SCIx_RXI_IRQ];
	port->irqflags		= IRQF_DISABLED;

	port->serial_in		= sci_serial_in;
	port->serial_out	= sci_serial_out;

	if (p->dma_dev)
		dev_dbg(port->dev, "DMA device %p, tx %d, rx %d\n",
			p->dma_dev, p->dma_slave_tx, p->dma_slave_rx);

	return 0;
}
#ifdef CONFIG_SERIAL_SH_SCI_CONSOLE
static void serial_console_putchar(struct uart_port *port, int ch)
{
	sci_poll_put_char(port, ch);
}

/*
 *	Print a string to the serial port trying not to disturb
 *	any possible real use of the port...
 */
static void serial_console_write(struct console *co, const char *s,
				 unsigned count)
{
	struct sci_port *sci_port = &sci_ports[co->index];
	struct uart_port *port = &sci_port->port;
	unsigned short bits;

	sci_port_enable(sci_port);

	uart_console_write(port, s, count, serial_console_putchar);

	/* wait until fifo is empty and last bit has been transmitted */
	bits = SCxSR_TDxE(port) | SCxSR_TEND(port);
	while ((sci_in(port, SCxSR) & bits) != bits)
		cpu_relax();

	sci_port_disable(sci_port);
}
static int __devinit serial_console_setup(struct console *co, char *options)
{
	struct sci_port *sci_port;
	struct uart_port *port;
	int baud = 115200;
	int bits = 8;
	int parity = 'n';
	int flow = 'n';
	int ret;

	/*
	 * Refuse to handle any bogus ports.
	 */
	if (co->index < 0 || co->index >= SCI_NPORTS)
		return -ENODEV;

	sci_port = &sci_ports[co->index];
	port = &sci_port->port;

	/*
	 * Refuse to handle uninitialized ports.
	 */
	if (!port->ops)
		return -ENODEV;

	ret = sci_remap_port(port);
	if (unlikely(ret != 0))
		return ret;

	sci_port_enable(sci_port);

	if (options)
		uart_parse_options(options, &baud, &parity, &bits, &flow);

	/* TODO: disable clock */
	return uart_set_options(port, co, baud, parity, bits, flow);
}
static struct console serial_console = {
	.name		= "ttySC",
	.device		= uart_console_device,
	.write		= serial_console_write,
	.setup		= serial_console_setup,
	.flags		= CON_PRINTBUFFER,
	.index		= -1,
	.data		= &sci_uart_driver,
};

static struct console early_serial_console = {
	.name		= "early_ttySC",
	.write		= serial_console_write,
	.flags		= CON_PRINTBUFFER,
	.index		= -1,
};

static char early_serial_buf[32];
static int __devinit sci_probe_earlyprintk(struct platform_device *pdev)
{
	struct plat_sci_port *cfg = pdev->dev.platform_data;

	if (early_serial_console.data)
		return -EEXIST;

	early_serial_console.index = pdev->id;

	sci_init_single(NULL, &sci_ports[pdev->id], pdev->id, cfg);

	serial_console_setup(&early_serial_console, early_serial_buf);

	if (!strstr(early_serial_buf, "keep"))
		early_serial_console.flags |= CON_BOOT;

	register_console(&early_serial_console);
	return 0;
}

#define SCI_CONSOLE	(&serial_console)

#else
static inline int __devinit sci_probe_earlyprintk(struct platform_device *pdev)
{
	return -EINVAL;
}

#define SCI_CONSOLE	NULL

#endif /* CONFIG_SERIAL_SH_SCI_CONSOLE */
static char banner[] __initdata =
	KERN_INFO "SuperH SCI(F) driver initialized\n";

static struct uart_driver sci_uart_driver = {
	.owner		= THIS_MODULE,
	.driver_name	= "sci",
	.dev_name	= "ttySC",
	.major		= SCI_MAJOR,
	.minor		= SCI_MINOR_START,
	.nr		= SCI_NPORTS,
	.cons		= SCI_CONSOLE,
};
static int sci_remove(struct platform_device *dev)
{
	struct sci_port *port = platform_get_drvdata(dev);

	cpufreq_unregister_notifier(&port->freq_transition,
				    CPUFREQ_TRANSITION_NOTIFIER);

	uart_remove_one_port(&sci_uart_driver, &port->port);

	clk_put(port->iclk);
	clk_put(port->fclk);

	pm_runtime_disable(&dev->dev);
	return 0;
}
static int __devinit sci_probe_single(struct platform_device *dev,
				      unsigned int index,
				      struct plat_sci_port *p,
				      struct sci_port *sciport)
{
	int ret;

	/* Sanity check */
	if (unlikely(index >= SCI_NPORTS)) {
		dev_notice(&dev->dev, "Attempting to register port "
			   "%d when only %d are available.\n",
			   index+1, SCI_NPORTS);
		dev_notice(&dev->dev, "Consider bumping "
			   "CONFIG_SERIAL_SH_SCI_NR_UARTS!\n");
		return 0;
	}

	ret = sci_init_single(dev, sciport, index, p);
	if (ret)
		return ret;

	return uart_add_one_port(&sci_uart_driver, &sciport->port);
}
static int __devinit sci_probe(struct platform_device *dev)
{
	struct plat_sci_port *p = dev->dev.platform_data;
	struct sci_port *sp = &sci_ports[dev->id];
	int ret;

	/*
	 * If we've come here via earlyprintk initialization, head off to
	 * the special early probe. We don't have sufficient device state
	 * to make it beyond this yet.
	 */
	if (is_early_platform_device(dev))
		return sci_probe_earlyprintk(dev);

	platform_set_drvdata(dev, sp);

	ret = sci_probe_single(dev, dev->id, p, sp);
	if (ret)
		return ret;

	sp->freq_transition.notifier_call = sci_notifier;

	ret = cpufreq_register_notifier(&sp->freq_transition,
					CPUFREQ_TRANSITION_NOTIFIER);
	if (unlikely(ret < 0))
		return ret;

#ifdef CONFIG_SH_STANDARD_BIOS
	sh_bios_gdb_detach();
#endif

	return 0;
}
static int sci_suspend(struct device *dev)
{
	struct sci_port *sport = dev_get_drvdata(dev);

	if (sport)
		uart_suspend_port(&sci_uart_driver, &sport->port);

	return 0;
}

static int sci_resume(struct device *dev)
{
	struct sci_port *sport = dev_get_drvdata(dev);

	if (sport)
		uart_resume_port(&sci_uart_driver, &sport->port);

	return 0;
}

static const struct dev_pm_ops sci_dev_pm_ops = {
	.suspend	= sci_suspend,
	.resume		= sci_resume,
};
static struct platform_driver sci_driver = {
	.probe		= sci_probe,
	.remove		= sci_remove,
	.driver		= {
		.name	= "sh-sci",
		.owner	= THIS_MODULE,
		.pm	= &sci_dev_pm_ops,
	},
};
static int __init sci_init(void)
{
	int ret;

	printk(banner);

	ret = uart_register_driver(&sci_uart_driver);
	if (likely(ret == 0)) {
		ret = platform_driver_register(&sci_driver);
		if (unlikely(ret))
			uart_unregister_driver(&sci_uart_driver);
	}

	return ret;
}

static void __exit sci_exit(void)
{
	platform_driver_unregister(&sci_driver);
	uart_unregister_driver(&sci_uart_driver);
}
#ifdef CONFIG_SERIAL_SH_SCI_CONSOLE
early_platform_init_buffer("earlyprintk", &sci_driver,
			   early_serial_buf, ARRAY_SIZE(early_serial_buf));
#endif
module_init(sci_init);
module_exit(sci_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:sh-sci");
MODULE_AUTHOR("Paul Mundt");
MODULE_DESCRIPTION("SuperH SCI(F) serial driver");