[tomato.git] release/src-rt-6.x.4708/linux/linux-2.6.36/arch/cris/arch-v32/drivers/sync_serial.c
/*
 * Simple synchronous serial port driver for ETRAX FS and Artpec-3.
 *
 * Copyright (c) 2005 Axis Communications AB
 *
 * Author: Mikael Starvik
 *
 */
10 #include <linux/module.h>
11 #include <linux/kernel.h>
12 #include <linux/types.h>
13 #include <linux/errno.h>
14 #include <linux/major.h>
15 #include <linux/sched.h>
16 #include <linux/smp_lock.h>
17 #include <linux/interrupt.h>
18 #include <linux/poll.h>
19 #include <linux/init.h>
20 #include <linux/timer.h>
21 #include <linux/spinlock.h>
23 #include <asm/io.h>
24 #include <dma.h>
25 #include <pinmux.h>
26 #include <hwregs/reg_rdwr.h>
27 #include <hwregs/sser_defs.h>
28 #include <hwregs/dma_defs.h>
29 #include <hwregs/dma.h>
30 #include <hwregs/intr_vect_defs.h>
31 #include <hwregs/intr_vect.h>
32 #include <hwregs/reg_map.h>
33 #include <asm/sync_serial.h>
/*
 * The receiver is a bit tricky because of the continuous stream of data.
 *
 * Three DMA descriptors are linked together. Each DMA descriptor is
 * responsible for port->inbufchunk of a common buffer.
 *
 *   +---------------------------------------------+
 *   |  +----------+   +----------+   +----------+ |
 *   +->| Descr[0] |-->| Descr[1] |-->| Descr[2] |-+
 *      +----------+   +----------+   +----------+
 *           |              |              |
 *           v              v              v
 *      +-------------------------------------+
 *      |               BUFFER                |
 *      +-------------------------------------+
 *      |<-  data_avail  ->|
 *      readp          writep
 *
 * If the application keeps up the pace, readp will be right after writep.
 * If the application can't keep up, we have to throw away data.
 * The idea is that readp should be done with the data pointed out by
 * Descr[i] by the time the DMA has filled in Descr[i+1]. Otherwise we
 * discard the rest of the data pointed out by Descr[i] and set readp to
 * the start of Descr[i+1].
 */
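
/* Illustrative example of the readp/writep arithmetic (not part of the
 * driver): with IN_BUFFER_SIZE = 12288, if readp points 256 bytes into
 * the flip buffer and writep points 1024 bytes in, sync_data_avail()
 * returns 1024 - 256 = 768.  If writep has wrapped so that readp is at
 * offset 10000 and writep at offset 500, the available count is
 * 12288 - (10000 - 500) = 2788.
 */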
61 #define SYNC_SERIAL_MAJOR 125
63 /* IN_BUFFER_SIZE should be a multiple of 6 to make sure that 24 bit */
64 /* words can be handled */
65 #define IN_BUFFER_SIZE 12288
66 #define IN_DESCR_SIZE 256
67 #define NBR_IN_DESCR (IN_BUFFER_SIZE/IN_DESCR_SIZE)
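
/* With the sizes above, NBR_IN_DESCR = 12288 / 256 = 48 receive
 * descriptors, each covering one 256-byte chunk (port->inbufchunk) of
 * in_buffer.
 */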
#define OUT_BUFFER_SIZE (1024 * 8)
70 #define NBR_OUT_DESCR 8
72 #define DEFAULT_FRAME_RATE 0
73 #define DEFAULT_WORD_RATE 7
/* NOTE: Enabling some debug will likely cause overrun or underrun,
 * especially if manual mode is used.
 */
78 #define DEBUG(x)
79 #define DEBUGREAD(x)
80 #define DEBUGWRITE(x)
81 #define DEBUGPOLL(x)
82 #define DEBUGRXINT(x)
83 #define DEBUGTXINT(x)
84 #define DEBUGTRDMA(x)
85 #define DEBUGOUTBUF(x)
typedef struct sync_port
{
89 reg_scope_instances regi_sser;
90 reg_scope_instances regi_dmain;
91 reg_scope_instances regi_dmaout;
93 char started; /* 1 if port has been started */
94 char port_nbr; /* Port 0 or 1 */
95 char busy; /* 1 if port is busy */
97 char enabled; /* 1 if port is enabled */
98 char use_dma; /* 1 if port uses dma */
99 char tr_running;
101 char init_irqs;
102 int output;
103 int input;
105 /* Next byte to be read by application */
106 volatile unsigned char *volatile readp;
107 /* Next byte to be written by etrax */
108 volatile unsigned char *volatile writep;
110 unsigned int in_buffer_size;
111 unsigned int inbufchunk;
112 unsigned char out_buffer[OUT_BUFFER_SIZE] __attribute__ ((aligned(32)));
113 unsigned char in_buffer[IN_BUFFER_SIZE]__attribute__ ((aligned(32)));
114 unsigned char flip[IN_BUFFER_SIZE] __attribute__ ((aligned(32)));
115 struct dma_descr_data* next_rx_desc;
116 struct dma_descr_data* prev_rx_desc;
118 /* Pointer to the first available descriptor in the ring,
119 * unless active_tr_descr == catch_tr_descr and a dma
120 * transfer is active */
121 struct dma_descr_data *active_tr_descr;
123 /* Pointer to the first allocated descriptor in the ring */
124 struct dma_descr_data *catch_tr_descr;
126 /* Pointer to the descriptor with the current end-of-list */
127 struct dma_descr_data *prev_tr_descr;
128 int full;
130 /* Pointer to the first byte being read by DMA
131 * or current position in out_buffer if not using DMA. */
132 unsigned char *out_rd_ptr;
134 /* Number of bytes currently locked for being read by DMA */
135 int out_buf_count;
137 dma_descr_data in_descr[NBR_IN_DESCR] __attribute__ ((__aligned__(16)));
138 dma_descr_context in_context __attribute__ ((__aligned__(32)));
139 dma_descr_data out_descr[NBR_OUT_DESCR]
140 __attribute__ ((__aligned__(16)));
141 dma_descr_context out_context __attribute__ ((__aligned__(32)));
142 wait_queue_head_t out_wait_q;
143 wait_queue_head_t in_wait_q;
145 spinlock_t lock;
146 } sync_port;
148 static int etrax_sync_serial_init(void);
149 static void initialize_port(int portnbr);
150 static inline int sync_data_avail(struct sync_port *port);
152 static int sync_serial_open(struct inode *, struct file*);
153 static int sync_serial_release(struct inode*, struct file*);
154 static unsigned int sync_serial_poll(struct file *filp, poll_table *wait);
static long sync_serial_ioctl(struct file *file,
		unsigned int cmd, unsigned long arg);
158 static ssize_t sync_serial_write(struct file * file, const char * buf,
159 size_t count, loff_t *ppos);
160 static ssize_t sync_serial_read(struct file *file, char *buf,
161 size_t count, loff_t *ppos);
163 #if (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0) && \
164 defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)) || \
165 (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1) && \
166 defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA))
167 #define SYNC_SER_DMA
168 #endif
170 static void send_word(sync_port* port);
171 static void start_dma_out(struct sync_port *port, const char *data, int count);
172 static void start_dma_in(sync_port* port);
173 #ifdef SYNC_SER_DMA
174 static irqreturn_t tr_interrupt(int irq, void *dev_id);
175 static irqreturn_t rx_interrupt(int irq, void *dev_id);
176 #endif
178 #if (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0) && \
179 !defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)) || \
180 (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1) && \
181 !defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA))
182 #define SYNC_SER_MANUAL
183 #endif
184 #ifdef SYNC_SER_MANUAL
185 static irqreturn_t manual_interrupt(int irq, void *dev_id);
186 #endif
188 #ifdef CONFIG_ETRAXFS /* ETRAX FS */
189 #define OUT_DMA_NBR 4
190 #define IN_DMA_NBR 5
191 #define PINMUX_SSER pinmux_sser0
192 #define SYNCSER_INST regi_sser0
193 #define SYNCSER_INTR_VECT SSER0_INTR_VECT
194 #define OUT_DMA_INST regi_dma4
195 #define IN_DMA_INST regi_dma5
196 #define DMA_OUT_INTR_VECT DMA4_INTR_VECT
197 #define DMA_IN_INTR_VECT DMA5_INTR_VECT
198 #define REQ_DMA_SYNCSER dma_sser0
199 #else /* Artpec-3 */
200 #define OUT_DMA_NBR 6
201 #define IN_DMA_NBR 7
202 #define PINMUX_SSER pinmux_sser
203 #define SYNCSER_INST regi_sser
204 #define SYNCSER_INTR_VECT SSER_INTR_VECT
205 #define OUT_DMA_INST regi_dma6
206 #define IN_DMA_INST regi_dma7
207 #define DMA_OUT_INTR_VECT DMA6_INTR_VECT
208 #define DMA_IN_INTR_VECT DMA7_INTR_VECT
209 #define REQ_DMA_SYNCSER dma_sser
210 #endif
212 /* The ports */
static struct sync_port ports[] = {
	{
		.regi_sser = SYNCSER_INST,
		.regi_dmaout = OUT_DMA_INST,
		.regi_dmain = IN_DMA_INST,
#if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)
		.use_dma = 1,
#else
		.use_dma = 0,
#endif
	},
#ifdef CONFIG_ETRAXFS
	{
		.regi_sser = regi_sser1,
		.regi_dmaout = regi_dma6,
		.regi_dmain = regi_dma7,
#if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA)
		.use_dma = 1,
#else
		.use_dma = 0,
#endif
	},
#endif
};
241 #define NBR_PORTS ARRAY_SIZE(ports)
static const struct file_operations sync_serial_fops = {
	.owner = THIS_MODULE,
	.write = sync_serial_write,
	.read = sync_serial_read,
	.poll = sync_serial_poll,
	.unlocked_ioctl = sync_serial_ioctl,
	.open = sync_serial_open,
	.release = sync_serial_release
};
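
/*
 * Hypothetical user-space usage sketch (illustration only, not from this
 * file): the device node name and minor numbering are assumptions; the
 * ioctl command names and flag bits come from <asm/sync_serial.h>, and
 * their exact argument encoding is not reproduced here.
 *
 *	int fd = open("/dev/syncser0", O_RDWR);
 *	if (fd >= 0) {
 *		ioctl(fd, SSP_MODE, MASTER_OUTPUT);
 *		ioctl(fd, SSP_FRAME_SYNC,
 *		      NORMAL_SYNC | WORD_SIZE_8 | BIT_ORDER_MSB);
 *		write(fd, buf, len);  (may return a truncated count)
 *		close(fd);
 *	}
 */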
static int __init etrax_sync_serial_init(void)
{
	ports[0].enabled = 0;
#ifdef CONFIG_ETRAXFS
	ports[1].enabled = 0;
#endif
	if (register_chrdev(SYNC_SERIAL_MAJOR, "sync serial",
			&sync_serial_fops) < 0) {
		printk(KERN_WARNING
			"Unable to get major for synchronous serial port\n");
		return -EBUSY;
	}

	/* Initialize Ports */
#if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0)
	if (crisv32_pinmux_alloc_fixed(PINMUX_SSER)) {
		printk(KERN_WARNING
			"Unable to alloc pins for synchronous serial port 0\n");
		return -EIO;
	}
	ports[0].enabled = 1;
	initialize_port(0);
#endif

#if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1)
	if (crisv32_pinmux_alloc_fixed(pinmux_sser1)) {
		printk(KERN_WARNING
			"Unable to alloc pins for synchronous serial port 1\n");
		return -EIO;
	}
	ports[1].enabled = 1;
	initialize_port(1);
#endif

#ifdef CONFIG_ETRAXFS
	printk(KERN_INFO "ETRAX FS synchronous serial port driver\n");
#else
	printk(KERN_INFO "Artpec-3 synchronous serial port driver\n");
#endif
	return 0;
}
295 static void __init initialize_port(int portnbr)
297 int __attribute__((unused)) i;
298 struct sync_port *port = &ports[portnbr];
299 reg_sser_rw_cfg cfg = {0};
300 reg_sser_rw_frm_cfg frm_cfg = {0};
301 reg_sser_rw_tr_cfg tr_cfg = {0};
302 reg_sser_rw_rec_cfg rec_cfg = {0};
304 DEBUG(printk(KERN_DEBUG "Init sync serial port %d\n", portnbr));
306 port->port_nbr = portnbr;
307 port->init_irqs = 1;
309 port->out_rd_ptr = port->out_buffer;
310 port->out_buf_count = 0;
312 port->output = 1;
313 port->input = 0;
315 port->readp = port->flip;
316 port->writep = port->flip;
317 port->in_buffer_size = IN_BUFFER_SIZE;
318 port->inbufchunk = IN_DESCR_SIZE;
319 port->next_rx_desc = &port->in_descr[0];
320 port->prev_rx_desc = &port->in_descr[NBR_IN_DESCR-1];
321 port->prev_rx_desc->eol = 1;
323 init_waitqueue_head(&port->out_wait_q);
324 init_waitqueue_head(&port->in_wait_q);
326 spin_lock_init(&port->lock);
328 cfg.out_clk_src = regk_sser_intern_clk;
329 cfg.out_clk_pol = regk_sser_pos;
330 cfg.clk_od_mode = regk_sser_no;
331 cfg.clk_dir = regk_sser_out;
332 cfg.gate_clk = regk_sser_no;
333 cfg.base_freq = regk_sser_f29_493;
334 cfg.clk_div = 256;
335 REG_WR(sser, port->regi_sser, rw_cfg, cfg);
337 frm_cfg.wordrate = DEFAULT_WORD_RATE;
338 frm_cfg.type = regk_sser_edge;
339 frm_cfg.frame_pin_dir = regk_sser_out;
340 frm_cfg.frame_pin_use = regk_sser_frm;
341 frm_cfg.status_pin_dir = regk_sser_in;
342 frm_cfg.status_pin_use = regk_sser_hold;
343 frm_cfg.out_on = regk_sser_tr;
344 frm_cfg.tr_delay = 1;
345 REG_WR(sser, port->regi_sser, rw_frm_cfg, frm_cfg);
347 tr_cfg.urun_stop = regk_sser_no;
348 tr_cfg.sample_size = 7;
349 tr_cfg.sh_dir = regk_sser_msbfirst;
350 tr_cfg.use_dma = port->use_dma ? regk_sser_yes : regk_sser_no;
351 tr_cfg.rate_ctrl = regk_sser_iso;
352 tr_cfg.data_pin_use = regk_sser_dout;
353 tr_cfg.bulk_wspace = 1;
354 REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
356 rec_cfg.sample_size = 7;
357 rec_cfg.sh_dir = regk_sser_msbfirst;
358 rec_cfg.use_dma = port->use_dma ? regk_sser_yes : regk_sser_no;
359 rec_cfg.fifo_thr = regk_sser_inf;
360 REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
362 #ifdef SYNC_SER_DMA
363 /* Setup the descriptor ring for dma out/transmit. */
364 for (i = 0; i < NBR_OUT_DESCR; i++) {
365 port->out_descr[i].wait = 0;
366 port->out_descr[i].intr = 1;
367 port->out_descr[i].eol = 0;
368 port->out_descr[i].out_eop = 0;
369 port->out_descr[i].next =
370 (dma_descr_data *)virt_to_phys(&port->out_descr[i+1]);
373 /* Create a ring from the list. */
374 port->out_descr[NBR_OUT_DESCR-1].next =
375 (dma_descr_data *)virt_to_phys(&port->out_descr[0]);
377 /* Setup context for traversing the ring. */
378 port->active_tr_descr = &port->out_descr[0];
379 port->prev_tr_descr = &port->out_descr[NBR_OUT_DESCR-1];
380 port->catch_tr_descr = &port->out_descr[0];
381 #endif
384 static inline int sync_data_avail(struct sync_port *port)
386 int avail;
387 unsigned char *start;
388 unsigned char *end;
390 start = (unsigned char*)port->readp; /* cast away volatile */
391 end = (unsigned char*)port->writep; /* cast away volatile */
/* 0123456789 0123456789
 *  -----          -  -----
 *  ^rp  ^wp       ^wp ^rp
 */
397 if (end >= start)
398 avail = end - start;
399 else
400 avail = port->in_buffer_size - (start - end);
401 return avail;
404 static inline int sync_data_avail_to_end(struct sync_port *port)
406 int avail;
407 unsigned char *start;
408 unsigned char *end;
410 start = (unsigned char*)port->readp; /* cast away volatile */
411 end = (unsigned char*)port->writep; /* cast away volatile */
/* 0123456789 0123456789
 *  -----            -----
 *  ^rp  ^wp      ^wp  ^rp
 */
417 if (end >= start)
418 avail = end - start;
419 else
420 avail = port->flip + port->in_buffer_size - start;
421 return avail;
424 static int sync_serial_open(struct inode *inode, struct file *file)
426 int dev = iminor(inode);
427 int ret = -EBUSY;
428 sync_port *port;
429 reg_dma_rw_cfg cfg = {.en = regk_dma_yes};
430 reg_dma_rw_intr_mask intr_mask = {.data = regk_dma_yes};
432 lock_kernel();
433 DEBUG(printk(KERN_DEBUG "Open sync serial port %d\n", dev));
435 if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled)
437 DEBUG(printk(KERN_DEBUG "Invalid minor %d\n", dev));
438 ret = -ENODEV;
439 goto out;
441 port = &ports[dev];
442 /* Allow open this device twice (assuming one reader and one writer) */
443 if (port->busy == 2)
445 DEBUG(printk(KERN_DEBUG "Device is busy.. \n"));
446 goto out;
450 if (port->init_irqs) {
451 if (port->use_dma) {
452 if (port == &ports[0]) {
453 #ifdef SYNC_SER_DMA
454 if (request_irq(DMA_OUT_INTR_VECT,
455 tr_interrupt,
457 "synchronous serial 0 dma tr",
458 &ports[0])) {
459 printk(KERN_CRIT "Can't allocate sync serial port 0 IRQ");
460 goto out;
461 } else if (request_irq(DMA_IN_INTR_VECT,
462 rx_interrupt,
464 "synchronous serial 1 dma rx",
465 &ports[0])) {
466 free_irq(DMA_OUT_INTR_VECT, &port[0]);
467 printk(KERN_CRIT "Can't allocate sync serial port 0 IRQ");
468 goto out;
469 } else if (crisv32_request_dma(OUT_DMA_NBR,
470 "synchronous serial 0 dma tr",
471 DMA_VERBOSE_ON_ERROR,
473 REQ_DMA_SYNCSER)) {
474 free_irq(DMA_OUT_INTR_VECT, &port[0]);
475 free_irq(DMA_IN_INTR_VECT, &port[0]);
476 printk(KERN_CRIT "Can't allocate sync serial port 0 TX DMA channel");
477 goto out;
478 } else if (crisv32_request_dma(IN_DMA_NBR,
479 "synchronous serial 0 dma rec",
480 DMA_VERBOSE_ON_ERROR,
482 REQ_DMA_SYNCSER)) {
483 crisv32_free_dma(OUT_DMA_NBR);
484 free_irq(DMA_OUT_INTR_VECT, &port[0]);
485 free_irq(DMA_IN_INTR_VECT, &port[0]);
printk(KERN_CRIT "Can't allocate sync serial port 0 RX DMA channel");
487 goto out;
489 #endif
491 #ifdef CONFIG_ETRAXFS
492 else if (port == &ports[1]) {
493 #ifdef SYNC_SER_DMA
494 if (request_irq(DMA6_INTR_VECT,
495 tr_interrupt,
497 "synchronous serial 1 dma tr",
498 &ports[1])) {
499 printk(KERN_CRIT "Can't allocate sync serial port 1 IRQ");
500 goto out;
501 } else if (request_irq(DMA7_INTR_VECT,
502 rx_interrupt,
504 "synchronous serial 1 dma rx",
505 &ports[1])) {
506 free_irq(DMA6_INTR_VECT, &ports[1]);
printk(KERN_CRIT "Can't allocate sync serial port 1 IRQ");
508 goto out;
509 } else if (crisv32_request_dma(
510 SYNC_SER1_TX_DMA_NBR,
511 "synchronous serial 1 dma tr",
512 DMA_VERBOSE_ON_ERROR,
514 dma_sser1)) {
515 free_irq(DMA6_INTR_VECT, &ports[1]);
516 free_irq(DMA7_INTR_VECT, &ports[1]);
printk(KERN_CRIT "Can't allocate sync serial port 1 TX DMA channel");
518 goto out;
519 } else if (crisv32_request_dma(
520 SYNC_SER1_RX_DMA_NBR,
521 "synchronous serial 3 dma rec",
522 DMA_VERBOSE_ON_ERROR,
524 dma_sser1)) {
525 crisv32_free_dma(SYNC_SER1_TX_DMA_NBR);
526 free_irq(DMA6_INTR_VECT, &ports[1]);
527 free_irq(DMA7_INTR_VECT, &ports[1]);
printk(KERN_CRIT "Can't allocate sync serial port 1 RX DMA channel");
529 goto out;
531 #endif
533 #endif
534 /* Enable DMAs */
535 REG_WR(dma, port->regi_dmain, rw_cfg, cfg);
536 REG_WR(dma, port->regi_dmaout, rw_cfg, cfg);
537 /* Enable DMA IRQs */
538 REG_WR(dma, port->regi_dmain, rw_intr_mask, intr_mask);
539 REG_WR(dma, port->regi_dmaout, rw_intr_mask, intr_mask);
540 /* Set up wordsize = 1 for DMAs. */
541 DMA_WR_CMD (port->regi_dmain, regk_dma_set_w_size1);
542 DMA_WR_CMD (port->regi_dmaout, regk_dma_set_w_size1);
544 start_dma_in(port);
545 port->init_irqs = 0;
546 } else { /* !port->use_dma */
547 #ifdef SYNC_SER_MANUAL
548 if (port == &ports[0]) {
549 if (request_irq(SYNCSER_INTR_VECT,
550 manual_interrupt,
552 "synchronous serial manual irq",
553 &ports[0])) {
554 printk("Can't allocate sync serial manual irq");
555 goto out;
558 #ifdef CONFIG_ETRAXFS
559 else if (port == &ports[1]) {
560 if (request_irq(SSER1_INTR_VECT,
561 manual_interrupt,
563 "synchronous serial manual irq",
564 &ports[1])) {
565 printk(KERN_CRIT "Can't allocate sync serial manual irq");
566 goto out;
569 #endif
570 port->init_irqs = 0;
571 #else
572 panic("sync_serial: Manual mode not supported.\n");
573 #endif /* SYNC_SER_MANUAL */
576 } /* port->init_irqs */
578 port->busy++;
579 ret = 0;
580 out:
581 unlock_kernel();
582 return ret;
585 static int sync_serial_release(struct inode *inode, struct file *file)
587 int dev = iminor(inode);
588 sync_port *port;
590 if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled)
592 DEBUG(printk("Invalid minor %d\n", dev));
593 return -ENODEV;
595 port = &ports[dev];
596 if (port->busy)
597 port->busy--;
598 if (!port->busy)
600 return 0;
603 static unsigned int sync_serial_poll(struct file *file, poll_table *wait)
605 int dev = iminor(file->f_path.dentry->d_inode);
606 unsigned int mask = 0;
607 sync_port *port;
608 DEBUGPOLL( static unsigned int prev_mask = 0; );
610 port = &ports[dev];
612 if (!port->started) {
613 reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg);
614 reg_sser_rw_rec_cfg rec_cfg =
615 REG_RD(sser, port->regi_sser, rw_rec_cfg);
616 cfg.en = regk_sser_yes;
617 rec_cfg.rec_en = port->input;
618 REG_WR(sser, port->regi_sser, rw_cfg, cfg);
619 REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
620 port->started = 1;
623 poll_wait(file, &port->out_wait_q, wait);
624 poll_wait(file, &port->in_wait_q, wait);
626 /* No active transfer, descriptors are available */
627 if (port->output && !port->tr_running)
628 mask |= POLLOUT | POLLWRNORM;
630 /* Descriptor and buffer space available. */
631 if (port->output &&
632 port->active_tr_descr != port->catch_tr_descr &&
633 port->out_buf_count < OUT_BUFFER_SIZE)
634 mask |= POLLOUT | POLLWRNORM;
636 /* At least an inbufchunk of data */
637 if (port->input && sync_data_avail(port) >= port->inbufchunk)
638 mask |= POLLIN | POLLRDNORM;
640 DEBUGPOLL(if (mask != prev_mask)
641 printk("sync_serial_poll: mask 0x%08X %s %s\n", mask,
642 mask&POLLOUT?"POLLOUT":"", mask&POLLIN?"POLLIN":"");
643 prev_mask = mask;
645 return mask;
static int sync_serial_ioctl_unlocked(struct file *file,
		unsigned int cmd, unsigned long arg)
{
651 int return_val = 0;
652 int dma_w_size = regk_dma_set_w_size1;
653 int dev = iminor(file->f_path.dentry->d_inode);
654 sync_port *port;
655 reg_sser_rw_tr_cfg tr_cfg;
656 reg_sser_rw_rec_cfg rec_cfg;
657 reg_sser_rw_frm_cfg frm_cfg;
658 reg_sser_rw_cfg gen_cfg;
659 reg_sser_rw_intr_mask intr_mask;
661 if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled)
663 DEBUG(printk("Invalid minor %d\n", dev));
664 return -1;
666 port = &ports[dev];
667 spin_lock_irq(&port->lock);
669 tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg);
670 rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg);
671 frm_cfg = REG_RD(sser, port->regi_sser, rw_frm_cfg);
672 gen_cfg = REG_RD(sser, port->regi_sser, rw_cfg);
673 intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask);
675 switch(cmd)
677 case SSP_SPEED:
678 if (GET_SPEED(arg) == CODEC)
680 unsigned int freq;
682 gen_cfg.base_freq = regk_sser_f32;
/* Clock divider will internally be
 * gen_cfg.clk_div + 1.
 */
freq = GET_FREQ(arg);
689 switch (freq) {
690 case FREQ_32kHz:
691 case FREQ_64kHz:
692 case FREQ_128kHz:
693 case FREQ_256kHz:
694 gen_cfg.clk_div = 125 *
695 (1 << (freq - FREQ_256kHz)) - 1;
696 break;
697 case FREQ_512kHz:
698 gen_cfg.clk_div = 62;
699 break;
700 case FREQ_1MHz:
701 case FREQ_2MHz:
702 case FREQ_4MHz:
703 gen_cfg.clk_div = 8 * (1 << freq) - 1;
704 break;
706 } else {
707 gen_cfg.base_freq = regk_sser_f29_493;
708 switch (GET_SPEED(arg)) {
709 case SSP150:
710 gen_cfg.clk_div = 29493000 / (150 * 8) - 1;
711 break;
712 case SSP300:
713 gen_cfg.clk_div = 29493000 / (300 * 8) - 1;
714 break;
715 case SSP600:
716 gen_cfg.clk_div = 29493000 / (600 * 8) - 1;
717 break;
718 case SSP1200:
719 gen_cfg.clk_div = 29493000 / (1200 * 8) - 1;
720 break;
721 case SSP2400:
722 gen_cfg.clk_div = 29493000 / (2400 * 8) - 1;
723 break;
724 case SSP4800:
725 gen_cfg.clk_div = 29493000 / (4800 * 8) - 1;
726 break;
727 case SSP9600:
728 gen_cfg.clk_div = 29493000 / (9600 * 8) - 1;
729 break;
730 case SSP19200:
731 gen_cfg.clk_div = 29493000 / (19200 * 8) - 1;
732 break;
733 case SSP28800:
734 gen_cfg.clk_div = 29493000 / (28800 * 8) - 1;
735 break;
736 case SSP57600:
737 gen_cfg.clk_div = 29493000 / (57600 * 8) - 1;
738 break;
739 case SSP115200:
740 gen_cfg.clk_div = 29493000 / (115200 * 8) - 1;
741 break;
742 case SSP230400:
743 gen_cfg.clk_div = 29493000 / (230400 * 8) - 1;
744 break;
745 case SSP460800:
746 gen_cfg.clk_div = 29493000 / (460800 * 8) - 1;
747 break;
748 case SSP921600:
749 gen_cfg.clk_div = 29493000 / (921600 * 8) - 1;
750 break;
751 case SSP3125000:
752 gen_cfg.base_freq = regk_sser_f100;
753 gen_cfg.clk_div = 100000000 / (3125000 * 8) - 1;
754 break;
758 frm_cfg.wordrate = GET_WORD_RATE(arg);
760 break;
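
/*
 * Worked example of the divider arithmetic above (illustration only):
 * for SSP115200 the base frequency is 29.493 MHz, so
 *	clk_div = 29493000 / (115200 * 8) - 1 = 31,
 * and the hardware divides by clk_div + 1 = 32, giving roughly 921.7 kHz
 * (8 x the 115200 baud rate).
 */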
761 case SSP_MODE:
762 switch(arg)
764 case MASTER_OUTPUT:
765 port->output = 1;
766 port->input = 0;
767 frm_cfg.out_on = regk_sser_tr;
768 frm_cfg.frame_pin_dir = regk_sser_out;
769 gen_cfg.clk_dir = regk_sser_out;
770 break;
771 case SLAVE_OUTPUT:
772 port->output = 1;
773 port->input = 0;
774 frm_cfg.frame_pin_dir = regk_sser_in;
775 gen_cfg.clk_dir = regk_sser_in;
776 break;
777 case MASTER_INPUT:
778 port->output = 0;
779 port->input = 1;
780 frm_cfg.frame_pin_dir = regk_sser_out;
781 frm_cfg.out_on = regk_sser_intern_tb;
782 gen_cfg.clk_dir = regk_sser_out;
783 break;
784 case SLAVE_INPUT:
785 port->output = 0;
786 port->input = 1;
787 frm_cfg.frame_pin_dir = regk_sser_in;
788 gen_cfg.clk_dir = regk_sser_in;
789 break;
790 case MASTER_BIDIR:
791 port->output = 1;
792 port->input = 1;
793 frm_cfg.frame_pin_dir = regk_sser_out;
794 frm_cfg.out_on = regk_sser_intern_tb;
795 gen_cfg.clk_dir = regk_sser_out;
796 break;
797 case SLAVE_BIDIR:
798 port->output = 1;
799 port->input = 1;
800 frm_cfg.frame_pin_dir = regk_sser_in;
801 gen_cfg.clk_dir = regk_sser_in;
802 break;
803 default:
804 spin_unlock_irq(&port->lock);
805 return -EINVAL;
807 if (!port->use_dma || (arg == MASTER_OUTPUT || arg == SLAVE_OUTPUT))
808 intr_mask.rdav = regk_sser_yes;
809 break;
810 case SSP_FRAME_SYNC:
811 if (arg & NORMAL_SYNC) {
812 frm_cfg.rec_delay = 1;
813 frm_cfg.tr_delay = 1;
815 else if (arg & EARLY_SYNC)
816 frm_cfg.rec_delay = frm_cfg.tr_delay = 0;
817 else if (arg & SECOND_WORD_SYNC) {
818 frm_cfg.rec_delay = 7;
819 frm_cfg.tr_delay = 1;
822 tr_cfg.bulk_wspace = frm_cfg.tr_delay;
823 frm_cfg.early_wend = regk_sser_yes;
824 if (arg & BIT_SYNC)
825 frm_cfg.type = regk_sser_edge;
826 else if (arg & WORD_SYNC)
827 frm_cfg.type = regk_sser_level;
828 else if (arg & EXTENDED_SYNC)
829 frm_cfg.early_wend = regk_sser_no;
831 if (arg & SYNC_ON)
832 frm_cfg.frame_pin_use = regk_sser_frm;
833 else if (arg & SYNC_OFF)
834 frm_cfg.frame_pin_use = regk_sser_gio0;
836 dma_w_size = regk_dma_set_w_size2;
837 if (arg & WORD_SIZE_8) {
838 rec_cfg.sample_size = tr_cfg.sample_size = 7;
839 dma_w_size = regk_dma_set_w_size1;
840 } else if (arg & WORD_SIZE_12)
841 rec_cfg.sample_size = tr_cfg.sample_size = 11;
842 else if (arg & WORD_SIZE_16)
843 rec_cfg.sample_size = tr_cfg.sample_size = 15;
844 else if (arg & WORD_SIZE_24)
845 rec_cfg.sample_size = tr_cfg.sample_size = 23;
846 else if (arg & WORD_SIZE_32)
847 rec_cfg.sample_size = tr_cfg.sample_size = 31;
849 if (arg & BIT_ORDER_MSB)
850 rec_cfg.sh_dir = tr_cfg.sh_dir = regk_sser_msbfirst;
851 else if (arg & BIT_ORDER_LSB)
852 rec_cfg.sh_dir = tr_cfg.sh_dir = regk_sser_lsbfirst;
854 if (arg & FLOW_CONTROL_ENABLE) {
855 frm_cfg.status_pin_use = regk_sser_frm;
856 rec_cfg.fifo_thr = regk_sser_thr16;
857 } else if (arg & FLOW_CONTROL_DISABLE) {
858 frm_cfg.status_pin_use = regk_sser_gio0;
859 rec_cfg.fifo_thr = regk_sser_inf;
862 if (arg & CLOCK_NOT_GATED)
863 gen_cfg.gate_clk = regk_sser_no;
864 else if (arg & CLOCK_GATED)
865 gen_cfg.gate_clk = regk_sser_yes;
867 break;
868 case SSP_IPOLARITY:
869 /* NOTE!! negedge is considered NORMAL */
870 if (arg & CLOCK_NORMAL)
871 rec_cfg.clk_pol = regk_sser_neg;
872 else if (arg & CLOCK_INVERT)
873 rec_cfg.clk_pol = regk_sser_pos;
875 if (arg & FRAME_NORMAL)
876 frm_cfg.level = regk_sser_pos_hi;
877 else if (arg & FRAME_INVERT)
878 frm_cfg.level = regk_sser_neg_lo;
880 if (arg & STATUS_NORMAL)
881 gen_cfg.hold_pol = regk_sser_pos;
882 else if (arg & STATUS_INVERT)
883 gen_cfg.hold_pol = regk_sser_neg;
884 break;
885 case SSP_OPOLARITY:
886 if (arg & CLOCK_NORMAL)
887 gen_cfg.out_clk_pol = regk_sser_pos;
888 else if (arg & CLOCK_INVERT)
889 gen_cfg.out_clk_pol = regk_sser_neg;
891 if (arg & FRAME_NORMAL)
892 frm_cfg.level = regk_sser_pos_hi;
893 else if (arg & FRAME_INVERT)
894 frm_cfg.level = regk_sser_neg_lo;
896 if (arg & STATUS_NORMAL)
897 gen_cfg.hold_pol = regk_sser_pos;
898 else if (arg & STATUS_INVERT)
899 gen_cfg.hold_pol = regk_sser_neg;
900 break;
901 case SSP_SPI:
902 rec_cfg.fifo_thr = regk_sser_inf;
903 rec_cfg.sh_dir = tr_cfg.sh_dir = regk_sser_msbfirst;
904 rec_cfg.sample_size = tr_cfg.sample_size = 7;
905 frm_cfg.frame_pin_use = regk_sser_frm;
906 frm_cfg.type = regk_sser_level;
907 frm_cfg.tr_delay = 1;
908 frm_cfg.level = regk_sser_neg_lo;
909 if (arg & SPI_SLAVE)
911 rec_cfg.clk_pol = regk_sser_neg;
912 gen_cfg.clk_dir = regk_sser_in;
913 port->input = 1;
914 port->output = 0;
916 else
918 gen_cfg.out_clk_pol = regk_sser_pos;
919 port->input = 0;
920 port->output = 1;
921 gen_cfg.clk_dir = regk_sser_out;
923 break;
924 case SSP_INBUFCHUNK:
925 break;
926 default:
927 return_val = -1;
931 if (port->started) {
932 rec_cfg.rec_en = port->input;
933 gen_cfg.en = (port->output | port->input);
936 REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
937 REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
938 REG_WR(sser, port->regi_sser, rw_frm_cfg, frm_cfg);
939 REG_WR(sser, port->regi_sser, rw_intr_mask, intr_mask);
940 REG_WR(sser, port->regi_sser, rw_cfg, gen_cfg);
943 if (cmd == SSP_FRAME_SYNC && (arg & (WORD_SIZE_8 | WORD_SIZE_12 |
944 WORD_SIZE_16 | WORD_SIZE_24 | WORD_SIZE_32))) {
945 int en = gen_cfg.en;
946 gen_cfg.en = 0;
947 REG_WR(sser, port->regi_sser, rw_cfg, gen_cfg);
/* ##### Should DMA be stopped before we change dma size? */
949 DMA_WR_CMD(port->regi_dmain, dma_w_size);
950 DMA_WR_CMD(port->regi_dmaout, dma_w_size);
951 gen_cfg.en = en;
952 REG_WR(sser, port->regi_sser, rw_cfg, gen_cfg);
955 spin_unlock_irq(&port->lock);
956 return return_val;
959 static long sync_serial_ioctl(struct file *file,
960 unsigned int cmd, unsigned long arg)
962 long ret;
964 lock_kernel();
965 ret = sync_serial_ioctl_unlocked(file, cmd, arg);
966 unlock_kernel();
968 return ret;
971 /* NOTE: sync_serial_write does not support concurrency */
972 static ssize_t sync_serial_write(struct file *file, const char *buf,
973 size_t count, loff_t *ppos)
975 int dev = iminor(file->f_path.dentry->d_inode);
976 DECLARE_WAITQUEUE(wait, current);
977 struct sync_port *port;
978 int trunc_count;
979 unsigned long flags;
980 int bytes_free;
981 int out_buf_count;
983 unsigned char *rd_ptr; /* First allocated byte in the buffer */
984 unsigned char *wr_ptr; /* First free byte in the buffer */
985 unsigned char *buf_stop_ptr; /* Last byte + 1 */
987 if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) {
988 DEBUG(printk("Invalid minor %d\n", dev));
989 return -ENODEV;
991 port = &ports[dev];
/* |<-              OUT_BUFFER_SIZE                     ->|
 *           |<- out_buf_count ->|
 *                               |<- trunc_count ->| ...->|
 *  ______________________________________________________
 * |  free   |        data       |          free          |
 * |_________|___________________|________________________|
 *           ^ rd_ptr            ^ wr_ptr
 */
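
/*
 * Worked example for the logic below (illustration only): with
 * OUT_BUFFER_SIZE = 8192, rd_ptr = out_buffer + 100 and
 * out_buf_count = 300, the first free byte is wr_ptr = out_buffer + 400.
 * bytes_free starts as 8192 - 300 = 7892, but since wr_ptr + 7892 would
 * run past buf_stop_ptr it is clipped to 8192 - 400 = 7792, and a larger
 * request is truncated to that amount (the caller is expected to retry
 * with the remainder).
 */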
1001 DEBUGWRITE(printk(KERN_DEBUG "W d%d c %lu a: %p c: %p\n",
1002 port->port_nbr, count, port->active_tr_descr,
1003 port->catch_tr_descr));
1005 /* Read variables that may be updated by interrupts */
1006 spin_lock_irqsave(&port->lock, flags);
1007 rd_ptr = port->out_rd_ptr;
1008 out_buf_count = port->out_buf_count;
1009 spin_unlock_irqrestore(&port->lock, flags);
1011 /* Check if resources are available */
1012 if (port->tr_running &&
1013 ((port->use_dma && port->active_tr_descr == port->catch_tr_descr) ||
1014 out_buf_count >= OUT_BUFFER_SIZE)) {
1015 DEBUGWRITE(printk(KERN_DEBUG "sser%d full\n", dev));
1016 return -EAGAIN;
1019 buf_stop_ptr = port->out_buffer + OUT_BUFFER_SIZE;
1021 /* Determine pointer to the first free byte, before copying. */
1022 wr_ptr = rd_ptr + out_buf_count;
1023 if (wr_ptr >= buf_stop_ptr)
1024 wr_ptr -= OUT_BUFFER_SIZE;
/* If we wrap the ring buffer, let the user space program handle it by
 * truncating the data. This could be more elegant: small buffer
 * fragments may occur.
 */
1030 bytes_free = OUT_BUFFER_SIZE - out_buf_count;
1031 if (wr_ptr + bytes_free > buf_stop_ptr)
1032 bytes_free = buf_stop_ptr - wr_ptr;
1033 trunc_count = (count < bytes_free) ? count : bytes_free;
1035 if (copy_from_user(wr_ptr, buf, trunc_count))
1036 return -EFAULT;
1038 DEBUGOUTBUF(printk(KERN_DEBUG "%-4d + %-4d = %-4d %p %p %p\n",
1039 out_buf_count, trunc_count,
1040 port->out_buf_count, port->out_buffer,
1041 wr_ptr, buf_stop_ptr));
1043 /* Make sure transmitter/receiver is running */
1044 if (!port->started) {
1045 reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg);
1046 reg_sser_rw_rec_cfg rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg);
1047 cfg.en = regk_sser_yes;
1048 rec_cfg.rec_en = port->input;
1049 REG_WR(sser, port->regi_sser, rw_cfg, cfg);
1050 REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
1051 port->started = 1;
1054 /* Setup wait if blocking */
1055 if (!(file->f_flags & O_NONBLOCK)) {
1056 add_wait_queue(&port->out_wait_q, &wait);
1057 set_current_state(TASK_INTERRUPTIBLE);
1060 spin_lock_irqsave(&port->lock, flags);
1061 port->out_buf_count += trunc_count;
1062 if (port->use_dma) {
1063 start_dma_out(port, wr_ptr, trunc_count);
1064 } else if (!port->tr_running) {
1065 reg_sser_rw_intr_mask intr_mask;
1066 intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask);
1067 /* Start sender by writing data */
1068 send_word(port);
1069 /* and enable transmitter ready IRQ */
1070 intr_mask.trdy = 1;
1071 REG_WR(sser, port->regi_sser, rw_intr_mask, intr_mask);
1073 spin_unlock_irqrestore(&port->lock, flags);
1075 /* Exit if non blocking */
1076 if (file->f_flags & O_NONBLOCK) {
1077 DEBUGWRITE(printk(KERN_DEBUG "w d%d c %lu %08x\n",
1078 port->port_nbr, trunc_count,
1079 REG_RD_INT(dma, port->regi_dmaout, r_intr)));
1080 return trunc_count;
1083 schedule();
1084 set_current_state(TASK_RUNNING);
1085 remove_wait_queue(&port->out_wait_q, &wait);
1087 if (signal_pending(current))
1088 return -EINTR;
1090 DEBUGWRITE(printk(KERN_DEBUG "w d%d c %lu\n",
1091 port->port_nbr, trunc_count));
1092 return trunc_count;
1095 static ssize_t sync_serial_read(struct file * file, char * buf,
1096 size_t count, loff_t *ppos)
1098 int dev = iminor(file->f_path.dentry->d_inode);
1099 int avail;
1100 sync_port *port;
1101 unsigned char* start;
1102 unsigned char* end;
1103 unsigned long flags;
1105 if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled)
1107 DEBUG(printk("Invalid minor %d\n", dev));
1108 return -ENODEV;
1110 port = &ports[dev];
1112 DEBUGREAD(printk("R%d c %d ri %lu wi %lu /%lu\n", dev, count, port->readp - port->flip, port->writep - port->flip, port->in_buffer_size));
1114 if (!port->started)
1116 reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg);
1117 reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg);
1118 reg_sser_rw_rec_cfg rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg);
1119 cfg.en = regk_sser_yes;
1120 tr_cfg.tr_en = regk_sser_yes;
1121 rec_cfg.rec_en = regk_sser_yes;
1122 REG_WR(sser, port->regi_sser, rw_cfg, cfg);
1123 REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
1124 REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
1125 port->started = 1;
1128 /* Calculate number of available bytes */
1129 /* Save pointers to avoid that they are modified by interrupt */
1130 spin_lock_irqsave(&port->lock, flags);
1131 start = (unsigned char*)port->readp; /* cast away volatile */
1132 end = (unsigned char*)port->writep; /* cast away volatile */
1133 spin_unlock_irqrestore(&port->lock, flags);
1134 while ((start == end) && !port->full) /* No data */
1136 DEBUGREAD(printk(KERN_DEBUG "&"));
1137 if (file->f_flags & O_NONBLOCK)
1138 return -EAGAIN;
1140 interruptible_sleep_on(&port->in_wait_q);
1141 if (signal_pending(current))
1142 return -EINTR;
1144 spin_lock_irqsave(&port->lock, flags);
1145 start = (unsigned char*)port->readp; /* cast away volatile */
1146 end = (unsigned char*)port->writep; /* cast away volatile */
1147 spin_unlock_irqrestore(&port->lock, flags);
1150 /* Lazy read, never return wrapped data. */
1151 if (port->full)
1152 avail = port->in_buffer_size;
1153 else if (end > start)
1154 avail = end - start;
1155 else
1156 avail = port->flip + port->in_buffer_size - start;
1158 count = count > avail ? avail : count;
1159 if (copy_to_user(buf, start, count))
1160 return -EFAULT;
1161 /* Disable interrupts while updating readp */
1162 spin_lock_irqsave(&port->lock, flags);
1163 port->readp += count;
1164 if (port->readp >= port->flip + port->in_buffer_size) /* Wrap? */
1165 port->readp = port->flip;
1166 port->full = 0;
1167 spin_unlock_irqrestore(&port->lock, flags);
1168 DEBUGREAD(printk("r %d\n", count));
1169 return count;
1172 static void send_word(sync_port* port)
1174 reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg);
1175 reg_sser_rw_tr_data tr_data = {0};
1177 switch(tr_cfg.sample_size)
1179 case 8:
1180 port->out_buf_count--;
1181 tr_data.data = *port->out_rd_ptr++;
1182 REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
1183 if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
1184 port->out_rd_ptr = port->out_buffer;
1185 break;
1186 case 12:
1188 int data = (*port->out_rd_ptr++) << 8;
1189 data |= *port->out_rd_ptr++;
1190 port->out_buf_count -= 2;
1191 tr_data.data = data;
1192 REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
1193 if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
1194 port->out_rd_ptr = port->out_buffer;
1196 break;
1197 case 16:
1198 port->out_buf_count -= 2;
1199 tr_data.data = *(unsigned short *)port->out_rd_ptr;
1200 REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
1201 port->out_rd_ptr += 2;
1202 if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
1203 port->out_rd_ptr = port->out_buffer;
1204 break;
1205 case 24:
1206 port->out_buf_count -= 3;
1207 tr_data.data = *(unsigned short *)port->out_rd_ptr;
1208 REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
1209 port->out_rd_ptr += 2;
1210 tr_data.data = *port->out_rd_ptr++;
1211 REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
1212 if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
1213 port->out_rd_ptr = port->out_buffer;
1214 break;
1215 case 32:
1216 port->out_buf_count -= 4;
1217 tr_data.data = *(unsigned short *)port->out_rd_ptr;
1218 REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
1219 port->out_rd_ptr += 2;
1220 tr_data.data = *(unsigned short *)port->out_rd_ptr;
1221 REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
1222 port->out_rd_ptr += 2;
1223 if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
1224 port->out_rd_ptr = port->out_buffer;
1225 break;
1229 static void start_dma_out(struct sync_port *port,
1230 const char *data, int count)
1232 port->active_tr_descr->buf = (char *) virt_to_phys((char *) data);
1233 port->active_tr_descr->after = port->active_tr_descr->buf + count;
1234 port->active_tr_descr->intr = 1;
1236 port->active_tr_descr->eol = 1;
1237 port->prev_tr_descr->eol = 0;
1239 DEBUGTRDMA(printk(KERN_DEBUG "Inserting eolr:%p eol@:%p\n",
1240 port->prev_tr_descr, port->active_tr_descr));
1241 port->prev_tr_descr = port->active_tr_descr;
1242 port->active_tr_descr = phys_to_virt((int) port->active_tr_descr->next);
1244 if (!port->tr_running) {
1245 reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser,
1246 rw_tr_cfg);
1248 port->out_context.next = 0;
1249 port->out_context.saved_data =
1250 (dma_descr_data *)virt_to_phys(port->prev_tr_descr);
1251 port->out_context.saved_data_buf = port->prev_tr_descr->buf;
1253 DMA_START_CONTEXT(port->regi_dmaout,
1254 virt_to_phys((char *)&port->out_context));
1256 tr_cfg.tr_en = regk_sser_yes;
1257 REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
1258 DEBUGTRDMA(printk(KERN_DEBUG "dma s\n"););
1259 } else {
1260 DMA_CONTINUE_DATA(port->regi_dmaout);
1261 DEBUGTRDMA(printk(KERN_DEBUG "dma c\n"););
1264 port->tr_running = 1;
1267 static void start_dma_in(sync_port *port)
1269 int i;
1270 char *buf;
1271 port->writep = port->flip;
1273 if (port->writep > port->flip + port->in_buffer_size) {
1274 panic("Offset too large in sync serial driver\n");
1275 return;
1277 buf = (char*)virt_to_phys(port->in_buffer);
1278 for (i = 0; i < NBR_IN_DESCR; i++) {
1279 port->in_descr[i].buf = buf;
1280 port->in_descr[i].after = buf + port->inbufchunk;
1281 port->in_descr[i].intr = 1;
1282 port->in_descr[i].next = (dma_descr_data*)virt_to_phys(&port->in_descr[i+1]);
1283 port->in_descr[i].buf = buf;
1284 buf += port->inbufchunk;
1286 /* Link the last descriptor to the first */
1287 port->in_descr[i-1].next = (dma_descr_data*)virt_to_phys(&port->in_descr[0]);
1288 port->in_descr[i-1].eol = regk_sser_yes;
1289 port->next_rx_desc = &port->in_descr[0];
1290 port->prev_rx_desc = &port->in_descr[NBR_IN_DESCR - 1];
1291 port->in_context.saved_data = (dma_descr_data*)virt_to_phys(&port->in_descr[0]);
1292 port->in_context.saved_data_buf = port->in_descr[0].buf;
1293 DMA_START_CONTEXT(port->regi_dmain, virt_to_phys(&port->in_context));
1296 #ifdef SYNC_SER_DMA
1297 static irqreturn_t tr_interrupt(int irq, void *dev_id)
1299 reg_dma_r_masked_intr masked;
1300 reg_dma_rw_ack_intr ack_intr = {.data = regk_dma_yes};
1301 reg_dma_rw_stat stat;
1302 int i;
1303 int found = 0;
1304 int stop_sser = 0;
1306 for (i = 0; i < NBR_PORTS; i++) {
1307 sync_port *port = &ports[i];
1308 if (!port->enabled || !port->use_dma)
1309 continue;
1311 /* IRQ active for the port? */
1312 masked = REG_RD(dma, port->regi_dmaout, r_masked_intr);
1313 if (!masked.data)
1314 continue;
1316 found = 1;
1318 /* Check if we should stop the DMA transfer */
1319 stat = REG_RD(dma, port->regi_dmaout, rw_stat);
1320 if (stat.list_state == regk_dma_data_at_eol)
1321 stop_sser = 1;
1323 /* Clear IRQ */
1324 REG_WR(dma, port->regi_dmaout, rw_ack_intr, ack_intr);
1326 if (!stop_sser) {
1327 /* The DMA has completed a descriptor, EOL was not
1328 * encountered, so step relevant descriptor and
1329 * datapointers forward. */
1330 int sent;
1331 sent = port->catch_tr_descr->after -
1332 port->catch_tr_descr->buf;
1333 DEBUGTXINT(printk(KERN_DEBUG "%-4d - %-4d = %-4d\t"
1334 "in descr %p (ac: %p)\n",
1335 port->out_buf_count, sent,
1336 port->out_buf_count - sent,
1337 port->catch_tr_descr,
1338 port->active_tr_descr););
1339 port->out_buf_count -= sent;
1340 port->catch_tr_descr =
1341 phys_to_virt((int) port->catch_tr_descr->next);
1342 port->out_rd_ptr =
1343 phys_to_virt((int) port->catch_tr_descr->buf);
1344 } else {
1345 int i, sent;
1346 /* EOL handler.
1347 * Note that if an EOL was encountered during the irq
1348 * locked section of sync_ser_write the DMA will be
1349 * restarted and the eol flag will be cleared.
1350 * The remaining descriptors will be traversed by
1351 * the descriptor interrupts as usual.
1353 i = 0;
1354 while (!port->catch_tr_descr->eol) {
1355 sent = port->catch_tr_descr->after -
1356 port->catch_tr_descr->buf;
1357 DEBUGOUTBUF(printk(KERN_DEBUG
1358 "traversing descr %p -%d (%d)\n",
1359 port->catch_tr_descr,
1360 sent,
1361 port->out_buf_count));
1362 port->out_buf_count -= sent;
1363 port->catch_tr_descr = phys_to_virt(
1364 (int)port->catch_tr_descr->next);
1365 i++;
1366 if (i >= NBR_OUT_DESCR) {
1367 /* TODO: Reset and recover */
1368 panic("sync_serial: missing eol");
1371 sent = port->catch_tr_descr->after -
1372 port->catch_tr_descr->buf;
1373 DEBUGOUTBUF(printk(KERN_DEBUG
1374 "eol at descr %p -%d (%d)\n",
1375 port->catch_tr_descr,
1376 sent,
1377 port->out_buf_count));
1379 port->out_buf_count -= sent;
1381 /* Update read pointer to first free byte, we
1382 * may already be writing data there. */
1383 port->out_rd_ptr =
1384 phys_to_virt((int) port->catch_tr_descr->after);
1385 if (port->out_rd_ptr > port->out_buffer +
1386 OUT_BUFFER_SIZE)
1387 port->out_rd_ptr = port->out_buffer;
1389 reg_sser_rw_tr_cfg tr_cfg =
1390 REG_RD(sser, port->regi_sser, rw_tr_cfg);
1391 DEBUGTXINT(printk(KERN_DEBUG
1392 "tr_int DMA stop %d, set catch @ %p\n",
1393 port->out_buf_count,
1394 port->active_tr_descr));
1395 if (port->out_buf_count != 0)
1396 printk(KERN_CRIT "sync_ser: buffer not "
1397 "empty after eol.\n");
1398 port->catch_tr_descr = port->active_tr_descr;
1399 port->tr_running = 0;
1400 tr_cfg.tr_en = regk_sser_no;
1401 REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
1403 /* wake up the waiting process */
1404 wake_up_interruptible(&port->out_wait_q);
1406 return IRQ_RETVAL(found);
1407 } /* tr_interrupt */
1409 static irqreturn_t rx_interrupt(int irq, void *dev_id)
1411 reg_dma_r_masked_intr masked;
1412 reg_dma_rw_ack_intr ack_intr = {.data = regk_dma_yes};
1414 int i;
1415 int found = 0;
1417 for (i = 0; i < NBR_PORTS; i++)
1419 sync_port *port = &ports[i];
1421 if (!port->enabled || !port->use_dma )
1422 continue;
1424 masked = REG_RD(dma, port->regi_dmain, r_masked_intr);
1426 if (masked.data) /* Descriptor interrupt */
1428 found = 1;
1429 while (REG_RD(dma, port->regi_dmain, rw_data) !=
1430 virt_to_phys(port->next_rx_desc)) {
1431 DEBUGRXINT(printk(KERN_DEBUG "!"));
1432 if (port->writep + port->inbufchunk > port->flip + port->in_buffer_size) {
1433 int first_size = port->flip + port->in_buffer_size - port->writep;
1434 memcpy((char*)port->writep, phys_to_virt((unsigned)port->next_rx_desc->buf), first_size);
1435 memcpy(port->flip, phys_to_virt((unsigned)port->next_rx_desc->buf+first_size), port->inbufchunk - first_size);
1436 port->writep = port->flip + port->inbufchunk - first_size;
1437 } else {
1438 memcpy((char*)port->writep,
1439 phys_to_virt((unsigned)port->next_rx_desc->buf),
1440 port->inbufchunk);
1441 port->writep += port->inbufchunk;
1442 if (port->writep >= port->flip + port->in_buffer_size)
1443 port->writep = port->flip;
1445 if (port->writep == port->readp)
1447 port->full = 1;
1450 port->next_rx_desc->eol = 1;
1451 port->prev_rx_desc->eol = 0;
1452 flush_dma_descr(port->prev_rx_desc, 0);
1453 port->prev_rx_desc = port->next_rx_desc;
1454 port->next_rx_desc = phys_to_virt((unsigned)port->next_rx_desc->next);
1455 flush_dma_descr(port->prev_rx_desc, 1);
1456 /* wake up the waiting process */
1457 wake_up_interruptible(&port->in_wait_q);
1458 DMA_CONTINUE(port->regi_dmain);
1459 REG_WR(dma, port->regi_dmain, rw_ack_intr, ack_intr);
1464 return IRQ_RETVAL(found);
1465 } /* rx_interrupt */
1466 #endif /* SYNC_SER_DMA */
1468 #ifdef SYNC_SER_MANUAL
1469 static irqreturn_t manual_interrupt(int irq, void *dev_id)
1471 int i;
1472 int found = 0;
1473 reg_sser_r_masked_intr masked;
1475 for (i = 0; i < NBR_PORTS; i++)
1477 sync_port *port = &ports[i];
1479 if (!port->enabled || port->use_dma)
1481 continue;
1484 masked = REG_RD(sser, port->regi_sser, r_masked_intr);
1485 if (masked.rdav) /* Data received? */
1487 reg_sser_rw_rec_cfg rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg);
1488 reg_sser_r_rec_data data = REG_RD(sser, port->regi_sser, r_rec_data);
1489 found = 1;
1490 /* Read data */
1491 switch(rec_cfg.sample_size)
1493 case 8:
1494 *port->writep++ = data.data & 0xff;
1495 break;
1496 case 12:
1497 *port->writep = (data.data & 0x0ff0) >> 4;
1498 *(port->writep + 1) = data.data & 0x0f;
1499 port->writep+=2;
1500 break;
1501 case 16:
1502 *(unsigned short*)port->writep = data.data;
1503 port->writep+=2;
1504 break;
1505 case 24:
1506 *(unsigned int*)port->writep = data.data;
1507 port->writep+=3;
1508 break;
1509 case 32:
1510 *(unsigned int*)port->writep = data.data;
1511 port->writep+=4;
1512 break;
1515 if (port->writep >= port->flip + port->in_buffer_size) /* Wrap? */
1516 port->writep = port->flip;
1517 if (port->writep == port->readp) {
/* receive buffer overrun, discard oldest data */
1520 port->readp++;
1521 if (port->readp >= port->flip + port->in_buffer_size) /* Wrap? */
1522 port->readp = port->flip;
1524 if (sync_data_avail(port) >= port->inbufchunk)
1525 wake_up_interruptible(&port->in_wait_q); /* Wake up application */
1528 if (masked.trdy) /* Transmitter ready? */
1530 found = 1;
1531 if (port->out_buf_count > 0) /* More data to send */
1532 send_word(port);
1533 else /* transmission finished */
1535 reg_sser_rw_intr_mask intr_mask;
1536 intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask);
1537 intr_mask.trdy = 0;
1538 REG_WR(sser, port->regi_sser, rw_intr_mask, intr_mask);
1539 wake_up_interruptible(&port->out_wait_q); /* Wake up application */
1543 return IRQ_RETVAL(found);
1545 #endif
1547 module_init(etrax_sync_serial_init);