cris: autoconvert trivial BKL users
arch/cris/arch-v32/drivers/sync_serial.c
blob a01ae9f3b814575c9e226a1958c9eb7308436d35
1 /*
2 * Simple synchronous serial port driver for ETRAX FS and Artpec-3.
4 * Copyright (c) 2005 Axis Communications AB
6 * Author: Mikael Starvik
8 */
10 #include <linux/module.h>
11 #include <linux/kernel.h>
12 #include <linux/types.h>
13 #include <linux/errno.h>
14 #include <linux/major.h>
15 #include <linux/sched.h>
16 #include <linux/mutex.h>
17 #include <linux/interrupt.h>
18 #include <linux/poll.h>
19 #include <linux/init.h>
20 #include <linux/timer.h>
21 #include <linux/spinlock.h>
23 #include <asm/io.h>
24 #include <dma.h>
25 #include <pinmux.h>
26 #include <hwregs/reg_rdwr.h>
27 #include <hwregs/sser_defs.h>
28 #include <hwregs/dma_defs.h>
29 #include <hwregs/dma.h>
30 #include <hwregs/intr_vect_defs.h>
31 #include <hwregs/intr_vect.h>
32 #include <hwregs/reg_map.h>
33 #include <asm/sync_serial.h>
36 /* The receiver is a bit tricky because of the continuous stream of data.*/
37 /* */
38 /* Three DMA descriptors are linked together. Each DMA descriptor is */
39 /* responsible for port->bufchunk of a common buffer. */
40 /* */
41 /* +---------------------------------------------+ */
42 /* | +----------+ +----------+ +----------+ | */
43 /* +-> | Descr[0] |-->| Descr[1] |-->| Descr[2] |-+ */
44 /* +----------+ +----------+ +----------+ */
45 /* | | | */
46 /* v v v */
47 /* +-------------------------------------+ */
48 /* | BUFFER | */
49 /* +-------------------------------------+ */
50 /* |<- data_avail ->| */
51 /* readp writep */
52 /* */
53 /* If the application keeps up the pace readp will be right after writep.*/
54 /* If the application can't keep the pace we have to throw away data. */
55 /* The idea is that readp should be ready with the data pointed out by */
56 /* Descr[i] when the DMA has filled in Descr[i+1]. */
57 /* Otherwise we will discard */
58 /* the rest of the data pointed out by Descr[i] and set readp to the */
59 /* start of Descr[i+1]. */
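/* Illustrative sketch (not part of the original driver): the amount of
 * readable data in the circular flip buffer follows directly from the
 * picture above.  With IN_BUFFER_SIZE == 12288, readp at offset 8000 and
 * writep at offset 200 (the writer has wrapped), 12288 - (8000 - 200) =
 * 4488 bytes are available.  A minimal stand-alone version of the
 * computation that sync_data_avail() below performs:
 */
#if 0
static int ring_data_avail(int buf_size, const unsigned char *readp,
			   const unsigned char *writep)
{
	/* Writer ahead of (or at) the reader: data lies in one chunk. */
	if (writep >= readp)
		return writep - readp;
	/* Writer has wrapped: everything except the gap is readable. */
	return buf_size - (readp - writep);
}
#endif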
61 #define SYNC_SERIAL_MAJOR 125
63 /* IN_BUFFER_SIZE should be a multiple of 6 to make sure that 24 bit */
64 /* words can be handled */
65 #define IN_BUFFER_SIZE 12288
66 #define IN_DESCR_SIZE 256
67 #define NBR_IN_DESCR (IN_BUFFER_SIZE/IN_DESCR_SIZE)
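/* Illustrative sketch (not in the original driver): the constants above
 * give IN_BUFFER_SIZE / IN_DESCR_SIZE = 12288 / 256 = 48 input
 * descriptors, and 12288 is a multiple of 6 as the comment above requires
 * for 24-bit words.  Both properties could be enforced at build time,
 * e.g. near the top of etrax_sync_serial_init():
 */
#if 0
	BUILD_BUG_ON(IN_BUFFER_SIZE % 6 != 0);
	BUILD_BUG_ON(IN_BUFFER_SIZE % IN_DESCR_SIZE != 0);
#endif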
69 #define OUT_BUFFER_SIZE (1024 * 8)
70 #define NBR_OUT_DESCR 8
72 #define DEFAULT_FRAME_RATE 0
73 #define DEFAULT_WORD_RATE 7
75 /* NOTE: Enabling some debug will likely cause overrun or underrun,
76 * especially if manual mode is used.
77 */
78 #define DEBUG(x)
79 #define DEBUGREAD(x)
80 #define DEBUGWRITE(x)
81 #define DEBUGPOLL(x)
82 #define DEBUGRXINT(x)
83 #define DEBUGTXINT(x)
84 #define DEBUGTRDMA(x)
85 #define DEBUGOUTBUF(x)
87 typedef struct sync_port
89 reg_scope_instances regi_sser;
90 reg_scope_instances regi_dmain;
91 reg_scope_instances regi_dmaout;
93 char started; /* 1 if port has been started */
94 char port_nbr; /* Port 0 or 1 */
95 char busy; /* 1 if port is busy */
97 char enabled; /* 1 if port is enabled */
98 char use_dma; /* 1 if port uses dma */
99 char tr_running;
101 char init_irqs;
102 int output;
103 int input;
105 /* Next byte to be read by application */
106 volatile unsigned char *volatile readp;
107 /* Next byte to be written by etrax */
108 volatile unsigned char *volatile writep;
110 unsigned int in_buffer_size;
111 unsigned int inbufchunk;
112 unsigned char out_buffer[OUT_BUFFER_SIZE] __attribute__ ((aligned(32)));
113 unsigned char in_buffer[IN_BUFFER_SIZE]__attribute__ ((aligned(32)));
114 unsigned char flip[IN_BUFFER_SIZE] __attribute__ ((aligned(32)));
115 struct dma_descr_data* next_rx_desc;
116 struct dma_descr_data* prev_rx_desc;
118 /* Pointer to the first available descriptor in the ring,
119 * unless active_tr_descr == catch_tr_descr and a dma
120 * transfer is active */
121 struct dma_descr_data *active_tr_descr;
123 /* Pointer to the first allocated descriptor in the ring */
124 struct dma_descr_data *catch_tr_descr;
126 /* Pointer to the descriptor with the current end-of-list */
127 struct dma_descr_data *prev_tr_descr;
128 int full;
130 /* Pointer to the first byte being read by DMA
131 * or current position in out_buffer if not using DMA. */
132 unsigned char *out_rd_ptr;
134 /* Number of bytes currently locked for being read by DMA */
135 int out_buf_count;
137 dma_descr_data in_descr[NBR_IN_DESCR] __attribute__ ((__aligned__(16)));
138 dma_descr_context in_context __attribute__ ((__aligned__(32)));
139 dma_descr_data out_descr[NBR_OUT_DESCR]
140 __attribute__ ((__aligned__(16)));
141 dma_descr_context out_context __attribute__ ((__aligned__(32)));
142 wait_queue_head_t out_wait_q;
143 wait_queue_head_t in_wait_q;
145 spinlock_t lock;
146 } sync_port;
148 static DEFINE_MUTEX(sync_serial_mutex);
149 static int etrax_sync_serial_init(void);
150 static void initialize_port(int portnbr);
151 static inline int sync_data_avail(struct sync_port *port);
153 static int sync_serial_open(struct inode *, struct file*);
154 static int sync_serial_release(struct inode*, struct file*);
155 static unsigned int sync_serial_poll(struct file *filp, poll_table *wait);
157 static long sync_serial_ioctl(struct file *,
158 unsigned int cmd, unsigned long arg);
159 static ssize_t sync_serial_write(struct file * file, const char * buf,
160 size_t count, loff_t *ppos);
161 static ssize_t sync_serial_read(struct file *file, char *buf,
162 size_t count, loff_t *ppos);
164 #if (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0) && \
165 defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)) || \
166 (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1) && \
167 defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA))
168 #define SYNC_SER_DMA
169 #endif
171 static void send_word(sync_port* port);
172 static void start_dma_out(struct sync_port *port, const char *data, int count);
173 static void start_dma_in(sync_port* port);
174 #ifdef SYNC_SER_DMA
175 static irqreturn_t tr_interrupt(int irq, void *dev_id);
176 static irqreturn_t rx_interrupt(int irq, void *dev_id);
177 #endif
179 #if (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0) && \
180 !defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)) || \
181 (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1) && \
182 !defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA))
183 #define SYNC_SER_MANUAL
184 #endif
185 #ifdef SYNC_SER_MANUAL
186 static irqreturn_t manual_interrupt(int irq, void *dev_id);
187 #endif
189 #ifdef CONFIG_ETRAXFS /* ETRAX FS */
190 #define OUT_DMA_NBR 4
191 #define IN_DMA_NBR 5
192 #define PINMUX_SSER pinmux_sser0
193 #define SYNCSER_INST regi_sser0
194 #define SYNCSER_INTR_VECT SSER0_INTR_VECT
195 #define OUT_DMA_INST regi_dma4
196 #define IN_DMA_INST regi_dma5
197 #define DMA_OUT_INTR_VECT DMA4_INTR_VECT
198 #define DMA_IN_INTR_VECT DMA5_INTR_VECT
199 #define REQ_DMA_SYNCSER dma_sser0
200 #else /* Artpec-3 */
201 #define OUT_DMA_NBR 6
202 #define IN_DMA_NBR 7
203 #define PINMUX_SSER pinmux_sser
204 #define SYNCSER_INST regi_sser
205 #define SYNCSER_INTR_VECT SSER_INTR_VECT
206 #define OUT_DMA_INST regi_dma6
207 #define IN_DMA_INST regi_dma7
208 #define DMA_OUT_INTR_VECT DMA6_INTR_VECT
209 #define DMA_IN_INTR_VECT DMA7_INTR_VECT
210 #define REQ_DMA_SYNCSER dma_sser
211 #endif
213 /* The ports */
214 static struct sync_port ports[]=
217 .regi_sser = SYNCSER_INST,
218 .regi_dmaout = OUT_DMA_INST,
219 .regi_dmain = IN_DMA_INST,
220 #if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)
221 .use_dma = 1,
222 #else
223 .use_dma = 0,
224 #endif
226 #ifdef CONFIG_ETRAXFS
230 .regi_sser = regi_sser1,
231 .regi_dmaout = regi_dma6,
232 .regi_dmain = regi_dma7,
233 #if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA)
234 .use_dma = 1,
235 #else
236 .use_dma = 0,
237 #endif
239 #endif
242 #define NBR_PORTS ARRAY_SIZE(ports)
244 static const struct file_operations sync_serial_fops = {
245 .owner = THIS_MODULE,
246 .write = sync_serial_write,
247 .read = sync_serial_read,
248 .poll = sync_serial_poll,
249 .unlocked_ioctl = sync_serial_ioctl,
250 .open = sync_serial_open,
251 .release = sync_serial_release
254 static int __init etrax_sync_serial_init(void)
256 ports[0].enabled = 0;
257 #ifdef CONFIG_ETRAXFS
258 ports[1].enabled = 0;
259 #endif
260 if (register_chrdev(SYNC_SERIAL_MAJOR, "sync serial",
261 &sync_serial_fops) < 0) {
262 printk(KERN_WARNING
263 "Unable to get major for synchronous serial port\n");
264 return -EBUSY;
267 /* Initialize Ports */
268 #if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0)
269 if (crisv32_pinmux_alloc_fixed(PINMUX_SSER)) {
270 printk(KERN_WARNING
271 "Unable to alloc pins for synchronous serial port 0\n");
272 return -EIO;
274 ports[0].enabled = 1;
275 initialize_port(0);
276 #endif
278 #if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1)
279 if (crisv32_pinmux_alloc_fixed(pinmux_sser1)) {
280 printk(KERN_WARNING
281 "Unable to alloc pins for synchronous serial port 0\n");
282 return -EIO;
284 ports[1].enabled = 1;
285 initialize_port(1);
286 #endif
288 #ifdef CONFIG_ETRAXFS
289 printk(KERN_INFO "ETRAX FS synchronous serial port driver\n");
290 #else
291 printk(KERN_INFO "Artpec-3 synchronous serial port driver\n");
292 #endif
293 return 0;
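/* Usage note (illustrative, not from the original source): the driver
 * registers a character device with major SYNC_SERIAL_MAJOR (125) and
 * uses the minor number as the port index, so a node for port 0 could be
 * created with e.g. "mknod /dev/syncser0 c 125 0"; the node name is only
 * an example.
 */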
296 static void __init initialize_port(int portnbr)
298 int __attribute__((unused)) i;
299 struct sync_port *port = &ports[portnbr];
300 reg_sser_rw_cfg cfg = {0};
301 reg_sser_rw_frm_cfg frm_cfg = {0};
302 reg_sser_rw_tr_cfg tr_cfg = {0};
303 reg_sser_rw_rec_cfg rec_cfg = {0};
305 DEBUG(printk(KERN_DEBUG "Init sync serial port %d\n", portnbr));
307 port->port_nbr = portnbr;
308 port->init_irqs = 1;
310 port->out_rd_ptr = port->out_buffer;
311 port->out_buf_count = 0;
313 port->output = 1;
314 port->input = 0;
316 port->readp = port->flip;
317 port->writep = port->flip;
318 port->in_buffer_size = IN_BUFFER_SIZE;
319 port->inbufchunk = IN_DESCR_SIZE;
320 port->next_rx_desc = &port->in_descr[0];
321 port->prev_rx_desc = &port->in_descr[NBR_IN_DESCR-1];
322 port->prev_rx_desc->eol = 1;
324 init_waitqueue_head(&port->out_wait_q);
325 init_waitqueue_head(&port->in_wait_q);
327 spin_lock_init(&port->lock);
329 cfg.out_clk_src = regk_sser_intern_clk;
330 cfg.out_clk_pol = regk_sser_pos;
331 cfg.clk_od_mode = regk_sser_no;
332 cfg.clk_dir = regk_sser_out;
333 cfg.gate_clk = regk_sser_no;
334 cfg.base_freq = regk_sser_f29_493;
335 cfg.clk_div = 256;
336 REG_WR(sser, port->regi_sser, rw_cfg, cfg);
338 frm_cfg.wordrate = DEFAULT_WORD_RATE;
339 frm_cfg.type = regk_sser_edge;
340 frm_cfg.frame_pin_dir = regk_sser_out;
341 frm_cfg.frame_pin_use = regk_sser_frm;
342 frm_cfg.status_pin_dir = regk_sser_in;
343 frm_cfg.status_pin_use = regk_sser_hold;
344 frm_cfg.out_on = regk_sser_tr;
345 frm_cfg.tr_delay = 1;
346 REG_WR(sser, port->regi_sser, rw_frm_cfg, frm_cfg);
348 tr_cfg.urun_stop = regk_sser_no;
349 tr_cfg.sample_size = 7;
350 tr_cfg.sh_dir = regk_sser_msbfirst;
351 tr_cfg.use_dma = port->use_dma ? regk_sser_yes : regk_sser_no;
352 #if 0
353 tr_cfg.rate_ctrl = regk_sser_bulk;
354 tr_cfg.data_pin_use = regk_sser_dout;
355 #else
356 tr_cfg.rate_ctrl = regk_sser_iso;
357 tr_cfg.data_pin_use = regk_sser_dout;
358 #endif
359 tr_cfg.bulk_wspace = 1;
360 REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
362 rec_cfg.sample_size = 7;
363 rec_cfg.sh_dir = regk_sser_msbfirst;
364 rec_cfg.use_dma = port->use_dma ? regk_sser_yes : regk_sser_no;
365 rec_cfg.fifo_thr = regk_sser_inf;
366 REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
368 #ifdef SYNC_SER_DMA
369 /* Setup the descriptor ring for dma out/transmit. */
370 for (i = 0; i < NBR_OUT_DESCR; i++) {
371 port->out_descr[i].wait = 0;
372 port->out_descr[i].intr = 1;
373 port->out_descr[i].eol = 0;
374 port->out_descr[i].out_eop = 0;
375 port->out_descr[i].next =
376 (dma_descr_data *)virt_to_phys(&port->out_descr[i+1]);
379 /* Create a ring from the list. */
380 port->out_descr[NBR_OUT_DESCR-1].next =
381 (dma_descr_data *)virt_to_phys(&port->out_descr[0]);
383 /* Setup context for traversing the ring. */
384 port->active_tr_descr = &port->out_descr[0];
385 port->prev_tr_descr = &port->out_descr[NBR_OUT_DESCR-1];
386 port->catch_tr_descr = &port->out_descr[0];
387 #endif
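/* Illustrative sketch (not part of the original code): after the setup
 * above the NBR_OUT_DESCR (8) transmit descriptors form a closed ring;
 * following the physical next pointers NBR_OUT_DESCR times leads back to
 * the starting descriptor, which is what active_tr_descr, catch_tr_descr
 * and prev_tr_descr rely on when they walk the ring:
 */
#if 0
	{
		struct dma_descr_data *d = &port->out_descr[0];

		for (i = 0; i < NBR_OUT_DESCR; i++)
			d = phys_to_virt((int)d->next);
		BUG_ON(d != &port->out_descr[0]);
	}
#endif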
390 static inline int sync_data_avail(struct sync_port *port)
392 int avail;
393 unsigned char *start;
394 unsigned char *end;
396 start = (unsigned char*)port->readp; /* cast away volatile */
397 end = (unsigned char*)port->writep; /* cast away volatile */
398 /* 0123456789 0123456789
399 * ----- - -----
400 * ^rp ^wp ^wp ^rp
401 */
403 if (end >= start)
404 avail = end - start;
405 else
406 avail = port->in_buffer_size - (start - end);
407 return avail;
410 static inline int sync_data_avail_to_end(struct sync_port *port)
412 int avail;
413 unsigned char *start;
414 unsigned char *end;
416 start = (unsigned char*)port->readp; /* cast away volatile */
417 end = (unsigned char*)port->writep; /* cast away volatile */
418 /* 0123456789 0123456789
419 * ----- -----
420 * ^rp ^wp ^wp ^rp
421 */
423 if (end >= start)
424 avail = end - start;
425 else
426 avail = port->flip + port->in_buffer_size - start;
427 return avail;
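/* Worked example (illustrative): with in_buffer_size == 12288, readp at
 * offset 12000 and writep at offset 500, sync_data_avail() reports
 * 12288 - (12000 - 500) = 788 bytes in total, while
 * sync_data_avail_to_end() reports only the 12288 - 12000 = 288 bytes
 * that can be consumed before the read pointer wraps.
 */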
430 static int sync_serial_open(struct inode *inode, struct file *file)
432 int dev = iminor(inode);
433 int ret = -EBUSY;
434 sync_port *port;
435 reg_dma_rw_cfg cfg = {.en = regk_dma_yes};
436 reg_dma_rw_intr_mask intr_mask = {.data = regk_dma_yes};
438 mutex_lock(&sync_serial_mutex);
439 DEBUG(printk(KERN_DEBUG "Open sync serial port %d\n", dev));
441 if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled)
443 DEBUG(printk(KERN_DEBUG "Invalid minor %d\n", dev));
444 ret = -ENODEV;
445 goto out;
447 port = &ports[dev];
448 /* Allow open this device twice (assuming one reader and one writer) */
449 if (port->busy == 2)
451 DEBUG(printk(KERN_DEBUG "Device is busy.. \n"));
452 goto out;
456 if (port->init_irqs) {
457 if (port->use_dma) {
458 if (port == &ports[0]) {
459 #ifdef SYNC_SER_DMA
460 if (request_irq(DMA_OUT_INTR_VECT,
461 tr_interrupt,
463 "synchronous serial 0 dma tr",
464 &ports[0])) {
465 printk(KERN_CRIT "Can't allocate sync serial port 0 IRQ");
466 goto out;
467 } else if (request_irq(DMA_IN_INTR_VECT,
468 rx_interrupt,
470 "synchronous serial 1 dma rx",
471 &ports[0])) {
472 free_irq(DMA_OUT_INTR_VECT, &port[0]);
473 printk(KERN_CRIT "Can't allocate sync serial port 0 IRQ");
474 goto out;
475 } else if (crisv32_request_dma(OUT_DMA_NBR,
476 "synchronous serial 0 dma tr",
477 DMA_VERBOSE_ON_ERROR,
479 REQ_DMA_SYNCSER)) {
480 free_irq(DMA_OUT_INTR_VECT, &ports[0]);
481 free_irq(DMA_IN_INTR_VECT, &ports[0]);
482 printk(KERN_CRIT "Can't allocate sync serial port 0 TX DMA channel");
483 goto out;
484 } else if (crisv32_request_dma(IN_DMA_NBR,
485 "synchronous serial 0 dma rec",
486 DMA_VERBOSE_ON_ERROR,
488 REQ_DMA_SYNCSER)) {
489 crisv32_free_dma(OUT_DMA_NBR);
490 free_irq(DMA_OUT_INTR_VECT, &ports[0]);
491 free_irq(DMA_IN_INTR_VECT, &ports[0]);
492 printk(KERN_CRIT "Can't allocate sync serial port 0 RX DMA channel");
493 goto out;
495 #endif
497 #ifdef CONFIG_ETRAXFS
498 else if (port == &ports[1]) {
499 #ifdef SYNC_SER_DMA
500 if (request_irq(DMA6_INTR_VECT,
501 tr_interrupt,
503 "synchronous serial 1 dma tr",
504 &ports[1])) {
505 printk(KERN_CRIT "Can't allocate sync serial port 1 IRQ");
506 goto out;
507 } else if (request_irq(DMA7_INTR_VECT,
508 rx_interrupt,
510 "synchronous serial 1 dma rx",
511 &ports[1])) {
512 free_irq(DMA6_INTR_VECT, &ports[1]);
513 printk(KERN_CRIT "Can't allocate sync serial port 1 IRQ");
514 goto out;
515 } else if (crisv32_request_dma(
516 SYNC_SER1_TX_DMA_NBR,
517 "synchronous serial 1 dma tr",
518 DMA_VERBOSE_ON_ERROR,
520 dma_sser1)) {
521 free_irq(DMA6_INTR_VECT, &ports[1]);
522 free_irq(DMA7_INTR_VECT, &ports[1]);
523 printk(KERN_CRIT "Can't allocate sync serial port 1 TX DMA channel");
524 goto out;
525 } else if (crisv32_request_dma(
526 SYNC_SER1_RX_DMA_NBR,
527 "synchronous serial 3 dma rec",
528 DMA_VERBOSE_ON_ERROR,
530 dma_sser1)) {
531 crisv32_free_dma(SYNC_SER1_TX_DMA_NBR);
532 free_irq(DMA6_INTR_VECT, &ports[1]);
533 free_irq(DMA7_INTR_VECT, &ports[1]);
534 printk(KERN_CRIT "Can't allocate sync serial port 1 RX DMA channel");
535 goto out;
537 #endif
539 #endif
540 /* Enable DMAs */
541 REG_WR(dma, port->regi_dmain, rw_cfg, cfg);
542 REG_WR(dma, port->regi_dmaout, rw_cfg, cfg);
543 /* Enable DMA IRQs */
544 REG_WR(dma, port->regi_dmain, rw_intr_mask, intr_mask);
545 REG_WR(dma, port->regi_dmaout, rw_intr_mask, intr_mask);
546 /* Set up wordsize = 1 for DMAs. */
547 DMA_WR_CMD (port->regi_dmain, regk_dma_set_w_size1);
548 DMA_WR_CMD (port->regi_dmaout, regk_dma_set_w_size1);
550 start_dma_in(port);
551 port->init_irqs = 0;
552 } else { /* !port->use_dma */
553 #ifdef SYNC_SER_MANUAL
554 if (port == &ports[0]) {
555 if (request_irq(SYNCSER_INTR_VECT,
556 manual_interrupt,
558 "synchronous serial manual irq",
559 &ports[0])) {
560 printk("Can't allocate sync serial manual irq");
561 goto out;
564 #ifdef CONFIG_ETRAXFS
565 else if (port == &ports[1]) {
566 if (request_irq(SSER1_INTR_VECT,
567 manual_interrupt,
569 "synchronous serial manual irq",
570 &ports[1])) {
571 printk(KERN_CRIT "Can't allocate sync serial manual irq");
572 goto out;
575 #endif
576 port->init_irqs = 0;
577 #else
578 panic("sync_serial: Manual mode not supported.\n");
579 #endif /* SYNC_SER_MANUAL */
582 } /* port->init_irqs */
584 port->busy++;
585 ret = 0;
586 out:
587 mutex_unlock(&sync_serial_mutex);
588 return ret;
591 static int sync_serial_release(struct inode *inode, struct file *file)
593 int dev = iminor(inode);
594 sync_port *port;
596 if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled)
598 DEBUG(printk("Invalid minor %d\n", dev));
599 return -ENODEV;
601 port = &ports[dev];
602 if (port->busy)
603 port->busy--;
604 if (!port->busy)
605 /* XXX */ ;
606 return 0;
609 static unsigned int sync_serial_poll(struct file *file, poll_table *wait)
611 int dev = iminor(file->f_path.dentry->d_inode);
612 unsigned int mask = 0;
613 sync_port *port;
614 DEBUGPOLL( static unsigned int prev_mask = 0; );
616 port = &ports[dev];
618 if (!port->started) {
619 reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg);
620 reg_sser_rw_rec_cfg rec_cfg =
621 REG_RD(sser, port->regi_sser, rw_rec_cfg);
622 cfg.en = regk_sser_yes;
623 rec_cfg.rec_en = port->input;
624 REG_WR(sser, port->regi_sser, rw_cfg, cfg);
625 REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
626 port->started = 1;
629 poll_wait(file, &port->out_wait_q, wait);
630 poll_wait(file, &port->in_wait_q, wait);
632 /* No active transfer, descriptors are available */
633 if (port->output && !port->tr_running)
634 mask |= POLLOUT | POLLWRNORM;
636 /* Descriptor and buffer space available. */
637 if (port->output &&
638 port->active_tr_descr != port->catch_tr_descr &&
639 port->out_buf_count < OUT_BUFFER_SIZE)
640 mask |= POLLOUT | POLLWRNORM;
642 /* At least an inbufchunk of data */
643 if (port->input && sync_data_avail(port) >= port->inbufchunk)
644 mask |= POLLIN | POLLRDNORM;
646 DEBUGPOLL(if (mask != prev_mask)
647 printk("sync_serial_poll: mask 0x%08X %s %s\n", mask,
648 mask&POLLOUT?"POLLOUT":"", mask&POLLIN?"POLLIN":"");
649 prev_mask = mask;
651 return mask;
654 static int sync_serial_ioctl_unlocked(struct file *file,
655 unsigned int cmd, unsigned long arg)
657 int return_val = 0;
658 int dma_w_size = regk_dma_set_w_size1;
659 int dev = iminor(file->f_path.dentry->d_inode);
660 sync_port *port;
661 reg_sser_rw_tr_cfg tr_cfg;
662 reg_sser_rw_rec_cfg rec_cfg;
663 reg_sser_rw_frm_cfg frm_cfg;
664 reg_sser_rw_cfg gen_cfg;
665 reg_sser_rw_intr_mask intr_mask;
667 if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled)
669 DEBUG(printk("Invalid minor %d\n", dev));
670 return -ENODEV;
672 port = &ports[dev];
673 spin_lock_irq(&port->lock);
675 tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg);
676 rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg);
677 frm_cfg = REG_RD(sser, port->regi_sser, rw_frm_cfg);
678 gen_cfg = REG_RD(sser, port->regi_sser, rw_cfg);
679 intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask);
681 switch(cmd)
683 case SSP_SPEED:
684 if (GET_SPEED(arg) == CODEC)
686 unsigned int freq;
688 gen_cfg.base_freq = regk_sser_f32;
690 /* Clock divider will internally be
691 * gen_cfg.clk_div + 1.
692 */
694 freq = GET_FREQ(arg);
695 switch (freq) {
696 case FREQ_32kHz:
697 case FREQ_64kHz:
698 case FREQ_128kHz:
699 case FREQ_256kHz:
700 gen_cfg.clk_div = 125 *
701 (1 << (freq - FREQ_256kHz)) - 1;
702 break;
703 case FREQ_512kHz:
704 gen_cfg.clk_div = 62;
705 break;
706 case FREQ_1MHz:
707 case FREQ_2MHz:
708 case FREQ_4MHz:
709 gen_cfg.clk_div = 8 * (1 << freq) - 1;
710 break;
712 } else {
713 gen_cfg.base_freq = regk_sser_f29_493;
714 switch (GET_SPEED(arg)) {
715 case SSP150:
716 gen_cfg.clk_div = 29493000 / (150 * 8) - 1;
717 break;
718 case SSP300:
719 gen_cfg.clk_div = 29493000 / (300 * 8) - 1;
720 break;
721 case SSP600:
722 gen_cfg.clk_div = 29493000 / (600 * 8) - 1;
723 break;
724 case SSP1200:
725 gen_cfg.clk_div = 29493000 / (1200 * 8) - 1;
726 break;
727 case SSP2400:
728 gen_cfg.clk_div = 29493000 / (2400 * 8) - 1;
729 break;
730 case SSP4800:
731 gen_cfg.clk_div = 29493000 / (4800 * 8) - 1;
732 break;
733 case SSP9600:
734 gen_cfg.clk_div = 29493000 / (9600 * 8) - 1;
735 break;
736 case SSP19200:
737 gen_cfg.clk_div = 29493000 / (19200 * 8) - 1;
738 break;
739 case SSP28800:
740 gen_cfg.clk_div = 29493000 / (28800 * 8) - 1;
741 break;
742 case SSP57600:
743 gen_cfg.clk_div = 29493000 / (57600 * 8) - 1;
744 break;
745 case SSP115200:
746 gen_cfg.clk_div = 29493000 / (115200 * 8) - 1;
747 break;
748 case SSP230400:
749 gen_cfg.clk_div = 29493000 / (230400 * 8) - 1;
750 break;
751 case SSP460800:
752 gen_cfg.clk_div = 29493000 / (460800 * 8) - 1;
753 break;
754 case SSP921600:
755 gen_cfg.clk_div = 29493000 / (921600 * 8) - 1;
756 break;
757 case SSP3125000:
758 gen_cfg.base_freq = regk_sser_f100;
759 gen_cfg.clk_div = 100000000 / (3125000 * 8) - 1;
760 break;
764 frm_cfg.wordrate = GET_WORD_RATE(arg);
766 break;
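/* Worked examples (illustrative; assuming regk_sser_f32 selects a 32 MHz
 * base clock): FREQ_256kHz gives clk_div = 125 * 1 - 1 = 124, i.e.
 * 32 MHz / (124 + 1) = 256 kHz.  For the fixed rates the 29.493 MHz base
 * is divided down to 8 times the bit rate, e.g. SSP115200:
 * clk_div = 29493000 / (115200 * 8) - 1 = 31 and 29493000 / 32 =
 * 921656 Hz, roughly 8 * 115200.
 */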
767 case SSP_MODE:
768 switch(arg)
770 case MASTER_OUTPUT:
771 port->output = 1;
772 port->input = 0;
773 frm_cfg.out_on = regk_sser_tr;
774 frm_cfg.frame_pin_dir = regk_sser_out;
775 gen_cfg.clk_dir = regk_sser_out;
776 break;
777 case SLAVE_OUTPUT:
778 port->output = 1;
779 port->input = 0;
780 frm_cfg.frame_pin_dir = regk_sser_in;
781 gen_cfg.clk_dir = regk_sser_in;
782 break;
783 case MASTER_INPUT:
784 port->output = 0;
785 port->input = 1;
786 frm_cfg.frame_pin_dir = regk_sser_out;
787 frm_cfg.out_on = regk_sser_intern_tb;
788 gen_cfg.clk_dir = regk_sser_out;
789 break;
790 case SLAVE_INPUT:
791 port->output = 0;
792 port->input = 1;
793 frm_cfg.frame_pin_dir = regk_sser_in;
794 gen_cfg.clk_dir = regk_sser_in;
795 break;
796 case MASTER_BIDIR:
797 port->output = 1;
798 port->input = 1;
799 frm_cfg.frame_pin_dir = regk_sser_out;
800 frm_cfg.out_on = regk_sser_intern_tb;
801 gen_cfg.clk_dir = regk_sser_out;
802 break;
803 case SLAVE_BIDIR:
804 port->output = 1;
805 port->input = 1;
806 frm_cfg.frame_pin_dir = regk_sser_in;
807 gen_cfg.clk_dir = regk_sser_in;
808 break;
809 default:
810 spin_unlock_irq(&port->lock);
811 return -EINVAL;
813 if (!port->use_dma || (arg == MASTER_OUTPUT || arg == SLAVE_OUTPUT))
814 intr_mask.rdav = regk_sser_yes;
815 break;
816 case SSP_FRAME_SYNC:
817 if (arg & NORMAL_SYNC) {
818 frm_cfg.rec_delay = 1;
819 frm_cfg.tr_delay = 1;
821 else if (arg & EARLY_SYNC)
822 frm_cfg.rec_delay = frm_cfg.tr_delay = 0;
823 else if (arg & SECOND_WORD_SYNC) {
824 frm_cfg.rec_delay = 7;
825 frm_cfg.tr_delay = 1;
828 tr_cfg.bulk_wspace = frm_cfg.tr_delay;
829 frm_cfg.early_wend = regk_sser_yes;
830 if (arg & BIT_SYNC)
831 frm_cfg.type = regk_sser_edge;
832 else if (arg & WORD_SYNC)
833 frm_cfg.type = regk_sser_level;
834 else if (arg & EXTENDED_SYNC)
835 frm_cfg.early_wend = regk_sser_no;
837 if (arg & SYNC_ON)
838 frm_cfg.frame_pin_use = regk_sser_frm;
839 else if (arg & SYNC_OFF)
840 frm_cfg.frame_pin_use = regk_sser_gio0;
842 dma_w_size = regk_dma_set_w_size2;
843 if (arg & WORD_SIZE_8) {
844 rec_cfg.sample_size = tr_cfg.sample_size = 7;
845 dma_w_size = regk_dma_set_w_size1;
846 } else if (arg & WORD_SIZE_12)
847 rec_cfg.sample_size = tr_cfg.sample_size = 11;
848 else if (arg & WORD_SIZE_16)
849 rec_cfg.sample_size = tr_cfg.sample_size = 15;
850 else if (arg & WORD_SIZE_24)
851 rec_cfg.sample_size = tr_cfg.sample_size = 23;
852 else if (arg & WORD_SIZE_32)
853 rec_cfg.sample_size = tr_cfg.sample_size = 31;
855 if (arg & BIT_ORDER_MSB)
856 rec_cfg.sh_dir = tr_cfg.sh_dir = regk_sser_msbfirst;
857 else if (arg & BIT_ORDER_LSB)
858 rec_cfg.sh_dir = tr_cfg.sh_dir = regk_sser_lsbfirst;
860 if (arg & FLOW_CONTROL_ENABLE) {
861 frm_cfg.status_pin_use = regk_sser_frm;
862 rec_cfg.fifo_thr = regk_sser_thr16;
863 } else if (arg & FLOW_CONTROL_DISABLE) {
864 frm_cfg.status_pin_use = regk_sser_gio0;
865 rec_cfg.fifo_thr = regk_sser_inf;
868 if (arg & CLOCK_NOT_GATED)
869 gen_cfg.gate_clk = regk_sser_no;
870 else if (arg & CLOCK_GATED)
871 gen_cfg.gate_clk = regk_sser_yes;
873 break;
874 case SSP_IPOLARITY:
875 /* NOTE!! negedge is considered NORMAL */
876 if (arg & CLOCK_NORMAL)
877 rec_cfg.clk_pol = regk_sser_neg;
878 else if (arg & CLOCK_INVERT)
879 rec_cfg.clk_pol = regk_sser_pos;
881 if (arg & FRAME_NORMAL)
882 frm_cfg.level = regk_sser_pos_hi;
883 else if (arg & FRAME_INVERT)
884 frm_cfg.level = regk_sser_neg_lo;
886 if (arg & STATUS_NORMAL)
887 gen_cfg.hold_pol = regk_sser_pos;
888 else if (arg & STATUS_INVERT)
889 gen_cfg.hold_pol = regk_sser_neg;
890 break;
891 case SSP_OPOLARITY:
892 if (arg & CLOCK_NORMAL)
893 gen_cfg.out_clk_pol = regk_sser_pos;
894 else if (arg & CLOCK_INVERT)
895 gen_cfg.out_clk_pol = regk_sser_neg;
897 if (arg & FRAME_NORMAL)
898 frm_cfg.level = regk_sser_pos_hi;
899 else if (arg & FRAME_INVERT)
900 frm_cfg.level = regk_sser_neg_lo;
902 if (arg & STATUS_NORMAL)
903 gen_cfg.hold_pol = regk_sser_pos;
904 else if (arg & STATUS_INVERT)
905 gen_cfg.hold_pol = regk_sser_neg;
906 break;
907 case SSP_SPI:
908 rec_cfg.fifo_thr = regk_sser_inf;
909 rec_cfg.sh_dir = tr_cfg.sh_dir = regk_sser_msbfirst;
910 rec_cfg.sample_size = tr_cfg.sample_size = 7;
911 frm_cfg.frame_pin_use = regk_sser_frm;
912 frm_cfg.type = regk_sser_level;
913 frm_cfg.tr_delay = 1;
914 frm_cfg.level = regk_sser_neg_lo;
915 if (arg & SPI_SLAVE)
917 rec_cfg.clk_pol = regk_sser_neg;
918 gen_cfg.clk_dir = regk_sser_in;
919 port->input = 1;
920 port->output = 0;
922 else
924 gen_cfg.out_clk_pol = regk_sser_pos;
925 port->input = 0;
926 port->output = 1;
927 gen_cfg.clk_dir = regk_sser_out;
929 break;
930 case SSP_INBUFCHUNK:
931 break;
932 default:
933 return_val = -ENOTTY;
937 if (port->started) {
938 rec_cfg.rec_en = port->input;
939 gen_cfg.en = (port->output | port->input);
942 REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
943 REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
944 REG_WR(sser, port->regi_sser, rw_frm_cfg, frm_cfg);
945 REG_WR(sser, port->regi_sser, rw_intr_mask, intr_mask);
946 REG_WR(sser, port->regi_sser, rw_cfg, gen_cfg);
949 if (cmd == SSP_FRAME_SYNC && (arg & (WORD_SIZE_8 | WORD_SIZE_12 |
950 WORD_SIZE_16 | WORD_SIZE_24 | WORD_SIZE_32))) {
951 int en = gen_cfg.en;
952 gen_cfg.en = 0;
953 REG_WR(sser, port->regi_sser, rw_cfg, gen_cfg);
954 /* ##### Should DMA be stopped before we change dma size? */
955 DMA_WR_CMD(port->regi_dmain, dma_w_size);
956 DMA_WR_CMD(port->regi_dmaout, dma_w_size);
957 gen_cfg.en = en;
958 REG_WR(sser, port->regi_sser, rw_cfg, gen_cfg);
961 spin_unlock_irq(&port->lock);
962 return return_val;
965 static long sync_serial_ioctl(struct file *file,
966 unsigned int cmd, unsigned long arg)
968 long ret;
970 mutex_lock(&sync_serial_mutex);
971 ret = sync_serial_ioctl_unlocked(file, cmd, arg);
972 mutex_unlock(&sync_serial_mutex);
974 return ret;
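/* Illustrative user-space sketch (not part of the driver; the device node
 * name is an assumption, see the usage note after etrax_sync_serial_init()
 * above).  The mode and frame-sync arguments are the plain constants from
 * <asm/sync_serial.h> that the switch statements above decode:
 */
#if 0
	unsigned char buf[64] = { 0 };
	int fd = open("/dev/syncser0", O_RDWR);	/* hypothetical node name */

	ioctl(fd, SSP_MODE, MASTER_OUTPUT);
	ioctl(fd, SSP_FRAME_SYNC,
	      NORMAL_SYNC | WORD_SYNC | WORD_SIZE_8 | BIT_ORDER_MSB);
	write(fd, buf, sizeof(buf));	/* ends up in sync_serial_write() */
#endif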
977 /* NOTE: sync_serial_write does not support concurrency */
978 static ssize_t sync_serial_write(struct file *file, const char *buf,
979 size_t count, loff_t *ppos)
981 int dev = iminor(file->f_path.dentry->d_inode);
982 DECLARE_WAITQUEUE(wait, current);
983 struct sync_port *port;
984 int trunc_count;
985 unsigned long flags;
986 int bytes_free;
987 int out_buf_count;
989 unsigned char *rd_ptr; /* First allocated byte in the buffer */
990 unsigned char *wr_ptr; /* First free byte in the buffer */
991 unsigned char *buf_stop_ptr; /* Last byte + 1 */
993 if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) {
994 DEBUG(printk("Invalid minor %d\n", dev));
995 return -ENODEV;
997 port = &ports[dev];
999 /* |<- OUT_BUFFER_SIZE ->|
1000 * |<- out_buf_count ->|
1001 * |<- trunc_count ->| ...->|
1002 * ______________________________________________________
1003 * | free | data | free |
1004 * |_________|___________________|________________________|
1005 * ^ rd_ptr ^ wr_ptr
1006 */
1007 DEBUGWRITE(printk(KERN_DEBUG "W d%d c %lu a: %p c: %p\n",
1008 port->port_nbr, count, port->active_tr_descr,
1009 port->catch_tr_descr));
1011 /* Read variables that may be updated by interrupts */
1012 spin_lock_irqsave(&port->lock, flags);
1013 rd_ptr = port->out_rd_ptr;
1014 out_buf_count = port->out_buf_count;
1015 spin_unlock_irqrestore(&port->lock, flags);
1017 /* Check if resources are available */
1018 if (port->tr_running &&
1019 ((port->use_dma && port->active_tr_descr == port->catch_tr_descr) ||
1020 out_buf_count >= OUT_BUFFER_SIZE)) {
1021 DEBUGWRITE(printk(KERN_DEBUG "sser%d full\n", dev));
1022 return -EAGAIN;
1025 buf_stop_ptr = port->out_buffer + OUT_BUFFER_SIZE;
1027 /* Determine pointer to the first free byte, before copying. */
1028 wr_ptr = rd_ptr + out_buf_count;
1029 if (wr_ptr >= buf_stop_ptr)
1030 wr_ptr -= OUT_BUFFER_SIZE;
1032 /* If we wrap the ring buffer, let the user space program handle it by
1033 * truncating the data. This could be more elegant, small buffer
1034 * fragments may occur.
1035 */
1036 bytes_free = OUT_BUFFER_SIZE - out_buf_count;
1037 if (wr_ptr + bytes_free > buf_stop_ptr)
1038 bytes_free = buf_stop_ptr - wr_ptr;
1039 trunc_count = (count < bytes_free) ? count : bytes_free;
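/* Worked example (illustrative): with OUT_BUFFER_SIZE == 8192, rd_ptr at
 * offset 1000 and out_buf_count == 2000, wr_ptr lands at offset 3000;
 * 6192 bytes are free in total but only 8192 - 3000 = 5192 of them are
 * contiguous, so a 6000 byte write is truncated to 5192 bytes (the
 * "small buffer fragments" mentioned above) and the caller must write
 * the remainder in a second call.
 */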
1041 if (copy_from_user(wr_ptr, buf, trunc_count))
1042 return -EFAULT;
1044 DEBUGOUTBUF(printk(KERN_DEBUG "%-4d + %-4d = %-4d %p %p %p\n",
1045 out_buf_count, trunc_count,
1046 port->out_buf_count, port->out_buffer,
1047 wr_ptr, buf_stop_ptr));
1049 /* Make sure transmitter/receiver is running */
1050 if (!port->started) {
1051 reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg);
1052 reg_sser_rw_rec_cfg rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg);
1053 cfg.en = regk_sser_yes;
1054 rec_cfg.rec_en = port->input;
1055 REG_WR(sser, port->regi_sser, rw_cfg, cfg);
1056 REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
1057 port->started = 1;
1060 /* Setup wait if blocking */
1061 if (!(file->f_flags & O_NONBLOCK)) {
1062 add_wait_queue(&port->out_wait_q, &wait);
1063 set_current_state(TASK_INTERRUPTIBLE);
1066 spin_lock_irqsave(&port->lock, flags);
1067 port->out_buf_count += trunc_count;
1068 if (port->use_dma) {
1069 start_dma_out(port, wr_ptr, trunc_count);
1070 } else if (!port->tr_running) {
1071 reg_sser_rw_intr_mask intr_mask;
1072 intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask);
1073 /* Start sender by writing data */
1074 send_word(port);
1075 /* and enable transmitter ready IRQ */
1076 intr_mask.trdy = 1;
1077 REG_WR(sser, port->regi_sser, rw_intr_mask, intr_mask);
1079 spin_unlock_irqrestore(&port->lock, flags);
1081 /* Exit if non blocking */
1082 if (file->f_flags & O_NONBLOCK) {
1083 DEBUGWRITE(printk(KERN_DEBUG "w d%d c %lu %08x\n",
1084 port->port_nbr, trunc_count,
1085 REG_RD_INT(dma, port->regi_dmaout, r_intr)));
1086 return trunc_count;
1089 schedule();
1090 set_current_state(TASK_RUNNING);
1091 remove_wait_queue(&port->out_wait_q, &wait);
1093 if (signal_pending(current))
1094 return -EINTR;
1096 DEBUGWRITE(printk(KERN_DEBUG "w d%d c %lu\n",
1097 port->port_nbr, trunc_count));
1098 return trunc_count;
1101 static ssize_t sync_serial_read(struct file * file, char * buf,
1102 size_t count, loff_t *ppos)
1104 int dev = iminor(file->f_path.dentry->d_inode);
1105 int avail;
1106 sync_port *port;
1107 unsigned char* start;
1108 unsigned char* end;
1109 unsigned long flags;
1111 if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled)
1113 DEBUG(printk("Invalid minor %d\n", dev));
1114 return -ENODEV;
1116 port = &ports[dev];
1118 DEBUGREAD(printk("R%d c %d ri %lu wi %lu /%lu\n", dev, count, port->readp - port->flip, port->writep - port->flip, port->in_buffer_size));
1120 if (!port->started)
1122 reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg);
1123 reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg);
1124 reg_sser_rw_rec_cfg rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg);
1125 cfg.en = regk_sser_yes;
1126 tr_cfg.tr_en = regk_sser_yes;
1127 rec_cfg.rec_en = regk_sser_yes;
1128 REG_WR(sser, port->regi_sser, rw_cfg, cfg);
1129 REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
1130 REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
1131 port->started = 1;
1134 /* Calculate number of available bytes */
1135 /* Save pointers to avoid that they are modified by interrupt */
1136 spin_lock_irqsave(&port->lock, flags);
1137 start = (unsigned char*)port->readp; /* cast away volatile */
1138 end = (unsigned char*)port->writep; /* cast away volatile */
1139 spin_unlock_irqrestore(&port->lock, flags);
1140 while ((start == end) && !port->full) /* No data */
1142 DEBUGREAD(printk(KERN_DEBUG "&"));
1143 if (file->f_flags & O_NONBLOCK)
1144 return -EAGAIN;
1146 interruptible_sleep_on(&port->in_wait_q);
1147 if (signal_pending(current))
1148 return -EINTR;
1150 spin_lock_irqsave(&port->lock, flags);
1151 start = (unsigned char*)port->readp; /* cast away volatile */
1152 end = (unsigned char*)port->writep; /* cast away volatile */
1153 spin_unlock_irqrestore(&port->lock, flags);
1156 /* Lazy read, never return wrapped data. */
1157 if (port->full)
1158 avail = port->in_buffer_size;
1159 else if (end > start)
1160 avail = end - start;
1161 else
1162 avail = port->flip + port->in_buffer_size - start;
1164 count = count > avail ? avail : count;
1165 if (copy_to_user(buf, start, count))
1166 return -EFAULT;
1167 /* Disable interrupts while updating readp */
1168 spin_lock_irqsave(&port->lock, flags);
1169 port->readp += count;
1170 if (port->readp >= port->flip + port->in_buffer_size) /* Wrap? */
1171 port->readp = port->flip;
1172 port->full = 0;
1173 spin_unlock_irqrestore(&port->lock, flags);
1174 DEBUGREAD(printk("r %d\n", count));
1175 return count;
1178 static void send_word(sync_port* port)
1180 reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg);
1181 reg_sser_rw_tr_data tr_data = {0};
1183 switch(tr_cfg.sample_size)
1185 case 8:
1186 port->out_buf_count--;
1187 tr_data.data = *port->out_rd_ptr++;
1188 REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
1189 if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
1190 port->out_rd_ptr = port->out_buffer;
1191 break;
1192 case 12:
1194 int data = (*port->out_rd_ptr++) << 8;
1195 data |= *port->out_rd_ptr++;
1196 port->out_buf_count -= 2;
1197 tr_data.data = data;
1198 REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
1199 if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
1200 port->out_rd_ptr = port->out_buffer;
1202 break;
1203 case 16:
1204 port->out_buf_count -= 2;
1205 tr_data.data = *(unsigned short *)port->out_rd_ptr;
1206 REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
1207 port->out_rd_ptr += 2;
1208 if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
1209 port->out_rd_ptr = port->out_buffer;
1210 break;
1211 case 24:
1212 port->out_buf_count -= 3;
1213 tr_data.data = *(unsigned short *)port->out_rd_ptr;
1214 REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
1215 port->out_rd_ptr += 2;
1216 tr_data.data = *port->out_rd_ptr++;
1217 REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
1218 if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
1219 port->out_rd_ptr = port->out_buffer;
1220 break;
1221 case 32:
1222 port->out_buf_count -= 4;
1223 tr_data.data = *(unsigned short *)port->out_rd_ptr;
1224 REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
1225 port->out_rd_ptr += 2;
1226 tr_data.data = *(unsigned short *)port->out_rd_ptr;
1227 REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
1228 port->out_rd_ptr += 2;
1229 if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
1230 port->out_rd_ptr = port->out_buffer;
1231 break;
1235 static void start_dma_out(struct sync_port *port,
1236 const char *data, int count)
1238 port->active_tr_descr->buf = (char *) virt_to_phys((char *) data);
1239 port->active_tr_descr->after = port->active_tr_descr->buf + count;
1240 port->active_tr_descr->intr = 1;
1242 port->active_tr_descr->eol = 1;
1243 port->prev_tr_descr->eol = 0;
1245 DEBUGTRDMA(printk(KERN_DEBUG "Inserting eolr:%p eol@:%p\n",
1246 port->prev_tr_descr, port->active_tr_descr));
1247 port->prev_tr_descr = port->active_tr_descr;
1248 port->active_tr_descr = phys_to_virt((int) port->active_tr_descr->next);
1250 if (!port->tr_running) {
1251 reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser,
1252 rw_tr_cfg);
1254 port->out_context.next = 0;
1255 port->out_context.saved_data =
1256 (dma_descr_data *)virt_to_phys(port->prev_tr_descr);
1257 port->out_context.saved_data_buf = port->prev_tr_descr->buf;
1259 DMA_START_CONTEXT(port->regi_dmaout,
1260 virt_to_phys((char *)&port->out_context));
1262 tr_cfg.tr_en = regk_sser_yes;
1263 REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
1264 DEBUGTRDMA(printk(KERN_DEBUG "dma s\n"););
1265 } else {
1266 DMA_CONTINUE_DATA(port->regi_dmaout);
1267 DEBUGTRDMA(printk(KERN_DEBUG "dma c\n"););
1270 port->tr_running = 1;
1273 static void start_dma_in(sync_port *port)
1275 int i;
1276 char *buf;
1277 port->writep = port->flip;
1279 if (port->writep > port->flip + port->in_buffer_size) {
1280 panic("Offset too large in sync serial driver\n");
1281 return;
1283 buf = (char*)virt_to_phys(port->in_buffer);
1284 for (i = 0; i < NBR_IN_DESCR; i++) {
1285 port->in_descr[i].buf = buf;
1286 port->in_descr[i].after = buf + port->inbufchunk;
1287 port->in_descr[i].intr = 1;
1288 port->in_descr[i].next = (dma_descr_data*)virt_to_phys(&port->in_descr[i+1]);
1290 buf += port->inbufchunk;
1292 /* Link the last descriptor to the first */
1293 port->in_descr[i-1].next = (dma_descr_data*)virt_to_phys(&port->in_descr[0]);
1294 port->in_descr[i-1].eol = regk_sser_yes;
1295 port->next_rx_desc = &port->in_descr[0];
1296 port->prev_rx_desc = &port->in_descr[NBR_IN_DESCR - 1];
1297 port->in_context.saved_data = (dma_descr_data*)virt_to_phys(&port->in_descr[0]);
1298 port->in_context.saved_data_buf = port->in_descr[0].buf;
1299 DMA_START_CONTEXT(port->regi_dmain, virt_to_phys(&port->in_context));
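/* Illustrative note (not in the original source): this builds
 * NBR_IN_DESCR (48) receive descriptors of IN_DESCR_SIZE (256) bytes
 * each, covering in_buffer back to back.  The last descriptor links to
 * the first and carries the end-of-list flag, so the DMA keeps circling
 * the buffer while rx_interrupt() moves the eol marker ahead of it.
 */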
1302 #ifdef SYNC_SER_DMA
1303 static irqreturn_t tr_interrupt(int irq, void *dev_id)
1305 reg_dma_r_masked_intr masked;
1306 reg_dma_rw_ack_intr ack_intr = {.data = regk_dma_yes};
1307 reg_dma_rw_stat stat;
1308 int i;
1309 int found = 0;
1310 int stop_sser = 0;
1312 for (i = 0; i < NBR_PORTS; i++) {
1313 sync_port *port = &ports[i];
1314 if (!port->enabled || !port->use_dma)
1315 continue;
1317 /* IRQ active for the port? */
1318 masked = REG_RD(dma, port->regi_dmaout, r_masked_intr);
1319 if (!masked.data)
1320 continue;
1322 found = 1;
1324 /* Check if we should stop the DMA transfer */
1325 stat = REG_RD(dma, port->regi_dmaout, rw_stat);
1326 if (stat.list_state == regk_dma_data_at_eol)
1327 stop_sser = 1;
1329 /* Clear IRQ */
1330 REG_WR(dma, port->regi_dmaout, rw_ack_intr, ack_intr);
1332 if (!stop_sser) {
1333 /* The DMA has completed a descriptor, EOL was not
1334 * encountered, so step relevant descriptor and
1335 * datapointers forward. */
1336 int sent;
1337 sent = port->catch_tr_descr->after -
1338 port->catch_tr_descr->buf;
1339 DEBUGTXINT(printk(KERN_DEBUG "%-4d - %-4d = %-4d\t"
1340 "in descr %p (ac: %p)\n",
1341 port->out_buf_count, sent,
1342 port->out_buf_count - sent,
1343 port->catch_tr_descr,
1344 port->active_tr_descr););
1345 port->out_buf_count -= sent;
1346 port->catch_tr_descr =
1347 phys_to_virt((int) port->catch_tr_descr->next);
1348 port->out_rd_ptr =
1349 phys_to_virt((int) port->catch_tr_descr->buf);
1350 } else {
1351 int i, sent;
1352 /* EOL handler.
1353 * Note that if an EOL was encountered during the irq
1354 * locked section of sync_ser_write the DMA will be
1355 * restarted and the eol flag will be cleared.
1356 * The remaining descriptors will be traversed by
1357 * the descriptor interrupts as usual.
1358 */
1359 i = 0;
1360 while (!port->catch_tr_descr->eol) {
1361 sent = port->catch_tr_descr->after -
1362 port->catch_tr_descr->buf;
1363 DEBUGOUTBUF(printk(KERN_DEBUG
1364 "traversing descr %p -%d (%d)\n",
1365 port->catch_tr_descr,
1366 sent,
1367 port->out_buf_count));
1368 port->out_buf_count -= sent;
1369 port->catch_tr_descr = phys_to_virt(
1370 (int)port->catch_tr_descr->next);
1371 i++;
1372 if (i >= NBR_OUT_DESCR) {
1373 /* TODO: Reset and recover */
1374 panic("sync_serial: missing eol");
1377 sent = port->catch_tr_descr->after -
1378 port->catch_tr_descr->buf;
1379 DEBUGOUTBUF(printk(KERN_DEBUG
1380 "eol at descr %p -%d (%d)\n",
1381 port->catch_tr_descr,
1382 sent,
1383 port->out_buf_count));
1385 port->out_buf_count -= sent;
1387 /* Update read pointer to first free byte, we
1388 * may already be writing data there. */
1389 port->out_rd_ptr =
1390 phys_to_virt((int) port->catch_tr_descr->after);
1391 if (port->out_rd_ptr > port->out_buffer +
1392 OUT_BUFFER_SIZE)
1393 port->out_rd_ptr = port->out_buffer;
1395 reg_sser_rw_tr_cfg tr_cfg =
1396 REG_RD(sser, port->regi_sser, rw_tr_cfg);
1397 DEBUGTXINT(printk(KERN_DEBUG
1398 "tr_int DMA stop %d, set catch @ %p\n",
1399 port->out_buf_count,
1400 port->active_tr_descr));
1401 if (port->out_buf_count != 0)
1402 printk(KERN_CRIT "sync_ser: buffer not "
1403 "empty after eol.\n");
1404 port->catch_tr_descr = port->active_tr_descr;
1405 port->tr_running = 0;
1406 tr_cfg.tr_en = regk_sser_no;
1407 REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
1409 /* wake up the waiting process */
1410 wake_up_interruptible(&port->out_wait_q);
1412 return IRQ_RETVAL(found);
1413 } /* tr_interrupt */
1415 static irqreturn_t rx_interrupt(int irq, void *dev_id)
1417 reg_dma_r_masked_intr masked;
1418 reg_dma_rw_ack_intr ack_intr = {.data = regk_dma_yes};
1420 int i;
1421 int found = 0;
1423 for (i = 0; i < NBR_PORTS; i++)
1425 sync_port *port = &ports[i];
1427 if (!port->enabled || !port->use_dma )
1428 continue;
1430 masked = REG_RD(dma, port->regi_dmain, r_masked_intr);
1432 if (masked.data) /* Descriptor interrupt */
1434 found = 1;
1435 while (REG_RD(dma, port->regi_dmain, rw_data) !=
1436 virt_to_phys(port->next_rx_desc)) {
1437 DEBUGRXINT(printk(KERN_DEBUG "!"));
1438 if (port->writep + port->inbufchunk > port->flip + port->in_buffer_size) {
1439 int first_size = port->flip + port->in_buffer_size - port->writep;
1440 memcpy((char*)port->writep, phys_to_virt((unsigned)port->next_rx_desc->buf), first_size);
1441 memcpy(port->flip, phys_to_virt((unsigned)port->next_rx_desc->buf+first_size), port->inbufchunk - first_size);
1442 port->writep = port->flip + port->inbufchunk - first_size;
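/* Worked example (illustrative): if writep is 100 bytes short of the end
 * of flip and inbufchunk is 256, first_size == 100, so 100 bytes land at
 * the tail of the buffer, the remaining 156 at the start, and writep
 * ends up at flip + 156.
 */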
1443 } else {
1444 memcpy((char*)port->writep,
1445 phys_to_virt((unsigned)port->next_rx_desc->buf),
1446 port->inbufchunk);
1447 port->writep += port->inbufchunk;
1448 if (port->writep >= port->flip + port->in_buffer_size)
1449 port->writep = port->flip;
1451 if (port->writep == port->readp)
1453 port->full = 1;
1456 port->next_rx_desc->eol = 1;
1457 port->prev_rx_desc->eol = 0;
1458 /* Cache bug workaround */
1459 flush_dma_descr(port->prev_rx_desc, 0);
1460 port->prev_rx_desc = port->next_rx_desc;
1461 port->next_rx_desc = phys_to_virt((unsigned)port->next_rx_desc->next);
1462 /* Cache bug workaround */
1463 flush_dma_descr(port->prev_rx_desc, 1);
1464 /* wake up the waiting process */
1465 wake_up_interruptible(&port->in_wait_q);
1466 DMA_CONTINUE(port->regi_dmain);
1467 REG_WR(dma, port->regi_dmain, rw_ack_intr, ack_intr);
1472 return IRQ_RETVAL(found);
1473 } /* rx_interrupt */
1474 #endif /* SYNC_SER_DMA */
1476 #ifdef SYNC_SER_MANUAL
1477 static irqreturn_t manual_interrupt(int irq, void *dev_id)
1479 int i;
1480 int found = 0;
1481 reg_sser_r_masked_intr masked;
1483 for (i = 0; i < NBR_PORTS; i++)
1485 sync_port *port = &ports[i];
1487 if (!port->enabled || port->use_dma)
1489 continue;
1492 masked = REG_RD(sser, port->regi_sser, r_masked_intr);
1493 if (masked.rdav) /* Data received? */
1495 reg_sser_rw_rec_cfg rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg);
1496 reg_sser_r_rec_data data = REG_RD(sser, port->regi_sser, r_rec_data);
1497 found = 1;
1498 /* Read data */
1499 switch(rec_cfg.sample_size)
1501 case 8:
1502 *port->writep++ = data.data & 0xff;
1503 break;
1504 case 12:
1505 *port->writep = (data.data & 0x0ff0) >> 4;
1506 *(port->writep + 1) = data.data & 0x0f;
1507 port->writep+=2;
1508 break;
1509 case 16:
1510 *(unsigned short*)port->writep = data.data;
1511 port->writep+=2;
1512 break;
1513 case 24:
1514 *(unsigned int*)port->writep = data.data;
1515 port->writep+=3;
1516 break;
1517 case 32:
1518 *(unsigned int*)port->writep = data.data;
1519 port->writep+=4;
1520 break;
1523 if (port->writep >= port->flip + port->in_buffer_size) /* Wrap? */
1524 port->writep = port->flip;
1525 if (port->writep == port->readp) {
1526 /* receive buffer overrun, discard oldest data */
1528 port->readp++;
1529 if (port->readp >= port->flip + port->in_buffer_size) /* Wrap? */
1530 port->readp = port->flip;
1532 if (sync_data_avail(port) >= port->inbufchunk)
1533 wake_up_interruptible(&port->in_wait_q); /* Wake up application */
1536 if (masked.trdy) /* Transmitter ready? */
1538 found = 1;
1539 if (port->out_buf_count > 0) /* More data to send */
1540 send_word(port);
1541 else /* transmission finished */
1543 reg_sser_rw_intr_mask intr_mask;
1544 intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask);
1545 intr_mask.trdy = 0;
1546 REG_WR(sser, port->regi_sser, rw_intr_mask, intr_mask);
1547 wake_up_interruptible(&port->out_wait_q); /* Wake up application */
1551 return IRQ_RETVAL(found);
1553 #endif
1555 module_init(etrax_sync_serial_init);