/* linux/arch/arm/plat-samsung/s3c-pl330.c
 *
 * Copyright (C) 2010 Samsung Electronics Co. Ltd.
 *	Jaswinder Singh <jassi.brar@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/platform_device.h>

#include <asm/hardware/pl330.h>

#include <plat/s3c-pl330-pdata.h>

/**
 * struct s3c_pl330_dmac - Logical representation of a PL330 DMAC.
 * @busy_chan: Number of channels currently busy.
 * @peri: List of IDs of peripherals this DMAC can work with.
 * @node: To attach to the global list of DMACs.
 * @pi: PL330 configuration info for the DMAC.
 * @kmcache: Pool to quickly allocate xfers for all channels in the dmac.
 */
struct s3c_pl330_dmac {
	unsigned		busy_chan;
	enum dma_ch		*peri;
	struct list_head	node;
	struct pl330_info	*pi;
	struct kmem_cache	*kmcache;
};

/**
 * struct s3c_pl330_xfer - A request submitted by S3C DMA clients.
 * @token: Xfer ID provided by the client.
 * @node: To attach to the list of xfers on a channel.
 * @px: Xfer for PL330 core.
 * @chan: Owner channel of this xfer.
 */
struct s3c_pl330_xfer {
	void			*token;
	struct list_head	node;
	struct pl330_xfer	px;
	struct s3c_pl330_chan	*chan;
};

/**
 * struct s3c_pl330_chan - Logical channel to communicate with
 *	a Physical peripheral.
 * @pl330_chan_id: Token of a hardware channel thread of PL330 DMAC.
 *	NULL if the channel is available to be acquired.
 * @id: ID of the peripheral that this channel can communicate with.
 * @options: Options specified by the client.
 * @sdaddr: Address provided via s3c2410_dma_devconfig.
 * @node: To attach to the global list of channels.
 * @lrq: Pointer to the last submitted pl330_req to PL330 core.
 * @xfer_list: To manage list of xfers enqueued.
 * @req: Two requests to communicate with the PL330 engine.
 * @callback_fn: Callback function to the client.
 * @rqcfg: Channel configuration for the xfers.
 * @xfer_head: Pointer to the xfer to be executed next.
 * @dmac: Pointer to the DMAC that manages this channel, NULL if the
 *	channel is available to be acquired.
 * @client: Client of this channel. NULL if the
 *	channel is available to be acquired.
 */
struct s3c_pl330_chan {
	void				*pl330_chan_id;
	enum dma_ch			id;
	unsigned int			options;
	unsigned long			sdaddr;
	struct list_head		node;
	struct pl330_req		*lrq;
	struct list_head		xfer_list;
	struct pl330_req		req[2];
	s3c2410_dma_cbfn_t		callback_fn;
	struct pl330_reqcfg		rqcfg;
	struct s3c_pl330_xfer		*xfer_head;
	struct s3c_pl330_dmac		*dmac;
	struct s3c2410_dma_client	*client;
};

/* All DMACs in the platform */
static LIST_HEAD(dmac_list);

/* All channels to peripherals in the platform */
static LIST_HEAD(chan_list);

/*
 * Since we add resources (DMACs and Channels) to the global pool,
 * we need to guard access to the resources using a global lock.
 */
static DEFINE_SPINLOCK(res_lock);

/* Returns the channel with ID 'id' in the chan_list */
static struct s3c_pl330_chan *id_to_chan(const enum dma_ch id)
{
	struct s3c_pl330_chan *ch;

	list_for_each_entry(ch, &chan_list, node)
		if (ch->id == id)
			return ch;

	return NULL;
}

/* Allocate a new channel with ID 'id' and add to chan_list */
static void chan_add(const enum dma_ch id)
{
	struct s3c_pl330_chan *ch = id_to_chan(id);

	/* Return if the channel already exists */
	if (ch)
		return;

	ch = kmalloc(sizeof(*ch), GFP_KERNEL);
	/* Return silently to work with other channels */
	if (!ch)
		return;

	ch->id = id;
	ch->dmac = NULL;

	list_add_tail(&ch->node, &chan_list);
}

/* If the channel is not yet acquired by any client */
static bool chan_free(struct s3c_pl330_chan *ch)
{
	if (!ch)
		return false;

	/* Channel points to some DMAC only when it's acquired */
	return ch->dmac ? false : true;
}

/*
 * Returns 0 if the peripheral i/f is invalid or not present on the dmac.
 * Index + 1, otherwise.
 */
static unsigned iface_of_dmac(struct s3c_pl330_dmac *dmac, enum dma_ch ch_id)
{
	enum dma_ch *id = dmac->peri;
	int i;

	/* Discount invalid markers */
	if (ch_id == DMACH_MAX)
		return 0;

	for (i = 0; i < PL330_MAX_PERI; i++)
		if (id[i] == ch_id)
			return i + 1;

	return 0;
}

/* If all channel threads of the DMAC are busy */
static inline bool dmac_busy(struct s3c_pl330_dmac *dmac)
{
	struct pl330_info *pi = dmac->pi;

	return (dmac->busy_chan < pi->pcfg.num_chan) ? false : true;
}

/*
 * Returns the number of free channels that
 * can be handled by this dmac only.
 */
static unsigned ch_onlyby_dmac(struct s3c_pl330_dmac *dmac)
{
	enum dma_ch *id = dmac->peri;
	struct s3c_pl330_dmac *d;
	struct s3c_pl330_chan *ch;
	unsigned found, count = 0;
	enum dma_ch p;
	int i;

	for (i = 0; i < PL330_MAX_PERI; i++) {
		p = id[i];
		ch = id_to_chan(p);

		if (p == DMACH_MAX || !chan_free(ch))
			continue;

		found = 0;
		list_for_each_entry(d, &dmac_list, node) {
			if (d != dmac && iface_of_dmac(d, ch->id)) {
				found = 1;
				break;
			}
		}

		if (!found)
			count++;
	}

	return count;
}

/*
 * Measure of suitability of 'dmac' handling 'ch'
 *
 * 0 indicates 'dmac' cannot handle 'ch' either
 * because it is not supported by the hardware or
 * because all dmac channels are currently busy.
 *
 * >0 value indicates 'dmac' has the capability.
 * The bigger the value the more suitable the dmac.
 */
#define MAX_SUIT	UINT_MAX
#define MIN_SUIT	0

static unsigned suitability(struct s3c_pl330_dmac *dmac,
		struct s3c_pl330_chan *ch)
{
	struct pl330_info *pi = dmac->pi;
	enum dma_ch *id = dmac->peri;
	struct s3c_pl330_dmac *d;
	unsigned s;
	int i;

	s = MIN_SUIT;
	/* If all the DMAC channel threads are busy */
	if (dmac_busy(dmac))
		return s;

	for (i = 0; i < PL330_MAX_PERI; i++)
		if (id[i] == ch->id)
			break;

	/* If the 'dmac' can't talk to 'ch' */
	if (i == PL330_MAX_PERI)
		return s;

	s = MAX_SUIT;
	list_for_each_entry(d, &dmac_list, node) {
		/*
		 * If some other dmac can talk to this
		 * peri and has some channel free.
		 */
		if (d != dmac && iface_of_dmac(d, ch->id) && !dmac_busy(d)) {
			s = 0;
			break;
		}
	}

	if (s)
		return s;

	s = 100;

	/* Good if free chans are more, bad otherwise */
	s += (pi->pcfg.num_chan - dmac->busy_chan) - ch_onlyby_dmac(dmac);

	return s;
}
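
/*
 * Worked example of the scoring above (numbers are illustrative, not from
 * any particular SoC): a DMAC whose rivals are all busy, or that alone
 * reaches the peripheral, returns MAX_SUIT at once. Otherwise, with 8
 * channel threads of which 3 are busy, and 2 still-free peripherals that
 * only this DMAC can serve, the score is 100 + (8 - 3) - 2 = 103; a rival
 * with more free threads and fewer exclusively-served peripherals scores
 * higher and wins in map_chan_to_dmac() below.
 */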

/* More than one DMAC may have the capability to transfer data with the
 * peripheral. This function assigns the most suitable DMAC to manage the
 * channel and hence communicate with the peripheral.
 */
static struct s3c_pl330_dmac *map_chan_to_dmac(struct s3c_pl330_chan *ch)
{
	struct s3c_pl330_dmac *d, *dmac = NULL;
	unsigned sn, sl = MIN_SUIT;

	list_for_each_entry(d, &dmac_list, node) {
		sn = suitability(d, ch);

		if (sn == MAX_SUIT)
			return d;

		/* Track the best score seen so far */
		if (sn > sl) {
			sl = sn;
			dmac = d;
		}
	}

	return dmac;
}

/* Acquire the channel for peripheral 'id' */
static struct s3c_pl330_chan *chan_acquire(const enum dma_ch id)
{
	struct s3c_pl330_chan *ch = id_to_chan(id);
	struct s3c_pl330_dmac *dmac;

	/* If the channel doesn't exist or is already acquired */
	if (!ch || !chan_free(ch)) {
		ch = NULL;
		goto acq_exit;
	}

	dmac = map_chan_to_dmac(ch);
	/* If couldn't map */
	if (!dmac) {
		ch = NULL;
		goto acq_exit;
	}

	dmac->busy_chan++;
	ch->dmac = dmac;

acq_exit:
	return ch;
}

/* Delete xfer from the queue */
static inline void del_from_queue(struct s3c_pl330_xfer *xfer)
{
	struct s3c_pl330_xfer *t;
	struct s3c_pl330_chan *ch;
	int found;

	if (!xfer)
		return;

	ch = xfer->chan;

	/* Make sure xfer is in the queue */
	found = 0;
	list_for_each_entry(t, &ch->xfer_list, node)
		if (t == xfer) {
			found = 1;
			break;
		}

	if (!found)
		return;

	/* If xfer is last entry in the queue */
	if (xfer->node.next == &ch->xfer_list)
		t = list_entry(ch->xfer_list.next,
				struct s3c_pl330_xfer, node);
	else
		t = list_entry(xfer->node.next,
				struct s3c_pl330_xfer, node);

	/* If there was only one node left */
	if (t == xfer)
		ch->xfer_head = NULL;
	else if (ch->xfer_head == xfer)
		ch->xfer_head = t;

	list_del(&xfer->node);
}

/* Provides pointer to the next xfer in the queue.
 * If CIRCULAR option is set, the list is left intact,
 * otherwise the xfer is removed from the list.
 * Forced delete 'pluck' can be set to override the CIRCULAR option.
 */
static struct s3c_pl330_xfer *get_from_queue(struct s3c_pl330_chan *ch,
		int pluck)
{
	struct s3c_pl330_xfer *xfer = ch->xfer_head;

	if (!xfer)
		return NULL;

	/* If xfer is last entry in the queue */
	if (xfer->node.next == &ch->xfer_list)
		ch->xfer_head = list_entry(ch->xfer_list.next,
				struct s3c_pl330_xfer, node);
	else
		ch->xfer_head = list_entry(xfer->node.next,
				struct s3c_pl330_xfer, node);

	if (pluck || !(ch->options & S3C2410_DMAF_CIRCULAR))
		del_from_queue(xfer);

	return xfer;
}

static inline void add_to_queue(struct s3c_pl330_chan *ch,
		struct s3c_pl330_xfer *xfer, int front)
{
	struct pl330_xfer *xt;

	/* If queue empty */
	if (ch->xfer_head == NULL)
		ch->xfer_head = xfer;

	xt = &ch->xfer_head->px;
	/* If the head already submitted (CIRCULAR head) */
	if (ch->options & S3C2410_DMAF_CIRCULAR &&
		(xt == ch->req[0].x || xt == ch->req[1].x))
		ch->xfer_head = xfer;

	/* If this is a resubmission, it should go at the head */
	if (front) {
		ch->xfer_head = xfer;
		list_add(&xfer->node, &ch->xfer_list);
	} else {
		list_add_tail(&xfer->node, &ch->xfer_list);
	}
}

static inline void _finish_off(struct s3c_pl330_xfer *xfer,
		enum s3c2410_dma_buffresult res, int ffree)
{
	struct s3c_pl330_chan *ch;

	if (!xfer)
		return;

	ch = xfer->chan;

	/* Do callback */
	if (ch->callback_fn)
		ch->callback_fn(NULL, xfer->token, xfer->px.bytes, res);

	/* Force Free or if buffer is not needed anymore */
	if (ffree || !(ch->options & S3C2410_DMAF_CIRCULAR))
		kmem_cache_free(ch->dmac->kmcache, xfer);
}

static inline int s3c_pl330_submit(struct s3c_pl330_chan *ch,
		struct pl330_req *r)
{
	struct s3c_pl330_xfer *xfer;
	int ret = 0;

	/* If already submitted */
	if (r->x)
		return 0;

	xfer = get_from_queue(ch, 0);
	if (xfer) {
		r->x = &xfer->px;

		/* Use max bandwidth for M<->M xfers */
		if (r->rqtype == MEMTOMEM) {
			struct pl330_info *pi = xfer->chan->dmac->pi;
			int burst = 1 << ch->rqcfg.brst_size;
			u32 bytes = r->x->bytes;
			int bl;

			bl = pi->pcfg.data_bus_width / 8;
			bl *= pi->pcfg.data_buf_dep;
			bl /= burst;

			/* src/dst_burst_len can't be more than 16 */
			if (bl > 16)
				bl = 16;

			while (bl > 1) {
				if (!(bytes % (bl * burst)))
					break;
				bl--;
			}

			ch->rqcfg.brst_len = bl;
		} else {
			ch->rqcfg.brst_len = 1;
		}

		ret = pl330_submit_req(ch->pl330_chan_id, r);

		/* If submission was successful */
		if (!ret) {
			ch->lrq = r; /* latest submitted req */
			return 0;
		}

		r->x = NULL;

		/* If both of the PL330 ping-pong buffers filled */
		if (ret == -EAGAIN) {
			dev_err(ch->dmac->pi->dev, "%s:%d!\n",
				__func__, __LINE__);
			/* Queue back again */
			add_to_queue(ch, xfer, 1);
			ret = 0;
		} else {
			dev_err(ch->dmac->pi->dev, "%s:%d!\n",
				__func__, __LINE__);
			_finish_off(xfer, S3C2410_RES_ERR, 0);
		}
	}

	return ret;
}
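
/*
 * Worked example of the M<->M burst sizing above (illustrative numbers):
 * with a 64-bit bus (8 bytes) and a 16-entry data buffer, a 4-byte burst
 * size gives bl = 8 * 16 / 4 = 32, clamped to the PL330 limit of 16.
 * bl then steps down until bl * burst divides the transfer length: 1024
 * bytes keeps bl = 16, while 96 bytes settles at bl = 12, since
 * 12 * 4 = 48 divides 96.
 */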

static void s3c_pl330_rq(struct s3c_pl330_chan *ch,
	struct pl330_req *r, enum pl330_op_err err)
{
	unsigned long flags;
	struct s3c_pl330_xfer *xfer;
	struct pl330_xfer *xl = r->x;
	enum s3c2410_dma_buffresult res;

	spin_lock_irqsave(&res_lock, flags);

	r->x = NULL;

	s3c_pl330_submit(ch, r);

	spin_unlock_irqrestore(&res_lock, flags);

	/* Map result to S3C DMA API */
	if (err == PL330_ERR_NONE)
		res = S3C2410_RES_OK;
	else if (err == PL330_ERR_ABORT)
		res = S3C2410_RES_ABORT;
	else
		res = S3C2410_RES_ERR;

	/* If last request had some xfer */
	if (xl) {
		xfer = container_of(xl, struct s3c_pl330_xfer, px);
		_finish_off(xfer, res, 0);
	} else {
		dev_info(ch->dmac->pi->dev, "%s:%d No Xfer?!\n",
			__func__, __LINE__);
	}
}

static void s3c_pl330_rq0(void *token, enum pl330_op_err err)
{
	struct pl330_req *r = token;
	struct s3c_pl330_chan *ch = container_of(r,
					struct s3c_pl330_chan, req[0]);
	s3c_pl330_rq(ch, r, err);
}

static void s3c_pl330_rq1(void *token, enum pl330_op_err err)
{
	struct pl330_req *r = token;
	struct s3c_pl330_chan *ch = container_of(r,
					struct s3c_pl330_chan, req[1]);
	s3c_pl330_rq(ch, r, err);
}

/* Release an acquired channel */
static void chan_release(struct s3c_pl330_chan *ch)
{
	struct s3c_pl330_dmac *dmac;

	if (chan_free(ch))
		return;

	dmac = ch->dmac;
	ch->dmac = NULL;
	dmac->busy_chan--;
}

int s3c2410_dma_ctrl(enum dma_ch id, enum s3c2410_chan_op op)
{
	struct s3c_pl330_xfer *xfer;
	enum pl330_chan_op pl330op;
	struct s3c_pl330_chan *ch;
	unsigned long flags;
	int idx, ret;

	spin_lock_irqsave(&res_lock, flags);

	ch = id_to_chan(id);

	if (!ch || chan_free(ch)) {
		ret = -EINVAL;
		goto ctrl_exit;
	}

	switch (op) {
	case S3C2410_DMAOP_START:
		/* Make sure both reqs are enqueued */
		idx = (ch->lrq == &ch->req[0]) ? 1 : 0;
		s3c_pl330_submit(ch, &ch->req[idx]);
		s3c_pl330_submit(ch, &ch->req[1 - idx]);
		pl330op = PL330_OP_START;
		break;

	case S3C2410_DMAOP_STOP:
		pl330op = PL330_OP_ABORT;
		break;

	case S3C2410_DMAOP_FLUSH:
		pl330op = PL330_OP_FLUSH;
		break;

	case S3C2410_DMAOP_PAUSE:
	case S3C2410_DMAOP_RESUME:
	case S3C2410_DMAOP_TIMEOUT:
	case S3C2410_DMAOP_STARTED:
		spin_unlock_irqrestore(&res_lock, flags);
		return 0;

	default:
		spin_unlock_irqrestore(&res_lock, flags);
		return -EINVAL;
	}

	ret = pl330_chan_ctrl(ch->pl330_chan_id, pl330op);

	if (pl330op == PL330_OP_START) {
		spin_unlock_irqrestore(&res_lock, flags);
		return ret;
	}

	idx = (ch->lrq == &ch->req[0]) ? 1 : 0;

	/* Abort the current xfer */
	if (ch->req[idx].x) {
		xfer = container_of(ch->req[idx].x,
				struct s3c_pl330_xfer, px);

		/* Drop xfer during FLUSH */
		if (pl330op == PL330_OP_FLUSH)
			del_from_queue(xfer);

		ch->req[idx].x = NULL;

		spin_unlock_irqrestore(&res_lock, flags);
		_finish_off(xfer, S3C2410_RES_ABORT,
				pl330op == PL330_OP_FLUSH ? 1 : 0);
		spin_lock_irqsave(&res_lock, flags);
	}

	/* Flush the whole queue */
	if (pl330op == PL330_OP_FLUSH) {

		if (ch->req[1 - idx].x) {
			xfer = container_of(ch->req[1 - idx].x,
					struct s3c_pl330_xfer, px);

			del_from_queue(xfer);

			ch->req[1 - idx].x = NULL;

			spin_unlock_irqrestore(&res_lock, flags);
			_finish_off(xfer, S3C2410_RES_ABORT, 1);
			spin_lock_irqsave(&res_lock, flags);
		}

		/* Finish off the remaining in the queue */
		xfer = ch->xfer_head;
		while (xfer) {

			del_from_queue(xfer);

			spin_unlock_irqrestore(&res_lock, flags);
			_finish_off(xfer, S3C2410_RES_ABORT, 1);
			spin_lock_irqsave(&res_lock, flags);

			xfer = ch->xfer_head;
		}
	}

ctrl_exit:
	spin_unlock_irqrestore(&res_lock, flags);

	return ret;
}
EXPORT_SYMBOL(s3c2410_dma_ctrl);

int s3c2410_dma_enqueue(enum dma_ch id, void *token,
			dma_addr_t addr, int size)
{
	struct s3c_pl330_chan *ch;
	struct s3c_pl330_xfer *xfer;
	unsigned long flags;
	int idx, ret = 0;

	spin_lock_irqsave(&res_lock, flags);

	ch = id_to_chan(id);

	/* Error if invalid or free channel */
	if (!ch || chan_free(ch)) {
		ret = -EINVAL;
		goto enq_exit;
	}

	/* Error if size is unaligned */
	if (ch->rqcfg.brst_size && size % (1 << ch->rqcfg.brst_size)) {
		ret = -EINVAL;
		goto enq_exit;
	}

	xfer = kmem_cache_alloc(ch->dmac->kmcache, GFP_ATOMIC);
	if (!xfer) {
		ret = -ENOMEM;
		goto enq_exit;
	}

	xfer->token = token;
	xfer->chan = ch;
	xfer->px.bytes = size;
	xfer->px.next = NULL; /* Single request */

	/* For S3C DMA API, direction is always fixed for all xfers */
	if (ch->req[0].rqtype == MEMTODEV) {
		xfer->px.src_addr = addr;
		xfer->px.dst_addr = ch->sdaddr;
	} else {
		xfer->px.src_addr = ch->sdaddr;
		xfer->px.dst_addr = addr;
	}

	add_to_queue(ch, xfer, 0);

	/* Try submitting on either request */
	idx = (ch->lrq == &ch->req[0]) ? 1 : 0;

	if (!ch->req[idx].x)
		s3c_pl330_submit(ch, &ch->req[idx]);
	else
		s3c_pl330_submit(ch, &ch->req[1 - idx]);

	spin_unlock_irqrestore(&res_lock, flags);

	if (ch->options & S3C2410_DMAF_AUTOSTART)
		s3c2410_dma_ctrl(id, S3C2410_DMAOP_START);

	return 0;

enq_exit:
	spin_unlock_irqrestore(&res_lock, flags);

	return ret;
}
EXPORT_SYMBOL(s3c2410_dma_enqueue);
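
/*
 * Sketch of how S3C2410_DMAF_CIRCULAR interacts with the enqueue path
 * (tok0/tok1 and the period buffers are hypothetical names). Because
 * get_from_queue() leaves the list intact and _finish_off() skips the
 * kmem_cache_free() in circular mode, two buffers submitted once keep
 * recycling, with the callback firing on each completed period:
 *
 *	s3c2410_dma_setflags(id, S3C2410_DMAF_CIRCULAR | S3C2410_DMAF_AUTOSTART);
 *	s3c2410_dma_enqueue(id, tok0, period0_dma, period_len);
 *	s3c2410_dma_enqueue(id, tok1, period1_dma, period_len);
 */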

int s3c2410_dma_request(enum dma_ch id,
			struct s3c2410_dma_client *client,
			void *dev)
{
	struct s3c_pl330_dmac *dmac;
	struct s3c_pl330_chan *ch;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&res_lock, flags);

	ch = chan_acquire(id);
	if (!ch) {
		ret = -EBUSY;
		goto req_exit;
	}

	dmac = ch->dmac;

	ch->pl330_chan_id = pl330_request_channel(dmac->pi);
	if (!ch->pl330_chan_id) {
		chan_release(ch);
		ret = -EBUSY;
		goto req_exit;
	}

	ch->client = client;
	ch->options = 0; /* Clear any option */
	ch->callback_fn = NULL; /* Clear any callback */
	ch->lrq = NULL;

	ch->rqcfg.brst_size = 2; /* Default word size */
	ch->rqcfg.swap = SWAP_NO;
	ch->rqcfg.scctl = SCCTRL0; /* Noncacheable and nonbufferable */
	ch->rqcfg.dcctl = DCCTRL0; /* Noncacheable and nonbufferable */
	ch->rqcfg.privileged = 0;
	ch->rqcfg.insnaccess = 0;

	/* Set invalid direction */
	ch->req[0].rqtype = DEVTODEV;
	ch->req[1].rqtype = ch->req[0].rqtype;

	ch->req[0].cfg = &ch->rqcfg;
	ch->req[1].cfg = ch->req[0].cfg;

	ch->req[0].peri = iface_of_dmac(dmac, id) - 1; /* Original index */
	ch->req[1].peri = ch->req[0].peri;

	ch->req[0].token = &ch->req[0];
	ch->req[0].xfer_cb = s3c_pl330_rq0;
	ch->req[1].token = &ch->req[1];
	ch->req[1].xfer_cb = s3c_pl330_rq1;

	ch->req[0].x = NULL;
	ch->req[1].x = NULL;

	/* Reset xfer list */
	INIT_LIST_HEAD(&ch->xfer_list);
	ch->xfer_head = NULL;

req_exit:
	spin_unlock_irqrestore(&res_lock, flags);

	return ret;
}
EXPORT_SYMBOL(s3c2410_dma_request);
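
/*
 * Usage sketch for the client API implemented in this file. This is an
 * illustration only: DMACH_UART0_TX, UART0_TX_FIFO and tx_done() are
 * hypothetical names, not identifiers provided by this driver, so the
 * block is compiled out.
 */
#if 0
static struct s3c2410_dma_client demo_client = { .name = "demo" };

static void demo_submit(void *token, dma_addr_t buf, int len)
{
	s3c2410_dma_request(DMACH_UART0_TX, &demo_client, NULL);
	s3c2410_dma_devconfig(DMACH_UART0_TX, S3C2410_DMASRC_MEM,
			UART0_TX_FIFO);		/* M->P, fixed FIFO address */
	s3c2410_dma_config(DMACH_UART0_TX, 1);	/* 1-byte xfer unit */
	s3c2410_dma_set_buffdone_fn(DMACH_UART0_TX, tx_done);
	s3c2410_dma_setflags(DMACH_UART0_TX, S3C2410_DMAF_AUTOSTART);

	s3c2410_dma_enqueue(DMACH_UART0_TX, token, buf, len);

	/* ... later, s3c2410_dma_free(DMACH_UART0_TX, &demo_client); */
}
#endif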

int s3c2410_dma_free(enum dma_ch id, struct s3c2410_dma_client *client)
{
	struct s3c_pl330_chan *ch;
	struct s3c_pl330_xfer *xfer;
	unsigned long flags;
	int ret = 0;
	unsigned idx;

	spin_lock_irqsave(&res_lock, flags);

	ch = id_to_chan(id);

	if (!ch || chan_free(ch))
		goto free_exit;

	/* Refuse if someone else wanted to free the channel */
	if (ch->client != client) {
		ret = -EBUSY;
		goto free_exit;
	}

	/* Stop any active xfer, flush the queue and do callbacks */
	pl330_chan_ctrl(ch->pl330_chan_id, PL330_OP_FLUSH);

	/* Abort the submitted requests */
	idx = (ch->lrq == &ch->req[0]) ? 1 : 0;

	if (ch->req[idx].x) {
		xfer = container_of(ch->req[idx].x,
				struct s3c_pl330_xfer, px);

		ch->req[idx].x = NULL;
		del_from_queue(xfer);

		spin_unlock_irqrestore(&res_lock, flags);
		_finish_off(xfer, S3C2410_RES_ABORT, 1);
		spin_lock_irqsave(&res_lock, flags);
	}

	if (ch->req[1 - idx].x) {
		xfer = container_of(ch->req[1 - idx].x,
				struct s3c_pl330_xfer, px);

		ch->req[1 - idx].x = NULL;
		del_from_queue(xfer);

		spin_unlock_irqrestore(&res_lock, flags);
		_finish_off(xfer, S3C2410_RES_ABORT, 1);
		spin_lock_irqsave(&res_lock, flags);
	}

	/* Pluck and abort the queued requests in order */
	do {
		xfer = get_from_queue(ch, 1);

		spin_unlock_irqrestore(&res_lock, flags);
		_finish_off(xfer, S3C2410_RES_ABORT, 1);
		spin_lock_irqsave(&res_lock, flags);
	} while (xfer);

	ch->client = NULL;

	pl330_release_channel(ch->pl330_chan_id);

	ch->pl330_chan_id = NULL;

	chan_release(ch);

free_exit:
	spin_unlock_irqrestore(&res_lock, flags);

	return ret;
}
EXPORT_SYMBOL(s3c2410_dma_free);

int s3c2410_dma_config(enum dma_ch id, int xferunit)
{
	struct s3c_pl330_chan *ch;
	struct pl330_info *pi;
	unsigned long flags;
	int i, dbwidth, ret = 0;

	spin_lock_irqsave(&res_lock, flags);

	ch = id_to_chan(id);

	if (!ch || chan_free(ch)) {
		ret = -EINVAL;
		goto cfg_exit;
	}

	pi = ch->dmac->pi;
	dbwidth = pi->pcfg.data_bus_width / 8;

	/* Max size of xfer can be pcfg.data_bus_width */
	if (xferunit > dbwidth) {
		ret = -EINVAL;
		goto cfg_exit;
	}

	/* Bound the search so a non-power-of-2 size can't loop forever */
	i = 0;
	while (i < 8 && xferunit != (1 << i))
		i++;

	/* If valid value */
	if (xferunit == (1 << i))
		ch->rqcfg.brst_size = i;
	else
		ret = -EINVAL;

cfg_exit:
	spin_unlock_irqrestore(&res_lock, flags);

	return ret;
}
EXPORT_SYMBOL(s3c2410_dma_config);
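
/*
 * Example (illustrative): on a 32-bit bus, s3c2410_dma_config(id, 4)
 * stores brst_size = 2, since 4 == 1 << 2; a non-power-of-2 unit such
 * as 3, or anything wider than the data bus, is rejected with -EINVAL.
 */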

/* Options that are supported by this driver */
#define S3C_PL330_FLAGS (S3C2410_DMAF_CIRCULAR | S3C2410_DMAF_AUTOSTART)

int s3c2410_dma_setflags(enum dma_ch id, unsigned int options)
{
	struct s3c_pl330_chan *ch;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&res_lock, flags);

	ch = id_to_chan(id);

	if (!ch || chan_free(ch) || options & ~(S3C_PL330_FLAGS))
		ret = -EINVAL;
	else
		ch->options = options;

	spin_unlock_irqrestore(&res_lock, flags);

	return ret;
}
EXPORT_SYMBOL(s3c2410_dma_setflags);

int s3c2410_dma_set_buffdone_fn(enum dma_ch id, s3c2410_dma_cbfn_t rtn)
{
	struct s3c_pl330_chan *ch;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&res_lock, flags);

	ch = id_to_chan(id);

	if (!ch || chan_free(ch))
		ret = -EINVAL;
	else
		ch->callback_fn = rtn;

	spin_unlock_irqrestore(&res_lock, flags);

	return ret;
}
EXPORT_SYMBOL(s3c2410_dma_set_buffdone_fn);

int s3c2410_dma_devconfig(enum dma_ch id, enum s3c2410_dmasrc source,
			  unsigned long address)
{
	struct s3c_pl330_chan *ch;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&res_lock, flags);

	ch = id_to_chan(id);

	if (!ch || chan_free(ch)) {
		ret = -EINVAL;
		goto devcfg_exit;
	}

	switch (source) {
	case S3C2410_DMASRC_HW: /* P->M */
		ch->req[0].rqtype = DEVTOMEM;
		ch->req[1].rqtype = DEVTOMEM;
		ch->rqcfg.src_inc = 0;
		ch->rqcfg.dst_inc = 1;
		break;
	case S3C2410_DMASRC_MEM: /* M->P */
		ch->req[0].rqtype = MEMTODEV;
		ch->req[1].rqtype = MEMTODEV;
		ch->rqcfg.src_inc = 1;
		ch->rqcfg.dst_inc = 0;
		break;
	default:
		ret = -EINVAL;
		goto devcfg_exit;
	}

	ch->sdaddr = address;

devcfg_exit:
	spin_unlock_irqrestore(&res_lock, flags);

	return ret;
}
EXPORT_SYMBOL(s3c2410_dma_devconfig);

int s3c2410_dma_getposition(enum dma_ch id, dma_addr_t *src, dma_addr_t *dst)
{
	struct s3c_pl330_chan *ch = id_to_chan(id);
	struct pl330_chanstatus status;
	int ret;

	if (!ch || chan_free(ch))
		return -EINVAL;

	ret = pl330_chan_status(ch->pl330_chan_id, &status);
	if (ret < 0)
		return ret;

	*src = status.src_addr;
	*dst = status.dst_addr;

	return 0;
}
EXPORT_SYMBOL(s3c2410_dma_getposition);

static irqreturn_t pl330_irq_handler(int irq, void *data)
{
	if (pl330_update(data))
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}

static int pl330_probe(struct platform_device *pdev)
{
	struct s3c_pl330_dmac *s3c_pl330_dmac;
	struct s3c_pl330_platdata *pl330pd;
	struct pl330_info *pl330_info;
	struct resource *res;
	int i, ret, irq;

	pl330pd = pdev->dev.platform_data;

	/* Can't do without the list of _32_ peripherals */
	if (!pl330pd || !pl330pd->peri) {
		dev_err(&pdev->dev, "platform data missing!\n");
		return -ENODEV;
	}

	pl330_info = kzalloc(sizeof(*pl330_info), GFP_KERNEL);
	if (!pl330_info)
		return -ENOMEM;

	pl330_info->pl330_data = NULL;
	pl330_info->dev = &pdev->dev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		ret = -ENODEV;
		goto probe_err1;
	}

	/* Fail loudly instead of ignoring a busy region */
	if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
		ret = -EBUSY;
		goto probe_err1;
	}

	pl330_info->base = ioremap(res->start, resource_size(res));
	if (!pl330_info->base) {
		ret = -ENXIO;
		goto probe_err2;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = irq;
		goto probe_err3;
	}

	ret = request_irq(irq, pl330_irq_handler, 0,
			dev_name(&pdev->dev), pl330_info);
	if (ret)
		goto probe_err4;

	ret = pl330_add(pl330_info);
	if (ret)
		goto probe_err5;

	/* Allocate a new DMAC */
	s3c_pl330_dmac = kmalloc(sizeof(*s3c_pl330_dmac), GFP_KERNEL);
	if (!s3c_pl330_dmac) {
		ret = -ENOMEM;
		goto probe_err6;
	}

	/* Hook the info */
	s3c_pl330_dmac->pi = pl330_info;

	/* No busy channels */
	s3c_pl330_dmac->busy_chan = 0;

	s3c_pl330_dmac->kmcache = kmem_cache_create(dev_name(&pdev->dev),
				sizeof(struct s3c_pl330_xfer), 0, 0, NULL);

	if (!s3c_pl330_dmac->kmcache) {
		ret = -ENOMEM;
		goto probe_err7;
	}

	/* Get the list of peripherals */
	s3c_pl330_dmac->peri = pl330pd->peri;

	/* Attach to the list of DMACs */
	list_add_tail(&s3c_pl330_dmac->node, &dmac_list);

	/* Create a channel for each peripheral in the DMAC
	 * that is, if it doesn't already exist
	 */
	for (i = 0; i < PL330_MAX_PERI; i++)
		if (s3c_pl330_dmac->peri[i] != DMACH_MAX)
			chan_add(s3c_pl330_dmac->peri[i]);

	printk(KERN_INFO
		"Loaded driver for PL330 DMAC-%d %s\n", pdev->id, pdev->name);
	printk(KERN_INFO
		"\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n",
		pl330_info->pcfg.data_buf_dep,
		pl330_info->pcfg.data_bus_width / 8, pl330_info->pcfg.num_chan,
		pl330_info->pcfg.num_peri, pl330_info->pcfg.num_events);

	return 0;

probe_err7:
	kfree(s3c_pl330_dmac);
probe_err6:
	pl330_del(pl330_info);
probe_err5:
	free_irq(irq, pl330_info);
probe_err4:
probe_err3:
	iounmap(pl330_info->base);
probe_err2:
	release_mem_region(res->start, resource_size(res));
probe_err1:
	kfree(pl330_info);

	return ret;
}

static int pl330_remove(struct platform_device *pdev)
{
	struct s3c_pl330_dmac *dmac, *d;
	struct s3c_pl330_chan *ch, *_ch;
	unsigned long flags;
	int del, found;

	if (!pdev->dev.platform_data)
		return -EINVAL;

	spin_lock_irqsave(&res_lock, flags);

	found = 0;
	list_for_each_entry(d, &dmac_list, node)
		if (d->pi->dev == &pdev->dev) {
			found = 1;
			break;
		}

	if (!found) {
		spin_unlock_irqrestore(&res_lock, flags);
		return 0;
	}

	dmac = d;

	/* Remove all Channels that are managed only by this DMAC.
	 * Use the _safe iterator since entries may be freed as we go.
	 */
	list_for_each_entry_safe(ch, _ch, &chan_list, node) {

		/* Only channels that are handled by this DMAC */
		if (iface_of_dmac(dmac, ch->id))
			del = 1;
		else
			continue;

		/* Don't remove if some other DMAC has it too */
		list_for_each_entry(d, &dmac_list, node)
			if (d != dmac && iface_of_dmac(d, ch->id)) {
				del = 0;
				break;
			}

		if (del) {
			spin_unlock_irqrestore(&res_lock, flags);
			s3c2410_dma_free(ch->id, ch->client);
			spin_lock_irqsave(&res_lock, flags);
			list_del(&ch->node);
			kfree(ch);
		}
	}

	/* Remove the DMAC */
	list_del(&dmac->node);
	kfree(dmac);

	spin_unlock_irqrestore(&res_lock, flags);

	return 0;
}

static struct platform_driver pl330_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name = "s3c-pl330",
	},
	.probe = pl330_probe,
	.remove = pl330_remove,
};

static int __init pl330_init(void)
{
	return platform_driver_register(&pl330_driver);
}
module_init(pl330_init);

static void __exit pl330_exit(void)
{
	platform_driver_unregister(&pl330_driver);
}
module_exit(pl330_exit);

MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
MODULE_DESCRIPTION("Driver for PL330 DMA Controller");
MODULE_LICENSE("GPL");