/*
 * offload engine driver for the Marvell XOR engine
 * Copyright (C) 2007, 2008, Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/memory.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/irqdomain.h>
#include <linux/platform_data/dma-mv_xor.h>

#include "dmaengine.h"
#include "mv_xor.h"

static void mv_xor_issue_pending(struct dma_chan *chan);

#define to_mv_xor_chan(chan)		\
	container_of(chan, struct mv_xor_chan, dmachan)

#define to_mv_xor_slot(tx)		\
	container_of(tx, struct mv_xor_desc_slot, async_tx)

#define mv_chan_to_devp(chan)		\
	((chan)->dmadev.dev)

static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;

	hw_desc->status = (1 << 31);
	hw_desc->phy_next_desc = 0;
	hw_desc->desc_command = (1 << 31);
}

static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	return hw_desc->phy_dest_addr;
}

static u32 mv_desc_get_src_addr(struct mv_xor_desc_slot *desc,
				int src_idx)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	return hw_desc->phy_src_addr[src_idx];
}

static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc,
				   u32 byte_count)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->byte_count = byte_count;
}

static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc,
				  u32 next_desc_addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	BUG_ON(hw_desc->phy_next_desc);
	hw_desc->phy_next_desc = next_desc_addr;
}

static void mv_desc_clear_next_desc(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_next_desc = 0;
}

static void mv_desc_set_block_fill_val(struct mv_xor_desc_slot *desc, u32 val)
{
	desc->value = val;
}

static void mv_desc_set_dest_addr(struct mv_xor_desc_slot *desc,
				  dma_addr_t addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_dest_addr = addr;
}

static int mv_chan_memset_slot_count(size_t len)
{
	return 1;
}

#define mv_chan_memcpy_slot_count(c) mv_chan_memset_slot_count(c)

static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
				 int index, dma_addr_t addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_src_addr[index] = addr;
	if (desc->type == DMA_XOR)
		hw_desc->desc_command |= (1 << index);
}

static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
{
	return __raw_readl(XOR_CURR_DESC(chan));
}

static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
					u32 next_desc_addr)
{
	__raw_writel(next_desc_addr, XOR_NEXT_DESC(chan));
}

static void mv_chan_set_dest_pointer(struct mv_xor_chan *chan, u32 desc_addr)
{
	__raw_writel(desc_addr, XOR_DEST_POINTER(chan));
}

static void mv_chan_set_block_size(struct mv_xor_chan *chan, u32 block_size)
{
	__raw_writel(block_size, XOR_BLOCK_SIZE(chan));
}

static void mv_chan_set_value(struct mv_xor_chan *chan, u32 value)
{
	__raw_writel(value, XOR_INIT_VALUE_LOW(chan));
	__raw_writel(value, XOR_INIT_VALUE_HIGH(chan));
}

static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
{
	u32 val = __raw_readl(XOR_INTR_MASK(chan));

	val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
	__raw_writel(val, XOR_INTR_MASK(chan));
}

static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
{
	u32 intr_cause = __raw_readl(XOR_INTR_CAUSE(chan));

	intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;
	return intr_cause;
}

static int mv_is_err_intr(u32 intr_cause)
{
	if (intr_cause & ((1<<4)|(1<<5)|(1<<6)|(1<<7)|(1<<8)|(1<<9)))
		return 1;

	return 0;
}

static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
{
	u32 val = ~(1 << (chan->idx * 16));

	dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val);
	__raw_writel(val, XOR_INTR_CAUSE(chan));
}

static void mv_xor_device_clear_err_status(struct mv_xor_chan *chan)
{
	u32 val = 0xFFFF0000 >> (chan->idx * 16);

	__raw_writel(val, XOR_INTR_CAUSE(chan));
}

static int mv_can_chain(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc_slot *chain_old_tail = list_entry(
		desc->chain_node.prev, struct mv_xor_desc_slot, chain_node);

	if (chain_old_tail->type != desc->type)
		return 0;
	if (desc->type == DMA_MEMSET)
		return 0;

	return 1;
}

static void mv_set_mode(struct mv_xor_chan *chan,
			enum dma_transaction_type type)
{
	u32 op_mode;
	u32 config = __raw_readl(XOR_CONFIG(chan));

	switch (type) {
	case DMA_XOR:
		op_mode = XOR_OPERATION_MODE_XOR;
		break;
	case DMA_MEMCPY:
		op_mode = XOR_OPERATION_MODE_MEMCPY;
		break;
	case DMA_MEMSET:
		op_mode = XOR_OPERATION_MODE_MEMSET;
		break;
	default:
		dev_err(mv_chan_to_devp(chan),
			"error: unsupported operation %d.\n",
			type);
		BUG();
		return;
	}

	config &= ~0x7;
	config |= op_mode;
	__raw_writel(config, XOR_CONFIG(chan));
	chan->current_type = type;
}

static void mv_chan_activate(struct mv_xor_chan *chan)
{
	u32 activation;

	dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");
	activation = __raw_readl(XOR_ACTIVATION(chan));
	activation |= 0x1;
	__raw_writel(activation, XOR_ACTIVATION(chan));
}

static char mv_chan_is_busy(struct mv_xor_chan *chan)
{
	u32 state = __raw_readl(XOR_ACTIVATION(chan));

	state = (state >> 4) & 0x3;

	return (state == 1) ? 1 : 0;
}

static int mv_chan_xor_slot_count(size_t len, int src_cnt)
{
	return 1;
}

/**
 * mv_xor_free_slots - flags descriptor slots for reuse
 * @slot: Slot to free
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_xor_free_slots(struct mv_xor_chan *mv_chan,
			      struct mv_xor_desc_slot *slot)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d slot %p\n",
		__func__, __LINE__, slot);

	slot->slots_per_op = 0;
}

/*
 * mv_xor_start_new_chain - program the engine to operate on new chain
 * headed by sw_desc
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan,
				   struct mv_xor_desc_slot *sw_desc)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: sw_desc %p\n",
		__func__, __LINE__, sw_desc);

	if (sw_desc->type != mv_chan->current_type)
		mv_set_mode(mv_chan, sw_desc->type);

	if (sw_desc->type == DMA_MEMSET) {
		/* for memset requests we need to program the engine, no
		 * descriptors used.
		 */
		struct mv_xor_desc *hw_desc = sw_desc->hw_desc;

		mv_chan_set_dest_pointer(mv_chan, hw_desc->phy_dest_addr);
		mv_chan_set_block_size(mv_chan, sw_desc->unmap_len);
		mv_chan_set_value(mv_chan, sw_desc->value);
	} else {
		/* set the hardware chain */
		mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);
	}

	mv_chan->pending += sw_desc->slot_cnt;
	mv_xor_issue_pending(&mv_chan->dmachan);
}

static dma_cookie_t
mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
			       struct mv_xor_chan *mv_chan,
			       dma_cookie_t cookie)
{
	BUG_ON(desc->async_tx.cookie < 0);

	if (desc->async_tx.cookie > 0) {
		cookie = desc->async_tx.cookie;

		/* call the callback (must not sleep or submit new
		 * operations to this channel)
		 */
		if (desc->async_tx.callback)
			desc->async_tx.callback(
				desc->async_tx.callback_param);

		/* unmap dma addresses
		 * (unmap_single vs unmap_page?)
		 */
		if (desc->group_head && desc->unmap_len) {
			struct mv_xor_desc_slot *unmap = desc->group_head;
			struct device *dev = mv_chan_to_devp(mv_chan);
			u32 len = unmap->unmap_len;
			enum dma_ctrl_flags flags = desc->async_tx.flags;
			u32 src_cnt;
			dma_addr_t addr;
			dma_addr_t dest;

			src_cnt = unmap->unmap_src_cnt;
			dest = mv_desc_get_dest_addr(unmap);
			if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
				enum dma_data_direction dir;

				if (src_cnt > 1) /* is xor ? */
					dir = DMA_BIDIRECTIONAL;
				else
					dir = DMA_FROM_DEVICE;
				dma_unmap_page(dev, dest, len, dir);
			}

			if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
				while (src_cnt--) {
					addr = mv_desc_get_src_addr(unmap,
								    src_cnt);
					if (addr == dest)
						continue;
					dma_unmap_page(dev, addr, len,
						       DMA_TO_DEVICE);
				}
			}
			desc->group_head = NULL;
		}
	}

	/* run dependent operations */
	dma_run_dependencies(&desc->async_tx);

	return cookie;
}

static int
mv_xor_clean_completed_slots(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 completed_node) {
		if (async_tx_test_ack(&iter->async_tx)) {
			list_del(&iter->completed_node);
			mv_xor_free_slots(mv_chan, iter);
		}
	}
	return 0;
}

static int
mv_xor_clean_slot(struct mv_xor_desc_slot *desc,
		  struct mv_xor_chan *mv_chan)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: desc %p flags %d\n",
		__func__, __LINE__, desc, desc->async_tx.flags);
	list_del(&desc->chain_node);
	/* the client is allowed to attach dependent operations
	 * until 'ack' is set
	 */
	if (!async_tx_test_ack(&desc->async_tx)) {
		/* move this slot to the completed_slots */
		list_add_tail(&desc->completed_node, &mv_chan->completed_slots);
		return 0;
	}

	mv_xor_free_slots(mv_chan, desc);
	return 0;
}

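/*
 * Walk the submitted chain and run completion actions for descriptors the
 * hardware has already finished with, stopping at the descriptor currently
 * loaded into the channel.  If the channel has gone idle while descriptors
 * remain, restart the engine on the head of the remaining chain.
 * Caller must hold &mv_chan->lock (see mv_xor_slot_cleanup() below).
 */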
static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;
	dma_cookie_t cookie = 0;
	int busy = mv_chan_is_busy(mv_chan);
	u32 current_desc = mv_chan_get_current_desc(mv_chan);
	int seen_current = 0;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
	dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc);
	mv_xor_clean_completed_slots(mv_chan);

	/* free completed slots from the chain starting with
	 * the oldest descriptor
	 */
	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 chain_node) {
		prefetch(_iter);
		prefetch(&_iter->async_tx);

		/* do not advance past the current descriptor loaded into the
		 * hardware channel, subsequent descriptors are either in
		 * process or have not been submitted
		 */
		if (seen_current)
			break;

		/* stop the search if we reach the current descriptor and the
		 * channel is busy
		 */
		if (iter->async_tx.phys == current_desc) {
			seen_current = 1;
			if (busy)
				break;
		}

		cookie = mv_xor_run_tx_complete_actions(iter, mv_chan, cookie);

		if (mv_xor_clean_slot(iter, mv_chan))
			break;
	}

	if ((busy == 0) && !list_empty(&mv_chan->chain)) {
		struct mv_xor_desc_slot *chain_head;

		chain_head = list_entry(mv_chan->chain.next,
					struct mv_xor_desc_slot,
					chain_node);

		mv_xor_start_new_chain(mv_chan, chain_head);
	}

	if (cookie > 0)
		mv_chan->dmachan.completed_cookie = cookie;
}

static void
mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
{
	spin_lock_bh(&mv_chan->lock);
	__mv_xor_slot_cleanup(mv_chan);
	spin_unlock_bh(&mv_chan->lock);
}

static void mv_xor_tasklet(unsigned long data)
{
	struct mv_xor_chan *chan = (struct mv_xor_chan *) data;

	mv_xor_slot_cleanup(chan);
}

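/*
 * Allocate a contiguous run of descriptor slots from the channel's slot pool.
 * The search starts at the last slot handed out and wraps to the beginning of
 * the list on a second pass; if no run of num_slots free slots is found, the
 * cleanup tasklet is scheduled and NULL is returned so the caller can retry
 * later.  Caller must hold &mv_chan->lock.
 */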
static struct mv_xor_desc_slot *
mv_xor_alloc_slots(struct mv_xor_chan *mv_chan, int num_slots,
		   int slots_per_op)
{
	struct mv_xor_desc_slot *iter, *_iter, *alloc_start = NULL;
	LIST_HEAD(chain);
	int slots_found, retry = 0;

	/* start search from the last allocated descriptor
	 * if a contiguous allocation can not be found start searching
	 * from the beginning of the list
	 */
retry:
	slots_found = 0;
	if (retry == 0)
		iter = mv_chan->last_used;
	else
		iter = list_entry(&mv_chan->all_slots,
				  struct mv_xor_desc_slot,
				  slot_node);

	list_for_each_entry_safe_continue(
		iter, _iter, &mv_chan->all_slots, slot_node) {
		prefetch(_iter);
		prefetch(&_iter->async_tx);
		if (iter->slots_per_op) {
			/* give up after finding the first busy slot
			 * on the second pass through the list
			 */
			if (retry)
				break;

			slots_found = 0;
			continue;
		}

		/* start the allocation if the slot is correctly aligned */
		if (!slots_found++)
			alloc_start = iter;

		if (slots_found == num_slots) {
			struct mv_xor_desc_slot *alloc_tail = NULL;
			struct mv_xor_desc_slot *last_used = NULL;

			iter = alloc_start;
			while (num_slots) {
				int i;

				/* pre-ack all but the last descriptor */
				async_tx_ack(&iter->async_tx);

				list_add_tail(&iter->chain_node, &chain);
				alloc_tail = iter;
				iter->async_tx.cookie = 0;
				iter->slot_cnt = num_slots;
				iter->xor_check_result = NULL;
				for (i = 0; i < slots_per_op; i++) {
					iter->slots_per_op = slots_per_op - i;
					last_used = iter;
					iter = list_entry(iter->slot_node.next,
						struct mv_xor_desc_slot,
						slot_node);
				}
				num_slots -= slots_per_op;
			}
			alloc_tail->group_head = alloc_start;
			alloc_tail->async_tx.cookie = -EBUSY;
			list_splice(&chain, &alloc_tail->tx_list);
			mv_chan->last_used = last_used;
			mv_desc_clear_next_desc(alloc_start);
			mv_desc_clear_next_desc(alloc_tail);
			return alloc_tail;
		}
	}
	if (!retry++)
		goto retry;

	/* try to free some slots if the allocation fails */
	tasklet_schedule(&mv_chan->irq_tasklet);

	return NULL;
}

/************************ DMA engine API functions ****************************/

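/*
 * ->tx_submit() hook: assign a cookie and append this transaction's
 * descriptor list to the channel's software chain.  When the new descriptors
 * can be chained to the previous tail they are also linked into the hardware
 * chain; otherwise, or when the engine has already consumed the old tail, a
 * new hardware chain is started.
 */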
static dma_cookie_t
mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx);
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan);
	struct mv_xor_desc_slot *grp_start, *old_chain_tail;
	dma_cookie_t cookie;
	int new_hw_chain = 1;

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p: async_tx %p\n",
		__func__, sw_desc, &sw_desc->async_tx);

	grp_start = sw_desc->group_head;

	spin_lock_bh(&mv_chan->lock);
	cookie = dma_cookie_assign(tx);

	if (list_empty(&mv_chan->chain))
		list_splice_init(&sw_desc->tx_list, &mv_chan->chain);
	else {
		new_hw_chain = 0;

		old_chain_tail = list_entry(mv_chan->chain.prev,
					    struct mv_xor_desc_slot,
					    chain_node);
		list_splice_init(&grp_start->tx_list,
				 &old_chain_tail->chain_node);

		if (!mv_can_chain(grp_start))
			goto submit_done;

		dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %x\n",
			old_chain_tail->async_tx.phys);

		/* fix up the hardware chain */
		mv_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys);

		/* if the channel is not busy */
		if (!mv_chan_is_busy(mv_chan)) {
			u32 current_desc = mv_chan_get_current_desc(mv_chan);
			/*
			 * and the current desc is the end of the chain before
			 * the append, then we need to start the channel
			 */
			if (current_desc == old_chain_tail->async_tx.phys)
				new_hw_chain = 1;
		}
	}

	if (new_hw_chain)
		mv_xor_start_new_chain(mv_chan, grp_start);

submit_done:
	spin_unlock_bh(&mv_chan->lock);

	return cookie;
}

/* returns the number of allocated descriptors */
static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
{
	char *hw_desc;
	int idx;
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *slot = NULL;
	int num_descs_in_pool = MV_XOR_POOL_SIZE/MV_XOR_SLOT_SIZE;

	/* Allocate descriptor slots */
	idx = mv_chan->slots_allocated;
	while (idx < num_descs_in_pool) {
		slot = kzalloc(sizeof(*slot), GFP_KERNEL);
		if (!slot) {
			printk(KERN_INFO "MV XOR Channel only initialized"
				" %d descriptor slots\n", idx);
			break;
		}
		hw_desc = (char *) mv_chan->dma_desc_pool_virt;
		slot->hw_desc = (void *) &hw_desc[idx * MV_XOR_SLOT_SIZE];

		dma_async_tx_descriptor_init(&slot->async_tx, chan);
		slot->async_tx.tx_submit = mv_xor_tx_submit;
		INIT_LIST_HEAD(&slot->chain_node);
		INIT_LIST_HEAD(&slot->slot_node);
		INIT_LIST_HEAD(&slot->tx_list);
		hw_desc = (char *) mv_chan->dma_desc_pool;
		slot->async_tx.phys =
			(dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE];
		slot->idx = idx++;

		spin_lock_bh(&mv_chan->lock);
		mv_chan->slots_allocated = idx;
		list_add_tail(&slot->slot_node, &mv_chan->all_slots);
		spin_unlock_bh(&mv_chan->lock);
	}

	if (mv_chan->slots_allocated && !mv_chan->last_used)
		mv_chan->last_used = list_entry(mv_chan->all_slots.next,
						struct mv_xor_desc_slot,
						slot_node);

	dev_dbg(mv_chan_to_devp(mv_chan),
		"allocated %d descriptor slots last_used: %p\n",
		mv_chan->slots_allocated, mv_chan->last_used);

	return mv_chan->slots_allocated ? : -ENOMEM;
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		       size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc, *grp_start;
	int slot_cnt;

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s dest: %x src %x len: %u flags: %ld\n",
		__func__, dest, src, len, flags);
	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

	spin_lock_bh(&mv_chan->lock);
	slot_cnt = mv_chan_memcpy_slot_count(len);
	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
	if (sw_desc) {
		sw_desc->type = DMA_MEMCPY;
		sw_desc->async_tx.flags = flags;
		grp_start = sw_desc->group_head;
		mv_desc_init(grp_start, flags);
		mv_desc_set_byte_count(grp_start, len);
		mv_desc_set_dest_addr(sw_desc->group_head, dest);
		mv_desc_set_src_addr(grp_start, 0, src);
		sw_desc->unmap_src_cnt = 1;
		sw_desc->unmap_len = len;
	}
	spin_unlock_bh(&mv_chan->lock);

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p async_tx %p\n",
		__func__, sw_desc, sw_desc ? &sw_desc->async_tx : 0);

	return sw_desc ? &sw_desc->async_tx : NULL;
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
		       size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc, *grp_start;
	int slot_cnt;

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s dest: %x len: %u flags: %ld\n",
		__func__, dest, len, flags);
	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

	spin_lock_bh(&mv_chan->lock);
	slot_cnt = mv_chan_memset_slot_count(len);
	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
	if (sw_desc) {
		sw_desc->type = DMA_MEMSET;
		sw_desc->async_tx.flags = flags;
		grp_start = sw_desc->group_head;
		mv_desc_init(grp_start, flags);
		mv_desc_set_byte_count(grp_start, len);
		mv_desc_set_dest_addr(sw_desc->group_head, dest);
		mv_desc_set_block_fill_val(grp_start, value);
		sw_desc->unmap_src_cnt = 1;
		sw_desc->unmap_len = len;
	}
	spin_unlock_bh(&mv_chan->lock);

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p async_tx %p\n",
		__func__, sw_desc, &sw_desc->async_tx);

	return sw_desc ? &sw_desc->async_tx : NULL;
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
		    unsigned int src_cnt, size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc, *grp_start;
	int slot_cnt;

	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s src_cnt: %d len: %u dest %x flags: %ld\n",
		__func__, src_cnt, len, dest, flags);

	spin_lock_bh(&mv_chan->lock);
	slot_cnt = mv_chan_xor_slot_count(len, src_cnt);
	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
	if (sw_desc) {
		sw_desc->type = DMA_XOR;
		sw_desc->async_tx.flags = flags;
		grp_start = sw_desc->group_head;
		mv_desc_init(grp_start, flags);
		/* the byte count field is the same as in memcpy desc */
		mv_desc_set_byte_count(grp_start, len);
		mv_desc_set_dest_addr(sw_desc->group_head, dest);
		sw_desc->unmap_src_cnt = src_cnt;
		sw_desc->unmap_len = len;
		while (src_cnt--)
			mv_desc_set_src_addr(grp_start, src_cnt, src[src_cnt]);
	}
	spin_unlock_bh(&mv_chan->lock);

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p async_tx %p\n",
		__func__, sw_desc, &sw_desc->async_tx);

	return sw_desc ? &sw_desc->async_tx : NULL;
}

static void mv_xor_free_chan_resources(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *iter, *_iter;
	int in_use_descs = 0;

	mv_xor_slot_cleanup(mv_chan);

	spin_lock_bh(&mv_chan->lock);
	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 chain_node) {
		in_use_descs++;
		list_del(&iter->chain_node);
	}
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 completed_node) {
		in_use_descs++;
		list_del(&iter->completed_node);
	}
	list_for_each_entry_safe_reverse(
		iter, _iter, &mv_chan->all_slots, slot_node) {
		list_del(&iter->slot_node);
		kfree(iter);
		mv_chan->slots_allocated--;
	}
	mv_chan->last_used = NULL;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s slots_allocated %d\n",
		__func__, mv_chan->slots_allocated);
	spin_unlock_bh(&mv_chan->lock);

	if (in_use_descs)
		dev_err(mv_chan_to_devp(mv_chan),
			"freeing %d in use descriptors!\n", in_use_descs);
}

/**
 * mv_xor_status - poll the status of an XOR transaction
 * @chan: XOR channel handle
 * @cookie: XOR transaction identifier
 * @txstate: XOR transactions state holder (or NULL)
 */
static enum dma_status mv_xor_status(struct dma_chan *chan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_SUCCESS) {
		mv_xor_clean_completed_slots(mv_chan);
		return ret;
	}
	mv_xor_slot_cleanup(mv_chan);

	return dma_cookie_status(chan, cookie, txstate);
}

static void mv_dump_xor_regs(struct mv_xor_chan *chan)
{
	u32 val;

	val = __raw_readl(XOR_CONFIG(chan));
	dev_err(mv_chan_to_devp(chan), "config 0x%08x.\n", val);

	val = __raw_readl(XOR_ACTIVATION(chan));
	dev_err(mv_chan_to_devp(chan), "activation 0x%08x.\n", val);

	val = __raw_readl(XOR_INTR_CAUSE(chan));
	dev_err(mv_chan_to_devp(chan), "intr cause 0x%08x.\n", val);

	val = __raw_readl(XOR_INTR_MASK(chan));
	dev_err(mv_chan_to_devp(chan), "intr mask 0x%08x.\n", val);

	val = __raw_readl(XOR_ERROR_CAUSE(chan));
	dev_err(mv_chan_to_devp(chan), "error cause 0x%08x.\n", val);

	val = __raw_readl(XOR_ERROR_ADDR(chan));
	dev_err(mv_chan_to_devp(chan), "error addr 0x%08x.\n", val);
}

static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan,
					 u32 intr_cause)
{
	if (intr_cause & (1 << 4)) {
		dev_dbg(mv_chan_to_devp(chan),
			"ignore this error\n");
		return;
	}

	dev_err(mv_chan_to_devp(chan),
		"error on chan %d. intr cause 0x%08x.\n",
		chan->idx, intr_cause);

	mv_dump_xor_regs(chan);

	BUG();
}

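/*
 * Top-half interrupt handler: report error interrupts, defer descriptor
 * cleanup to the tasklet and acknowledge the end-of-chain/end-of-descriptor
 * cause bits for this channel.
 */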
static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
{
	struct mv_xor_chan *chan = data;
	u32 intr_cause = mv_chan_get_intr_cause(chan);

	dev_dbg(mv_chan_to_devp(chan), "intr cause %x\n", intr_cause);

	if (mv_is_err_intr(intr_cause))
		mv_xor_err_interrupt_handler(chan, intr_cause);

	tasklet_schedule(&chan->irq_tasklet);

	mv_xor_device_clear_eoc_cause(chan);

	return IRQ_HANDLED;
}

static void mv_xor_issue_pending(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);

	if (mv_chan->pending >= MV_XOR_THRESHOLD) {
		mv_chan->pending = 0;
		mv_chan_activate(mv_chan);
	}
}

/*
 * Perform a transaction to verify the HW works.
 */
#define MV_XOR_TEST_SIZE 2000

static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
{
	int i;
	void *src, *dest;
	dma_addr_t src_dma, dest_dma;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	struct dma_async_tx_descriptor *tx;
	int err = 0;

	src = kmalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;

	dest = kzalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < MV_XOR_TEST_SIZE; i++)
		((u8 *) src)[i] = (u8)i;

	dma_chan = &mv_chan->dmachan;
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	dest_dma = dma_map_single(dma_chan->device->dev, dest,
				  MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);

	src_dma = dma_map_single(dma_chan->device->dev, src,
				 MV_XOR_TEST_SIZE, DMA_TO_DEVICE);

	tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
				    MV_XOR_TEST_SIZE, 0);
	cookie = mv_xor_tx_submit(tx);
	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(1);

	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_SUCCESS) {
		dev_err(dma_chan->device->dev,
			"Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
				MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
	if (memcmp(src, dest, MV_XOR_TEST_SIZE)) {
		dev_err(dma_chan->device->dev,
			"Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	mv_xor_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}

#define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
static int
mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
{
	int i, src_idx;
	struct page *dest;
	struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dest_dma;
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	u8 cmp_byte = 0;
	u32 cmp_word;
	int err = 0;

	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
		if (!xor_srcs[src_idx]) {
			while (src_idx--)
				__free_page(xor_srcs[src_idx]);
			return -ENOMEM;
		}
	}

	dest = alloc_page(GFP_KERNEL);
	if (!dest) {
		while (src_idx--)
			__free_page(xor_srcs[src_idx]);
		return -ENOMEM;
	}

	/* Fill in src buffers */
	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
		u8 *ptr = page_address(xor_srcs[src_idx]);
		for (i = 0; i < PAGE_SIZE; i++)
			ptr[i] = (1 << src_idx);
	}

	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++)
		cmp_byte ^= (u8) (1 << src_idx);

	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
		(cmp_byte << 8) | cmp_byte;

	memset(page_address(dest), 0, PAGE_SIZE);

	dma_chan = &mv_chan->dmachan;
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	/* test xor */
	dest_dma = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
				DMA_FROM_DEVICE);

	for (i = 0; i < MV_XOR_NUM_SRC_TEST; i++)
		dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
					   0, PAGE_SIZE, DMA_TO_DEVICE);

	tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
				 MV_XOR_NUM_SRC_TEST, PAGE_SIZE, 0);

	cookie = mv_xor_tx_submit(tx);
	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(8);

	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_SUCCESS) {
		dev_err(dma_chan->device->dev,
			"Self-test xor timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
				PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
		u32 *ptr = page_address(dest);
		if (ptr[i] != cmp_word) {
			dev_err(dma_chan->device->dev,
				"Self-test xor failed compare, disabling."
				" index %d, data %x, expected %x\n", i,
				ptr[i], cmp_word);
			err = -ENODEV;
			goto free_resources;
		}
	}

free_resources:
	mv_xor_free_chan_resources(dma_chan);
out:
	src_idx = MV_XOR_NUM_SRC_TEST;
	while (src_idx--)
		__free_page(xor_srcs[src_idx]);
	__free_page(dest);
	return err;
}

/* This driver does not implement any of the optional DMA operations. */
static int
mv_xor_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
	       unsigned long arg)
{
	return -ENOSYS;
}

static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
{
	struct dma_chan *chan, *_chan;
	struct device *dev = mv_chan->dmadev.dev;

	dma_async_device_unregister(&mv_chan->dmadev);

	dma_free_coherent(dev, MV_XOR_POOL_SIZE,
			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);

	list_for_each_entry_safe(chan, _chan, &mv_chan->dmadev.channels,
				 device_node) {
		list_del(&chan->device_node);
	}

	free_irq(mv_chan->irq, mv_chan);

	return 0;
}

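/*
 * Set up one XOR channel: allocate the descriptor pool, register the
 * dmaengine callbacks matching the requested capability mask, hook up the
 * interrupt and cleanup tasklet, and run the memcpy/xor self tests before
 * the channel is registered with the dmaengine core.
 */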
static struct mv_xor_chan *
mv_xor_channel_add(struct mv_xor_device *xordev,
		   struct platform_device *pdev,
		   int idx, dma_cap_mask_t cap_mask, int irq)
{
	int ret = 0;
	struct mv_xor_chan *mv_chan;
	struct dma_device *dma_dev;

	mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
	if (!mv_chan) {
		ret = -ENOMEM;
		goto err_free_dma;
	}

	mv_chan->idx = idx;
	mv_chan->irq = irq;

	dma_dev = &mv_chan->dmadev;

	/* allocate coherent memory for hardware descriptors
	 * note: writecombine gives slightly better performance, but
	 * requires that we explicitly flush the writes
	 */
	mv_chan->dma_desc_pool_virt =
		dma_alloc_writecombine(&pdev->dev, MV_XOR_POOL_SIZE,
				       &mv_chan->dma_desc_pool, GFP_KERNEL);
	if (!mv_chan->dma_desc_pool_virt)
		return ERR_PTR(-ENOMEM);

	/* discover transaction capabilities from the platform data */
	dma_dev->cap_mask = cap_mask;

	INIT_LIST_HEAD(&dma_dev->channels);

	/* set base routines */
	dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
	dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
	dma_dev->device_tx_status = mv_xor_status;
	dma_dev->device_issue_pending = mv_xor_issue_pending;
	dma_dev->device_control = mv_xor_control;
	dma_dev->dev = &pdev->dev;

	/* set prep routines based on capability */
	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
	if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memset = mv_xor_prep_dma_memset;
	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		dma_dev->max_xor = 8;
		dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
	}

	mv_chan->mmr_base = xordev->xor_base;
	if (!mv_chan->mmr_base) {
		ret = -ENOMEM;
		goto err_free_dma;
	}
	tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)
		     mv_chan);

	/* clear errors before enabling interrupts */
	mv_xor_device_clear_err_status(mv_chan);

	ret = request_irq(mv_chan->irq, mv_xor_interrupt_handler,
			  0, dev_name(&pdev->dev), mv_chan);
	if (ret)
		goto err_free_dma;

	mv_chan_unmask_interrupts(mv_chan);

	mv_set_mode(mv_chan, DMA_MEMCPY);

	spin_lock_init(&mv_chan->lock);
	INIT_LIST_HEAD(&mv_chan->chain);
	INIT_LIST_HEAD(&mv_chan->completed_slots);
	INIT_LIST_HEAD(&mv_chan->all_slots);
	mv_chan->dmachan.device = dma_dev;
	dma_cookie_init(&mv_chan->dmachan);

	list_add_tail(&mv_chan->dmachan.device_node, &dma_dev->channels);

	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
		ret = mv_xor_memcpy_self_test(mv_chan);
		dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
		if (ret)
			goto err_free_irq;
	}

	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		ret = mv_xor_xor_self_test(mv_chan);
		dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
		if (ret)
			goto err_free_irq;
	}

	dev_info(&pdev->dev, "Marvell XOR: ( %s%s%s%s)\n",
		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
		 dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "",
		 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
		 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");

	dma_async_device_register(dma_dev);
	return mv_chan;

err_free_irq:
	free_irq(mv_chan->irq, mv_chan);
err_free_dma:
	dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE,
			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
	return ERR_PTR(ret);
}

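/*
 * Program the XOR unit's address decoding windows to match the DRAM layout
 * described by the mbus_dram_target_info passed in from the mbus code:
 * all windows are first cleared, one window is then opened per DRAM chip
 * select, and the resulting enable mask is written for both channels of
 * the unit.
 */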
static void
mv_xor_conf_mbus_windows(struct mv_xor_device *xordev,
			 const struct mbus_dram_target_info *dram)
{
	void __iomem *base = xordev->xor_base;
	u32 win_enable = 0;
	int i;

	for (i = 0; i < 8; i++) {
		writel(0, base + WINDOW_BASE(i));
		writel(0, base + WINDOW_SIZE(i));
		if (i < 4)
			writel(0, base + WINDOW_REMAP_HIGH(i));
	}

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		writel((cs->base & 0xffff0000) |
		       (cs->mbus_attr << 8) |
		       dram->mbus_dram_target_id, base + WINDOW_BASE(i));
		writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));

		win_enable |= (1 << i);
		win_enable |= 3 << (16 + (2 * i));
	}

	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
	writel(win_enable, base + WINDOW_BAR_ENABLE(1));
	writel(0, base + WINDOW_OVERRIDE_CTRL(0));
	writel(0, base + WINDOW_OVERRIDE_CTRL(1));
}

static int mv_xor_probe(struct platform_device *pdev)
{
	const struct mbus_dram_target_info *dram;
	struct mv_xor_device *xordev;
	struct mv_xor_platform_data *pdata = pdev->dev.platform_data;
	struct resource *res;
	int i, ret;

	dev_notice(&pdev->dev, "Marvell XOR driver\n");

	xordev = devm_kzalloc(&pdev->dev, sizeof(*xordev), GFP_KERNEL);
	if (!xordev)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	xordev->xor_base = devm_ioremap(&pdev->dev, res->start,
					resource_size(res));
	if (!xordev->xor_base)
		return -EBUSY;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res)
		return -ENODEV;

	xordev->xor_high_base = devm_ioremap(&pdev->dev, res->start,
					     resource_size(res));
	if (!xordev->xor_high_base)
		return -EBUSY;

	platform_set_drvdata(pdev, xordev);

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	dram = mv_mbus_dram_info();
	if (dram)
		mv_xor_conf_mbus_windows(xordev, dram);

	/* Not all platforms can gate the clock, so it is not
	 * an error if the clock does not exist.
	 */
	xordev->clk = clk_get(&pdev->dev, NULL);
	if (!IS_ERR(xordev->clk))
		clk_prepare_enable(xordev->clk);

	if (pdev->dev.of_node) {
		struct device_node *np;
		int i = 0;

		for_each_child_of_node(pdev->dev.of_node, np) {
			dma_cap_mask_t cap_mask;
			int irq;

			dma_cap_zero(cap_mask);
			if (of_property_read_bool(np, "dmacap,memcpy"))
				dma_cap_set(DMA_MEMCPY, cap_mask);
			if (of_property_read_bool(np, "dmacap,xor"))
				dma_cap_set(DMA_XOR, cap_mask);
			if (of_property_read_bool(np, "dmacap,memset"))
				dma_cap_set(DMA_MEMSET, cap_mask);
			if (of_property_read_bool(np, "dmacap,interrupt"))
				dma_cap_set(DMA_INTERRUPT, cap_mask);

			irq = irq_of_parse_and_map(np, 0);
			if (!irq) {
				ret = -ENODEV;
				goto err_channel_add;
			}

			xordev->channels[i] =
				mv_xor_channel_add(xordev, pdev, i,
						   cap_mask, irq);
			if (IS_ERR(xordev->channels[i])) {
				ret = PTR_ERR(xordev->channels[i]);
				xordev->channels[i] = NULL;
				irq_dispose_mapping(irq);
				goto err_channel_add;
			}

			i++;
		}
	} else if (pdata && pdata->channels) {
		for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
			struct mv_xor_channel_data *cd;
			int irq;

			cd = &pdata->channels[i];
			if (!cd) {
				ret = -ENODEV;
				goto err_channel_add;
			}

			irq = platform_get_irq(pdev, i);
			if (irq < 0) {
				ret = irq;
				goto err_channel_add;
			}

			xordev->channels[i] =
				mv_xor_channel_add(xordev, pdev, i,
						   cd->cap_mask, irq);
			if (IS_ERR(xordev->channels[i])) {
				ret = PTR_ERR(xordev->channels[i]);
				goto err_channel_add;
			}
		}
	}

	return 0;

err_channel_add:
	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++)
		if (xordev->channels[i]) {
			mv_xor_channel_remove(xordev->channels[i]);
			if (pdev->dev.of_node)
				irq_dispose_mapping(xordev->channels[i]->irq);
		}

	if (!IS_ERR(xordev->clk)) {
		clk_disable_unprepare(xordev->clk);
		clk_put(xordev->clk);
	}

	return ret;
}

static int mv_xor_remove(struct platform_device *pdev)
{
	struct mv_xor_device *xordev = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
		if (xordev->channels[i])
			mv_xor_channel_remove(xordev->channels[i]);
	}

	if (!IS_ERR(xordev->clk)) {
		clk_disable_unprepare(xordev->clk);
		clk_put(xordev->clk);
	}

	return 0;
}

#ifdef CONFIG_OF
static struct of_device_id mv_xor_dt_ids[] = {
	{ .compatible = "marvell,orion-xor", },
	{},
};
MODULE_DEVICE_TABLE(of, mv_xor_dt_ids);
#endif

static struct platform_driver mv_xor_driver = {
	.probe		= mv_xor_probe,
	.remove		= mv_xor_remove,
	.driver		= {
		.owner		= THIS_MODULE,
		.name		= MV_XOR_NAME,
		.of_match_table	= of_match_ptr(mv_xor_dt_ids),
	},
};


static int __init mv_xor_init(void)
{
	return platform_driver_register(&mv_xor_driver);
}
module_init(mv_xor_init);

/* it's currently unsafe to unload this module */
#if 0
static void __exit mv_xor_exit(void)
{
	platform_driver_unregister(&mv_xor_driver);
	return;
}

module_exit(mv_xor_exit);
#endif

MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
MODULE_LICENSE("GPL");