/*
 * offload engine driver for the Marvell XOR engine
 * Copyright (C) 2007, 2008, Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/async_tx.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/memory.h>
#include <plat/mv_xor.h>
#include "mv_xor.h"

static void mv_xor_issue_pending(struct dma_chan *chan);

#define to_mv_xor_chan(chan)		\
	container_of(chan, struct mv_xor_chan, common)

#define to_mv_xor_device(dev)		\
	container_of(dev, struct mv_xor_device, common)

#define to_mv_xor_slot(tx)		\
	container_of(tx, struct mv_xor_desc_slot, async_tx)

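/*
 * The container_of() helpers above recover the driver-private wrapper
 * from the generic dmaengine object embedded inside it.  Given the
 * struct dma_chan that the dmaengine core hands back, for example,
 * to_mv_xor_chan() computes the address of the enclosing
 * struct mv_xor_chan from the offset of its "common" member, so no
 * extra lookup table is needed.
 */
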
static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;

	hw_desc->status = (1 << 31);
	hw_desc->phy_next_desc = 0;
	hw_desc->desc_command = (1 << 31);
}

static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	return hw_desc->phy_dest_addr;
}

static u32 mv_desc_get_src_addr(struct mv_xor_desc_slot *desc,
				int src_idx)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	return hw_desc->phy_src_addr[src_idx];
}

static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc,
				   u32 byte_count)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->byte_count = byte_count;
}

static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc,
				  u32 next_desc_addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	BUG_ON(hw_desc->phy_next_desc);
	hw_desc->phy_next_desc = next_desc_addr;
}

static void mv_desc_clear_next_desc(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_next_desc = 0;
}

static void mv_desc_set_block_fill_val(struct mv_xor_desc_slot *desc, u32 val)
{
	desc->value = val;
}

static void mv_desc_set_dest_addr(struct mv_xor_desc_slot *desc,
				  dma_addr_t addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_dest_addr = addr;
}

static int mv_chan_memset_slot_count(size_t len)
{
	return 1;
}

#define mv_chan_memcpy_slot_count(c) mv_chan_memset_slot_count(c)

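/*
 * A single hardware descriptor covers an entire memset or memcpy
 * request (its byte_count field spans the whole transfer), so both
 * slot-count helpers trivially report one slot per operation.
 */
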
static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
				 int index, dma_addr_t addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_src_addr[index] = addr;
	if (desc->type == DMA_XOR)
		hw_desc->desc_command |= (1 << index);
}

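/*
 * For XOR descriptors the low bits of the command word form a source
 * enable mask: setting bit "index" tells the engine to include
 * phy_src_addr[index] in the XOR calculation.
 */
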
static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
{
	return __raw_readl(XOR_CURR_DESC(chan));
}

static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
					u32 next_desc_addr)
{
	__raw_writel(next_desc_addr, XOR_NEXT_DESC(chan));
}

static void mv_chan_set_dest_pointer(struct mv_xor_chan *chan, u32 desc_addr)
{
	__raw_writel(desc_addr, XOR_DEST_POINTER(chan));
}

static void mv_chan_set_block_size(struct mv_xor_chan *chan, u32 block_size)
{
	__raw_writel(block_size, XOR_BLOCK_SIZE(chan));
}

static void mv_chan_set_value(struct mv_xor_chan *chan, u32 value)
{
	__raw_writel(value, XOR_INIT_VALUE_LOW(chan));
	__raw_writel(value, XOR_INIT_VALUE_HIGH(chan));
}

static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
{
	u32 val = __raw_readl(XOR_INTR_MASK(chan));
	val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
	__raw_writel(val, XOR_INTR_MASK(chan));
}

static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
{
	u32 intr_cause = __raw_readl(XOR_INTR_CAUSE(chan));
	intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;
	return intr_cause;
}

static int mv_is_err_intr(u32 intr_cause)
{
	if (intr_cause & ((1<<4)|(1<<5)|(1<<6)|(1<<7)|(1<<8)|(1<<9)))
		return 1;

	return 0;
}

static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
{
	u32 val = (1 << (1 + (chan->idx * 16)));
	dev_dbg(chan->device->common.dev, "%s, val 0x%08x\n", __func__, val);
	__raw_writel(val, XOR_INTR_CAUSE(chan));
}

static void mv_xor_device_clear_err_status(struct mv_xor_chan *chan)
{
	u32 val = 0xFFFF0000 >> (chan->idx * 16);
	__raw_writel(val, XOR_INTR_CAUSE(chan));
}

static int mv_can_chain(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc_slot *chain_old_tail = list_entry(
		desc->chain_node.prev, struct mv_xor_desc_slot, chain_node);

	if (chain_old_tail->type != desc->type)
		return 0;
	if (desc->type == DMA_MEMSET)
		return 0;

	return 1;
}

static void mv_set_mode(struct mv_xor_chan *chan,
			enum dma_transaction_type type)
{
	u32 op_mode;
	u32 config = __raw_readl(XOR_CONFIG(chan));

	switch (type) {
	case DMA_XOR:
		op_mode = XOR_OPERATION_MODE_XOR;
		break;
	case DMA_MEMCPY:
		op_mode = XOR_OPERATION_MODE_MEMCPY;
		break;
	case DMA_MEMSET:
		op_mode = XOR_OPERATION_MODE_MEMSET;
		break;
	default:
		dev_printk(KERN_ERR, chan->device->common.dev,
			   "error: unsupported operation %d.\n",
			   type);
		BUG();
		return;
	}

	config &= ~0x7;
	config |= op_mode;
	__raw_writel(config, XOR_CONFIG(chan));
	chan->current_type = type;
}

static void mv_chan_activate(struct mv_xor_chan *chan)
{
	u32 activation;

	dev_dbg(chan->device->common.dev, " activate chan.\n");
	activation = __raw_readl(XOR_ACTIVATION(chan));
	activation |= 0x1;
	__raw_writel(activation, XOR_ACTIVATION(chan));
}

static char mv_chan_is_busy(struct mv_xor_chan *chan)
{
	u32 state = __raw_readl(XOR_ACTIVATION(chan));

	state = (state >> 4) & 0x3;

	return (state == 1) ? 1 : 0;
}

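/*
 * Bits [5:4] of the activation register report the channel state;
 * only state 1 (actively executing a descriptor chain) is treated as
 * busy here, so an idle or paused channel is considered free.
 */
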
static int mv_chan_xor_slot_count(size_t len, int src_cnt)
{
	return 1;
}

/**
 * mv_xor_free_slots - flags descriptor slots for reuse
 * @slot: Slot to free
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_xor_free_slots(struct mv_xor_chan *mv_chan,
			      struct mv_xor_desc_slot *slot)
{
	dev_dbg(mv_chan->device->common.dev, "%s %d slot %p\n",
		__func__, __LINE__, slot);

	slot->slots_per_op = 0;
}

/**
 * mv_xor_start_new_chain - program the engine to operate on a new chain
 * headed by sw_desc
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan,
				   struct mv_xor_desc_slot *sw_desc)
{
	dev_dbg(mv_chan->device->common.dev, "%s %d: sw_desc %p\n",
		__func__, __LINE__, sw_desc);
	if (sw_desc->type != mv_chan->current_type)
		mv_set_mode(mv_chan, sw_desc->type);

	if (sw_desc->type == DMA_MEMSET) {
		/* for memset requests we need to program the engine, no
		 * descriptors used.
		 */
		struct mv_xor_desc *hw_desc = sw_desc->hw_desc;
		mv_chan_set_dest_pointer(mv_chan, hw_desc->phy_dest_addr);
		mv_chan_set_block_size(mv_chan, sw_desc->unmap_len);
		mv_chan_set_value(mv_chan, sw_desc->value);
	} else {
		/* set the hardware chain */
		mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);
	}
	mv_chan->pending += sw_desc->slot_cnt;
	mv_xor_issue_pending(&mv_chan->common);
}

static dma_cookie_t
mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
			       struct mv_xor_chan *mv_chan,
			       dma_cookie_t cookie)
{
	BUG_ON(desc->async_tx.cookie < 0);

	if (desc->async_tx.cookie > 0) {
		cookie = desc->async_tx.cookie;

		/* call the callback (must not sleep or submit new
		 * operations to this channel)
		 */
		if (desc->async_tx.callback)
			desc->async_tx.callback(
				desc->async_tx.callback_param);

		/* unmap dma addresses
		 * (unmap_single vs unmap_page?)
		 */
		if (desc->group_head && desc->unmap_len) {
			struct mv_xor_desc_slot *unmap = desc->group_head;
			struct device *dev =
				&mv_chan->device->pdev->dev;
			u32 len = unmap->unmap_len;
			enum dma_ctrl_flags flags = desc->async_tx.flags;
			u32 src_cnt;
			dma_addr_t addr;

			if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
				addr = mv_desc_get_dest_addr(unmap);
				dma_unmap_page(dev, addr, len, DMA_FROM_DEVICE);
			}

			if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
				src_cnt = unmap->unmap_src_cnt;
				while (src_cnt--) {
					addr = mv_desc_get_src_addr(unmap,
								    src_cnt);
					dma_unmap_page(dev, addr, len,
						       DMA_TO_DEVICE);
				}
			}
			desc->group_head = NULL;
		}
	}

	/* run dependent operations */
	async_tx_run_dependencies(&desc->async_tx);

	return cookie;
}

static int
mv_xor_clean_completed_slots(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;

	dev_dbg(mv_chan->device->common.dev, "%s %d\n", __func__, __LINE__);
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 completed_node) {

		if (async_tx_test_ack(&iter->async_tx)) {
			list_del(&iter->completed_node);
			mv_xor_free_slots(mv_chan, iter);
		}
	}
	return 0;
}

static int
mv_xor_clean_slot(struct mv_xor_desc_slot *desc,
		  struct mv_xor_chan *mv_chan)
{
	dev_dbg(mv_chan->device->common.dev, "%s %d: desc %p flags %d\n",
		__func__, __LINE__, desc, desc->async_tx.flags);
	list_del(&desc->chain_node);
	/* the client is allowed to attach dependent operations
	 * until 'ack' is set
	 */
	if (!async_tx_test_ack(&desc->async_tx)) {
		/* move this slot to the completed_slots */
		list_add_tail(&desc->completed_node, &mv_chan->completed_slots);
		return 0;
	}

	mv_xor_free_slots(mv_chan, desc);
	return 0;
}

static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;
	dma_cookie_t cookie = 0;
	int busy = mv_chan_is_busy(mv_chan);
	u32 current_desc = mv_chan_get_current_desc(mv_chan);
	int seen_current = 0;

	dev_dbg(mv_chan->device->common.dev, "%s %d\n", __func__, __LINE__);
	dev_dbg(mv_chan->device->common.dev, "current_desc %x\n", current_desc);
	mv_xor_clean_completed_slots(mv_chan);

	/* free completed slots from the chain starting with
	 * the oldest descriptor
	 */
	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 chain_node) {
		prefetch(_iter);
		prefetch(&_iter->async_tx);

		/* do not advance past the current descriptor loaded into the
		 * hardware channel, subsequent descriptors are either in
		 * process or have not been submitted
		 */
		if (seen_current)
			break;

		/* stop the search if we reach the current descriptor and the
		 * channel is busy
		 */
		if (iter->async_tx.phys == current_desc) {
			seen_current = 1;
			if (busy)
				break;
		}

		cookie = mv_xor_run_tx_complete_actions(iter, mv_chan, cookie);

		if (mv_xor_clean_slot(iter, mv_chan))
			break;
	}

	if ((busy == 0) && !list_empty(&mv_chan->chain)) {
		struct mv_xor_desc_slot *chain_head;
		chain_head = list_entry(mv_chan->chain.next,
					struct mv_xor_desc_slot,
					chain_node);

		mv_xor_start_new_chain(mv_chan, chain_head);
	}

	if (cookie > 0)
		mv_chan->completed_cookie = cookie;
}

static void
mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
{
	spin_lock_bh(&mv_chan->lock);
	__mv_xor_slot_cleanup(mv_chan);
	spin_unlock_bh(&mv_chan->lock);
}

static void mv_xor_tasklet(unsigned long data)
{
	struct mv_xor_chan *chan = (struct mv_xor_chan *) data;
	__mv_xor_slot_cleanup(chan);
}

static struct mv_xor_desc_slot *
mv_xor_alloc_slots(struct mv_xor_chan *mv_chan, int num_slots,
		   int slots_per_op)
{
	struct mv_xor_desc_slot *iter, *_iter, *alloc_start = NULL;
	LIST_HEAD(chain);
	int slots_found, retry = 0;

	/* start search from the last allocated descriptor
	 * if a contiguous allocation can not be found start searching
	 * from the beginning of the list
	 */
retry:
	slots_found = 0;
	if (retry == 0)
		iter = mv_chan->last_used;
	else
		iter = list_entry(&mv_chan->all_slots,
				  struct mv_xor_desc_slot,
				  slot_node);

	list_for_each_entry_safe_continue(
		iter, _iter, &mv_chan->all_slots, slot_node) {
		prefetch(_iter);
		prefetch(&_iter->async_tx);
		if (iter->slots_per_op) {
			/* give up after finding the first busy slot
			 * on the second pass through the list
			 */
			if (retry)
				break;

			slots_found = 0;
			continue;
		}

		/* start the allocation if the slot is correctly aligned */
		if (!slots_found++)
			alloc_start = iter;

		if (slots_found == num_slots) {
			struct mv_xor_desc_slot *alloc_tail = NULL;
			struct mv_xor_desc_slot *last_used = NULL;
			iter = alloc_start;
			while (num_slots) {
				int i;

				/* pre-ack all but the last descriptor */
				async_tx_ack(&iter->async_tx);

				list_add_tail(&iter->chain_node, &chain);
				alloc_tail = iter;
				iter->async_tx.cookie = 0;
				iter->slot_cnt = num_slots;
				iter->xor_check_result = NULL;
				for (i = 0; i < slots_per_op; i++) {
					iter->slots_per_op = slots_per_op - i;
					last_used = iter;
					iter = list_entry(iter->slot_node.next,
						struct mv_xor_desc_slot,
						slot_node);
				}
				num_slots -= slots_per_op;
			}
			alloc_tail->group_head = alloc_start;
			alloc_tail->async_tx.cookie = -EBUSY;
			list_splice(&chain, &alloc_tail->async_tx.tx_list);
			mv_chan->last_used = last_used;
			mv_desc_clear_next_desc(alloc_start);
			mv_desc_clear_next_desc(alloc_tail);
			return alloc_tail;
		}
	}
	if (!retry++)
		goto retry;

	/* try to free some slots if the allocation fails */
	tasklet_schedule(&mv_chan->irq_tasklet);

	return NULL;
}

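/*
 * The slot search above makes at most two passes over all_slots: the
 * first resumes from last_used so back-to-back allocations stay
 * contiguous in the descriptor pool; only if that fails does the retry
 * pass restart from the head of the list, giving up at the first busy
 * slot it meets.
 */
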
static dma_cookie_t
mv_desc_assign_cookie(struct mv_xor_chan *mv_chan,
		      struct mv_xor_desc_slot *desc)
{
	dma_cookie_t cookie = mv_chan->common.cookie;

	if (++cookie < 0)
		cookie = 1;
	mv_chan->common.cookie = desc->async_tx.cookie = cookie;
	return cookie;
}

/************************ DMA engine API functions ****************************/

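/*
 * Roughly, a dmaengine client drives these entry points as follows
 * (illustrative sketch only, error handling omitted; the self-tests
 * further down use exactly this sequence):
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len, 0);
 *	cookie = tx->tx_submit(tx);			// mv_xor_tx_submit()
 *	chan->device->device_issue_pending(chan);	// mv_xor_issue_pending()
 *	while (chan->device->device_is_tx_complete(chan, cookie,
 *						   NULL, NULL) != DMA_SUCCESS)
 *		cpu_relax();				// mv_xor_is_complete()
 */
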
static dma_cookie_t
mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx);
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan);
	struct mv_xor_desc_slot *grp_start, *old_chain_tail;
	dma_cookie_t cookie;
	int new_hw_chain = 1;

	dev_dbg(mv_chan->device->common.dev,
		"%s sw_desc %p: async_tx %p\n",
		__func__, sw_desc, &sw_desc->async_tx);

	grp_start = sw_desc->group_head;

	spin_lock_bh(&mv_chan->lock);
	cookie = mv_desc_assign_cookie(mv_chan, sw_desc);

	if (list_empty(&mv_chan->chain))
		list_splice_init(&sw_desc->async_tx.tx_list, &mv_chan->chain);
	else {
		new_hw_chain = 0;

		old_chain_tail = list_entry(mv_chan->chain.prev,
					    struct mv_xor_desc_slot,
					    chain_node);
		list_splice_init(&grp_start->async_tx.tx_list,
				 &old_chain_tail->chain_node);

		if (!mv_can_chain(grp_start))
			goto submit_done;

		dev_dbg(mv_chan->device->common.dev, "Append to last desc %x\n",
			old_chain_tail->async_tx.phys);

		/* fix up the hardware chain */
		mv_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys);

		/* if the channel is not busy */
		if (!mv_chan_is_busy(mv_chan)) {
			u32 current_desc = mv_chan_get_current_desc(mv_chan);
			/*
			 * and the current desc is the end of the chain before
			 * the append, then we need to start the channel
			 */
			if (current_desc == old_chain_tail->async_tx.phys)
				new_hw_chain = 1;
		}
	}

	if (new_hw_chain)
		mv_xor_start_new_chain(mv_chan, grp_start);

submit_done:
	spin_unlock_bh(&mv_chan->lock);

	return cookie;
}

/* returns the number of allocated descriptors */
static int mv_xor_alloc_chan_resources(struct dma_chan *chan,
				       struct dma_client *client)
{
	char *hw_desc;
	int idx;
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *slot = NULL;
	struct mv_xor_platform_data *plat_data =
		mv_chan->device->pdev->dev.platform_data;
	int num_descs_in_pool = plat_data->pool_size/MV_XOR_SLOT_SIZE;

	/* Allocate descriptor slots */
	idx = mv_chan->slots_allocated;
	while (idx < num_descs_in_pool) {
		slot = kzalloc(sizeof(*slot), GFP_KERNEL);
		if (!slot) {
			printk(KERN_INFO "MV XOR Channel only initialized"
				" %d descriptor slots\n", idx);
			break;
		}
		hw_desc = (char *) mv_chan->device->dma_desc_pool_virt;
		slot->hw_desc = (void *) &hw_desc[idx * MV_XOR_SLOT_SIZE];

		dma_async_tx_descriptor_init(&slot->async_tx, chan);
		slot->async_tx.tx_submit = mv_xor_tx_submit;
		INIT_LIST_HEAD(&slot->chain_node);
		INIT_LIST_HEAD(&slot->slot_node);
		INIT_LIST_HEAD(&slot->async_tx.tx_list);
		hw_desc = (char *) mv_chan->device->dma_desc_pool;
		slot->async_tx.phys =
			(dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE];
		slot->idx = idx++;

		spin_lock_bh(&mv_chan->lock);
		mv_chan->slots_allocated = idx;
		list_add_tail(&slot->slot_node, &mv_chan->all_slots);
		spin_unlock_bh(&mv_chan->lock);
	}

	if (mv_chan->slots_allocated && !mv_chan->last_used)
		mv_chan->last_used = list_entry(mv_chan->all_slots.next,
						struct mv_xor_desc_slot,
						slot_node);

	dev_dbg(mv_chan->device->common.dev,
		"allocated %d descriptor slots last_used: %p\n",
		mv_chan->slots_allocated, mv_chan->last_used);

	return mv_chan->slots_allocated ? : -ENOMEM;
}

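/*
 * Note the GNU C "?:" shorthand in the return statement above:
 * "x ? : y" evaluates to x when x is non-zero and to y otherwise, so
 * this returns the number of allocated slots, or -ENOMEM if none.
 */
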
static struct dma_async_tx_descriptor *
mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		       size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc, *grp_start;
	int slot_cnt;

	dev_dbg(mv_chan->device->common.dev,
		"%s dest: %x src %x len: %u flags: %ld\n",
		__func__, dest, src, len, flags);
	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(unlikely(len > MV_XOR_MAX_BYTE_COUNT));

	spin_lock_bh(&mv_chan->lock);
	slot_cnt = mv_chan_memcpy_slot_count(len);
	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
	if (sw_desc) {
		sw_desc->type = DMA_MEMCPY;
		sw_desc->async_tx.flags = flags;
		grp_start = sw_desc->group_head;
		mv_desc_init(grp_start, flags);
		mv_desc_set_byte_count(grp_start, len);
		mv_desc_set_dest_addr(sw_desc->group_head, dest);
		mv_desc_set_src_addr(grp_start, 0, src);
		sw_desc->unmap_src_cnt = 1;
		sw_desc->unmap_len = len;
	}
	spin_unlock_bh(&mv_chan->lock);

	dev_dbg(mv_chan->device->common.dev,
		"%s sw_desc %p async_tx %p\n",
		__func__, sw_desc, sw_desc ? &sw_desc->async_tx : 0);

	return sw_desc ? &sw_desc->async_tx : NULL;
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
		       size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc, *grp_start;
	int slot_cnt;

	dev_dbg(mv_chan->device->common.dev,
		"%s dest: %x len: %u flags: %ld\n",
		__func__, dest, len, flags);
	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(unlikely(len > MV_XOR_MAX_BYTE_COUNT));

	spin_lock_bh(&mv_chan->lock);
	slot_cnt = mv_chan_memset_slot_count(len);
	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
	if (sw_desc) {
		sw_desc->type = DMA_MEMSET;
		sw_desc->async_tx.flags = flags;
		grp_start = sw_desc->group_head;
		mv_desc_init(grp_start, flags);
		mv_desc_set_byte_count(grp_start, len);
		mv_desc_set_dest_addr(sw_desc->group_head, dest);
		mv_desc_set_block_fill_val(grp_start, value);
		sw_desc->unmap_src_cnt = 1;
		sw_desc->unmap_len = len;
	}
	spin_unlock_bh(&mv_chan->lock);
	dev_dbg(mv_chan->device->common.dev,
		"%s sw_desc %p async_tx %p\n",
		__func__, sw_desc, sw_desc ? &sw_desc->async_tx : NULL);
	return sw_desc ? &sw_desc->async_tx : NULL;
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
		    unsigned int src_cnt, size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc, *grp_start;
	int slot_cnt;

	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(unlikely(len > MV_XOR_MAX_BYTE_COUNT));

	dev_dbg(mv_chan->device->common.dev,
		"%s src_cnt: %d len: %u dest %x flags: %ld\n",
		__func__, src_cnt, len, dest, flags);

	spin_lock_bh(&mv_chan->lock);
	slot_cnt = mv_chan_xor_slot_count(len, src_cnt);
	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
	if (sw_desc) {
		sw_desc->type = DMA_XOR;
		sw_desc->async_tx.flags = flags;
		grp_start = sw_desc->group_head;
		mv_desc_init(grp_start, flags);
		/* the byte count field is the same as in memcpy desc */
		mv_desc_set_byte_count(grp_start, len);
		mv_desc_set_dest_addr(sw_desc->group_head, dest);
		sw_desc->unmap_src_cnt = src_cnt;
		sw_desc->unmap_len = len;
		while (src_cnt--)
			mv_desc_set_src_addr(grp_start, src_cnt, src[src_cnt]);
	}
	spin_unlock_bh(&mv_chan->lock);
	dev_dbg(mv_chan->device->common.dev,
		"%s sw_desc %p async_tx %p\n",
		__func__, sw_desc, sw_desc ? &sw_desc->async_tx : NULL);
	return sw_desc ? &sw_desc->async_tx : NULL;
}

static void mv_xor_free_chan_resources(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *iter, *_iter;
	int in_use_descs = 0;

	mv_xor_slot_cleanup(mv_chan);

	spin_lock_bh(&mv_chan->lock);
	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 chain_node) {
		in_use_descs++;
		list_del(&iter->chain_node);
	}
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 completed_node) {
		in_use_descs++;
		list_del(&iter->completed_node);
	}
	list_for_each_entry_safe_reverse(
		iter, _iter, &mv_chan->all_slots, slot_node) {
		list_del(&iter->slot_node);
		kfree(iter);
		mv_chan->slots_allocated--;
	}
	mv_chan->last_used = NULL;

	dev_dbg(mv_chan->device->common.dev, "%s slots_allocated %d\n",
		__func__, mv_chan->slots_allocated);
	spin_unlock_bh(&mv_chan->lock);

	if (in_use_descs)
		dev_err(mv_chan->device->common.dev,
			"freeing %d in use descriptors!\n", in_use_descs);
}

/**
 * mv_xor_is_complete - poll the status of an XOR transaction
 * @chan: XOR channel handle
 * @cookie: XOR transaction identifier
 */
static enum dma_status mv_xor_is_complete(struct dma_chan *chan,
					  dma_cookie_t cookie,
					  dma_cookie_t *done,
					  dma_cookie_t *used)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	enum dma_status ret;

	last_used = chan->cookie;
	last_complete = mv_chan->completed_cookie;
	mv_chan->is_complete_cookie = cookie;
	if (done)
		*done = last_complete;
	if (used)
		*used = last_used;

	ret = dma_async_is_complete(cookie, last_complete, last_used);
	if (ret == DMA_SUCCESS) {
		mv_xor_clean_completed_slots(mv_chan);
		return ret;
	}
	mv_xor_slot_cleanup(mv_chan);

	last_used = chan->cookie;
	last_complete = mv_chan->completed_cookie;

	if (done)
		*done = last_complete;
	if (used)
		*used = last_used;

	return dma_async_is_complete(cookie, last_complete, last_used);
}

static void mv_dump_xor_regs(struct mv_xor_chan *chan)
{
	u32 val;

	val = __raw_readl(XOR_CONFIG(chan));
	dev_printk(KERN_ERR, chan->device->common.dev,
		   "config      0x%08x.\n", val);

	val = __raw_readl(XOR_ACTIVATION(chan));
	dev_printk(KERN_ERR, chan->device->common.dev,
		   "activation  0x%08x.\n", val);

	val = __raw_readl(XOR_INTR_CAUSE(chan));
	dev_printk(KERN_ERR, chan->device->common.dev,
		   "intr cause  0x%08x.\n", val);

	val = __raw_readl(XOR_INTR_MASK(chan));
	dev_printk(KERN_ERR, chan->device->common.dev,
		   "intr mask   0x%08x.\n", val);

	val = __raw_readl(XOR_ERROR_CAUSE(chan));
	dev_printk(KERN_ERR, chan->device->common.dev,
		   "error cause 0x%08x.\n", val);

	val = __raw_readl(XOR_ERROR_ADDR(chan));
	dev_printk(KERN_ERR, chan->device->common.dev,
		   "error addr  0x%08x.\n", val);
}

static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan,
					 u32 intr_cause)
{
	if (intr_cause & (1 << 4)) {
		dev_dbg(chan->device->common.dev,
			"ignore this error\n");
		return;
	}

	dev_printk(KERN_ERR, chan->device->common.dev,
		   "error on chan %d. intr cause 0x%08x.\n",
		   chan->idx, intr_cause);

	mv_dump_xor_regs(chan);

	BUG();
}

static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
{
	struct mv_xor_chan *chan = data;
	u32 intr_cause = mv_chan_get_intr_cause(chan);

	dev_dbg(chan->device->common.dev, "intr cause %x\n", intr_cause);

	if (mv_is_err_intr(intr_cause))
		mv_xor_err_interrupt_handler(chan, intr_cause);

	tasklet_schedule(&chan->irq_tasklet);

	mv_xor_device_clear_eoc_cause(chan);

	return IRQ_HANDLED;
}

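/*
 * Completion handling is split in two: the hard interrupt handler above
 * only acknowledges the end-of-chain cause and schedules the tasklet;
 * the actual cleanup (callbacks, unmapping, slot recycling) runs later
 * in softirq context via mv_xor_tasklet(), keeping the IRQ path short.
 */
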
static void mv_xor_issue_pending(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);

	if (mv_chan->pending >= MV_XOR_THRESHOLD) {
		mv_chan->pending = 0;
		mv_chan_activate(mv_chan);
	}
}

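/*
 * Descriptor submission is batched: the channel is only (re)activated
 * once at least MV_XOR_THRESHOLD descriptors are pending, amortizing
 * the activation register write over several queued operations.
 */
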
/*
 * Perform a transaction to verify the HW works.
 */
#define MV_XOR_TEST_SIZE 2000

static int __devinit mv_xor_memcpy_self_test(struct mv_xor_device *device)
{
	int i;
	void *src, *dest;
	dma_addr_t src_dma, dest_dma;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	struct dma_async_tx_descriptor *tx;
	int err = 0;
	struct mv_xor_chan *mv_chan;

	src = kmalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;

	dest = kzalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < MV_XOR_TEST_SIZE; i++)
		((u8 *) src)[i] = (u8)i;

	/* Start copy, using first DMA channel */
	dma_chan = container_of(device->common.channels.next,
				struct dma_chan,
				device_node);
	if (mv_xor_alloc_chan_resources(dma_chan, NULL) < 1) {
		err = -ENODEV;
		goto out;
	}

	dest_dma = dma_map_single(dma_chan->device->dev, dest,
				  MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);

	src_dma = dma_map_single(dma_chan->device->dev, src,
				 MV_XOR_TEST_SIZE, DMA_TO_DEVICE);

	tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
				    MV_XOR_TEST_SIZE, 0);
	cookie = mv_xor_tx_submit(tx);
	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(1);

	if (mv_xor_is_complete(dma_chan, cookie, NULL, NULL) !=
	    DMA_SUCCESS) {
		dev_printk(KERN_ERR, dma_chan->device->dev,
			   "Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	mv_chan = to_mv_xor_chan(dma_chan);
	dma_sync_single_for_cpu(&mv_chan->device->pdev->dev, dest_dma,
				MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
	if (memcmp(src, dest, MV_XOR_TEST_SIZE)) {
		dev_printk(KERN_ERR, dma_chan->device->dev,
			   "Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	mv_xor_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}

#define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */

static int __devinit
mv_xor_xor_self_test(struct mv_xor_device *device)
{
	int i, src_idx;
	struct page *dest;
	struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dest_dma;
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	u8 cmp_byte = 0;
	u32 cmp_word;
	int err = 0;
	struct mv_xor_chan *mv_chan;

	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
		if (!xor_srcs[src_idx]) {
			while (src_idx--)
				__free_page(xor_srcs[src_idx]);
			return -ENOMEM;
		}
	}

	dest = alloc_page(GFP_KERNEL);
	if (!dest) {
		while (src_idx--)
			__free_page(xor_srcs[src_idx]);
		return -ENOMEM;
	}

	/* Fill in src buffers */
	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
		u8 *ptr = page_address(xor_srcs[src_idx]);
		for (i = 0; i < PAGE_SIZE; i++)
			ptr[i] = (1 << src_idx);
	}

	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++)
		cmp_byte ^= (u8) (1 << src_idx);

	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
		(cmp_byte << 8) | cmp_byte;

	memset(page_address(dest), 0, PAGE_SIZE);

	dma_chan = container_of(device->common.channels.next,
				struct dma_chan,
				device_node);
	if (mv_xor_alloc_chan_resources(dma_chan, NULL) < 1) {
		err = -ENODEV;
		goto out;
	}

	/* test xor */
	dest_dma = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
				DMA_FROM_DEVICE);

	for (i = 0; i < MV_XOR_NUM_SRC_TEST; i++)
		dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
					   0, PAGE_SIZE, DMA_TO_DEVICE);

	tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
				 MV_XOR_NUM_SRC_TEST, PAGE_SIZE, 0);

	cookie = mv_xor_tx_submit(tx);
	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(8);

	if (mv_xor_is_complete(dma_chan, cookie, NULL, NULL) !=
	    DMA_SUCCESS) {
		dev_printk(KERN_ERR, dma_chan->device->dev,
			   "Self-test xor timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	mv_chan = to_mv_xor_chan(dma_chan);
	dma_sync_single_for_cpu(&mv_chan->device->pdev->dev, dest_dma,
				PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
		u32 *ptr = page_address(dest);
		if (ptr[i] != cmp_word) {
			dev_printk(KERN_ERR, dma_chan->device->dev,
				   "Self-test xor failed compare, disabling."
				   " index %d, data %x, expected %x\n", i,
				   ptr[i], cmp_word);
			err = -ENODEV;
			goto free_resources;
		}
	}

free_resources:
	mv_xor_free_chan_resources(dma_chan);
out:
	src_idx = MV_XOR_NUM_SRC_TEST;
	while (src_idx--)
		__free_page(xor_srcs[src_idx]);
	__free_page(dest);
	return err;
}

static int __devexit mv_xor_remove(struct platform_device *dev)
{
	struct mv_xor_device *device = platform_get_drvdata(dev);
	struct dma_chan *chan, *_chan;
	struct mv_xor_chan *mv_chan;
	struct mv_xor_platform_data *plat_data = dev->dev.platform_data;

	dma_async_device_unregister(&device->common);

	dma_free_coherent(&dev->dev, plat_data->pool_size,
			  device->dma_desc_pool_virt, device->dma_desc_pool);

	list_for_each_entry_safe(chan, _chan, &device->common.channels,
				 device_node) {
		mv_chan = to_mv_xor_chan(chan);
		list_del(&chan->device_node);
	}

	return 0;
}

static int __devinit mv_xor_probe(struct platform_device *pdev)
{
	int ret = 0;
	int irq;
	struct mv_xor_device *adev;
	struct mv_xor_chan *mv_chan;
	struct dma_device *dma_dev;
	struct mv_xor_platform_data *plat_data = pdev->dev.platform_data;

	adev = devm_kzalloc(&pdev->dev, sizeof(*adev), GFP_KERNEL);
	if (!adev)
		return -ENOMEM;

	dma_dev = &adev->common;

	/* allocate coherent memory for hardware descriptors
	 * note: writecombine gives slightly better performance, but
	 * requires that we explicitly flush the writes
	 */
	adev->dma_desc_pool_virt = dma_alloc_writecombine(&pdev->dev,
							  plat_data->pool_size,
							  &adev->dma_desc_pool,
							  GFP_KERNEL);
	if (!adev->dma_desc_pool_virt)
		return -ENOMEM;

	adev->id = plat_data->hw_id;
	adev->pdev = pdev;

	/* discover transaction capabilities from the platform data */
	dma_dev->cap_mask = plat_data->cap_mask;

	platform_set_drvdata(pdev, adev);

	adev->shared = platform_get_drvdata(plat_data->shared);

	INIT_LIST_HEAD(&dma_dev->channels);

	/* set base routines */
	dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
	dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
	dma_dev->device_is_tx_complete = mv_xor_is_complete;
	dma_dev->device_issue_pending = mv_xor_issue_pending;
	dma_dev->dev = &pdev->dev;

	/* set prep routines based on capability */
	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
	if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memset = mv_xor_prep_dma_memset;
	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		dma_dev->max_xor = 8;
		dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
	}

	mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
	if (!mv_chan) {
		ret = -ENOMEM;
		goto err_free_dma;
	}
	mv_chan->device = adev;
	mv_chan->idx = plat_data->hw_id;
	mv_chan->mmr_base = adev->shared->xor_base;

	if (!mv_chan->mmr_base) {
		ret = -ENOMEM;
		goto err_free_dma;
	}
	tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)
		     mv_chan);

	/* clear errors before enabling interrupts */
	mv_xor_device_clear_err_status(mv_chan);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = irq;
		goto err_free_dma;
	}
	ret = devm_request_irq(&pdev->dev, irq,
			       mv_xor_interrupt_handler,
			       0, dev_name(&pdev->dev), mv_chan);
	if (ret)
		goto err_free_dma;

	mv_chan_unmask_interrupts(mv_chan);

	mv_set_mode(mv_chan, DMA_MEMCPY);

	spin_lock_init(&mv_chan->lock);
	INIT_LIST_HEAD(&mv_chan->chain);
	INIT_LIST_HEAD(&mv_chan->completed_slots);
	INIT_LIST_HEAD(&mv_chan->all_slots);
	INIT_RCU_HEAD(&mv_chan->common.rcu);
	mv_chan->common.device = dma_dev;

	list_add_tail(&mv_chan->common.device_node, &dma_dev->channels);

	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
		ret = mv_xor_memcpy_self_test(adev);
		dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
		if (ret)
			goto err_free_dma;
	}

	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		ret = mv_xor_xor_self_test(adev);
		dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
		if (ret)
			goto err_free_dma;
	}

	dev_printk(KERN_INFO, &pdev->dev, "Marvell XOR: "
		   "( %s%s%s%s)\n",
		   dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
		   dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "",
		   dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
		   dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");

	dma_async_device_register(dma_dev);
	goto out;

 err_free_dma:
	dma_free_coherent(&adev->pdev->dev, plat_data->pool_size,
			  adev->dma_desc_pool_virt, adev->dma_desc_pool);
 out:
	return ret;
}

static void
mv_xor_conf_mbus_windows(struct mv_xor_shared_private *msp,
			 struct mbus_dram_target_info *dram)
{
	void __iomem *base = msp->xor_base;
	u32 win_enable = 0;
	int i;

	for (i = 0; i < 8; i++) {
		writel(0, base + WINDOW_BASE(i));
		writel(0, base + WINDOW_SIZE(i));
		if (i < 4)
			writel(0, base + WINDOW_REMAP_HIGH(i));
	}

	for (i = 0; i < dram->num_cs; i++) {
		struct mbus_dram_window *cs = dram->cs + i;

		writel((cs->base & 0xffff0000) |
		       (cs->mbus_attr << 8) |
		       dram->mbus_dram_target_id, base + WINDOW_BASE(i));
		writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));

		/* enable the window and allow full read/write access */
		win_enable |= (1 << i);
		win_enable |= 3 << (16 + (2 * i));
	}

	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
	writel(win_enable, base + WINDOW_BAR_ENABLE(1));
}

static struct platform_driver mv_xor_driver = {
	.probe		= mv_xor_probe,
	.remove		= mv_xor_remove,
	.driver		= {
		.owner	= THIS_MODULE,
		.name	= MV_XOR_NAME,
	},
};

static int mv_xor_shared_probe(struct platform_device *pdev)
{
	struct mv_xor_platform_shared_data *msd = pdev->dev.platform_data;
	struct mv_xor_shared_private *msp;
	struct resource *res;

	dev_printk(KERN_NOTICE, &pdev->dev, "Marvell shared XOR driver\n");

	msp = devm_kzalloc(&pdev->dev, sizeof(*msp), GFP_KERNEL);
	if (!msp)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	msp->xor_base = devm_ioremap(&pdev->dev, res->start,
				     res->end - res->start + 1);
	if (!msp->xor_base)
		return -EBUSY;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res)
		return -ENODEV;

	msp->xor_high_base = devm_ioremap(&pdev->dev, res->start,
					  res->end - res->start + 1);
	if (!msp->xor_high_base)
		return -EBUSY;

	platform_set_drvdata(pdev, msp);

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	if (msd != NULL && msd->dram != NULL)
		mv_xor_conf_mbus_windows(msp, msd->dram);

	return 0;
}

static int mv_xor_shared_remove(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver mv_xor_shared_driver = {
	.probe		= mv_xor_shared_probe,
	.remove		= mv_xor_shared_remove,
	.driver		= {
		.owner	= THIS_MODULE,
		.name	= MV_XOR_SHARED_NAME,
	},
};

static int __init mv_xor_init(void)
{
	int rc;

	rc = platform_driver_register(&mv_xor_shared_driver);
	if (!rc) {
		rc = platform_driver_register(&mv_xor_driver);
		if (rc)
			platform_driver_unregister(&mv_xor_shared_driver);
	}
	return rc;
}
module_init(mv_xor_init);

/* it's currently unsafe to unload this module */
#if 0
static void __exit mv_xor_exit(void)
{
	platform_driver_unregister(&mv_xor_driver);
	platform_driver_unregister(&mv_xor_shared_driver);
}

module_exit(mv_xor_exit);
#endif

MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
MODULE_LICENSE("GPL");