/*
 * offload engine driver for the Marvell XOR engine
 * Copyright (C) 2007, 2008, Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/memory.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/irqdomain.h>
#include <linux/platform_data/dma-mv_xor.h>

#include "dmaengine.h"
#include "mv_xor.h"
static void mv_xor_issue_pending(struct dma_chan *chan);

#define to_mv_xor_chan(chan)		\
	container_of(chan, struct mv_xor_chan, dmachan)

#define to_mv_xor_slot(tx)		\
	container_of(tx, struct mv_xor_desc_slot, async_tx)

#define mv_chan_to_devp(chan)		\
	((chan)->dmadev.dev)
static void mv_desc_init(struct mv_xor_desc_slot *desc,
			 dma_addr_t addr, u32 byte_count,
			 enum dma_ctrl_flags flags)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;

	hw_desc->status = XOR_DESC_DMA_OWNED;
	hw_desc->phy_next_desc = 0;
	/* Enable end-of-descriptor interrupts only for DMA_PREP_INTERRUPT */
	hw_desc->desc_command = (flags & DMA_PREP_INTERRUPT) ?
				XOR_DESC_EOD_INT_EN : 0;
	hw_desc->phy_dest_addr = addr;
	hw_desc->byte_count = byte_count;
}
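/*
 * For reference, mv_desc_init() above fills the first words of the
 * hardware descriptor. The sketch below mirrors the struct mv_xor_desc
 * layout declared in mv_xor.h (little-endian view); it is reproduced
 * here purely as an illustration and is not compiled.
 */
#if 0
struct mv_xor_desc_sketch {	/* illustrative copy, not the real type */
	u32 status;		/* XOR_DESC_DMA_OWNED until the engine is done */
	u32 crc32_result;	/* result of CRC-32 calculation */
	u32 desc_command;	/* operation type and per-source enable bits */
	u32 phy_next_desc;	/* DMA address of the next descriptor, 0 = end */
	u32 byte_count;		/* transfer size in bytes */
	u32 phy_dest_addr;	/* destination block DMA address */
	u32 phy_src_addr[8];	/* up to 8 source block DMA addresses */
	u32 reserved0;
	u32 reserved1;
};
#endif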
static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc,
				  u32 next_desc_addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	BUG_ON(hw_desc->phy_next_desc);
	hw_desc->phy_next_desc = next_desc_addr;
}

static void mv_desc_clear_next_desc(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_next_desc = 0;
}
static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
				 int index, dma_addr_t addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_src_addr[mv_phy_src_idx(index)] = addr;
	if (desc->type == DMA_XOR)
		hw_desc->desc_command |= (1 << index);
}
static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
{
	return readl_relaxed(XOR_CURR_DESC(chan));
}

static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
					u32 next_desc_addr)
{
	writel_relaxed(next_desc_addr, XOR_NEXT_DESC(chan));
}
static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
{
	u32 val = readl_relaxed(XOR_INTR_MASK(chan));
	val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
	writel_relaxed(val, XOR_INTR_MASK(chan));
}
static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
{
	u32 intr_cause = readl_relaxed(XOR_INTR_CAUSE(chan));
	intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;
	return intr_cause;
}
static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
{
	u32 val;

	val = XOR_INT_END_OF_DESC | XOR_INT_END_OF_CHAIN | XOR_INT_STOPPED;
	val = ~(val << (chan->idx * 16));
	dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val);
	writel_relaxed(val, XOR_INTR_CAUSE(chan));
}
static void mv_xor_device_clear_err_status(struct mv_xor_chan *chan)
{
	u32 val = 0xFFFF0000 >> (chan->idx * 16);
	writel_relaxed(val, XOR_INTR_CAUSE(chan));
}
static void mv_set_mode(struct mv_xor_chan *chan,
			enum dma_transaction_type type)
{
	u32 op_mode;
	u32 config = readl_relaxed(XOR_CONFIG(chan));

	switch (type) {
	case DMA_XOR:
		op_mode = XOR_OPERATION_MODE_XOR;
		break;
	case DMA_MEMCPY:
		op_mode = XOR_OPERATION_MODE_MEMCPY;
		break;
	default:
		dev_err(mv_chan_to_devp(chan),
			"error: unsupported operation %d\n",
			type);
		BUG();
		return;
	}

	config &= ~0x7;
	config |= op_mode;

#if defined(__BIG_ENDIAN)
	config |= XOR_DESCRIPTOR_SWAP;
#else
	config &= ~XOR_DESCRIPTOR_SWAP;
#endif

	writel_relaxed(config, XOR_CONFIG(chan));
	chan->current_type = type;
}
static void mv_chan_activate(struct mv_xor_chan *chan)
{
	dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");

	/* writel ensures all descriptors are flushed before activation */
	writel(BIT(0), XOR_ACTIVATION(chan));
}
static char mv_chan_is_busy(struct mv_xor_chan *chan)
{
	u32 state = readl_relaxed(XOR_ACTIVATION(chan));

	state = (state >> 4) & 0x3;

	return (state == 1) ? 1 : 0;
}
/**
 * mv_xor_free_slots - flags descriptor slots for reuse
 * @slot: Slot to free
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_xor_free_slots(struct mv_xor_chan *mv_chan,
			      struct mv_xor_desc_slot *slot)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d slot %p\n",
		__func__, __LINE__, slot);

	slot->slot_used = 0;
}
/*
 * mv_xor_start_new_chain - program the engine to operate on new chain headed by
 * sw_desc
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan,
				   struct mv_xor_desc_slot *sw_desc)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: sw_desc %p\n",
		__func__, __LINE__, sw_desc);

	/* set the hardware chain */
	mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);

	mv_chan->pending++;
	mv_xor_issue_pending(&mv_chan->dmachan);
}
static dma_cookie_t
mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
			       struct mv_xor_chan *mv_chan, dma_cookie_t cookie)
{
	BUG_ON(desc->async_tx.cookie < 0);

	if (desc->async_tx.cookie > 0) {
		cookie = desc->async_tx.cookie;

		/* call the callback (must not sleep or submit new
		 * operations to this channel)
		 */
		if (desc->async_tx.callback)
			desc->async_tx.callback(
				desc->async_tx.callback_param);

		dma_descriptor_unmap(&desc->async_tx);
	}

	/* run dependent operations */
	dma_run_dependencies(&desc->async_tx);

	return cookie;
}
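/*
 * A minimal sketch of a client callback that respects the constraint
 * documented above: it runs from this driver's tasklet, so it must not
 * sleep and must not submit new operations to this channel. Signalling
 * a completion is the typical safe pattern. The name my_xor_done and the
 * completion it signals are hypothetical client-side code, not part of
 * this driver.
 */
#if 0
static void my_xor_done(void *arg)
{
	struct completion *done = arg;

	complete(done);		/* never sleeps, safe in tasklet context */
}
#endif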
static int
mv_xor_clean_completed_slots(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 completed_node) {

		if (async_tx_test_ack(&iter->async_tx)) {
			list_del(&iter->completed_node);
			mv_xor_free_slots(mv_chan, iter);
		}
	}
	return 0;
}
static int
mv_xor_clean_slot(struct mv_xor_desc_slot *desc,
		  struct mv_xor_chan *mv_chan)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: desc %p flags %d\n",
		__func__, __LINE__, desc, desc->async_tx.flags);
	list_del(&desc->chain_node);
	/* the client is allowed to attach dependent operations
	 * until 'ack' is set
	 */
	if (!async_tx_test_ack(&desc->async_tx)) {
		/* move this slot to the completed_slots */
		list_add_tail(&desc->completed_node, &mv_chan->completed_slots);
		return 0;
	}

	mv_xor_free_slots(mv_chan, desc);
	return 0;
}
/* This function must be called with the mv_xor_chan spinlock held */
static void mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;
	dma_cookie_t cookie = 0;
	int busy = mv_chan_is_busy(mv_chan);
	u32 current_desc = mv_chan_get_current_desc(mv_chan);
	int seen_current = 0;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
	dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc);
	mv_xor_clean_completed_slots(mv_chan);

	/* free completed slots from the chain starting with
	 * the oldest descriptor
	 */

	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 chain_node) {
		prefetch(_iter);
		prefetch(&_iter->async_tx);

		/* do not advance past the current descriptor loaded into the
		 * hardware channel, subsequent descriptors are either in
		 * process or have not been submitted
		 */
		if (seen_current)
			break;

		/* stop the search if we reach the current descriptor and the
		 * channel is busy
		 */
		if (iter->async_tx.phys == current_desc) {
			seen_current = 1;
			if (busy)
				break;
		}

		cookie = mv_xor_run_tx_complete_actions(iter, mv_chan, cookie);

		if (mv_xor_clean_slot(iter, mv_chan))
			break;
	}

	if ((busy == 0) && !list_empty(&mv_chan->chain)) {
		struct mv_xor_desc_slot *chain_head;
		chain_head = list_entry(mv_chan->chain.next,
					struct mv_xor_desc_slot,
					chain_node);

		mv_xor_start_new_chain(mv_chan, chain_head);
	}

	if (cookie > 0)
		mv_chan->dmachan.completed_cookie = cookie;
}
static void mv_xor_tasklet(unsigned long data)
{
	struct mv_xor_chan *chan = (struct mv_xor_chan *) data;

	spin_lock_bh(&chan->lock);
	mv_xor_slot_cleanup(chan);
	spin_unlock_bh(&chan->lock);
}
static struct mv_xor_desc_slot *
mv_xor_alloc_slot(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;
	int retry = 0;

	/* start search from the last allocated descriptor
	 * if a contiguous allocation can not be found start searching
	 * from the beginning of the list
	 */
retry:
	if (!retry)
		iter = mv_chan->last_used;
	else
		iter = list_entry(&mv_chan->all_slots,
				  struct mv_xor_desc_slot,
				  slot_node);

	list_for_each_entry_safe_continue(
		iter, _iter, &mv_chan->all_slots, slot_node) {

		prefetch(_iter);
		prefetch(&_iter->async_tx);
		if (iter->slot_used) {
			/* give up after finding the first busy slot
			 * on the second pass through the list
			 */
			if (retry)
				break;
			continue;
		}

		/* pre-ack descriptor */
		async_tx_ack(&iter->async_tx);

		iter->slot_used = 1;
		INIT_LIST_HEAD(&iter->chain_node);
		iter->async_tx.cookie = -EBUSY;
		mv_chan->last_used = iter;
		mv_desc_clear_next_desc(iter);

		return iter;
	}

	if (!retry++)
		goto retry;

	/* try to free some slots if the allocation fails */
	tasklet_schedule(&mv_chan->irq_tasklet);

	return NULL;
}
/************************ DMA engine API functions ****************************/
static dma_cookie_t
mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx);
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan);
	struct mv_xor_desc_slot *old_chain_tail;
	dma_cookie_t cookie;
	int new_hw_chain = 1;

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p: async_tx %p\n",
		__func__, sw_desc, &sw_desc->async_tx);

	spin_lock_bh(&mv_chan->lock);
	cookie = dma_cookie_assign(tx);

	if (list_empty(&mv_chan->chain))
		list_add_tail(&sw_desc->chain_node, &mv_chan->chain);
	else {
		new_hw_chain = 0;

		old_chain_tail = list_entry(mv_chan->chain.prev,
					    struct mv_xor_desc_slot,
					    chain_node);
		list_add_tail(&sw_desc->chain_node, &mv_chan->chain);

		dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %pa\n",
			&old_chain_tail->async_tx.phys);

		/* fix up the hardware chain */
		mv_desc_set_next_desc(old_chain_tail, sw_desc->async_tx.phys);

		/* if the channel is not busy */
		if (!mv_chan_is_busy(mv_chan)) {
			u32 current_desc = mv_chan_get_current_desc(mv_chan);
			/*
			 * and the current desc is the end of the chain before
			 * the append, then we need to start the channel
			 */
			if (current_desc == old_chain_tail->async_tx.phys)
				new_hw_chain = 1;
		}
	}

	if (new_hw_chain)
		mv_xor_start_new_chain(mv_chan, sw_desc);

	spin_unlock_bh(&mv_chan->lock);

	return cookie;
}
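/*
 * A sketch of how a dmaengine client reaches mv_xor_tx_submit(): the prep
 * routine returns a descriptor whose tx_submit points here, dmaengine_submit()
 * invokes it, and dma_async_issue_pending() eventually activates the engine.
 * Hypothetical client code, assuming chan, dst, src and len already exist;
 * error handling is trimmed.
 */
#if 0
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len, 0);
	cookie = dmaengine_submit(tx);		/* ends up in mv_xor_tx_submit() */
	dma_async_issue_pending(chan);		/* ends up in mv_xor_issue_pending() */
	while (dma_async_is_tx_complete(chan, cookie, NULL, NULL) != DMA_COMPLETE)
		cpu_relax();
#endif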
/* returns the number of allocated descriptors */
static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
{
	void *virt_desc;
	dma_addr_t dma_desc;
	int idx;
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *slot = NULL;
	int num_descs_in_pool = MV_XOR_POOL_SIZE/MV_XOR_SLOT_SIZE;

	/* Allocate descriptor slots */
	idx = mv_chan->slots_allocated;
	while (idx < num_descs_in_pool) {
		slot = kzalloc(sizeof(*slot), GFP_KERNEL);
		if (!slot) {
			dev_info(mv_chan_to_devp(mv_chan),
				 "channel only initialized %d descriptor slots",
				 idx);
			break;
		}
		virt_desc = mv_chan->dma_desc_pool_virt;
		slot->hw_desc = virt_desc + idx * MV_XOR_SLOT_SIZE;

		dma_async_tx_descriptor_init(&slot->async_tx, chan);
		slot->async_tx.tx_submit = mv_xor_tx_submit;
		INIT_LIST_HEAD(&slot->chain_node);
		INIT_LIST_HEAD(&slot->slot_node);
		dma_desc = mv_chan->dma_desc_pool;
		slot->async_tx.phys = dma_desc + idx * MV_XOR_SLOT_SIZE;
		slot->idx = idx++;

		spin_lock_bh(&mv_chan->lock);
		mv_chan->slots_allocated = idx;
		list_add_tail(&slot->slot_node, &mv_chan->all_slots);
		spin_unlock_bh(&mv_chan->lock);
	}

	if (mv_chan->slots_allocated && !mv_chan->last_used)
		mv_chan->last_used = list_entry(mv_chan->all_slots.next,
						struct mv_xor_desc_slot,
						slot_node);

	dev_dbg(mv_chan_to_devp(mv_chan),
		"allocated %d descriptor slots last_used: %p\n",
		mv_chan->slots_allocated, mv_chan->last_used);

	return mv_chan->slots_allocated ? : -ENOMEM;
}
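/*
 * The pool carving above is plain pointer arithmetic: slot idx lives at the
 * same offset in the CPU-visible pool and in the DMA-visible pool, so no
 * lookup table is needed to go from one view to the other. A sketch of the
 * invariant (illustrative only, not compiled):
 */
#if 0
	/* for any allocated slot */
	BUG_ON(slot->hw_desc !=
	       mv_chan->dma_desc_pool_virt + slot->idx * MV_XOR_SLOT_SIZE);
	BUG_ON(slot->async_tx.phys !=
	       mv_chan->dma_desc_pool + slot->idx * MV_XOR_SLOT_SIZE);
#endif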
static struct dma_async_tx_descriptor *
mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
		    unsigned int src_cnt, size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc;

	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s src_cnt: %d len: %zu dest %pad flags: %ld\n",
		__func__, src_cnt, len, &dest, flags);

	spin_lock_bh(&mv_chan->lock);
	sw_desc = mv_xor_alloc_slot(mv_chan);
	if (sw_desc) {
		sw_desc->type = DMA_XOR;
		sw_desc->async_tx.flags = flags;
		mv_desc_init(sw_desc, dest, len, flags);
		while (src_cnt--)
			mv_desc_set_src_addr(sw_desc, src_cnt, src[src_cnt]);
	}
	spin_unlock_bh(&mv_chan->lock);
	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p async_tx %p\n",
		__func__, sw_desc, &sw_desc->async_tx);
	return sw_desc ? &sw_desc->async_tx : NULL;
}
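/*
 * Sketch of a client computing RAID5-style parity with the prep routine
 * above: dest receives the byte-wise XOR of src_cnt source blocks (at most
 * max_xor = 8 on this engine). Hypothetical client code; parity_dma and
 * srcs[] are DMA addresses the caller has already mapped, and my_xor_done
 * is the callback sketched earlier.
 */
#if 0
	struct dma_async_tx_descriptor *tx;

	tx = chan->device->device_prep_dma_xor(chan, parity_dma, srcs,
					       4, PAGE_SIZE, DMA_PREP_INTERRUPT);
	if (tx) {
		tx->callback = my_xor_done;
		tx->callback_param = &done;
		dmaengine_submit(tx);
		dma_async_issue_pending(chan);
	}
#endif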
static struct dma_async_tx_descriptor *
mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		       size_t len, unsigned long flags)
{
	/*
	 * A MEMCPY operation is identical to an XOR operation with only
	 * a single source address.
	 */
	return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
}
static struct dma_async_tx_descriptor *
mv_xor_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	dma_addr_t src, dest;
	size_t len;

	src = mv_chan->dummy_src_addr;
	dest = mv_chan->dummy_dst_addr;
	len = MV_XOR_MIN_BYTE_COUNT;

	/*
	 * We implement the DMA_INTERRUPT operation as a minimum sized
	 * XOR operation with a single dummy source address.
	 */
	return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
}
static void mv_xor_free_chan_resources(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *iter, *_iter;
	int in_use_descs = 0;

	spin_lock_bh(&mv_chan->lock);

	mv_xor_slot_cleanup(mv_chan);

	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 chain_node) {
		in_use_descs++;
		list_del(&iter->chain_node);
	}
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 completed_node) {
		in_use_descs++;
		list_del(&iter->completed_node);
	}
	list_for_each_entry_safe_reverse(
		iter, _iter, &mv_chan->all_slots, slot_node) {
		list_del(&iter->slot_node);
		kfree(iter);
		mv_chan->slots_allocated--;
	}
	mv_chan->last_used = NULL;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s slots_allocated %d\n",
		__func__, mv_chan->slots_allocated);
	spin_unlock_bh(&mv_chan->lock);

	if (in_use_descs)
		dev_err(mv_chan_to_devp(mv_chan),
			"freeing %d in use descriptors!\n", in_use_descs);
}
/**
 * mv_xor_status - poll the status of an XOR transaction
 * @chan: XOR channel handle
 * @cookie: XOR transaction identifier
 * @txstate: XOR transactions state holder (or NULL)
 */
static enum dma_status mv_xor_status(struct dma_chan *chan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	spin_lock_bh(&mv_chan->lock);
	mv_xor_slot_cleanup(mv_chan);
	spin_unlock_bh(&mv_chan->lock);

	return dma_cookie_status(chan, cookie, txstate);
}
static void mv_dump_xor_regs(struct mv_xor_chan *chan)
{
	u32 val;

	val = readl_relaxed(XOR_CONFIG(chan));
	dev_err(mv_chan_to_devp(chan), "config      0x%08x\n", val);

	val = readl_relaxed(XOR_ACTIVATION(chan));
	dev_err(mv_chan_to_devp(chan), "activation  0x%08x\n", val);

	val = readl_relaxed(XOR_INTR_CAUSE(chan));
	dev_err(mv_chan_to_devp(chan), "intr cause  0x%08x\n", val);

	val = readl_relaxed(XOR_INTR_MASK(chan));
	dev_err(mv_chan_to_devp(chan), "intr mask   0x%08x\n", val);

	val = readl_relaxed(XOR_ERROR_CAUSE(chan));
	dev_err(mv_chan_to_devp(chan), "error cause 0x%08x\n", val);

	val = readl_relaxed(XOR_ERROR_ADDR(chan));
	dev_err(mv_chan_to_devp(chan), "error addr  0x%08x\n", val);
}
static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan,
					 u32 intr_cause)
{
	if (intr_cause & XOR_INT_ERR_DECODE) {
		dev_dbg(mv_chan_to_devp(chan), "ignoring address decode error\n");
		return;
	}

	dev_err(mv_chan_to_devp(chan), "error on chan %d. intr cause 0x%08x\n",
		chan->idx, intr_cause);

	mv_dump_xor_regs(chan);
}
static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
{
	struct mv_xor_chan *chan = data;
	u32 intr_cause = mv_chan_get_intr_cause(chan);

	dev_dbg(mv_chan_to_devp(chan), "intr cause %x\n", intr_cause);

	if (intr_cause & XOR_INTR_ERRORS)
		mv_xor_err_interrupt_handler(chan, intr_cause);

	tasklet_schedule(&chan->irq_tasklet);

	mv_xor_device_clear_eoc_cause(chan);

	return IRQ_HANDLED;
}
static void mv_xor_issue_pending(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);

	if (mv_chan->pending >= MV_XOR_THRESHOLD) {
		mv_chan->pending = 0;
		mv_chan_activate(mv_chan);
	}
}
/*
 * Perform a transaction to verify the HW works.
 */

static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
{
	int i, ret;
	void *src, *dest;
	dma_addr_t src_dma, dest_dma;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	struct dma_async_tx_descriptor *tx;
	struct dmaengine_unmap_data *unmap;
	int err = 0;

	src = kmalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;

	dest = kzalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < PAGE_SIZE; i++)
		((u8 *) src)[i] = (u8)i;

	dma_chan = &mv_chan->dmachan;
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, 2, GFP_KERNEL);
	if (!unmap) {
		err = -ENOMEM;
		goto free_resources;
	}

	src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src), 0,
			       PAGE_SIZE, DMA_TO_DEVICE);
	unmap->addr[0] = src_dma;

	ret = dma_mapping_error(dma_chan->device->dev, src_dma);
	if (ret) {
		err = -ENOMEM;
		goto free_resources;
	}
	unmap->to_cnt = 1;

	dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest), 0,
				PAGE_SIZE, DMA_FROM_DEVICE);
	unmap->addr[1] = dest_dma;

	ret = dma_mapping_error(dma_chan->device->dev, dest_dma);
	if (ret) {
		err = -ENOMEM;
		goto free_resources;
	}
	unmap->from_cnt = 1;
	unmap->len = PAGE_SIZE;

	tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
				    PAGE_SIZE, 0);
	if (!tx) {
		dev_err(dma_chan->device->dev,
			"Self-test cannot prepare operation, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	cookie = mv_xor_tx_submit(tx);
	if (dma_submit_error(cookie)) {
		dev_err(dma_chan->device->dev,
			"Self-test submit error, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(1);

	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_COMPLETE) {
		dev_err(dma_chan->device->dev,
			"Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
				PAGE_SIZE, DMA_FROM_DEVICE);
	if (memcmp(src, dest, PAGE_SIZE)) {
		dev_err(dma_chan->device->dev,
			"Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	dmaengine_unmap_put(unmap);
	mv_xor_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}
#define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
static int
mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
{
	int i, src_idx, ret;
	struct page *dest;
	struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dest_dma;
	struct dma_async_tx_descriptor *tx;
	struct dmaengine_unmap_data *unmap;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	u8 cmp_byte = 0;
	u32 cmp_word;
	int err = 0;
	int src_count = MV_XOR_NUM_SRC_TEST;

	for (src_idx = 0; src_idx < src_count; src_idx++) {
		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
		if (!xor_srcs[src_idx]) {
			while (src_idx--)
				__free_page(xor_srcs[src_idx]);
			return -ENOMEM;
		}
	}

	dest = alloc_page(GFP_KERNEL);
	if (!dest) {
		while (src_idx--)
			__free_page(xor_srcs[src_idx]);
		return -ENOMEM;
	}

	/* Fill in src buffers */
	for (src_idx = 0; src_idx < src_count; src_idx++) {
		u8 *ptr = page_address(xor_srcs[src_idx]);
		for (i = 0; i < PAGE_SIZE; i++)
			ptr[i] = (1 << src_idx);
	}

	for (src_idx = 0; src_idx < src_count; src_idx++)
		cmp_byte ^= (u8) (1 << src_idx);

	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
		(cmp_byte << 8) | cmp_byte;

	memset(page_address(dest), 0, PAGE_SIZE);

	dma_chan = &mv_chan->dmachan;
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, src_count + 1,
					 GFP_KERNEL);
	if (!unmap) {
		err = -ENOMEM;
		goto free_resources;
	}

	/* test xor */
	for (i = 0; i < src_count; i++) {
		unmap->addr[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
					      0, PAGE_SIZE, DMA_TO_DEVICE);
		dma_srcs[i] = unmap->addr[i];
		ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[i]);
		if (ret) {
			err = -ENOMEM;
			goto free_resources;
		}
		unmap->to_cnt++;
	}

	unmap->addr[src_count] = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
					      DMA_FROM_DEVICE);
	dest_dma = unmap->addr[src_count];
	ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[src_count]);
	if (ret) {
		err = -ENOMEM;
		goto free_resources;
	}
	unmap->from_cnt = 1;
	unmap->len = PAGE_SIZE;

	tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
				 src_count, PAGE_SIZE, 0);
	if (!tx) {
		dev_err(dma_chan->device->dev,
			"Self-test cannot prepare operation, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	cookie = mv_xor_tx_submit(tx);
	if (dma_submit_error(cookie)) {
		dev_err(dma_chan->device->dev,
			"Self-test submit error, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(8);

	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_COMPLETE) {
		dev_err(dma_chan->device->dev,
			"Self-test xor timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
				PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
		u32 *ptr = page_address(dest);
		if (ptr[i] != cmp_word) {
			dev_err(dma_chan->device->dev,
				"Self-test xor failed compare, disabling. index %d, data %x, expected %x\n",
				i, ptr[i], cmp_word);
			err = -ENODEV;
			goto free_resources;
		}
	}

free_resources:
	dmaengine_unmap_put(unmap);
	mv_xor_free_chan_resources(dma_chan);
out:
	src_idx = src_count;
	while (src_idx--)
		__free_page(xor_srcs[src_idx]);
	__free_page(dest);
	return err;
}
static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
{
	struct dma_chan *chan, *_chan;
	struct device *dev = mv_chan->dmadev.dev;

	dma_async_device_unregister(&mv_chan->dmadev);

	dma_free_coherent(dev, MV_XOR_POOL_SIZE,
			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
	dma_unmap_single(dev, mv_chan->dummy_src_addr,
			 MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
	dma_unmap_single(dev, mv_chan->dummy_dst_addr,
			 MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);

	list_for_each_entry_safe(chan, _chan, &mv_chan->dmadev.channels,
				 device_node) {
		list_del(&chan->device_node);
	}

	free_irq(mv_chan->irq, mv_chan);

	return 0;
}
static struct mv_xor_chan *
mv_xor_channel_add(struct mv_xor_device *xordev,
		   struct platform_device *pdev,
		   int idx, dma_cap_mask_t cap_mask, int irq)
{
	int ret = 0;
	struct mv_xor_chan *mv_chan;
	struct dma_device *dma_dev;

	mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
	if (!mv_chan)
		return ERR_PTR(-ENOMEM);

	mv_chan->idx = idx;
	mv_chan->irq = irq;

	dma_dev = &mv_chan->dmadev;

	/*
	 * These source and destination dummy buffers are used to implement
	 * a DMA_INTERRUPT operation as a minimum-sized XOR operation.
	 * Hence, we only need to map the buffers at initialization-time.
	 */
	mv_chan->dummy_src_addr = dma_map_single(dma_dev->dev,
		mv_chan->dummy_src, MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
	mv_chan->dummy_dst_addr = dma_map_single(dma_dev->dev,
		mv_chan->dummy_dst, MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);

	/* allocate coherent memory for hardware descriptors
	 * note: writecombine gives slightly better performance, but
	 * requires that we explicitly flush the writes
	 */
	mv_chan->dma_desc_pool_virt =
	  dma_alloc_writecombine(&pdev->dev, MV_XOR_POOL_SIZE,
				 &mv_chan->dma_desc_pool, GFP_KERNEL);
	if (!mv_chan->dma_desc_pool_virt)
		return ERR_PTR(-ENOMEM);

	/* discover transaction capabilities from the platform data */
	dma_dev->cap_mask = cap_mask;

	INIT_LIST_HEAD(&dma_dev->channels);

	/* set base routines */
	dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
	dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
	dma_dev->device_tx_status = mv_xor_status;
	dma_dev->device_issue_pending = mv_xor_issue_pending;
	dma_dev->dev = &pdev->dev;

	/* set prep routines based on capability */
	if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
		dma_dev->device_prep_dma_interrupt = mv_xor_prep_dma_interrupt;
	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		dma_dev->max_xor = 8;
		dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
	}

	mv_chan->mmr_base = xordev->xor_base;
	mv_chan->mmr_high_base = xordev->xor_high_base;
	tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)
		     mv_chan);

	/* clear errors before enabling interrupts */
	mv_xor_device_clear_err_status(mv_chan);

	ret = request_irq(mv_chan->irq, mv_xor_interrupt_handler,
			  0, dev_name(&pdev->dev), mv_chan);
	if (ret)
		goto err_free_dma;

	mv_chan_unmask_interrupts(mv_chan);

	mv_set_mode(mv_chan, DMA_XOR);

	spin_lock_init(&mv_chan->lock);
	INIT_LIST_HEAD(&mv_chan->chain);
	INIT_LIST_HEAD(&mv_chan->completed_slots);
	INIT_LIST_HEAD(&mv_chan->all_slots);
	mv_chan->dmachan.device = dma_dev;
	dma_cookie_init(&mv_chan->dmachan);

	list_add_tail(&mv_chan->dmachan.device_node, &dma_dev->channels);

	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
		ret = mv_xor_memcpy_self_test(mv_chan);
		dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
		if (ret)
			goto err_free_irq;
	}

	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		ret = mv_xor_xor_self_test(mv_chan);
		dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
		if (ret)
			goto err_free_irq;
	}

	dev_info(&pdev->dev, "Marvell XOR: ( %s%s%s)\n",
		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
		 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
		 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");

	dma_async_device_register(dma_dev);

	return mv_chan;

err_free_irq:
	free_irq(mv_chan->irq, mv_chan);
err_free_dma:
	dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE,
			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
	return ERR_PTR(ret);
}
static void
mv_xor_conf_mbus_windows(struct mv_xor_device *xordev,
			 const struct mbus_dram_target_info *dram)
{
	void __iomem *base = xordev->xor_high_base;
	u32 win_enable = 0;
	int i;

	for (i = 0; i < 8; i++) {
		writel(0, base + WINDOW_BASE(i));
		writel(0, base + WINDOW_SIZE(i));
		if (i < 4)
			writel(0, base + WINDOW_REMAP_HIGH(i));
	}

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		writel((cs->base & 0xffff0000) |
		       (cs->mbus_attr << 8) |
		       dram->mbus_dram_target_id, base + WINDOW_BASE(i));
		writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));

		win_enable |= (1 << i);
		win_enable |= 3 << (16 + (2 * i));
	}

	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
	writel(win_enable, base + WINDOW_BAR_ENABLE(1));
	writel(0, base + WINDOW_OVERRIDE_CTRL(0));
	writel(0, base + WINDOW_OVERRIDE_CTRL(1));
}
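/*
 * Worked example of the window arithmetic above, assuming a single DRAM
 * chip-select at base 0x00000000, size 512 MiB, attribute 0x1e, target 0
 * (illustrative values; the real ones come from mv_mbus_dram_info()):
 *
 *	WINDOW_BASE(0) = (0x00000000 & 0xffff0000) | (0x1e << 8) | 0
 *	               = 0x00001e00
 *	WINDOW_SIZE(0) = (0x20000000 - 1) & 0xffff0000 = 0x1fff0000
 *	win_enable     = (1 << 0) | (3 << 16)          = 0x00030001
 *
 * i.e. bit 0 enables window 0 and the pair of bits at position 16 grants
 * it full access.
 */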
static int mv_xor_probe(struct platform_device *pdev)
{
	const struct mbus_dram_target_info *dram;
	struct mv_xor_device *xordev;
	struct mv_xor_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct resource *res;
	int i, ret;

	dev_notice(&pdev->dev, "Marvell shared XOR driver\n");

	xordev = devm_kzalloc(&pdev->dev, sizeof(*xordev), GFP_KERNEL);
	if (!xordev)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	xordev->xor_base = devm_ioremap(&pdev->dev, res->start,
					resource_size(res));
	if (!xordev->xor_base)
		return -EBUSY;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res)
		return -ENODEV;

	xordev->xor_high_base = devm_ioremap(&pdev->dev, res->start,
					     resource_size(res));
	if (!xordev->xor_high_base)
		return -EBUSY;

	platform_set_drvdata(pdev, xordev);

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	dram = mv_mbus_dram_info();
	if (dram)
		mv_xor_conf_mbus_windows(xordev, dram);

	/* Not all platforms can gate the clock, so it is not
	 * an error if the clock does not exist.
	 */
	xordev->clk = clk_get(&pdev->dev, NULL);
	if (!IS_ERR(xordev->clk))
		clk_prepare_enable(xordev->clk);

	if (pdev->dev.of_node) {
		struct device_node *np;
		int i = 0;

		for_each_child_of_node(pdev->dev.of_node, np) {
			struct mv_xor_chan *chan;
			dma_cap_mask_t cap_mask;
			int irq;

			dma_cap_zero(cap_mask);
			if (of_property_read_bool(np, "dmacap,memcpy"))
				dma_cap_set(DMA_MEMCPY, cap_mask);
			if (of_property_read_bool(np, "dmacap,xor"))
				dma_cap_set(DMA_XOR, cap_mask);
			if (of_property_read_bool(np, "dmacap,interrupt"))
				dma_cap_set(DMA_INTERRUPT, cap_mask);

			irq = irq_of_parse_and_map(np, 0);
			if (!irq) {
				ret = -ENODEV;
				goto err_channel_add;
			}

			chan = mv_xor_channel_add(xordev, pdev, i,
						  cap_mask, irq);
			if (IS_ERR(chan)) {
				ret = PTR_ERR(chan);
				irq_dispose_mapping(irq);
				goto err_channel_add;
			}

			xordev->channels[i] = chan;
			i++;
		}
	} else if (pdata && pdata->channels) {
		for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
			struct mv_xor_channel_data *cd;
			struct mv_xor_chan *chan;
			int irq;

			cd = &pdata->channels[i];
			if (!cd) {
				ret = -ENODEV;
				goto err_channel_add;
			}

			irq = platform_get_irq(pdev, i);
			if (irq < 0) {
				ret = irq;
				goto err_channel_add;
			}

			chan = mv_xor_channel_add(xordev, pdev, i,
						  cd->cap_mask, irq);
			if (IS_ERR(chan)) {
				ret = PTR_ERR(chan);
				goto err_channel_add;
			}

			xordev->channels[i] = chan;
		}
	}

	return 0;

err_channel_add:
	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++)
		if (xordev->channels[i]) {
			mv_xor_channel_remove(xordev->channels[i]);
			if (pdev->dev.of_node)
				irq_dispose_mapping(xordev->channels[i]->irq);
		}

	if (!IS_ERR(xordev->clk)) {
		clk_disable_unprepare(xordev->clk);
		clk_put(xordev->clk);
	}

	return ret;
}
static int mv_xor_remove(struct platform_device *pdev)
{
	struct mv_xor_device *xordev = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
		if (xordev->channels[i])
			mv_xor_channel_remove(xordev->channels[i]);
	}

	if (!IS_ERR(xordev->clk)) {
		clk_disable_unprepare(xordev->clk);
		clk_put(xordev->clk);
	}

	return 0;
}
#ifdef CONFIG_OF
static const struct of_device_id mv_xor_dt_ids[] = {
	{ .compatible = "marvell,orion-xor", },
	{},
};
MODULE_DEVICE_TABLE(of, mv_xor_dt_ids);
#endif
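/*
 * Sketch of a matching device tree node, following the marvell,orion-xor
 * binding (register addresses and interrupt numbers are illustrative only):
 *
 *	xor@d0060900 {
 *		compatible = "marvell,orion-xor";
 *		reg = <0xd0060900 0x100
 *		       0xd0060b00 0x100>;
 *		status = "okay";
 *
 *		xor00 {
 *			interrupts = <51>;
 *			dmacap,memcpy;
 *			dmacap,xor;
 *		};
 *		xor01 {
 *			interrupts = <52>;
 *			dmacap,memcpy;
 *			dmacap,xor;
 *			dmacap,interrupt;
 *		};
 *	};
 */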
static struct platform_driver mv_xor_driver = {
	.probe		= mv_xor_probe,
	.remove		= mv_xor_remove,
	.driver		= {
		.name		= MV_XOR_NAME,
		.of_match_table	= of_match_ptr(mv_xor_dt_ids),
	},
};
static int __init mv_xor_init(void)
{
	return platform_driver_register(&mv_xor_driver);
}
module_init(mv_xor_init);
/* it's currently unsafe to unload this module */
#if 0
static void __exit mv_xor_exit(void)
{
	platform_driver_unregister(&mv_xor_driver);
	return;
}

module_exit(mv_xor_exit);
#endif
MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
MODULE_LICENSE("GPL");