/*
 * offload engine driver for the Intel Xscale series of i/o processors
 * Copyright © 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

/*
 * This driver supports the asynchronous DMA copy and RAID engines available
 * on the Intel Xscale(R) family of I/O Processors (IOP 32x, 33x, 134x)
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/memory.h>
#include <linux/ioport.h>

#include <mach/adma.h>
#define to_iop_adma_chan(chan) container_of(chan, struct iop_adma_chan, common)
#define to_iop_adma_device(dev) \
	container_of(dev, struct iop_adma_device, common)
#define tx_to_iop_adma_slot(tx) \
	container_of(tx, struct iop_adma_desc_slot, async_tx)
/**
 * iop_adma_free_slots - flags descriptor slots for reuse
 * @slot: Slot to free
 * Caller must hold &iop_chan->lock while calling this function
 */
static void iop_adma_free_slots(struct iop_adma_desc_slot *slot)
{
	int stride = slot->slots_per_op;

	while (stride--) {
		slot->slots_per_op = 0;
		slot = list_entry(slot->slot_node.next,
				struct iop_adma_desc_slot,
				slot_node);
	}
}
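
/* iop_adma_run_tx_complete_actions - invoke the client callback and unmap the
 * DMA buffers of a completed descriptor, then kick any dependent operations.
 * Returns the completed transaction's cookie, or the cookie passed in if this
 * descriptor did not carry one.
 */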
static dma_cookie_t
iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc,
	struct iop_adma_chan *iop_chan, dma_cookie_t cookie)
{
	BUG_ON(desc->async_tx.cookie < 0);
	if (desc->async_tx.cookie > 0) {
		cookie = desc->async_tx.cookie;
		desc->async_tx.cookie = 0;

		/* call the callback (must not sleep or submit new
		 * operations to this channel)
		 */
		if (desc->async_tx.callback)
			desc->async_tx.callback(
				desc->async_tx.callback_param);

		/* unmap dma addresses
		 * (unmap_single vs unmap_page?)
		 */
		if (desc->group_head && desc->unmap_len) {
			struct iop_adma_desc_slot *unmap = desc->group_head;
			struct device *dev =
				&iop_chan->device->pdev->dev;
			u32 len = unmap->unmap_len;
			enum dma_ctrl_flags flags = desc->async_tx.flags;
			u32 src_cnt;
			dma_addr_t addr;
			dma_addr_t dest;

			src_cnt = unmap->unmap_src_cnt;
			dest = iop_desc_get_dest_addr(unmap, iop_chan);
			if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
				enum dma_data_direction dir;

				if (src_cnt > 1) /* is xor? */
					dir = DMA_BIDIRECTIONAL;
				else
					dir = DMA_FROM_DEVICE;

				dma_unmap_page(dev, dest, len, dir);
			}

			if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
				while (src_cnt--) {
					addr = iop_desc_get_src_addr(unmap,
							iop_chan, src_cnt);
					if (addr == dest)
						continue;
					dma_unmap_page(dev, addr, len,
							DMA_TO_DEVICE);
				}
			}
			desc->group_head = NULL;
		}
	}

	/* run dependent operations */
	dma_run_dependencies(&desc->async_tx);

	return cookie;
}
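
/* iop_adma_clean_slot - return a descriptor's slots to the free pool once the
 * client has acknowledged it.  The last descriptor on the chain is kept so
 * new operations can be appended; returns non-zero when the end of the chain
 * has been reached.
 */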
static int
iop_adma_clean_slot(struct iop_adma_desc_slot *desc,
	struct iop_adma_chan *iop_chan)
{
	/* the client is allowed to attach dependent operations
	 * until 'ack' is set
	 */
	if (!async_tx_test_ack(&desc->async_tx))
		return 0;

	/* leave the last descriptor in the chain
	 * so we can append to it
	 */
	if (desc->chain_node.next == &iop_chan->chain)
		return 1;

	dev_dbg(iop_chan->device->common.dev,
		"\tfree slot: %d slots_per_op: %d\n",
		desc->idx, desc->slots_per_op);

	list_del(&desc->chain_node);
	iop_adma_free_slots(desc);

	return 0;
}
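
/* __iop_adma_slot_cleanup - walk the descriptor chain, run completion actions
 * for descriptors the hardware has finished with, and record the most recent
 * completed cookie.  Caller must hold iop_chan->lock.
 */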
static void __iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
{
	struct iop_adma_desc_slot *iter, *_iter, *grp_start = NULL;
	dma_cookie_t cookie = 0;
	u32 current_desc = iop_chan_get_current_descriptor(iop_chan);
	int busy = iop_chan_is_busy(iop_chan);
	int seen_current = 0, slot_cnt = 0, slots_per_op = 0;

	dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
	/* free completed slots from the chain starting with
	 * the oldest descriptor
	 */
	list_for_each_entry_safe(iter, _iter, &iop_chan->chain,
					chain_node) {
		pr_debug("\tcookie: %d slot: %d busy: %d "
			"this_desc: %#x next_desc: %#x ack: %d\n",
			iter->async_tx.cookie, iter->idx, busy,
			iter->async_tx.phys, iop_desc_get_next_desc(iter),
			async_tx_test_ack(&iter->async_tx));
		prefetch(_iter);
		prefetch(&_iter->async_tx);

		/* do not advance past the current descriptor loaded into the
		 * hardware channel, subsequent descriptors are either in
		 * process or have not been submitted
		 */
		if (seen_current)
			break;

		/* stop the search if we reach the current descriptor and the
		 * channel is busy, or if it appears that the current descriptor
		 * needs to be re-read (i.e. has been appended to)
		 */
		if (iter->async_tx.phys == current_desc) {
			BUG_ON(seen_current++);
			if (busy || iop_desc_get_next_desc(iter))
				break;
		}

		/* detect the start of a group transaction */
		if (!slot_cnt && !slots_per_op) {
			slot_cnt = iter->slot_cnt;
			slots_per_op = iter->slots_per_op;
			if (slot_cnt <= slots_per_op) {
				slot_cnt = 0;
				slots_per_op = 0;
			}
		}

		if (slot_cnt) {
			pr_debug("\tgroup++\n");
			if (!grp_start)
				grp_start = iter;
			slot_cnt -= slots_per_op;
		}

		/* all the members of a group are complete */
		if (slots_per_op != 0 && slot_cnt == 0) {
			struct iop_adma_desc_slot *grp_iter, *_grp_iter;
			int end_of_chain = 0;
			pr_debug("\tgroup end\n");

			/* collect the total results */
			if (grp_start->xor_check_result) {
				u32 zero_sum_result = 0;
				slot_cnt = grp_start->slot_cnt;
				grp_iter = grp_start;

				list_for_each_entry_from(grp_iter,
					&iop_chan->chain, chain_node) {
					zero_sum_result |=
						iop_desc_get_zero_result(grp_iter);
					pr_debug("\titer%d result: %d\n",
						grp_iter->idx, zero_sum_result);
					slot_cnt -= slots_per_op;
					if (slot_cnt == 0)
						break;
				}
				pr_debug("\tgrp_start->xor_check_result: %p\n",
					grp_start->xor_check_result);
				*grp_start->xor_check_result = zero_sum_result;
			}

			/* clean up the group */
			slot_cnt = grp_start->slot_cnt;
			grp_iter = grp_start;
			list_for_each_entry_safe_from(grp_iter, _grp_iter,
				&iop_chan->chain, chain_node) {
				cookie = iop_adma_run_tx_complete_actions(
					grp_iter, iop_chan, cookie);

				slot_cnt -= slots_per_op;
				end_of_chain = iop_adma_clean_slot(grp_iter,
					iop_chan);

				if (slot_cnt == 0 || end_of_chain)
					break;
			}

			/* the group should be complete at this point */
			BUG_ON(slot_cnt);

			slots_per_op = 0;
			grp_start = NULL;
			if (end_of_chain)
				break;
			else
				continue;
		} else if (slots_per_op) /* wait for group completion */
			continue;

		/* write back zero sum results (single descriptor case) */
		if (iter->xor_check_result && iter->async_tx.cookie)
			*iter->xor_check_result =
				iop_desc_get_zero_result(iter);

		cookie = iop_adma_run_tx_complete_actions(
			iter, iop_chan, cookie);

		if (iop_adma_clean_slot(iter, iop_chan))
			break;
	}

	if (cookie > 0) {
		iop_chan->completed_cookie = cookie;
		pr_debug("\tcompleted cookie %d\n", cookie);
	}
}
static void
iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
{
	spin_lock_bh(&iop_chan->lock);
	__iop_adma_slot_cleanup(iop_chan);
	spin_unlock_bh(&iop_chan->lock);
}
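
/* tasklet entry point: deferred descriptor cleanup scheduled from the
 * end-of-transfer and end-of-chain interrupt handlers
 */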
static void iop_adma_tasklet(unsigned long data)
{
	struct iop_adma_chan *iop_chan = (struct iop_adma_chan *) data;

	spin_lock(&iop_chan->lock);
	__iop_adma_slot_cleanup(iop_chan);
	spin_unlock(&iop_chan->lock);
}
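
/* iop_adma_alloc_slots - find a run of 'num_slots' contiguous, idle descriptor
 * slots in the channel's pool.  The search starts at the last allocated slot,
 * retries once from the head of the list, and falls back to direct reclaim if
 * no run can be found.
 */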
static struct iop_adma_desc_slot *
iop_adma_alloc_slots(struct iop_adma_chan *iop_chan, int num_slots,
			int slots_per_op)
{
	struct iop_adma_desc_slot *iter, *_iter, *alloc_start = NULL;
	LIST_HEAD(chain);
	int slots_found, retry = 0;

	/* start search from the last allocated descriptor
	 * if a contiguous allocation can not be found start searching
	 * from the beginning of the list
	 */
retry:
	slots_found = 0;
	if (retry == 0)
		iter = iop_chan->last_used;
	else
		iter = list_entry(&iop_chan->all_slots,
			struct iop_adma_desc_slot,
			slot_node);

	list_for_each_entry_safe_continue(
		iter, _iter, &iop_chan->all_slots, slot_node) {
		prefetch(_iter);
		prefetch(&_iter->async_tx);
		if (iter->slots_per_op) {
			/* give up after finding the first busy slot
			 * on the second pass through the list
			 */
			if (retry)
				break;

			slots_found = 0;
			continue;
		}

		/* start the allocation if the slot is correctly aligned */
		if (!slots_found++) {
			if (iop_desc_is_aligned(iter, slots_per_op))
				alloc_start = iter;
			else {
				slots_found = 0;
				continue;
			}
		}

		if (slots_found == num_slots) {
			struct iop_adma_desc_slot *alloc_tail = NULL;
			struct iop_adma_desc_slot *last_used = NULL;
			iter = alloc_start;
			while (num_slots) {
				int i;
				dev_dbg(iop_chan->device->common.dev,
					"allocated slot: %d "
					"(desc %p phys: %#x) slots_per_op %d\n",
					iter->idx, iter->hw_desc,
					iter->async_tx.phys, slots_per_op);

				/* pre-ack all but the last descriptor */
				if (num_slots != slots_per_op)
					async_tx_ack(&iter->async_tx);

				list_add_tail(&iter->chain_node, &chain);
				alloc_tail = iter;
				iter->async_tx.cookie = 0;
				iter->slot_cnt = num_slots;
				iter->xor_check_result = NULL;
				for (i = 0; i < slots_per_op; i++) {
					iter->slots_per_op = slots_per_op - i;
					last_used = iter;
					iter = list_entry(iter->slot_node.next,
						struct iop_adma_desc_slot,
						slot_node);
				}
				num_slots -= slots_per_op;
			}
			alloc_tail->group_head = alloc_start;
			alloc_tail->async_tx.cookie = -EBUSY;
			list_splice(&chain, &alloc_tail->async_tx.tx_list);
			iop_chan->last_used = last_used;
			iop_desc_clear_next_desc(alloc_start);
			iop_desc_clear_next_desc(alloc_tail);
			return alloc_tail;
		}
	}
	if (!retry++)
		goto retry;

	/* perform direct reclaim if the allocation fails */
	__iop_adma_slot_cleanup(iop_chan);

	return NULL;
}
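
/* assign the next dma_cookie_t to a descriptor, skipping the reserved
 * non-positive values
 */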
static dma_cookie_t
iop_desc_assign_cookie(struct iop_adma_chan *iop_chan,
	struct iop_adma_desc_slot *desc)
{
	dma_cookie_t cookie = iop_chan->common.cookie;
	cookie++;
	if (cookie < 0)
		cookie = 1;
	iop_chan->common.cookie = desc->async_tx.cookie = cookie;
	return cookie;
}
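
/* append accumulated descriptors to the hardware chain once the pending
 * count crosses IOP_ADMA_THRESHOLD
 */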
static void iop_adma_check_threshold(struct iop_adma_chan *iop_chan)
{
	dev_dbg(iop_chan->device->common.dev, "pending: %d\n",
		iop_chan->pending);

	if (iop_chan->pending >= IOP_ADMA_THRESHOLD) {
		iop_chan->pending = 0;
		iop_chan_append(iop_chan);
	}
}
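
/* iop_adma_tx_submit - attach a prepared descriptor group to the software
 * chain and link it into the hardware descriptor list; the actual append to
 * the engine is batched through iop_adma_check_threshold()
 */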
static dma_cookie_t
iop_adma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct iop_adma_desc_slot *sw_desc = tx_to_iop_adma_slot(tx);
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(tx->chan);
	struct iop_adma_desc_slot *grp_start, *old_chain_tail;
	int slot_cnt;
	int slots_per_op;
	dma_cookie_t cookie;
	dma_addr_t next_dma;

	grp_start = sw_desc->group_head;
	slot_cnt = grp_start->slot_cnt;
	slots_per_op = grp_start->slots_per_op;

	spin_lock_bh(&iop_chan->lock);
	cookie = iop_desc_assign_cookie(iop_chan, sw_desc);

	old_chain_tail = list_entry(iop_chan->chain.prev,
		struct iop_adma_desc_slot, chain_node);
	list_splice_init(&sw_desc->async_tx.tx_list,
			 &old_chain_tail->chain_node);

	/* fix up the hardware chain */
	next_dma = grp_start->async_tx.phys;
	iop_desc_set_next_desc(old_chain_tail, next_dma);
	BUG_ON(iop_desc_get_next_desc(old_chain_tail) != next_dma); /* flush */

	/* check for pre-chained descriptors */
	iop_paranoia(iop_desc_get_next_desc(sw_desc));

	/* increment the pending count by the number of slots
	 * memcpy operations have a 1:1 (slot:operation) relation
	 * other operations are heavier and will pop the threshold
	 * more often.
	 */
	iop_chan->pending += slot_cnt;
	iop_adma_check_threshold(iop_chan);
	spin_unlock_bh(&iop_chan->lock);

	dev_dbg(iop_chan->device->common.dev, "%s cookie: %d slot: %d\n",
		__func__, sw_desc->async_tx.cookie, sw_desc->idx);

	return cookie;
}
static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan);
static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan);
/**
 * iop_adma_alloc_chan_resources -  returns the number of allocated descriptors
 * @chan - allocate descriptor resources for this channel
 * @client - current client requesting the channel be ready for requests
 *
 * Note: We keep the slots for 1 operation on iop_chan->chain at all times.  To
 * avoid deadlock, via async_xor, num_descs_in_pool must at a minimum be
 * greater than 2x the number slots needed to satisfy a device->max_xor
 * request.
 */
static int iop_adma_alloc_chan_resources(struct dma_chan *chan)
{
	char *hw_desc;
	int idx;
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
	struct iop_adma_desc_slot *slot = NULL;
	int init = iop_chan->slots_allocated ? 0 : 1;
	struct iop_adma_platform_data *plat_data =
		iop_chan->device->pdev->dev.platform_data;
	int num_descs_in_pool = plat_data->pool_size/IOP_ADMA_SLOT_SIZE;

	/* Allocate descriptor slots */
	do {
		idx = iop_chan->slots_allocated;
		if (idx == num_descs_in_pool)
			break;

		slot = kzalloc(sizeof(*slot), GFP_KERNEL);
		if (!slot) {
			printk(KERN_INFO "IOP ADMA Channel only initialized"
				" %d descriptor slots", idx);
			break;
		}
		hw_desc = (char *) iop_chan->device->dma_desc_pool_virt;
		slot->hw_desc = (void *) &hw_desc[idx * IOP_ADMA_SLOT_SIZE];

		dma_async_tx_descriptor_init(&slot->async_tx, chan);
		slot->async_tx.tx_submit = iop_adma_tx_submit;
		INIT_LIST_HEAD(&slot->chain_node);
		INIT_LIST_HEAD(&slot->slot_node);
		hw_desc = (char *) iop_chan->device->dma_desc_pool;
		slot->async_tx.phys =
			(dma_addr_t) &hw_desc[idx * IOP_ADMA_SLOT_SIZE];
		slot->idx = idx;

		spin_lock_bh(&iop_chan->lock);
		iop_chan->slots_allocated++;
		list_add_tail(&slot->slot_node, &iop_chan->all_slots);
		spin_unlock_bh(&iop_chan->lock);
	} while (iop_chan->slots_allocated < num_descs_in_pool);

	if (idx && !iop_chan->last_used)
		iop_chan->last_used = list_entry(iop_chan->all_slots.next,
					struct iop_adma_desc_slot,
					slot_node);

	dev_dbg(iop_chan->device->common.dev,
		"allocated %d descriptor slots last_used: %p\n",
		iop_chan->slots_allocated, iop_chan->last_used);

	/* initialize the channel and the chain with a null operation */
	if (init) {
		if (dma_has_cap(DMA_MEMCPY,
			iop_chan->device->common.cap_mask))
			iop_chan_start_null_memcpy(iop_chan);
		else if (dma_has_cap(DMA_XOR,
			iop_chan->device->common.cap_mask))
			iop_chan_start_null_xor(iop_chan);
		else
			BUG();
	}

	return (idx > 0) ? idx : -ENOMEM;
}
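
/* prepare a no-op descriptor whose only effect is to raise an interrupt on
 * completion
 */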
static struct dma_async_tx_descriptor *
iop_adma_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
{
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
	struct iop_adma_desc_slot *sw_desc, *grp_start;
	int slot_cnt, slots_per_op;

	dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);

	spin_lock_bh(&iop_chan->lock);
	slot_cnt = iop_chan_interrupt_slot_count(&slots_per_op, iop_chan);
	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
	if (sw_desc) {
		grp_start = sw_desc->group_head;
		iop_desc_init_interrupt(grp_start, iop_chan);
		grp_start->unmap_len = 0;
		sw_desc->async_tx.flags = flags;
	}
	spin_unlock_bh(&iop_chan->lock);

	return sw_desc ? &sw_desc->async_tx : NULL;
}
static struct dma_async_tx_descriptor *
iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
			 dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
	struct iop_adma_desc_slot *sw_desc, *grp_start;
	int slot_cnt, slots_per_op;

	if (unlikely(!len))
		return NULL;
	BUG_ON(unlikely(len > IOP_ADMA_MAX_BYTE_COUNT));

	dev_dbg(iop_chan->device->common.dev, "%s len: %u\n",
		__func__, len);

	spin_lock_bh(&iop_chan->lock);
	slot_cnt = iop_chan_memcpy_slot_count(len, &slots_per_op);
	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
	if (sw_desc) {
		grp_start = sw_desc->group_head;
		iop_desc_init_memcpy(grp_start, flags);
		iop_desc_set_byte_count(grp_start, iop_chan, len);
		iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
		iop_desc_set_memcpy_src_addr(grp_start, dma_src);
		sw_desc->unmap_src_cnt = 1;
		sw_desc->unmap_len = len;
		sw_desc->async_tx.flags = flags;
	}
	spin_unlock_bh(&iop_chan->lock);

	return sw_desc ? &sw_desc->async_tx : NULL;
}
static struct dma_async_tx_descriptor *
iop_adma_prep_dma_memset(struct dma_chan *chan, dma_addr_t dma_dest,
			 int value, size_t len, unsigned long flags)
{
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
	struct iop_adma_desc_slot *sw_desc, *grp_start;
	int slot_cnt, slots_per_op;

	if (unlikely(!len))
		return NULL;
	BUG_ON(unlikely(len > IOP_ADMA_MAX_BYTE_COUNT));

	dev_dbg(iop_chan->device->common.dev, "%s len: %u\n",
		__func__, len);

	spin_lock_bh(&iop_chan->lock);
	slot_cnt = iop_chan_memset_slot_count(len, &slots_per_op);
	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
	if (sw_desc) {
		grp_start = sw_desc->group_head;
		iop_desc_init_memset(grp_start, flags);
		iop_desc_set_byte_count(grp_start, iop_chan, len);
		iop_desc_set_block_fill_val(grp_start, value);
		iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
		sw_desc->unmap_src_cnt = 1;
		sw_desc->unmap_len = len;
		sw_desc->async_tx.flags = flags;
	}
	spin_unlock_bh(&iop_chan->lock);

	return sw_desc ? &sw_desc->async_tx : NULL;
}
static struct dma_async_tx_descriptor *
iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest,
		      dma_addr_t *dma_src, unsigned int src_cnt, size_t len,
		      unsigned long flags)
{
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
	struct iop_adma_desc_slot *sw_desc, *grp_start;
	int slot_cnt, slots_per_op;

	if (unlikely(!len))
		return NULL;
	BUG_ON(unlikely(len > IOP_ADMA_XOR_MAX_BYTE_COUNT));

	dev_dbg(iop_chan->device->common.dev,
		"%s src_cnt: %d len: %u flags: %lx\n",
		__func__, src_cnt, len, flags);

	spin_lock_bh(&iop_chan->lock);
	slot_cnt = iop_chan_xor_slot_count(len, src_cnt, &slots_per_op);
	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
	if (sw_desc) {
		grp_start = sw_desc->group_head;
		iop_desc_init_xor(grp_start, src_cnt, flags);
		iop_desc_set_byte_count(grp_start, iop_chan, len);
		iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
		sw_desc->unmap_src_cnt = src_cnt;
		sw_desc->unmap_len = len;
		sw_desc->async_tx.flags = flags;
		while (src_cnt--)
			iop_desc_set_xor_src_addr(grp_start, src_cnt,
						  dma_src[src_cnt]);
	}
	spin_unlock_bh(&iop_chan->lock);

	return sw_desc ? &sw_desc->async_tx : NULL;
}
static struct dma_async_tx_descriptor *
iop_adma_prep_dma_zero_sum(struct dma_chan *chan, dma_addr_t *dma_src,
			   unsigned int src_cnt, size_t len, u32 *result,
			   unsigned long flags)
{
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
	struct iop_adma_desc_slot *sw_desc, *grp_start;
	int slot_cnt, slots_per_op;

	if (unlikely(!len))
		return NULL;

	dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %u\n",
		__func__, src_cnt, len);

	spin_lock_bh(&iop_chan->lock);
	slot_cnt = iop_chan_zero_sum_slot_count(len, src_cnt, &slots_per_op);
	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
	if (sw_desc) {
		grp_start = sw_desc->group_head;
		iop_desc_init_zero_sum(grp_start, src_cnt, flags);
		iop_desc_set_zero_sum_byte_count(grp_start, len);
		grp_start->xor_check_result = result;
		pr_debug("\t%s: grp_start->xor_check_result: %p\n",
			__func__, grp_start->xor_check_result);
		sw_desc->unmap_src_cnt = src_cnt;
		sw_desc->unmap_len = len;
		sw_desc->async_tx.flags = flags;
		while (src_cnt--)
			iop_desc_set_zero_sum_src_addr(grp_start, src_cnt,
						       dma_src[src_cnt]);
	}
	spin_unlock_bh(&iop_chan->lock);

	return sw_desc ? &sw_desc->async_tx : NULL;
}
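
/* release every descriptor slot owned by the channel; complain if any
 * descriptors beyond the resident null descriptor are still in use
 */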
static void iop_adma_free_chan_resources(struct dma_chan *chan)
{
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
	struct iop_adma_desc_slot *iter, *_iter;
	int in_use_descs = 0;

	iop_adma_slot_cleanup(iop_chan);

	spin_lock_bh(&iop_chan->lock);
	list_for_each_entry_safe(iter, _iter, &iop_chan->chain,
					chain_node) {
		in_use_descs++;
		list_del(&iter->chain_node);
	}
	list_for_each_entry_safe_reverse(
		iter, _iter, &iop_chan->all_slots, slot_node) {
		list_del(&iter->slot_node);
		kfree(iter);
		iop_chan->slots_allocated--;
	}
	iop_chan->last_used = NULL;

	dev_dbg(iop_chan->device->common.dev, "%s slots_allocated %d\n",
		__func__, iop_chan->slots_allocated);
	spin_unlock_bh(&iop_chan->lock);

	/* one is ok since we left it on there on purpose */
	if (in_use_descs > 1)
		printk(KERN_ERR "IOP: Freeing %d in use descriptors!\n",
			in_use_descs - 1);
}
/**
 * iop_adma_is_complete - poll the status of an ADMA transaction
 * @chan: ADMA channel handle
 * @cookie: ADMA transaction identifier
 */
static enum dma_status iop_adma_is_complete(struct dma_chan *chan,
					dma_cookie_t cookie,
					dma_cookie_t *done,
					dma_cookie_t *used)
{
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	enum dma_status ret;

	last_used = chan->cookie;
	last_complete = iop_chan->completed_cookie;

	if (done)
		*done = last_complete;
	if (used)
		*used = last_used;

	ret = dma_async_is_complete(cookie, last_complete, last_used);
	if (ret == DMA_SUCCESS)
		return ret;

	iop_adma_slot_cleanup(iop_chan);

	last_used = chan->cookie;
	last_complete = iop_chan->completed_cookie;

	if (done)
		*done = last_complete;
	if (used)
		*used = last_used;

	return dma_async_is_complete(cookie, last_complete, last_used);
}
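
/* interrupt handlers: end-of-transfer and end-of-chain schedule the cleanup
 * tasklet, while the error handler reports and clears the error status
 */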
static irqreturn_t iop_adma_eot_handler(int irq, void *data)
{
	struct iop_adma_chan *chan = data;

	dev_dbg(chan->device->common.dev, "%s\n", __func__);

	tasklet_schedule(&chan->irq_tasklet);

	iop_adma_device_clear_eot_status(chan);

	return IRQ_HANDLED;
}
static irqreturn_t iop_adma_eoc_handler(int irq, void *data)
{
	struct iop_adma_chan *chan = data;

	dev_dbg(chan->device->common.dev, "%s\n", __func__);

	tasklet_schedule(&chan->irq_tasklet);

	iop_adma_device_clear_eoc_status(chan);

	return IRQ_HANDLED;
}
static irqreturn_t iop_adma_err_handler(int irq, void *data)
{
	struct iop_adma_chan *chan = data;
	unsigned long status = iop_chan_get_status(chan);

	dev_printk(KERN_ERR, chan->device->common.dev,
		"error ( %s%s%s%s%s%s%s)\n",
		iop_is_err_int_parity(status, chan) ? "int_parity " : "",
		iop_is_err_mcu_abort(status, chan) ? "mcu_abort " : "",
		iop_is_err_int_tabort(status, chan) ? "int_tabort " : "",
		iop_is_err_int_mabort(status, chan) ? "int_mabort " : "",
		iop_is_err_pci_tabort(status, chan) ? "pci_tabort " : "",
		iop_is_err_pci_mabort(status, chan) ? "pci_mabort " : "",
		iop_is_err_split_tx(status, chan) ? "split_tx " : "");

	iop_adma_device_clear_err_status(chan);

	BUG();

	return IRQ_HANDLED;
}
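
/* flush any descriptors that have been submitted but not yet appended to the
 * hardware chain
 */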
static void iop_adma_issue_pending(struct dma_chan *chan)
{
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);

	if (iop_chan->pending) {
		iop_chan->pending = 0;
		iop_chan_append(iop_chan);
	}
}
/*
 * Perform a transaction to verify the HW works.
 */
#define IOP_ADMA_TEST_SIZE 2000
static int __devinit iop_adma_memcpy_self_test(struct iop_adma_device *device)
{
	int i;
	void *src, *dest;
	dma_addr_t src_dma, dest_dma;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	struct dma_async_tx_descriptor *tx;
	int err = 0;
	struct iop_adma_chan *iop_chan;

	dev_dbg(device->common.dev, "%s\n", __func__);

	src = kmalloc(IOP_ADMA_TEST_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;
	dest = kzalloc(IOP_ADMA_TEST_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < IOP_ADMA_TEST_SIZE; i++)
		((u8 *) src)[i] = (u8)i;

	/* Start copy, using first DMA channel */
	dma_chan = container_of(device->common.channels.next,
				struct dma_chan,
				device_node);
	if (iop_adma_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	dest_dma = dma_map_single(dma_chan->device->dev, dest,
				IOP_ADMA_TEST_SIZE, DMA_FROM_DEVICE);
	src_dma = dma_map_single(dma_chan->device->dev, src,
				IOP_ADMA_TEST_SIZE, DMA_TO_DEVICE);
	tx = iop_adma_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
				      IOP_ADMA_TEST_SIZE,
				      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	cookie = iop_adma_tx_submit(tx);
	iop_adma_issue_pending(dma_chan);
	msleep(1);

	if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) !=
			DMA_SUCCESS) {
		dev_printk(KERN_ERR, dma_chan->device->dev,
			"Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	iop_chan = to_iop_adma_chan(dma_chan);
	dma_sync_single_for_cpu(&iop_chan->device->pdev->dev, dest_dma,
		IOP_ADMA_TEST_SIZE, DMA_FROM_DEVICE);
	if (memcmp(src, dest, IOP_ADMA_TEST_SIZE)) {
		dev_printk(KERN_ERR, dma_chan->device->dev,
			"Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	iop_adma_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}
#define IOP_ADMA_NUM_SRC_TEST 4 /* must be <= 15 */
static int __devinit
iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
{
	int i, src_idx;
	struct page *dest;
	struct page *xor_srcs[IOP_ADMA_NUM_SRC_TEST];
	struct page *zero_sum_srcs[IOP_ADMA_NUM_SRC_TEST + 1];
	dma_addr_t dma_srcs[IOP_ADMA_NUM_SRC_TEST + 1];
	dma_addr_t dma_addr, dest_dma;
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	u8 cmp_byte = 0;
	u32 cmp_word;
	u32 zero_sum_result;
	int err = 0;
	struct iop_adma_chan *iop_chan;

	dev_dbg(device->common.dev, "%s\n", __func__);

	for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++) {
		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
		if (!xor_srcs[src_idx]) {
			while (src_idx--)
				__free_page(xor_srcs[src_idx]);
			return -ENOMEM;
		}
	}

	dest = alloc_page(GFP_KERNEL);
	if (!dest) {
		while (src_idx--)
			__free_page(xor_srcs[src_idx]);
		return -ENOMEM;
	}

	/* Fill in src buffers */
	for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++) {
		u8 *ptr = page_address(xor_srcs[src_idx]);
		for (i = 0; i < PAGE_SIZE; i++)
			ptr[i] = (1 << src_idx);
	}

	for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++)
		cmp_byte ^= (u8) (1 << src_idx);

	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
			(cmp_byte << 8) | cmp_byte;

	memset(page_address(dest), 0, PAGE_SIZE);

	dma_chan = container_of(device->common.channels.next,
				struct dma_chan,
				device_node);
	if (iop_adma_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	/* test xor */
	dest_dma = dma_map_page(dma_chan->device->dev, dest, 0,
				PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++)
		dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
					   0, PAGE_SIZE, DMA_TO_DEVICE);
	tx = iop_adma_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
				   IOP_ADMA_NUM_SRC_TEST, PAGE_SIZE,
				   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	cookie = iop_adma_tx_submit(tx);
	iop_adma_issue_pending(dma_chan);
	msleep(8);

	if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) !=
		DMA_SUCCESS) {
		dev_printk(KERN_ERR, dma_chan->device->dev,
			"Self-test xor timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	iop_chan = to_iop_adma_chan(dma_chan);
	dma_sync_single_for_cpu(&iop_chan->device->pdev->dev, dest_dma,
		PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
		u32 *ptr = page_address(dest);
		if (ptr[i] != cmp_word) {
			dev_printk(KERN_ERR, dma_chan->device->dev,
				"Self-test xor failed compare, disabling\n");
			err = -ENODEV;
			goto free_resources;
		}
	}
	dma_sync_single_for_device(&iop_chan->device->pdev->dev, dest_dma,
		PAGE_SIZE, DMA_TO_DEVICE);

	/* skip zero sum if the capability is not present */
	if (!dma_has_cap(DMA_ZERO_SUM, dma_chan->device->cap_mask))
		goto free_resources;

	/* zero sum the sources with the destination page */
	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++)
		zero_sum_srcs[i] = xor_srcs[i];
	zero_sum_srcs[i] = dest;

	zero_sum_result = 1;

	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++)
		dma_srcs[i] = dma_map_page(dma_chan->device->dev,
					   zero_sum_srcs[i], 0, PAGE_SIZE,
					   DMA_TO_DEVICE);
	tx = iop_adma_prep_dma_zero_sum(dma_chan, dma_srcs,
					IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE,
					&zero_sum_result,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	cookie = iop_adma_tx_submit(tx);
	iop_adma_issue_pending(dma_chan);
	msleep(8);

	if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
		dev_printk(KERN_ERR, dma_chan->device->dev,
			"Self-test zero sum timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	if (zero_sum_result != 0) {
		dev_printk(KERN_ERR, dma_chan->device->dev,
			"Self-test zero sum failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	/* test memset */
	dma_addr = dma_map_page(dma_chan->device->dev, dest, 0,
			PAGE_SIZE, DMA_FROM_DEVICE);
	tx = iop_adma_prep_dma_memset(dma_chan, dma_addr, 0, PAGE_SIZE,
				      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	cookie = iop_adma_tx_submit(tx);
	iop_adma_issue_pending(dma_chan);
	msleep(8);

	if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
		dev_printk(KERN_ERR, dma_chan->device->dev,
			"Self-test memset timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	for (i = 0; i < PAGE_SIZE/sizeof(u32); i++) {
		u32 *ptr = page_address(dest);
		if (ptr[i]) {
			dev_printk(KERN_ERR, dma_chan->device->dev,
				"Self-test memset failed compare, disabling\n");
			err = -ENODEV;
			goto free_resources;
		}
	}

	/* test for non-zero parity sum */
	zero_sum_result = 0;
	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++)
		dma_srcs[i] = dma_map_page(dma_chan->device->dev,
					   zero_sum_srcs[i], 0, PAGE_SIZE,
					   DMA_TO_DEVICE);
	tx = iop_adma_prep_dma_zero_sum(dma_chan, dma_srcs,
					IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE,
					&zero_sum_result,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	cookie = iop_adma_tx_submit(tx);
	iop_adma_issue_pending(dma_chan);
	msleep(8);

	if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
		dev_printk(KERN_ERR, dma_chan->device->dev,
			"Self-test non-zero sum timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	if (zero_sum_result != 1) {
		dev_printk(KERN_ERR, dma_chan->device->dev,
			"Self-test non-zero sum failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	iop_adma_free_chan_resources(dma_chan);
out:
	src_idx = IOP_ADMA_NUM_SRC_TEST;
	while (src_idx--)
		__free_page(xor_srcs[src_idx]);
	__free_page(dest);
	return err;
}
static int __devexit iop_adma_remove(struct platform_device *dev)
{
	struct iop_adma_device *device = platform_get_drvdata(dev);
	struct dma_chan *chan, *_chan;
	struct iop_adma_chan *iop_chan;
	struct iop_adma_platform_data *plat_data = dev->dev.platform_data;

	dma_async_device_unregister(&device->common);

	dma_free_coherent(&dev->dev, plat_data->pool_size,
			device->dma_desc_pool_virt, device->dma_desc_pool);

	list_for_each_entry_safe(chan, _chan, &device->common.channels,
				device_node) {
		iop_chan = to_iop_adma_chan(chan);
		list_del(&chan->device_node);
		kfree(iop_chan);
	}
	kfree(device);

	return 0;
}
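
/* probe: map the engine registers, carve the descriptor pool out of coherent
 * memory, register the dmaengine capabilities advertised by the platform
 * data, and run the self tests before going live
 */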
static int __devinit iop_adma_probe(struct platform_device *pdev)
{
	struct resource *res;
	int ret = 0, i;
	struct iop_adma_device *adev;
	struct iop_adma_chan *iop_chan;
	struct dma_device *dma_dev;
	struct iop_adma_platform_data *plat_data = pdev->dev.platform_data;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	if (!devm_request_mem_region(&pdev->dev, res->start,
				res->end - res->start, pdev->name))
		return -EBUSY;

	adev = kzalloc(sizeof(*adev), GFP_KERNEL);
	if (!adev)
		return -ENOMEM;
	dma_dev = &adev->common;

	/* allocate coherent memory for hardware descriptors
	 * note: writecombine gives slightly better performance, but
	 * requires that we explicitly flush the writes
	 */
	if ((adev->dma_desc_pool_virt = dma_alloc_writecombine(&pdev->dev,
					plat_data->pool_size,
					&adev->dma_desc_pool,
					GFP_KERNEL)) == NULL) {
		ret = -ENOMEM;
		goto err_free_adev;
	}

	dev_dbg(&pdev->dev, "%s: allocated descriptor pool virt %p phys %p\n",
		__func__, adev->dma_desc_pool_virt,
		(void *) adev->dma_desc_pool);

	adev->id = plat_data->hw_id;

	/* discover transaction capabilities from the platform data */
	dma_dev->cap_mask = plat_data->cap_mask;

	adev->pdev = pdev;
	platform_set_drvdata(pdev, adev);

	INIT_LIST_HEAD(&dma_dev->channels);

	/* set base routines */
	dma_dev->device_alloc_chan_resources = iop_adma_alloc_chan_resources;
	dma_dev->device_free_chan_resources = iop_adma_free_chan_resources;
	dma_dev->device_is_tx_complete = iop_adma_is_complete;
	dma_dev->device_issue_pending = iop_adma_issue_pending;
	dma_dev->dev = &pdev->dev;

	/* set prep routines based on capability */
	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memcpy = iop_adma_prep_dma_memcpy;
	if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memset = iop_adma_prep_dma_memset;
	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		dma_dev->max_xor = iop_adma_get_max_xor();
		dma_dev->device_prep_dma_xor = iop_adma_prep_dma_xor;
	}
	if (dma_has_cap(DMA_ZERO_SUM, dma_dev->cap_mask))
		dma_dev->device_prep_dma_zero_sum =
			iop_adma_prep_dma_zero_sum;
	if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
		dma_dev->device_prep_dma_interrupt =
			iop_adma_prep_dma_interrupt;

	iop_chan = kzalloc(sizeof(*iop_chan), GFP_KERNEL);
	if (!iop_chan) {
		ret = -ENOMEM;
		goto err_free_dma;
	}
	iop_chan->device = adev;

	iop_chan->mmr_base = devm_ioremap(&pdev->dev, res->start,
					res->end - res->start);
	if (!iop_chan->mmr_base) {
		ret = -ENOMEM;
		goto err_free_iop_chan;
	}
	tasklet_init(&iop_chan->irq_tasklet, iop_adma_tasklet, (unsigned long)
		iop_chan);

	/* clear errors before enabling interrupts */
	iop_adma_device_clear_err_status(iop_chan);

	for (i = 0; i < 3; i++) {
		irq_handler_t handler[] = { iop_adma_eot_handler,
					iop_adma_eoc_handler,
					iop_adma_err_handler };
		int irq = platform_get_irq(pdev, i);
		if (irq < 0) {
			ret = -ENXIO;
			goto err_free_iop_chan;
		} else {
			ret = devm_request_irq(&pdev->dev, irq,
					handler[i], 0, pdev->name, iop_chan);
			if (ret)
				goto err_free_iop_chan;
		}
	}

	spin_lock_init(&iop_chan->lock);
	INIT_LIST_HEAD(&iop_chan->chain);
	INIT_LIST_HEAD(&iop_chan->all_slots);
	iop_chan->common.device = dma_dev;
	list_add_tail(&iop_chan->common.device_node, &dma_dev->channels);

	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
		ret = iop_adma_memcpy_self_test(adev);
		dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
		if (ret)
			goto err_free_iop_chan;
	}

	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask) ||
		dma_has_cap(DMA_MEMSET, dma_dev->cap_mask)) {
		ret = iop_adma_xor_zero_sum_self_test(adev);
		dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
		if (ret)
			goto err_free_iop_chan;
	}

	dev_printk(KERN_INFO, &pdev->dev, "Intel(R) IOP: "
	  "( %s%s%s%s%s%s%s%s%s%s)\n",
	  dma_has_cap(DMA_PQ_XOR, dma_dev->cap_mask) ? "pq_xor " : "",
	  dma_has_cap(DMA_PQ_UPDATE, dma_dev->cap_mask) ? "pq_update " : "",
	  dma_has_cap(DMA_PQ_ZERO_SUM, dma_dev->cap_mask) ? "pq_zero_sum " : "",
	  dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
	  dma_has_cap(DMA_DUAL_XOR, dma_dev->cap_mask) ? "dual_xor " : "",
	  dma_has_cap(DMA_ZERO_SUM, dma_dev->cap_mask) ? "xor_zero_sum " : "",
	  dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "",
	  dma_has_cap(DMA_MEMCPY_CRC32C, dma_dev->cap_mask) ? "cpy+crc " : "",
	  dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
	  dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");

	dma_async_device_register(dma_dev);
	goto out;

 err_free_iop_chan:
	kfree(iop_chan);
 err_free_dma:
	dma_free_coherent(&adev->pdev->dev, plat_data->pool_size,
			adev->dma_desc_pool_virt, adev->dma_desc_pool);
 err_free_adev:
	kfree(adev);
 out:
	return ret;
}
static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan)
{
	struct iop_adma_desc_slot *sw_desc, *grp_start;
	dma_cookie_t cookie;
	int slot_cnt, slots_per_op;

	dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);

	spin_lock_bh(&iop_chan->lock);
	slot_cnt = iop_chan_memcpy_slot_count(0, &slots_per_op);
	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
	if (sw_desc) {
		grp_start = sw_desc->group_head;

		list_splice_init(&sw_desc->async_tx.tx_list, &iop_chan->chain);
		async_tx_ack(&sw_desc->async_tx);
		iop_desc_init_memcpy(grp_start, 0);
		iop_desc_set_byte_count(grp_start, iop_chan, 0);
		iop_desc_set_dest_addr(grp_start, iop_chan, 0);
		iop_desc_set_memcpy_src_addr(grp_start, 0);

		cookie = iop_chan->common.cookie;
		cookie++;
		if (cookie <= 1)
			cookie = 2;

		/* initialize the completed cookie to be less than
		 * the most recently used cookie
		 */
		iop_chan->completed_cookie = cookie - 1;
		iop_chan->common.cookie = sw_desc->async_tx.cookie = cookie;

		/* channel should not be busy */
		BUG_ON(iop_chan_is_busy(iop_chan));

		/* clear any prior error-status bits */
		iop_adma_device_clear_err_status(iop_chan);

		/* disable operation */
		iop_chan_disable(iop_chan);

		/* set the descriptor address */
		iop_chan_set_next_descriptor(iop_chan, sw_desc->async_tx.phys);

		/* 1/ don't add pre-chained descriptors
		 * 2/ dummy read to flush next_desc write
		 */
		BUG_ON(iop_desc_get_next_desc(sw_desc));

		/* run the descriptor */
		iop_chan_enable(iop_chan);
	} else
		dev_printk(KERN_ERR, iop_chan->device->common.dev,
			"failed to allocate null descriptor\n");
	spin_unlock_bh(&iop_chan->lock);
}
static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan)
{
	struct iop_adma_desc_slot *sw_desc, *grp_start;
	dma_cookie_t cookie;
	int slot_cnt, slots_per_op;

	dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);

	spin_lock_bh(&iop_chan->lock);
	slot_cnt = iop_chan_xor_slot_count(0, 2, &slots_per_op);
	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
	if (sw_desc) {
		grp_start = sw_desc->group_head;
		list_splice_init(&sw_desc->async_tx.tx_list, &iop_chan->chain);
		async_tx_ack(&sw_desc->async_tx);
		iop_desc_init_null_xor(grp_start, 2, 0);
		iop_desc_set_byte_count(grp_start, iop_chan, 0);
		iop_desc_set_dest_addr(grp_start, iop_chan, 0);
		iop_desc_set_xor_src_addr(grp_start, 0, 0);
		iop_desc_set_xor_src_addr(grp_start, 1, 0);

		cookie = iop_chan->common.cookie;
		cookie++;
		if (cookie <= 1)
			cookie = 2;

		/* initialize the completed cookie to be less than
		 * the most recently used cookie
		 */
		iop_chan->completed_cookie = cookie - 1;
		iop_chan->common.cookie = sw_desc->async_tx.cookie = cookie;

		/* channel should not be busy */
		BUG_ON(iop_chan_is_busy(iop_chan));

		/* clear any prior error-status bits */
		iop_adma_device_clear_err_status(iop_chan);

		/* disable operation */
		iop_chan_disable(iop_chan);

		/* set the descriptor address */
		iop_chan_set_next_descriptor(iop_chan, sw_desc->async_tx.phys);

		/* 1/ don't add pre-chained descriptors
		 * 2/ dummy read to flush next_desc write
		 */
		BUG_ON(iop_desc_get_next_desc(sw_desc));

		/* run the descriptor */
		iop_chan_enable(iop_chan);
	} else
		dev_printk(KERN_ERR, iop_chan->device->common.dev,
			"failed to allocate null descriptor\n");
	spin_unlock_bh(&iop_chan->lock);
}
MODULE_ALIAS("platform:iop-adma");

static struct platform_driver iop_adma_driver = {
	.probe		= iop_adma_probe,
	.remove		= __devexit_p(iop_adma_remove),
	.driver		= {
		.owner	= THIS_MODULE,
		.name	= "iop-adma",
	},
};

static int __init iop_adma_init (void)
{
	return platform_driver_register(&iop_adma_driver);
}

static void __exit iop_adma_exit (void)
{
	platform_driver_unregister(&iop_adma_driver);
}
module_exit(iop_adma_exit);
module_init(iop_adma_init);
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("IOP ADMA Engine Driver");
MODULE_LICENSE("GPL");