GUI: Fix Tomato RAF theme for all builds. Compilation typo.
[tomato.git] / release / src-rt-6.x.4708 / linux / linux-2.6.36 / arch / arm / include / asm / hardware / iop3xx-adma.h
blob9b28f1243bdc1d96c2be3dac0ad947a72a61a264
/*
 * Copyright © 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 */
18 #ifndef _ADMA_H
19 #define _ADMA_H
20 #include <linux/types.h>
21 #include <linux/io.h>
22 #include <mach/hardware.h>
23 #include <asm/hardware/iop_adma.h>
/* Memory copy units: per-channel MMIO register offsets off chan->mmr_base */
#define DMA_CCR(chan)	(chan->mmr_base + 0x0)	/* channel control */
#define DMA_CSR(chan)	(chan->mmr_base + 0x4)	/* channel status */
#define DMA_DAR(chan)	(chan->mmr_base + 0xc)	/* descriptor address */
#define DMA_NDAR(chan)	(chan->mmr_base + 0x10)	/* next descriptor address */
#define DMA_PADR(chan)	(chan->mmr_base + 0x14)	/* PCI address */
#define DMA_PUADR(chan)	(chan->mmr_base + 0x18)	/* PCI upper address */
#define DMA_LADR(chan)	(chan->mmr_base + 0x1c)	/* local address */
#define DMA_BCR(chan)	(chan->mmr_base + 0x20)	/* byte count */
#define DMA_DCR(chan)	(chan->mmr_base + 0x24)	/* descriptor control */
/* Application accelerator unit: register offsets off chan->mmr_base */
#define AAU_ACR(chan)		(chan->mmr_base + 0x0)	/* accelerator control */
#define AAU_ASR(chan)		(chan->mmr_base + 0x4)	/* accelerator status */
#define AAU_ADAR(chan)		(chan->mmr_base + 0x8)	/* descriptor address */
#define AAU_ANDAR(chan)		(chan->mmr_base + 0xc)	/* next descriptor address */
#define AAU_SAR(src, chan)	(chan->mmr_base + (0x10 + ((src) << 2)))
#define AAU_DAR(chan)		(chan->mmr_base + 0x20)	/* destination address */
#define AAU_ABCR(chan)		(chan->mmr_base + 0x24)	/* byte count */
#define AAU_ADCR(chan)		(chan->mmr_base + 0x28)	/* descriptor control */
/* NOTE(review): relies on a `chan` variable in the expansion scope — confirm callers */
#define AAU_SAR_EDCR(src_edc)	(chan->mmr_base + (0x02c + ((src_edc-4) << 2)))
/* src_edc[] indices that hold extended descriptor control words */
#define AAU_EDCR0_IDX	8
#define AAU_EDCR1_IDX	17
#define AAU_EDCR2_IDX	26
/* device ids for the two memcpy DMA channels and the XOR accelerator */
#define DMA0_ID 0
#define DMA1_ID 1
#define AAU_ID 2
/* bitfield layout of the AAU hardware descriptor control word */
struct iop3xx_aau_desc_ctrl {
	unsigned int int_en:1;
	unsigned int blk1_cmd_ctrl:3;
	unsigned int blk2_cmd_ctrl:3;
	unsigned int blk3_cmd_ctrl:3;
	unsigned int blk4_cmd_ctrl:3;
	unsigned int blk5_cmd_ctrl:3;
	unsigned int blk6_cmd_ctrl:3;
	unsigned int blk7_cmd_ctrl:3;
	unsigned int blk8_cmd_ctrl:3;
	unsigned int blk_ctrl:2;
	unsigned int dual_xor_en:1;
	unsigned int tx_complete:1;
	unsigned int zero_result_err:1;
	unsigned int zero_result_en:1;
	unsigned int dest_write_en:1;
};
/* bitfield layout of an AAU extended descriptor control word (EDCR) */
struct iop3xx_aau_e_desc_ctrl {
	unsigned int reserved:1;
	unsigned int blk1_cmd_ctrl:3;
	unsigned int blk2_cmd_ctrl:3;
	unsigned int blk3_cmd_ctrl:3;
	unsigned int blk4_cmd_ctrl:3;
	unsigned int blk5_cmd_ctrl:3;
	unsigned int blk6_cmd_ctrl:3;
	unsigned int blk7_cmd_ctrl:3;
	unsigned int blk8_cmd_ctrl:3;
	unsigned int reserved2:7;
};
/* bitfield layout of the DMA hardware descriptor control word */
struct iop3xx_dma_desc_ctrl {
	unsigned int pci_transaction:4;
	unsigned int int_en:1;
	unsigned int dac_cycle_en:1;
	unsigned int mem_to_mem_en:1;
	unsigned int crc_data_tx_en:1;
	unsigned int crc_gen_en:1;
	unsigned int crc_seed_dis:1;
	unsigned int reserved:21;
	unsigned int crc_tx_complete:1;
};
97 struct iop3xx_desc_dma {
98 u32 next_desc;
99 union {
100 u32 pci_src_addr;
101 u32 pci_dest_addr;
102 u32 src_addr;
104 union {
105 u32 upper_pci_src_addr;
106 u32 upper_pci_dest_addr;
108 union {
109 u32 local_pci_src_addr;
110 u32 local_pci_dest_addr;
111 u32 dest_addr;
113 u32 byte_count;
114 union {
115 u32 desc_ctrl;
116 struct iop3xx_dma_desc_ctrl desc_ctrl_field;
118 u32 crc_addr;
121 struct iop3xx_desc_aau {
122 u32 next_desc;
123 u32 src[4];
124 u32 dest_addr;
125 u32 byte_count;
126 union {
127 u32 desc_ctrl;
128 struct iop3xx_aau_desc_ctrl desc_ctrl_field;
130 union {
131 u32 src_addr;
132 u32 e_desc_ctrl;
133 struct iop3xx_aau_e_desc_ctrl e_desc_ctrl_field;
134 } src_edc[31];
/* galois field multiplier bytes packed into one 32-bit word */
struct iop3xx_aau_gfmr {
	unsigned int gfmr1:8;
	unsigned int gfmr2:8;
	unsigned int gfmr3:8;
	unsigned int gfmr4:8;
};
144 struct iop3xx_desc_pq_xor {
145 u32 next_desc;
146 u32 src[3];
147 union {
148 u32 data_mult1;
149 struct iop3xx_aau_gfmr data_mult1_field;
151 u32 dest_addr;
152 u32 byte_count;
153 union {
154 u32 desc_ctrl;
155 struct iop3xx_aau_desc_ctrl desc_ctrl_field;
157 union {
158 u32 src_addr;
159 u32 e_desc_ctrl;
160 struct iop3xx_aau_e_desc_ctrl e_desc_ctrl_field;
161 u32 data_multiplier;
162 struct iop3xx_aau_gfmr data_mult_field;
163 u32 reserved;
164 } src_edc_gfmr[19];
167 struct iop3xx_desc_dual_xor {
168 u32 next_desc;
169 u32 src0_addr;
170 u32 src1_addr;
171 u32 h_src_addr;
172 u32 d_src_addr;
173 u32 h_dest_addr;
174 u32 byte_count;
175 union {
176 u32 desc_ctrl;
177 struct iop3xx_aau_desc_ctrl desc_ctrl_field;
179 u32 d_dest_addr;
/* view a raw hw_desc pointer as any of the descriptor layouts */
union iop3xx_desc {
	struct iop3xx_desc_aau *aau;
	struct iop3xx_desc_dma *dma;
	struct iop3xx_desc_pq_xor *pq_xor;
	struct iop3xx_desc_dual_xor *dual_xor;
	void *ptr;
};
190 /* No support for p+q operations */
191 static inline int
192 iop_chan_pq_slot_count(size_t len, int src_cnt, int *slots_per_op)
194 BUG();
195 return 0;
/* p+q is unsupported on iop3xx */
static inline void
iop_desc_init_pq(struct iop_adma_desc_slot *desc, int src_cnt,
		 unsigned long flags)
{
	BUG();
}
205 static inline void
206 iop_desc_set_pq_addr(struct iop_adma_desc_slot *desc, dma_addr_t *addr)
208 BUG();
211 static inline void
212 iop_desc_set_pq_src_addr(struct iop_adma_desc_slot *desc, int src_idx,
213 dma_addr_t addr, unsigned char coef)
215 BUG();
218 static inline int
219 iop_chan_pq_zero_sum_slot_count(size_t len, int src_cnt, int *slots_per_op)
221 BUG();
222 return 0;
/* p+q zero-sum is unsupported on iop3xx */
static inline void
iop_desc_init_pq_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt,
			  unsigned long flags)
{
	BUG();
}
232 static inline void
233 iop_desc_set_pq_zero_sum_byte_count(struct iop_adma_desc_slot *desc, u32 len)
235 BUG();
238 #define iop_desc_set_pq_zero_sum_src_addr iop_desc_set_pq_src_addr
240 static inline void
241 iop_desc_set_pq_zero_sum_addr(struct iop_adma_desc_slot *desc, int pq_idx,
242 dma_addr_t *src)
244 BUG();
/* the AAU supports up to 32 XOR sources per descriptor chain */
static inline int iop_adma_get_max_xor(void)
{
	return 32;
}
/* p+q is unsupported on iop3xx */
static inline int iop_adma_get_max_pq(void)
{
	BUG();
	return 0;
}
258 static inline u32 iop_chan_get_current_descriptor(struct iop_adma_chan *chan)
260 int id = chan->device->id;
262 switch (id) {
263 case DMA0_ID:
264 case DMA1_ID:
265 return __raw_readl(DMA_DAR(chan));
266 case AAU_ID:
267 return __raw_readl(AAU_ADAR(chan));
268 default:
269 BUG();
271 return 0;
274 static inline void iop_chan_set_next_descriptor(struct iop_adma_chan *chan,
275 u32 next_desc_addr)
277 int id = chan->device->id;
279 switch (id) {
280 case DMA0_ID:
281 case DMA1_ID:
282 __raw_writel(next_desc_addr, DMA_NDAR(chan));
283 break;
284 case AAU_ID:
285 __raw_writel(next_desc_addr, AAU_ANDAR(chan));
286 break;
#define IOP_ADMA_STATUS_BUSY		(1 << 10)
/* zero-sum descriptors are limited to 1k per descriptor */
#define IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT (1024)
#define IOP_ADMA_XOR_MAX_BYTE_COUNT	(16 * 1024 * 1024)
#define IOP_ADMA_MAX_BYTE_COUNT		(16 * 1024 * 1024)
296 static inline int iop_chan_is_busy(struct iop_adma_chan *chan)
298 u32 status = __raw_readl(DMA_CSR(chan));
299 return (status & IOP_ADMA_STATUS_BUSY) ? 1 : 0;
302 static inline int iop_desc_is_aligned(struct iop_adma_desc_slot *desc,
303 int num_slots)
305 /* num_slots will only ever be 1, 2, 4, or 8 */
306 return (desc->idx & (num_slots - 1)) ? 0 : 1;
/* to do: support large (i.e. > hw max) buffer sizes */
static inline int iop_chan_memcpy_slot_count(size_t len, int *slots_per_op)
{
	*slots_per_op = 1;
	return 1;
}
/* to do: support large (i.e. > hw max) buffer sizes */
static inline int iop_chan_memset_slot_count(size_t len, int *slots_per_op)
{
	*slots_per_op = 1;
	return 1;
}
/* map an XOR source count (1..32) to the number of descriptor slots the
 * AAU needs (1, 2, 4, or 8); also returned through *slots_per_op
 */
static inline int iop3xx_aau_xor_slot_count(size_t len, int src_cnt,
					    int *slots_per_op)
{
	static const char slot_count_table[] = {
				1, 1, 1, 1, /* 01 - 04 */
				2, 2, 2, 2, /* 05 - 08 */
				4, 4, 4, 4, /* 09 - 12 */
				4, 4, 4, 4, /* 13 - 16 */
				8, 8, 8, 8, /* 17 - 20 */
				8, 8, 8, 8, /* 21 - 24 */
				8, 8, 8, 8, /* 25 - 28 */
				8, 8, 8, 8, /* 29 - 32 */
			      };
	*slots_per_op = slot_count_table[src_cnt - 1];
	return *slots_per_op;
}
340 static inline int
341 iop_chan_interrupt_slot_count(int *slots_per_op, struct iop_adma_chan *chan)
343 switch (chan->device->id) {
344 case DMA0_ID:
345 case DMA1_ID:
346 return iop_chan_memcpy_slot_count(0, slots_per_op);
347 case AAU_ID:
348 return iop3xx_aau_xor_slot_count(0, 2, slots_per_op);
349 default:
350 BUG();
352 return 0;
355 static inline int iop_chan_xor_slot_count(size_t len, int src_cnt,
356 int *slots_per_op)
358 int slot_cnt = iop3xx_aau_xor_slot_count(len, src_cnt, slots_per_op);
360 if (len <= IOP_ADMA_XOR_MAX_BYTE_COUNT)
361 return slot_cnt;
363 len -= IOP_ADMA_XOR_MAX_BYTE_COUNT;
364 while (len > IOP_ADMA_XOR_MAX_BYTE_COUNT) {
365 len -= IOP_ADMA_XOR_MAX_BYTE_COUNT;
366 slot_cnt += *slots_per_op;
369 slot_cnt += *slots_per_op;
371 return slot_cnt;
374 /* zero sum on iop3xx is limited to 1k at a time so it requires multiple
375 * descriptors
377 static inline int iop_chan_zero_sum_slot_count(size_t len, int src_cnt,
378 int *slots_per_op)
380 int slot_cnt = iop3xx_aau_xor_slot_count(len, src_cnt, slots_per_op);
382 if (len <= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT)
383 return slot_cnt;
385 len -= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
386 while (len > IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT) {
387 len -= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
388 slot_cnt += *slots_per_op;
391 slot_cnt += *slots_per_op;
393 return slot_cnt;
/* iop3xx never produces p+q descriptors */
static inline int iop_desc_is_pq(struct iop_adma_desc_slot *desc)
{
	return 0;
}
401 static inline u32 iop_desc_get_dest_addr(struct iop_adma_desc_slot *desc,
402 struct iop_adma_chan *chan)
404 union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
406 switch (chan->device->id) {
407 case DMA0_ID:
408 case DMA1_ID:
409 return hw_desc.dma->dest_addr;
410 case AAU_ID:
411 return hw_desc.aau->dest_addr;
412 default:
413 BUG();
415 return 0;
419 static inline u32 iop_desc_get_qdest_addr(struct iop_adma_desc_slot *desc,
420 struct iop_adma_chan *chan)
422 BUG();
423 return 0;
426 static inline u32 iop_desc_get_byte_count(struct iop_adma_desc_slot *desc,
427 struct iop_adma_chan *chan)
429 union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
431 switch (chan->device->id) {
432 case DMA0_ID:
433 case DMA1_ID:
434 return hw_desc.dma->byte_count;
435 case AAU_ID:
436 return hw_desc.aau->byte_count;
437 default:
438 BUG();
440 return 0;
/* translate the src_idx to a descriptor word index */
static inline int __desc_idx(int src_idx)
{
	/* gaps skip the src_edc[] entries reserved for EDCR control words */
	static const int desc_idx_table[] = { 0, 0, 0, 0,
					      0, 1, 2, 3,
					      5, 6, 7, 8,
					      9, 10, 11, 12,
					      14, 15, 16, 17,
					      18, 19, 20, 21,
					      23, 24, 25, 26,
					      27, 28, 29, 30,
					    };

	return desc_idx_table[src_idx];
}
459 static inline u32 iop_desc_get_src_addr(struct iop_adma_desc_slot *desc,
460 struct iop_adma_chan *chan,
461 int src_idx)
463 union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
465 switch (chan->device->id) {
466 case DMA0_ID:
467 case DMA1_ID:
468 return hw_desc.dma->src_addr;
469 case AAU_ID:
470 break;
471 default:
472 BUG();
475 if (src_idx < 4)
476 return hw_desc.aau->src[src_idx];
477 else
478 return hw_desc.aau->src_edc[__desc_idx(src_idx)].src_addr;
481 static inline void iop3xx_aau_desc_set_src_addr(struct iop3xx_desc_aau *hw_desc,
482 int src_idx, dma_addr_t addr)
484 if (src_idx < 4)
485 hw_desc->src[src_idx] = addr;
486 else
487 hw_desc->src_edc[__desc_idx(src_idx)].src_addr = addr;
490 static inline void
491 iop_desc_init_memcpy(struct iop_adma_desc_slot *desc, unsigned long flags)
493 struct iop3xx_desc_dma *hw_desc = desc->hw_desc;
494 union {
495 u32 value;
496 struct iop3xx_dma_desc_ctrl field;
497 } u_desc_ctrl;
499 u_desc_ctrl.value = 0;
500 u_desc_ctrl.field.mem_to_mem_en = 1;
501 u_desc_ctrl.field.pci_transaction = 0xe; /* memory read block */
502 u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
503 hw_desc->desc_ctrl = u_desc_ctrl.value;
504 hw_desc->upper_pci_src_addr = 0;
505 hw_desc->crc_addr = 0;
508 static inline void
509 iop_desc_init_memset(struct iop_adma_desc_slot *desc, unsigned long flags)
511 struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
512 union {
513 u32 value;
514 struct iop3xx_aau_desc_ctrl field;
515 } u_desc_ctrl;
517 u_desc_ctrl.value = 0;
518 u_desc_ctrl.field.blk1_cmd_ctrl = 0x2; /* memory block fill */
519 u_desc_ctrl.field.dest_write_en = 1;
520 u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
521 hw_desc->desc_ctrl = u_desc_ctrl.value;
524 static inline u32
525 iop3xx_desc_init_xor(struct iop3xx_desc_aau *hw_desc, int src_cnt,
526 unsigned long flags)
528 int i, shift;
529 u32 edcr;
530 union {
531 u32 value;
532 struct iop3xx_aau_desc_ctrl field;
533 } u_desc_ctrl;
535 u_desc_ctrl.value = 0;
536 switch (src_cnt) {
537 case 25 ... 32:
538 u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
539 edcr = 0;
540 shift = 1;
541 for (i = 24; i < src_cnt; i++) {
542 edcr |= (1 << shift);
543 shift += 3;
545 hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = edcr;
546 src_cnt = 24;
547 /* fall through */
548 case 17 ... 24:
549 if (!u_desc_ctrl.field.blk_ctrl) {
550 hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0;
551 u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
553 edcr = 0;
554 shift = 1;
555 for (i = 16; i < src_cnt; i++) {
556 edcr |= (1 << shift);
557 shift += 3;
559 hw_desc->src_edc[AAU_EDCR1_IDX].e_desc_ctrl = edcr;
560 src_cnt = 16;
561 /* fall through */
562 case 9 ... 16:
563 if (!u_desc_ctrl.field.blk_ctrl)
564 u_desc_ctrl.field.blk_ctrl = 0x2; /* use EDCR0 */
565 edcr = 0;
566 shift = 1;
567 for (i = 8; i < src_cnt; i++) {
568 edcr |= (1 << shift);
569 shift += 3;
571 hw_desc->src_edc[AAU_EDCR0_IDX].e_desc_ctrl = edcr;
572 src_cnt = 8;
573 /* fall through */
574 case 2 ... 8:
575 shift = 1;
576 for (i = 0; i < src_cnt; i++) {
577 u_desc_ctrl.value |= (1 << shift);
578 shift += 3;
581 if (!u_desc_ctrl.field.blk_ctrl && src_cnt > 4)
582 u_desc_ctrl.field.blk_ctrl = 0x1; /* use mini-desc */
585 u_desc_ctrl.field.dest_write_en = 1;
586 u_desc_ctrl.field.blk1_cmd_ctrl = 0x7; /* direct fill */
587 u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
588 hw_desc->desc_ctrl = u_desc_ctrl.value;
590 return u_desc_ctrl.value;
593 static inline void
594 iop_desc_init_xor(struct iop_adma_desc_slot *desc, int src_cnt,
595 unsigned long flags)
597 iop3xx_desc_init_xor(desc->hw_desc, src_cnt, flags);
600 /* return the number of operations */
601 static inline int
602 iop_desc_init_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt,
603 unsigned long flags)
605 int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op;
606 struct iop3xx_desc_aau *hw_desc, *prev_hw_desc, *iter;
607 union {
608 u32 value;
609 struct iop3xx_aau_desc_ctrl field;
610 } u_desc_ctrl;
611 int i, j;
613 hw_desc = desc->hw_desc;
615 for (i = 0, j = 0; (slot_cnt -= slots_per_op) >= 0;
616 i += slots_per_op, j++) {
617 iter = iop_hw_desc_slot_idx(hw_desc, i);
618 u_desc_ctrl.value = iop3xx_desc_init_xor(iter, src_cnt, flags);
619 u_desc_ctrl.field.dest_write_en = 0;
620 u_desc_ctrl.field.zero_result_en = 1;
621 u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
622 iter->desc_ctrl = u_desc_ctrl.value;
624 /* for the subsequent descriptors preserve the store queue
625 * and chain them together
627 if (i) {
628 prev_hw_desc =
629 iop_hw_desc_slot_idx(hw_desc, i - slots_per_op);
630 prev_hw_desc->next_desc =
631 (u32) (desc->async_tx.phys + (i << 5));
635 return j;
638 static inline void
639 iop_desc_init_null_xor(struct iop_adma_desc_slot *desc, int src_cnt,
640 unsigned long flags)
642 struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
643 union {
644 u32 value;
645 struct iop3xx_aau_desc_ctrl field;
646 } u_desc_ctrl;
648 u_desc_ctrl.value = 0;
649 switch (src_cnt) {
650 case 25 ... 32:
651 u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
652 hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0;
653 /* fall through */
654 case 17 ... 24:
655 if (!u_desc_ctrl.field.blk_ctrl) {
656 hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0;
657 u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
659 hw_desc->src_edc[AAU_EDCR1_IDX].e_desc_ctrl = 0;
660 /* fall through */
661 case 9 ... 16:
662 if (!u_desc_ctrl.field.blk_ctrl)
663 u_desc_ctrl.field.blk_ctrl = 0x2; /* use EDCR0 */
664 hw_desc->src_edc[AAU_EDCR0_IDX].e_desc_ctrl = 0;
665 /* fall through */
666 case 1 ... 8:
667 if (!u_desc_ctrl.field.blk_ctrl && src_cnt > 4)
668 u_desc_ctrl.field.blk_ctrl = 0x1; /* use mini-desc */
671 u_desc_ctrl.field.dest_write_en = 0;
672 u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
673 hw_desc->desc_ctrl = u_desc_ctrl.value;
676 static inline void iop_desc_set_byte_count(struct iop_adma_desc_slot *desc,
677 struct iop_adma_chan *chan,
678 u32 byte_count)
680 union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
682 switch (chan->device->id) {
683 case DMA0_ID:
684 case DMA1_ID:
685 hw_desc.dma->byte_count = byte_count;
686 break;
687 case AAU_ID:
688 hw_desc.aau->byte_count = byte_count;
689 break;
690 default:
691 BUG();
695 static inline void
696 iop_desc_init_interrupt(struct iop_adma_desc_slot *desc,
697 struct iop_adma_chan *chan)
699 union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
701 switch (chan->device->id) {
702 case DMA0_ID:
703 case DMA1_ID:
704 iop_desc_init_memcpy(desc, 1);
705 hw_desc.dma->byte_count = 0;
706 hw_desc.dma->dest_addr = 0;
707 hw_desc.dma->src_addr = 0;
708 break;
709 case AAU_ID:
710 iop_desc_init_null_xor(desc, 2, 1);
711 hw_desc.aau->byte_count = 0;
712 hw_desc.aau->dest_addr = 0;
713 hw_desc.aau->src[0] = 0;
714 hw_desc.aau->src[1] = 0;
715 break;
716 default:
717 BUG();
721 static inline void
722 iop_desc_set_zero_sum_byte_count(struct iop_adma_desc_slot *desc, u32 len)
724 int slots_per_op = desc->slots_per_op;
725 struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter;
726 int i = 0;
728 if (len <= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT) {
729 hw_desc->byte_count = len;
730 } else {
731 do {
732 iter = iop_hw_desc_slot_idx(hw_desc, i);
733 iter->byte_count = IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
734 len -= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
735 i += slots_per_op;
736 } while (len > IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT);
738 iter = iop_hw_desc_slot_idx(hw_desc, i);
739 iter->byte_count = len;
743 static inline void iop_desc_set_dest_addr(struct iop_adma_desc_slot *desc,
744 struct iop_adma_chan *chan,
745 dma_addr_t addr)
747 union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
749 switch (chan->device->id) {
750 case DMA0_ID:
751 case DMA1_ID:
752 hw_desc.dma->dest_addr = addr;
753 break;
754 case AAU_ID:
755 hw_desc.aau->dest_addr = addr;
756 break;
757 default:
758 BUG();
762 static inline void iop_desc_set_memcpy_src_addr(struct iop_adma_desc_slot *desc,
763 dma_addr_t addr)
765 struct iop3xx_desc_dma *hw_desc = desc->hw_desc;
766 hw_desc->src_addr = addr;
769 static inline void
770 iop_desc_set_zero_sum_src_addr(struct iop_adma_desc_slot *desc, int src_idx,
771 dma_addr_t addr)
774 struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter;
775 int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op;
776 int i;
778 for (i = 0; (slot_cnt -= slots_per_op) >= 0;
779 i += slots_per_op, addr += IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT) {
780 iter = iop_hw_desc_slot_idx(hw_desc, i);
781 iop3xx_aau_desc_set_src_addr(iter, src_idx, addr);
785 static inline void iop_desc_set_xor_src_addr(struct iop_adma_desc_slot *desc,
786 int src_idx, dma_addr_t addr)
789 struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter;
790 int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op;
791 int i;
793 for (i = 0; (slot_cnt -= slots_per_op) >= 0;
794 i += slots_per_op, addr += IOP_ADMA_XOR_MAX_BYTE_COUNT) {
795 iter = iop_hw_desc_slot_idx(hw_desc, i);
796 iop3xx_aau_desc_set_src_addr(iter, src_idx, addr);
800 static inline void iop_desc_set_next_desc(struct iop_adma_desc_slot *desc,
801 u32 next_desc_addr)
803 /* hw_desc->next_desc is the same location for all channels */
804 union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
806 iop_paranoia(hw_desc.dma->next_desc);
807 hw_desc.dma->next_desc = next_desc_addr;
810 static inline u32 iop_desc_get_next_desc(struct iop_adma_desc_slot *desc)
812 /* hw_desc->next_desc is the same location for all channels */
813 union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
814 return hw_desc.dma->next_desc;
817 static inline void iop_desc_clear_next_desc(struct iop_adma_desc_slot *desc)
819 /* hw_desc->next_desc is the same location for all channels */
820 union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
821 hw_desc.dma->next_desc = 0;
824 static inline void iop_desc_set_block_fill_val(struct iop_adma_desc_slot *desc,
825 u32 val)
827 struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
828 hw_desc->src[0] = val;
831 static inline enum sum_check_flags
832 iop_desc_get_zero_result(struct iop_adma_desc_slot *desc)
834 struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
835 struct iop3xx_aau_desc_ctrl desc_ctrl = hw_desc->desc_ctrl_field;
837 iop_paranoia(!(desc_ctrl.tx_complete && desc_ctrl.zero_result_en));
838 return desc_ctrl.zero_result_err << SUM_CHECK_P;
841 static inline void iop_chan_append(struct iop_adma_chan *chan)
843 u32 dma_chan_ctrl;
845 dma_chan_ctrl = __raw_readl(DMA_CCR(chan));
846 dma_chan_ctrl |= 0x2;
847 __raw_writel(dma_chan_ctrl, DMA_CCR(chan));
850 static inline u32 iop_chan_get_status(struct iop_adma_chan *chan)
852 return __raw_readl(DMA_CSR(chan));
855 static inline void iop_chan_disable(struct iop_adma_chan *chan)
857 u32 dma_chan_ctrl = __raw_readl(DMA_CCR(chan));
858 dma_chan_ctrl &= ~1;
859 __raw_writel(dma_chan_ctrl, DMA_CCR(chan));
862 static inline void iop_chan_enable(struct iop_adma_chan *chan)
864 u32 dma_chan_ctrl = __raw_readl(DMA_CCR(chan));
866 dma_chan_ctrl |= 1;
867 __raw_writel(dma_chan_ctrl, DMA_CCR(chan));
870 static inline void iop_adma_device_clear_eot_status(struct iop_adma_chan *chan)
872 u32 status = __raw_readl(DMA_CSR(chan));
873 status &= (1 << 9);
874 __raw_writel(status, DMA_CSR(chan));
877 static inline void iop_adma_device_clear_eoc_status(struct iop_adma_chan *chan)
879 u32 status = __raw_readl(DMA_CSR(chan));
880 status &= (1 << 8);
881 __raw_writel(status, DMA_CSR(chan));
884 static inline void iop_adma_device_clear_err_status(struct iop_adma_chan *chan)
886 u32 status = __raw_readl(DMA_CSR(chan));
888 switch (chan->device->id) {
889 case DMA0_ID:
890 case DMA1_ID:
891 status &= (1 << 5) | (1 << 3) | (1 << 2) | (1 << 1);
892 break;
893 case AAU_ID:
894 status &= (1 << 5);
895 break;
896 default:
897 BUG();
900 __raw_writel(status, DMA_CSR(chan));
/* parity errors are not reported on iop3xx */
static inline int
iop_is_err_int_parity(unsigned long status, struct iop_adma_chan *chan)
{
	return 0;
}
/* MCU aborts are not reported on iop3xx */
static inline int
iop_is_err_mcu_abort(unsigned long status, struct iop_adma_chan *chan)
{
	return 0;
}
/* internal-bus target aborts are not reported on iop3xx */
static inline int
iop_is_err_int_tabort(unsigned long status, struct iop_adma_chan *chan)
{
	return 0;
}
/* internal-bus master abort is status bit 5 on all channel types */
static inline int
iop_is_err_int_mabort(unsigned long status, struct iop_adma_chan *chan)
{
	return test_bit(5, &status);
}
927 static inline int
928 iop_is_err_pci_tabort(unsigned long status, struct iop_adma_chan *chan)
930 switch (chan->device->id) {
931 case DMA0_ID:
932 case DMA1_ID:
933 return test_bit(2, &status);
934 default:
935 return 0;
939 static inline int
940 iop_is_err_pci_mabort(unsigned long status, struct iop_adma_chan *chan)
942 switch (chan->device->id) {
943 case DMA0_ID:
944 case DMA1_ID:
945 return test_bit(3, &status);
946 default:
947 return 0;
951 static inline int
952 iop_is_err_split_tx(unsigned long status, struct iop_adma_chan *chan)
954 switch (chan->device->id) {
955 case DMA0_ID:
956 case DMA1_ID:
957 return test_bit(1, &status);
958 default:
959 return 0;
962 #endif /* _ADMA_H */