/*
 * Copyright (C) ST-Ericsson SA 2007-2010
 * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
 * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
 * License terms: GNU General Public License (GPL) version 2
 */
8 #include <linux/kernel.h>
9 #include <plat/ste_dma40.h>
11 #include "ste_dma40_ll.h"
13 /* Sets up proper LCSP1 and LCSP3 register for a logical channel */
14 void d40_log_cfg(struct stedma40_chan_cfg
*cfg
,
15 u32
*lcsp1
, u32
*lcsp3
)
20 /* src is mem? -> increase address pos */
21 if (cfg
->dir
== STEDMA40_MEM_TO_PERIPH
||
22 cfg
->dir
== STEDMA40_MEM_TO_MEM
)
23 l1
|= 1 << D40_MEM_LCSP1_SCFG_INCR_POS
;
25 /* dst is mem? -> increase address pos */
26 if (cfg
->dir
== STEDMA40_PERIPH_TO_MEM
||
27 cfg
->dir
== STEDMA40_MEM_TO_MEM
)
28 l3
|= 1 << D40_MEM_LCSP3_DCFG_INCR_POS
;
30 /* src is hw? -> master port 1 */
31 if (cfg
->dir
== STEDMA40_PERIPH_TO_MEM
||
32 cfg
->dir
== STEDMA40_PERIPH_TO_PERIPH
)
33 l1
|= 1 << D40_MEM_LCSP1_SCFG_MST_POS
;
35 /* dst is hw? -> master port 1 */
36 if (cfg
->dir
== STEDMA40_MEM_TO_PERIPH
||
37 cfg
->dir
== STEDMA40_PERIPH_TO_PERIPH
)
38 l3
|= 1 << D40_MEM_LCSP3_DCFG_MST_POS
;
40 l3
|= 1 << D40_MEM_LCSP3_DCFG_EIM_POS
;
41 l3
|= cfg
->dst_info
.psize
<< D40_MEM_LCSP3_DCFG_PSIZE_POS
;
42 l3
|= cfg
->dst_info
.data_width
<< D40_MEM_LCSP3_DCFG_ESIZE_POS
;
44 l1
|= 1 << D40_MEM_LCSP1_SCFG_EIM_POS
;
45 l1
|= cfg
->src_info
.psize
<< D40_MEM_LCSP1_SCFG_PSIZE_POS
;
46 l1
|= cfg
->src_info
.data_width
<< D40_MEM_LCSP1_SCFG_ESIZE_POS
;
53 /* Sets up SRC and DST CFG register for both logical and physical channels */
54 void d40_phy_cfg(struct stedma40_chan_cfg
*cfg
,
55 u32
*src_cfg
, u32
*dst_cfg
, bool is_log
)
61 /* Physical channel */
62 if ((cfg
->dir
== STEDMA40_PERIPH_TO_MEM
) ||
63 (cfg
->dir
== STEDMA40_PERIPH_TO_PERIPH
)) {
64 /* Set master port to 1 */
65 src
|= 1 << D40_SREG_CFG_MST_POS
;
66 src
|= D40_TYPE_TO_EVENT(cfg
->src_dev_type
);
68 if (cfg
->src_info
.flow_ctrl
== STEDMA40_NO_FLOW_CTRL
)
69 src
|= 1 << D40_SREG_CFG_PHY_TM_POS
;
71 src
|= 3 << D40_SREG_CFG_PHY_TM_POS
;
73 if ((cfg
->dir
== STEDMA40_MEM_TO_PERIPH
) ||
74 (cfg
->dir
== STEDMA40_PERIPH_TO_PERIPH
)) {
75 /* Set master port to 1 */
76 dst
|= 1 << D40_SREG_CFG_MST_POS
;
77 dst
|= D40_TYPE_TO_EVENT(cfg
->dst_dev_type
);
79 if (cfg
->dst_info
.flow_ctrl
== STEDMA40_NO_FLOW_CTRL
)
80 dst
|= 1 << D40_SREG_CFG_PHY_TM_POS
;
82 dst
|= 3 << D40_SREG_CFG_PHY_TM_POS
;
84 /* Interrupt on end of transfer for destination */
85 dst
|= 1 << D40_SREG_CFG_TIM_POS
;
87 /* Generate interrupt on error */
88 src
|= 1 << D40_SREG_CFG_EIM_POS
;
89 dst
|= 1 << D40_SREG_CFG_EIM_POS
;
92 if (cfg
->src_info
.psize
!= STEDMA40_PSIZE_PHY_1
) {
93 src
|= 1 << D40_SREG_CFG_PHY_PEN_POS
;
94 src
|= cfg
->src_info
.psize
<< D40_SREG_CFG_PSIZE_POS
;
96 if (cfg
->dst_info
.psize
!= STEDMA40_PSIZE_PHY_1
) {
97 dst
|= 1 << D40_SREG_CFG_PHY_PEN_POS
;
98 dst
|= cfg
->dst_info
.psize
<< D40_SREG_CFG_PSIZE_POS
;
102 src
|= cfg
->src_info
.data_width
<< D40_SREG_CFG_ESIZE_POS
;
103 dst
|= cfg
->dst_info
.data_width
<< D40_SREG_CFG_ESIZE_POS
;
106 /* Logical channel */
107 dst
|= 1 << D40_SREG_CFG_LOG_GIM_POS
;
108 src
|= 1 << D40_SREG_CFG_LOG_GIM_POS
;
111 if (cfg
->high_priority
) {
112 src
|= 1 << D40_SREG_CFG_PRI_POS
;
113 dst
|= 1 << D40_SREG_CFG_PRI_POS
;
116 if (cfg
->src_info
.big_endian
)
117 src
|= 1 << D40_SREG_CFG_LBE_POS
;
118 if (cfg
->dst_info
.big_endian
)
119 dst
|= 1 << D40_SREG_CFG_LBE_POS
;
125 static int d40_phy_fill_lli(struct d40_phy_lli
*lli
,
137 if (psize
== STEDMA40_PSIZE_PHY_1
)
140 num_elems
= 2 << psize
;
142 /* Must be aligned */
143 if (!IS_ALIGNED(data
, 0x1 << data_width
))
146 /* Transfer size can't be smaller than (num_elms * elem_size) */
147 if (data_size
< num_elems
* (0x1 << data_width
))
150 /* The number of elements. IE now many chunks */
151 lli
->reg_elt
= (data_size
>> data_width
) << D40_SREG_ELEM_PHY_ECNT_POS
;
154 * Distance to next element sized entry.
155 * Usually the size of the element unless you want gaps.
158 lli
->reg_elt
|= (0x1 << data_width
) <<
159 D40_SREG_ELEM_PHY_EIDX_POS
;
161 /* Where the data is */
163 lli
->reg_cfg
= reg_cfg
;
165 /* If this scatter list entry is the last one, no next link */
167 lli
->reg_lnk
= 0x1 << D40_SREG_LNK_PHY_TCP_POS
;
169 lli
->reg_lnk
= next_lli
;
171 /* Set/clear interrupt generation on this link item.*/
173 lli
->reg_cfg
|= 0x1 << D40_SREG_CFG_TIM_POS
;
175 lli
->reg_cfg
&= ~(0x1 << D40_SREG_CFG_TIM_POS
);
178 lli
->reg_lnk
|= 0 << D40_SREG_LNK_PHY_PRE_POS
;
183 static int d40_seg_size(int size
, int data_width1
, int data_width2
)
185 u32 max_w
= max(data_width1
, data_width2
);
186 u32 min_w
= min(data_width1
, data_width2
);
187 u32 seg_max
= ALIGN(STEDMA40_MAX_SEG_SIZE
<< min_w
, 1 << max_w
);
189 if (seg_max
> STEDMA40_MAX_SEG_SIZE
)
190 seg_max
-= (1 << max_w
);
195 if (size
<= 2 * seg_max
)
196 return ALIGN(size
/ 2, 1 << max_w
);
201 struct d40_phy_lli
*d40_phy_buf_to_lli(struct d40_phy_lli
*lli
,
213 dma_addr_t next
= lli_phys
;
214 int size_rest
= size
;
218 size_seg
= d40_seg_size(size_rest
, data_width1
, data_width2
);
219 size_rest
-= size_seg
;
221 if (term_int
&& size_rest
== 0)
224 next
= ALIGN(next
+ sizeof(struct d40_phy_lli
),
227 err
= d40_phy_fill_lli(lli
,
251 int d40_phy_sg_to_lli(struct scatterlist
*sg
,
254 struct d40_phy_lli
*lli_sg
,
263 struct scatterlist
*current_sg
= sg
;
265 struct d40_phy_lli
*lli
= lli_sg
;
266 dma_addr_t l_phys
= lli_phys
;
268 for_each_sg(sg
, current_sg
, sg_len
, i
) {
270 total_size
+= sg_dma_len(current_sg
);
275 dst
= sg_phys(current_sg
);
277 l_phys
= ALIGN(lli_phys
+ (lli
- lli_sg
) *
278 sizeof(struct d40_phy_lli
), D40_LLI_ALIGN
);
280 lli
= d40_phy_buf_to_lli(lli
,
282 sg_dma_len(current_sg
),
298 void d40_phy_lli_write(void __iomem
*virtbase
,
300 struct d40_phy_lli
*lli_dst
,
301 struct d40_phy_lli
*lli_src
)
304 writel(lli_src
->reg_cfg
, virtbase
+ D40_DREG_PCBASE
+
305 phy_chan_num
* D40_DREG_PCDELTA
+ D40_CHAN_REG_SSCFG
);
306 writel(lli_src
->reg_elt
, virtbase
+ D40_DREG_PCBASE
+
307 phy_chan_num
* D40_DREG_PCDELTA
+ D40_CHAN_REG_SSELT
);
308 writel(lli_src
->reg_ptr
, virtbase
+ D40_DREG_PCBASE
+
309 phy_chan_num
* D40_DREG_PCDELTA
+ D40_CHAN_REG_SSPTR
);
310 writel(lli_src
->reg_lnk
, virtbase
+ D40_DREG_PCBASE
+
311 phy_chan_num
* D40_DREG_PCDELTA
+ D40_CHAN_REG_SSLNK
);
313 writel(lli_dst
->reg_cfg
, virtbase
+ D40_DREG_PCBASE
+
314 phy_chan_num
* D40_DREG_PCDELTA
+ D40_CHAN_REG_SDCFG
);
315 writel(lli_dst
->reg_elt
, virtbase
+ D40_DREG_PCBASE
+
316 phy_chan_num
* D40_DREG_PCDELTA
+ D40_CHAN_REG_SDELT
);
317 writel(lli_dst
->reg_ptr
, virtbase
+ D40_DREG_PCBASE
+
318 phy_chan_num
* D40_DREG_PCDELTA
+ D40_CHAN_REG_SDPTR
);
319 writel(lli_dst
->reg_lnk
, virtbase
+ D40_DREG_PCBASE
+
320 phy_chan_num
* D40_DREG_PCDELTA
+ D40_CHAN_REG_SDLNK
);
324 /* DMA logical lli operations */
326 static void d40_log_lli_link(struct d40_log_lli
*lli_dst
,
327 struct d40_log_lli
*lli_src
,
333 if (next
!= -EINVAL
) {
337 lli_dst
->lcsp13
|= D40_MEM_LCSP1_SCFG_TIM_MASK
;
338 lli_dst
->lcsp13
|= D40_MEM_LCSP3_DTCP_MASK
;
341 lli_src
->lcsp13
= (lli_src
->lcsp13
& ~D40_MEM_LCSP1_SLOS_MASK
) |
342 (slos
<< D40_MEM_LCSP1_SLOS_POS
);
344 lli_dst
->lcsp13
= (lli_dst
->lcsp13
& ~D40_MEM_LCSP1_SLOS_MASK
) |
345 (dlos
<< D40_MEM_LCSP1_SLOS_POS
);
348 void d40_log_lli_lcpa_write(struct d40_log_lli_full
*lcpa
,
349 struct d40_log_lli
*lli_dst
,
350 struct d40_log_lli
*lli_src
,
353 d40_log_lli_link(lli_dst
, lli_src
, next
);
355 writel(lli_src
->lcsp02
, &lcpa
[0].lcsp0
);
356 writel(lli_src
->lcsp13
, &lcpa
[0].lcsp1
);
357 writel(lli_dst
->lcsp02
, &lcpa
[0].lcsp2
);
358 writel(lli_dst
->lcsp13
, &lcpa
[0].lcsp3
);
361 void d40_log_lli_lcla_write(struct d40_log_lli
*lcla
,
362 struct d40_log_lli
*lli_dst
,
363 struct d40_log_lli
*lli_src
,
366 d40_log_lli_link(lli_dst
, lli_src
, next
);
368 writel(lli_src
->lcsp02
, &lcla
[0].lcsp02
);
369 writel(lli_src
->lcsp13
, &lcla
[0].lcsp13
);
370 writel(lli_dst
->lcsp02
, &lcla
[1].lcsp02
);
371 writel(lli_dst
->lcsp13
, &lcla
[1].lcsp13
);
374 static void d40_log_fill_lli(struct d40_log_lli
*lli
,
375 dma_addr_t data
, u32 data_size
,
380 lli
->lcsp13
= reg_cfg
;
382 /* The number of elements to transfer */
383 lli
->lcsp02
= ((data_size
>> data_width
) <<
384 D40_MEM_LCSP0_ECNT_POS
) & D40_MEM_LCSP0_ECNT_MASK
;
386 BUG_ON((data_size
>> data_width
) > STEDMA40_MAX_SEG_SIZE
);
388 /* 16 LSBs address of the current element */
389 lli
->lcsp02
|= data
& D40_MEM_LCSP0_SPTR_MASK
;
390 /* 16 MSBs address of the current element */
391 lli
->lcsp13
|= data
& D40_MEM_LCSP1_SPTR_MASK
;
394 lli
->lcsp13
|= D40_MEM_LCSP1_SCFG_INCR_MASK
;
398 int d40_log_sg_to_dev(struct scatterlist
*sg
,
400 struct d40_log_lli_bidir
*lli
,
401 struct d40_def_lcsp
*lcsp
,
404 enum dma_data_direction direction
,
408 struct scatterlist
*current_sg
= sg
;
410 struct d40_log_lli
*lli_src
= lli
->src
;
411 struct d40_log_lli
*lli_dst
= lli
->dst
;
413 for_each_sg(sg
, current_sg
, sg_len
, i
) {
414 total_size
+= sg_dma_len(current_sg
);
416 if (direction
== DMA_TO_DEVICE
) {
418 d40_log_buf_to_lli(lli_src
,
420 sg_dma_len(current_sg
),
421 lcsp
->lcsp1
, src_data_width
,
425 d40_log_buf_to_lli(lli_dst
,
427 sg_dma_len(current_sg
),
428 lcsp
->lcsp3
, dst_data_width
,
433 d40_log_buf_to_lli(lli_dst
,
435 sg_dma_len(current_sg
),
436 lcsp
->lcsp3
, dst_data_width
,
440 d40_log_buf_to_lli(lli_src
,
442 sg_dma_len(current_sg
),
443 lcsp
->lcsp1
, src_data_width
,
451 struct d40_log_lli
*d40_log_buf_to_lli(struct d40_log_lli
*lli_sg
,
454 u32 lcsp13
, /* src or dst*/
459 struct d40_log_lli
*lli
= lli_sg
;
460 int size_rest
= size
;
464 size_seg
= d40_seg_size(size_rest
, data_width1
, data_width2
);
465 size_rest
-= size_seg
;
467 d40_log_fill_lli(lli
,
480 int d40_log_sg_to_lli(struct scatterlist
*sg
,
482 struct d40_log_lli
*lli_sg
,
483 u32 lcsp13
, /* src or dst*/
484 u32 data_width1
, u32 data_width2
)
487 struct scatterlist
*current_sg
= sg
;
489 struct d40_log_lli
*lli
= lli_sg
;
491 for_each_sg(sg
, current_sg
, sg_len
, i
) {
492 total_size
+= sg_dma_len(current_sg
);
493 lli
= d40_log_buf_to_lli(lli
,
495 sg_dma_len(current_sg
),
497 data_width1
, data_width2
, true);