drivers/dma/ste_dma40_ll.c
/*
 * Copyright (C) ST-Ericsson SA 2007-2010
 * Author: Per Friden <per.friden@stericsson.com> for ST-Ericsson
 * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
 * License terms: GNU General Public License (GPL) version 2
 */

#include <linux/kernel.h>
#include <plat/ste_dma40.h>

#include "ste_dma40_ll.h"
/* Sets up proper LCSP1 and LCSP3 register for a logical channel */
void d40_log_cfg(struct stedma40_chan_cfg *cfg,
		 u32 *lcsp1, u32 *lcsp3)
{
	u32 l3 = 0; /* dst */
	u32 l1 = 0; /* src */

	/* src is mem? -> increase address pos */
	if (cfg->dir == STEDMA40_MEM_TO_PERIPH ||
	    cfg->dir == STEDMA40_MEM_TO_MEM)
		l1 |= 1 << D40_MEM_LCSP1_SCFG_INCR_POS;

	/* dst is mem? -> increase address pos */
	if (cfg->dir == STEDMA40_PERIPH_TO_MEM ||
	    cfg->dir == STEDMA40_MEM_TO_MEM)
		l3 |= 1 << D40_MEM_LCSP3_DCFG_INCR_POS;

	/* src is hw? -> master port 1 */
	if (cfg->dir == STEDMA40_PERIPH_TO_MEM ||
	    cfg->dir == STEDMA40_PERIPH_TO_PERIPH)
		l1 |= 1 << D40_MEM_LCSP1_SCFG_MST_POS;

	/* dst is hw? -> master port 1 */
	if (cfg->dir == STEDMA40_MEM_TO_PERIPH ||
	    cfg->dir == STEDMA40_PERIPH_TO_PERIPH)
		l3 |= 1 << D40_MEM_LCSP3_DCFG_MST_POS;

	l3 |= 1 << D40_MEM_LCSP3_DCFG_EIM_POS;
	l3 |= cfg->dst_info.psize << D40_MEM_LCSP3_DCFG_PSIZE_POS;
	l3 |= cfg->dst_info.data_width << D40_MEM_LCSP3_DCFG_ESIZE_POS;

	l1 |= 1 << D40_MEM_LCSP1_SCFG_EIM_POS;
	l1 |= cfg->src_info.psize << D40_MEM_LCSP1_SCFG_PSIZE_POS;
	l1 |= cfg->src_info.data_width << D40_MEM_LCSP1_SCFG_ESIZE_POS;

	*lcsp1 = l1;
	*lcsp3 = l3;
}
/* Sets up SRC and DST CFG register for both logical and physical channels */
void d40_phy_cfg(struct stedma40_chan_cfg *cfg,
		 u32 *src_cfg, u32 *dst_cfg, bool is_log)
{
	u32 src = 0;
	u32 dst = 0;

	if (!is_log) {
		/* Physical channel */
		if ((cfg->dir == STEDMA40_PERIPH_TO_MEM) ||
		    (cfg->dir == STEDMA40_PERIPH_TO_PERIPH)) {
			/* Set master port to 1 */
			src |= 1 << D40_SREG_CFG_MST_POS;
			src |= D40_TYPE_TO_EVENT(cfg->src_dev_type);

			if (cfg->src_info.flow_ctrl == STEDMA40_NO_FLOW_CTRL)
				src |= 1 << D40_SREG_CFG_PHY_TM_POS;
			else
				src |= 3 << D40_SREG_CFG_PHY_TM_POS;
		}
		if ((cfg->dir == STEDMA40_MEM_TO_PERIPH) ||
		    (cfg->dir == STEDMA40_PERIPH_TO_PERIPH)) {
			/* Set master port to 1 */
			dst |= 1 << D40_SREG_CFG_MST_POS;
			dst |= D40_TYPE_TO_EVENT(cfg->dst_dev_type);

			if (cfg->dst_info.flow_ctrl == STEDMA40_NO_FLOW_CTRL)
				dst |= 1 << D40_SREG_CFG_PHY_TM_POS;
			else
				dst |= 3 << D40_SREG_CFG_PHY_TM_POS;
		}
		/* Interrupt on end of transfer for destination */
		dst |= 1 << D40_SREG_CFG_TIM_POS;

		/* Generate interrupt on error */
		src |= 1 << D40_SREG_CFG_EIM_POS;
		dst |= 1 << D40_SREG_CFG_EIM_POS;

		/* PSIZE */
		if (cfg->src_info.psize != STEDMA40_PSIZE_PHY_1) {
			src |= 1 << D40_SREG_CFG_PHY_PEN_POS;
			src |= cfg->src_info.psize << D40_SREG_CFG_PSIZE_POS;
		}
		if (cfg->dst_info.psize != STEDMA40_PSIZE_PHY_1) {
			dst |= 1 << D40_SREG_CFG_PHY_PEN_POS;
			dst |= cfg->dst_info.psize << D40_SREG_CFG_PSIZE_POS;
		}

		/* Element size */
		src |= cfg->src_info.data_width << D40_SREG_CFG_ESIZE_POS;
		dst |= cfg->dst_info.data_width << D40_SREG_CFG_ESIZE_POS;

	} else {
		/* Logical channel */
		dst |= 1 << D40_SREG_CFG_LOG_GIM_POS;
		src |= 1 << D40_SREG_CFG_LOG_GIM_POS;
	}

	if (cfg->high_priority) {
		src |= 1 << D40_SREG_CFG_PRI_POS;
		dst |= 1 << D40_SREG_CFG_PRI_POS;
	}

	if (cfg->src_info.big_endian)
		src |= 1 << D40_SREG_CFG_LBE_POS;
	if (cfg->dst_info.big_endian)
		dst |= 1 << D40_SREG_CFG_LBE_POS;

	*src_cfg = src;
	*dst_cfg = dst;
}
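/*
 * Fills in one physical-channel LLI entry: validates transfer size and
 * alignment against the element width, then programs the element count,
 * data pointer, link to the next LLI (or terminal count if it is the
 * last entry) and the terminal interrupt bit.
 */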
int d40_phy_fill_lli(struct d40_phy_lli *lli,
		     dma_addr_t data,
		     u32 data_size,
		     int psize,
		     dma_addr_t next_lli,
		     u32 reg_cfg,
		     bool term_int,
		     u32 data_width,
		     bool is_device)
{
	int num_elems;

	if (psize == STEDMA40_PSIZE_PHY_1)
		num_elems = 1;
	else
		num_elems = 2 << psize;

	/*
	 * Size is 16 bit. data_width is 8, 16, 32 or 64 bit.
	 * Blocks larger than 64 KiB must be split up.
	 */
	if (data_size > (0xffff << data_width))
		return -EINVAL;

	/* Must be aligned */
	if (!IS_ALIGNED(data, 0x1 << data_width))
		return -EINVAL;

	/* Transfer size can't be smaller than (num_elems * elem_size) */
	if (data_size < num_elems * (0x1 << data_width))
		return -EINVAL;

	/* The number of elements, i.e. how many chunks */
	lli->reg_elt = (data_size >> data_width) << D40_SREG_ELEM_PHY_ECNT_POS;

	/*
	 * Distance to next element sized entry.
	 * Usually the size of the element unless you want gaps.
	 */
	if (!is_device)
		lli->reg_elt |= (0x1 << data_width) <<
			D40_SREG_ELEM_PHY_EIDX_POS;

	/* Where the data is */
	lli->reg_ptr = data;
	lli->reg_cfg = reg_cfg;

	/* If this scatter list entry is the last one, no next link */
	if (next_lli == 0)
		lli->reg_lnk = 0x1 << D40_SREG_LNK_PHY_TCP_POS;
	else
		lli->reg_lnk = next_lli;

	/* Set/clear interrupt generation on this link item */
	if (term_int)
		lli->reg_cfg |= 0x1 << D40_SREG_CFG_TIM_POS;
	else
		lli->reg_cfg &= ~(0x1 << D40_SREG_CFG_TIM_POS);

	/* Post link */
	lli->reg_lnk |= 0 << D40_SREG_LNK_PHY_PRE_POS;

	return 0;
}
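/*
 * Converts a scatterlist into a chain of physical-channel LLIs, linking
 * each entry to the next one within lli_phys and requesting a terminal
 * interrupt on the last entry. Returns the total transfer size in bytes,
 * or a negative error code.
 */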
int d40_phy_sg_to_lli(struct scatterlist *sg,
		      int sg_len,
		      dma_addr_t target,
		      struct d40_phy_lli *lli,
		      dma_addr_t lli_phys,
		      u32 reg_cfg,
		      u32 data_width,
		      int psize)
{
	int total_size = 0;
	int i;
	struct scatterlist *current_sg = sg;
	dma_addr_t next_lli_phys;
	dma_addr_t dst;
	int err = 0;

	for_each_sg(sg, current_sg, sg_len, i) {

		total_size += sg_dma_len(current_sg);

		/* If this scatter list entry is the last one, no next link */
		if (sg_len - 1 == i)
			next_lli_phys = 0;
		else
			next_lli_phys = ALIGN(lli_phys + (i + 1) *
					      sizeof(struct d40_phy_lli),
					      D40_LLI_ALIGN);

		if (target)
			dst = target;
		else
			dst = sg_phys(current_sg);

		err = d40_phy_fill_lli(&lli[i],
				       dst,
				       sg_dma_len(current_sg),
				       psize,
				       next_lli_phys,
				       reg_cfg,
				       !next_lli_phys,
				       data_width,
				       target == dst);
		if (err)
			goto err;
	}

	return total_size;
err:
	return err;
}
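/*
 * Writes the source and destination LLIs directly into the physical
 * channel's register set (the SSxxx registers for source, SDxxx for
 * destination).
 */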
void d40_phy_lli_write(void __iomem *virtbase,
		       u32 phy_chan_num,
		       struct d40_phy_lli *lli_dst,
		       struct d40_phy_lli *lli_src)
{
	writel(lli_src->reg_cfg, virtbase + D40_DREG_PCBASE +
	       phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SSCFG);
	writel(lli_src->reg_elt, virtbase + D40_DREG_PCBASE +
	       phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SSELT);
	writel(lli_src->reg_ptr, virtbase + D40_DREG_PCBASE +
	       phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SSPTR);
	writel(lli_src->reg_lnk, virtbase + D40_DREG_PCBASE +
	       phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SSLNK);

	writel(lli_dst->reg_cfg, virtbase + D40_DREG_PCBASE +
	       phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SDCFG);
	writel(lli_dst->reg_elt, virtbase + D40_DREG_PCBASE +
	       phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SDELT);
	writel(lli_dst->reg_ptr, virtbase + D40_DREG_PCBASE +
	       phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SDPTR);
	writel(lli_dst->reg_lnk, virtbase + D40_DREG_PCBASE +
	       phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SDLNK);
}

/* DMA logical lli operations */
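/*
 * Sets the link offsets (SLOS) of a src/dst logical LLI pair so that they
 * point at entry "next", or marks the pair as the last one (terminal count
 * and interrupt on the destination) when next is -EINVAL.
 */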
static void d40_log_lli_link(struct d40_log_lli *lli_dst,
			     struct d40_log_lli *lli_src,
			     int next)
{
	u32 slos = 0;
	u32 dlos = 0;

	if (next != -EINVAL) {
		slos = next * 2;
		dlos = next * 2 + 1;
	} else {
		lli_dst->lcsp13 |= D40_MEM_LCSP1_SCFG_TIM_MASK;
		lli_dst->lcsp13 |= D40_MEM_LCSP3_DTCP_MASK;
	}

	lli_src->lcsp13 = (lli_src->lcsp13 & ~D40_MEM_LCSP1_SLOS_MASK) |
			  (slos << D40_MEM_LCSP1_SLOS_POS);

	lli_dst->lcsp13 = (lli_dst->lcsp13 & ~D40_MEM_LCSP1_SLOS_MASK) |
			  (dlos << D40_MEM_LCSP1_SLOS_POS);
}
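/* Links the LLI pair and writes it to the four LCSP registers in LCPA */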
void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa,
			    struct d40_log_lli *lli_dst,
			    struct d40_log_lli *lli_src,
			    int next)
{
	d40_log_lli_link(lli_dst, lli_src, next);

	writel(lli_src->lcsp02, &lcpa[0].lcsp0);
	writel(lli_src->lcsp13, &lcpa[0].lcsp1);
	writel(lli_dst->lcsp02, &lcpa[0].lcsp2);
	writel(lli_dst->lcsp13, &lcpa[0].lcsp3);
}
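/* Links the LLI pair and writes it to two consecutive LCLA entries */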
void d40_log_lli_lcla_write(struct d40_log_lli *lcla,
			    struct d40_log_lli *lli_dst,
			    struct d40_log_lli *lli_src,
			    int next)
{
	d40_log_lli_link(lli_dst, lli_src, next);

	writel(lli_src->lcsp02, &lcla[0].lcsp02);
	writel(lli_src->lcsp13, &lcla[0].lcsp13);
	writel(lli_dst->lcsp02, &lcla[1].lcsp02);
	writel(lli_dst->lcsp13, &lcla[1].lcsp13);
}
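/*
 * Fills in one logical-channel LLI: the element count and the 16 address
 * LSBs go into LCSP0/2, the 16 address MSBs and the channel config go into
 * LCSP1/3, with address increment enabled when addr_inc is set.
 */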
void d40_log_fill_lli(struct d40_log_lli *lli,
		      dma_addr_t data, u32 data_size,
		      u32 reg_cfg,
		      u32 data_width,
		      bool addr_inc)
{
	lli->lcsp13 = reg_cfg;

	/* The number of elements to transfer */
	lli->lcsp02 = ((data_size >> data_width) <<
		       D40_MEM_LCSP0_ECNT_POS) & D40_MEM_LCSP0_ECNT_MASK;
	/* 16 LSBs address of the current element */
	lli->lcsp02 |= data & D40_MEM_LCSP0_SPTR_MASK;
	/* 16 MSBs address of the current element */
	lli->lcsp13 |= data & D40_MEM_LCSP1_SPTR_MASK;

	if (addr_inc)
		lli->lcsp13 |= D40_MEM_LCSP1_SCFG_INCR_MASK;
}
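/*
 * Builds the src and dst logical LLI lists for a memory<->device transfer:
 * the scatterlist side increments its address, while the device side keeps
 * reading or writing dev_addr. Returns the total transfer size in bytes.
 */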
int d40_log_sg_to_dev(struct scatterlist *sg,
		      int sg_len,
		      struct d40_log_lli_bidir *lli,
		      struct d40_def_lcsp *lcsp,
		      u32 src_data_width,
		      u32 dst_data_width,
		      enum dma_data_direction direction,
		      dma_addr_t dev_addr)
{
	int total_size = 0;
	struct scatterlist *current_sg = sg;
	int i;

	for_each_sg(sg, current_sg, sg_len, i) {
		total_size += sg_dma_len(current_sg);

		if (direction == DMA_TO_DEVICE) {
			d40_log_fill_lli(&lli->src[i],
					 sg_phys(current_sg),
					 sg_dma_len(current_sg),
					 lcsp->lcsp1, src_data_width,
					 true);
			d40_log_fill_lli(&lli->dst[i],
					 dev_addr,
					 sg_dma_len(current_sg),
					 lcsp->lcsp3, dst_data_width,
					 false);
		} else {
			d40_log_fill_lli(&lli->dst[i],
					 sg_phys(current_sg),
					 sg_dma_len(current_sg),
					 lcsp->lcsp3, dst_data_width,
					 true);
			d40_log_fill_lli(&lli->src[i],
					 dev_addr,
					 sg_dma_len(current_sg),
					 lcsp->lcsp1, src_data_width,
					 false);
		}
	}
	return total_size;
}
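/*
 * Builds a single-direction logical LLI list from a scatterlist, using
 * lcsp13 as the preconfigured LCSP1/LCSP3 value. Returns the total
 * transfer size in bytes.
 */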
int d40_log_sg_to_lli(struct scatterlist *sg,
		      int sg_len,
		      struct d40_log_lli *lli_sg,
		      u32 lcsp13, /* src or dst */
		      u32 data_width)
{
	int total_size = 0;
	struct scatterlist *current_sg = sg;
	int i;

	for_each_sg(sg, current_sg, sg_len, i) {
		total_size += sg_dma_len(current_sg);

		d40_log_fill_lli(&lli_sg[i],
				 sg_phys(current_sg),
				 sg_dma_len(current_sg),
				 lcsp13, data_width,
				 true);
	}
	return total_size;
}