drivers/dma/ste_dma40_ll.c

/*
 * Copyright (C) ST-Ericsson SA 2007-2010
 * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
 * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
 * License terms: GNU General Public License (GPL) version 2
 */

#include <linux/kernel.h>
#include <linux/platform_data/dma-ste-dma40.h>

#include "ste_dma40_ll.h"

/* Sets up proper LCSP1 and LCSP3 register for a logical channel */
void d40_log_cfg(struct stedma40_chan_cfg *cfg,
                 u32 *lcsp1, u32 *lcsp3)
{
        u32 l3 = 0; /* dst */
        u32 l1 = 0; /* src */

        /* src is mem? -> increase address pos */
        if (cfg->dir == STEDMA40_MEM_TO_PERIPH ||
            cfg->dir == STEDMA40_MEM_TO_MEM)
                l1 |= 1 << D40_MEM_LCSP1_SCFG_INCR_POS;

        /* dst is mem? -> increase address pos */
        if (cfg->dir == STEDMA40_PERIPH_TO_MEM ||
            cfg->dir == STEDMA40_MEM_TO_MEM)
                l3 |= 1 << D40_MEM_LCSP3_DCFG_INCR_POS;

        /* src is hw? -> master port 1 */
        if (cfg->dir == STEDMA40_PERIPH_TO_MEM ||
            cfg->dir == STEDMA40_PERIPH_TO_PERIPH)
                l1 |= 1 << D40_MEM_LCSP1_SCFG_MST_POS;

        /* dst is hw? -> master port 1 */
        if (cfg->dir == STEDMA40_MEM_TO_PERIPH ||
            cfg->dir == STEDMA40_PERIPH_TO_PERIPH)
                l3 |= 1 << D40_MEM_LCSP3_DCFG_MST_POS;

        l3 |= 1 << D40_MEM_LCSP3_DCFG_EIM_POS;
        l3 |= cfg->dst_info.psize << D40_MEM_LCSP3_DCFG_PSIZE_POS;
        l3 |= cfg->dst_info.data_width << D40_MEM_LCSP3_DCFG_ESIZE_POS;

        l1 |= 1 << D40_MEM_LCSP1_SCFG_EIM_POS;
        l1 |= cfg->src_info.psize << D40_MEM_LCSP1_SCFG_PSIZE_POS;
        l1 |= cfg->src_info.data_width << D40_MEM_LCSP1_SCFG_ESIZE_POS;

        *lcsp1 = l1;
        *lcsp3 = l3;
}

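/*
 * Usage sketch (editor's illustration, not part of the original file): the
 * dma40 core is expected to call d40_log_cfg() roughly like this when it
 * configures a logical channel; all field values below are placeholders.
 *
 *	struct stedma40_chan_cfg cfg = {
 *		.dir = STEDMA40_MEM_TO_PERIPH,
 *		.src_info.data_width = STEDMA40_WORD_WIDTH,
 *		.dst_info.data_width = STEDMA40_WORD_WIDTH,
 *	};
 *	u32 lcsp1, lcsp3;
 *
 *	d40_log_cfg(&cfg, &lcsp1, &lcsp3);
 *	// lcsp1 now describes the source half and lcsp3 the destination
 *	// half; the caller merges them into the channel's LCSP words.
 */
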
/* Sets up SRC and DST CFG register for both logical and physical channels */
void d40_phy_cfg(struct stedma40_chan_cfg *cfg,
                 u32 *src_cfg, u32 *dst_cfg, bool is_log)
{
        u32 src = 0;
        u32 dst = 0;

        if (!is_log) {
                /* Physical channel */
                if ((cfg->dir == STEDMA40_PERIPH_TO_MEM) ||
                    (cfg->dir == STEDMA40_PERIPH_TO_PERIPH)) {
                        /* Set master port to 1 */
                        src |= 1 << D40_SREG_CFG_MST_POS;
                        src |= D40_TYPE_TO_EVENT(cfg->src_dev_type);

                        if (cfg->src_info.flow_ctrl == STEDMA40_NO_FLOW_CTRL)
                                src |= 1 << D40_SREG_CFG_PHY_TM_POS;
                        else
                                src |= 3 << D40_SREG_CFG_PHY_TM_POS;
                }
                if ((cfg->dir == STEDMA40_MEM_TO_PERIPH) ||
                    (cfg->dir == STEDMA40_PERIPH_TO_PERIPH)) {
                        /* Set master port to 1 */
                        dst |= 1 << D40_SREG_CFG_MST_POS;
                        dst |= D40_TYPE_TO_EVENT(cfg->dst_dev_type);

                        if (cfg->dst_info.flow_ctrl == STEDMA40_NO_FLOW_CTRL)
                                dst |= 1 << D40_SREG_CFG_PHY_TM_POS;
                        else
                                dst |= 3 << D40_SREG_CFG_PHY_TM_POS;
                }
                /* Interrupt on end of transfer for destination */
                dst |= 1 << D40_SREG_CFG_TIM_POS;

                /* Generate interrupt on error */
                src |= 1 << D40_SREG_CFG_EIM_POS;
                dst |= 1 << D40_SREG_CFG_EIM_POS;

                /* PSIZE */
                if (cfg->src_info.psize != STEDMA40_PSIZE_PHY_1) {
                        src |= 1 << D40_SREG_CFG_PHY_PEN_POS;
                        src |= cfg->src_info.psize << D40_SREG_CFG_PSIZE_POS;
                }
                if (cfg->dst_info.psize != STEDMA40_PSIZE_PHY_1) {
                        dst |= 1 << D40_SREG_CFG_PHY_PEN_POS;
                        dst |= cfg->dst_info.psize << D40_SREG_CFG_PSIZE_POS;
                }

                /* Element size */
                src |= cfg->src_info.data_width << D40_SREG_CFG_ESIZE_POS;
                dst |= cfg->dst_info.data_width << D40_SREG_CFG_ESIZE_POS;

                /* Set the priority bit to high for the physical channel */
                if (cfg->high_priority) {
                        src |= 1 << D40_SREG_CFG_PRI_POS;
                        dst |= 1 << D40_SREG_CFG_PRI_POS;
                }

        } else {
                /* Logical channel */
                dst |= 1 << D40_SREG_CFG_LOG_GIM_POS;
                src |= 1 << D40_SREG_CFG_LOG_GIM_POS;
        }

        if (cfg->src_info.big_endian)
                src |= 1 << D40_SREG_CFG_LBE_POS;
        if (cfg->dst_info.big_endian)
                dst |= 1 << D40_SREG_CFG_LBE_POS;

        *src_cfg = src;
        *dst_cfg = dst;
}

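/*
 * Usage sketch (editor's illustration, not part of the original file): both
 * configuration words come out of one call and only is_log selects the
 * layout; a caller in the dma40 core might do something like:
 *
 *	u32 src_cfg, dst_cfg;
 *
 *	d40_phy_cfg(&cfg, &src_cfg, &dst_cfg, chan_is_logical);
 *	// On a physical channel the words are written to the channel's
 *	// source/destination configuration registers; on a logical channel
 *	// only the global interrupt mask and endianness bits are set here.
 */
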
static int d40_phy_fill_lli(struct d40_phy_lli *lli,
                            dma_addr_t data,
                            u32 data_size,
                            dma_addr_t next_lli,
                            u32 reg_cfg,
                            struct stedma40_half_channel_info *info,
                            unsigned int flags)
{
        bool addr_inc = flags & LLI_ADDR_INC;
        bool term_int = flags & LLI_TERM_INT;
        unsigned int data_width = info->data_width;
        int psize = info->psize;
        int num_elems;

        if (psize == STEDMA40_PSIZE_PHY_1)
                num_elems = 1;
        else
                num_elems = 2 << psize;

        /* Must be aligned */
        if (!IS_ALIGNED(data, 0x1 << data_width))
                return -EINVAL;

        /* Transfer size can't be smaller than (num_elems * elem_size) */
        if (data_size < num_elems * (0x1 << data_width))
                return -EINVAL;

        /* The number of elements, i.e. how many chunks */
        lli->reg_elt = (data_size >> data_width) << D40_SREG_ELEM_PHY_ECNT_POS;

        /*
         * Distance to next element sized entry.
         * Usually the size of the element unless you want gaps.
         */
        if (addr_inc)
                lli->reg_elt |= (0x1 << data_width) <<
                        D40_SREG_ELEM_PHY_EIDX_POS;

        /* Where the data is */
        lli->reg_ptr = data;
        lli->reg_cfg = reg_cfg;

        /* If this scatter list entry is the last one, no next link */
        if (next_lli == 0)
                lli->reg_lnk = 0x1 << D40_SREG_LNK_PHY_TCP_POS;
        else
                lli->reg_lnk = next_lli;

        /* Set/clear interrupt generation on this link item. */
        if (term_int)
                lli->reg_cfg |= 0x1 << D40_SREG_CFG_TIM_POS;
        else
                lli->reg_cfg &= ~(0x1 << D40_SREG_CFG_TIM_POS);

        /* Post link */
        lli->reg_lnk |= 0 << D40_SREG_LNK_PHY_PRE_POS;

        return 0;
}

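/*
 * Worked example (editor's note, values are illustrative): a 4096-byte chunk
 * with a data_width shift of 2 (32-bit elements) gives an element count of
 * 4096 >> 2 = 1024 in reg_elt, and with LLI_ADDR_INC set the element index
 * becomes 1 << 2 = 4 bytes, i.e. the address advances by exactly one element
 * per transfer with no gaps.
 */
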
static int d40_seg_size(int size, int data_width1, int data_width2)
{
        u32 max_w = max(data_width1, data_width2);
        u32 min_w = min(data_width1, data_width2);
        u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE << min_w, 1 << max_w);

        if (seg_max > STEDMA40_MAX_SEG_SIZE)
                seg_max -= (1 << max_w);

        if (size <= seg_max)
                return size;

        if (size <= 2 * seg_max)
                return ALIGN(size / 2, 1 << max_w);

        return seg_max;
}

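/*
 * Worked example (editor's note, assuming STEDMA40_MAX_SEG_SIZE is the
 * 16-bit element-count limit 0xffff): for data widths of 1 and 4 bytes,
 * min_w = 0 and max_w = 2, so seg_max starts as ALIGN(0xffff, 4) = 0x10000,
 * exceeds the limit and is trimmed to 0xfffc. A 70000-byte piece then splits
 * into two halves of ALIGN(35000, 4) = 35000 bytes, while anything above
 * 2 * seg_max is simply cut at 0xfffc bytes per call.
 */
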
static struct d40_phy_lli *
d40_phy_buf_to_lli(struct d40_phy_lli *lli, dma_addr_t addr, u32 size,
                   dma_addr_t lli_phys, dma_addr_t first_phys, u32 reg_cfg,
                   struct stedma40_half_channel_info *info,
                   struct stedma40_half_channel_info *otherinfo,
                   unsigned long flags)
{
        bool lastlink = flags & LLI_LAST_LINK;
        bool addr_inc = flags & LLI_ADDR_INC;
        bool term_int = flags & LLI_TERM_INT;
        bool cyclic = flags & LLI_CYCLIC;
        int err;
        dma_addr_t next = lli_phys;
        int size_rest = size;
        int size_seg = 0;

        /*
         * This piece may be split up based on d40_seg_size(); we only want
         * the term int on the last part.
         */
        if (term_int)
                flags &= ~LLI_TERM_INT;

        do {
                size_seg = d40_seg_size(size_rest, info->data_width,
                                        otherinfo->data_width);
                size_rest -= size_seg;

                if (size_rest == 0 && term_int)
                        flags |= LLI_TERM_INT;

                if (size_rest == 0 && lastlink)
                        next = cyclic ? first_phys : 0;
                else
                        next = ALIGN(next + sizeof(struct d40_phy_lli),
                                     D40_LLI_ALIGN);

                err = d40_phy_fill_lli(lli, addr, size_seg, next,
                                       reg_cfg, info, flags);

                if (err)
                        goto err;

                lli++;
                if (addr_inc)
                        addr += size_seg;
        } while (size_rest);

        return lli;

err:
        return NULL;
}

int d40_phy_sg_to_lli(struct scatterlist *sg,
                      int sg_len,
                      dma_addr_t target,
                      struct d40_phy_lli *lli_sg,
                      dma_addr_t lli_phys,
                      u32 reg_cfg,
                      struct stedma40_half_channel_info *info,
                      struct stedma40_half_channel_info *otherinfo,
                      unsigned long flags)
{
        int total_size = 0;
        int i;
        struct scatterlist *current_sg = sg;
        struct d40_phy_lli *lli = lli_sg;
        dma_addr_t l_phys = lli_phys;

        if (!target)
                flags |= LLI_ADDR_INC;

        for_each_sg(sg, current_sg, sg_len, i) {
                dma_addr_t sg_addr = sg_dma_address(current_sg);
                unsigned int len = sg_dma_len(current_sg);
                dma_addr_t dst = target ?: sg_addr;

                total_size += sg_dma_len(current_sg);

                if (i == sg_len - 1)
                        flags |= LLI_TERM_INT | LLI_LAST_LINK;

                l_phys = ALIGN(lli_phys + (lli - lli_sg) *
                               sizeof(struct d40_phy_lli), D40_LLI_ALIGN);

                lli = d40_phy_buf_to_lli(lli, dst, len, l_phys, lli_phys,
                                         reg_cfg, info, otherinfo, flags);

                if (lli == NULL)
                        return -EINVAL;
        }

        return total_size;
}

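/*
 * Call sketch (editor's illustration, not part of the original file): for
 * the destination half of a mem-to-device transfer on a physical channel,
 * the core would map the scatterlist and build the chain roughly as below;
 * the local names are placeholders.
 *
 *	int ret = d40_phy_sg_to_lli(sgl, sg_len, dev_fifo_addr,
 *				    lli_dst, lli_dst_phys, dst_cfg,
 *				    &cfg->dst_info, &cfg->src_info, 0);
 *	if (ret < 0)
 *		return ret;	// alignment or minimum-size check failed
 *	// ret is the total number of bytes covered by the chain.
 */
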
/* DMA logical lli operations */

static void d40_log_lli_link(struct d40_log_lli *lli_dst,
                             struct d40_log_lli *lli_src,
                             int next, unsigned int flags)
{
        bool interrupt = flags & LLI_TERM_INT;
        u32 slos = 0;
        u32 dlos = 0;

        if (next != -EINVAL) {
                slos = next * 2;
                dlos = next * 2 + 1;
        }

        if (interrupt) {
                lli_dst->lcsp13 |= D40_MEM_LCSP1_SCFG_TIM_MASK;
                lli_dst->lcsp13 |= D40_MEM_LCSP3_DTCP_MASK;
        }

        lli_src->lcsp13 = (lli_src->lcsp13 & ~D40_MEM_LCSP1_SLOS_MASK) |
                          (slos << D40_MEM_LCSP1_SLOS_POS);

        lli_dst->lcsp13 = (lli_dst->lcsp13 & ~D40_MEM_LCSP1_SLOS_MASK) |
                          (dlos << D40_MEM_LCSP1_SLOS_POS);
}

void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa,
                            struct d40_log_lli *lli_dst,
                            struct d40_log_lli *lli_src,
                            int next, unsigned int flags)
{
        d40_log_lli_link(lli_dst, lli_src, next, flags);

        writel_relaxed(lli_src->lcsp02, &lcpa[0].lcsp0);
        writel_relaxed(lli_src->lcsp13, &lcpa[0].lcsp1);
        writel_relaxed(lli_dst->lcsp02, &lcpa[0].lcsp2);
        writel_relaxed(lli_dst->lcsp13, &lcpa[0].lcsp3);
}

void d40_log_lli_lcla_write(struct d40_log_lli *lcla,
                            struct d40_log_lli *lli_dst,
                            struct d40_log_lli *lli_src,
                            int next, unsigned int flags)
{
        d40_log_lli_link(lli_dst, lli_src, next, flags);

        writel_relaxed(lli_src->lcsp02, &lcla[0].lcsp02);
        writel_relaxed(lli_src->lcsp13, &lcla[0].lcsp13);
        writel_relaxed(lli_dst->lcsp02, &lcla[1].lcsp02);
        writel_relaxed(lli_dst->lcsp13, &lcla[1].lcsp13);
}

static void d40_log_fill_lli(struct d40_log_lli *lli,
                             dma_addr_t data, u32 data_size,
                             u32 reg_cfg,
                             u32 data_width,
                             unsigned int flags)
{
        bool addr_inc = flags & LLI_ADDR_INC;

        lli->lcsp13 = reg_cfg;

        /* The number of elements to transfer */
        lli->lcsp02 = ((data_size >> data_width) <<
                       D40_MEM_LCSP0_ECNT_POS) & D40_MEM_LCSP0_ECNT_MASK;

        BUG_ON((data_size >> data_width) > STEDMA40_MAX_SEG_SIZE);

        /* 16 LSBs address of the current element */
        lli->lcsp02 |= data & D40_MEM_LCSP0_SPTR_MASK;

        /* 16 MSBs address of the current element */
        lli->lcsp13 |= data & D40_MEM_LCSP1_SPTR_MASK;

        if (addr_inc)
                lli->lcsp13 |= D40_MEM_LCSP1_SCFG_INCR_MASK;
}

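/*
 * Editor's note: the 32-bit element address is split across the two logical
 * parameter words; e.g. data == 0x40028000 leaves 0x8000 in the low half of
 * lcsp02 (the 16 LSBs) and 0x40020000 in the high half of lcsp13 (the 16
 * MSBs), matching the SPTR masks used above.
 */
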
static struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg,
                                              dma_addr_t addr,
                                              int size,
                                              u32 lcsp13, /* src or dst */
                                              u32 data_width1,
                                              u32 data_width2,
                                              unsigned int flags)
{
        bool addr_inc = flags & LLI_ADDR_INC;
        struct d40_log_lli *lli = lli_sg;
        int size_rest = size;
        int size_seg = 0;

        do {
                size_seg = d40_seg_size(size_rest, data_width1, data_width2);
                size_rest -= size_seg;

                d40_log_fill_lli(lli,
                                 addr,
                                 size_seg,
                                 lcsp13, data_width1,
                                 flags);
                if (addr_inc)
                        addr += size_seg;
                lli++;
        } while (size_rest);

        return lli;
}

int d40_log_sg_to_lli(struct scatterlist *sg,
                      int sg_len,
                      dma_addr_t dev_addr,
                      struct d40_log_lli *lli_sg,
                      u32 lcsp13, /* src or dst */
                      u32 data_width1, u32 data_width2)
{
        int total_size = 0;
        struct scatterlist *current_sg = sg;
        int i;
        struct d40_log_lli *lli = lli_sg;
        unsigned long flags = 0;

        if (!dev_addr)
                flags |= LLI_ADDR_INC;

        for_each_sg(sg, current_sg, sg_len, i) {
                dma_addr_t sg_addr = sg_dma_address(current_sg);
                unsigned int len = sg_dma_len(current_sg);
                dma_addr_t addr = dev_addr ?: sg_addr;

                total_size += sg_dma_len(current_sg);

                lli = d40_log_buf_to_lli(lli, addr, len,
                                         lcsp13,
                                         data_width1,
                                         data_width2,
                                         flags);
        }

        return total_size;
}

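/*
 * Call sketch (editor's illustration, not part of the original file): on the
 * logical channel path each half is built separately; for a device-to-memory
 * transfer the source half keeps the fixed device address while the
 * destination half walks the scatterlist. Variable names are placeholders.
 *
 *	d40_log_sg_to_lli(sgl, sg_len, dev_fifo_addr, lli_src, lcsp1,
 *			  cfg->src_info.data_width, cfg->dst_info.data_width);
 *	d40_log_sg_to_lli(sgl, sg_len, 0, lli_dst, lcsp3,
 *			  cfg->dst_info.data_width, cfg->src_info.data_width);
 */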