/*
 * linux/drivers/mmc/tmio_mmc_dma.c
 *
 * Copyright (C) 2010-2011 Guennadi Liakhovetski
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * DMA function for TMIO MMC implementations
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
#include <linux/mmc/tmio.h>
#include <linux/pagemap.h>
#include <linux/scatterlist.h>

#include "tmio_mmc.h"

#define TMIO_MMC_MIN_DMA_LEN 8
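
/*
 * Requests shorter than the threshold above are handed back to PIO in the
 * start_dma paths below; presumably the mapping and descriptor-setup
 * overhead outweighs any DMA benefit for such tiny transfers.
 */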

static void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
{
#if defined(CONFIG_SUPERH) || defined(CONFIG_ARCH_SHMOBILE)
	/* Switch DMA mode on or off - SuperH specific? */
	writew(enable ? 2 : 0, host->ctl + (0xd8 << host->bus_shift));
#endif
}
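
/*
 * The 0xd8 offset written above is the DMA mode/enable register of the
 * TMIO core (named CTL_DMA_ENABLE in later versions of tmio_mmc.h);
 * writing 2 appears to set the DMA enable bit, 0 reverts to PIO.
 */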

static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
{
	struct scatterlist *sg = host->sg_ptr, *sg_tmp;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_rx;
	struct tmio_mmc_data *pdata = host->pdata;
	dma_cookie_t cookie;
	int ret, i;
	bool aligned = true, multiple = true;
	unsigned int align = (1 << pdata->dma->alignment_shift) - 1;

	/* Check that all sg elements meet the DMA alignment requirements */
	for_each_sg(sg, sg_tmp, host->sg_len, i) {
		if (sg_tmp->offset & align)
			aligned = false;
		if (sg_tmp->length & align) {
			multiple = false;
			break;
		}
	}

	if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
			  (align & PAGE_MASK))) || !multiple) {
		ret = -EINVAL;
		goto pio;
	}

	if (sg->length < TMIO_MMC_MIN_DMA_LEN) {
		host->force_pio = true;
		return;
	}

	tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_RXRDY);

	/* The only sg element can be unaligned, use our bounce buffer then */
	if (!aligned) {
		sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
		host->sg_ptr = &host->bounce_sg;
		sg = host->sg_ptr;
	}

	ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE);
	if (ret > 0)
		desc = chan->device->device_prep_slave_sg(chan, sg, ret,
			DMA_FROM_DEVICE, DMA_CTRL_ACK);

	if (desc) {
		cookie = dmaengine_submit(desc);
		if (cookie < 0) {
			desc = NULL;
			ret = cookie;
		}
	}
	dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
		__func__, host->sg_len, ret, cookie, host->mrq);

pio:
	if (!desc) {
		/* DMA failed, fall back to PIO */
		if (ret >= 0)
			ret = -EIO;
		host->chan_rx = NULL;
		dma_release_channel(chan);
		/* Free the Tx channel too */
		chan = host->chan_tx;
		if (chan) {
			host->chan_tx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(&host->pdev->dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
		tmio_mmc_enable_dma(host, false);
	}

	dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
		desc, cookie, host->sg_len);
}
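
/*
 * The Tx path below mirrors the Rx path above. The one difference is the
 * bounce-buffer handling: on Tx the payload must be copied into the bounce
 * buffer before the transfer is prepared, whereas on Rx the copy back out
 * of the bounce buffer happens after completion, outside this file.
 */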

static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
{
	struct scatterlist *sg = host->sg_ptr, *sg_tmp;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_tx;
	struct tmio_mmc_data *pdata = host->pdata;
	dma_cookie_t cookie;
	int ret, i;
	bool aligned = true, multiple = true;
	unsigned int align = (1 << pdata->dma->alignment_shift) - 1;

	/* Check that all sg elements meet the DMA alignment requirements */
	for_each_sg(sg, sg_tmp, host->sg_len, i) {
		if (sg_tmp->offset & align)
			aligned = false;
		if (sg_tmp->length & align) {
			multiple = false;
			break;
		}
	}

	if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
			  (align & PAGE_MASK))) || !multiple) {
		ret = -EINVAL;
		goto pio;
	}

	if (sg->length < TMIO_MMC_MIN_DMA_LEN) {
		host->force_pio = true;
		return;
	}

	tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_TXRQ);

	/* The only sg element can be unaligned, use our bounce buffer then */
	if (!aligned) {
		unsigned long flags;
		void *sg_vaddr = tmio_mmc_kmap_atomic(sg, &flags);
		sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
		memcpy(host->bounce_buf, sg_vaddr, host->bounce_sg.length);
		tmio_mmc_kunmap_atomic(sg, &flags, sg_vaddr);
		host->sg_ptr = &host->bounce_sg;
		sg = host->sg_ptr;
	}

	ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE);
	if (ret > 0)
		desc = chan->device->device_prep_slave_sg(chan, sg, ret,
			DMA_TO_DEVICE, DMA_CTRL_ACK);

	if (desc) {
		cookie = dmaengine_submit(desc);
		if (cookie < 0) {
			desc = NULL;
			ret = cookie;
		}
	}
	dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
		__func__, host->sg_len, ret, cookie, host->mrq);

pio:
	if (!desc) {
		/* DMA failed, fall back to PIO */
		if (ret >= 0)
			ret = -EIO;
		host->chan_tx = NULL;
		dma_release_channel(chan);
		/* Free the Rx channel too */
		chan = host->chan_rx;
		if (chan) {
			host->chan_rx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(&host->pdev->dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
		tmio_mmc_enable_dma(host, false);
	}

	dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__,
		desc, cookie);
}
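
/*
 * dmaengine slave sequence as used by both paths above: dma_map_sg() maps
 * the scatterlist, device_prep_slave_sg() builds a descriptor,
 * dmaengine_submit() queues it, and the transfer is only kicked off by
 * dma_async_issue_pending() in tmio_mmc_issue_tasklet_fn() below.
 */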

void tmio_mmc_start_dma(struct tmio_mmc_host *host,
			struct mmc_data *data)
{
	if (data->flags & MMC_DATA_READ) {
		if (host->chan_rx)
			tmio_mmc_start_dma_rx(host);
	} else {
		if (host->chan_tx)
			tmio_mmc_start_dma_tx(host);
	}
}

static void tmio_mmc_issue_tasklet_fn(unsigned long priv)
{
	struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv;
	struct dma_chan *chan = NULL;

	spin_lock_irq(&host->lock);

	if (host && host->data) {
		if (host->data->flags & MMC_DATA_READ)
			chan = host->chan_rx;
		else
			chan = host->chan_tx;
	}

	spin_unlock_irq(&host->lock);

	tmio_mmc_enable_mmc_irqs(host, TMIO_STAT_DATAEND);

	if (chan)
		dma_async_issue_pending(chan);
}
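
/*
 * This tasklet runs once the command phase is done (it is scheduled from
 * the core tmio_mmc interrupt path, outside this file): DATAEND is
 * re-enabled first, so the data-end interrupt can signal DMA completion.
 */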

static void tmio_mmc_tasklet_fn(unsigned long arg)
{
	struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;

	spin_lock_irq(&host->lock);

	if (!host->data)
		goto out;

	if (host->data->flags & MMC_DATA_READ)
		dma_unmap_sg(host->chan_rx->device->dev,
			     host->sg_ptr, host->sg_len,
			     DMA_FROM_DEVICE);
	else
		dma_unmap_sg(host->chan_tx->device->dev,
			     host->sg_ptr, host->sg_len,
			     DMA_TO_DEVICE);

	tmio_mmc_do_data_irq(host);
out:
	spin_unlock_irq(&host->lock);
}
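
/*
 * DMA completion bottom half: unmap the scatterlist for the direction that
 * just finished and let tmio_mmc_do_data_irq() complete the request.
 */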

/* It might be necessary to make filter MFD specific */
static bool tmio_mmc_filter(struct dma_chan *chan, void *arg)
{
	dev_dbg(chan->device->dev, "%s: slave data %p\n", __func__, arg);
	chan->private = arg;
	return true;
}

void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata)
{
	/* We can only either use DMA for both Tx and Rx or not use it at all */
	if (!pdata->dma)
		return;

	if (!host->chan_tx && !host->chan_rx) {
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		host->chan_tx = dma_request_channel(mask, tmio_mmc_filter,
						    pdata->dma->chan_priv_tx);
		dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__,
			host->chan_tx);

		if (!host->chan_tx)
			return;

		host->chan_rx = dma_request_channel(mask, tmio_mmc_filter,
						    pdata->dma->chan_priv_rx);
		dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__,
			host->chan_rx);

		if (!host->chan_rx)
			goto ereqrx;

		host->bounce_buf = (u8 *)__get_free_page(GFP_KERNEL | GFP_DMA);
		if (!host->bounce_buf)
			goto ebouncebuf;

		tasklet_init(&host->dma_complete, tmio_mmc_tasklet_fn, (unsigned long)host);
		tasklet_init(&host->dma_issue, tmio_mmc_issue_tasklet_fn, (unsigned long)host);
	}

	tmio_mmc_enable_dma(host, true);

	return;

ebouncebuf:
	dma_release_channel(host->chan_rx);
	host->chan_rx = NULL;
ereqrx:
	dma_release_channel(host->chan_tx);
	host->chan_tx = NULL;
}
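
/*
 * Usage sketch, with assumptions marked: a platform opts in to DMA by
 * hanging a struct tmio_mmc_dma off its tmio_mmc_data before the device
 * probes. my_tx_slave/my_rx_slave are hypothetical slave descriptors whose
 * layout is defined by the dmaengine driver that receives them through
 * tmio_mmc_filter() via chan->private:
 *
 *	static struct tmio_mmc_dma my_dma = {
 *		.chan_priv_tx	 = &my_tx_slave,
 *		.chan_priv_rx	 = &my_rx_slave,
 *		.alignment_shift = 1,	(2-byte alignment, an assumption)
 *	};
 *
 *	my_mmc_pdata.dma = &my_dma;
 */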

void tmio_mmc_release_dma(struct tmio_mmc_host *host)
{
	if (host->chan_tx) {
		struct dma_chan *chan = host->chan_tx;
		host->chan_tx = NULL;
		dma_release_channel(chan);
	}
	if (host->chan_rx) {
		struct dma_chan *chan = host->chan_rx;
		host->chan_rx = NULL;
		dma_release_channel(chan);
	}
	if (host->bounce_buf) {
		free_pages((unsigned long)host->bounce_buf, 0);
		host->bounce_buf = NULL;
	}
}