/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */
#ifndef DMAENGINE_H
#define DMAENGINE_H

#include <linux/device.h>
#include <linux/uio.h>
#include <linux/dma-mapping.h>
/**
 * typedef dma_cookie_t - an opaque DMA cookie
 *
 * if dma_cookie_t is >0 it's a DMA request cookie, <0 it's an error code
 */
typedef s32 dma_cookie_t;

#define dma_submit_error(cookie) ((cookie) < 0 ? 1 : 0)
/**
 * enum dma_status - DMA transaction status
 * @DMA_SUCCESS: transaction completed successfully
 * @DMA_IN_PROGRESS: transaction not yet processed
 * @DMA_ERROR: transaction failed
 */
enum dma_status {
	DMA_SUCCESS,
	DMA_IN_PROGRESS,
	DMA_ERROR,
};
/**
 * enum dma_transaction_type - DMA transaction types/indexes
 */
enum dma_transaction_type {
	DMA_MEMCPY,
	DMA_XOR,
	DMA_PQ_XOR,
	DMA_DUAL_XOR,
	DMA_PQ_UPDATE,
	DMA_ZERO_SUM,
	DMA_PQ_ZERO_SUM,
	DMA_MEMSET,
	DMA_MEMCPY_CRC32C,
	DMA_INTERRUPT,
	DMA_PRIVATE,
	DMA_SLAVE,
};

/* last transaction type for creation of the capabilities mask */
#define DMA_TX_TYPE_END (DMA_SLAVE + 1)
/**
 * enum dma_ctrl_flags - DMA flags to augment operation preparation,
 *  control completion, and communicate status.
 * @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of
 *	this transaction
 * @DMA_CTRL_ACK - the descriptor cannot be reused until the client
 *	acknowledges receipt, i.e. has had a chance to establish any
 *	dependency chains
 * @DMA_COMPL_SKIP_SRC_UNMAP - set to disable dma-unmapping the source buffer(s)
 * @DMA_COMPL_SKIP_DEST_UNMAP - set to disable dma-unmapping the destination(s)
 * @DMA_COMPL_SRC_UNMAP_SINGLE - set to do the source dma-unmapping as single
 *	(if not set, do the source dma-unmapping as page)
 * @DMA_COMPL_DEST_UNMAP_SINGLE - set to do the destination dma-unmapping as
 *	single (if not set, do the destination dma-unmapping as page)
 */
enum dma_ctrl_flags {
	DMA_PREP_INTERRUPT = (1 << 0),
	DMA_CTRL_ACK = (1 << 1),
	DMA_COMPL_SKIP_SRC_UNMAP = (1 << 2),
	DMA_COMPL_SKIP_DEST_UNMAP = (1 << 3),
	DMA_COMPL_SRC_UNMAP_SINGLE = (1 << 4),
	DMA_COMPL_DEST_UNMAP_SINGLE = (1 << 5),
};
/**
 * dma_cap_mask_t - capabilities bitmap modeled after cpumask_t.
 * See linux/cpumask.h
 */
typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t;
/**
 * struct dma_chan_percpu - the per-CPU part of struct dma_chan
 * @memcpy_count: transaction counter
 * @bytes_transferred: byte counter
 */
struct dma_chan_percpu {
	/* stats */
	unsigned long memcpy_count;
	unsigned long bytes_transferred;
};
/**
 * struct dma_chan - devices supply DMA channels, clients use them
 * @device: ptr to the dma device who supplies this channel, always !%NULL
 * @cookie: last cookie value returned to client
 * @chan_id: channel ID for sysfs
 * @dev: class device for sysfs
 * @device_node: used to add this to the device chan list
 * @local: per-cpu pointer to a struct dma_chan_percpu
 * @client_count: how many clients are using this channel
 * @table_count: number of appearances in the mem-to-mem allocation table
 * @private: private data for certain client-channel associations
 */
struct dma_chan {
	struct dma_device *device;
	dma_cookie_t cookie;

	/* sysfs */
	int chan_id;
	struct dma_chan_dev *dev;

	struct list_head device_node;
	struct dma_chan_percpu *local;
	int client_count;
	int table_count;
	void *private;
};
/**
 * struct dma_chan_dev - relate sysfs device node to backing channel device
 * @chan: driver channel device
 * @device: sysfs device
 * @dev_id: parent dma_device dev_id
 * @idr_ref: reference count to gate release of dma_device dev_id
 */
struct dma_chan_dev {
	struct dma_chan *chan;
	struct device device;
	int dev_id;
	atomic_t *idr_ref;
};
static inline const char *dma_chan_name(struct dma_chan *chan)
{
	return dev_name(&chan->dev->device);
}

void dma_chan_cleanup(struct kref *kref);
/**
 * typedef dma_filter_fn - callback filter for dma_request_channel
 * @chan: channel to be reviewed
 * @filter_param: opaque parameter passed through dma_request_channel
 *
 * When this optional parameter is specified in a call to dma_request_channel,
 * a suitable channel is passed to this routine for further dispositioning
 * before being returned.  'Suitable' indicates a non-busy channel that
 * satisfies the given capability mask.  The routine returns 'true' to
 * indicate that the channel is suitable.
 */
typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);
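
/*
 * A minimal dma_filter_fn sketch (illustrative only, not part of this
 * header).  It accepts only channels supplied by one specific dma_device;
 * passing a struct device pointer through filter_param is a hypothetical
 * client-side convention, not something this API mandates.
 */
#if 0
static bool my_dma_filter(struct dma_chan *chan, void *filter_param)
{
	struct device *dev = filter_param;	/* hypothetical cookie */

	/* match against the struct device backing this channel's provider */
	return chan->device->dev == dev;
}
#endif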
typedef void (*dma_async_tx_callback)(void *dma_async_param);
/**
 * struct dma_async_tx_descriptor - async transaction descriptor
 * ---dma generic offload fields---
 * @cookie: tracking cookie for this transaction, set to -EBUSY if
 *	this tx is sitting on a dependency list
 * @flags: flags to augment operation preparation, control completion, and
 *	communicate status
 * @phys: physical address of the descriptor
 * @tx_list: driver common field for operations that require multiple
 *	descriptors
 * @chan: target channel for this operation
 * @tx_submit: set the prepared descriptor(s) to be executed by the engine
 * @callback: routine to call after this operation is complete
 * @callback_param: general parameter to pass to the callback routine
 * ---async_tx api specific fields---
 * @next: at completion submit this descriptor
 * @parent: pointer to the next level up in the dependency chain
 * @lock: protect the parent and next pointers
 */
struct dma_async_tx_descriptor {
	dma_cookie_t cookie;
	enum dma_ctrl_flags flags; /* not a 'long' to pack with cookie */
	dma_addr_t phys;
	struct list_head tx_list;
	struct dma_chan *chan;
	dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
	dma_async_tx_callback callback;
	void *callback_param;
	struct dma_async_tx_descriptor *next;
	struct dma_async_tx_descriptor *parent;
	spinlock_t lock;
};
/**
 * struct dma_device - info on the entity supplying DMA services
 * @chancnt: how many DMA channels are supported
 * @privatecnt: how many DMA channels are requested by dma_request_channel
 * @channels: the list of struct dma_chan
 * @global_node: list_head for global dma_device_list
 * @cap_mask: one or more dma_capability flags
 * @max_xor: maximum number of xor sources, 0 if no capability
 * @dev_id: unique device ID
 * @dev: struct device reference for dma mapping api
 * @device_alloc_chan_resources: allocate resources and return the
 *	number of allocated descriptors
 * @device_free_chan_resources: release DMA channel's resources
 * @device_prep_dma_memcpy: prepares a memcpy operation
 * @device_prep_dma_xor: prepares a xor operation
 * @device_prep_dma_zero_sum: prepares a zero_sum operation
 * @device_prep_dma_memset: prepares a memset operation
 * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
 * @device_prep_slave_sg: prepares a slave dma operation
 * @device_terminate_all: terminate all pending operations
 * @device_is_tx_complete: poll for transaction completion
 * @device_issue_pending: push pending transactions to hardware
 */
struct dma_device {

	unsigned int chancnt;
	unsigned int privatecnt;
	struct list_head channels;
	struct list_head global_node;
	dma_cap_mask_t cap_mask;
	int max_xor;

	int dev_id;
	struct device *dev;

	int (*device_alloc_chan_resources)(struct dma_chan *chan);
	void (*device_free_chan_resources)(struct dma_chan *chan);

	struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
		struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_xor)(
		struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
		unsigned int src_cnt, size_t len, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_zero_sum)(
		struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt,
		size_t len, u32 *result, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_memset)(
		struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
		unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
		struct dma_chan *chan, unsigned long flags);

	struct dma_async_tx_descriptor *(*device_prep_slave_sg)(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_data_direction direction,
		unsigned long flags);
	void (*device_terminate_all)(struct dma_chan *chan);

	enum dma_status (*device_is_tx_complete)(struct dma_chan *chan,
			dma_cookie_t cookie, dma_cookie_t *last,
			dma_cookie_t *used);
	void (*device_issue_pending)(struct dma_chan *chan);
};
/* --- public DMA engine API --- */

#ifdef CONFIG_DMA_ENGINE
void dmaengine_get(void);
void dmaengine_put(void);
#else
static inline void dmaengine_get(void)
{
}
static inline void dmaengine_put(void)
{
}
#endif
#ifdef CONFIG_NET_DMA
#define net_dmaengine_get()	dmaengine_get()
#define net_dmaengine_put()	dmaengine_put()
#else
static inline void net_dmaengine_get(void)
{
}
static inline void net_dmaengine_put(void)
{
}
#endif
#ifdef CONFIG_ASYNC_TX_DMA
#define async_dmaengine_get()	dmaengine_get()
#define async_dmaengine_put()	dmaengine_put()
#define async_dma_find_channel(type) dma_find_channel(type)
#else
static inline void async_dmaengine_get(void)
{
}
static inline void async_dmaengine_put(void)
{
}
static inline struct dma_chan *
async_dma_find_channel(enum dma_transaction_type type)
{
	return NULL;
}
#endif /* CONFIG_ASYNC_TX_DMA */
dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
	void *dest, void *src, size_t len);
dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan,
	struct page *page, unsigned int offset, void *kdata, size_t len);
dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan,
	struct page *dest_pg, unsigned int dest_off, struct page *src_pg,
	unsigned int src_off, size_t len);
void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
	struct dma_chan *chan);
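
/*
 * Example use of the offloaded-memcpy helpers (illustrative only).  A
 * negative cookie signals a submission failure, which is exactly what
 * dma_submit_error() tests for; the function name is hypothetical.
 */
#if 0
static int copy_with_engine(struct dma_chan *chan, void *dst, void *src,
			    size_t len)
{
	dma_cookie_t cookie;

	cookie = dma_async_memcpy_buf_to_buf(chan, dst, src, len);
	if (dma_submit_error(cookie))
		return -EIO;	/* real code would fall back to memcpy() */

	/* flush the queued copy to hardware */
	dma_async_memcpy_issue_pending(chan);
	return 0;
}
#endif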
static inline void async_tx_ack(struct dma_async_tx_descriptor *tx)
{
	tx->flags |= DMA_CTRL_ACK;
}

static inline void async_tx_clear_ack(struct dma_async_tx_descriptor *tx)
{
	tx->flags &= ~DMA_CTRL_ACK;
}

static inline bool async_tx_test_ack(struct dma_async_tx_descriptor *tx)
{
	return (tx->flags & DMA_CTRL_ACK) == DMA_CTRL_ACK;
}
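
/*
 * Sketch of how a provider might use the ACK bit (illustrative only): a
 * completed descriptor can only be recycled once the client has
 * acknowledged it, because an un-acked descriptor may still gain
 * dependent operations.  'struct my_desc', its 'txd' member, and
 * free_desc() are hypothetical driver constructs.
 */
#if 0
static void recycle_if_acked(struct my_desc *desc)
{
	if (async_tx_test_ack(&desc->txd))
		free_desc(desc);	/* safe: client is done with it */
}
#endif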
#define first_dma_cap(mask) __first_dma_cap(&(mask))
static inline int __first_dma_cap(const dma_cap_mask_t *srcp)
{
	return min_t(int, DMA_TX_TYPE_END,
		find_first_bit(srcp->bits, DMA_TX_TYPE_END));
}

#define next_dma_cap(n, mask) __next_dma_cap((n), &(mask))
static inline int __next_dma_cap(int n, const dma_cap_mask_t *srcp)
{
	return min_t(int, DMA_TX_TYPE_END,
		find_next_bit(srcp->bits, DMA_TX_TYPE_END, n+1));
}
#define dma_cap_set(tx, mask) __dma_cap_set((tx), &(mask))
static inline void
__dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
{
	set_bit(tx_type, dstp->bits);
}

#define dma_cap_clear(tx, mask) __dma_cap_clear((tx), &(mask))
static inline void
__dma_cap_clear(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
{
	clear_bit(tx_type, dstp->bits);
}
#define dma_cap_zero(mask) __dma_cap_zero(&(mask))
static inline void __dma_cap_zero(dma_cap_mask_t *dstp)
{
	bitmap_zero(dstp->bits, DMA_TX_TYPE_END);
}

#define dma_has_cap(tx, mask) __dma_has_cap((tx), &(mask))
static inline int
__dma_has_cap(enum dma_transaction_type tx_type, dma_cap_mask_t *srcp)
{
	return test_bit(tx_type, srcp->bits);
}
#define for_each_dma_cap_mask(cap, mask) \
	for ((cap) = first_dma_cap(mask);	\
		(cap) < DMA_TX_TYPE_END;	\
		(cap) = next_dma_cap((cap), (mask)))
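
/*
 * Example of walking a capability mask (illustrative only): count how
 * many transaction types a device claims to support.  count_caps is a
 * hypothetical helper, not part of this API.
 */
#if 0
static int count_caps(struct dma_device *device)
{
	enum dma_transaction_type cap;
	int n = 0;

	for_each_dma_cap_mask(cap, device->cap_mask)
		n++;
	return n;
}
#endif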
/**
 * dma_async_issue_pending - flush pending transactions to HW
 * @chan: target DMA channel
 *
 * This allows drivers to push copies to HW in batches,
 * reducing MMIO writes where possible.
 */
static inline void dma_async_issue_pending(struct dma_chan *chan)
{
	chan->device->device_issue_pending(chan);
}

#define dma_async_memcpy_issue_pending(chan) dma_async_issue_pending(chan)
/**
 * dma_async_is_tx_complete - poll for transaction completion
 * @chan: DMA channel
 * @cookie: transaction identifier to check status of
 * @last: returns last completed cookie, can be NULL
 * @used: returns last issued cookie, can be NULL
 *
 * If @last and @used are passed in, upon return they reflect the driver
 * internal state and can be used with dma_async_is_complete() to check
 * the status of multiple cookies without re-checking hardware state.
 */
static inline enum dma_status dma_async_is_tx_complete(struct dma_chan *chan,
	dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used)
{
	return chan->device->device_is_tx_complete(chan, cookie, last, used);
}

#define dma_async_memcpy_complete(chan, cookie, last, used)\
	dma_async_is_tx_complete(chan, cookie, last, used)
/**
 * dma_async_is_complete - test a cookie against chan state
 * @cookie: transaction identifier to test status of
 * @last_complete: last known completed transaction
 * @last_used: last cookie value handed out
 *
 * dma_async_is_complete() is used in dma_async_memcpy_complete();
 * the test logic is separated for lightweight testing of multiple cookies.
 */
static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
			dma_cookie_t last_complete, dma_cookie_t last_used)
{
	if (last_complete <= last_used) {
		if ((cookie <= last_complete) || (cookie > last_used))
			return DMA_SUCCESS;
	} else {
		if ((cookie <= last_complete) && (cookie > last_used))
			return DMA_SUCCESS;
	}
	return DMA_IN_PROGRESS;
}
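
/*
 * Example polling pattern (illustrative only): test several cookies
 * against one hardware read by caching @last/@used, as described above.
 * both_done is a hypothetical helper.
 */
#if 0
static bool both_done(struct dma_chan *chan, dma_cookie_t c1, dma_cookie_t c2)
{
	dma_cookie_t last, used;

	/* one trip to the driver/hardware ... */
	if (dma_async_is_tx_complete(chan, c1, &last, &used) == DMA_ERROR)
		return false;

	/* ... then test both cookies against the cached channel state */
	return dma_async_is_complete(c1, last, used) == DMA_SUCCESS &&
	       dma_async_is_complete(c2, last, used) == DMA_SUCCESS;
}
#endif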
enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
#ifdef CONFIG_DMA_ENGINE
enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
void dma_issue_pending_all(void);
#else
static inline enum dma_status
dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
	return DMA_SUCCESS;
}
static inline void dma_issue_pending_all(void)
{
}
#endif
/* --- DMA device --- */

int dma_async_device_register(struct dma_device *device);
void dma_async_device_unregister(struct dma_device *device);
void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
#define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask,
	dma_filter_fn fn, void *fn_param);
void dma_release_channel(struct dma_chan *chan);
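
/*
 * Typical channel-allocation sequence (illustrative only): build a
 * capability mask, optionally supply a filter, and release the channel
 * when done.  grab_memcpy_chan is hypothetical; my_dma_filter refers to
 * the filter sketched earlier in this file.
 */
#if 0
static struct dma_chan *grab_memcpy_chan(struct device *dev)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	/* the filter may be NULL if any capable channel will do */
	return dma_request_channel(mask, my_dma_filter, dev);
}

/* ... and later, when finished: dma_release_channel(chan); */
#endif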
/* --- Helper iov-locking functions --- */

struct dma_page_list {
	char __user *base_address;
	int nr_pages;
	struct page **pages;
};

struct dma_pinned_list {
	int nr_iovecs;
	struct dma_page_list page_list[0];
};

struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
void dma_unpin_iovec_pages(struct dma_pinned_list *pinned_list);
dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
	struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
	struct dma_pinned_list *pinned_list, struct page *page,
	unsigned int offset, size_t len);
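
/*
 * Sketch of the iov-locking helpers (illustrative only): pin the user
 * iovec's pages once, perform a DMA copy into it, then unpin.  The
 * function name is hypothetical, and a real user must keep the pages
 * pinned until the copy has actually completed.
 */
#if 0
static dma_cookie_t copy_to_user_iovec(struct dma_chan *chan,
				       struct iovec *iov,
				       unsigned char *kdata, size_t len)
{
	struct dma_pinned_list *pinned;
	dma_cookie_t cookie;

	pinned = dma_pin_iovec_pages(iov, len);
	if (!pinned)
		return -ENOMEM;

	cookie = dma_memcpy_to_iovec(chan, iov, pinned, kdata, len);

	/* simplification: unpin only after the cookie completes */
	dma_unpin_iovec_pages(pinned);
	return cookie;
}
#endif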
#endif /* DMAENGINE_H */