/*
 * Copyright(c) 2004 - 2006 Intel Corporation.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */
#ifndef DMAENGINE_H
#define DMAENGINE_H

#include <linux/device.h>
#include <linux/uio.h>
#include <linux/dma-mapping.h>

/**
 * typedef dma_cookie_t - an opaque DMA cookie
 *
 * if dma_cookie_t is >0 it's a DMA request cookie, <0 it's an error code
 */
typedef s32 dma_cookie_t;

#define dma_submit_error(cookie) ((cookie) < 0 ? 1 : 0)

/**
 * enum dma_status - DMA transaction status
 * @DMA_SUCCESS: transaction completed successfully
 * @DMA_IN_PROGRESS: transaction not yet processed
 * @DMA_ERROR: transaction failed
 */
enum dma_status {
	DMA_SUCCESS,
	DMA_IN_PROGRESS,
	DMA_ERROR,
};

/**
 * enum dma_transaction_type - DMA transaction types/indexes
 */
enum dma_transaction_type {
	DMA_MEMCPY,
	DMA_XOR,
	DMA_PQ_XOR,
	DMA_DUAL_XOR,
	DMA_PQ_UPDATE,
	DMA_ZERO_SUM,
	DMA_PQ_ZERO_SUM,
	DMA_MEMSET,
	DMA_MEMCPY_CRC32C,
	DMA_INTERRUPT,
	DMA_PRIVATE,
	DMA_SLAVE,
};

/* last transaction type for creation of the capabilities mask */
#define DMA_TX_TYPE_END (DMA_SLAVE + 1)

/**
 * enum dma_ctrl_flags - DMA flags to augment operation preparation,
 *	control completion, and communicate status
 * @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of
 *	this transaction
 * @DMA_CTRL_ACK - the descriptor cannot be reused until the client
 *	acknowledges receipt, i.e. has had a chance to establish any
 *	dependency chains
 * @DMA_COMPL_SKIP_SRC_UNMAP - set to disable dma-unmapping the source buffer(s)
 * @DMA_COMPL_SKIP_DEST_UNMAP - set to disable dma-unmapping the destination(s)
 */
enum dma_ctrl_flags {
	DMA_PREP_INTERRUPT = (1 << 0),
	DMA_CTRL_ACK = (1 << 1),
	DMA_COMPL_SKIP_SRC_UNMAP = (1 << 2),
	DMA_COMPL_SKIP_DEST_UNMAP = (1 << 3),
};

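/*
 * Illustrative sketch (not part of this header): combining control flags
 * when preparing a descriptor.  "chan", "dst", "src" and "len" are assumed
 * to be supplied by the caller; clients normally reach the prep callback
 * through a higher-level helper rather than calling it directly.
 *
 *	unsigned long flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
 *	struct dma_async_tx_descriptor *tx;
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len, flags);
 */
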
/**
 * dma_cap_mask_t - capabilities bitmap modeled after cpumask_t.
 * See linux/cpumask.h
 */
typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t;

/**
 * struct dma_chan_percpu - the per-CPU part of struct dma_chan
 * @memcpy_count: transaction counter
 * @bytes_transferred: byte counter
 */

struct dma_chan_percpu {
	/* stats */
	unsigned long memcpy_count;
	unsigned long bytes_transferred;
};

/**
 * struct dma_chan - devices supply DMA channels, clients use them
 * @device: ptr to the dma device that supplies this channel, always !%NULL
 * @cookie: last cookie value returned to client
 * @chan_id: channel ID for sysfs
 * @dev: class device for sysfs
 * @device_node: used to add this to the device chan list
 * @local: per-cpu pointer to a struct dma_chan_percpu
 * @client_count: how many clients are using this channel
 * @table_count: number of appearances in the mem-to-mem allocation table
 * @private: private data for certain client-channel associations
 */
struct dma_chan {
	struct dma_device *device;
	dma_cookie_t cookie;

	/* sysfs */
	int chan_id;
	struct dma_chan_dev *dev;

	struct list_head device_node;
	struct dma_chan_percpu *local;
	int client_count;
	int table_count;
	void *private;
};

/**
 * struct dma_chan_dev - relate sysfs device node to backing channel device
 * @chan: driver channel device
 * @device: sysfs device
 * @dev_id: parent dma_device dev_id
 * @idr_ref: reference count to gate release of dma_device dev_id
 */
struct dma_chan_dev {
	struct dma_chan *chan;
	struct device device;
	int dev_id;
	atomic_t *idr_ref;
};

static inline const char *dma_chan_name(struct dma_chan *chan)
{
	return dev_name(&chan->dev->device);
}

void dma_chan_cleanup(struct kref *kref);

/**
 * typedef dma_filter_fn - callback filter for dma_request_channel
 * @chan: channel to be reviewed
 * @filter_param: opaque parameter passed through dma_request_channel
 *
 * When this optional parameter is specified in a call to dma_request_channel a
 * suitable channel is passed to this routine for further dispositioning before
 * being returned; 'suitable' here means a non-busy channel that satisfies the
 * given capability mask.  The routine returns 'true' to indicate that the
 * channel is suitable.
 */
typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);

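/*
 * Illustrative sketch (not part of this header): a filter that accepts
 * only channels belonging to one specific device.  "my_dev" is a
 * hypothetical pointer the caller passes as the filter_param argument
 * of dma_request_channel().
 *
 *	static bool my_filter(struct dma_chan *chan, void *filter_param)
 *	{
 *		struct device *my_dev = filter_param;
 *
 *		return chan->device->dev == my_dev;
 *	}
 */
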
typedef void (*dma_async_tx_callback)(void *dma_async_param);

/**
 * struct dma_async_tx_descriptor - async transaction descriptor
 * ---dma generic offload fields---
 * @cookie: tracking cookie for this transaction, set to -EBUSY if
 *	this tx is sitting on a dependency list
 * @flags: flags to augment operation preparation, control completion, and
 *	communicate status
 * @phys: physical address of the descriptor
 * @tx_list: driver common field for operations that require multiple
 *	descriptors
 * @chan: target channel for this operation
 * @tx_submit: set the prepared descriptor(s) to be executed by the engine
 * @callback: routine to call after this operation is complete
 * @callback_param: general parameter to pass to the callback routine
 * ---async_tx api specific fields---
 * @next: at completion submit this descriptor
 * @parent: pointer to the next level up in the dependency chain
 * @lock: protect the parent and next pointers
 */
struct dma_async_tx_descriptor {
	dma_cookie_t cookie;
	enum dma_ctrl_flags flags; /* not a 'long' to pack with cookie */
	dma_addr_t phys;
	struct list_head tx_list;
	struct dma_chan *chan;
	dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
	dma_async_tx_callback callback;
	void *callback_param;
	struct dma_async_tx_descriptor *next;
	struct dma_async_tx_descriptor *parent;
	spinlock_t lock;
};

/**
 * struct dma_device - info on the entity supplying DMA services
 * @chancnt: how many DMA channels are supported
 * @privatecnt: how many DMA channels are requested by dma_request_channel
 * @channels: the list of struct dma_chan
 * @global_node: list_head for global dma_device_list
 * @cap_mask: one or more dma_capability flags
 * @max_xor: maximum number of xor sources, 0 if no capability
 * @dev_id: unique device ID
 * @dev: struct device reference for dma mapping api
 * @device_alloc_chan_resources: allocate resources and return the
 *	number of allocated descriptors
 * @device_free_chan_resources: release DMA channel's resources
 * @device_prep_dma_memcpy: prepares a memcpy operation
 * @device_prep_dma_xor: prepares a xor operation
 * @device_prep_dma_zero_sum: prepares a zero_sum operation
 * @device_prep_dma_memset: prepares a memset operation
 * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
 * @device_prep_slave_sg: prepares a slave dma operation
 * @device_terminate_all: terminate all pending operations
 * @device_is_tx_complete: poll for transaction completion
 * @device_issue_pending: push pending transactions to hardware
 */
struct dma_device {

	unsigned int chancnt;
	unsigned int privatecnt;
	struct list_head channels;
	struct list_head global_node;
	dma_cap_mask_t cap_mask;
	int max_xor;

	int dev_id;
	struct device *dev;

	int (*device_alloc_chan_resources)(struct dma_chan *chan);
	void (*device_free_chan_resources)(struct dma_chan *chan);

	struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
		struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_xor)(
		struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
		unsigned int src_cnt, size_t len, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_zero_sum)(
		struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt,
		size_t len, u32 *result, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_memset)(
		struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
		unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
		struct dma_chan *chan, unsigned long flags);

	struct dma_async_tx_descriptor *(*device_prep_slave_sg)(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_data_direction direction,
		unsigned long flags);
	void (*device_terminate_all)(struct dma_chan *chan);

	enum dma_status (*device_is_tx_complete)(struct dma_chan *chan,
			dma_cookie_t cookie, dma_cookie_t *last,
			dma_cookie_t *used);
	void (*device_issue_pending)(struct dma_chan *chan);
};

/* --- public DMA engine API --- */

#ifdef CONFIG_DMA_ENGINE
void dmaengine_get(void);
void dmaengine_put(void);
#else
static inline void dmaengine_get(void)
{
}
static inline void dmaengine_put(void)
{
}
#endif

#ifdef CONFIG_NET_DMA
#define net_dmaengine_get()	dmaengine_get()
#define net_dmaengine_put()	dmaengine_put()
#else
static inline void net_dmaengine_get(void)
{
}
static inline void net_dmaengine_put(void)
{
}
#endif

#ifdef CONFIG_ASYNC_TX_DMA
#define async_dmaengine_get()	dmaengine_get()
#define async_dmaengine_put()	dmaengine_put()
#define async_dma_find_channel(type) dma_find_channel(type)
#else
static inline void async_dmaengine_get(void)
{
}
static inline void async_dmaengine_put(void)
{
}
static inline struct dma_chan *
async_dma_find_channel(enum dma_transaction_type type)
{
	return NULL;
}
#endif

dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
	void *dest, void *src, size_t len);
dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan,
	struct page *page, unsigned int offset, void *kdata, size_t len);
dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan,
	struct page *dest_pg, unsigned int dest_off, struct page *src_pg,
	unsigned int src_off, size_t len);
void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
	struct dma_chan *chan);

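/*
 * Illustrative sketch (not part of this header): offloading a
 * buffer-to-buffer copy and spinning until it completes.  "chan" is
 * assumed to be a memcpy-capable channel obtained elsewhere, and error
 * handling is abbreviated.
 *
 *	dma_cookie_t cookie;
 *
 *	cookie = dma_async_memcpy_buf_to_buf(chan, dest, src, len);
 *	if (dma_submit_error(cookie))
 *		return -ENOMEM;
 *	if (dma_sync_wait(chan, cookie) != DMA_SUCCESS)
 *		return -EIO;
 */
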
static inline void async_tx_ack(struct dma_async_tx_descriptor *tx)
{
	tx->flags |= DMA_CTRL_ACK;
}

static inline void async_tx_clear_ack(struct dma_async_tx_descriptor *tx)
{
	tx->flags &= ~DMA_CTRL_ACK;
}

static inline bool async_tx_test_ack(struct dma_async_tx_descriptor *tx)
{
	return (tx->flags & DMA_CTRL_ACK) == DMA_CTRL_ACK;
}

#define first_dma_cap(mask) __first_dma_cap(&(mask))
static inline int __first_dma_cap(const dma_cap_mask_t *srcp)
{
	return min_t(int, DMA_TX_TYPE_END,
		find_first_bit(srcp->bits, DMA_TX_TYPE_END));
}

#define next_dma_cap(n, mask) __next_dma_cap((n), &(mask))
static inline int __next_dma_cap(int n, const dma_cap_mask_t *srcp)
{
	return min_t(int, DMA_TX_TYPE_END,
		find_next_bit(srcp->bits, DMA_TX_TYPE_END, n+1));
}

#define dma_cap_set(tx, mask) __dma_cap_set((tx), &(mask))
static inline void
__dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
{
	set_bit(tx_type, dstp->bits);
}

#define dma_cap_clear(tx, mask) __dma_cap_clear((tx), &(mask))
static inline void
__dma_cap_clear(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
{
	clear_bit(tx_type, dstp->bits);
}

#define dma_cap_zero(mask) __dma_cap_zero(&(mask))
static inline void __dma_cap_zero(dma_cap_mask_t *dstp)
{
	bitmap_zero(dstp->bits, DMA_TX_TYPE_END);
}

#define dma_has_cap(tx, mask) __dma_has_cap((tx), &(mask))
static inline int
__dma_has_cap(enum dma_transaction_type tx_type, dma_cap_mask_t *srcp)
{
	return test_bit(tx_type, srcp->bits);
}

#define for_each_dma_cap_mask(cap, mask) \
	for ((cap) = first_dma_cap(mask);	\
		(cap) < DMA_TX_TYPE_END;	\
		(cap) = next_dma_cap((cap), (mask)))

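/*
 * Illustrative sketch (not part of this header): building a capability
 * mask and walking the capabilities set in it.
 *
 *	dma_cap_mask_t mask;
 *	enum dma_transaction_type cap;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	dma_cap_set(DMA_XOR, mask);
 *	for_each_dma_cap_mask(cap, mask)
 *		pr_debug("cap %d set\n", cap);
 */
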
/**
 * dma_async_issue_pending - flush pending transactions to HW
 * @chan: target DMA channel
 *
 * This allows drivers to push copies to HW in batches,
 * reducing MMIO writes where possible.
 */
static inline void dma_async_issue_pending(struct dma_chan *chan)
{
	chan->device->device_issue_pending(chan);
}

#define dma_async_memcpy_issue_pending(chan) dma_async_issue_pending(chan)

/**
 * dma_async_is_tx_complete - poll for transaction completion
 * @chan: DMA channel
 * @cookie: transaction identifier to check status of
 * @last: returns last completed cookie, can be NULL
 * @used: returns last issued cookie, can be NULL
 *
 * If @last and @used are passed in, upon return they reflect the driver
 * internal state and can be used with dma_async_is_complete() to check
 * the status of multiple cookies without re-checking hardware state.
 */
static inline enum dma_status dma_async_is_tx_complete(struct dma_chan *chan,
	dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used)
{
	return chan->device->device_is_tx_complete(chan, cookie, last, used);
}

#define dma_async_memcpy_complete(chan, cookie, last, used)\
	dma_async_is_tx_complete(chan, cookie, last, used)

/**
 * dma_async_is_complete - test a cookie against chan state
 * @cookie: transaction identifier to test status of
 * @last_complete: last known completed transaction
 * @last_used: last cookie value handed out
 *
 * dma_async_is_complete() is used in dma_async_memcpy_complete();
 * the test logic is separated for lightweight testing of multiple cookies
 */
static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
			dma_cookie_t last_complete, dma_cookie_t last_used)
{
	if (last_complete <= last_used) {
		if ((cookie <= last_complete) || (cookie > last_used))
			return DMA_SUCCESS;
	} else {
		if ((cookie <= last_complete) && (cookie > last_used))
			return DMA_SUCCESS;
	}
	return DMA_IN_PROGRESS;
}

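/*
 * Illustrative sketch (not part of this header): polling one cookie and
 * then testing a second against the returned driver state, avoiding a
 * second trip to the hardware.  "cookie1" and "cookie2" are assumed to be
 * cookies from earlier submissions on "chan".
 *
 *	dma_cookie_t last, used;
 *	bool both_complete;
 *
 *	both_complete =
 *		dma_async_is_tx_complete(chan, cookie1, &last, &used) == DMA_SUCCESS &&
 *		dma_async_is_complete(cookie2, last, used) == DMA_SUCCESS;
 */
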
enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
#ifdef CONFIG_DMA_ENGINE
enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
void dma_issue_pending_all(void);
#else
static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
	return DMA_SUCCESS;
}
static inline void dma_issue_pending_all(void)
{
	do { } while (0);
}
#endif

/* --- DMA device --- */

int dma_async_device_register(struct dma_device *device);
void dma_async_device_unregister(struct dma_device *device);
void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
#define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param);
void dma_release_channel(struct dma_chan *chan);

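/*
 * Illustrative sketch (not part of this header): requesting an exclusive
 * memcpy-capable channel, narrowed by the hypothetical my_filter()/my_dev
 * pair sketched near dma_filter_fn above, then releasing it after use.
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, my_filter, my_dev);
 *	if (!chan)
 *		return -ENODEV;
 *
 *	dma_release_channel(chan);
 */
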
/* --- Helper iov-locking functions --- */

struct dma_page_list {
	char __user *base_address;
	int nr_pages;
	struct page **pages;
};

struct dma_pinned_list {
	int nr_iovecs;
	struct dma_page_list page_list[0];
};

struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list);

dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
	struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
	struct dma_pinned_list *pinned_list, struct page *page,
	unsigned int offset, size_t len);

#endif /* DMAENGINE_H */