/*
 * xor offload engine api
 *
 * Copyright © 2006, Intel Corporation.
 *
 *	Dan Williams <dan.j.williams@intel.com>
 *
 *	with architecture considerations by:
 *	Neil Brown <neilb@suse.de>
 *	Jeff Garzik <jeff@garzik.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 */
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/raid/xor.h>
#include <linux/async_tx.h>
/* do_async_xor - dma map the pages and perform the xor with an engine.
 *	This routine is marked __always_inline so it can be compiled away
 *	when CONFIG_DMA_ENGINE=n
 */
static __always_inline struct dma_async_tx_descriptor *
do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
	     unsigned int offset, int src_cnt, size_t len,
	     enum async_tx_flags flags,
	     struct dma_async_tx_descriptor *depend_tx,
	     dma_async_tx_callback cb_fn, void *cb_param)
{
	struct dma_device *dma = chan->device;
	dma_addr_t *dma_src = (dma_addr_t *) src_list;
	struct dma_async_tx_descriptor *tx = NULL;
	int src_off = 0;
	int i;
	dma_async_tx_callback _cb_fn;
	void *_cb_param;
	enum async_tx_flags async_flags;
	enum dma_ctrl_flags dma_flags;
	int xor_src_cnt;
	dma_addr_t dma_dest;

	dma_dest = dma_map_page(dma->dev, dest, offset, len, DMA_FROM_DEVICE);
	for (i = 0; i < src_cnt; i++)
		dma_src[i] = dma_map_page(dma->dev, src_list[i], offset,
					  len, DMA_TO_DEVICE);

	while (src_cnt) {
		async_flags = flags;
		dma_flags = 0;
		xor_src_cnt = min(src_cnt, dma->max_xor);
		/* if we are submitting additional xors, leave the chain open,
		 * clear the callback parameters, and leave the destination
		 * buffer mapped
		 */
		if (src_cnt > xor_src_cnt) {
			async_flags &= ~ASYNC_TX_ACK;
			dma_flags = DMA_COMPL_SKIP_DEST_UNMAP;
			_cb_fn = NULL;
			_cb_param = NULL;
		} else {
			_cb_fn = cb_fn;
			_cb_param = cb_param;
		}
		if (_cb_fn)
			dma_flags |= DMA_PREP_INTERRUPT;

		/* Since we have clobbered the src_list we are committed
		 * to doing this asynchronously.  Drivers force forward progress
		 * in case they cannot provide a descriptor
		 */
		tx = dma->device_prep_dma_xor(chan, dma_dest, &dma_src[src_off],
					      xor_src_cnt, len, dma_flags);
		if (unlikely(!tx))
			async_tx_quiesce(&depend_tx);

		/* spin wait for the preceding transactions to complete */
		while (unlikely(!tx)) {
			dma_async_issue_pending(chan);
			tx = dma->device_prep_dma_xor(chan, dma_dest,
						      &dma_src[src_off],
						      xor_src_cnt, len,
						      dma_flags);
		}

		async_tx_submit(chan, tx, async_flags, depend_tx, _cb_fn,
				_cb_param);

		depend_tx = tx;
		flags |= ASYNC_TX_DEP_ACK;

		if (src_cnt > xor_src_cnt) {
			/* drop completed sources */
			src_cnt -= xor_src_cnt;
			src_off += xor_src_cnt;

			/* use the intermediate result as a source */
			dma_src[--src_off] = dma_dest;
			src_cnt++;
		} else
			break;
	}

	return tx;
}
static void
do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,
	    int src_cnt, size_t len, enum async_tx_flags flags,
	    dma_async_tx_callback cb_fn, void *cb_param)
{
	void *dest_buf;
	void **srcs = (void **) src_list;
	int src_off = 0;
	int xor_src_cnt;
	int i;

	/* reuse the 'src_list' array to convert to buffer pointers */
	for (i = 0; i < src_cnt; i++)
		srcs[i] = page_address(src_list[i]) + offset;

	/* set destination address */
	dest_buf = page_address(dest) + offset;

	if (flags & ASYNC_TX_XOR_ZERO_DST)
		memset(dest_buf, 0, len);

	while (src_cnt > 0) {
		/* process up to 'MAX_XOR_BLOCKS' sources */
		xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
		xor_blocks(xor_src_cnt, len, dest_buf, &srcs[src_off]);

		/* drop completed sources */
		src_cnt -= xor_src_cnt;
		src_off += xor_src_cnt;
	}

	async_tx_sync_epilog(cb_fn, cb_param);
}
/**
 * async_xor - attempt to xor a set of blocks with a dma engine.
 *	xor_blocks always uses the dest as a source so the ASYNC_TX_XOR_ZERO_DST
 *	flag must be set to not include dest data in the calculation.  The
 *	assumption with dma engines is that they only use the destination
 *	buffer as a source when it is explicitly specified in the source list.
 * @dest: destination page
 * @src_list: array of source pages (if the dest is also a source it must be
 *	at index zero).  The contents of this array may be overwritten.
 * @offset: offset in pages to start transaction
 * @src_cnt: number of source pages
 * @len: length in bytes
 * @flags: ASYNC_TX_XOR_ZERO_DST, ASYNC_TX_XOR_DROP_DST,
 *	ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
 * @depend_tx: xor depends on the result of this transaction.
 * @cb_fn: function to call when the xor completes
 * @cb_param: parameter to pass to the callback routine
 */
struct dma_async_tx_descriptor *
async_xor(struct page *dest, struct page **src_list, unsigned int offset,
	  int src_cnt, size_t len, enum async_tx_flags flags,
	  struct dma_async_tx_descriptor *depend_tx,
	  dma_async_tx_callback cb_fn, void *cb_param)
{
	struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_XOR,
						      &dest, 1, src_list,
						      src_cnt, len);
	BUG_ON(src_cnt <= 1);

	if (chan) {
		/* run the xor asynchronously */
		pr_debug("%s (async): len: %zu\n", __func__, len);

		return do_async_xor(chan, dest, src_list, offset, src_cnt, len,
				    flags, depend_tx, cb_fn, cb_param);
	} else {
		/* run the xor synchronously */
		pr_debug("%s (sync): len: %zu\n", __func__, len);

		/* in the sync case the dest is an implied source
		 * (assumes the dest is the first source)
		 */
		if (flags & ASYNC_TX_XOR_DROP_DST) {
			src_cnt--;
			src_list++;
		}

		/* wait for any prerequisite operations */
		async_tx_quiesce(&depend_tx);

		do_sync_xor(dest, src_list, offset, src_cnt, len,
			    flags, cb_fn, cb_param);

		return NULL;
	}
}
EXPORT_SYMBOL_GPL(async_xor);
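
/*
 * Illustrative sketch (not part of the original source): one way a caller,
 * for example a raid5-style stripe handler, might use async_xor() to compute
 * parity over a set of data blocks.  The helper name, the callback wiring,
 * and the page layout are assumptions made for this example only.
 */
static __maybe_unused struct dma_async_tx_descriptor *
example_compute_parity(struct page *parity, struct page **blocks, int nblocks,
		       size_t len, dma_async_tx_callback done, void *done_arg)
{
	/* parity is not listed as a source, so ask for the destination to be
	 * treated as zero (memset in the sync path, excluded by the engine)
	 */
	return async_xor(parity, blocks, 0, nblocks, len,
			 ASYNC_TX_XOR_ZERO_DST | ASYNC_TX_ACK,
			 NULL, done, done_arg);
}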
static int page_is_zero(struct page *p, unsigned int offset, size_t len)
{
	char *a = page_address(p) + offset;
	return ((*(u32 *) a) == 0 &&
		memcmp(a, a + 4, len - 4) == 0);
}
/**
 * async_xor_zero_sum - attempt a xor parity check with a dma engine.
 * @dest: destination page used if the xor is performed synchronously
 * @src_list: array of source pages.  The dest page must be listed as a source
 *	at index zero.  The contents of this array may be overwritten.
 * @offset: offset in pages to start transaction
 * @src_cnt: number of source pages
 * @len: length in bytes
 * @result: 0 if sum == 0 else non-zero
 * @flags: ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
 * @depend_tx: xor depends on the result of this transaction.
 * @cb_fn: function to call when the xor completes
 * @cb_param: parameter to pass to the callback routine
 */
struct dma_async_tx_descriptor *
async_xor_zero_sum(struct page *dest, struct page **src_list,
	unsigned int offset, int src_cnt, size_t len,
	u32 *result, enum async_tx_flags flags,
	struct dma_async_tx_descriptor *depend_tx,
	dma_async_tx_callback cb_fn, void *cb_param)
{
	struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_ZERO_SUM,
						      &dest, 1, src_list,
						      src_cnt, len);
	struct dma_device *device = chan ? chan->device : NULL;
	struct dma_async_tx_descriptor *tx = NULL;

	BUG_ON(src_cnt <= 1);

	if (device && src_cnt <= device->max_xor) {
		dma_addr_t *dma_src = (dma_addr_t *) src_list;
		unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;
		int i;

		pr_debug("%s: (async) len: %zu\n", __func__, len);

		for (i = 0; i < src_cnt; i++)
			dma_src[i] = dma_map_page(device->dev, src_list[i],
						  offset, len, DMA_TO_DEVICE);

		tx = device->device_prep_dma_zero_sum(chan, dma_src, src_cnt,
						      len, result,
						      dma_prep_flags);
		if (unlikely(!tx)) {
			async_tx_quiesce(&depend_tx);

			while (!tx) {
				dma_async_issue_pending(chan);
				tx = device->device_prep_dma_zero_sum(chan,
					dma_src, src_cnt, len, result,
					dma_prep_flags);
			}
		}

		async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
	} else {
		unsigned long xor_flags = flags;

		pr_debug("%s: (sync) len: %zu\n", __func__, len);

		/* the dest page doubles as src_list[0]; drop the explicit
		 * copy so it is only xor'd once, then test the sum for zero
		 */
		xor_flags |= ASYNC_TX_XOR_DROP_DST;
		xor_flags &= ~ASYNC_TX_ACK;

		tx = async_xor(dest, src_list, offset, src_cnt, len, xor_flags,
			depend_tx, NULL, NULL);

		async_tx_quiesce(&tx);

		*result = page_is_zero(dest, offset, len) ? 0 : 1;

		async_tx_sync_epilog(cb_fn, cb_param);
	}

	return tx;
}
EXPORT_SYMBOL_GPL(async_xor_zero_sum);
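
/*
 * Illustrative sketch (not part of the original source): a hypothetical
 * caller verifying that a parity block is consistent with its data blocks.
 * The blocking wait via async_tx_quiesce() and the names used here are
 * assumptions for the example only; src_list[0] must be the parity page.
 */
static __maybe_unused int
example_check_parity(struct page **src_list, int src_cnt, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	u32 bad = 0;

	tx = async_xor_zero_sum(src_list[0], src_list, 0, src_cnt, len,
				&bad, ASYNC_TX_ACK, NULL, NULL, NULL);

	/* wait for the check (dma engine or sync fallback) to finish */
	async_tx_quiesce(&tx);

	return bad ? -EIO : 0;
}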
static int __init async_xor_init(void)
{
	#ifdef CONFIG_DMA_ENGINE
	/* To conserve stack space the input src_list (array of page pointers)
	 * is reused to hold the array of dma addresses passed to the driver.
	 * This conversion is only possible when dma_addr_t is less than the
	 * size of a pointer.  HIGHMEM64G is known to violate this
	 * assumption.
	 */
	BUILD_BUG_ON(sizeof(dma_addr_t) > sizeof(struct page *));
	#endif

	return 0;
}

static void __exit async_xor_exit(void)
{
	do { } while (0);
}

module_init(async_xor_init);
module_exit(async_xor_exit);
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("asynchronous xor/xor-zero-sum api");
MODULE_LICENSE("GPL");