/* crypto/async_tx/async_memset.c
 * (scrape residue removed: the "ext4: ..." title and blob hash above were
 * gitweb page chrome belonging to an unrelated commit, not file content)
 */
1 /*
2 * memory fill offload engine support
4 * Copyright © 2006, Intel Corporation.
6 * Dan Williams <dan.j.williams@intel.com>
8 * with architecture considerations by:
9 * Neil Brown <neilb@suse.de>
10 * Jeff Garzik <jeff@garzik.org>
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms and conditions of the GNU General Public License,
14 * version 2, as published by the Free Software Foundation.
16 * This program is distributed in the hope it will be useful, but WITHOUT
17 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
19 * more details.
21 * You should have received a copy of the GNU General Public License along with
22 * this program; if not, write to the Free Software Foundation, Inc.,
23 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
26 #include <linux/kernel.h>
27 #include <linux/interrupt.h>
28 #include <linux/module.h>
29 #include <linux/mm.h>
30 #include <linux/dma-mapping.h>
31 #include <linux/async_tx.h>
33 /**
34 * async_memset - attempt to fill memory with a dma engine.
35 * @dest: destination page
36 * @val: fill value
37 * @offset: offset in pages to start transaction
38 * @len: length in bytes
40 * honored flags: ASYNC_TX_ACK
42 struct dma_async_tx_descriptor *
43 async_memset(struct page *dest, int val, unsigned int offset, size_t len,
44 struct async_submit_ctl *submit)
46 struct dma_chan *chan = async_tx_find_channel(submit, DMA_MEMSET,
47 &dest, 1, NULL, 0, len);
48 struct dma_device *device = chan ? chan->device : NULL;
49 struct dma_async_tx_descriptor *tx = NULL;
51 if (device && is_dma_fill_aligned(device, offset, 0, len)) {
52 dma_addr_t dma_dest;
53 unsigned long dma_prep_flags = 0;
55 if (submit->cb_fn)
56 dma_prep_flags |= DMA_PREP_INTERRUPT;
57 if (submit->flags & ASYNC_TX_FENCE)
58 dma_prep_flags |= DMA_PREP_FENCE;
59 dma_dest = dma_map_page(device->dev, dest, offset, len,
60 DMA_FROM_DEVICE);
62 tx = device->device_prep_dma_memset(chan, dma_dest, val, len,
63 dma_prep_flags);
66 if (tx) {
67 pr_debug("%s: (async) len: %zu\n", __func__, len);
68 async_tx_submit(chan, tx, submit);
69 } else { /* run the memset synchronously */
70 void *dest_buf;
71 pr_debug("%s: (sync) len: %zu\n", __func__, len);
73 dest_buf = page_address(dest) + offset;
75 /* wait for any prerequisite operations */
76 async_tx_quiesce(&submit->depend_tx);
78 memset(dest_buf, val, len);
80 async_tx_sync_epilog(submit);
83 return tx;
85 EXPORT_SYMBOL_GPL(async_memset);
/* module metadata */
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("asynchronous memset api");
MODULE_LICENSE("GPL");