/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */
/*
 * This driver supports an Intel I/OAT DMA engine, which does asynchronous
 * copy operations.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include "ioatdma.h"
#include "ioatdma_registers.h"
#include "ioatdma_hw.h"
#define to_ioat_chan(chan) container_of(chan, struct ioat_dma_chan, common)
#define to_ioat_device(dev) container_of(dev, struct ioat_device, common)
#define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node)
#define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, async_tx)
/* internal functions */
static int __devinit ioat_probe(struct pci_dev *pdev,
				const struct pci_device_id *ent);
static void ioat_shutdown(struct pci_dev *pdev);
static void __devexit ioat_remove(struct pci_dev *pdev);
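/*
 * Read the channel count and per-descriptor transfer cap from the device
 * registers, then allocate a struct ioat_dma_chan for each hardware channel
 * and add it to the dmaengine channel list. Returns the channel count.
 */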
static int enumerate_dma_channels(struct ioat_device *device)
{
	u8 xfercap_scale;
	u32 xfercap;
	int i;
	struct ioat_dma_chan *ioat_chan;

	device->common.chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
	xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
	xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));

	for (i = 0; i < device->common.chancnt; i++) {
		ioat_chan = kzalloc(sizeof(*ioat_chan), GFP_KERNEL);
		if (!ioat_chan) {
			device->common.chancnt = i;
			break;
		}

		ioat_chan->device = device;
		ioat_chan->reg_base = device->reg_base + (0x80 * (i + 1));
		ioat_chan->xfercap = xfercap;
		spin_lock_init(&ioat_chan->cleanup_lock);
		spin_lock_init(&ioat_chan->desc_lock);
		INIT_LIST_HEAD(&ioat_chan->free_desc);
		INIT_LIST_HEAD(&ioat_chan->used_desc);
		/* This should be made common somewhere in dmaengine.c */
		ioat_chan->common.device = &device->common;
		list_add_tail(&ioat_chan->common.device_node,
			      &device->common.channels);
	}
	return device->common.chancnt;
}

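/*
 * Record the source address for later unmapping, then program it into each
 * hardware descriptor in the chain, advancing by xfercap per descriptor.
 */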
static void
ioat_set_src(dma_addr_t addr, struct dma_async_tx_descriptor *tx, int index)
{
	struct ioat_desc_sw *iter, *desc = tx_to_ioat_desc(tx);
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);

	pci_unmap_addr_set(desc, src, addr);

	list_for_each_entry(iter, &desc->async_tx.tx_list, node) {
		iter->hw->src_addr = addr;
		addr += ioat_chan->xfercap;
	}
}

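/* As ioat_set_src(), but for the destination address. */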
static void
ioat_set_dest(dma_addr_t addr, struct dma_async_tx_descriptor *tx, int index)
{
	struct ioat_desc_sw *iter, *desc = tx_to_ioat_desc(tx);
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);

	pci_unmap_addr_set(desc, dst, addr);

	list_for_each_entry(iter, &desc->async_tx.tx_list, node) {
		iter->hw->dst_addr = addr;
		addr += ioat_chan->xfercap;
	}
}

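/*
 * Assign the next cookie, splice the descriptor group onto the tail of the
 * channel's in-flight chain, and ring the APPEND doorbell once four or more
 * descriptors are pending.
 */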
static dma_cookie_t
ioat_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
	struct ioat_desc_sw *desc = tx_to_ioat_desc(tx);
	int append = 0;
	dma_cookie_t cookie;
	struct ioat_desc_sw *group_start;

	group_start = list_entry(desc->async_tx.tx_list.next,
				 struct ioat_desc_sw, node);
	spin_lock_bh(&ioat_chan->desc_lock);
	/* cookie incr and addition to used_list must be atomic */
	cookie = ioat_chan->common.cookie;
	cookie++;
	if (cookie < 0)
		cookie = 1;
	ioat_chan->common.cookie = desc->async_tx.cookie = cookie;

	/* write address into NextDescriptor field of last desc in chain */
	to_ioat_desc(ioat_chan->used_desc.prev)->hw->next =
						group_start->async_tx.phys;
	list_splice_init(&desc->async_tx.tx_list, ioat_chan->used_desc.prev);

	ioat_chan->pending += desc->tx_cnt;
	if (ioat_chan->pending >= 4) {
		append = 1;
		ioat_chan->pending = 0;
	}
	spin_unlock_bh(&ioat_chan->desc_lock);

	if (append)
		writeb(IOAT_CHANCMD_APPEND,
		       ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);

	return cookie;
}

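/*
 * Allocate one hardware descriptor from the DMA pool plus its software
 * tracking structure, and wire up the async_tx callbacks. Returns NULL if
 * either allocation fails.
 */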
static struct ioat_desc_sw *ioat_dma_alloc_descriptor(
	struct ioat_dma_chan *ioat_chan,
	gfp_t flags)
{
	struct ioat_dma_descriptor *desc;
	struct ioat_desc_sw *desc_sw;
	struct ioat_device *ioat_device;
	dma_addr_t phys;

	ioat_device = to_ioat_device(ioat_chan->common.device);
	desc = pci_pool_alloc(ioat_device->dma_pool, flags, &phys);
	if (unlikely(!desc))
		return NULL;

	desc_sw = kzalloc(sizeof(*desc_sw), flags);
	if (unlikely(!desc_sw)) {
		pci_pool_free(ioat_device->dma_pool, desc, phys);
		return NULL;
	}

	memset(desc, 0, sizeof(*desc));
	dma_async_tx_descriptor_init(&desc_sw->async_tx, &ioat_chan->common);
	desc_sw->async_tx.tx_set_src = ioat_set_src;
	desc_sw->async_tx.tx_set_dest = ioat_set_dest;
	desc_sw->async_tx.tx_submit = ioat_tx_submit;
	INIT_LIST_HEAD(&desc_sw->async_tx.tx_list);
	desc_sw->hw = desc;
	desc_sw->async_tx.phys = phys;

	return desc_sw;
}

#define INITIAL_IOAT_DESC_COUNT 128

static void ioat_start_null_desc(struct ioat_dma_chan *ioat_chan);

/* returns the actual number of allocated descriptors */
static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
	struct ioat_desc_sw *desc = NULL;
	u16 chanctrl;
	u32 chanerr;
	int i;
	LIST_HEAD(tmp_list);

	/*
	 * In-use bit automatically set by reading chanctrl
	 * If 0, we got it, if 1, someone else did
	 */
	chanctrl = readw(ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);
	if (chanctrl & IOAT_CHANCTRL_CHANNEL_IN_USE)
		return -EBUSY;

	/* Setup register to interrupt and write completion status on error */
	chanctrl = IOAT_CHANCTRL_CHANNEL_IN_USE |
		IOAT_CHANCTRL_ERR_INT_EN |
		IOAT_CHANCTRL_ANY_ERR_ABORT_EN |
		IOAT_CHANCTRL_ERR_COMPLETION_EN;
	writew(chanctrl, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);

	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	if (chanerr) {
		printk(KERN_ERR "IOAT: CHANERR = %x, clearing\n", chanerr);
		writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	}

	/* Allocate descriptors */
	for (i = 0; i < INITIAL_IOAT_DESC_COUNT; i++) {
		desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_KERNEL);
		if (!desc) {
			printk(KERN_ERR "IOAT: Only %d initial descriptors\n",
			       i);
			break;
		}
		list_add_tail(&desc->node, &tmp_list);
	}
	spin_lock_bh(&ioat_chan->desc_lock);
	list_splice(&tmp_list, &ioat_chan->free_desc);
	spin_unlock_bh(&ioat_chan->desc_lock);

	/* allocate a completion writeback area */
	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
	ioat_chan->completion_virt =
		pci_pool_alloc(ioat_chan->device->completion_pool,
			       GFP_KERNEL,
			       &ioat_chan->completion_addr);
	memset(ioat_chan->completion_virt, 0,
	       sizeof(*ioat_chan->completion_virt));
	writel(((u64) ioat_chan->completion_addr) & 0x00000000FFFFFFFF,
	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
	writel(((u64) ioat_chan->completion_addr) >> 32,
	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

	ioat_start_null_desc(ioat_chan);
	return i;
}

static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan);

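/*
 * Reset the channel and return every descriptor on the used and free lists
 * to the DMA pool, then release the completion writeback area and clear the
 * in-use bit so another client may claim the channel.
 */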
static void ioat_dma_free_chan_resources(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
	struct ioat_device *ioat_device = to_ioat_device(chan->device);
	struct ioat_desc_sw *desc, *_desc;
	u16 chanctrl;
	int in_use_descs = 0;

	ioat_dma_memcpy_cleanup(ioat_chan);

	writeb(IOAT_CHANCMD_RESET, ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);

	spin_lock_bh(&ioat_chan->desc_lock);
	list_for_each_entry_safe(desc, _desc, &ioat_chan->used_desc, node) {
		in_use_descs++;
		list_del(&desc->node);
		pci_pool_free(ioat_device->dma_pool, desc->hw,
			      desc->async_tx.phys);
		kfree(desc);
	}
	list_for_each_entry_safe(desc, _desc, &ioat_chan->free_desc, node) {
		list_del(&desc->node);
		pci_pool_free(ioat_device->dma_pool, desc->hw,
			      desc->async_tx.phys);
		kfree(desc);
	}
	spin_unlock_bh(&ioat_chan->desc_lock);

	pci_pool_free(ioat_device->completion_pool,
		      ioat_chan->completion_virt,
		      ioat_chan->completion_addr);

	/* one is ok since we left it on there on purpose */
	if (in_use_descs > 1)
		printk(KERN_ERR "IOAT: Freeing %d in use descriptors!\n",
		       in_use_descs - 1);

	ioat_chan->last_completion = ioat_chan->completion_addr = 0;

	/* Tell hw the chan is free */
	chanctrl = readw(ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);
	chanctrl &= ~IOAT_CHANCTRL_CHANNEL_IN_USE;
	writew(chanctrl, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);
}

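/*
 * Build a chain of hardware descriptors covering len bytes, splitting at the
 * channel's xfercap. Descriptors come from the free list, or are allocated
 * atomically if the list is empty. Source and destination addresses are
 * filled in later via the tx_set_src/tx_set_dest callbacks.
 */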
static struct dma_async_tx_descriptor *
ioat_dma_prep_memcpy(struct dma_chan *chan, size_t len, int int_en)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
	struct ioat_desc_sw *first, *prev, *new;
	LIST_HEAD(new_chain);
	u32 copy;
	size_t orig_len;
	int desc_count = 0;

	if (!len)
		return NULL;

	orig_len = len;

	first = NULL;
	prev = NULL;

	spin_lock_bh(&ioat_chan->desc_lock);
	while (len) {
		if (!list_empty(&ioat_chan->free_desc)) {
			new = to_ioat_desc(ioat_chan->free_desc.next);
			list_del(&new->node);
		} else {
			/* try to get another desc */
			new = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
			/* will this ever happen? */
			/* TODO add upper limit on these */
			BUG_ON(!new);
		}

		copy = min((u32) len, ioat_chan->xfercap);

		new->hw->size = copy;
		new->hw->ctl = 0;
		new->async_tx.cookie = 0;
		new->async_tx.ack = 1;

		/* chain together the physical address list for the HW */
		if (!first)
			first = new;
		else
			prev->hw->next = (u64) new->async_tx.phys;

		prev = new;
		len -= copy;
		list_add_tail(&new->node, &new_chain);
		desc_count++;
	}

	list_splice(&new_chain, &new->async_tx.tx_list);

	new->hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
	new->hw->next = 0;
	new->tx_cnt = desc_count;
	new->async_tx.ack = 0; /* client is in control of this ack */
	new->async_tx.cookie = -EBUSY;

	pci_unmap_len_set(new, src_len, orig_len);
	pci_unmap_len_set(new, dst_len, orig_len);
	spin_unlock_bh(&ioat_chan->desc_lock);

	return new ? &new->async_tx : NULL;
}

/**
 * ioat_dma_memcpy_issue_pending - push potentially unrecognized appended descriptors to hw
 * @chan: DMA channel handle
 */
static void ioat_dma_memcpy_issue_pending(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);

	if (ioat_chan->pending != 0) {
		ioat_chan->pending = 0;
		writeb(IOAT_CHANCMD_APPEND,
		       ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);
	}
}

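/*
 * Reclaim completed descriptors: read the completion writeback to find the
 * last descriptor the hardware finished, unmap the client buffers, and move
 * acked descriptors back to the free list.
 */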
static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *chan)
{
	unsigned long phys_complete;
	struct ioat_desc_sw *desc, *_desc;
	dma_cookie_t cookie = 0;

	prefetch(chan->completion_virt);

	if (!spin_trylock(&chan->cleanup_lock))
		return;

	/* The completion writeback can happen at any time,
	   so reads by the driver need to be atomic operations
	   The descriptor physical addresses are limited to 32-bits
	   when the CPU can only do a 32-bit mov */

#if (BITS_PER_LONG == 64)
	phys_complete = chan->completion_virt->full &
			IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
#else
	phys_complete = chan->completion_virt->low & IOAT_LOW_COMPLETION_MASK;
#endif

	if ((chan->completion_virt->full & IOAT_CHANSTS_DMA_TRANSFER_STATUS) ==
		IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED) {
		printk(KERN_ERR "IOAT: Channel halted, chanerr = %x\n",
		       readl(chan->reg_base + IOAT_CHANERR_OFFSET));

		/* TODO do something to salvage the situation */
	}

	if (phys_complete == chan->last_completion) {
		spin_unlock(&chan->cleanup_lock);
		return;
	}

	spin_lock_bh(&chan->desc_lock);
	list_for_each_entry_safe(desc, _desc, &chan->used_desc, node) {

		/*
		 * Incoming DMA requests may use multiple descriptors, due to
		 * exceeding xfercap, perhaps. If so, only the last one will
		 * have a cookie, and require unmapping.
		 */
		if (desc->async_tx.cookie) {
			cookie = desc->async_tx.cookie;

			/* yes we are unmapping both _page and _single alloc'd
			   regions with unmap_page. Is this *really* that bad?
			*/
			pci_unmap_page(chan->device->pdev,
				       pci_unmap_addr(desc, dst),
				       pci_unmap_len(desc, dst_len),
				       PCI_DMA_FROMDEVICE);
			pci_unmap_page(chan->device->pdev,
				       pci_unmap_addr(desc, src),
				       pci_unmap_len(desc, src_len),
				       PCI_DMA_TODEVICE);
		}

		if (desc->async_tx.phys != phys_complete) {
			/* a completed entry, but not the last, so cleanup
			 * if the client is done with the descriptor
			 */
			if (desc->async_tx.ack) {
				list_del(&desc->node);
				list_add_tail(&desc->node, &chan->free_desc);
			} else
				desc->async_tx.cookie = 0;
		} else {
			/* last used desc. Do not remove, so we can append from
			   it, but don't look at it next time, either */
			desc->async_tx.cookie = 0;

			/* TODO check status bits? */
			break;
		}
	}

	spin_unlock_bh(&chan->desc_lock);

	chan->last_completion = phys_complete;
	if (cookie != 0)
		chan->completed_cookie = cookie;

	spin_unlock(&chan->cleanup_lock);
}

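/*
 * When notified of a new dependency and nothing is pending on the channel,
 * take the opportunity to reclaim completed descriptors.
 */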
static void ioat_dma_dependency_added(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);

	spin_lock_bh(&ioat_chan->desc_lock);
	if (ioat_chan->pending == 0) {
		spin_unlock_bh(&ioat_chan->desc_lock);
		ioat_dma_memcpy_cleanup(ioat_chan);
	} else
		spin_unlock_bh(&ioat_chan->desc_lock);
}

/**
 * ioat_dma_is_complete - poll the status of an IOAT DMA transaction
 * @chan: IOAT DMA channel handle
 * @cookie: DMA transaction identifier
 * @done: if not %NULL, updated with last completed transaction
 * @used: if not %NULL, updated with last used transaction
 */
static enum dma_status ioat_dma_is_complete(struct dma_chan *chan,
					    dma_cookie_t cookie,
					    dma_cookie_t *done,
					    dma_cookie_t *used)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	enum dma_status ret;

	last_used = chan->cookie;
	last_complete = ioat_chan->completed_cookie;

	if (done)
		*done = last_complete;
	if (used)
		*used = last_used;

	ret = dma_async_is_complete(cookie, last_complete, last_used);
	if (ret == DMA_SUCCESS)
		return ret;

	ioat_dma_memcpy_cleanup(ioat_chan);

	last_used = chan->cookie;
	last_complete = ioat_chan->completed_cookie;

	if (done)
		*done = last_complete;
	if (used)
		*used = last_used;

	return dma_async_is_complete(cookie, last_complete, last_used);
}

/* PCI API */

static struct pci_device_id ioat_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_UNISYS,
		     PCI_DEVICE_ID_UNISYS_DMA_DIRECTOR) },
	{ 0, }
};

static struct pci_driver ioat_pci_driver = {
	.name		= "ioatdma",
	.id_table	= ioat_pci_tbl,
	.probe		= ioat_probe,
	.shutdown	= ioat_shutdown,
	.remove		= __devexit_p(ioat_remove),
};

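/*
 * Interrupt handler: verify the device actually raised the interrupt, log
 * the attention status, and acknowledge by writing INTRCTRL back.
 */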
static irqreturn_t ioat_do_interrupt(int irq, void *data)
{
	struct ioat_device *instance = data;
	unsigned long attnstatus;
	u8 intrctrl;

	intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);

	if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
		return IRQ_NONE;

	if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
		writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
		return IRQ_NONE;
	}

	attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);

	printk(KERN_ERR "ioatdma error: interrupt! status %lx\n", attnstatus);

	writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
	return IRQ_HANDLED;
}

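/*
 * Prime the channel with a NULL descriptor so the hardware chain has a
 * valid head to append real transfers to, then start the channel.
 */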
static void ioat_start_null_desc(struct ioat_dma_chan *ioat_chan)
{
	struct ioat_desc_sw *desc;

	spin_lock_bh(&ioat_chan->desc_lock);

	if (!list_empty(&ioat_chan->free_desc)) {
		desc = to_ioat_desc(ioat_chan->free_desc.next);
		list_del(&desc->node);
	} else {
		/* try to get another desc */
		spin_unlock_bh(&ioat_chan->desc_lock);
		desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_KERNEL);
		spin_lock_bh(&ioat_chan->desc_lock);
		/* will this ever happen? */
		BUG_ON(!desc);
	}

	desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL;
	desc->hw->next = 0;
	desc->async_tx.ack = 1;

	list_add_tail(&desc->node, &ioat_chan->used_desc);
	spin_unlock_bh(&ioat_chan->desc_lock);

	writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
	       ioat_chan->reg_base + IOAT_CHAINADDR_OFFSET_LOW);
	writel(((u64) desc->async_tx.phys) >> 32,
	       ioat_chan->reg_base + IOAT_CHAINADDR_OFFSET_HIGH);

	writeb(IOAT_CHANCMD_START, ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);
}

/*
 * Perform an IOAT transaction to verify the HW works.
 */
#define IOAT_TEST_SIZE 2000

static int ioat_self_test(struct ioat_device *device)
{
	int i;
	u8 *src;
	u8 *dest;
	struct dma_chan *dma_chan;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t addr;
	dma_cookie_t cookie;
	int err = 0;

	src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;
	dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < IOAT_TEST_SIZE; i++)
		src[i] = (u8)i;

	/* Start copy, using first DMA channel */
	dma_chan = container_of(device->common.channels.next,
				struct dma_chan,
				device_node);
	if (ioat_dma_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	tx = ioat_dma_prep_memcpy(dma_chan, IOAT_TEST_SIZE, 0);
	async_tx_ack(tx);
	addr = dma_map_single(dma_chan->device->dev, src, IOAT_TEST_SIZE,
			      DMA_TO_DEVICE);
	ioat_set_src(addr, tx, 0);
	addr = dma_map_single(dma_chan->device->dev, dest, IOAT_TEST_SIZE,
			      DMA_FROM_DEVICE);
	ioat_set_dest(addr, tx, 0);
	cookie = ioat_tx_submit(tx);
	ioat_dma_memcpy_issue_pending(dma_chan);
	msleep(1);

	if (ioat_dma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
		printk(KERN_ERR "ioatdma: Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}
	if (memcmp(src, dest, IOAT_TEST_SIZE)) {
		printk(KERN_ERR "ioatdma: Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	ioat_dma_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}

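/*
 * Map the device, create the descriptor and completion pools, hook up the
 * interrupt, enumerate channels, and register with the dmaengine core once
 * the self-test passes.
 */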
static int __devinit ioat_probe(struct pci_dev *pdev,
				const struct pci_device_id *ent)
{
	int err;
	unsigned long mmio_start, mmio_len;
	void __iomem *reg_base;
	struct ioat_device *device;

	err = pci_enable_device(pdev);
	if (err)
		goto err_enable_device;

	err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
	if (err)
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
	if (err)
		goto err_set_dma_mask;

	err = pci_request_regions(pdev, ioat_pci_driver.name);
	if (err)
		goto err_request_regions;

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);

	reg_base = ioremap(mmio_start, mmio_len);
	if (!reg_base) {
		err = -ENOMEM;
		goto err_ioremap;
	}

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device) {
		err = -ENOMEM;
		goto err_kzalloc;
	}

	/* DMA coherent memory pool for DMA descriptor allocations */
	device->dma_pool = pci_pool_create("dma_desc_pool", pdev,
					   sizeof(struct ioat_dma_descriptor),
					   64, 0);
	if (!device->dma_pool) {
		err = -ENOMEM;
		goto err_dma_pool;
	}

	device->completion_pool = pci_pool_create("completion_pool", pdev,
						  sizeof(u64), SMP_CACHE_BYTES,
						  SMP_CACHE_BYTES);
	if (!device->completion_pool) {
		err = -ENOMEM;
		goto err_completion_pool;
	}

	device->pdev = pdev;
	pci_set_drvdata(pdev, device);
#ifdef CONFIG_PCI_MSI
	if (pci_enable_msi(pdev) == 0) {
		device->msi = 1;
	} else {
		device->msi = 0;
	}
#endif
	err = request_irq(pdev->irq, &ioat_do_interrupt, IRQF_SHARED, "ioat",
			  device);
	if (err)
		goto err_irq;

	device->reg_base = reg_base;

	writeb(IOAT_INTRCTRL_MASTER_INT_EN,
	       device->reg_base + IOAT_INTRCTRL_OFFSET);
	pci_set_master(pdev);

	INIT_LIST_HEAD(&device->common.channels);
	enumerate_dma_channels(device);

	dma_cap_set(DMA_MEMCPY, device->common.cap_mask);
	device->common.device_alloc_chan_resources =
						ioat_dma_alloc_chan_resources;
	device->common.device_free_chan_resources =
						ioat_dma_free_chan_resources;
	device->common.device_prep_dma_memcpy = ioat_dma_prep_memcpy;
	device->common.device_is_tx_complete = ioat_dma_is_complete;
	device->common.device_issue_pending = ioat_dma_memcpy_issue_pending;
	device->common.device_dependency_added = ioat_dma_dependency_added;
	device->common.dev = &pdev->dev;
	printk(KERN_INFO "Intel(R) I/OAT DMA Engine found, %d channels\n",
	       device->common.chancnt);

	err = ioat_self_test(device);
	if (err)
		goto err_self_test;

	dma_async_device_register(&device->common);

	return 0;

err_self_test:
err_irq:
	pci_pool_destroy(device->completion_pool);
err_completion_pool:
	pci_pool_destroy(device->dma_pool);
err_dma_pool:
	kfree(device);
err_kzalloc:
	iounmap(reg_base);
err_ioremap:
	pci_release_regions(pdev);
err_request_regions:
err_set_dma_mask:
	pci_disable_device(pdev);
err_enable_device:
	printk(KERN_ERR "Intel(R) I/OAT DMA Engine initialization failed\n");

	return err;
}

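/* Unregister from the dmaengine core so no new transactions are started. */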
static void ioat_shutdown(struct pci_dev *pdev)
{
	struct ioat_device *device;

	device = pci_get_drvdata(pdev);
	dma_async_device_unregister(&device->common);
}

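/*
 * Tear down in roughly the reverse of probe: unregister, free the IRQ and
 * pools, unmap the BAR, and release every per-channel structure.
 */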
static void __devexit ioat_remove(struct pci_dev *pdev)
{
	struct ioat_device *device;
	struct dma_chan *chan, *_chan;
	struct ioat_dma_chan *ioat_chan;

	device = pci_get_drvdata(pdev);
	dma_async_device_unregister(&device->common);

	free_irq(device->pdev->irq, device);
#ifdef CONFIG_PCI_MSI
	if (device->msi)
		pci_disable_msi(device->pdev);
#endif
	pci_pool_destroy(device->dma_pool);
	pci_pool_destroy(device->completion_pool);
	iounmap(device->reg_base);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	list_for_each_entry_safe(chan, _chan, &device->common.channels,
				 device_node) {
		ioat_chan = to_ioat_chan(chan);
		list_del(&chan->device_node);
		kfree(ioat_chan);
	}
	kfree(device);
}

/* MODULE API */
MODULE_VERSION("1.9");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Intel Corporation");

static int __init ioat_init_module(void)
{
	/* it's currently unsafe to unload this module */
	/* if forced, worst case is that rmmod hangs */
	__unsafe(THIS_MODULE);

	return pci_register_driver(&ioat_pci_driver);
}
module_init(ioat_init_module);

static void __exit ioat_exit_module(void)
{
	pci_unregister_driver(&ioat_pci_driver);
}
module_exit(ioat_exit_module);