/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * This driver supports an Intel I/OAT DMA engine, which does asynchronous
 * memcpy to offload copies from the CPU.
 */
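/*
 * The engine is exposed to clients through the generic dmaengine layer:
 * ioat_probe() below registers a struct dma_device whose memcpy, completion
 * and issue-pending callbacks are implemented in this file, and
 * enumerate_dma_channels() adds one dma_chan per hardware channel.
 */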
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include "ioatdma.h"
#include "ioatdma_registers.h"
#include "ioatdma_hw.h"

#define to_ioat_chan(chan) container_of(chan, struct ioat_dma_chan, common)
#define to_ioat_device(dev) container_of(dev, struct ioat_device, common)
#define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node)
#define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, async_tx)
/* internal functions */
static int __devinit ioat_probe(struct pci_dev *pdev,
				const struct pci_device_id *ent);
static void ioat_shutdown(struct pci_dev *pdev);
static void __devexit ioat_remove(struct pci_dev *pdev);
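
/*
 * enumerate_dma_channels - read the channel count and transfer capability
 * from the device registers and set up one ioat_dma_chan per channel,
 * linking each into the common dma_device channel list.
 */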
static int enumerate_dma_channels(struct ioat_device *device)
{
	u8 xfercap_scale;
	u32 xfercap;
	int i;
	struct ioat_dma_chan *ioat_chan;

	device->common.chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
	xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
	xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));

	for (i = 0; i < device->common.chancnt; i++) {
		ioat_chan = kzalloc(sizeof(*ioat_chan), GFP_KERNEL);
		if (!ioat_chan) {
			device->common.chancnt = i;
			break;
		}

		ioat_chan->device = device;
		ioat_chan->reg_base = device->reg_base + (0x80 * (i + 1));
		ioat_chan->xfercap = xfercap;
		spin_lock_init(&ioat_chan->cleanup_lock);
		spin_lock_init(&ioat_chan->desc_lock);
		INIT_LIST_HEAD(&ioat_chan->free_desc);
		INIT_LIST_HEAD(&ioat_chan->used_desc);
		/* This should be made common somewhere in dmaengine.c */
		ioat_chan->common.device = &device->common;
		list_add_tail(&ioat_chan->common.device_node,
			      &device->common.channels);
	}
	return device->common.chancnt;
}
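
/*
 * ioat_set_src/ioat_set_dest - program the DMA source/destination address
 * into every hardware descriptor of a transaction, stepping the address by
 * the channel's xfercap for each descriptor in the chain.
 */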
static void ioat_set_src(dma_addr_t addr,
			 struct dma_async_tx_descriptor *tx,
			 int index)
{
	struct ioat_desc_sw *iter, *desc = tx_to_ioat_desc(tx);
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);

	pci_unmap_addr_set(desc, src, addr);

	list_for_each_entry(iter, &desc->async_tx.tx_list, node) {
		iter->hw->src_addr = addr;
		addr += ioat_chan->xfercap;
	}
}
static void ioat_set_dest(dma_addr_t addr,
			  struct dma_async_tx_descriptor *tx,
			  int index)
{
	struct ioat_desc_sw *iter, *desc = tx_to_ioat_desc(tx);
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);

	pci_unmap_addr_set(desc, dst, addr);

	list_for_each_entry(iter, &desc->async_tx.tx_list, node) {
		iter->hw->dst_addr = addr;
		addr += ioat_chan->xfercap;
	}
}
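
/*
 * ioat_tx_submit - assign a cookie to the transaction and splice its
 * descriptor chain onto the channel's used list; once enough descriptors
 * are pending, the APPEND command is issued so the hardware continues past
 * the previous end of chain.
 */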
static dma_cookie_t ioat_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
	struct ioat_desc_sw *desc = tx_to_ioat_desc(tx);
	int append = 0;
	dma_cookie_t cookie;
	struct ioat_desc_sw *group_start;

	group_start = list_entry(desc->async_tx.tx_list.next,
				 struct ioat_desc_sw, node);
	spin_lock_bh(&ioat_chan->desc_lock);
	/* cookie incr and addition to used_list must be atomic */
	cookie = ioat_chan->common.cookie;
	cookie++;
	if (cookie < 0)
		cookie = 1;
	ioat_chan->common.cookie = desc->async_tx.cookie = cookie;

	/* write address into NextDescriptor field of last desc in chain */
	to_ioat_desc(ioat_chan->used_desc.prev)->hw->next =
						group_start->async_tx.phys;
	list_splice_init(&desc->async_tx.tx_list, ioat_chan->used_desc.prev);

	ioat_chan->pending += desc->tx_cnt;
	if (ioat_chan->pending >= 4) {
		append = 1;
		ioat_chan->pending = 0;
	}
	spin_unlock_bh(&ioat_chan->desc_lock);

	if (append)
		writeb(IOAT_CHANCMD_APPEND,
		       ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);

	return cookie;
}
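
/*
 * ioat_dma_alloc_descriptor - allocate one hardware descriptor from the
 * PCI pool together with its software wrapper, and wire up the async_tx
 * callbacks used by the dmaengine core.
 */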
static struct ioat_desc_sw *ioat_dma_alloc_descriptor(
	struct ioat_dma_chan *ioat_chan,
	gfp_t flags)
{
	struct ioat_dma_descriptor *desc;
	struct ioat_desc_sw *desc_sw;
	struct ioat_device *ioat_device;
	dma_addr_t phys;

	ioat_device = to_ioat_device(ioat_chan->common.device);
	desc = pci_pool_alloc(ioat_device->dma_pool, flags, &phys);
	if (unlikely(!desc))
		return NULL;

	desc_sw = kzalloc(sizeof(*desc_sw), flags);
	if (unlikely(!desc_sw)) {
		pci_pool_free(ioat_device->dma_pool, desc, phys);
		return NULL;
	}

	memset(desc, 0, sizeof(*desc));
	dma_async_tx_descriptor_init(&desc_sw->async_tx, &ioat_chan->common);
	desc_sw->async_tx.tx_set_src = ioat_set_src;
	desc_sw->async_tx.tx_set_dest = ioat_set_dest;
	desc_sw->async_tx.tx_submit = ioat_tx_submit;
	INIT_LIST_HEAD(&desc_sw->async_tx.tx_list);
	desc_sw->hw = desc;
	desc_sw->async_tx.phys = phys;

	return desc_sw;
}
#define INITIAL_IOAT_DESC_COUNT 128

static void ioat_start_null_desc(struct ioat_dma_chan *ioat_chan);
/* returns the actual number of allocated descriptors */
static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
	struct ioat_desc_sw *desc = NULL;
	u16 chanctrl;
	u32 chanerr;
	int i;
	LIST_HEAD(tmp_list);

	/* have we already been set up? */
	if (!list_empty(&ioat_chan->free_desc))
		return INITIAL_IOAT_DESC_COUNT;

	/* Setup register to interrupt and write completion status on error */
	chanctrl = IOAT_CHANCTRL_ERR_INT_EN |
		IOAT_CHANCTRL_ANY_ERR_ABORT_EN |
		IOAT_CHANCTRL_ERR_COMPLETION_EN;
	writew(chanctrl, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);

	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	if (chanerr) {
		printk(KERN_ERR "IOAT: CHANERR = %x, clearing\n", chanerr);
		writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	}

	/* Allocate descriptors */
	for (i = 0; i < INITIAL_IOAT_DESC_COUNT; i++) {
		desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_KERNEL);
		if (!desc) {
			printk(KERN_ERR "IOAT: Only %d initial descriptors\n", i);
			break;
		}
		list_add_tail(&desc->node, &tmp_list);
	}
	spin_lock_bh(&ioat_chan->desc_lock);
	list_splice(&tmp_list, &ioat_chan->free_desc);
	spin_unlock_bh(&ioat_chan->desc_lock);

	/* allocate a completion writeback area */
	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
	ioat_chan->completion_virt =
		pci_pool_alloc(ioat_chan->device->completion_pool,
			       GFP_KERNEL,
			       &ioat_chan->completion_addr);
	memset(ioat_chan->completion_virt, 0,
	       sizeof(*ioat_chan->completion_virt));
	writel(((u64) ioat_chan->completion_addr) & 0x00000000FFFFFFFF,
	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
	writel(((u64) ioat_chan->completion_addr) >> 32,
	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

	ioat_start_null_desc(ioat_chan);
	return i;
}
static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan);
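
/*
 * ioat_dma_free_chan_resources - reset the channel, return all hardware
 * descriptors to the PCI pool and release the completion writeback area.
 */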
static void ioat_dma_free_chan_resources(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
	struct ioat_device *ioat_device = to_ioat_device(chan->device);
	struct ioat_desc_sw *desc, *_desc;
	int in_use_descs = 0;

	ioat_dma_memcpy_cleanup(ioat_chan);

	writeb(IOAT_CHANCMD_RESET, ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);

	spin_lock_bh(&ioat_chan->desc_lock);
	list_for_each_entry_safe(desc, _desc, &ioat_chan->used_desc, node) {
		in_use_descs++;
		list_del(&desc->node);
		pci_pool_free(ioat_device->dma_pool, desc->hw,
			      desc->async_tx.phys);
		kfree(desc);
	}
	list_for_each_entry_safe(desc, _desc, &ioat_chan->free_desc, node) {
		list_del(&desc->node);
		pci_pool_free(ioat_device->dma_pool, desc->hw,
			      desc->async_tx.phys);
		kfree(desc);
	}
	spin_unlock_bh(&ioat_chan->desc_lock);

	pci_pool_free(ioat_device->completion_pool,
		      ioat_chan->completion_virt,
		      ioat_chan->completion_addr);

	/* one is ok since we left it on there on purpose */
	if (in_use_descs > 1)
		printk(KERN_ERR "IOAT: Freeing %d in use descriptors!\n",
		       in_use_descs - 1);

	ioat_chan->last_completion = ioat_chan->completion_addr = 0;
	ioat_chan->pending = 0;
}
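
/*
 * ioat_dma_prep_memcpy - build a chain of hardware descriptors for one
 * memcpy transaction; each descriptor covers at most xfercap bytes, and
 * only the last descriptor carries the cookie and completion-status flag.
 */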
static struct dma_async_tx_descriptor *
ioat_dma_prep_memcpy(struct dma_chan *chan, size_t len, int int_en)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
	struct ioat_desc_sw *first, *prev, *new;
	LIST_HEAD(new_chain);
	u32 copy;
	size_t orig_len;
	int desc_count = 0;

	if (!len)
		return NULL;

	orig_len = len;

	first = NULL;
	prev = NULL;

	spin_lock_bh(&ioat_chan->desc_lock);
	while (len) {
		if (!list_empty(&ioat_chan->free_desc)) {
			new = to_ioat_desc(ioat_chan->free_desc.next);
			list_del(&new->node);
		} else {
			/* try to get another desc */
			new = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
			/* will this ever happen? */
			/* TODO add upper limit on these */
			BUG_ON(!new);
		}

		copy = min((u32) len, ioat_chan->xfercap);

		new->hw->size = copy;
		new->hw->ctl = 0;
		new->async_tx.cookie = 0;
		new->async_tx.ack = 1;

		/* chain together the physical address list for the HW */
		if (!first)
			first = new;
		else
			prev->hw->next = (u64) new->async_tx.phys;

		prev = new;
		len -= copy;
		list_add_tail(&new->node, &new_chain);
		desc_count++;
	}

	list_splice(&new_chain, &new->async_tx.tx_list);

	new->hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
	new->hw->next = 0;
	new->tx_cnt = desc_count;
	new->async_tx.ack = 0; /* client is in control of this ack */
	new->async_tx.cookie = -EBUSY;

	pci_unmap_len_set(new, len, orig_len);
	spin_unlock_bh(&ioat_chan->desc_lock);

	return new ? &new->async_tx : NULL;
}
/**
 * ioat_dma_memcpy_issue_pending - push potentially unrecognized appended descriptors to hw
 * @chan: DMA channel handle
 */
static void ioat_dma_memcpy_issue_pending(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);

	if (ioat_chan->pending != 0) {
		ioat_chan->pending = 0;
		writeb(IOAT_CHANCMD_APPEND,
		       ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);
	}
}
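
/*
 * ioat_dma_memcpy_cleanup - reap completed descriptors: read the completion
 * writeback area to find the last descriptor the hardware finished, unmap
 * the buffers of completed transactions, move acked descriptors back to the
 * free list and record the last completed cookie.
 */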
static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *chan)
{
	unsigned long phys_complete;
	struct ioat_desc_sw *desc, *_desc;
	dma_cookie_t cookie = 0;

	prefetch(chan->completion_virt);

	if (!spin_trylock(&chan->cleanup_lock))
		return;

	/* The completion writeback can happen at any time,
	   so reads by the driver need to be atomic operations
	   The descriptor physical addresses are limited to 32-bits
	   when the CPU can only do a 32-bit mov */

#if (BITS_PER_LONG == 64)
	phys_complete =
	chan->completion_virt->full & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
#else
	phys_complete = chan->completion_virt->low & IOAT_LOW_COMPLETION_MASK;
#endif

	if ((chan->completion_virt->full & IOAT_CHANSTS_DMA_TRANSFER_STATUS) ==
		IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED) {
		printk(KERN_ERR "IOAT: Channel halted, chanerr = %x\n",
		       readl(chan->reg_base + IOAT_CHANERR_OFFSET));

		/* TODO do something to salvage the situation */
	}

	if (phys_complete == chan->last_completion) {
		spin_unlock(&chan->cleanup_lock);
		return;
	}

	spin_lock_bh(&chan->desc_lock);
	list_for_each_entry_safe(desc, _desc, &chan->used_desc, node) {

		/*
		 * Incoming DMA requests may use multiple descriptors, due to
		 * exceeding xfercap, perhaps. If so, only the last one will
		 * have a cookie, and require unmapping.
		 */
		if (desc->async_tx.cookie) {
			cookie = desc->async_tx.cookie;

			/* yes we are unmapping both _page and _single alloc'd
			   regions with unmap_page. Is this *really* that bad?
			*/
			pci_unmap_page(chan->device->pdev,
				       pci_unmap_addr(desc, dst),
				       pci_unmap_len(desc, len),
				       PCI_DMA_FROMDEVICE);
			pci_unmap_page(chan->device->pdev,
				       pci_unmap_addr(desc, src),
				       pci_unmap_len(desc, len),
				       PCI_DMA_TODEVICE);
		}

		if (desc->async_tx.phys != phys_complete) {
			/* a completed entry, but not the last, so cleanup
			 * if the client is done with the descriptor
			 */
			if (desc->async_tx.ack) {
				list_del(&desc->node);
				list_add_tail(&desc->node, &chan->free_desc);
			} else
				desc->async_tx.cookie = 0;
		} else {
			/* last used desc. Do not remove, so we can append from
			   it, but don't look at it next time, either */
			desc->async_tx.cookie = 0;

			/* TODO check status bits? */
			break;
		}
	}

	spin_unlock_bh(&chan->desc_lock);

	chan->last_completion = phys_complete;
	if (cookie != 0)
		chan->completed_cookie = cookie;

	spin_unlock(&chan->cleanup_lock);
}
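
/*
 * ioat_dma_dependency_added - dmaengine dependency callback; when nothing
 * is pending on the channel, take the opportunity to reap completed
 * descriptors.
 */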
static void ioat_dma_dependency_added(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
	spin_lock_bh(&ioat_chan->desc_lock);
	if (ioat_chan->pending == 0) {
		spin_unlock_bh(&ioat_chan->desc_lock);
		ioat_dma_memcpy_cleanup(ioat_chan);
	} else
		spin_unlock_bh(&ioat_chan->desc_lock);
}
/**
 * ioat_dma_is_complete - poll the status of an IOAT DMA transaction
 * @chan: IOAT DMA channel handle
 * @cookie: DMA transaction identifier
 * @done: if not %NULL, updated with last completed transaction
 * @used: if not %NULL, updated with last used transaction
 */
static enum dma_status ioat_dma_is_complete(struct dma_chan *chan,
					    dma_cookie_t cookie,
					    dma_cookie_t *done,
					    dma_cookie_t *used)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	enum dma_status ret;

	last_used = chan->cookie;
	last_complete = ioat_chan->completed_cookie;

	if (done)
		*done = last_complete;
	if (used)
		*used = last_used;

	ret = dma_async_is_complete(cookie, last_complete, last_used);
	if (ret == DMA_SUCCESS)
		return ret;

	ioat_dma_memcpy_cleanup(ioat_chan);

	last_used = chan->cookie;
	last_complete = ioat_chan->completed_cookie;

	if (done)
		*done = last_complete;
	if (used)
		*used = last_used;

	return dma_async_is_complete(cookie, last_complete, last_used);
}
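
/* PCI glue: device IDs handled by this driver and its pci_driver hooks */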
static struct pci_device_id ioat_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_UNISYS,
		     PCI_DEVICE_ID_UNISYS_DMA_DIRECTOR) },
	{ 0, }
};

static struct pci_driver ioat_pci_driver = {
	.name		= "ioatdma",
	.id_table	= ioat_pci_tbl,
	.probe		= ioat_probe,
	.shutdown	= ioat_shutdown,
	.remove		= __devexit_p(ioat_remove),
};
static irqreturn_t ioat_do_interrupt(int irq, void *data)
{
	struct ioat_device *instance = data;
	unsigned long attnstatus;
	u8 intrctrl;

	intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);

	if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
		return IRQ_NONE;

	if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
		writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
		return IRQ_NONE;
	}

	attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);

	printk(KERN_ERR "ioatdma error: interrupt! status %lx\n", attnstatus);

	writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);

	return IRQ_HANDLED;
}
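
/*
 * ioat_start_null_desc - append a NULL (no-op) descriptor, program its
 * physical address into the chain-address registers and issue the START
 * command so the channel has a valid descriptor to append real work to.
 */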
static void ioat_start_null_desc(struct ioat_dma_chan *ioat_chan)
{
	struct ioat_desc_sw *desc;

	spin_lock_bh(&ioat_chan->desc_lock);

	if (!list_empty(&ioat_chan->free_desc)) {
		desc = to_ioat_desc(ioat_chan->free_desc.next);
		list_del(&desc->node);
	} else {
		/* try to get another desc */
		spin_unlock_bh(&ioat_chan->desc_lock);
		desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_KERNEL);
		spin_lock_bh(&ioat_chan->desc_lock);
		/* will this ever happen? */
	}

	desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL;
	desc->hw->next = 0;
	desc->async_tx.ack = 1;

	list_add_tail(&desc->node, &ioat_chan->used_desc);
	spin_unlock_bh(&ioat_chan->desc_lock);

	writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
	       ioat_chan->reg_base + IOAT_CHAINADDR_OFFSET_LOW);
	writel(((u64) desc->async_tx.phys) >> 32,
	       ioat_chan->reg_base + IOAT_CHAINADDR_OFFSET_HIGH);

	writeb(IOAT_CHANCMD_START, ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);
}
/*
 * Perform an IOAT transaction to verify the HW works.
 */
#define IOAT_TEST_SIZE 2000

static int ioat_self_test(struct ioat_device *device)
{
	int i;
	u8 *src;
	u8 *dest;
	struct dma_chan *dma_chan;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t addr;
	dma_cookie_t cookie;
	int err = 0;

	src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;
	dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < IOAT_TEST_SIZE; i++)
		src[i] = (u8)i;

	/* Start copy, using first DMA channel */
	dma_chan = container_of(device->common.channels.next,
				struct dma_chan,
				device_node);
	if (ioat_dma_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	tx = ioat_dma_prep_memcpy(dma_chan, IOAT_TEST_SIZE, 0);
	addr = dma_map_single(dma_chan->device->dev, src, IOAT_TEST_SIZE,
			      DMA_TO_DEVICE);
	ioat_set_src(addr, tx, 0);
	addr = dma_map_single(dma_chan->device->dev, dest, IOAT_TEST_SIZE,
			      DMA_FROM_DEVICE);
	ioat_set_dest(addr, tx, 0);
	cookie = ioat_tx_submit(tx);
	ioat_dma_memcpy_issue_pending(dma_chan);
	msleep(1);

	if (ioat_dma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
		printk(KERN_ERR "ioatdma: Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}
	if (memcmp(src, dest, IOAT_TEST_SIZE)) {
		printk(KERN_ERR "ioatdma: Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	ioat_dma_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}
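
/*
 * ioat_probe - map the device, create the descriptor and completion pools,
 * hook up the interrupt, enumerate the channels, run the self-test and
 * finally register the dma_device with the dmaengine core.
 */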
static int __devinit ioat_probe(struct pci_dev *pdev,
				const struct pci_device_id *ent)
{
	int err;
	unsigned long mmio_start, mmio_len;
	void __iomem *reg_base;
	struct ioat_device *device;

	err = pci_enable_device(pdev);
	if (err)
		goto err_enable_device;

	err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
	if (err)
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
	if (err)
		goto err_set_dma_mask;

	err = pci_request_regions(pdev, ioat_pci_driver.name);
	if (err)
		goto err_request_regions;

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);

	reg_base = ioremap(mmio_start, mmio_len);
	if (!reg_base) {
		err = -ENOMEM;
		goto err_ioremap;
	}

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device) {
		err = -ENOMEM;
		goto err_kzalloc;
	}

	/* DMA coherent memory pool for DMA descriptor allocations */
	device->dma_pool = pci_pool_create("dma_desc_pool", pdev,
					   sizeof(struct ioat_dma_descriptor),
					   64, 0);
	if (!device->dma_pool) {
		err = -ENOMEM;
		goto err_dma_pool;
	}

	device->completion_pool = pci_pool_create("completion_pool", pdev,
						  sizeof(u64), SMP_CACHE_BYTES,
						  SMP_CACHE_BYTES);
	if (!device->completion_pool) {
		err = -ENOMEM;
		goto err_completion_pool;
	}

	device->pdev = pdev;
	pci_set_drvdata(pdev, device);
#ifdef CONFIG_PCI_MSI
	if (pci_enable_msi(pdev) == 0) {
		device->msi = 1;
	} else {
		device->msi = 0;
	}
#endif
	err = request_irq(pdev->irq, &ioat_do_interrupt, IRQF_SHARED, "ioat",
			  device);
	if (err)
		goto err_irq;

	device->reg_base = reg_base;

	writeb(IOAT_INTRCTRL_MASTER_INT_EN,
	       device->reg_base + IOAT_INTRCTRL_OFFSET);
	pci_set_master(pdev);

	INIT_LIST_HEAD(&device->common.channels);
	enumerate_dma_channels(device);

	dma_cap_set(DMA_MEMCPY, device->common.cap_mask);
	device->common.device_alloc_chan_resources = ioat_dma_alloc_chan_resources;
	device->common.device_free_chan_resources = ioat_dma_free_chan_resources;
	device->common.device_prep_dma_memcpy = ioat_dma_prep_memcpy;
	device->common.device_is_tx_complete = ioat_dma_is_complete;
	device->common.device_issue_pending = ioat_dma_memcpy_issue_pending;
	device->common.device_dependency_added = ioat_dma_dependency_added;
	device->common.dev = &pdev->dev;
	printk(KERN_INFO "Intel(R) I/OAT DMA Engine found, %d channels\n",
	       device->common.chancnt);

	err = ioat_self_test(device);
	if (err)
		goto err_self_test;

	dma_async_device_register(&device->common);

	return 0;

err_self_test:
err_irq:
	pci_pool_destroy(device->completion_pool);
err_completion_pool:
	pci_pool_destroy(device->dma_pool);
err_dma_pool:
	kfree(device);
err_kzalloc:
	iounmap(reg_base);
err_ioremap:
	pci_release_regions(pdev);
err_request_regions:
err_set_dma_mask:
	pci_disable_device(pdev);
err_enable_device:

	printk(KERN_ERR "Intel(R) I/OAT DMA Engine initialization failed\n");

	return err;
}
static void ioat_shutdown(struct pci_dev *pdev)
{
	struct ioat_device *device;
	device = pci_get_drvdata(pdev);

	dma_async_device_unregister(&device->common);
}
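
/*
 * ioat_remove - undo ioat_probe: unregister from the dmaengine core, free
 * the IRQ and pools, unmap the registers and release the per-channel state.
 */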
static void __devexit ioat_remove(struct pci_dev *pdev)
{
	struct ioat_device *device;
	struct dma_chan *chan, *_chan;
	struct ioat_dma_chan *ioat_chan;

	device = pci_get_drvdata(pdev);
	dma_async_device_unregister(&device->common);

	free_irq(device->pdev->irq, device);
#ifdef CONFIG_PCI_MSI
	if (device->msi)
		pci_disable_msi(device->pdev);
#endif
	pci_pool_destroy(device->dma_pool);
	pci_pool_destroy(device->completion_pool);
	iounmap(device->reg_base);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	list_for_each_entry_safe(chan, _chan, &device->common.channels, device_node) {
		ioat_chan = to_ioat_chan(chan);
		list_del(&chan->device_node);
		kfree(ioat_chan);
	}
	kfree(device);
}
MODULE_VERSION("1.9");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Intel Corporation");

static int __init ioat_init_module(void)
{
	/* it's currently unsafe to unload this module */
	/* if forced, worst case is that rmmod hangs */
	__unsafe(THIS_MODULE);

	return pci_register_driver(&ioat_pci_driver);
}

module_init(ioat_init_module);

static void __exit ioat_exit_module(void)
{
	pci_unregister_driver(&ioat_pci_driver);
}

module_exit(ioat_exit_module);