/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2007 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 */

/*
 * This driver supports an Intel I/OAT DMA engine, which does asynchronous
 * copy operations.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include "ioatdma.h"
#include "ioatdma_registers.h"
#include "ioatdma_hw.h"
#define to_ioat_chan(chan) container_of(chan, struct ioat_dma_chan, common)
#define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, common)
#define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node)
#define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, async_tx)

#define chan_num(ch) ((int)((ch)->reg_base - (ch)->device->reg_base) / 0x80)
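/*
 * Each channel owns a 0x80-byte window of MMIO registers that follows the
 * device-global registers, so chan_num() derives a channel number from the
 * offset between the channel's register window and the device's register
 * base; it is only used for diagnostic messages.
 */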
static int ioat_pending_level = 4;
module_param(ioat_pending_level, int, 0644);
MODULE_PARM_DESC(ioat_pending_level,
		 "high-water mark for pushing ioat descriptors (default: 4)");
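/*
 * Descriptors queued by tx_submit are not handed to the hardware one at a
 * time: the channel's pending count accumulates until it reaches
 * ioat_pending_level (or until issue_pending is called), at which point the
 * whole batch is pushed to the engine with a single doorbell write.
 */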
#define RESET_DELAY  msecs_to_jiffies(100)
#define WATCHDOG_DELAY  round_jiffies(msecs_to_jiffies(2000))
static void ioat_dma_chan_reset_part2(struct work_struct *work);
static void ioat_dma_chan_watchdog(struct work_struct *work);
/*
 * workaround for IOAT ver.3.0 null descriptor issue
 * (channel returns error when size is 0)
 */
#define NULL_DESC_BUFFER_SIZE 1
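/*
 * This 1-byte size is used wherever a NULL/noop descriptor is programmed
 * (ioat_dma_start_null_desc() and the ver.2 ring wrap), since a zero-sized
 * descriptor makes the channel report an error.
 */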
/* internal functions */
static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan);
static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan);

static struct ioat_desc_sw *
ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan);
static struct ioat_desc_sw *
ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan);
static inline struct ioat_dma_chan *ioat_lookup_chan_by_index(
						struct ioatdma_device *device,
						int index)
{
	return device->idx[index];
}
/**
 * ioat_dma_do_interrupt - handler used for single vector interrupt mode
 * @irq: interrupt number
 * @data: interrupt data
 */
static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
{
	struct ioatdma_device *instance = data;
	struct ioat_dma_chan *ioat_chan;
	unsigned long attnstatus;
	int bit;
	u8 intrctrl;

	intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);

	if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
		return IRQ_NONE;

	if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
		writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
		return IRQ_NONE;
	}

	attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
	for_each_bit(bit, &attnstatus, BITS_PER_LONG) {
		ioat_chan = ioat_lookup_chan_by_index(instance, bit);
		tasklet_schedule(&ioat_chan->cleanup_task);
	}

	writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
	return IRQ_HANDLED;
}
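/*
 * In single-vector mode the handler above reads ATTNSTATUS to learn which
 * channels raised the interrupt and schedules each channel's cleanup
 * tasklet; in MSI-X per-channel mode (below) the channel is known from the
 * vector itself, so no status scan is needed.
 */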
/**
 * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode
 * @irq: interrupt number
 * @data: interrupt data
 */
static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
{
	struct ioat_dma_chan *ioat_chan = data;

	tasklet_schedule(&ioat_chan->cleanup_task);

	return IRQ_HANDLED;
}
static void ioat_dma_cleanup_tasklet(unsigned long data);
/**
 * ioat_dma_enumerate_channels - find and initialize the device's channels
 * @device: the device to be enumerated
 */
static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
{
	u8 xfercap_scale;
	u32 xfercap;
	int i;
	struct ioat_dma_chan *ioat_chan;

	/*
	 * IOAT ver.3 workarounds
	 */
	if (device->version == IOAT_VER_3_0) {
		u32 chan_err_mask;
		u16 dev_id;
		u32 dmauncerrsts;

		/*
		 * Write CHANERRMSK_INT with 3E07h to mask out the errors
		 * that can cause stability issues for IOAT ver.3
		 */
		chan_err_mask = 0x3E07;
		pci_write_config_dword(device->pdev,
			IOAT_PCI_CHANERRMASK_INT_OFFSET,
			chan_err_mask);

		/*
		 * Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
		 * (workaround for spurious config parity error after restart)
		 */
		pci_read_config_word(device->pdev,
			IOAT_PCI_DEVICE_ID_OFFSET,
			&dev_id);
		if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) {
			dmauncerrsts = 0x10;
			pci_write_config_dword(device->pdev,
				IOAT_PCI_DMAUNCERRSTS_OFFSET,
				dmauncerrsts);
		}
	}

	device->common.chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
	xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
	xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));

#ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL
	device->common.chancnt--;
#endif
	for (i = 0; i < device->common.chancnt; i++) {
		ioat_chan = kzalloc(sizeof(*ioat_chan), GFP_KERNEL);
		if (!ioat_chan) {
			device->common.chancnt = i;
			break;
		}

		ioat_chan->device = device;
		ioat_chan->reg_base = device->reg_base + (0x80 * (i + 1));
		ioat_chan->xfercap = xfercap;
		ioat_chan->desccount = 0;
		INIT_DELAYED_WORK(&ioat_chan->work, ioat_dma_chan_reset_part2);
		if (ioat_chan->device->version != IOAT_VER_1_2) {
			writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE
					| IOAT_DMA_DCA_ANY_CPU,
				ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
		}
		spin_lock_init(&ioat_chan->cleanup_lock);
		spin_lock_init(&ioat_chan->desc_lock);
		INIT_LIST_HEAD(&ioat_chan->free_desc);
		INIT_LIST_HEAD(&ioat_chan->used_desc);
		/* This should be made common somewhere in dmaengine.c */
		ioat_chan->common.device = &device->common;
		list_add_tail(&ioat_chan->common.device_node,
			      &device->common.channels);
		device->idx[i] = ioat_chan;
		tasklet_init(&ioat_chan->cleanup_task,
			     ioat_dma_cleanup_tasklet,
			     (unsigned long) ioat_chan);
		tasklet_disable(&ioat_chan->cleanup_task);
	}
	return device->common.chancnt;
}
/**
 * ioat_dma_memcpy_issue_pending - push potentially unrecognized appended
 *                                 descriptors to hw
 * @chan: DMA channel handle
 */
static inline void __ioat1_dma_memcpy_issue_pending(
						struct ioat_dma_chan *ioat_chan)
{
	ioat_chan->pending = 0;
	writeb(IOAT_CHANCMD_APPEND, ioat_chan->reg_base + IOAT1_CHANCMD_OFFSET);
}
static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);

	if (ioat_chan->pending > 0) {
		spin_lock_bh(&ioat_chan->desc_lock);
		__ioat1_dma_memcpy_issue_pending(ioat_chan);
		spin_unlock_bh(&ioat_chan->desc_lock);
	}
}
static inline void __ioat2_dma_memcpy_issue_pending(
						struct ioat_dma_chan *ioat_chan)
{
	ioat_chan->pending = 0;
	writew(ioat_chan->dmacount,
	       ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
}
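/*
 * Pushing work to the engine differs by hardware version: CB1 (ver.1.2) is
 * told to re-read the descriptor chain via the APPEND channel command,
 * while CB2/CB3 (ver.2.0/3.0) are given the updated descriptor count
 * through the DMACOUNT register.
 */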
static void ioat2_dma_memcpy_issue_pending(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);

	if (ioat_chan->pending > 0) {
		spin_lock_bh(&ioat_chan->desc_lock);
		__ioat2_dma_memcpy_issue_pending(ioat_chan);
		spin_unlock_bh(&ioat_chan->desc_lock);
	}
}
/**
 * ioat_dma_chan_reset_part2 - reinit the channel after a reset
 */
static void ioat_dma_chan_reset_part2(struct work_struct *work)
{
	struct ioat_dma_chan *ioat_chan =
		container_of(work, struct ioat_dma_chan, work.work);
	struct ioat_desc_sw *desc;

	spin_lock_bh(&ioat_chan->cleanup_lock);
	spin_lock_bh(&ioat_chan->desc_lock);

	ioat_chan->completion_virt->low = 0;
	ioat_chan->completion_virt->high = 0;
	ioat_chan->pending = 0;

	/*
	 * count the descriptors waiting, and be sure to do it
	 * right for both the CB1 line and the CB2 ring
	 */
	ioat_chan->dmacount = 0;
	if (ioat_chan->used_desc.prev) {
		desc = to_ioat_desc(ioat_chan->used_desc.prev);
		do {
			ioat_chan->dmacount++;
			desc = to_ioat_desc(desc->node.next);
		} while (&desc->node != ioat_chan->used_desc.next);
	}

	/*
	 * write the new starting descriptor address
	 * this puts channel engine into ARMED state
	 */
	desc = to_ioat_desc(ioat_chan->used_desc.prev);
	switch (ioat_chan->device->version) {
	case IOAT_VER_1_2:
		writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
		       ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
		writel(((u64) desc->async_tx.phys) >> 32,
		       ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH);

		writeb(IOAT_CHANCMD_START, ioat_chan->reg_base
			+ IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
		break;
	case IOAT_VER_2_0:
		writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
		       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
		writel(((u64) desc->async_tx.phys) >> 32,
		       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);

		/* tell the engine to go with what's left to be done */
		writew(ioat_chan->dmacount,
		       ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
		break;
	}

	dev_err(&ioat_chan->device->pdev->dev,
		"chan%d reset - %d descs waiting, %d total desc\n",
		chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount);

	spin_unlock_bh(&ioat_chan->desc_lock);
	spin_unlock_bh(&ioat_chan->cleanup_lock);
}
/**
 * ioat_dma_reset_channel - restart a channel
 * @ioat_chan: IOAT DMA channel handle
 */
static void ioat_dma_reset_channel(struct ioat_dma_chan *ioat_chan)
{
	u32 chansts, chanerr;

	if (!ioat_chan->used_desc.prev)
		return;

	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	chansts = (ioat_chan->completion_virt->low
					& IOAT_CHANSTS_DMA_TRANSFER_STATUS);
	if (chanerr) {
		dev_err(&ioat_chan->device->pdev->dev,
			"chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n",
			chan_num(ioat_chan), chansts, chanerr);
		writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	}

	/*
	 * whack it upside the head with a reset
	 * and wait for things to settle out.
	 * force the pending count to a really big negative
	 * to make sure no one forces an issue_pending
	 * while we're waiting.
	 */

	spin_lock_bh(&ioat_chan->desc_lock);
	ioat_chan->pending = INT_MIN;
	writeb(IOAT_CHANCMD_RESET,
	       ioat_chan->reg_base
	       + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
	spin_unlock_bh(&ioat_chan->desc_lock);

	/* schedule the 2nd half instead of sleeping a long time */
	schedule_delayed_work(&ioat_chan->work, RESET_DELAY);
}
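/*
 * The reset is done in two halves: this function issues the RESET channel
 * command with pending forced to INT_MIN so nobody can append work, and
 * ioat_dma_chan_reset_part2() runs RESET_DELAY later to clear the
 * completion area, re-count the outstanding descriptors and re-arm the
 * engine, instead of busy-waiting for the reset to settle.
 */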
/**
 * ioat_dma_chan_watchdog - watch for stuck channels
 */
static void ioat_dma_chan_watchdog(struct work_struct *work)
{
	struct ioatdma_device *device =
		container_of(work, struct ioatdma_device, work.work);
	struct ioat_dma_chan *ioat_chan;
	int i;

	union {
		u64 full;
		struct {
			u32 low;
			u32 high;
		};
	} completion_hw;
	unsigned long compl_desc_addr_hw;

	for (i = 0; i < device->common.chancnt; i++) {
		ioat_chan = ioat_lookup_chan_by_index(device, i);

		if (ioat_chan->device->version == IOAT_VER_1_2
			/* have we started processing anything yet */
		    && ioat_chan->last_completion
			/* have we completed any since last watchdog cycle? */
		    && (ioat_chan->last_completion ==
				ioat_chan->watchdog_completion)
			/* has TCP been stuck on one cookie since last watchdog? */
		    && (ioat_chan->watchdog_tcp_cookie ==
				ioat_chan->watchdog_last_tcp_cookie)
		    && (ioat_chan->watchdog_tcp_cookie !=
				ioat_chan->completed_cookie)
			/* is there something in the chain to be processed? */
			/* CB1 chain always has at least the last one processed */
		    && (ioat_chan->used_desc.prev != ioat_chan->used_desc.next)
		    && ioat_chan->pending == 0) {

			/*
			 * Check the CHANSTS register for the completed
			 * descriptor address.  If it differs from the
			 * completion writeback, is non-zero, and has changed
			 * since the last watchdog pass, assume the channel is
			 * still working and only the completion writeback is
			 * stale, so update the writeback from CHANSTS;
			 * otherwise try resetting the channel.
			 */
			completion_hw.low = readl(ioat_chan->reg_base +
				IOAT_CHANSTS_OFFSET_LOW(ioat_chan->device->version));
			completion_hw.high = readl(ioat_chan->reg_base +
				IOAT_CHANSTS_OFFSET_HIGH(ioat_chan->device->version));
#if (BITS_PER_LONG == 64)
			compl_desc_addr_hw =
				completion_hw.full
				& IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
#else
			compl_desc_addr_hw =
				completion_hw.low & IOAT_LOW_COMPLETION_MASK;
#endif

			if ((compl_desc_addr_hw != 0)
			    && (compl_desc_addr_hw != ioat_chan->watchdog_completion)
			    && (compl_desc_addr_hw != ioat_chan->last_compl_desc_addr_hw)) {
				ioat_chan->last_compl_desc_addr_hw = compl_desc_addr_hw;
				ioat_chan->completion_virt->low = completion_hw.low;
				ioat_chan->completion_virt->high = completion_hw.high;
			} else {
				ioat_dma_reset_channel(ioat_chan);
				ioat_chan->watchdog_completion = 0;
				ioat_chan->last_compl_desc_addr_hw = 0;
			}

		/*
		 * For version 2.0: if there are descriptors yet to be
		 * processed and the last completed address hasn't changed
		 * since the last watchdog pass, issue the pending
		 * descriptors if we are below the pending level, otherwise
		 * try resetting the channel.
		 */
		} else if (ioat_chan->device->version == IOAT_VER_2_0
		    && ioat_chan->used_desc.prev
		    && ioat_chan->last_completion
		    && ioat_chan->last_completion == ioat_chan->watchdog_completion) {

			if (ioat_chan->pending < ioat_pending_level)
				ioat2_dma_memcpy_issue_pending(&ioat_chan->common);
			else {
				ioat_dma_reset_channel(ioat_chan);
				ioat_chan->watchdog_completion = 0;
			}
		} else {
			ioat_chan->last_compl_desc_addr_hw = 0;
			ioat_chan->watchdog_completion
					= ioat_chan->last_completion;
		}

		ioat_chan->watchdog_last_tcp_cookie =
			ioat_chan->watchdog_tcp_cookie;
	}

	schedule_delayed_work(&device->work, WATCHDOG_DELAY);
}
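/*
 * The watchdog runs every WATCHDOG_DELAY (~2 s).  For ver.1.2 it decides
 * between trusting the CHANSTS register (the completion writeback went
 * stale) and resetting the channel; for ver.2.0 it first tries to push the
 * pending descriptors and only resets if the channel still makes no
 * progress.
 */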
static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
	struct ioat_desc_sw *first = tx_to_ioat_desc(tx);
	struct ioat_desc_sw *prev, *new;
	struct ioat_dma_descriptor *hw;
	dma_cookie_t cookie;
	LIST_HEAD(new_chain);
	u32 copy;
	size_t len;
	dma_addr_t src, dst;
	unsigned long orig_flags;
	unsigned int desc_count = 0;

	/* src and dest and len are stored in the initial descriptor */
	len = first->len;
	src = first->src;
	dst = first->dst;
	orig_flags = first->async_tx.flags;
	new = first;

	spin_lock_bh(&ioat_chan->desc_lock);
	prev = to_ioat_desc(ioat_chan->used_desc.prev);
	do {
		copy = min_t(size_t, len, ioat_chan->xfercap);

		async_tx_ack(&new->async_tx);

		hw = new->hw;
		hw->size = copy;
		hw->ctl = 0;
		hw->src_addr = src;
		hw->dst_addr = dst;
		hw->next = 0;

		/* chain together the physical address list for the HW */
		prev->hw->next = (u64) new->async_tx.phys;

		len -= copy;
		dst += copy;
		src += copy;

		list_add_tail(&new->node, &new_chain);
		desc_count++;
		prev = new;
	} while (len && (new = ioat1_dma_get_next_descriptor(ioat_chan)));

	if (!new) {
		dev_err(&ioat_chan->device->pdev->dev,
			"tx submit failed\n");
		spin_unlock_bh(&ioat_chan->desc_lock);
		return -ENOMEM;
	}

	hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
	if (new->async_tx.callback) {
		hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN;
		if (first != new) {
			/* move callback to last desc */
			new->async_tx.callback = first->async_tx.callback;
			new->async_tx.callback_param
					= first->async_tx.callback_param;
			first->async_tx.callback = NULL;
			first->async_tx.callback_param = NULL;
		}
	}

	new->tx_cnt = desc_count;
	new->async_tx.flags = orig_flags; /* client is in control of this ack */

	/* store the original values for use in later cleanup */
	if (new != first) {
		new->src = first->src;
		new->dst = first->dst;
		new->len = first->len;
	}

	/* cookie incr and addition to used_list must be atomic */
	cookie = ioat_chan->common.cookie;
	cookie++;
	if (cookie < 0)
		cookie = 1;
	ioat_chan->common.cookie = new->async_tx.cookie = cookie;

	/* write address into NextDescriptor field of last desc in chain */
	to_ioat_desc(ioat_chan->used_desc.prev)->hw->next =
							first->async_tx.phys;
	list_splice_tail(&new_chain, &ioat_chan->used_desc);

	ioat_chan->dmacount += desc_count;
	ioat_chan->pending += desc_count;
	if (ioat_chan->pending >= ioat_pending_level)
		__ioat1_dma_memcpy_issue_pending(ioat_chan);
	spin_unlock_bh(&ioat_chan->desc_lock);

	return cookie;
}
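/*
 * A single memcpy request larger than the channel's xfercap is split
 * across several hardware descriptors in the loop above; only the last
 * descriptor of the chain keeps the cookie, the callback and the original
 * src/dst/len, which is what ioat_dma_memcpy_cleanup() relies on.
 */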
static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
	struct ioat_desc_sw *first = tx_to_ioat_desc(tx);
	struct ioat_desc_sw *new;
	struct ioat_dma_descriptor *hw;
	dma_cookie_t cookie;
	u32 copy;
	size_t len;
	dma_addr_t src, dst;
	unsigned long orig_flags;
	unsigned int desc_count = 0;

	/* src and dest and len are stored in the initial descriptor */
	len = first->len;
	src = first->src;
	dst = first->dst;
	orig_flags = first->async_tx.flags;
	new = first;

	/*
	 * ioat_chan->desc_lock is still in force in version 2 path
	 * it gets unlocked at end of this function
	 */
	do {
		copy = min_t(size_t, len, ioat_chan->xfercap);

		async_tx_ack(&new->async_tx);

		hw = new->hw;
		hw->size = copy;
		hw->ctl = 0;
		hw->src_addr = src;
		hw->dst_addr = dst;

		len -= copy;
		dst += copy;
		src += copy;
		desc_count++;
	} while (len && (new = ioat2_dma_get_next_descriptor(ioat_chan)));

	if (!new) {
		dev_err(&ioat_chan->device->pdev->dev,
			"tx submit failed\n");
		spin_unlock_bh(&ioat_chan->desc_lock);
		return -ENOMEM;
	}

	hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
	if (new->async_tx.callback) {
		hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN;
		if (first != new) {
			/* move callback to last desc */
			new->async_tx.callback = first->async_tx.callback;
			new->async_tx.callback_param
					= first->async_tx.callback_param;
			first->async_tx.callback = NULL;
			first->async_tx.callback_param = NULL;
		}
	}

	new->tx_cnt = desc_count;
	new->async_tx.flags = orig_flags; /* client is in control of this ack */

	/* store the original values for use in later cleanup */
	if (new != first) {
		new->src = first->src;
		new->dst = first->dst;
		new->len = first->len;
	}

	/* cookie incr and addition to used_list must be atomic */
	cookie = ioat_chan->common.cookie;
	cookie++;
	if (cookie < 0)
		cookie = 1;
	ioat_chan->common.cookie = new->async_tx.cookie = cookie;

	ioat_chan->dmacount += desc_count;
	ioat_chan->pending += desc_count;
	if (ioat_chan->pending >= ioat_pending_level)
		__ioat2_dma_memcpy_issue_pending(ioat_chan);
	spin_unlock_bh(&ioat_chan->desc_lock);

	return cookie;
}
/**
 * ioat_dma_alloc_descriptor - allocate and return a sw and hw descriptor pair
 * @ioat_chan: the channel supplying the memory pool for the descriptors
 * @flags: allocation flags
 */
static struct ioat_desc_sw *ioat_dma_alloc_descriptor(
					struct ioat_dma_chan *ioat_chan,
					gfp_t flags)
{
	struct ioat_dma_descriptor *desc;
	struct ioat_desc_sw *desc_sw;
	struct ioatdma_device *ioatdma_device;
	dma_addr_t phys;

	ioatdma_device = to_ioatdma_device(ioat_chan->common.device);
	desc = pci_pool_alloc(ioatdma_device->dma_pool, flags, &phys);
	if (unlikely(!desc))
		return NULL;

	desc_sw = kzalloc(sizeof(*desc_sw), flags);
	if (unlikely(!desc_sw)) {
		pci_pool_free(ioatdma_device->dma_pool, desc, phys);
		return NULL;
	}

	memset(desc, 0, sizeof(*desc));
	dma_async_tx_descriptor_init(&desc_sw->async_tx, &ioat_chan->common);
	switch (ioat_chan->device->version) {
	case IOAT_VER_1_2:
		desc_sw->async_tx.tx_submit = ioat1_tx_submit;
		break;
	case IOAT_VER_2_0:
	case IOAT_VER_3_0:
		desc_sw->async_tx.tx_submit = ioat2_tx_submit;
		break;
	}
	INIT_LIST_HEAD(&desc_sw->async_tx.tx_list);

	desc_sw->hw = desc;
	desc_sw->async_tx.phys = phys;

	return desc_sw;
}
static int ioat_initial_desc_count = 256;
module_param(ioat_initial_desc_count, int, 0644);
MODULE_PARM_DESC(ioat_initial_desc_count,
		 "initial descriptors per channel (default: 256)");
/**
 * ioat2_dma_massage_chan_desc - link the descriptors into a circle
 * @ioat_chan: the channel to be massaged
 */
static void ioat2_dma_massage_chan_desc(struct ioat_dma_chan *ioat_chan)
{
	struct ioat_desc_sw *desc, *_desc;

	/* setup used_desc */
	ioat_chan->used_desc.next = ioat_chan->free_desc.next;
	ioat_chan->used_desc.prev = NULL;

	/* pull free_desc out of the circle so that every node is a hw
	 * descriptor, but leave it pointing to the list
	 */
	ioat_chan->free_desc.prev->next = ioat_chan->free_desc.next;
	ioat_chan->free_desc.next->prev = ioat_chan->free_desc.prev;

	/* circle link the hw descriptors */
	desc = to_ioat_desc(ioat_chan->free_desc.next);
	desc->hw->next = to_ioat_desc(desc->node.next)->async_tx.phys;
	list_for_each_entry_safe(desc, _desc, ioat_chan->free_desc.next, node) {
		desc->hw->next = to_ioat_desc(desc->node.next)->async_tx.phys;
	}
}
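/*
 * After this massaging the ver.2 channel owns a true ring: free_desc is
 * unlinked from the circle, every remaining node is a hardware descriptor
 * whose 'next' field holds the physical address of its successor, and
 * used_desc.next marks the next free slot while used_desc.prev marks the
 * first descriptor still waiting to be processed.
 */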
/**
 * ioat_dma_alloc_chan_resources - returns the number of allocated descriptors
 * @chan: the channel to be filled out
 */
static int ioat_dma_alloc_chan_resources(struct dma_chan *chan,
					 struct dma_client *client)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
	struct ioat_desc_sw *desc;
	u16 chanctrl;
	u32 chanerr;
	int i;
	LIST_HEAD(tmp_list);

	/* have we already been set up? */
	if (!list_empty(&ioat_chan->free_desc))
		return ioat_chan->desccount;

	/* Setup register to interrupt and write completion status on error */
	chanctrl = IOAT_CHANCTRL_ERR_INT_EN |
		IOAT_CHANCTRL_ANY_ERR_ABORT_EN |
		IOAT_CHANCTRL_ERR_COMPLETION_EN;
	writew(chanctrl, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);

	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	if (chanerr) {
		dev_err(&ioat_chan->device->pdev->dev,
			"CHANERR = %x, clearing\n", chanerr);
		writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	}

	/* Allocate descriptors */
	for (i = 0; i < ioat_initial_desc_count; i++) {
		desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_KERNEL);
		if (!desc) {
			dev_err(&ioat_chan->device->pdev->dev,
				"Only %d initial descriptors\n", i);
			break;
		}
		list_add_tail(&desc->node, &tmp_list);
	}
	spin_lock_bh(&ioat_chan->desc_lock);
	ioat_chan->desccount = i;
	list_splice(&tmp_list, &ioat_chan->free_desc);
	if (ioat_chan->device->version != IOAT_VER_1_2)
		ioat2_dma_massage_chan_desc(ioat_chan);
	spin_unlock_bh(&ioat_chan->desc_lock);

	/* allocate a completion writeback area */
	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
	ioat_chan->completion_virt =
		pci_pool_alloc(ioat_chan->device->completion_pool,
			       GFP_KERNEL,
			       &ioat_chan->completion_addr);
	memset(ioat_chan->completion_virt, 0,
	       sizeof(*ioat_chan->completion_virt));
	writel(((u64) ioat_chan->completion_addr) & 0x00000000FFFFFFFF,
	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
	writel(((u64) ioat_chan->completion_addr) >> 32,
	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

	tasklet_enable(&ioat_chan->cleanup_task);
	ioat_dma_start_null_desc(ioat_chan); /* give chain to dma device */
	return ioat_chan->desccount;
}
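/*
 * The completion area allocated above is an 8-byte region the hardware
 * writes back with the address of the last completed descriptor plus
 * status bits; ioat_dma_memcpy_cleanup() reads it instead of polling
 * channel registers.  Its physical address is programmed into CHANCMP with
 * two 32-bit writes because a single 64-bit MMIO write does not work here.
 */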
/**
 * ioat_dma_free_chan_resources - release all the descriptors
 * @chan: the channel to be cleaned
 */
static void ioat_dma_free_chan_resources(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
	struct ioatdma_device *ioatdma_device = to_ioatdma_device(chan->device);
	struct ioat_desc_sw *desc, *_desc;
	int in_use_descs = 0;

	tasklet_disable(&ioat_chan->cleanup_task);
	ioat_dma_memcpy_cleanup(ioat_chan);

	/* Delay 100ms after reset to allow internal DMA logic to quiesce
	 * before removing DMA descriptor resources.
	 */
	writeb(IOAT_CHANCMD_RESET,
	       ioat_chan->reg_base
	       + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
	mdelay(100);

	spin_lock_bh(&ioat_chan->desc_lock);
	switch (ioat_chan->device->version) {
	case IOAT_VER_1_2:
		list_for_each_entry_safe(desc, _desc,
					 &ioat_chan->used_desc, node) {
			in_use_descs++;
			list_del(&desc->node);
			pci_pool_free(ioatdma_device->dma_pool, desc->hw,
				      desc->async_tx.phys);
			kfree(desc);
		}
		list_for_each_entry_safe(desc, _desc,
					 &ioat_chan->free_desc, node) {
			list_del(&desc->node);
			pci_pool_free(ioatdma_device->dma_pool, desc->hw,
				      desc->async_tx.phys);
			kfree(desc);
		}
		break;
	case IOAT_VER_2_0:
	case IOAT_VER_3_0:
		list_for_each_entry_safe(desc, _desc,
					 ioat_chan->free_desc.next, node) {
			list_del(&desc->node);
			pci_pool_free(ioatdma_device->dma_pool, desc->hw,
				      desc->async_tx.phys);
			kfree(desc);
		}
		desc = to_ioat_desc(ioat_chan->free_desc.next);
		pci_pool_free(ioatdma_device->dma_pool, desc->hw,
			      desc->async_tx.phys);
		kfree(desc);
		INIT_LIST_HEAD(&ioat_chan->free_desc);
		INIT_LIST_HEAD(&ioat_chan->used_desc);
		break;
	}
	spin_unlock_bh(&ioat_chan->desc_lock);

	pci_pool_free(ioatdma_device->completion_pool,
		      ioat_chan->completion_virt,
		      ioat_chan->completion_addr);

	/* one is ok since we left it on there on purpose */
	if (in_use_descs > 1)
		dev_err(&ioat_chan->device->pdev->dev,
			"Freeing %d in use descriptors!\n",
			in_use_descs - 1);

	ioat_chan->last_completion = ioat_chan->completion_addr = 0;
	ioat_chan->pending = 0;
	ioat_chan->dmacount = 0;
	ioat_chan->watchdog_completion = 0;
	ioat_chan->last_compl_desc_addr_hw = 0;
	ioat_chan->watchdog_tcp_cookie =
		ioat_chan->watchdog_last_tcp_cookie = 0;
}
/**
 * ioat_dma_get_next_descriptor - return the next available descriptor
 * @ioat_chan: IOAT DMA channel handle
 *
 * Gets the next descriptor from the chain, and must be called with the
 * channel's desc_lock held. Allocates more descriptors if the channel
 * has run out.
 */
static struct ioat_desc_sw *
ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
{
	struct ioat_desc_sw *new;

	if (!list_empty(&ioat_chan->free_desc)) {
		new = to_ioat_desc(ioat_chan->free_desc.next);
		list_del(&new->node);
	} else {
		/* try to get another desc */
		new = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
		if (!new) {
			dev_err(&ioat_chan->device->pdev->dev,
				"alloc failed\n");
			return NULL;
		}
	}

	return new;
}
static struct ioat_desc_sw *
ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
{
	struct ioat_desc_sw *new;

	/*
	 * used.prev points to where to start processing
	 * used.next points to next free descriptor
	 * if used.prev == NULL, there are none waiting to be processed
	 * if used.next == used.prev.prev, there is only one free descriptor,
	 *      and we need to use it as a noop descriptor before
	 *      linking in a new set of descriptors, since the device
	 *      has probably already read the pointer to it
	 */
	if (ioat_chan->used_desc.prev &&
	    ioat_chan->used_desc.next == ioat_chan->used_desc.prev->prev) {

		struct ioat_desc_sw *desc;
		struct ioat_desc_sw *noop_desc;
		int i;

		/* set up the noop descriptor */
		noop_desc = to_ioat_desc(ioat_chan->used_desc.next);
		/* set size to non-zero value (channel returns error when size is 0) */
		noop_desc->hw->size = NULL_DESC_BUFFER_SIZE;
		noop_desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL;
		noop_desc->hw->src_addr = 0;
		noop_desc->hw->dst_addr = 0;

		ioat_chan->used_desc.next = ioat_chan->used_desc.next->next;
		ioat_chan->pending++;
		ioat_chan->dmacount++;

		/* try to get a few more descriptors */
		for (i = 16; i; i--) {
			desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
			if (!desc) {
				dev_err(&ioat_chan->device->pdev->dev,
					"alloc failed\n");
				break;
			}
			list_add_tail(&desc->node, ioat_chan->used_desc.next);

			desc->hw->next
				= to_ioat_desc(desc->node.next)->async_tx.phys;
			to_ioat_desc(desc->node.prev)->hw->next
				= desc->async_tx.phys;
			ioat_chan->desccount++;
		}

		ioat_chan->used_desc.next = noop_desc->node.next;
	}
	new = to_ioat_desc(ioat_chan->used_desc.next);
	ioat_chan->used_desc.next = new->node.next;

	if (ioat_chan->used_desc.prev == NULL)
		ioat_chan->used_desc.prev = &new->node;

	return new;
}
static struct ioat_desc_sw *ioat_dma_get_next_descriptor(
						struct ioat_dma_chan *ioat_chan)
{
	switch (ioat_chan->device->version) {
	case IOAT_VER_1_2:
		return ioat1_dma_get_next_descriptor(ioat_chan);
	case IOAT_VER_2_0:
	case IOAT_VER_3_0:
		return ioat2_dma_get_next_descriptor(ioat_chan);
	}
	return NULL;
}
static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy(
						struct dma_chan *chan,
						dma_addr_t dma_dest,
						dma_addr_t dma_src,
						size_t len,
						unsigned long flags)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
	struct ioat_desc_sw *new;

	spin_lock_bh(&ioat_chan->desc_lock);
	new = ioat_dma_get_next_descriptor(ioat_chan);
	spin_unlock_bh(&ioat_chan->desc_lock);

	if (new) {
		new->len = len;
		new->dst = dma_dest;
		new->src = dma_src;
		new->async_tx.flags = flags;
		return &new->async_tx;
	} else {
		dev_err(&ioat_chan->device->pdev->dev,
			"chan%d - get_next_desc failed: %d descs waiting, %d total desc\n",
			chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount);
		return NULL;
	}
}
static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy(
						struct dma_chan *chan,
						dma_addr_t dma_dest,
						dma_addr_t dma_src,
						size_t len,
						unsigned long flags)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
	struct ioat_desc_sw *new;

	spin_lock_bh(&ioat_chan->desc_lock);
	new = ioat2_dma_get_next_descriptor(ioat_chan);

	/*
	 * leave ioat_chan->desc_lock set in ioat 2 path
	 * it will get unlocked at end of tx_submit
	 */

	if (new) {
		new->len = len;
		new->dst = dma_dest;
		new->src = dma_src;
		new->async_tx.flags = flags;
		return &new->async_tx;
	} else {
		spin_unlock_bh(&ioat_chan->desc_lock);
		dev_err(&ioat_chan->device->pdev->dev,
			"chan%d - get_next_desc failed: %d descs waiting, %d total desc\n",
			chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount);
		return NULL;
	}
}
static void ioat_dma_cleanup_tasklet(unsigned long data)
{
	struct ioat_dma_chan *chan = (void *)data;
	ioat_dma_memcpy_cleanup(chan);
	writew(IOAT_CHANCTRL_INT_DISABLE,
	       chan->reg_base + IOAT_CHANCTRL_OFFSET);
}
static void
ioat_dma_unmap(struct ioat_dma_chan *ioat_chan, struct ioat_desc_sw *desc)
{
	/*
	 * yes we are unmapping both _page and _single
	 * alloc'd regions with unmap_page. Is this
	 * *really* that bad?
	 */
	if (!(desc->async_tx.flags & DMA_COMPL_SKIP_DEST_UNMAP))
		pci_unmap_page(ioat_chan->device->pdev,
				pci_unmap_addr(desc, dst),
				pci_unmap_len(desc, len),
				PCI_DMA_FROMDEVICE);

	if (!(desc->async_tx.flags & DMA_COMPL_SKIP_SRC_UNMAP))
		pci_unmap_page(ioat_chan->device->pdev,
				pci_unmap_addr(desc, src),
				pci_unmap_len(desc, len),
				PCI_DMA_TODEVICE);
}
/**
 * ioat_dma_memcpy_cleanup - clean up finished descriptors
 * @ioat_chan: ioat channel to be cleaned up
 */
static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
{
	unsigned long phys_complete;
	struct ioat_desc_sw *desc, *_desc;
	dma_cookie_t cookie = 0;
	unsigned long desc_phys;
	struct ioat_desc_sw *latest_desc;

	prefetch(ioat_chan->completion_virt);

	if (!spin_trylock_bh(&ioat_chan->cleanup_lock))
		return;

	/* The completion writeback can happen at any time,
	   so reads by the driver need to be atomic operations.
	   The descriptor physical addresses are limited to 32 bits
	   when the CPU can only do a 32-bit mov. */

#if (BITS_PER_LONG == 64)
	phys_complete =
		ioat_chan->completion_virt->full
		& IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
#else
	phys_complete =
		ioat_chan->completion_virt->low & IOAT_LOW_COMPLETION_MASK;
#endif

	if ((ioat_chan->completion_virt->full
		& IOAT_CHANSTS_DMA_TRANSFER_STATUS) ==
				IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED) {
		dev_err(&ioat_chan->device->pdev->dev,
			"Channel halted, chanerr = %x\n",
			readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET));

		/* TODO do something to salvage the situation */
	}

	if (phys_complete == ioat_chan->last_completion) {
		spin_unlock_bh(&ioat_chan->cleanup_lock);
		/*
		 * perhaps we're stuck so hard that the watchdog can't go off?
		 * try to catch it after 2 seconds
		 */
		if (ioat_chan->device->version != IOAT_VER_3_0) {
			if (time_after(jiffies,
				       ioat_chan->last_completion_time + HZ*WATCHDOG_DELAY)) {
				ioat_dma_chan_watchdog(&(ioat_chan->device->work.work));
				ioat_chan->last_completion_time = jiffies;
			}
		}
		return;
	}
	ioat_chan->last_completion_time = jiffies;

	if (!spin_trylock_bh(&ioat_chan->desc_lock)) {
		spin_unlock_bh(&ioat_chan->cleanup_lock);
		return;
	}

	switch (ioat_chan->device->version) {
	case IOAT_VER_1_2:
		list_for_each_entry_safe(desc, _desc,
					 &ioat_chan->used_desc, node) {

			/*
			 * Incoming DMA requests may use multiple descriptors,
			 * due to exceeding xfercap, perhaps. If so, only the
			 * last one will have a cookie, and require unmapping.
			 */
			if (desc->async_tx.cookie) {
				cookie = desc->async_tx.cookie;
				ioat_dma_unmap(ioat_chan, desc);
				if (desc->async_tx.callback) {
					desc->async_tx.callback(desc->async_tx.callback_param);
					desc->async_tx.callback = NULL;
				}
			}

			if (desc->async_tx.phys != phys_complete) {
				/*
				 * a completed entry, but not the last, so clean
				 * up if the client is done with the descriptor
				 */
				if (async_tx_test_ack(&desc->async_tx)) {
					list_del(&desc->node);
					list_add_tail(&desc->node,
						      &ioat_chan->free_desc);
				} else
					desc->async_tx.cookie = 0;
			} else {
				/*
				 * last used desc. Do not remove, so we can
				 * append from it, but don't look at it next
				 * time, either
				 */
				desc->async_tx.cookie = 0;

				/* TODO check status bits? */
				break;
			}
		}
		break;
	case IOAT_VER_2_0:
	case IOAT_VER_3_0:
		/* has some other thread already cleaned up? */
		if (ioat_chan->used_desc.prev == NULL)
			break;

		/* work backwards to find latest finished desc */
		desc = to_ioat_desc(ioat_chan->used_desc.next);
		latest_desc = NULL;
		do {
			desc = to_ioat_desc(desc->node.prev);
			desc_phys = (unsigned long)desc->async_tx.phys
				       & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
			if (desc_phys == phys_complete) {
				latest_desc = desc;
				break;
			}
		} while (&desc->node != ioat_chan->used_desc.prev);

		if (latest_desc != NULL) {

			/* work forwards to clear finished descriptors */
			for (desc = to_ioat_desc(ioat_chan->used_desc.prev);
			     &desc->node != latest_desc->node.next &&
			     &desc->node != ioat_chan->used_desc.next;
			     desc = to_ioat_desc(desc->node.next)) {
				if (desc->async_tx.cookie) {
					cookie = desc->async_tx.cookie;
					desc->async_tx.cookie = 0;
					ioat_dma_unmap(ioat_chan, desc);
					if (desc->async_tx.callback) {
						desc->async_tx.callback(desc->async_tx.callback_param);
						desc->async_tx.callback = NULL;
					}
				}
			}

			/* move used.prev up beyond those that are finished */
			if (&desc->node == ioat_chan->used_desc.next)
				ioat_chan->used_desc.prev = NULL;
			else
				ioat_chan->used_desc.prev = &desc->node;
		}
		break;
	}

	spin_unlock_bh(&ioat_chan->desc_lock);

	ioat_chan->last_completion = phys_complete;
	if (cookie != 0)
		ioat_chan->completed_cookie = cookie;

	spin_unlock_bh(&ioat_chan->cleanup_lock);
}
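/*
 * last_completion caches the most recent writeback value, so the common
 * "nothing new finished" case exits before taking desc_lock; the ver.3.0
 * special case above avoids kicking the watchdog on hardware for which it
 * is never scheduled.
 */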
/**
 * ioat_dma_is_complete - poll the status of an IOAT DMA transaction
 * @chan: IOAT DMA channel handle
 * @cookie: DMA transaction identifier
 * @done: if not %NULL, updated with last completed transaction
 * @used: if not %NULL, updated with last used transaction
 */
static enum dma_status ioat_dma_is_complete(struct dma_chan *chan,
					    dma_cookie_t cookie,
					    dma_cookie_t *done,
					    dma_cookie_t *used)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	enum dma_status ret;

	last_used = chan->cookie;
	last_complete = ioat_chan->completed_cookie;
	ioat_chan->watchdog_tcp_cookie = cookie;

	if (done)
		*done = last_complete;
	if (used)
		*used = last_used;

	ret = dma_async_is_complete(cookie, last_complete, last_used);
	if (ret == DMA_SUCCESS)
		return ret;

	ioat_dma_memcpy_cleanup(ioat_chan);

	last_used = chan->cookie;
	last_complete = ioat_chan->completed_cookie;

	if (done)
		*done = last_complete;
	if (used)
		*used = last_used;

	return dma_async_is_complete(cookie, last_complete, last_used);
}
static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
{
	struct ioat_desc_sw *desc;

	spin_lock_bh(&ioat_chan->desc_lock);

	desc = ioat_dma_get_next_descriptor(ioat_chan);
	if (!desc) {
		dev_err(&ioat_chan->device->pdev->dev,
			"Unable to start null desc - get next desc failed\n");
		spin_unlock_bh(&ioat_chan->desc_lock);
		return;
	}

	desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL
				| IOAT_DMA_DESCRIPTOR_CTL_INT_GN
				| IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
	/* set size to non-zero value (channel returns error when size is 0) */
	desc->hw->size = NULL_DESC_BUFFER_SIZE;
	desc->hw->src_addr = 0;
	desc->hw->dst_addr = 0;
	async_tx_ack(&desc->async_tx);
	switch (ioat_chan->device->version) {
	case IOAT_VER_1_2:
		desc->hw->next = 0;
		list_add_tail(&desc->node, &ioat_chan->used_desc);

		writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
		       ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
		writel(((u64) desc->async_tx.phys) >> 32,
		       ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH);

		writeb(IOAT_CHANCMD_START, ioat_chan->reg_base
			+ IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
		break;
	case IOAT_VER_2_0:
	case IOAT_VER_3_0:
		writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
		       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
		writel(((u64) desc->async_tx.phys) >> 32,
		       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);

		ioat_chan->dmacount++;
		__ioat2_dma_memcpy_issue_pending(ioat_chan);
		break;
	}
	spin_unlock_bh(&ioat_chan->desc_lock);
}
/*
 * Perform an IOAT transaction to verify the HW works.
 */
#define IOAT_TEST_SIZE 2000

static void ioat_dma_test_callback(void *dma_async_param)
{
	printk(KERN_ERR "ioatdma: ioat_dma_test_callback(%p)\n",
		dma_async_param);
}
/**
 * ioat_dma_self_test - Perform an IOAT transaction to verify the HW works.
 * @device: device to be tested
 */
static int ioat_dma_self_test(struct ioatdma_device *device)
{
	int i;
	u8 *src;
	u8 *dest;
	struct dma_chan *dma_chan;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	int err = 0;

	src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;
	dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < IOAT_TEST_SIZE; i++)
		src[i] = (u8)i;

	/* Start copy, using first DMA channel */
	dma_chan = container_of(device->common.channels.next,
				struct dma_chan,
				device_node);
	if (device->common.device_alloc_chan_resources(dma_chan, NULL) < 1) {
		dev_err(&device->pdev->dev,
			"selftest cannot allocate chan resource\n");
		err = -ENODEV;
		goto out;
	}

	dma_src = dma_map_single(dma_chan->device->dev, src, IOAT_TEST_SIZE,
				 DMA_TO_DEVICE);
	dma_dest = dma_map_single(dma_chan->device->dev, dest, IOAT_TEST_SIZE,
				  DMA_FROM_DEVICE);
	tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
						   IOAT_TEST_SIZE, 0);
	if (!tx) {
		dev_err(&device->pdev->dev,
			"Self-test prep failed, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	async_tx_ack(tx);
	tx->callback = ioat_dma_test_callback;
	tx->callback_param = (void *)0x8086;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(&device->pdev->dev,
			"Self-test setup failed, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}
	device->common.device_issue_pending(dma_chan);
	msleep(1);

	if (device->common.device_is_tx_complete(dma_chan, cookie, NULL, NULL)
					!= DMA_SUCCESS) {
		dev_err(&device->pdev->dev,
			"Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}
	if (memcmp(src, dest, IOAT_TEST_SIZE)) {
		dev_err(&device->pdev->dev,
			"Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	device->common.device_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}
static char ioat_interrupt_style[32] = "msix";
module_param_string(ioat_interrupt_style, ioat_interrupt_style,
		    sizeof(ioat_interrupt_style), 0644);
MODULE_PARM_DESC(ioat_interrupt_style,
		 "set ioat interrupt style: msix (default), "
		 "msix-single-vector, msi, intx");
/**
 * ioat_dma_setup_interrupts - setup interrupt handler
 * @device: ioat device
 */
static int ioat_dma_setup_interrupts(struct ioatdma_device *device)
{
	struct ioat_dma_chan *ioat_chan;
	int err, i, j, msixcnt;
	u8 intrctrl = 0;

	if (!strcmp(ioat_interrupt_style, "msix"))
		goto msix;
	if (!strcmp(ioat_interrupt_style, "msix-single-vector"))
		goto msix_single_vector;
	if (!strcmp(ioat_interrupt_style, "msi"))
		goto msi;
	if (!strcmp(ioat_interrupt_style, "intx"))
		goto intx;
	dev_err(&device->pdev->dev, "invalid ioat_interrupt_style %s\n",
		ioat_interrupt_style);
	goto err_no_irq;

msix:
	/* The number of MSI-X vectors should equal the number of channels */
	msixcnt = device->common.chancnt;
	for (i = 0; i < msixcnt; i++)
		device->msix_entries[i].entry = i;

	err = pci_enable_msix(device->pdev, device->msix_entries, msixcnt);
	if (err < 0)
		goto msi;
	if (err > 0)
		goto msix_single_vector;

	for (i = 0; i < msixcnt; i++) {
		ioat_chan = ioat_lookup_chan_by_index(device, i);
		err = request_irq(device->msix_entries[i].vector,
				  ioat_dma_do_interrupt_msix,
				  0, "ioat-msix", ioat_chan);
		if (err) {
			for (j = 0; j < i; j++) {
				ioat_chan =
					ioat_lookup_chan_by_index(device, j);
				free_irq(device->msix_entries[j].vector,
					 ioat_chan);
			}
			goto msix_single_vector;
		}
	}
	intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
	device->irq_mode = msix_multi_vector;
	goto done;

msix_single_vector:
	device->msix_entries[0].entry = 0;
	err = pci_enable_msix(device->pdev, device->msix_entries, 1);
	if (err)
		goto msi;

	err = request_irq(device->msix_entries[0].vector, ioat_dma_do_interrupt,
			  0, "ioat-msix", device);
	if (err) {
		pci_disable_msix(device->pdev);
		goto msi;
	}
	device->irq_mode = msix_single_vector;
	goto done;

msi:
	err = pci_enable_msi(device->pdev);
	if (err)
		goto intx;

	err = request_irq(device->pdev->irq, ioat_dma_do_interrupt,
			  0, "ioat-msi", device);
	if (err) {
		pci_disable_msi(device->pdev);
		goto intx;
	}
	/*
	 * CB 1.2 devices need a bit set in configuration space to enable MSI
	 */
	if (device->version == IOAT_VER_1_2) {
		u32 dmactrl;
		pci_read_config_dword(device->pdev,
				      IOAT_PCI_DMACTRL_OFFSET, &dmactrl);
		dmactrl |= IOAT_PCI_DMACTRL_MSI_EN;
		pci_write_config_dword(device->pdev,
				       IOAT_PCI_DMACTRL_OFFSET, dmactrl);
	}
	device->irq_mode = msi;
	goto done;

intx:
	err = request_irq(device->pdev->irq, ioat_dma_do_interrupt,
			  IRQF_SHARED, "ioat-intx", device);
	if (err)
		goto err_no_irq;
	device->irq_mode = intx;

done:
	intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
	writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET);
	return 0;

err_no_irq:
	/* Disable all interrupt generation */
	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
	dev_err(&device->pdev->dev, "no usable interrupts\n");
	device->irq_mode = none;
	return -1;
}
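/*
 * Interrupt setup falls back in order: per-channel MSI-X, then a single
 * shared MSI-X vector, then MSI (with the extra DMACTRL bit on CB 1.2
 * parts), and finally legacy INTx; ioat_interrupt_style only picks the
 * starting point of that chain.
 */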
/**
 * ioat_dma_remove_interrupts - remove whatever interrupts were set
 * @device: ioat device
 */
static void ioat_dma_remove_interrupts(struct ioatdma_device *device)
{
	struct ioat_dma_chan *ioat_chan;
	int i;

	/* Disable all interrupt generation */
	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);

	switch (device->irq_mode) {
	case msix_multi_vector:
		for (i = 0; i < device->common.chancnt; i++) {
			ioat_chan = ioat_lookup_chan_by_index(device, i);
			free_irq(device->msix_entries[i].vector, ioat_chan);
		}
		pci_disable_msix(device->pdev);
		break;
	case msix_single_vector:
		free_irq(device->msix_entries[0].vector, device);
		pci_disable_msix(device->pdev);
		break;
	case msi:
		free_irq(device->pdev->irq, device);
		pci_disable_msi(device->pdev);
		break;
	case intx:
		free_irq(device->pdev->irq, device);
		break;
	case none:
		dev_warn(&device->pdev->dev,
			 "call to %s without interrupts setup\n", __func__);
	}
	device->irq_mode = none;
}
struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
				      void __iomem *iobase)
{
	int err;
	struct ioatdma_device *device;

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device) {
		err = -ENOMEM;
		goto err_kzalloc;
	}
	device->pdev = pdev;
	device->reg_base = iobase;
	device->version = readb(device->reg_base + IOAT_VER_OFFSET);

	/* DMA coherent memory pool for DMA descriptor allocations */
	device->dma_pool = pci_pool_create("dma_desc_pool", pdev,
					   sizeof(struct ioat_dma_descriptor),
					   64, 0);
	if (!device->dma_pool) {
		err = -ENOMEM;
		goto err_dma_pool;
	}

	device->completion_pool = pci_pool_create("completion_pool", pdev,
						  sizeof(u64), SMP_CACHE_BYTES,
						  SMP_CACHE_BYTES);
	if (!device->completion_pool) {
		err = -ENOMEM;
		goto err_completion_pool;
	}

	INIT_LIST_HEAD(&device->common.channels);
	ioat_dma_enumerate_channels(device);

	device->common.device_alloc_chan_resources =
						ioat_dma_alloc_chan_resources;
	device->common.device_free_chan_resources =
						ioat_dma_free_chan_resources;
	device->common.dev = &pdev->dev;

	dma_cap_set(DMA_MEMCPY, device->common.cap_mask);
	device->common.device_is_tx_complete = ioat_dma_is_complete;
	switch (device->version) {
	case IOAT_VER_1_2:
		device->common.device_prep_dma_memcpy = ioat1_dma_prep_memcpy;
		device->common.device_issue_pending =
						ioat1_dma_memcpy_issue_pending;
		break;
	case IOAT_VER_2_0:
	case IOAT_VER_3_0:
		device->common.device_prep_dma_memcpy = ioat2_dma_prep_memcpy;
		device->common.device_issue_pending =
						ioat2_dma_memcpy_issue_pending;
		break;
	}

	dev_err(&device->pdev->dev,
		"Intel(R) I/OAT DMA Engine found,"
		" %d channels, device version 0x%02x, driver version %s\n",
		device->common.chancnt, device->version, IOAT_DMA_VERSION);

	err = ioat_dma_setup_interrupts(device);
	if (err)
		goto err_setup_interrupts;

	err = ioat_dma_self_test(device);
	if (err)
		goto err_self_test;

	ioat_set_tcp_copy_break(device);

	dma_async_device_register(&device->common);

	if (device->version != IOAT_VER_3_0) {
		INIT_DELAYED_WORK(&device->work, ioat_dma_chan_watchdog);
		schedule_delayed_work(&device->work,
				      WATCHDOG_DELAY);
	}

	return device;

err_self_test:
	ioat_dma_remove_interrupts(device);
err_setup_interrupts:
	pci_pool_destroy(device->completion_pool);
err_completion_pool:
	pci_pool_destroy(device->dma_pool);
err_dma_pool:
	kfree(device);
err_kzalloc:
	dev_err(&pdev->dev,
		"Intel(R) I/OAT DMA Engine initialization failed\n");
	return NULL;
}
void ioat_dma_remove(struct ioatdma_device *device)
{
	struct dma_chan *chan, *_chan;
	struct ioat_dma_chan *ioat_chan;

	ioat_dma_remove_interrupts(device);

	dma_async_device_unregister(&device->common);

	pci_pool_destroy(device->dma_pool);
	pci_pool_destroy(device->completion_pool);

	iounmap(device->reg_base);
	pci_release_regions(device->pdev);
	pci_disable_device(device->pdev);

	if (device->version != IOAT_VER_3_0) {
		cancel_delayed_work(&device->work);
	}

	list_for_each_entry_safe(chan, _chan,
				 &device->common.channels, device_node) {
		ioat_chan = to_ioat_chan(chan);
		list_del(&chan->device_node);