/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2009 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 */
/*
 * This driver supports an Intel I/OAT DMA engine (versions >= 2), which
 * does asynchronous data movement and checksumming operations.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/i7300_idle.h>
#include "dma.h"
#include "dma_v2.h"
#include "registers.h"
#include "hw.h"
static int ioat_ring_alloc_order = 8;
module_param(ioat_ring_alloc_order, int, 0644);
MODULE_PARM_DESC(ioat_ring_alloc_order,
		 "ioat2+: allocate 2^n descriptors per channel (default: n=8)");
static void __ioat2_issue_pending(struct ioat2_dma_chan *ioat)
{
	void __iomem *reg_base = ioat->base.reg_base;

	ioat->pending = 0;
	ioat->dmacount += ioat2_ring_pending(ioat);
	ioat->issued = ioat->head;
	/* make descriptor updates globally visible before notifying channel */
	wmb();
	writew(ioat->dmacount, reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
	dev_dbg(to_dev(&ioat->base),
		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
		__func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount);
}
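/* Worked example (illustrative, assuming ioat2_ring_pending() counts the
 * descriptors sitting between ->issued and ->head): if head == 7 and
 * issued == 4, three descriptors have been prepared since the last
 * notification, so dmacount grows by 3, ->issued catches up to ->head, and
 * the new dmacount written to the DMACOUNT register hands those descriptors
 * to the engine.
 */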
static void ioat2_issue_pending(struct dma_chan *chan)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(chan);

	spin_lock_bh(&ioat->ring_lock);
	if (ioat->pending == 1)
		__ioat2_issue_pending(ioat);
	spin_unlock_bh(&ioat->ring_lock);
}
/**
 * ioat2_update_pending - log pending descriptors
 * @ioat: ioat2+ channel
 *
 * Set pending to '1' unless pending is already set to '2'; pending == 2
 * indicates that submission is temporarily blocked due to an in-flight
 * reset.  If we are already above the ioat_pending_level threshold then
 * just issue the pending descriptors.
 *
 * Called with ring_lock held.
 */
static void ioat2_update_pending(struct ioat2_dma_chan *ioat)
{
	if (unlikely(ioat->pending == 2))
		return;
	else if (ioat2_ring_pending(ioat) > ioat_pending_level)
		__ioat2_issue_pending(ioat);
	else
		ioat->pending = 1;
}
static void __ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
{
	void __iomem *reg_base = ioat->base.reg_base;
	struct ioat_ring_ent *desc;
	struct ioat_dma_descriptor *hw;
	int idx;

	if (ioat2_ring_space(ioat) < 1) {
		dev_err(to_dev(&ioat->base),
			"Unable to start null desc - ring full\n");
		return;
	}

	dev_dbg(to_dev(&ioat->base), "%s: head: %#x tail: %#x issued: %#x\n",
		__func__, ioat->head, ioat->tail, ioat->issued);
	idx = ioat2_desc_alloc(ioat, 1);
	desc = ioat2_get_ring_ent(ioat, idx);

	hw = desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = 1;
	hw->ctl_f.compl_write = 1;
	/* set size to non-zero value (channel returns error when size is 0) */
	hw->size = NULL_DESC_BUFFER_SIZE;
	async_tx_ack(&desc->txd);
	writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF,
	       reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
	writel(((u64) desc->txd.phys) >> 32,
	       reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
	dump_desc_dbg(ioat, desc);
	__ioat2_issue_pending(ioat);
}
static void ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
{
	spin_lock_bh(&ioat->ring_lock);
	__ioat2_start_null_desc(ioat);
	spin_unlock_bh(&ioat->ring_lock);
}
static void ioat2_cleanup(struct ioat2_dma_chan *ioat);
/**
 * ioat2_reset_part2 - reinit the channel after a reset
 */
static void ioat2_reset_part2(struct work_struct *work)
{
	struct ioat_chan_common *chan;
	struct ioat2_dma_chan *ioat;

	chan = container_of(work, struct ioat_chan_common, work.work);
	ioat = container_of(chan, struct ioat2_dma_chan, base);

	/* ensure that ->tail points to the stalled descriptor
	 * (ioat->pending is set to 2 at this point so no new
	 * descriptors will be issued while we perform this cleanup)
	 */
	ioat2_cleanup(ioat);

	spin_lock_bh(&chan->cleanup_lock);
	spin_lock_bh(&ioat->ring_lock);

	/* set the tail to be re-issued */
	ioat->issued = ioat->tail;
	ioat->dmacount = 0;

	dev_dbg(to_dev(&ioat->base),
		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
		__func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount);

	if (ioat2_ring_pending(ioat)) {
		struct ioat_ring_ent *desc;

		desc = ioat2_get_ring_ent(ioat, ioat->tail);
		writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF,
		       chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
		writel(((u64) desc->txd.phys) >> 32,
		       chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
		__ioat2_issue_pending(ioat);
	} else
		__ioat2_start_null_desc(ioat);

	spin_unlock_bh(&ioat->ring_lock);
	spin_unlock_bh(&chan->cleanup_lock);

	dev_info(to_dev(chan),
		 "chan%d reset - %d descs waiting, %d total desc\n",
		 chan_num(chan), ioat->dmacount, 1 << ioat->alloc_order);
}
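/* Editorial note: this work handler runs after the reset delay and restarts
 * the channel from the oldest un-completed descriptor.  Completed work is
 * first reclaimed via ioat2_cleanup(), ->issued is rolled back to ->tail and
 * dmacount cleared, then the chain address register is pointed at the stalled
 * descriptor (if any remain) so __ioat2_issue_pending() can re-submit them;
 * otherwise a null descriptor is used to re-arm the channel.
 */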
/**
 * ioat2_reset_channel - restart a channel
 * @ioat: IOAT DMA channel handle
 */
static void ioat2_reset_channel(struct ioat2_dma_chan *ioat)
{
	u32 chansts, chanerr;
	struct ioat_chan_common *chan = &ioat->base;
	u16 active;

	spin_lock_bh(&ioat->ring_lock);
	active = ioat2_ring_active(ioat);
	spin_unlock_bh(&ioat->ring_lock);
	if (!active)
		return;

	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
	chansts = *chan->completion & IOAT_CHANSTS_DMA_TRANSFER_STATUS;
	if (chanerr) {
		dev_err(to_dev(chan),
			"chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n",
			chan_num(chan), chansts, chanerr);
		writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
	}

	spin_lock_bh(&ioat->ring_lock);
	ioat->pending = 2;
	writeb(IOAT_CHANCMD_RESET,
	       chan->reg_base
	       + IOAT_CHANCMD_OFFSET(chan->device->version));
	spin_unlock_bh(&ioat->ring_lock);
	schedule_delayed_work(&chan->work, RESET_DELAY);
}
/**
 * ioat2_chan_watchdog - watch for stuck channels
 */
static void ioat2_chan_watchdog(struct work_struct *work)
{
	struct ioatdma_device *device =
		container_of(work, struct ioatdma_device, work.work);
	struct ioat2_dma_chan *ioat;
	struct ioat_chan_common *chan;
	u16 active;
	int i;

	dev_dbg(&device->pdev->dev, "%s\n", __func__);

	for (i = 0; i < device->common.chancnt; i++) {
		chan = ioat_chan_by_index(device, i);
		ioat = container_of(chan, struct ioat2_dma_chan, base);

		/*
		 * for version 2.0 if there are descriptors yet to be processed
		 * and the last completed hasn't changed since the last watchdog
		 *	if they haven't hit the pending level
		 *	    issue the pending to push them through
		 *	else
		 *	    try resetting the channel
		 */
		spin_lock_bh(&ioat->ring_lock);
		active = ioat2_ring_active(ioat);
		spin_unlock_bh(&ioat->ring_lock);

		if (active &&
		    chan->last_completion &&
		    chan->last_completion == chan->watchdog_completion) {
			if (ioat->pending == 1)
				ioat2_issue_pending(&chan->common);
			else {
				ioat2_reset_channel(ioat);
				chan->watchdog_completion = 0;
			}
		} else {
			chan->last_compl_desc_addr_hw = 0;
			chan->watchdog_completion = chan->last_completion;
		}
		chan->watchdog_last_tcp_cookie = chan->watchdog_tcp_cookie;
	}
	schedule_delayed_work(&device->work, WATCHDOG_DELAY);
}
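/* Editorial note: the watchdog re-schedules itself every WATCHDOG_DELAY and
 * flags a channel as stuck when it still has active descriptors but its
 * completion address has not moved since the previous pass (last_completion
 * == watchdog_completion).  A stuck channel is first nudged by issuing any
 * pending descriptors; if nothing was pending, the channel is reset.
 */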
/**
 * ioat2_cleanup - clean finished descriptors (advance tail pointer)
 * @chan: ioat channel to be cleaned up
 */
static void ioat2_cleanup(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	unsigned long phys_complete;
	struct ioat_ring_ent *desc;
	bool seen_current = false;
	u16 active;
	int i;
	struct dma_async_tx_descriptor *tx;

	prefetch(chan->completion);

	spin_lock_bh(&chan->cleanup_lock);
	phys_complete = ioat_get_current_completion(chan);
	if (phys_complete == chan->last_completion) {
		spin_unlock_bh(&chan->cleanup_lock);
		/*
		 * perhaps we're stuck so hard that the watchdog can't go off?
		 * try to catch it after WATCHDOG_DELAY seconds
		 */
		if (chan->device->version < IOAT_VER_3_0) {
			unsigned long tmo;

			tmo = chan->last_completion_time + HZ*WATCHDOG_DELAY;
			if (time_after(jiffies, tmo)) {
				ioat2_chan_watchdog(&(chan->device->work.work));
				chan->last_completion_time = jiffies;
			}
		}
		return;
	}
	chan->last_completion_time = jiffies;

	spin_lock_bh(&ioat->ring_lock);

	dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n",
		__func__, ioat->head, ioat->tail, ioat->issued);

	active = ioat2_ring_active(ioat);
	for (i = 0; i < active && !seen_current; i++) {
		prefetch(ioat2_get_ring_ent(ioat, ioat->tail + i + 1));
		desc = ioat2_get_ring_ent(ioat, ioat->tail + i);
		tx = &desc->txd;
		dump_desc_dbg(ioat, desc);
		if (tx->cookie) {
			ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw);
			chan->completed_cookie = tx->cookie;
			tx->cookie = 0;
			if (tx->callback) {
				tx->callback(tx->callback_param);
				tx->callback = NULL;
			}
		}

		if (tx->phys == phys_complete)
			seen_current = true;
	}
	ioat->tail += i;
	BUG_ON(!seen_current); /* no active descs have written a completion? */
	spin_unlock_bh(&ioat->ring_lock);

	chan->last_completion = phys_complete;

	spin_unlock_bh(&chan->cleanup_lock);
}
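/* Editorial note: cleanup walks the ring from ->tail toward ->head, retiring
 * each descriptor (unmap, record the cookie, fire any callback) until it
 * reaches the one whose physical address matches the completion writeback
 * (phys_complete); ->tail then advances past everything retired.  The BUG_ON
 * above asserts that the hardware-reported completion address always lands
 * on an active descriptor.
 */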
static void ioat2_cleanup_tasklet(unsigned long data)
{
	struct ioat2_dma_chan *ioat = (void *) data;

	ioat2_cleanup(ioat);
	writew(IOAT_CHANCTRL_INT_DISABLE,
	       ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
}
/**
 * ioat2_enumerate_channels - find and initialize the device's channels
 * @device: the device to be enumerated
 */
static int ioat2_enumerate_channels(struct ioatdma_device *device)
{
	struct ioat2_dma_chan *ioat;
	struct device *dev = &device->pdev->dev;
	struct dma_device *dma = &device->common;
	u8 xfercap_log;
	int i;

	INIT_LIST_HEAD(&dma->channels);
	dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
	xfercap_log = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
	if (xfercap_log == 0)
		return 0;
	dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log);

	/* FIXME which i/oat version is i7300? */
#ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL
	if (i7300_idle_platform_probe(NULL, NULL, 1) == 0)
		dma->chancnt--;
#endif
	for (i = 0; i < dma->chancnt; i++) {
		ioat = devm_kzalloc(dev, sizeof(*ioat), GFP_KERNEL);
		if (!ioat)
			break;

		ioat_init_channel(device, &ioat->base, i,
				  ioat2_reset_part2,
				  ioat2_cleanup_tasklet,
				  (unsigned long) ioat);
		ioat->xfercap_log = xfercap_log;
		spin_lock_init(&ioat->ring_lock);
	}
	dma->chancnt = i;
	return i;
}
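/* Worked example (illustrative): the XFERCAP register reports the log2 of the
 * per-descriptor transfer limit, so xfercap_log == 20 would mean each
 * descriptor can move at most 1 << 20 bytes (1 MB); ioat2_dma_prep_memcpy_lock()
 * below splits larger requests across multiple descriptors accordingly.
 */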
static dma_cookie_t ioat2_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
{
	struct dma_chan *c = tx->chan;
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	dma_cookie_t cookie = c->cookie;

	cookie++;
	if (cookie < 0)
		cookie = 1;
	tx->cookie = cookie;
	c->cookie = cookie;
	dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie);

	ioat2_update_pending(ioat);
	spin_unlock_bh(&ioat->ring_lock);

	return cookie;
}
static struct ioat_ring_ent *ioat2_alloc_ring_ent(struct dma_chan *chan)
{
	struct ioat_dma_descriptor *hw;
	struct ioat_ring_ent *desc;
	struct ioatdma_device *dma;
	dma_addr_t phys;

	dma = to_ioatdma_device(chan->device);
	hw = pci_pool_alloc(dma->dma_pool, GFP_KERNEL, &phys);
	if (!hw)
		return NULL;
	memset(hw, 0, sizeof(*hw));

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc) {
		pci_pool_free(dma->dma_pool, hw, phys);
		return NULL;
	}

	dma_async_tx_descriptor_init(&desc->txd, chan);
	desc->txd.tx_submit = ioat2_tx_submit_unlock;
	desc->hw = hw;
	desc->txd.phys = phys;
	return desc;
}
static void ioat2_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan)
{
	struct ioatdma_device *dma;

	dma = to_ioatdma_device(chan->device);
	pci_pool_free(dma->dma_pool, desc->hw, desc->txd.phys);
	kfree(desc);
}
/* ioat2_alloc_chan_resources - allocate/initialize ioat2 descriptor ring
 * @chan: channel to be initialized
 */
static int ioat2_alloc_chan_resources(struct dma_chan *c)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_ring_ent **ring;
	u16 chanctrl;
	u32 chanerr;
	int descs;
	int i;

	/* have we already been set up? */
	if (ioat->ring)
		return 1 << ioat->alloc_order;

	/* Setup register to interrupt and write completion status on error */
	chanctrl = IOAT_CHANCTRL_ERR_INT_EN | IOAT_CHANCTRL_ANY_ERR_ABORT_EN |
		   IOAT_CHANCTRL_ERR_COMPLETION_EN;
	writew(chanctrl, chan->reg_base + IOAT_CHANCTRL_OFFSET);

	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
	if (chanerr) {
		dev_err(to_dev(chan), "CHANERR = %x, clearing\n", chanerr);
		writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
	}

	/* allocate a completion writeback area */
	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
	chan->completion = pci_pool_alloc(chan->device->completion_pool,
					  GFP_KERNEL, &chan->completion_dma);
	if (!chan->completion)
		return -ENOMEM;

	memset(chan->completion, 0, sizeof(*chan->completion));
	writel(((u64) chan->completion_dma) & 0x00000000FFFFFFFF,
	       chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
	writel(((u64) chan->completion_dma) >> 32,
	       chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

	ioat->alloc_order = ioat_get_alloc_order();
	descs = 1 << ioat->alloc_order;

	/* allocate the array to hold the software ring */
	ring = kcalloc(descs, sizeof(*ring), GFP_KERNEL);
	if (!ring)
		return -ENOMEM;
	for (i = 0; i < descs; i++) {
		ring[i] = ioat2_alloc_ring_ent(c);
		if (!ring[i]) {
			while (i--)
				ioat2_free_ring_ent(ring[i], c);
			kfree(ring);
			return -ENOMEM;
		}
		set_desc_id(ring[i], i);
	}

	/* link descs to a ring */
	for (i = 0; i < descs-1; i++) {
		struct ioat_ring_ent *next = ring[i+1];
		struct ioat_dma_descriptor *hw = ring[i]->hw;

		hw->next = next->txd.phys;
	}
	ring[i]->hw->next = ring[0]->txd.phys;

	spin_lock_bh(&ioat->ring_lock);
	ioat->ring = ring;
	ioat->head = 0;
	ioat->issued = 0;
	ioat->tail = 0;
	ioat->pending = 0;
	spin_unlock_bh(&ioat->ring_lock);

	tasklet_enable(&chan->cleanup_task);
	ioat2_start_null_desc(ioat);

	return 1 << ioat->alloc_order;
}
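/* Worked example (illustrative): with alloc_order == 2 the ring holds
 * 1 << 2 == 4 descriptors.  The linking loop above chains them by physical
 * address, hw[0]->next = hw[1], hw[1]->next = hw[2], hw[2]->next = hw[3],
 * and the final assignment closes the circle with hw[3]->next = hw[0], so
 * the channel can follow the chain indefinitely while software recycles
 * entries between ->tail and ->head.
 */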
/**
 * ioat2_alloc_and_lock - common descriptor alloc boilerplate for ioat2,3 ops
 * @idx: gets starting descriptor index on successful allocation
 * @ioat: ioat2,3 channel (ring) to operate on
 * @num_descs: allocation length
 */
static int ioat2_alloc_and_lock(u16 *idx, struct ioat2_dma_chan *ioat, int num_descs)
{
	struct ioat_chan_common *chan = &ioat->base;

	spin_lock_bh(&ioat->ring_lock);
	if (unlikely(ioat2_ring_space(ioat) < num_descs)) {
		if (printk_ratelimit())
			dev_dbg(to_dev(chan),
				"%s: ring full! num_descs: %d (%x:%x:%x)\n",
				__func__, num_descs, ioat->head, ioat->tail,
				ioat->issued);
		spin_unlock_bh(&ioat->ring_lock);

		/* do direct reclaim in the allocation failure case */
		ioat2_cleanup(ioat);

		return -ENOMEM;
	}

	dev_dbg(to_dev(chan), "%s: num_descs: %d (%x:%x:%x)\n",
		__func__, num_descs, ioat->head, ioat->tail, ioat->issued);

	*idx = ioat2_desc_alloc(ioat, num_descs);
	return 0;  /* with ioat->ring_lock held */
}
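/* Editorial note: on success this returns 0 with ioat->ring_lock still held;
 * the lock is intentionally carried from the prep routine below through to
 * ioat2_tx_submit_unlock(), which drops it, so that descriptors reach the
 * ring in submission order.
 */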
static struct dma_async_tx_descriptor *
ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
			   dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_dma_descriptor *hw;
	struct ioat_ring_ent *desc;
	dma_addr_t dst = dma_dest;
	dma_addr_t src = dma_src;
	size_t total_len = len;
	int num_descs;
	u16 idx;
	int i;

	num_descs = ioat2_xferlen_to_descs(ioat, len);
	if (likely(num_descs) &&
	    ioat2_alloc_and_lock(&idx, ioat, num_descs) == 0)
		/* pass */;
	else
		return NULL;
	for (i = 0; i < num_descs; i++) {
		size_t copy = min_t(size_t, len, 1 << ioat->xfercap_log);

		desc = ioat2_get_ring_ent(ioat, idx + i);
		hw = desc->hw;

		hw->size = copy;
		hw->ctl = 0;
		hw->src_addr = src;
		hw->dst_addr = dst;

		len -= copy;
		dst += copy;
		src += copy;
		dump_desc_dbg(ioat, desc);
	}

	desc->txd.flags = flags;
	desc->len = total_len;
	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
	hw->ctl_f.compl_write = 1;
	dump_desc_dbg(ioat, desc);
	/* we leave the channel locked to ensure in order submission */

	return &desc->txd;
}
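/* Worked example (illustrative): with xfercap_log == 20 (1 MB per
 * descriptor), a 2.5 MB memcpy is split into three descriptors of 1 MB,
 * 1 MB and 0.5 MB; only the last descriptor carries the caller's flags and
 * the completion-write/interrupt control bits, so a single completion is
 * reported when the whole transfer finishes.
 */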
/**
 * ioat2_free_chan_resources - release all the descriptors
 * @chan: the channel to be cleaned
 */
static void ioat2_free_chan_resources(struct dma_chan *c)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioatdma_device *ioatdma_device = chan->device;
	struct ioat_ring_ent *desc;
	const u16 total_descs = 1 << ioat->alloc_order;
	int descs;
	int i;

	/* Before freeing channel resources first check
	 * if they have been previously allocated for this channel.
	 */
	if (!ioat->ring)
		return;

	tasklet_disable(&chan->cleanup_task);
	ioat2_cleanup(ioat);

	/* Delay 100ms after reset to allow internal DMA logic to quiesce
	 * before removing DMA descriptor resources.
	 */
	writeb(IOAT_CHANCMD_RESET,
	       chan->reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
	mdelay(100);

	spin_lock_bh(&ioat->ring_lock);
	descs = ioat2_ring_space(ioat);
	dev_dbg(to_dev(chan), "freeing %d idle descriptors\n", descs);
	for (i = 0; i < descs; i++) {
		desc = ioat2_get_ring_ent(ioat, ioat->head + i);
		ioat2_free_ring_ent(desc, c);
	}

	if (descs < total_descs)
		dev_err(to_dev(chan), "Freeing %d in use descriptors!\n",
			total_descs - descs);

	for (i = 0; i < total_descs - descs; i++) {
		desc = ioat2_get_ring_ent(ioat, ioat->tail + i);
		dump_desc_dbg(ioat, desc);
		ioat2_free_ring_ent(desc, c);
	}

	kfree(ioat->ring);
	ioat->ring = NULL;
	ioat->alloc_order = 0;
	pci_pool_free(ioatdma_device->completion_pool,
		      chan->completion,
		      chan->completion_dma);
	spin_unlock_bh(&ioat->ring_lock);

	chan->last_completion = 0;
	chan->completion_dma = 0;
	ioat->pending = 0;
	ioat->dmacount = 0;
	chan->watchdog_completion = 0;
	chan->last_compl_desc_addr_hw = 0;
	chan->watchdog_tcp_cookie = 0;
	chan->watchdog_last_tcp_cookie = 0;
}
static enum dma_status
ioat2_is_complete(struct dma_chan *c, dma_cookie_t cookie,
		  dma_cookie_t *done, dma_cookie_t *used)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);

	if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS)
		return DMA_SUCCESS;

	ioat2_cleanup(ioat);

	return ioat_is_complete(c, cookie, done, used);
}
int ioat2_dma_probe(struct ioatdma_device *device, int dca)
{
	struct pci_dev *pdev = device->pdev;
	struct dma_device *dma;
	struct dma_chan *c;
	struct ioat_chan_common *chan;
	int err;

	device->enumerate_channels = ioat2_enumerate_channels;
	dma = &device->common;
	dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
	dma->device_issue_pending = ioat2_issue_pending;
	dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
	dma->device_free_chan_resources = ioat2_free_chan_resources;
	dma->device_is_tx_complete = ioat2_is_complete;

	err = ioat_probe(device);
	if (err)
		return err;
	ioat_set_tcp_copy_break(2048);

	list_for_each_entry(c, &dma->channels, device_node) {
		chan = to_chan_common(c);
		writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE | IOAT_DMA_DCA_ANY_CPU,
		       chan->reg_base + IOAT_DCACTRL_OFFSET);
	}

	err = ioat_register(device);
	if (err)
		return err;
	if (dca)
		device->dca = ioat2_dca_init(pdev, device->reg_base);

	INIT_DELAYED_WORK(&device->work, ioat2_chan_watchdog);
	schedule_delayed_work(&device->work, WATCHDOG_DELAY);

	return err;
}
int ioat3_dma_probe(struct ioatdma_device *device, int dca)
{
	struct pci_dev *pdev = device->pdev;
	struct dma_device *dma;
	struct dma_chan *c;
	struct ioat_chan_common *chan;
	int err;
	u16 dev_id;

	device->enumerate_channels = ioat2_enumerate_channels;
	dma = &device->common;
	dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
	dma->device_issue_pending = ioat2_issue_pending;
	dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
	dma->device_free_chan_resources = ioat2_free_chan_resources;
	dma->device_is_tx_complete = ioat2_is_complete;

	/* -= IOAT ver.3 workarounds =- */
	/* Write CHANERRMSK_INT with 3E07h to mask out the errors
	 * that can cause stability issues for IOAT ver.3
	 */
	pci_write_config_dword(pdev, IOAT_PCI_CHANERRMASK_INT_OFFSET, 0x3e07);

	/* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
	 * (workaround for spurious config parity error after restart)
	 */
	pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
	if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0)
		pci_write_config_dword(pdev, IOAT_PCI_DMAUNCERRSTS_OFFSET, 0x10);

	err = ioat_probe(device);
	if (err)
		return err;
	ioat_set_tcp_copy_break(262144);

	list_for_each_entry(c, &dma->channels, device_node) {
		chan = to_chan_common(c);
		writel(IOAT_DMA_DCA_ANY_CPU,
		       chan->reg_base + IOAT_DCACTRL_OFFSET);
	}

	err = ioat_register(device);
	if (err)
		return err;
	if (dca)
		device->dca = ioat3_dca_init(pdev, device->reg_base);