/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2009 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 */
/*
 * This driver supports an Intel I/OAT DMA engine (versions >= 2), which
 * does asynchronous data movement and checksumming operations.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/i7300_idle.h>
#include "dma.h"
#include "dma_v2.h"
#include "registers.h"
#include "hw.h"
static int ioat_ring_alloc_order = 8;
module_param(ioat_ring_alloc_order, int, 0644);
MODULE_PARM_DESC(ioat_ring_alloc_order,
		 "ioat2+: allocate 2^n descriptors per channel (default: n=8)");
static int ioat_ring_max_alloc_order = IOAT_MAX_ORDER;
module_param(ioat_ring_max_alloc_order, int, 0644);
MODULE_PARM_DESC(ioat_ring_max_alloc_order,
		 "ioat2+: upper limit for dynamic ring resizing (default: n=16)");
static void __ioat2_issue_pending(struct ioat2_dma_chan *ioat)
{
	void * __iomem reg_base = ioat->base.reg_base;

	ioat->pending = 0;
	ioat->dmacount += ioat2_ring_pending(ioat);
	ioat->issued = ioat->head;
	/* make descriptor updates globally visible before notifying channel */
	wmb();
	writew(ioat->dmacount, reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
	dev_dbg(to_dev(&ioat->base),
		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
		__func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount);
}
static void ioat2_issue_pending(struct dma_chan *chan)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(chan);

	spin_lock_bh(&ioat->ring_lock);
	if (ioat->pending == 1)
		__ioat2_issue_pending(ioat);
	spin_unlock_bh(&ioat->ring_lock);
}
/**
 * ioat2_update_pending - log pending descriptors
 * @ioat: ioat2+ channel
 *
 * set pending to '1' unless pending is already set to '2', pending == 2
 * indicates that submission is temporarily blocked due to an in-flight
 * reset.  If we are already above the ioat_pending_level threshold then
 * just issue pending.
 *
 * called with ring_lock held
 */
static void ioat2_update_pending(struct ioat2_dma_chan *ioat)
{
	if (unlikely(ioat->pending == 2))
		return;
	else if (ioat2_ring_pending(ioat) > ioat_pending_level)
		__ioat2_issue_pending(ioat);
	else
		ioat->pending = 1;
}
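/*
 * A NULL descriptor transfers no data; it is used to prime the hardware with
 * a valid chain address, both at channel bring-up (ioat2_alloc_chan_resources)
 * and when restarting a channel that has nothing left to re-issue
 * (__restart_chan).
 */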
static void __ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
{
	struct ioat_ring_ent *desc;
	struct ioat_dma_descriptor *hw;
	int idx;

	if (ioat2_ring_space(ioat) < 1) {
		dev_err(to_dev(&ioat->base),
			"Unable to start null desc - ring full\n");
		return;
	}

	dev_dbg(to_dev(&ioat->base), "%s: head: %#x tail: %#x issued: %#x\n",
		__func__, ioat->head, ioat->tail, ioat->issued);
	idx = ioat2_desc_alloc(ioat, 1);
	desc = ioat2_get_ring_ent(ioat, idx);

	hw = desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = 1;
	hw->ctl_f.compl_write = 1;
	/* set size to non-zero value (channel returns error when size is 0) */
	hw->size = NULL_DESC_BUFFER_SIZE;
	hw->src_addr = 0;
	hw->dst_addr = 0;
	async_tx_ack(&desc->txd);
	ioat2_set_chainaddr(ioat, desc->txd.phys);
	dump_desc_dbg(ioat, desc);
	__ioat2_issue_pending(ioat);
}
static void ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
{
	spin_lock_bh(&ioat->ring_lock);
	__ioat2_start_null_desc(ioat);
	spin_unlock_bh(&ioat->ring_lock);
}
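/*
 * Completion model: the hardware periodically writes the physical address of
 * the last completed descriptor to the channel's completion writeback area
 * (fetched by ioat_cleanup_preamble() in the common channel code).
 * __cleanup() walks the ring from ->tail toward ->head, unmapping buffers and
 * running client callbacks, until it reaches that address.
 */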
static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
{
	struct ioat_chan_common *chan = &ioat->base;
	struct dma_async_tx_descriptor *tx;
	struct ioat_ring_ent *desc;
	bool seen_current = false;
	u16 active;
	int i;

	dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n",
		__func__, ioat->head, ioat->tail, ioat->issued);

	active = ioat2_ring_active(ioat);
	for (i = 0; i < active && !seen_current; i++) {
		prefetch(ioat2_get_ring_ent(ioat, ioat->tail + i + 1));
		desc = ioat2_get_ring_ent(ioat, ioat->tail + i);
		tx = &desc->txd;
		dump_desc_dbg(ioat, desc);
		if (tx->cookie) {
			ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw);
			chan->completed_cookie = tx->cookie;
			tx->cookie = 0;
			if (tx->callback) {
				tx->callback(tx->callback_param);
				tx->callback = NULL;
			}
		}

		if (tx->phys == phys_complete)
			seen_current = true;
	}
	ioat->tail += i;
	BUG_ON(!seen_current); /* no active descs have written a completion? */

	chan->last_completion = phys_complete;
	if (ioat->head == ioat->tail) {
		dev_dbg(to_dev(chan), "%s: cancel completion timeout\n",
			__func__);
		clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
		mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
	}
}
/**
 * ioat2_cleanup - clean finished descriptors (advance tail pointer)
 * @chan: ioat channel to be cleaned up
 */
static void ioat2_cleanup(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	unsigned long phys_complete;

	prefetch(chan->completion);

	if (!spin_trylock_bh(&chan->cleanup_lock))
		return;

	if (!ioat_cleanup_preamble(chan, &phys_complete)) {
		spin_unlock_bh(&chan->cleanup_lock);
		return;
	}

	if (!spin_trylock_bh(&ioat->ring_lock)) {
		spin_unlock_bh(&chan->cleanup_lock);
		return;
	}

	__cleanup(ioat, phys_complete);

	spin_unlock_bh(&ioat->ring_lock);
	spin_unlock_bh(&chan->cleanup_lock);
}
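/*
 * The cleanup tasklet reaps finished descriptors and then rewrites CHANCTRL
 * with IOAT_CHANCTRL_RUN, the same value programmed at channel setup in
 * ioat2_alloc_chan_resources().
 */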
static void ioat2_cleanup_tasklet(unsigned long data)
{
	struct ioat2_dma_chan *ioat = (void *) data;

	ioat2_cleanup(ioat);
	writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
}
static void __restart_chan(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;

	/* set the tail to be re-issued */
	ioat->issued = ioat->tail;
	ioat->dmacount = 0;
	set_bit(IOAT_COMPLETION_PENDING, &chan->state);
	mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);

	dev_dbg(to_dev(chan),
		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
		__func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount);

	if (ioat2_ring_pending(ioat)) {
		struct ioat_ring_ent *desc;

		desc = ioat2_get_ring_ent(ioat, ioat->tail);
		ioat2_set_chainaddr(ioat, desc->txd.phys);
		__ioat2_issue_pending(ioat);
	} else
		__ioat2_start_null_desc(ioat);
}
static void ioat2_restart_channel(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	unsigned long phys_complete;
	u64 status;

	status = ioat_chansts(chan);
	if (is_ioat_active(status) || is_ioat_idle(status))
		ioat_suspend(chan);
	while (is_ioat_active(status) || is_ioat_idle(status)) {
		status = ioat_chansts(chan);
		cpu_relax();
	}

	if (ioat_cleanup_preamble(chan, &phys_complete))
		__cleanup(ioat, phys_complete);

	__restart_chan(ioat);
}
static bool reshape_ring(struct ioat2_dma_chan *ioat, int order);
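/*
 * Timer usage: while work is outstanding the timer acts as a watchdog
 * (COMPLETION_TIMEOUT); if no progress is seen after a completion has already
 * been acknowledged once, the channel is restarted.  When the channel is
 * quiet, the same timer fires at IDLE_TIMEOUT intervals to step an oversized
 * ring back down toward the default order.
 */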
static void ioat2_timer_event(unsigned long data)
{
	struct ioat2_dma_chan *ioat = (void *) data;
	struct ioat_chan_common *chan = &ioat->base;

	spin_lock_bh(&chan->cleanup_lock);
	if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
		unsigned long phys_complete;
		u64 status;

		spin_lock_bh(&ioat->ring_lock);
		status = ioat_chansts(chan);

		/* when halted due to errors check for channel
		 * programming errors before advancing the completion state
		 */
		if (is_ioat_halted(status)) {
			u32 chanerr;

			chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
			BUG_ON(is_ioat_bug(chanerr));
		}

		/* if we haven't made progress and we have already
		 * acknowledged a pending completion once, then be more
		 * forceful with a restart
		 */
		if (ioat_cleanup_preamble(chan, &phys_complete))
			__cleanup(ioat, phys_complete);
		else if (test_bit(IOAT_COMPLETION_ACK, &chan->state))
			ioat2_restart_channel(ioat);
		else {
			set_bit(IOAT_COMPLETION_ACK, &chan->state);
			mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
		}
		spin_unlock_bh(&ioat->ring_lock);
	} else {
		u16 active;

		/* if the ring is idle, empty, and oversized try to step
		 * down the size
		 */
		spin_lock_bh(&ioat->ring_lock);
		active = ioat2_ring_active(ioat);
		if (active == 0 && ioat->alloc_order > ioat_get_alloc_order())
			reshape_ring(ioat, ioat->alloc_order-1);
		spin_unlock_bh(&ioat->ring_lock);

		/* keep shrinking until we get back to our minimum
		 * default size
		 */
		if (ioat->alloc_order > ioat_get_alloc_order())
			mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
	}
	spin_unlock_bh(&chan->cleanup_lock);
}
/**
 * ioat2_enumerate_channels - find and initialize the device's channels
 * @device: the device to be enumerated
 */
static int ioat2_enumerate_channels(struct ioatdma_device *device)
{
	struct ioat2_dma_chan *ioat;
	struct device *dev = &device->pdev->dev;
	struct dma_device *dma = &device->common;
	u8 xfercap_log;
	int i;

	INIT_LIST_HEAD(&dma->channels);
	dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
	dma->chancnt &= 0x1f; /* bits [4:0] valid */
	if (dma->chancnt > ARRAY_SIZE(device->idx)) {
		dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
			 dma->chancnt, ARRAY_SIZE(device->idx));
		dma->chancnt = ARRAY_SIZE(device->idx);
	}
	xfercap_log = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
	xfercap_log &= 0x1f; /* bits [4:0] valid */
	if (xfercap_log == 0)
		return 0;
	dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log);

	/* FIXME which i/oat version is i7300? */
#ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL
	if (i7300_idle_platform_probe(NULL, NULL, 1) == 0)
		dma->chancnt--;
#endif
	for (i = 0; i < dma->chancnt; i++) {
		ioat = devm_kzalloc(dev, sizeof(*ioat), GFP_KERNEL);
		if (!ioat)
			break;

		ioat_init_channel(device, &ioat->base, i,
				  ioat2_timer_event,
				  ioat2_cleanup_tasklet,
				  (unsigned long) ioat);
		ioat->xfercap_log = xfercap_log;
		spin_lock_init(&ioat->ring_lock);
	}
	dma->chancnt = i;
	return i;
}
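/*
 * Submission protocol: prep routines (e.g. ioat2_dma_prep_memcpy_lock) return
 * with ioat->ring_lock held; the subsequent tx_submit call assigns the cookie
 * and drops the lock, which keeps descriptor submission strictly in order.
 */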
static dma_cookie_t ioat2_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
{
	struct dma_chan *c = tx->chan;
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	dma_cookie_t cookie = c->cookie;

	cookie++;
	if (cookie < 0)
		cookie = 1;
	tx->cookie = cookie;
	c->cookie = cookie;
	dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie);

	if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state))
		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
	ioat2_update_pending(ioat);
	spin_unlock_bh(&ioat->ring_lock);

	return cookie;
}
static struct ioat_ring_ent *ioat2_alloc_ring_ent(struct dma_chan *chan, gfp_t flags)
{
	struct ioat_dma_descriptor *hw;
	struct ioat_ring_ent *desc;
	struct ioatdma_device *dma;
	dma_addr_t phys;

	dma = to_ioatdma_device(chan->device);
	hw = pci_pool_alloc(dma->dma_pool, flags, &phys);
	if (!hw)
		return NULL;
	memset(hw, 0, sizeof(*hw));

	desc = kzalloc(sizeof(*desc), flags);
	if (!desc) {
		pci_pool_free(dma->dma_pool, hw, phys);
		return NULL;
	}

	dma_async_tx_descriptor_init(&desc->txd, chan);
	desc->txd.tx_submit = ioat2_tx_submit_unlock;
	desc->hw = hw;
	desc->txd.phys = phys;
	return desc;
}
static void ioat2_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan)
{
	struct ioatdma_device *dma;

	dma = to_ioatdma_device(chan->device);
	pci_pool_free(dma->dma_pool, desc->hw, desc->txd.phys);
	kfree(desc);
}
static struct ioat_ring_ent **ioat2_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
{
	struct ioat_ring_ent **ring;
	int descs = 1 << order;
	int i;

	if (order > ioat_get_max_alloc_order())
		return NULL;

	/* allocate the array to hold the software ring */
	ring = kcalloc(descs, sizeof(*ring), flags);
	if (!ring)
		return NULL;
	for (i = 0; i < descs; i++) {
		ring[i] = ioat2_alloc_ring_ent(c, flags);
		if (!ring[i]) {
			while (i--)
				ioat2_free_ring_ent(ring[i], c);
			kfree(ring);
			return NULL;
		}
		set_desc_id(ring[i], i);
	}

	/* link descs */
	for (i = 0; i < descs-1; i++) {
		struct ioat_ring_ent *next = ring[i+1];
		struct ioat_dma_descriptor *hw = ring[i]->hw;

		hw->next = next->txd.phys;
	}
	ring[i]->hw->next = ring[0]->txd.phys;

	return ring;
}
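/*
 * Typical dmaengine client flow against the ops wired up in ioat2_dma_probe()
 * (a sketch only; real clients normally go through the dmaengine/async_tx
 * wrappers rather than calling the ops directly, and error handling is
 * omitted):
 *
 *	chan->device->device_alloc_chan_resources(chan);
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len, flags);
 *	cookie = tx->tx_submit(tx);
 *	chan->device->device_issue_pending(chan);
 *	status = chan->device->device_is_tx_complete(chan, cookie, NULL, NULL);
 */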
/* ioat2_alloc_chan_resources - allocate/initialize ioat2 descriptor ring
 * @chan: channel to be initialized
 */
static int ioat2_alloc_chan_resources(struct dma_chan *c)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_ring_ent **ring;
	u32 chanerr;
	int order;

	/* have we already been set up? */
	if (ioat->ring)
		return 1 << ioat->alloc_order;

	/* Setup register to interrupt and write completion status on error */
	writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET);

	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
	if (chanerr) {
		dev_err(to_dev(chan), "CHANERR = %x, clearing\n", chanerr);
		writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
	}

	/* allocate a completion writeback area */
	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
	chan->completion = pci_pool_alloc(chan->device->completion_pool,
					  GFP_KERNEL, &chan->completion_dma);
	if (!chan->completion)
		return -ENOMEM;

	memset(chan->completion, 0, sizeof(*chan->completion));
	writel(((u64) chan->completion_dma) & 0x00000000FFFFFFFF,
	       chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
	writel(((u64) chan->completion_dma) >> 32,
	       chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

	order = ioat_get_alloc_order();
	ring = ioat2_alloc_ring(c, order, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	spin_lock_bh(&ioat->ring_lock);
	ioat->ring = ring;
	ioat->head = 0;
	ioat->issued = 0;
	ioat->tail = 0;
	ioat->pending = 0;
	ioat->alloc_order = order;
	spin_unlock_bh(&ioat->ring_lock);

	tasklet_enable(&chan->cleanup_task);
	ioat2_start_null_desc(ioat);

	return 1 << ioat->alloc_order;
}
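/*
 * Ring index math: sizes are powers of two, so a logical position maps to a
 * slot with "(ioat->tail + i) & (size - 1)".  For example, growing from 4 to
 * 8 entries with tail == 6 moves the descriptor at logical position 7 from
 * slot (7 & 3) == 3 to slot (7 & 7) == 7 while preserving submission order.
 */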
static bool reshape_ring(struct ioat2_dma_chan *ioat, int order)
{
	/* reshape differs from normal ring allocation in that we want
	 * to allocate a new software ring while only
	 * extending/truncating the hardware ring
	 */
	struct ioat_chan_common *chan = &ioat->base;
	struct dma_chan *c = &chan->common;
	const u16 curr_size = ioat2_ring_mask(ioat) + 1;
	const u16 active = ioat2_ring_active(ioat);
	const u16 new_size = 1 << order;
	struct ioat_ring_ent **ring;
	u16 i;

	if (order > ioat_get_max_alloc_order())
		return false;

	/* double check that we have at least 1 free descriptor */
	if (active == curr_size)
		return false;

	/* when shrinking, verify that we can hold the current active
	 * set in the new ring
	 */
	if (active >= new_size)
		return false;

	/* allocate the array to hold the software ring */
	ring = kcalloc(new_size, sizeof(*ring), GFP_NOWAIT);
	if (!ring)
		return false;

	/* allocate/trim descriptors as needed */
	if (new_size > curr_size) {
		/* copy current descriptors to the new ring */
		for (i = 0; i < curr_size; i++) {
			u16 curr_idx = (ioat->tail+i) & (curr_size-1);
			u16 new_idx = (ioat->tail+i) & (new_size-1);

			ring[new_idx] = ioat->ring[curr_idx];
			set_desc_id(ring[new_idx], new_idx);
		}

		/* add new descriptors to the ring */
		for (i = curr_size; i < new_size; i++) {
			u16 new_idx = (ioat->tail+i) & (new_size-1);

			ring[new_idx] = ioat2_alloc_ring_ent(c, GFP_NOWAIT);
			if (!ring[new_idx]) {
				while (i--) {
					u16 new_idx = (ioat->tail+i) & (new_size-1);

					ioat2_free_ring_ent(ring[new_idx], c);
				}
				kfree(ring);
				return false;
			}
			set_desc_id(ring[new_idx], new_idx);
		}

		/* hw link new descriptors */
		for (i = curr_size-1; i < new_size; i++) {
			u16 new_idx = (ioat->tail+i) & (new_size-1);
			struct ioat_ring_ent *next = ring[(new_idx+1) & (new_size-1)];
			struct ioat_dma_descriptor *hw = ring[new_idx]->hw;

			hw->next = next->txd.phys;
		}
	} else {
		struct ioat_dma_descriptor *hw;
		struct ioat_ring_ent *next;

		/* copy current descriptors to the new ring, dropping the
		 * removed descriptors
		 */
		for (i = 0; i < new_size; i++) {
			u16 curr_idx = (ioat->tail+i) & (curr_size-1);
			u16 new_idx = (ioat->tail+i) & (new_size-1);

			ring[new_idx] = ioat->ring[curr_idx];
			set_desc_id(ring[new_idx], new_idx);
		}

		/* free deleted descriptors */
		for (i = new_size; i < curr_size; i++) {
			struct ioat_ring_ent *ent;

			ent = ioat2_get_ring_ent(ioat, ioat->tail+i);
			ioat2_free_ring_ent(ent, c);
		}

		/* fix up hardware ring */
		hw = ring[(ioat->tail+new_size-1) & (new_size-1)]->hw;
		next = ring[(ioat->tail+new_size) & (new_size-1)];
		hw->next = next->txd.phys;
	}

	dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n",
		__func__, new_size);

	kfree(ioat->ring);
	ioat->ring = ring;
	ioat->alloc_order = order;

	return true;
}
/**
 * ioat2_alloc_and_lock - common descriptor alloc boilerplate for ioat2,3 ops
 * @idx: gets starting descriptor index on successful allocation
 * @ioat: ioat2,3 channel (ring) to operate on
 * @num_descs: allocation length
 */
static int ioat2_alloc_and_lock(u16 *idx, struct ioat2_dma_chan *ioat, int num_descs)
{
	struct ioat_chan_common *chan = &ioat->base;

	spin_lock_bh(&ioat->ring_lock);
	/* never allow the last descriptor to be consumed, we need at
	 * least one free at all times to allow for on-the-fly ring
	 * resizing.
	 */
	while (unlikely(ioat2_ring_space(ioat) <= num_descs)) {
		if (reshape_ring(ioat, ioat->alloc_order + 1) &&
		    ioat2_ring_space(ioat) > num_descs)
			break;

		if (printk_ratelimit())
			dev_dbg(to_dev(chan),
				"%s: ring full! num_descs: %d (%x:%x:%x)\n",
				__func__, num_descs, ioat->head, ioat->tail,
				ioat->issued);
		spin_unlock_bh(&ioat->ring_lock);

		/* progress reclaim in the allocation failure case we
		 * may be called under bh_disabled so we need to trigger
		 * the timer event directly
		 */
		spin_lock_bh(&chan->cleanup_lock);
		if (jiffies > chan->timer.expires &&
		    timer_pending(&chan->timer)) {
			mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
			spin_unlock_bh(&chan->cleanup_lock);
			ioat2_timer_event((unsigned long) ioat);
		} else
			spin_unlock_bh(&chan->cleanup_lock);
		return -ENOMEM;
	}

	dev_dbg(to_dev(chan), "%s: num_descs: %d (%x:%x:%x)\n",
		__func__, num_descs, ioat->head, ioat->tail, ioat->issued);

	*idx = ioat2_desc_alloc(ioat, num_descs);
	return 0;  /* with ioat->ring_lock held */
}
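/*
 * A single memcpy request may span several descriptors: each descriptor moves
 * at most 1 << ioat->xfercap_log bytes (xfercap is read from the device in
 * ioat2_enumerate_channels()).  With a 1MB transfer cap, for example, a 2.5MB
 * copy needs three descriptors; only the last one carries the interrupt and
 * completion-write control bits.
 */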
static struct dma_async_tx_descriptor *
ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
			   dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_dma_descriptor *hw;
	struct ioat_ring_ent *desc;
	dma_addr_t dst = dma_dest;
	dma_addr_t src = dma_src;
	size_t total_len = len;
	int num_descs;
	u16 idx;
	int i;

	num_descs = ioat2_xferlen_to_descs(ioat, len);
	if (likely(num_descs) &&
	    ioat2_alloc_and_lock(&idx, ioat, num_descs) == 0)
		/* pass */;
	else
		return NULL;
	for (i = 0; i < num_descs; i++) {
		size_t copy = min_t(size_t, len, 1 << ioat->xfercap_log);

		desc = ioat2_get_ring_ent(ioat, idx + i);
		hw = desc->hw;

		hw->size = copy;
		hw->ctl = 0;
		hw->src_addr = src;
		hw->dst_addr = dst;

		len -= copy;
		dst += copy;
		src += copy;
		dump_desc_dbg(ioat, desc);
	}

	desc->txd.flags = flags;
	desc->len = total_len;
	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
	hw->ctl_f.compl_write = 1;
	dump_desc_dbg(ioat, desc);
	/* we leave the channel locked to ensure in order submission */

	return &desc->txd;
}
/**
 * ioat2_free_chan_resources - release all the descriptors
 * @chan: the channel to be cleaned
 */
static void ioat2_free_chan_resources(struct dma_chan *c)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioatdma_device *ioatdma_device = chan->device;
	struct ioat_ring_ent *desc;
	const u16 total_descs = 1 << ioat->alloc_order;
	int descs;
	int i;

	/* Before freeing channel resources first check
	 * if they have been previously allocated for this channel.
	 */
	if (!ioat->ring)
		return;

	tasklet_disable(&chan->cleanup_task);
	del_timer_sync(&chan->timer);
	ioat2_cleanup(ioat);

	/* Delay 100ms after reset to allow internal DMA logic to quiesce
	 * before removing DMA descriptor resources.
	 */
	writeb(IOAT_CHANCMD_RESET,
	       chan->reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
	mdelay(100);

	spin_lock_bh(&ioat->ring_lock);
	descs = ioat2_ring_space(ioat);
	dev_dbg(to_dev(chan), "freeing %d idle descriptors\n", descs);
	for (i = 0; i < descs; i++) {
		desc = ioat2_get_ring_ent(ioat, ioat->head + i);
		ioat2_free_ring_ent(desc, c);
	}

	if (descs < total_descs)
		dev_err(to_dev(chan), "Freeing %d in use descriptors!\n",
			total_descs - descs);

	for (i = 0; i < total_descs - descs; i++) {
		desc = ioat2_get_ring_ent(ioat, ioat->tail + i);
		dump_desc_dbg(ioat, desc);
		ioat2_free_ring_ent(desc, c);
	}

	kfree(ioat->ring);
	ioat->ring = NULL;
	ioat->alloc_order = 0;
	pci_pool_free(ioatdma_device->completion_pool, chan->completion,
		      chan->completion_dma);
	spin_unlock_bh(&ioat->ring_lock);

	chan->last_completion = 0;
	chan->completion_dma = 0;
	ioat->pending = 0;
	ioat->dmacount = 0;
}
static enum dma_status
ioat2_is_complete(struct dma_chan *c, dma_cookie_t cookie,
		  dma_cookie_t *done, dma_cookie_t *used)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);

	if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS)
		return DMA_SUCCESS;

	ioat2_cleanup(ioat);

	return ioat_is_complete(c, cookie, done, used);
}
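/*
 * ioat2_dma_probe()/ioat3_dma_probe() wire the ioat2 ring implementation into
 * the generic dmaengine ops, register the device, and optionally set up DCA
 * (direct cache access) via ioat2_dca_init()/ioat3_dca_init().  The version 3
 * probe additionally applies two hardware workarounds and raises the TCP
 * copy-break threshold.
 */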
int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca)
{
	struct pci_dev *pdev = device->pdev;
	struct dma_device *dma;
	struct dma_chan *c;
	struct ioat_chan_common *chan;
	int err;

	device->enumerate_channels = ioat2_enumerate_channels;
	dma = &device->common;
	dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
	dma->device_issue_pending = ioat2_issue_pending;
	dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
	dma->device_free_chan_resources = ioat2_free_chan_resources;
	dma->device_is_tx_complete = ioat2_is_complete;

	err = ioat_probe(device);
	if (err)
		return err;
	ioat_set_tcp_copy_break(2048);

	list_for_each_entry(c, &dma->channels, device_node) {
		chan = to_chan_common(c);
		writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE | IOAT_DMA_DCA_ANY_CPU,
		       chan->reg_base + IOAT_DCACTRL_OFFSET);
	}

	err = ioat_register(device);
	if (err)
		return err;

	if (dca)
		device->dca = ioat2_dca_init(pdev, device->reg_base);

	return err;
}

int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
{
	struct pci_dev *pdev = device->pdev;
	struct dma_device *dma;
	struct dma_chan *c;
	struct ioat_chan_common *chan;
	int err;
	u16 dev_id;

	device->enumerate_channels = ioat2_enumerate_channels;
	dma = &device->common;
	dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
	dma->device_issue_pending = ioat2_issue_pending;
	dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
	dma->device_free_chan_resources = ioat2_free_chan_resources;
	dma->device_is_tx_complete = ioat2_is_complete;

	/* -= IOAT ver.3 workarounds =- */
	/* Write CHANERRMSK_INT with 3E07h to mask out the errors
	 * that can cause stability issues for IOAT ver.3
	 */
	pci_write_config_dword(pdev, IOAT_PCI_CHANERRMASK_INT_OFFSET, 0x3e07);

	/* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
	 * (workaround for spurious config parity error after restart)
	 */
	pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
	if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0)
		pci_write_config_dword(pdev, IOAT_PCI_DMAUNCERRSTS_OFFSET, 0x10);

	err = ioat_probe(device);
	if (err)
		return err;
	ioat_set_tcp_copy_break(262144);

	list_for_each_entry(c, &dma->channels, device_node) {
		chan = to_chan_common(c);
		writel(IOAT_DMA_DCA_ANY_CPU,
		       chan->reg_base + IOAT_DCACTRL_OFFSET);
	}

	err = ioat_register(device);
	if (err)
		return err;

	if (dca)
		device->dca = ioat3_dca_init(pdev, device->reg_base);

	return err;
}