/*-
 * Copyright (c) 1998 - 2008 Søren Schmidt <sos@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
27 /* local prototypes */
28 static int ata_marvell_pata_chipinit(device_t dev
);
29 static int ata_marvell_pata_allocate(device_t dev
);
30 static void ata_marvell_pata_setmode(device_t dev
, int mode
);
31 static int ata_marvell_edma_allocate(device_t dev
);
32 static int ata_marvell_edma_status(device_t dev
);
33 static int ata_marvell_edma_begin_transaction(struct ata_request
*request
);
34 static int ata_marvell_edma_end_transaction(struct ata_request
*request
);
35 static void ata_marvell_edma_reset(device_t dev
);
36 static void ata_marvell_edma_dmasetprd(void *xsc
, bus_dma_segment_t
*segs
, int nsegs
, int error
);
37 static void ata_marvell_edma_dmainit(device_t dev
);
/*
 * Per-channel register window offsets within the controller BAR.
 * Channels 0-3 live in the 0x20000 host-controller bank, channels 4-7 in
 * the 0x30000 bank; each channel gets a 0x0100-byte host window and a
 * 0x2000-byte EDMA window.  Fully parenthesized so the expansion is safe
 * in any arithmetic context.
 */
#define ATA_MV_HOST_BASE(ch) \
	(ic((ch)->unit & 3) * 0x0100) + (((ch)->unit > 3) ? 0x30000 : 0x20000))
#define ATA_MV_EDMA_BASE(ch) \
	((((ch)->unit & 3) * 0x2000) + (((ch)->unit > 3) ? 0x30000 : 0x20000))
53 struct ata_marvell_response
{
60 struct ata_marvell_dma_prdentry
{
68 * Marvell chipset support functions
71 ata_marvell_ident(device_t dev
)
73 struct ata_pci_controller
*ctlr
= device_get_softc(dev
);
74 static const struct ata_chip_id ids
[] =
75 {{ ATA_M88SX5040
, 0, 4, MV_50XX
, ATA_SA150
, "88SX5040" },
76 { ATA_M88SX5041
, 0, 4, MV_50XX
, ATA_SA150
, "88SX5041" },
77 { ATA_M88SX5080
, 0, 8, MV_50XX
, ATA_SA150
, "88SX5080" },
78 { ATA_M88SX5081
, 0, 8, MV_50XX
, ATA_SA150
, "88SX5081" },
79 { ATA_M88SX6041
, 0, 4, MV_60XX
, ATA_SA300
, "88SX6041" },
80 { ATA_M88SX6042
, 0, 4, MV_6042
, ATA_SA300
, "88SX6042" },
81 { ATA_M88SX6081
, 0, 8, MV_60XX
, ATA_SA300
, "88SX6081" },
82 { ATA_M88SX7042
, 0, 4, MV_7042
, ATA_SA300
, "88SX7042" },
83 { ATA_M88SX6101
, 0, 1, MV_61XX
, ATA_UDMA6
, "88SX6101" },
84 { ATA_M88SX6121
, 0, 1, MV_61XX
, ATA_UDMA6
, "88SX6121" },
85 { ATA_M88SX6145
, 0, 2, MV_61XX
, ATA_UDMA6
, "88SX6145" },
88 if (pci_get_vendor(dev
) != ATA_MARVELL_ID
)
91 if (!(ctlr
->chip
= ata_match_chip(dev
, ids
)))
96 switch (ctlr
->chip
->cfg2
) {
101 ctlr
->chipinit
= ata_marvell_edma_chipinit
;
104 ctlr
->chipinit
= ata_marvell_pata_chipinit
;
111 ata_marvell_pata_chipinit(device_t dev
)
113 struct ata_pci_controller
*ctlr
= device_get_softc(dev
);
115 if (ata_setup_interrupt(dev
, ata_generic_intr
))
118 ctlr
->allocate
= ata_marvell_pata_allocate
;
119 ctlr
->setmode
= ata_marvell_pata_setmode
;
120 ctlr
->channels
= ctlr
->chip
->cfg1
;
125 ata_marvell_pata_allocate(device_t dev
)
127 struct ata_channel
*ch
= device_get_softc(dev
);
129 /* setup the usual register normal pci style */
130 if (ata_pci_allocate(dev
))
133 /* dont use 32 bit PIO transfers */
134 ch
->flags
|= ATA_USE_16BIT
;
140 ata_marvell_pata_setmode(device_t dev
, int mode
)
142 device_t gparent
= GRANDPARENT(dev
);
143 struct ata_pci_controller
*ctlr
= device_get_softc(gparent
);
144 struct ata_device
*atadev
= device_get_softc(dev
);
146 mode
= ata_limit_mode(dev
, mode
, ctlr
->chip
->max_dma
);
147 mode
= ata_check_80pin(dev
, mode
);
148 if (!ata_controlcmd(dev
, ATA_SETFEATURES
, ATA_SF_SETXFER
, 0, mode
))
153 ata_marvell_edma_chipinit(device_t dev
)
155 struct ata_pci_controller
*ctlr
= device_get_softc(dev
);
157 if (ata_setup_interrupt(dev
, ata_generic_intr
))
160 ctlr
->r_type1
= SYS_RES_MEMORY
;
161 ctlr
->r_rid1
= PCIR_BAR(0);
162 if (!(ctlr
->r_res1
= bus_alloc_resource_any(dev
, ctlr
->r_type1
,
163 &ctlr
->r_rid1
, RF_ACTIVE
))) {
164 ata_teardown_interrupt(dev
);
168 /* mask all host controller interrupts */
169 ATA_OUTL(ctlr
->r_res1
, 0x01d64, 0x00000000);
171 /* mask all PCI interrupts */
172 ATA_OUTL(ctlr
->r_res1
, 0x01d5c, 0x00000000);
174 ctlr
->allocate
= ata_marvell_edma_allocate
;
175 ctlr
->reset
= ata_marvell_edma_reset
;
176 ctlr
->dmainit
= ata_marvell_edma_dmainit
;
177 ctlr
->setmode
= ata_sata_setmode
;
178 ctlr
->channels
= ctlr
->chip
->cfg1
;
180 /* clear host controller interrupts */
181 ATA_OUTL(ctlr
->r_res1
, 0x20014, 0x00000000);
182 if (ctlr
->chip
->cfg1
> 4)
183 ATA_OUTL(ctlr
->r_res1
, 0x30014, 0x00000000);
185 /* clear PCI interrupts */
186 ATA_OUTL(ctlr
->r_res1
, 0x01d58, 0x00000000);
188 /* unmask PCI interrupts we want */
189 ATA_OUTL(ctlr
->r_res1
, 0x01d5c, 0x007fffff);
191 /* unmask host controller interrupts we want */
192 ATA_OUTL(ctlr
->r_res1
, 0x01d64, 0x000000ff/*HC0*/ | 0x0001fe00/*HC1*/ |
193 /*(1<<19) | (1<<20) | (1<<21) |*/(1<<22) | (1<<24) | (0x7f << 25));
195 /* enable PCI interrupt */
196 pci_write_config(dev
, PCIR_COMMAND
,
197 pci_read_config(dev
, PCIR_COMMAND
, 2) & ~0x0400, 2);
202 ata_marvell_edma_allocate(device_t dev
)
204 struct ata_pci_controller
*ctlr
= device_get_softc(device_get_parent(dev
));
205 struct ata_channel
*ch
= device_get_softc(dev
);
209 work
= ch
->dma
->work_bus
;
210 /* clear work area */
211 bzero(ch
->dma
->work
, 1024+256);
212 bus_dmamap_sync(ch
->dma
->work_tag
, ch
->dma
->work_map
,
213 BUS_DMASYNC_PREREAD
| BUS_DMASYNC_PREWRITE
);
215 /* set legacy ATA resources */
216 for (i
= ATA_DATA
; i
<= ATA_COMMAND
; i
++) {
217 ch
->r_io
[i
].res
= ctlr
->r_res1
;
218 ch
->r_io
[i
].offset
= 0x02100 + (i
<< 2) + ATA_MV_EDMA_BASE(ch
);
220 ch
->r_io
[ATA_CONTROL
].res
= ctlr
->r_res1
;
221 ch
->r_io
[ATA_CONTROL
].offset
= 0x02120 + ATA_MV_EDMA_BASE(ch
);
222 ch
->r_io
[ATA_IDX_ADDR
].res
= ctlr
->r_res1
;
223 ata_default_registers(dev
);
225 /* set SATA resources */
226 switch (ctlr
->chip
->cfg2
) {
228 ch
->r_io
[ATA_SSTATUS
].res
= ctlr
->r_res1
;
229 ch
->r_io
[ATA_SSTATUS
].offset
= 0x00100 + ATA_MV_HOST_BASE(ch
);
230 ch
->r_io
[ATA_SERROR
].res
= ctlr
->r_res1
;
231 ch
->r_io
[ATA_SERROR
].offset
= 0x00104 + ATA_MV_HOST_BASE(ch
);
232 ch
->r_io
[ATA_SCONTROL
].res
= ctlr
->r_res1
;
233 ch
->r_io
[ATA_SCONTROL
].offset
= 0x00108 + ATA_MV_HOST_BASE(ch
);
238 ch
->r_io
[ATA_SSTATUS
].res
= ctlr
->r_res1
;
239 ch
->r_io
[ATA_SSTATUS
].offset
= 0x02300 + ATA_MV_EDMA_BASE(ch
);
240 ch
->r_io
[ATA_SERROR
].res
= ctlr
->r_res1
;
241 ch
->r_io
[ATA_SERROR
].offset
= 0x02304 + ATA_MV_EDMA_BASE(ch
);
242 ch
->r_io
[ATA_SCONTROL
].res
= ctlr
->r_res1
;
243 ch
->r_io
[ATA_SCONTROL
].offset
= 0x02308 + ATA_MV_EDMA_BASE(ch
);
244 ch
->r_io
[ATA_SACTIVE
].res
= ctlr
->r_res1
;
245 ch
->r_io
[ATA_SACTIVE
].offset
= 0x02350 + ATA_MV_EDMA_BASE(ch
);
249 ch
->flags
|= ATA_NO_SLAVE
;
250 ch
->flags
|= ATA_USE_16BIT
; /* XXX SOS needed ? */
252 ch
->hw
.begin_transaction
= ata_marvell_edma_begin_transaction
;
253 ch
->hw
.end_transaction
= ata_marvell_edma_end_transaction
;
254 ch
->hw
.status
= ata_marvell_edma_status
;
256 /* disable the EDMA machinery */
257 ATA_OUTL(ctlr
->r_res1
, 0x02028 + ATA_MV_EDMA_BASE(ch
), 0x00000002);
258 DELAY(100000); /* SOS should poll for disabled */
260 /* set configuration to non-queued 128b read transfers stop on error */
261 ATA_OUTL(ctlr
->r_res1
, 0x02000 + ATA_MV_EDMA_BASE(ch
), (1<<11) | (1<<13));
263 /* request queue base high */
264 ATA_OUTL(ctlr
->r_res1
, 0x02010 + ATA_MV_EDMA_BASE(ch
), work
>> 32);
266 /* request queue in ptr */
267 ATA_OUTL(ctlr
->r_res1
, 0x02014 + ATA_MV_EDMA_BASE(ch
), work
& 0xffffffff);
269 /* request queue out ptr */
270 ATA_OUTL(ctlr
->r_res1
, 0x02018 + ATA_MV_EDMA_BASE(ch
), 0x0);
272 /* response queue base high */
274 ATA_OUTL(ctlr
->r_res1
, 0x0201c + ATA_MV_EDMA_BASE(ch
), work
>> 32);
276 /* response queue in ptr */
277 ATA_OUTL(ctlr
->r_res1
, 0x02020 + ATA_MV_EDMA_BASE(ch
), 0x0);
279 /* response queue out ptr */
280 ATA_OUTL(ctlr
->r_res1
, 0x02024 + ATA_MV_EDMA_BASE(ch
), work
& 0xffffffff);
282 /* clear SATA error register */
283 ATA_IDX_OUTL(ch
, ATA_SERROR
, ATA_IDX_INL(ch
, ATA_SERROR
));
285 /* clear any outstanding error interrupts */
286 ATA_OUTL(ctlr
->r_res1
, 0x02008 + ATA_MV_EDMA_BASE(ch
), 0x0);
288 /* unmask all error interrupts */
289 ATA_OUTL(ctlr
->r_res1
, 0x0200c + ATA_MV_EDMA_BASE(ch
), ~0x0);
291 /* enable EDMA machinery */
292 ATA_OUTL(ctlr
->r_res1
, 0x02028 + ATA_MV_EDMA_BASE(ch
), 0x00000001);
297 ata_marvell_edma_status(device_t dev
)
299 struct ata_pci_controller
*ctlr
= device_get_softc(device_get_parent(dev
));
300 struct ata_channel
*ch
= device_get_softc(dev
);
301 u_int32_t cause
= ATA_INL(ctlr
->r_res1
, 0x01d60);
302 int shift
= (ch
->unit
<< 1) + (ch
->unit
> 3);
304 if (cause
& (1 << shift
)) {
306 /* clear interrupt(s) */
307 ATA_OUTL(ctlr
->r_res1
, 0x02008 + ATA_MV_EDMA_BASE(ch
), 0x0);
309 /* do we have any PHY events ? */
310 ata_sata_phy_check_events(dev
);
313 /* do we have any device action ? */
314 return (cause
& (2 << shift
));
317 /* must be called with ATA channel locked and state_mtx held */
319 ata_marvell_edma_begin_transaction(struct ata_request
*request
)
321 struct ata_pci_controller
*ctlr
=device_get_softc(GRANDPARENT(request
->dev
));
322 struct ata_channel
*ch
= device_get_softc(request
->parent
);
325 int i
, tag
= 0x07; /* XXX why 0x07 ? */
326 int dummy
, error
, slot
;
328 /* only DMA R/W goes through the EMDA machine */
329 if (request
->u
.ata
.command
!= ATA_READ_DMA
&&
330 request
->u
.ata
.command
!= ATA_WRITE_DMA
&&
331 request
->u
.ata
.command
!= ATA_READ_DMA48
&&
332 request
->u
.ata
.command
!= ATA_WRITE_DMA48
) {
334 /* disable the EDMA machinery */
335 if (ATA_INL(ctlr
->r_res1
, 0x02028 + ATA_MV_EDMA_BASE(ch
)) & 0x00000001)
336 ATA_OUTL(ctlr
->r_res1
, 0x02028 + ATA_MV_EDMA_BASE(ch
), 0x00000002);
337 return ata_begin_transaction(request
);
340 /* check for 48 bit access and convert if needed */
341 ata_modify_if_48bit(request
);
343 /* check sanity, setup SG list and DMA engine */
344 if ((error
= ch
->dma
->load(ch
->dev
, request
->data
, request
->bytecount
,
345 request
->flags
& ATA_R_READ
, ch
->dma
->sg
,
347 device_printf(request
->dev
, "setting up DMA failed\n");
348 request
->result
= error
;
349 return ATA_OP_FINISHED
;
352 /* get next free request queue slot */
353 req_in
= ATA_INL(ctlr
->r_res1
, 0x02014 + ATA_MV_EDMA_BASE(ch
));
354 slot
= (((req_in
& ~0xfffffc00) >> 5) + 0) & 0x1f;
355 bytep
= (u_int8_t
*)(ch
->dma
->work
);
356 bytep
+= (slot
<< 5);
358 /* fill in this request */
359 le32enc(bytep
+ 0 * sizeof(u_int32_t
),
360 (long)ch
->dma
->sg_bus
& 0xffffffff);
361 le32enc(bytep
+ 1 * sizeof(u_int32_t
),
362 (u_int64_t
)ch
->dma
->sg_bus
>> 32);
363 if (ctlr
->chip
->cfg2
!= MV_6042
&& ctlr
->chip
->cfg2
!= MV_7042
) {
364 le16enc(bytep
+ 4 * sizeof(u_int16_t
),
365 (request
->flags
& ATA_R_READ
? 0x01 : 0x00) | (tag
<<1));
368 bytep
[i
++] = (request
->u
.ata
.count
>> 8) & 0xff;
369 bytep
[i
++] = 0x10 | ATA_COUNT
;
370 bytep
[i
++] = request
->u
.ata
.count
& 0xff;
371 bytep
[i
++] = 0x10 | ATA_COUNT
;
373 bytep
[i
++] = (request
->u
.ata
.lba
>> 24) & 0xff;
374 bytep
[i
++] = 0x10 | ATA_SECTOR
;
375 bytep
[i
++] = request
->u
.ata
.lba
& 0xff;
376 bytep
[i
++] = 0x10 | ATA_SECTOR
;
378 bytep
[i
++] = (request
->u
.ata
.lba
>> 32) & 0xff;
379 bytep
[i
++] = 0x10 | ATA_CYL_LSB
;
380 bytep
[i
++] = (request
->u
.ata
.lba
>> 8) & 0xff;
381 bytep
[i
++] = 0x10 | ATA_CYL_LSB
;
383 bytep
[i
++] = (request
->u
.ata
.lba
>> 40) & 0xff;
384 bytep
[i
++] = 0x10 | ATA_CYL_MSB
;
385 bytep
[i
++] = (request
->u
.ata
.lba
>> 16) & 0xff;
386 bytep
[i
++] = 0x10 | ATA_CYL_MSB
;
388 bytep
[i
++] = ATA_D_LBA
| ATA_D_IBM
| ((request
->u
.ata
.lba
>> 24) & 0xf);
389 bytep
[i
++] = 0x10 | ATA_DRIVE
;
391 bytep
[i
++] = request
->u
.ata
.command
;
392 bytep
[i
++] = 0x90 | ATA_COMMAND
;
394 le32enc(bytep
+ 2 * sizeof(u_int32_t
),
395 (request
->flags
& ATA_R_READ
? 0x01 : 0x00) | (tag
<<1));
400 bytep
[i
++] = request
->u
.ata
.command
;
401 bytep
[i
++] = request
->u
.ata
.feature
& 0xff;
403 bytep
[i
++] = request
->u
.ata
.lba
& 0xff;
404 bytep
[i
++] = (request
->u
.ata
.lba
>> 8) & 0xff;
405 bytep
[i
++] = (request
->u
.ata
.lba
>> 16) & 0xff;
406 bytep
[i
++] = ATA_D_LBA
| ATA_D_IBM
| ((request
->u
.ata
.lba
>> 24) & 0x0f);
408 bytep
[i
++] = (request
->u
.ata
.lba
>> 24) & 0xff;
409 bytep
[i
++] = (request
->u
.ata
.lba
>> 32) & 0xff;
410 bytep
[i
++] = (request
->u
.ata
.lba
>> 40) & 0xff;
411 bytep
[i
++] = (request
->u
.ata
.feature
>> 8) & 0xff;
413 bytep
[i
++] = request
->u
.ata
.count
& 0xff;
414 bytep
[i
++] = (request
->u
.ata
.count
>> 8) & 0xff;
419 bus_dmamap_sync(ch
->dma
->work_tag
, ch
->dma
->work_map
,
420 BUS_DMASYNC_PREREAD
| BUS_DMASYNC_PREWRITE
);
422 /* enable EDMA machinery if needed */
423 if (!(ATA_INL(ctlr
->r_res1
, 0x02028 + ATA_MV_EDMA_BASE(ch
)) & 0x00000001)) {
424 ATA_OUTL(ctlr
->r_res1
, 0x02028 + ATA_MV_EDMA_BASE(ch
), 0x00000001);
425 while (!(ATA_INL(ctlr
->r_res1
,
426 0x02028 + ATA_MV_EDMA_BASE(ch
)) & 0x00000001))
430 /* tell EDMA it has a new request */
431 slot
= (((req_in
& ~0xfffffc00) >> 5) + 1) & 0x1f;
432 req_in
&= 0xfffffc00;
433 req_in
+= (slot
<< 5);
434 ATA_OUTL(ctlr
->r_res1
, 0x02014 + ATA_MV_EDMA_BASE(ch
), req_in
);
436 return ATA_OP_CONTINUES
;
439 /* must be called with ATA channel locked and state_mtx held */
441 ata_marvell_edma_end_transaction(struct ata_request
*request
)
443 struct ata_pci_controller
*ctlr
=device_get_softc(GRANDPARENT(request
->dev
));
444 struct ata_channel
*ch
= device_get_softc(request
->parent
);
445 int offset
= (ch
->unit
> 3 ? 0x30014 : 0x20014);
446 u_int32_t icr
= ATA_INL(ctlr
->r_res1
, offset
);
450 if ((icr
& (0x0001 << (ch
->unit
& 3)))) {
451 struct ata_marvell_response
*response
;
452 u_int32_t rsp_in
, rsp_out
;
456 callout_cancel(&request
->callout
);
458 /* get response ptr's */
459 rsp_in
= ATA_INL(ctlr
->r_res1
, 0x02020 + ATA_MV_EDMA_BASE(ch
));
460 rsp_out
= ATA_INL(ctlr
->r_res1
, 0x02024 + ATA_MV_EDMA_BASE(ch
));
461 slot
= (((rsp_in
& ~0xffffff00) >> 3)) & 0x1f;
462 rsp_out
&= 0xffffff00;
463 rsp_out
+= (slot
<< 3);
464 bus_dmamap_sync(ch
->dma
->work_tag
, ch
->dma
->work_map
,
465 BUS_DMASYNC_POSTREAD
| BUS_DMASYNC_POSTWRITE
);
466 response
= (struct ata_marvell_response
*)
467 (ch
->dma
->work
+ 1024 + (slot
<< 3));
469 /* record status for this request */
470 request
->status
= response
->dev_status
;
474 ATA_OUTL(ctlr
->r_res1
, 0x02024 + ATA_MV_EDMA_BASE(ch
), rsp_out
);
476 /* update progress */
477 if (!(request
->status
& ATA_S_ERROR
) &&
478 !(request
->flags
& ATA_R_TIMEOUT
))
479 request
->donecount
= request
->bytecount
;
482 ch
->dma
->unload(ch
->dev
);
484 res
= ATA_OP_FINISHED
;
487 /* legacy ATA interrupt */
489 res
= ata_end_transaction(request
);
493 ATA_OUTL(ctlr
->r_res1
, offset
, ~(icr
& (0x0101 << (ch
->unit
& 3))));
498 ata_marvell_edma_reset(device_t dev
)
500 struct ata_pci_controller
*ctlr
= device_get_softc(device_get_parent(dev
));
501 struct ata_channel
*ch
= device_get_softc(dev
);
503 /* disable the EDMA machinery */
504 ATA_OUTL(ctlr
->r_res1
, 0x02028 + ATA_MV_EDMA_BASE(ch
), 0x00000002);
505 while ((ATA_INL(ctlr
->r_res1
, 0x02028 + ATA_MV_EDMA_BASE(ch
)) & 0x00000001))
508 /* clear SATA error register */
509 ATA_IDX_OUTL(ch
, ATA_SERROR
, ATA_IDX_INL(ch
, ATA_SERROR
));
511 /* clear any outstanding error interrupts */
512 ATA_OUTL(ctlr
->r_res1
, 0x02008 + ATA_MV_EDMA_BASE(ch
), 0x0);
514 /* unmask all error interrupts */
515 ATA_OUTL(ctlr
->r_res1
, 0x0200c + ATA_MV_EDMA_BASE(ch
), ~0x0);
517 /* enable channel and test for devices */
518 if (ata_sata_phy_reset(dev
))
519 ata_generic_reset(dev
);
521 /* enable EDMA machinery */
522 ATA_OUTL(ctlr
->r_res1
, 0x02028 + ATA_MV_EDMA_BASE(ch
), 0x00000001);
526 ata_marvell_edma_dmasetprd(void *xsc
, bus_dma_segment_t
*segs
, int nsegs
,
529 struct ata_dmasetprd_args
*args
= xsc
;
530 struct ata_marvell_dma_prdentry
*prd
= args
->dmatab
;
533 if ((args
->error
= error
))
536 for (i
= 0; i
< nsegs
; i
++) {
537 prd
[i
].addrlo
= htole32(segs
[i
].ds_addr
);
538 prd
[i
].count
= htole32(segs
[i
].ds_len
);
539 prd
[i
].addrhi
= htole32((u_int64_t
)segs
[i
].ds_addr
>> 32);
542 prd
[i
- 1].count
|= htole32(ATA_DMA_EOT
);
543 KASSERT(nsegs
<= ATA_DMA_ENTRIES
, ("too many DMA segment entries\n"));
548 ata_marvell_edma_dmainit(device_t dev
)
550 struct ata_pci_controller
*ctlr
= device_get_softc(device_get_parent(dev
));
551 struct ata_channel
*ch
= device_get_softc(dev
);
555 /* note start and stop are not used here */
556 ch
->dma
->setprd
= ata_marvell_edma_dmasetprd
;
558 if (ATA_INL(ctlr
->r_res1
, 0x00d00) & 0x00000004)
559 ch
->dma
->max_address
= BUS_SPACE_MAXADDR
;
561 /* chip does not reliably do 64K DMA transfers */
562 if (ctlr
->chip
->cfg2
== MV_50XX
|| ctlr
->chip
->cfg2
== MV_60XX
)
563 ch
->dma
->max_iosize
= 64 * DEV_BSIZE
;
565 ch
->dma
->max_iosize
= (ATA_DMA_ENTRIES
- 1) * PAGE_SIZE
;