/*
 * Driver for the Octeon bootbus compact flash.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2005 - 2009 Cavium Networks
 * Copyright (C) 2008 Wind River Systems
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/libata.h>
#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>
#include <scsi/scsi_host.h>

#include <asm/octeon/octeon.h>
/*
 * The Octeon bootbus compact flash interface is connected in at least
 * 3 different configurations on various evaluation boards:
 *
 * -- 8  bits no irq, no DMA
 * -- 16 bits no irq, no DMA
 * -- 16 bits True IDE mode with DMA, but no irq.
 *
 * In the last case the DMA engine can generate an interrupt when the
 * transfer is complete.  For the first two cases only PIO is supported.
 */

#define DRV_NAME	"pata_octeon_cf"
#define DRV_VERSION	"2.1"
struct octeon_cf_port {
	struct workqueue_struct *wq;
	struct delayed_work delayed_finish;
	struct ata_port *ap;
	int dma_finished;
};

static struct scsi_host_template octeon_cf_sht = {
	ATA_PIO_SHT(DRV_NAME),
};
/**
 * Convert nanosecond based time to setting used in the
 * boot bus timing register, based on timing multiple
 */
static unsigned int ns_to_tim_reg(unsigned int tim_mult, unsigned int nsecs)
{
	unsigned int val;

	/*
	 * Compute # of eclock periods to get desired duration in
	 * nanoseconds.
	 */
	val = DIV_ROUND_UP(nsecs * (octeon_get_clock_rate() / 1000000),
			   1000 * tim_mult);

	return val;
}
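
/*
 * Worked example (assuming a hypothetical 500 MHz eclock):
 * ns_to_tim_reg(2, 20) = DIV_ROUND_UP(20 * (500000000 / 1000000), 1000 * 2)
 *                      = DIV_ROUND_UP(10000, 2000) = 5 timing units.
 */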
static void octeon_cf_set_boot_reg_cfg(int cs)
{
	union cvmx_mio_boot_reg_cfgx reg_cfg;
	reg_cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(cs));
	reg_cfg.s.dmack = 0;	/* Don't assert DMACK on access */
	reg_cfg.s.tim_mult = 2;	/* Timing multiplier 2x */
	reg_cfg.s.rd_dly = 0;	/* Sample on falling edge of BOOT_OE */
	reg_cfg.s.sam = 0;	/* Don't combine write and output enable */
	reg_cfg.s.we_ext = 0;	/* No write enable extension */
	reg_cfg.s.oe_ext = 0;	/* No read enable extension */
	reg_cfg.s.en = 1;	/* Enable this region */
	reg_cfg.s.orbit = 0;	/* Don't combine with previous region */
	reg_cfg.s.ale = 0;	/* Don't do address multiplexing */
	cvmx_write_csr(CVMX_MIO_BOOT_REG_CFGX(cs), reg_cfg.u64);
}
/**
 * Called after libata determines the needed PIO mode. This
 * function programs the Octeon bootbus regions to support the
 * timing requirements of the PIO mode.
 *
 * @ap:  ATA port information
 * @dev: ATA device
 */
static void octeon_cf_set_piomode(struct ata_port *ap, struct ata_device *dev)
{
	struct octeon_cf_data *ocd = ap->dev->platform_data;
	union cvmx_mio_boot_reg_timx reg_tim;
	int cs = ocd->base_region;
	int T;
	struct ata_timing timing;

	int use_iordy;
	int trh;
	int pause;
	/* These names are timing parameters from the ATA spec */
	int t2;

	T = (int)(2000000000000LL / octeon_get_clock_rate());

	if (ata_timing_compute(dev, dev->pio_mode, &timing, T, T))
		BUG();

	t2 = timing.active;
	if (t2)
		t2--;

	trh = ns_to_tim_reg(2, 20);
	if (trh)
		trh--;

	pause = timing.cycle - timing.active - timing.setup - trh;
	if (pause)
		pause--;

	octeon_cf_set_boot_reg_cfg(cs);
	if (ocd->dma_engine >= 0)
		/* True IDE mode, program both chip selects. */
		octeon_cf_set_boot_reg_cfg(cs + 1);

	use_iordy = ata_pio_need_iordy(dev);

	reg_tim.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_TIMX(cs));
	/* Disable page mode */
	reg_tim.s.pagem = 0;
	/* Enable dynamic timing */
	reg_tim.s.waitm = use_iordy;
	/* Pages are disabled */
	reg_tim.s.pages = 0;
	/* We don't use multiplexed address mode */
	reg_tim.s.ale = 0;
	/* Time after IORDY to continue to assert the data */
	reg_tim.s.wait = 0;
	/* Time to wait to complete the cycle. */
	reg_tim.s.pause = pause;
	/* How long to hold after a write to de-assert CE. */
	reg_tim.s.wr_hld = trh;
	/* How long to wait after a read to de-assert CE. */
	reg_tim.s.rd_hld = trh;
	/* How long write enable is asserted */
	reg_tim.s.we = t2;
	/* How long read enable is asserted */
	reg_tim.s.oe = t2;
	/* Time after CE that read/write starts */
	reg_tim.s.ce = ns_to_tim_reg(2, 5);
	/* Time before CE that address is valid */
	reg_tim.s.adr = 0;

	/* Program the bootbus region timing for the data port chip select. */
	cvmx_write_csr(CVMX_MIO_BOOT_REG_TIMX(cs), reg_tim.u64);
	if (ocd->dma_engine >= 0)
		/* True IDE mode, program both chip selects. */
		cvmx_write_csr(CVMX_MIO_BOOT_REG_TIMX(cs + 1), reg_tim.u64);
}
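
/*
 * Units note for octeon_cf_set_piomode() above, assuming a hypothetical
 * 500 MHz eclock: T = 2000000000000 / 500000000 = 4000, i.e. the length
 * of one 2x-multiplied timing unit in picoseconds (two 2000 ps clock
 * periods), which is the granularity passed to ata_timing_compute().
 */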
static void octeon_cf_set_dmamode(struct ata_port *ap, struct ata_device *dev)
{
	struct octeon_cf_data *ocd = dev->link->ap->dev->platform_data;
	union cvmx_mio_boot_dma_timx dma_tim;
	unsigned int oe_a;
	unsigned int oe_n;
	unsigned int dma_ackh;
	unsigned int dma_arq;
	unsigned int pause;
	unsigned int T0, Tkr, Td;
	unsigned int tim_mult;

	const struct ata_timing *timing;

	timing = ata_timing_find_mode(dev->dma_mode);
	T0	= timing->cycle;
	Td	= timing->active;
	Tkr	= timing->recover;
	dma_ackh = timing->dmack_hold;

	dma_tim.u64 = 0;
	/* dma_tim.s.tim_mult = 0 --> 4x */
	tim_mult = 4;

	/* not spec'ed, value in eclocks, not affected by tim_mult */
	dma_arq = 8;
	pause = 25 - dma_arq * 1000 /
		(octeon_get_clock_rate() / 1000000); /* Tz */

	oe_a = Td;
	/* Tkr from cf spec, lengthened to meet T0 */
	oe_n = max(T0 - oe_a, Tkr);

	dma_tim.s.dmack_pi = 1;

	dma_tim.s.oe_n = ns_to_tim_reg(tim_mult, oe_n);
	dma_tim.s.oe_a = ns_to_tim_reg(tim_mult, oe_a);

	/*
	 * This is tI, C.F. spec. says 0, but Sony CF card requires
	 * more, we use 20 nS.
	 */
	dma_tim.s.dmack_s = ns_to_tim_reg(tim_mult, 20);
	dma_tim.s.dmack_h = ns_to_tim_reg(tim_mult, dma_ackh);

	dma_tim.s.dmarq = dma_arq;
	dma_tim.s.pause = ns_to_tim_reg(tim_mult, pause);

	dma_tim.s.rd_dly = 0;	/* Sample right on edge */

	/* Write strobes use the same timings as the read strobes. */
	dma_tim.s.we_n = ns_to_tim_reg(tim_mult, oe_n);
	dma_tim.s.we_a = ns_to_tim_reg(tim_mult, oe_a);

	pr_debug("ns to ticks (mult %d) of %d is: %d\n", tim_mult, 60,
		 ns_to_tim_reg(tim_mult, 60));
	pr_debug("oe_n: %d, oe_a: %d, dmack_s: %d, dmack_h: %d, dmarq: %d, pause: %d\n",
		 dma_tim.s.oe_n, dma_tim.s.oe_a, dma_tim.s.dmack_s,
		 dma_tim.s.dmack_h, dma_tim.s.dmarq, dma_tim.s.pause);

	cvmx_write_csr(CVMX_MIO_BOOT_DMA_TIMX(ocd->dma_engine),
		       dma_tim.u64);
}
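
/*
 * Worked example for the Tz pause computed above, again assuming a
 * hypothetical 500 MHz eclock and the dma_arq of 8 eclocks used there:
 * pause = 25 - 8 * 1000 / (500000000 / 1000000) = 25 - 16 = 9 ns.
 */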
/**
 * Handle an 8 bit I/O request.
 *
 * @dev:    Device to access
 * @buffer: Data buffer
 * @buflen: Length of the buffer.
 * @rw:     True to write.
 */
static unsigned int octeon_cf_data_xfer8(struct ata_device *dev,
					 unsigned char *buffer,
					 unsigned int buflen,
					 int rw)
{
	struct ata_port *ap		= dev->link->ap;
	void __iomem *data_addr		= ap->ioaddr.data_addr;
	unsigned long words;
	int count;

	words = buflen;
	if (rw) {
		count = 16;
		while (words--) {
			iowrite8(*buffer, data_addr);
			buffer++;
			/*
			 * Every 16 writes do a read so the bootbus
			 * FIFO doesn't fill up.
			 */
			if (--count == 0) {
				ioread8(ap->ioaddr.altstatus_addr);
				count = 16;
			}
		}
	} else {
		ioread8_rep(data_addr, buffer, words);
	}
	return buflen;
}
/**
 * Handle a 16 bit I/O request.
 *
 * @dev:    Device to access
 * @buffer: Data buffer
 * @buflen: Length of the buffer.
 * @rw:     True to write.
 */
static unsigned int octeon_cf_data_xfer16(struct ata_device *dev,
					  unsigned char *buffer,
					  unsigned int buflen,
					  int rw)
{
	struct ata_port *ap		= dev->link->ap;
	void __iomem *data_addr		= ap->ioaddr.data_addr;
	unsigned long words;
	int count;

	words = buflen / 2;
	if (rw) {
		count = 16;
		while (words--) {
			iowrite16(*(uint16_t *)buffer, data_addr);
			buffer += sizeof(uint16_t);
			/*
			 * Every 16 writes do a read so the bootbus
			 * FIFO doesn't fill up.
			 */
			if (--count == 0) {
				ioread8(ap->ioaddr.altstatus_addr);
				count = 16;
			}
		}
	} else {
		while (words--) {
			*(uint16_t *)buffer = ioread16(data_addr);
			buffer += sizeof(uint16_t);
		}
	}
	/* Transfer trailing 1 byte, if any. */
	if (unlikely(buflen & 0x01)) {
		__le16 align_buf[1] = { 0 };

		if (rw == READ) {
			align_buf[0] = cpu_to_le16(ioread16(data_addr));
			memcpy(buffer, align_buf, 1);
		} else {
			memcpy(align_buf, buffer, 1);
			iowrite16(le16_to_cpu(align_buf[0]), data_addr);
		}
		words++;
	}
	return buflen;
}
/*
 * Read the taskfile for 16bit non-True IDE only.
 */
static void octeon_cf_tf_read16(struct ata_port *ap, struct ata_taskfile *tf)
{
	u16 blob;
	/* The base of the registers is at ioaddr.data_addr. */
	void __iomem *base = ap->ioaddr.data_addr;

	blob = __raw_readw(base + 0xc);
	tf->feature = blob >> 8;

	blob = __raw_readw(base + 2);
	tf->nsect = blob & 0xff;
	tf->lbal = blob >> 8;

	blob = __raw_readw(base + 4);
	tf->lbam = blob & 0xff;
	tf->lbah = blob >> 8;

	blob = __raw_readw(base + 6);
	tf->device = blob & 0xff;
	tf->command = blob >> 8;

	if (tf->flags & ATA_TFLAG_LBA48) {
		if (likely(ap->ioaddr.ctl_addr)) {
			iowrite8(tf->ctl | ATA_HOB, ap->ioaddr.ctl_addr);

			blob = __raw_readw(base + 0xc);
			tf->hob_feature = blob >> 8;

			blob = __raw_readw(base + 2);
			tf->hob_nsect = blob & 0xff;
			tf->hob_lbal = blob >> 8;

			blob = __raw_readw(base + 4);
			tf->hob_lbam = blob & 0xff;
			tf->hob_lbah = blob >> 8;

			iowrite8(tf->ctl, ap->ioaddr.ctl_addr);
			ap->last_ctl = tf->ctl;
		} else {
			WARN_ON(1);
		}
	}
}
static u8 octeon_cf_check_status16(struct ata_port *ap)
{
	u16 blob;
	void __iomem *base = ap->ioaddr.data_addr;

	blob = __raw_readw(base + 6);
	return blob >> 8;
}
static int octeon_cf_softreset16(struct ata_link *link, unsigned int *classes,
				 unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	void __iomem *base = ap->ioaddr.data_addr;
	int rc;
	u8 err;

	DPRINTK("about to softreset\n");
	__raw_writew(ap->ctl, base + 0xe);
	udelay(20);
	__raw_writew(ap->ctl | ATA_SRST, base + 0xe);
	udelay(20);
	__raw_writew(ap->ctl, base + 0xe);

	rc = ata_sff_wait_after_reset(link, 1, deadline);
	if (rc) {
		ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc);
		return rc;
	}

	/* determine by signature whether we have ATA or ATAPI devices */
	classes[0] = ata_sff_dev_classify(&link->device[0], 1, &err);
	DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
	return 0;
}
/*
 * Load the taskfile for 16bit non-True IDE only.  The device_addr is
 * not loaded, we do this as part of octeon_cf_exec_command16.
 */
static void octeon_cf_tf_load16(struct ata_port *ap,
				const struct ata_taskfile *tf)
{
	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
	/* The base of the registers is at ioaddr.data_addr. */
	void __iomem *base = ap->ioaddr.data_addr;

	if (tf->ctl != ap->last_ctl) {
		iowrite8(tf->ctl, ap->ioaddr.ctl_addr);
		ap->last_ctl = tf->ctl;
		ata_wait_idle(ap);
	}
	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
		__raw_writew(tf->hob_feature << 8, base + 0xc);
		__raw_writew(tf->hob_nsect | tf->hob_lbal << 8, base + 2);
		__raw_writew(tf->hob_lbam | tf->hob_lbah << 8, base + 4);
		VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
			tf->hob_feature,
			tf->hob_nsect,
			tf->hob_lbal,
			tf->hob_lbam,
			tf->hob_lbah);
	}
	if (is_addr) {
		__raw_writew(tf->feature << 8, base + 0xc);
		__raw_writew(tf->nsect | tf->lbal << 8, base + 2);
		__raw_writew(tf->lbam | tf->lbah << 8, base + 4);
		VPRINTK("feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
			tf->feature,
			tf->nsect,
			tf->lbal,
			tf->lbam,
			tf->lbah);
	}
	ata_wait_idle(ap);
}
static void octeon_cf_dev_select(struct ata_port *ap, unsigned int device)
{
	/* There is only one device, do nothing. */
	return;
}
/*
 * Issue ATA command to host controller.  The device_addr is also sent
 * as it must be written in a combined write with the command.
 */
static void octeon_cf_exec_command16(struct ata_port *ap,
				     const struct ata_taskfile *tf)
{
	/* The base of the registers is at ioaddr.data_addr. */
	void __iomem *base = ap->ioaddr.data_addr;
	u16 blob = 0;

	if (tf->flags & ATA_TFLAG_DEVICE) {
		VPRINTK("device 0x%X\n", tf->device);
		blob = tf->device;
	}

	DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command);
	blob |= (tf->command << 8);
	__raw_writew(blob, base + 6);

	ata_wait_idle(ap);
}
static u8 octeon_cf_irq_on(struct ata_port *ap)
{
	return 0;
}

static void octeon_cf_irq_clear(struct ata_port *ap)
{
	return;
}
static void octeon_cf_dma_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct octeon_cf_port *cf_port;

	cf_port = (struct octeon_cf_port *)ap->private_data;
	/* issue r/w command */
	qc->cursg = qc->sg;
	cf_port->dma_finished = 0;
	ap->ops->sff_exec_command(ap, &qc->tf);
}
/**
 * Start a DMA transfer that was already setup
 *
 * @qc: Information about the DMA
 */
static void octeon_cf_dma_start(struct ata_queued_cmd *qc)
{
	struct octeon_cf_data *ocd = qc->ap->dev->platform_data;
	union cvmx_mio_boot_dma_cfgx mio_boot_dma_cfg;
	union cvmx_mio_boot_dma_intx mio_boot_dma_int;
	struct scatterlist *sg;

	VPRINTK("%d scatterlists\n", qc->n_elem);

	/* Get the scatter list entry we need to DMA into */
	sg = qc->cursg;
	BUG_ON(!sg);

	/*
	 * Clear the DMA complete status.
	 */
	mio_boot_dma_int.u64 = 0;
	mio_boot_dma_int.s.done = 1;
	cvmx_write_csr(CVMX_MIO_BOOT_DMA_INTX(ocd->dma_engine),
		       mio_boot_dma_int.u64);

	/* Enable the interrupt. */
	cvmx_write_csr(CVMX_MIO_BOOT_DMA_INT_ENX(ocd->dma_engine),
		       mio_boot_dma_int.u64);

	/* Set the direction of the DMA */
	mio_boot_dma_cfg.u64 = 0;
	mio_boot_dma_cfg.s.en = 1;
	mio_boot_dma_cfg.s.rw = ((qc->tf.flags & ATA_TFLAG_WRITE) != 0);

	/*
	 * Don't stop the DMA if the device deasserts DMARQ.  Many
	 * compact flashes deassert DMARQ for a short time between
	 * sectors.  Instead of stopping and restarting the DMA, we'll
	 * let the hardware do it.  If the DMA is really stopped early
	 * due to an error condition, a later timeout will force us to
	 * stop.
	 */
	mio_boot_dma_cfg.s.clr = 0;

	/* Size is specified in 16bit words and minus one notation */
	mio_boot_dma_cfg.s.size = sg_dma_len(sg) / 2 - 1;

	/* We need to swap the high and low bytes of every 16 bits */
	mio_boot_dma_cfg.s.swap8 = 1;

	mio_boot_dma_cfg.s.adr = sg_dma_address(sg);

	VPRINTK("%s %d bytes address=%p\n",
		(mio_boot_dma_cfg.s.rw) ? "write" : "read", sg->length,
		(void *)(unsigned long)mio_boot_dma_cfg.s.adr);

	cvmx_write_csr(CVMX_MIO_BOOT_DMA_CFGX(ocd->dma_engine),
		       mio_boot_dma_cfg.u64);
}
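
/*
 * Example of the minus-one size encoding above: a single 512 byte
 * sector is 256 16-bit words, so mio_boot_dma_cfg.s.size is written
 * as 255.
 */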
/*
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
static unsigned int octeon_cf_dma_finished(struct ata_port *ap,
					   struct ata_queued_cmd *qc)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;
	struct octeon_cf_data *ocd = ap->dev->platform_data;
	union cvmx_mio_boot_dma_cfgx dma_cfg;
	union cvmx_mio_boot_dma_intx dma_int;
	struct octeon_cf_port *cf_port;
	u8 status;

	VPRINTK("ata%u: protocol %d task_state %d\n",
		ap->print_id, qc->tf.protocol, ap->hsm_task_state);

	if (ap->hsm_task_state != HSM_ST_LAST)
		return 0;

	cf_port = (struct octeon_cf_port *)ap->private_data;

	dma_cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_DMA_CFGX(ocd->dma_engine));
	if (dma_cfg.s.size != 0xfffff) {
		/* Error, the transfer was not complete. */
		qc->err_mask |= AC_ERR_HOST_BUS;
		ap->hsm_task_state = HSM_ST_ERR;
	}

	/* Stop and clear the dma engine. */
	dma_cfg.u64 = 0;
	dma_cfg.s.size = -1;
	cvmx_write_csr(CVMX_MIO_BOOT_DMA_CFGX(ocd->dma_engine), dma_cfg.u64);

	/* Disable the interrupt. */
	dma_int.u64 = 0;
	cvmx_write_csr(CVMX_MIO_BOOT_DMA_INT_ENX(ocd->dma_engine), dma_int.u64);

	/* Clear the DMA complete status */
	dma_int.s.done = 1;
	cvmx_write_csr(CVMX_MIO_BOOT_DMA_INTX(ocd->dma_engine), dma_int.u64);

	status = ap->ops->sff_check_status(ap);

	ata_sff_hsm_move(ap, qc, status, 0);

	if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA))
		ata_ehi_push_desc(ehi, "DMA stat 0x%x", status);

	return 1;
}
/*
 * Check if any queued commands have more DMAs, if so start the next
 * transfer, else do end of transfer handling.
 */
static irqreturn_t octeon_cf_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct octeon_cf_port *cf_port;
	int i;
	unsigned int handled = 0;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	for (i = 0; i < host->n_ports; i++) {
		u8 status;
		struct ata_port *ap;
		struct ata_queued_cmd *qc;
		union cvmx_mio_boot_dma_intx dma_int;
		union cvmx_mio_boot_dma_cfgx dma_cfg;
		struct octeon_cf_data *ocd;

		ap = host->ports[i];
		if (!ap || (ap->flags & ATA_FLAG_DISABLED))
			continue;

		ocd = ap->dev->platform_data;
		cf_port = (struct octeon_cf_port *)ap->private_data;
		dma_int.u64 =
			cvmx_read_csr(CVMX_MIO_BOOT_DMA_INTX(ocd->dma_engine));
		dma_cfg.u64 =
			cvmx_read_csr(CVMX_MIO_BOOT_DMA_CFGX(ocd->dma_engine));

		qc = ata_qc_from_tag(ap, ap->link.active_tag);

		if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
		    (qc->flags & ATA_QCFLAG_ACTIVE)) {
			if (dma_int.s.done && !dma_cfg.s.en) {
				if (!sg_is_last(qc->cursg)) {
					qc->cursg = sg_next(qc->cursg);
					handled = 1;
					octeon_cf_dma_start(qc);
					continue;
				} else {
					cf_port->dma_finished = 1;
				}
			}
			if (!cf_port->dma_finished)
				continue;
			status = ioread8(ap->ioaddr.altstatus_addr);
			if (status & (ATA_BUSY | ATA_DRQ)) {
				/*
				 * We are busy, try to handle it
				 * later.  This is the DMA finished
				 * interrupt, and it could take a
				 * little while for the card to be
				 * ready for more commands.
				 */
				/* Clear DMA irq. */
				dma_int.u64 = 0;
				dma_int.s.done = 1;
				cvmx_write_csr(CVMX_MIO_BOOT_DMA_INTX(ocd->dma_engine),
					       dma_int.u64);

				queue_delayed_work(cf_port->wq,
						   &cf_port->delayed_finish, 1);
				handled = 1;
			} else {
				handled |= octeon_cf_dma_finished(ap, qc);
			}
		}
	}
	spin_unlock_irqrestore(&host->lock, flags);
	return IRQ_RETVAL(handled);
}
static void octeon_cf_delayed_finish(struct work_struct *work)
{
	struct octeon_cf_port *cf_port = container_of(work,
						      struct octeon_cf_port,
						      delayed_finish.work);
	struct ata_port *ap = cf_port->ap;
	struct ata_host *host = ap->host;
	struct ata_queued_cmd *qc;
	unsigned long flags;
	u8 status;

	spin_lock_irqsave(&host->lock, flags);

	/*
	 * If the port is not waiting for completion, it must have
	 * handled it previously.  The hsm_task_state is
	 * protected by host->lock.
	 */
	if (ap->hsm_task_state != HSM_ST_LAST || !cf_port->dma_finished)
		goto out;

	status = ioread8(ap->ioaddr.altstatus_addr);
	if (status & (ATA_BUSY | ATA_DRQ)) {
		/* Still busy, try again. */
		queue_delayed_work(cf_port->wq,
				   &cf_port->delayed_finish, 1);
		goto out;
	}
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
	    (qc->flags & ATA_QCFLAG_ACTIVE))
		octeon_cf_dma_finished(ap, qc);
out:
	spin_unlock_irqrestore(&host->lock, flags);
}
static void octeon_cf_dev_config(struct ata_device *dev)
{
	/*
	 * A maximum of 2^20 - 1 16 bit transfers are possible with
	 * the bootbus DMA.  So we need to throttle max_sectors to
	 * (2^12 - 1 == 4095) to assure that this can never happen.
	 */
	dev->max_sectors = min(dev->max_sectors, 4095U);
}
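
/*
 * The arithmetic behind the 4095 limit above: (2^20 - 1) 16-bit words
 * is just under 2^21 bytes, i.e. just under 2^12 512-byte sectors, so
 * 2^12 - 1 == 4095 sectors always fits in a single bootbus DMA.
 */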
/*
 * Trap if driver tries to do standard bmdma commands.  They are not
 * supported.
 */
static void unreachable_qc(struct ata_queued_cmd *qc)
{
	BUG();
}

static u8 unreachable_port(struct ata_port *ap)
{
	BUG();
}

/*
 * We don't do ATAPI DMA so return 0.
 */
static int octeon_cf_check_atapi_dma(struct ata_queued_cmd *qc)
{
	return 0;
}
static unsigned int octeon_cf_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	switch (qc->tf.protocol) {
	case ATA_PROT_DMA:
		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);

		ap->ops->sff_tf_load(ap, &qc->tf);  /* load tf registers */
		octeon_cf_dma_setup(qc);	    /* set up dma */
		octeon_cf_dma_start(qc);	    /* initiate dma */
		ap->hsm_task_state = HSM_ST_LAST;
		break;

	case ATAPI_PROT_DMA:
		dev_err(ap->dev, "Error, ATAPI not supported\n");
		BUG();

	default:
		return ata_sff_qc_issue(qc);
	}

	return 0;
}
static struct ata_port_operations octeon_cf_ops = {
	.inherits		= &ata_sff_port_ops,
	.check_atapi_dma	= octeon_cf_check_atapi_dma,
	.qc_prep		= ata_noop_qc_prep,
	.qc_issue		= octeon_cf_qc_issue,
	.sff_dev_select		= octeon_cf_dev_select,
	.sff_irq_on		= octeon_cf_irq_on,
	.sff_irq_clear		= octeon_cf_irq_clear,
	.bmdma_setup		= unreachable_qc,
	.bmdma_start		= unreachable_qc,
	.bmdma_stop		= unreachable_qc,
	.bmdma_status		= unreachable_port,
	.cable_detect		= ata_cable_40wire,
	.set_piomode		= octeon_cf_set_piomode,
	.set_dmamode		= octeon_cf_set_dmamode,
	.dev_config		= octeon_cf_dev_config,
};
static int __devinit octeon_cf_probe(struct platform_device *pdev)
{
	struct resource *res_cs0, *res_cs1;

	void __iomem *cs0;
	void __iomem *cs1 = NULL;
	struct ata_host *host;
	struct ata_port *ap;
	struct octeon_cf_data *ocd;
	int irq = 0;
	irq_handler_t irq_handler = NULL;
	void __iomem *base;
	struct octeon_cf_port *cf_port;

	res_cs0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res_cs0)
		return -EINVAL;

	ocd = pdev->dev.platform_data;

	cs0 = devm_ioremap_nocache(&pdev->dev, res_cs0->start,
				   res_cs0->end - res_cs0->start + 1);
	if (!cs0)
		return -ENOMEM;

	/* Determine from availability of DMA if True IDE mode or not */
	if (ocd->dma_engine >= 0) {
		res_cs1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		if (!res_cs1)
			return -EINVAL;

		cs1 = devm_ioremap_nocache(&pdev->dev, res_cs1->start,
					   res_cs0->end - res_cs1->start + 1);
		if (!cs1)
			return -ENOMEM;
	}

	cf_port = kzalloc(sizeof(*cf_port), GFP_KERNEL);
	if (!cf_port)
		return -ENOMEM;

	/* allocate host */
	host = ata_host_alloc(&pdev->dev, 1);
	if (!host)
		goto free_cf_port;

	ap = host->ports[0];
	ap->private_data = cf_port;
	cf_port->ap = ap;
	ap->ops = &octeon_cf_ops;
	ap->pio_mask = 0x7f; /* Support PIO 0-6 */
	ap->flags |= ATA_FLAG_MMIO | ATA_FLAG_NO_LEGACY
		  | ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING;

	base = cs0 + ocd->base_region_bias;
	if (!ocd->is16bit) {
		ap->ioaddr.cmd_addr = base;
		ata_sff_std_ports(&ap->ioaddr);

		ap->ioaddr.altstatus_addr = base + 0xe;
		ap->ioaddr.ctl_addr = base + 0xe;
		octeon_cf_ops.sff_data_xfer = octeon_cf_data_xfer8;
	} else if (cs1) {
		/* Presence of cs1 indicates True IDE mode. */
		ap->ioaddr.cmd_addr	= base + (ATA_REG_CMD << 1) + 1;
		ap->ioaddr.data_addr	= base + (ATA_REG_DATA << 1);
		ap->ioaddr.error_addr	= base + (ATA_REG_ERR << 1) + 1;
		ap->ioaddr.feature_addr	= base + (ATA_REG_FEATURE << 1) + 1;
		ap->ioaddr.nsect_addr	= base + (ATA_REG_NSECT << 1) + 1;
		ap->ioaddr.lbal_addr	= base + (ATA_REG_LBAL << 1) + 1;
		ap->ioaddr.lbam_addr	= base + (ATA_REG_LBAM << 1) + 1;
		ap->ioaddr.lbah_addr	= base + (ATA_REG_LBAH << 1) + 1;
		ap->ioaddr.device_addr	= base + (ATA_REG_DEVICE << 1) + 1;
		ap->ioaddr.status_addr	= base + (ATA_REG_STATUS << 1) + 1;
		ap->ioaddr.command_addr	= base + (ATA_REG_CMD << 1) + 1;
		ap->ioaddr.altstatus_addr = cs1 + (6 << 1) + 1;
		ap->ioaddr.ctl_addr	= cs1 + (6 << 1) + 1;
		octeon_cf_ops.sff_data_xfer = octeon_cf_data_xfer16;

		ap->mwdma_mask = 0x1f; /* Support MWDMA 0-4 */
		irq = platform_get_irq(pdev, 0);
		irq_handler = octeon_cf_interrupt;

		/* True IDE mode needs delayed work to poll for not-busy. */
		cf_port->wq = create_singlethread_workqueue(DRV_NAME);
		if (!cf_port->wq)
			goto free_cf_port;
		INIT_DELAYED_WORK(&cf_port->delayed_finish,
				  octeon_cf_delayed_finish);
	} else {
		/* 16 bit but not True IDE */
		octeon_cf_ops.sff_data_xfer	= octeon_cf_data_xfer16;
		octeon_cf_ops.softreset		= octeon_cf_softreset16;
		octeon_cf_ops.sff_check_status	= octeon_cf_check_status16;
		octeon_cf_ops.sff_tf_read	= octeon_cf_tf_read16;
		octeon_cf_ops.sff_tf_load	= octeon_cf_tf_load16;
		octeon_cf_ops.sff_exec_command	= octeon_cf_exec_command16;

		ap->ioaddr.data_addr	= base + ATA_REG_DATA;
		ap->ioaddr.nsect_addr	= base + ATA_REG_NSECT;
		ap->ioaddr.lbal_addr	= base + ATA_REG_LBAL;
		ap->ioaddr.ctl_addr	= base + 0xe;
		ap->ioaddr.altstatus_addr = base + 0xe;
	}

	ata_port_desc(ap, "cmd %p ctl %p", base, ap->ioaddr.ctl_addr);

	dev_info(&pdev->dev, "version " DRV_VERSION " %d bit%s.\n",
		 (ocd->is16bit) ? 16 : 8,
		 (cs1) ? ", True IDE" : "");

	return ata_host_activate(host, irq, irq_handler, 0, &octeon_cf_sht);

free_cf_port:
	kfree(cf_port);
	return -ENOMEM;
}
static struct platform_driver octeon_cf_driver = {
	.probe		= octeon_cf_probe,
	.driver		= {
		.name	= DRV_NAME,
		.owner	= THIS_MODULE,
	},
};

static int __init octeon_cf_init(void)
{
	return platform_driver_register(&octeon_cf_driver);
}
MODULE_AUTHOR("David Daney <ddaney@caviumnetworks.com>");
MODULE_DESCRIPTION("low-level driver for Cavium OCTEON Compact Flash PATA");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:" DRV_NAME);

module_init(octeon_cf_init);
);