[PATCH] libata: convert assert(xxx)'s in low-level drivers to WARN_ON(!xxx)'s
[linux-2.6/kvm.git] / drivers / scsi / sata_qstor.c
blob 955131b432062ae10b759b74886028bb86d62891
/*
 *  sata_qstor.c - Pacific Digital Corporation QStor SATA
 *
 *  Maintained by:  Mark Lord <mlord@pobox.com>
 *
 *  Copyright 2005 Pacific Digital Corporation.
 *  (OSL/GPL code release authorized by Jalil Fadavi).
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <asm/io.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_qstor"
#define DRV_VERSION	"0.05"
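
/*
 * Rough sketch of the per-port command packet this driver builds, as
 * inferred from qs_qc_prep() and qs_fill_sg() below: a 64-byte control
 * block (host control block at offset 0, device control block at offset
 * 24, ATA FIS at offset 32), followed by a table of 16-byte PRD entries,
 * one per scatter/gather element.  Anything beyond what those routines
 * show is an assumption from the register names, not from QStor
 * documentation.
 */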

enum {
	QS_PORTS		= 4,
	QS_MAX_PRD		= LIBATA_MAX_PRD,
	QS_CPB_ORDER		= 6,
	QS_CPB_BYTES		= (1 << QS_CPB_ORDER),
	QS_PRD_BYTES		= QS_MAX_PRD * 16,
	QS_PKT_BYTES		= QS_CPB_BYTES + QS_PRD_BYTES,

	/* global register offsets */
	QS_HCF_CNFG3		= 0x0003, /* host configuration offset */
	QS_HID_HPHY		= 0x0004, /* host physical interface info */
	QS_HCT_CTRL		= 0x00e4, /* global interrupt mask offset */
	QS_HST_SFF		= 0x0100, /* host status fifo offset */
	QS_HVS_SERD3		= 0x0393, /* PHY enable offset */

	/* global control bits */
	QS_HPHY_64BIT		= (1 << 1), /* 64-bit bus detected */
	QS_CNFG3_GSRST		= 0x01,     /* global chip reset */
	QS_SERD3_PHY_ENA	= 0xf0,     /* PHY detection ENAble */

	/* per-channel register offsets */
	QS_CCF_CPBA		= 0x0710, /* chan CPB base address */
	QS_CCF_CSEP		= 0x0718, /* chan CPB separation factor */
	QS_CFC_HUFT		= 0x0800, /* host upstream fifo threshold */
	QS_CFC_HDFT		= 0x0804, /* host downstream fifo threshold */
	QS_CFC_DUFT		= 0x0808, /* dev upstream fifo threshold */
	QS_CFC_DDFT		= 0x080c, /* dev downstream fifo threshold */
	QS_CCT_CTR0		= 0x0900, /* chan control-0 offset */
	QS_CCT_CTR1		= 0x0901, /* chan control-1 offset */
	QS_CCT_CFF		= 0x0a00, /* chan command fifo offset */

	/* channel control bits */
	QS_CTR0_REG		= (1 << 1), /* register mode (vs. pkt mode) */
	QS_CTR0_CLER		= (1 << 2), /* clear channel errors */
	QS_CTR1_RDEV		= (1 << 1), /* sata phy/comms reset */
	QS_CTR1_RCHN		= (1 << 4), /* reset channel logic */
	QS_CCF_RUN_PKT		= 0x107,    /* RUN a new dma PKT */

	/* pkt sub-field headers */
	QS_HCB_HDR		= 0x01, /* Host Control Block header */
	QS_DCB_HDR		= 0x02, /* Device Control Block header */

	/* pkt HCB flag bits */
	QS_HF_DIRO		= (1 << 0), /* data DIRection Out */
	QS_HF_DAT		= (1 << 3), /* DATa pkt */
	QS_HF_IEN		= (1 << 4), /* Interrupt ENable */
	QS_HF_VLD		= (1 << 5), /* VaLiD pkt */

	/* pkt DCB flag bits */
	QS_DF_PORD		= (1 << 2), /* Pio OR Dma */
	QS_DF_ELBA		= (1 << 3), /* Extended LBA (lba48) */

	/* PCI device IDs */
	board_2068_idx		= 0,	/* QStor 4-port SATA/RAID */
};

enum {
	QS_DMA_BOUNDARY		= ~0UL
};

typedef enum { qs_state_idle, qs_state_pkt, qs_state_mmio } qs_state_t;

struct qs_port_priv {
	u8			*pkt;
	dma_addr_t		pkt_dma;
	qs_state_t		state;
};

static u32 qs_scr_read (struct ata_port *ap, unsigned int sc_reg);
static void qs_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
static int qs_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
static irqreturn_t qs_intr (int irq, void *dev_instance, struct pt_regs *regs);
static int qs_port_start(struct ata_port *ap);
static void qs_host_stop(struct ata_host_set *host_set);
static void qs_port_stop(struct ata_port *ap);
static void qs_phy_reset(struct ata_port *ap);
static void qs_qc_prep(struct ata_queued_cmd *qc);
static unsigned int qs_qc_issue(struct ata_queued_cmd *qc);
static int qs_check_atapi_dma(struct ata_queued_cmd *qc);
static void qs_bmdma_stop(struct ata_queued_cmd *qc);
static u8 qs_bmdma_status(struct ata_port *ap);
static void qs_irq_clear(struct ata_port *ap);
static void qs_eng_timeout(struct ata_port *ap);

static struct scsi_host_template qs_ata_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.eh_timed_out		= ata_scsi_timed_out,
	.eh_strategy_handler	= ata_scsi_error,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= QS_MAX_PRD,
	.max_sectors		= ATA_MAX_SECTORS,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	//FIXME .use_clustering	= ATA_SHT_USE_CLUSTERING,
	.use_clustering		= ENABLE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= QS_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.bios_param		= ata_std_bios_param,
};

static const struct ata_port_operations qs_ata_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.check_atapi_dma	= qs_check_atapi_dma,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,
	.phy_reset		= qs_phy_reset,
	.qc_prep		= qs_qc_prep,
	.qc_issue		= qs_qc_issue,
	.eng_timeout		= qs_eng_timeout,
	.irq_handler		= qs_intr,
	.irq_clear		= qs_irq_clear,
	.scr_read		= qs_scr_read,
	.scr_write		= qs_scr_write,
	.port_start		= qs_port_start,
	.port_stop		= qs_port_stop,
	.host_stop		= qs_host_stop,
	.bmdma_stop		= qs_bmdma_stop,
	.bmdma_status		= qs_bmdma_status,
};

static const struct ata_port_info qs_port_info[] = {
	/* board_2068_idx */
	{
		.sht		= &qs_ata_sht,
		.host_flags	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_SATA_RESET |
				  //FIXME ATA_FLAG_SRST |
				  ATA_FLAG_MMIO,
		.pio_mask	= 0x10, /* pio4 */
		.udma_mask	= 0x7f, /* udma0-6 */
		.port_ops	= &qs_ata_ops,
	},
};

static const struct pci_device_id qs_ata_pci_tbl[] = {
	{ PCI_VENDOR_ID_PDC, 0x2068, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	  board_2068_idx },

	{ }	/* terminate list */
};

static struct pci_driver qs_ata_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= qs_ata_pci_tbl,
	.probe			= qs_ata_init_one,
	.remove			= ata_pci_remove_one,
};

static int qs_check_atapi_dma(struct ata_queued_cmd *qc)
{
	return 1;	/* ATAPI DMA not supported */
}

static void qs_bmdma_stop(struct ata_queued_cmd *qc)
{
	/* nothing */
}

static u8 qs_bmdma_status(struct ata_port *ap)
{
	return 0;
}

static void qs_irq_clear(struct ata_port *ap)
{
	/* nothing */
}
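
/*
 * Each channel's registers occupy a private 0x4000-byte window off the
 * chip's MMIO base (hence the ap->port_no * 0x4000 arithmetic below).
 * A channel runs either in "register" mode, where taskfiles go through
 * the normal shadow registers, or in packet mode, where a prebuilt CPB
 * is handed to the channel's command FIFO; qs_port_priv.state records
 * which completion path (qs_intr_mmio vs. qs_intr_pkt) should claim the
 * next interrupt.  This summary is inferred from the code below, not
 * from QStor documentation.
 */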

static inline void qs_enter_reg_mode(struct ata_port *ap)
{
	u8 __iomem *chan = ap->host_set->mmio_base + (ap->port_no * 0x4000);

	writeb(QS_CTR0_REG, chan + QS_CCT_CTR0);
	readb(chan + QS_CCT_CTR0);		/* flush */
}

static inline void qs_reset_channel_logic(struct ata_port *ap)
{
	u8 __iomem *chan = ap->host_set->mmio_base + (ap->port_no * 0x4000);

	writeb(QS_CTR1_RCHN, chan + QS_CCT_CTR1);
	readb(chan + QS_CCT_CTR0);		/* flush */
	qs_enter_reg_mode(ap);
}

static void qs_phy_reset(struct ata_port *ap)
{
	struct qs_port_priv *pp = ap->private_data;

	pp->state = qs_state_idle;
	qs_reset_channel_logic(ap);
	sata_phy_reset(ap);
}

static void qs_eng_timeout(struct ata_port *ap)
{
	struct qs_port_priv *pp = ap->private_data;

	if (pp->state != qs_state_idle) /* healthy paranoia */
		pp->state = qs_state_mmio;
	qs_reset_channel_logic(ap);
	ata_eng_timeout(ap);
}

static u32 qs_scr_read (struct ata_port *ap, unsigned int sc_reg)
{
	if (sc_reg > SCR_CONTROL)
		return ~0U;
	return readl((void __iomem *)(ap->ioaddr.scr_addr + (sc_reg * 8)));
}

static void qs_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
{
	if (sc_reg > SCR_CONTROL)
		return;
	writel(val, (void __iomem *)(ap->ioaddr.scr_addr + (sc_reg * 8)));
}
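
/*
 * Build the PRD (scatter/gather) table that follows the 64-byte CPB
 * header.  Each entry is 16 bytes: a little-endian 64-bit DMA address,
 * a little-endian 32-bit byte count, and 4 unused pad bytes (which is
 * why the length store is followed by another "prd += sizeof(u64)").
 */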
static unsigned int qs_fill_sg(struct ata_queued_cmd *qc)
{
	struct scatterlist *sg;
	struct ata_port *ap = qc->ap;
	struct qs_port_priv *pp = ap->private_data;
	unsigned int nelem;
	u8 *prd = pp->pkt + QS_CPB_BYTES;

	WARN_ON(qc->__sg == NULL);
	WARN_ON(qc->n_elem == 0);

	nelem = 0;
	ata_for_each_sg(sg, qc) {
		u64 addr;
		u32 len;

		addr = sg_dma_address(sg);
		*(__le64 *)prd = cpu_to_le64(addr);
		prd += sizeof(u64);

		len = sg_dma_len(sg);
		*(__le32 *)prd = cpu_to_le32(len);
		prd += sizeof(u64);

		VPRINTK("PRD[%u] = (0x%llX, 0x%X)\n", nelem,
					(unsigned long long)addr, len);
		nelem++;
	}

	return nelem;
}

static void qs_qc_prep(struct ata_queued_cmd *qc)
{
	struct qs_port_priv *pp = qc->ap->private_data;
	u8 dflags = QS_DF_PORD, *buf = pp->pkt;
	u8 hflags = QS_HF_DAT | QS_HF_IEN | QS_HF_VLD;
	u64 addr;
	unsigned int nelem;

	VPRINTK("ENTER\n");

	qs_enter_reg_mode(qc->ap);
	if (qc->tf.protocol != ATA_PROT_DMA) {
		ata_qc_prep(qc);
		return;
	}

	nelem = qs_fill_sg(qc);

	if ((qc->tf.flags & ATA_TFLAG_WRITE))
		hflags |= QS_HF_DIRO;
	if ((qc->tf.flags & ATA_TFLAG_LBA48))
		dflags |= QS_DF_ELBA;

	/* host control block (HCB) */
	buf[ 0] = QS_HCB_HDR;
	buf[ 1] = hflags;
	*(__le32 *)(&buf[ 4]) = cpu_to_le32(qc->nsect * ATA_SECT_SIZE);
	*(__le32 *)(&buf[ 8]) = cpu_to_le32(nelem);
	addr = ((u64)pp->pkt_dma) + QS_CPB_BYTES;
	*(__le64 *)(&buf[16]) = cpu_to_le64(addr);

	/* device control block (DCB) */
	buf[24] = QS_DCB_HDR;
	buf[28] = dflags;

	/* frame information structure (FIS) */
	ata_tf_to_fis(&qc->tf, &buf[32], 0);
}
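
/*
 * Kick off a prepared packet: clear any stale channel error state, make
 * the CPB and PRDs visible to the device (wmb), then write the RUN_PKT
 * opcode into the channel's command FIFO.
 */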
static inline void qs_packet_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	u8 __iomem *chan = ap->host_set->mmio_base + (ap->port_no * 0x4000);

	VPRINTK("ENTER, ap %p\n", ap);

	writeb(QS_CTR0_CLER, chan + QS_CCT_CTR0);
	wmb();			/* flush PRDs and pkt to memory */
	writel(QS_CCF_RUN_PKT, chan + QS_CCT_CFF);
	readl(chan + QS_CCT_CFF);	/* flush */
}

static unsigned int qs_qc_issue(struct ata_queued_cmd *qc)
{
	struct qs_port_priv *pp = qc->ap->private_data;

	switch (qc->tf.protocol) {
	case ATA_PROT_DMA:

		pp->state = qs_state_pkt;
		qs_packet_start(qc);
		return 0;

	case ATA_PROT_ATAPI_DMA:
		BUG();
		break;

	default:
		break;
	}

	pp->state = qs_state_mmio;
	return ata_qc_issue_prot(qc);
}
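
/*
 * Packet-mode completions are reported through a status FIFO at
 * QS_HST_SFF.  Each 8-byte entry is read as two 32-bit words: in the
 * second word, bit 31 is the "FIFO empty" flag, bit 30 the "entry
 * valid" flag, bits 9:8 the channel number and bits 5:0 the host
 * status; bits 23:16 of the first word carry the device (ATA) status.
 * This layout is taken from the field extraction in the loop below.
 */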
static inline unsigned int qs_intr_pkt(struct ata_host_set *host_set)
{
	unsigned int handled = 0;
	u8 sFFE;
	u8 __iomem *mmio_base = host_set->mmio_base;

	do {
		u32 sff0 = readl(mmio_base + QS_HST_SFF);
		u32 sff1 = readl(mmio_base + QS_HST_SFF + 4);
		u8 sEVLD = (sff1 >> 30) & 0x01;	/* valid flag */
		sFFE  = sff1 >> 31;		/* empty flag */

		if (sEVLD) {
			u8 sDST = sff0 >> 16;	/* dev status */
			u8 sHST = sff1 & 0x3f;	/* host status */
			unsigned int port_no = (sff1 >> 8) & 0x03;
			struct ata_port *ap = host_set->ports[port_no];

			DPRINTK("SFF=%08x%08x: sCHAN=%u sHST=%d sDST=%02x\n",
					sff1, sff0, port_no, sHST, sDST);
			handled = 1;
			if (ap && !(ap->flags &
				    (ATA_FLAG_PORT_DISABLED|ATA_FLAG_NOINTR))) {
				struct ata_queued_cmd *qc;
				struct qs_port_priv *pp = ap->private_data;
				if (!pp || pp->state != qs_state_pkt)
					continue;
				qc = ata_qc_from_tag(ap, ap->active_tag);
				if (qc && (!(qc->tf.ctl & ATA_NIEN))) {
					switch (sHST) {
					case 0: /* successful CPB */
					case 3: /* device error */
						pp->state = qs_state_idle;
						qs_enter_reg_mode(qc->ap);
						qc->err_mask |= ac_err_mask(sDST);
						ata_qc_complete(qc);
						break;
					default:
						break;
					}
				}
			}
		}
	} while (!sFFE);
	return handled;
}

static inline unsigned int qs_intr_mmio(struct ata_host_set *host_set)
{
	unsigned int handled = 0, port_no;

	for (port_no = 0; port_no < host_set->n_ports; ++port_no) {
		struct ata_port *ap;
		ap = host_set->ports[port_no];
		if (ap &&
		    !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) {
			struct ata_queued_cmd *qc;
			struct qs_port_priv *pp = ap->private_data;
			if (!pp || pp->state != qs_state_mmio)
				continue;
			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc && (!(qc->tf.ctl & ATA_NIEN))) {

				/* check main status, clearing INTRQ */
				u8 status = ata_check_status(ap);
				if ((status & ATA_BUSY))
					continue;
				DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
					ap->id, qc->tf.protocol, status);

				/* complete taskfile transaction */
				pp->state = qs_state_idle;
				qc->err_mask |= ac_err_mask(status);
				ata_qc_complete(qc);
				handled = 1;
			}
		}
	}
	return handled;
}

static irqreturn_t qs_intr(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct ata_host_set *host_set = dev_instance;
	unsigned int handled = 0;

	VPRINTK("ENTER\n");

	spin_lock(&host_set->lock);
	handled = qs_intr_pkt(host_set) | qs_intr_mmio(host_set);
	spin_unlock(&host_set->lock);

	VPRINTK("EXIT\n");

	return IRQ_RETVAL(handled);
}
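
/*
 * The QStor spreads the ATA shadow registers through MMIO space with an
 * 8-byte stride per register; the lba48 "hob" copy of each register sits
 * at the following byte (e.g. feature at base + 0x408, hob_feature at
 * base + 0x409).
 */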
static void qs_ata_setup_port(struct ata_ioports *port, unsigned long base)
{
	port->cmd_addr		=
	port->data_addr		= base + 0x400;
	port->error_addr	=
	port->feature_addr	= base + 0x408; /* hob_feature = 0x409 */
	port->nsect_addr	= base + 0x410; /* hob_nsect = 0x411 */
	port->lbal_addr		= base + 0x418; /* hob_lbal = 0x419 */
	port->lbam_addr		= base + 0x420; /* hob_lbam = 0x421 */
	port->lbah_addr		= base + 0x428; /* hob_lbah = 0x429 */
	port->device_addr	= base + 0x430;
	port->status_addr	=
	port->command_addr	= base + 0x438;
	port->altstatus_addr	=
	port->ctl_addr		= base + 0x440;
	port->scr_addr		= base + 0xc00;
}
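
/*
 * Per-port setup: allocate one coherent DMA buffer large enough for the
 * CPB plus its PRD table (QS_PKT_BYTES) and program its bus address into
 * the channel's CPB base-address registers (QS_CCF_CPBA, low 32 bits
 * then high 32 bits).
 */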
static int qs_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host_set->dev;
	struct qs_port_priv *pp;
	void __iomem *mmio_base = ap->host_set->mmio_base;
	void __iomem *chan = mmio_base + (ap->port_no * 0x4000);
	u64 addr;
	int rc;

	rc = ata_port_start(ap);
	if (rc)
		return rc;
	qs_enter_reg_mode(ap);
	pp = kzalloc(sizeof(*pp), GFP_KERNEL);
	if (!pp) {
		rc = -ENOMEM;
		goto err_out;
	}
	pp->pkt = dma_alloc_coherent(dev, QS_PKT_BYTES, &pp->pkt_dma,
								GFP_KERNEL);
	if (!pp->pkt) {
		rc = -ENOMEM;
		goto err_out_kfree;
	}
	memset(pp->pkt, 0, QS_PKT_BYTES);
	ap->private_data = pp;

	addr = (u64)pp->pkt_dma;
	writel((u32) addr,        chan + QS_CCF_CPBA);
	writel((u32)(addr >> 32), chan + QS_CCF_CPBA + 4);
	return 0;

err_out_kfree:
	kfree(pp);
err_out:
	ata_port_stop(ap);
	return rc;
}

static void qs_port_stop(struct ata_port *ap)
{
	struct device *dev = ap->host_set->dev;
	struct qs_port_priv *pp = ap->private_data;

	if (pp != NULL) {
		ap->private_data = NULL;
		if (pp->pkt != NULL)
			dma_free_coherent(dev, QS_PKT_BYTES, pp->pkt,
								pp->pkt_dma);
		kfree(pp);
	}
	ata_port_stop(ap);
}

static void qs_host_stop(struct ata_host_set *host_set)
{
	void __iomem *mmio_base = host_set->mmio_base;
	struct pci_dev *pdev = to_pci_dev(host_set->dev);

	writeb(0, mmio_base + QS_HCT_CTRL); /* disable host interrupts */
	writeb(QS_CNFG3_GSRST, mmio_base + QS_HCF_CNFG3); /* global reset */

	pci_iounmap(pdev, mmio_base);
}

static void qs_host_init(unsigned int chip_id, struct ata_probe_ent *pe)
{
	void __iomem *mmio_base = pe->mmio_base;
	unsigned int port_no;

	writeb(0, mmio_base + QS_HCT_CTRL); /* disable host interrupts */
	writeb(QS_CNFG3_GSRST, mmio_base + QS_HCF_CNFG3); /* global reset */

	/* reset each channel in turn */
	for (port_no = 0; port_no < pe->n_ports; ++port_no) {
		u8 __iomem *chan = mmio_base + (port_no * 0x4000);
		writeb(QS_CTR1_RDEV|QS_CTR1_RCHN, chan + QS_CCT_CTR1);
		writeb(QS_CTR0_REG, chan + QS_CCT_CTR0);
		readb(chan + QS_CCT_CTR0);	/* flush */
	}
	writeb(QS_SERD3_PHY_ENA, mmio_base + QS_HVS_SERD3); /* enable phy */

	for (port_no = 0; port_no < pe->n_ports; ++port_no) {
		u8 __iomem *chan = mmio_base + (port_no * 0x4000);
		/* set FIFO depths to same settings as Windows driver */
		writew(32, chan + QS_CFC_HUFT);
		writew(32, chan + QS_CFC_HDFT);
		writew(10, chan + QS_CFC_DUFT);
		writew( 8, chan + QS_CFC_DDFT);
		/* set CPB size in bytes, as a power of two */
		writeb(QS_CPB_ORDER, chan + QS_CCF_CSEP);
	}
	writeb(1, mmio_base + QS_HCT_CTRL); /* enable host interrupts */
}

/*
 * The QStor understands 64-bit buses, and uses 64-bit fields
 * for DMA pointers regardless of bus width.  We just have to
 * make sure our DMA masks are set appropriately for whatever
 * bridge lies between us and the QStor, and then the DMA mapping
 * code will ensure we only ever "see" appropriate buffer addresses.
 * If we're 32-bit limited somewhere, then our 64-bit fields will
 * just end up with zeros in the upper 32-bits, without any special
 * logic required outside of this routine (below).
 */
static int qs_set_dma_masks(struct pci_dev *pdev, void __iomem *mmio_base)
{
	u32 bus_info = readl(mmio_base + QS_HID_HPHY);
	int rc, have_64bit_bus = (bus_info & QS_HPHY_64BIT);

	if (have_64bit_bus &&
	    !pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (rc) {
			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
			if (rc) {
				dev_printk(KERN_ERR, &pdev->dev,
					   "64-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				"32-bit DMA enable failed\n");
			return rc;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				"32-bit consistent DMA enable failed\n");
			return rc;
		}
	}
	return 0;
}
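
/*
 * PCI probe: the QStor's registers sit in BAR 4.  Map it, choose 32- vs
 * 64-bit DMA masks based on what the chip reports about its bus, describe
 * the four ports to libata through an ata_probe_ent, initialize the
 * adapter with qs_host_init(), and register everything via
 * ata_device_add().
 */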
static int qs_ata_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	static int printed_version;
	struct ata_probe_ent *probe_ent = NULL;
	void __iomem *mmio_base;
	unsigned int board_idx = (unsigned int) ent->driver_data;
	int rc, port_no;

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out;

	if ((pci_resource_flags(pdev, 4) & IORESOURCE_MEM) == 0) {
		rc = -ENODEV;
		goto err_out_regions;
	}

	mmio_base = pci_iomap(pdev, 4, 0);
	if (mmio_base == NULL) {
		rc = -ENOMEM;
		goto err_out_regions;
	}

	rc = qs_set_dma_masks(pdev, mmio_base);
	if (rc)
		goto err_out_iounmap;

	probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
	if (probe_ent == NULL) {
		rc = -ENOMEM;
		goto err_out_iounmap;
	}

	memset(probe_ent, 0, sizeof(*probe_ent));
	probe_ent->dev = pci_dev_to_dev(pdev);
	INIT_LIST_HEAD(&probe_ent->node);

	probe_ent->sht		= qs_port_info[board_idx].sht;
	probe_ent->host_flags	= qs_port_info[board_idx].host_flags;
	probe_ent->pio_mask	= qs_port_info[board_idx].pio_mask;
	probe_ent->mwdma_mask	= qs_port_info[board_idx].mwdma_mask;
	probe_ent->udma_mask	= qs_port_info[board_idx].udma_mask;
	probe_ent->port_ops	= qs_port_info[board_idx].port_ops;

	probe_ent->irq		= pdev->irq;
	probe_ent->irq_flags	= SA_SHIRQ;
	probe_ent->mmio_base	= mmio_base;
	probe_ent->n_ports	= QS_PORTS;

	for (port_no = 0; port_no < probe_ent->n_ports; ++port_no) {
		unsigned long chan = (unsigned long)mmio_base +
					(port_no * 0x4000);
		qs_ata_setup_port(&probe_ent->port[port_no], chan);
	}

	pci_set_master(pdev);

	/* initialize adapter */
	qs_host_init(board_idx, probe_ent);

	rc = ata_device_add(probe_ent);
	kfree(probe_ent);
	if (rc != QS_PORTS)
		goto err_out_iounmap;
	return 0;

err_out_iounmap:
	pci_iounmap(pdev, mmio_base);
err_out_regions:
	pci_release_regions(pdev);
err_out:
	pci_disable_device(pdev);
	return rc;
}

static int __init qs_ata_init(void)
{
	return pci_module_init(&qs_ata_pci_driver);
}

static void __exit qs_ata_exit(void)
{
	pci_unregister_driver(&qs_ata_pci_driver);
}

MODULE_AUTHOR("Mark Lord");
MODULE_DESCRIPTION("Pacific Digital Corporation QStor SATA low-level driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, qs_ata_pci_tbl);
MODULE_VERSION(DRV_VERSION);

module_init(qs_ata_init);
module_exit(qs_ata_exit);