libata: implement and use SHT initializers
drivers/ata/sata_sx4.c
1 /*
2 * sata_sx4.c - Promise SATA
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
8 * Copyright 2003-2004 Red Hat, Inc.
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
21 * You should have received a copy of the GNU General Public License
22 * along with this program; see the file COPYING. If not, write to
23 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
26 * libata documentation is available via 'make {ps|pdf}docs',
27 * as Documentation/DocBook/libata.*
29 * Hardware documentation available under NDA.
34 Theory of operation
35 -------------------
37 The SX4 (PDC20621) chip features a single Host DMA (HDMA) copy
38 engine, DIMM memory, and four ATA engines (one per SATA port).
39 Data is copied to/from DIMM memory by the HDMA engine, before
40 handing off to one (or more) of the ATA engines. The ATA
41 engines operate solely on DIMM memory.
43 The SX4 behaves like a PATA chip, with no SATA controls or
44 knowledge whatsoever, leading to the presumption that
45 PATA<->SATA bridges exist on SX4 boards, external to the
46 PDC20621 chip itself.
48 The chip is quite capable, supporting an XOR engine and linked
 49  * hardware commands (permits a string of transactions to be
50 submitted and waited-on as a single unit), and an optional
51 microprocessor.
53 The limiting factor is largely software. This Linux driver was
54 written to multiplex the single HDMA engine to copy disk
55 transactions into a fixed DIMM memory space, from where an ATA
56 engine takes over. As a result, each WRITE looks like this:
58 submit HDMA packet to hardware
59 hardware copies data from system memory to DIMM
60 hardware raises interrupt
62 submit ATA packet to hardware
63 hardware executes ATA WRITE command, w/ data in DIMM
64 hardware raises interrupt
66 and each READ looks like this:
68 submit ATA packet to hardware
69 hardware executes ATA READ command, w/ data in DIMM
70 hardware raises interrupt
72 submit HDMA packet to hardware
73 hardware copies data from DIMM to system memory
74 hardware raises interrupt
76 This is a very slow, lock-step way of doing things that can
77 certainly be improved by motivated kernel hackers.
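	As an aid to reading the code, a sketch of the per-port DIMM layout
	the driver programs (derived from the PDC_DIMM_* constants in the
	enum below; offsets are relative to PDC_20621_DIMM_BASE plus the
	port's slice):

		header window, one per port, PDC_DIMM_WINDOW_STEP (8K) apart:
			0x0000  Host DMA packet        (PDC_DIMM_HOST_PKT)
			0x0080  Host DMA DIMM-side PRD  (PDC_DIMM_HPKT_PRD)
			0x0100  ATA packet             (PDC_DIMM_ATA_PKT)
			0x0180  ATA packet PRD         (PDC_DIMM_APKT_PRD)
			0x1800  host-memory PRD table  (PDC_DIMM_HOST_PRD)

		data buffer, one per port, PDC_DIMM_DATA_STEP (256K) apart,
		starting PDC_20621_DIMM_DATA (64K) past the base.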
81 #include <linux/kernel.h>
82 #include <linux/module.h>
83 #include <linux/pci.h>
84 #include <linux/init.h>
85 #include <linux/blkdev.h>
86 #include <linux/delay.h>
87 #include <linux/interrupt.h>
88 #include <linux/device.h>
89 #include <scsi/scsi_host.h>
90 #include <scsi/scsi_cmnd.h>
91 #include <linux/libata.h>
92 #include "sata_promise.h"
94 #define DRV_NAME "sata_sx4"
95 #define DRV_VERSION "0.12"
98 enum {
99 PDC_MMIO_BAR = 3,
100 PDC_DIMM_BAR = 4,
102 PDC_PRD_TBL = 0x44, /* Direct command DMA table addr */
104 PDC_PKT_SUBMIT = 0x40, /* Command packet pointer addr */
105 PDC_HDMA_PKT_SUBMIT = 0x100, /* Host DMA packet pointer addr */
106 PDC_INT_SEQMASK = 0x40, /* Mask of asserted SEQ INTs */
107 PDC_HDMA_CTLSTAT = 0x12C, /* Host DMA control / status */
109 PDC_CTLSTAT = 0x60, /* IDEn control / status */
111 PDC_20621_SEQCTL = 0x400,
112 PDC_20621_SEQMASK = 0x480,
113 PDC_20621_GENERAL_CTL = 0x484,
114 PDC_20621_PAGE_SIZE = (32 * 1024),
116 /* chosen, not constant, values; we design our own DIMM mem map */
117 PDC_20621_DIMM_WINDOW = 0x0C, /* page# for 32K DIMM window */
118 PDC_20621_DIMM_BASE = 0x00200000,
119 PDC_20621_DIMM_DATA = (64 * 1024),
120 PDC_DIMM_DATA_STEP = (256 * 1024),
121 PDC_DIMM_WINDOW_STEP = (8 * 1024),
122 PDC_DIMM_HOST_PRD = (6 * 1024),
123 PDC_DIMM_HOST_PKT = (128 * 0),
124 PDC_DIMM_HPKT_PRD = (128 * 1),
125 PDC_DIMM_ATA_PKT = (128 * 2),
126 PDC_DIMM_APKT_PRD = (128 * 3),
127 PDC_DIMM_HEADER_SZ = PDC_DIMM_APKT_PRD + 128,
128 PDC_PAGE_WINDOW = 0x40,
129 PDC_PAGE_DATA = PDC_PAGE_WINDOW +
130 (PDC_20621_DIMM_DATA / PDC_20621_PAGE_SIZE),
131 PDC_PAGE_SET = PDC_DIMM_DATA_STEP / PDC_20621_PAGE_SIZE,
133 PDC_CHIP0_OFS = 0xC0000, /* offset of chip #0 */
135 PDC_20621_ERR_MASK = (1<<19) | (1<<20) | (1<<21) | (1<<22) |
136 (1<<23),
138 board_20621 = 0, /* FastTrak S150 SX4 */
140 PDC_MASK_INT = (1 << 10), /* HDMA/ATA mask int */
141 PDC_RESET = (1 << 11), /* HDMA/ATA reset */
142 PDC_DMA_ENABLE = (1 << 7), /* DMA start/stop */
144 PDC_MAX_HDMA = 32,
145 PDC_HDMA_Q_MASK = (PDC_MAX_HDMA - 1),
147 PDC_DIMM0_SPD_DEV_ADDRESS = 0x50,
148 PDC_DIMM1_SPD_DEV_ADDRESS = 0x51,
149 PDC_I2C_CONTROL = 0x48,
150 PDC_I2C_ADDR_DATA = 0x4C,
151 PDC_DIMM0_CONTROL = 0x80,
152 PDC_DIMM1_CONTROL = 0x84,
153 PDC_SDRAM_CONTROL = 0x88,
154 PDC_I2C_WRITE = 0, /* master -> slave */
155 PDC_I2C_READ = (1 << 6), /* master <- slave */
156 PDC_I2C_START = (1 << 7), /* start I2C proto */
157 PDC_I2C_MASK_INT = (1 << 5), /* mask I2C interrupt */
158 PDC_I2C_COMPLETE = (1 << 16), /* I2C normal compl. */
159 PDC_I2C_NO_ACK = (1 << 20), /* slave no-ack addr */
160 PDC_DIMM_SPD_SUBADDRESS_START = 0x00,
161 PDC_DIMM_SPD_SUBADDRESS_END = 0x7F,
162 PDC_DIMM_SPD_ROW_NUM = 3,
163 PDC_DIMM_SPD_COLUMN_NUM = 4,
164 PDC_DIMM_SPD_MODULE_ROW = 5,
165 PDC_DIMM_SPD_TYPE = 11,
166 PDC_DIMM_SPD_FRESH_RATE = 12,
167 PDC_DIMM_SPD_BANK_NUM = 17,
168 PDC_DIMM_SPD_CAS_LATENCY = 18,
169 PDC_DIMM_SPD_ATTRIBUTE = 21,
170 PDC_DIMM_SPD_ROW_PRE_CHARGE = 27,
171 PDC_DIMM_SPD_ROW_ACTIVE_DELAY = 28,
172 PDC_DIMM_SPD_RAS_CAS_DELAY = 29,
173 PDC_DIMM_SPD_ACTIVE_PRECHARGE = 30,
174 PDC_DIMM_SPD_SYSTEM_FREQ = 126,
175 PDC_CTL_STATUS = 0x08,
176 PDC_DIMM_WINDOW_CTLR = 0x0C,
177 PDC_TIME_CONTROL = 0x3C,
178 PDC_TIME_PERIOD = 0x40,
179 PDC_TIME_COUNTER = 0x44,
180 PDC_GENERAL_CTLR = 0x484,
181 PCI_PLL_INIT = 0x8A531824,
182 PCI_X_TCOUNT = 0xEE1E5CFF,
184 /* PDC_TIME_CONTROL bits */
185 PDC_TIMER_BUZZER = (1 << 10),
186 PDC_TIMER_MODE_PERIODIC = 0, /* bits 9:8 == 00 */
187 PDC_TIMER_MODE_ONCE = (1 << 8), /* bits 9:8 == 01 */
188 PDC_TIMER_ENABLE = (1 << 7),
189 PDC_TIMER_MASK_INT = (1 << 5),
190 PDC_TIMER_SEQ_MASK = 0x1f, /* SEQ ID for timer */
191 PDC_TIMER_DEFAULT = PDC_TIMER_MODE_ONCE |
192 PDC_TIMER_ENABLE |
193 PDC_TIMER_MASK_INT,
197 struct pdc_port_priv {
198 u8 dimm_buf[(ATA_PRD_SZ * ATA_MAX_PRD) + 512];
199 u8 *pkt;
200 dma_addr_t pkt_dma;
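/*
 * Host-wide state.  The single HDMA copy engine is shared by all four
 * ports, so pending HDMA packets are parked in the hdma[] ring
 * (hdma_prod/hdma_cons indices, PDC_MAX_HDMA entries) and submitted one
 * at a time by pdc20621_push_hdma()/pdc20621_pop_hdma() below.
 */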
203 struct pdc_host_priv {
204 unsigned int doing_hdma;
205 unsigned int hdma_prod;
206 unsigned int hdma_cons;
207 struct {
208 struct ata_queued_cmd *qc;
209 unsigned int seq;
210 unsigned long pkt_ofs;
211 } hdma[32];
215 static int pdc_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
216 static void pdc_eng_timeout(struct ata_port *ap);
217 static void pdc_20621_phy_reset(struct ata_port *ap);
218 static int pdc_port_start(struct ata_port *ap);
219 static void pdc20621_qc_prep(struct ata_queued_cmd *qc);
220 static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
221 static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
222 static unsigned int pdc20621_dimm_init(struct ata_host *host);
223 static int pdc20621_detect_dimm(struct ata_host *host);
224 static unsigned int pdc20621_i2c_read(struct ata_host *host,
225 u32 device, u32 subaddr, u32 *pdata);
226 static int pdc20621_prog_dimm0(struct ata_host *host);
227 static unsigned int pdc20621_prog_dimm_global(struct ata_host *host);
228 #ifdef ATA_VERBOSE_DEBUG
229 static void pdc20621_get_from_dimm(struct ata_host *host,
230 void *psource, u32 offset, u32 size);
231 #endif
232 static void pdc20621_put_to_dimm(struct ata_host *host,
233 void *psource, u32 offset, u32 size);
234 static void pdc20621_irq_clear(struct ata_port *ap);
235 static unsigned int pdc20621_qc_issue_prot(struct ata_queued_cmd *qc);
238 static struct scsi_host_template pdc_sata_sht = {
239 ATA_BASE_SHT(DRV_NAME),
240 .sg_tablesize = LIBATA_MAX_PRD,
241 .dma_boundary = ATA_DMA_BOUNDARY,
244 static const struct ata_port_operations pdc_20621_ops = {
245 .tf_load = pdc_tf_load_mmio,
246 .tf_read = ata_tf_read,
247 .check_status = ata_check_status,
248 .exec_command = pdc_exec_command_mmio,
249 .dev_select = ata_std_dev_select,
250 .phy_reset = pdc_20621_phy_reset,
251 .qc_prep = pdc20621_qc_prep,
252 .qc_issue = pdc20621_qc_issue_prot,
253 .data_xfer = ata_data_xfer,
254 .eng_timeout = pdc_eng_timeout,
255 .irq_clear = pdc20621_irq_clear,
256 .irq_on = ata_irq_on,
257 .port_start = pdc_port_start,
260 static const struct ata_port_info pdc_port_info[] = {
261 /* board_20621 */
263 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
264 ATA_FLAG_SRST | ATA_FLAG_MMIO |
265 ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING,
266 .pio_mask = 0x1f, /* pio0-4 */
267 .mwdma_mask = 0x07, /* mwdma0-2 */
268 .udma_mask = ATA_UDMA6,
269 .port_ops = &pdc_20621_ops,
274 static const struct pci_device_id pdc_sata_pci_tbl[] = {
275 { PCI_VDEVICE(PROMISE, 0x6622), board_20621 },
277 { } /* terminate list */
280 static struct pci_driver pdc_sata_pci_driver = {
281 .name = DRV_NAME,
282 .id_table = pdc_sata_pci_tbl,
283 .probe = pdc_sata_init_one,
284 .remove = ata_pci_remove_one,
288 static int pdc_port_start(struct ata_port *ap)
290 struct device *dev = ap->host->dev;
291 struct pdc_port_priv *pp;
292 int rc;
294 rc = ata_port_start(ap);
295 if (rc)
296 return rc;
298 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
299 if (!pp)
300 return -ENOMEM;
302 pp->pkt = dmam_alloc_coherent(dev, 128, &pp->pkt_dma, GFP_KERNEL);
303 if (!pp->pkt)
304 return -ENOMEM;
306 ap->private_data = pp;
308 return 0;
311 static void pdc_20621_phy_reset(struct ata_port *ap)
313 VPRINTK("ENTER\n");
314 ap->cbl = ATA_CBL_SATA;
315 ata_port_probe(ap);
316 ata_bus_reset(ap);
319 static inline void pdc20621_ata_sg(struct ata_taskfile *tf, u8 *buf,
320 unsigned int portno,
321 unsigned int total_len)
323 u32 addr;
324 unsigned int dw = PDC_DIMM_APKT_PRD >> 2;
325 __le32 *buf32 = (__le32 *) buf;
327 /* output ATA packet S/G table */
328 addr = PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA +
329 (PDC_DIMM_DATA_STEP * portno);
330 VPRINTK("ATA sg addr 0x%x, %d\n", addr, addr);
331 buf32[dw] = cpu_to_le32(addr);
332 buf32[dw + 1] = cpu_to_le32(total_len | ATA_PRD_EOT);
334 VPRINTK("ATA PSG @ %x == (0x%x, 0x%x)\n",
335 PDC_20621_DIMM_BASE +
336 (PDC_DIMM_WINDOW_STEP * portno) +
337 PDC_DIMM_APKT_PRD,
338 buf32[dw], buf32[dw + 1]);
341 static inline void pdc20621_host_sg(struct ata_taskfile *tf, u8 *buf,
342 unsigned int portno,
343 unsigned int total_len)
345 u32 addr;
346 unsigned int dw = PDC_DIMM_HPKT_PRD >> 2;
347 __le32 *buf32 = (__le32 *) buf;
349 /* output Host DMA packet S/G table */
350 addr = PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA +
351 (PDC_DIMM_DATA_STEP * portno);
353 buf32[dw] = cpu_to_le32(addr);
354 buf32[dw + 1] = cpu_to_le32(total_len | ATA_PRD_EOT);
356 VPRINTK("HOST PSG @ %x == (0x%x, 0x%x)\n",
357 PDC_20621_DIMM_BASE +
358 (PDC_DIMM_WINDOW_STEP * portno) +
359 PDC_DIMM_HPKT_PRD,
360 buf32[dw], buf32[dw + 1]);
363 static inline unsigned int pdc20621_ata_pkt(struct ata_taskfile *tf,
364 unsigned int devno, u8 *buf,
365 unsigned int portno)
367 unsigned int i, dw;
368 __le32 *buf32 = (__le32 *) buf;
369 u8 dev_reg;
371 unsigned int dimm_sg = PDC_20621_DIMM_BASE +
372 (PDC_DIMM_WINDOW_STEP * portno) +
373 PDC_DIMM_APKT_PRD;
374 VPRINTK("ENTER, dimm_sg == 0x%x, %d\n", dimm_sg, dimm_sg);
376 i = PDC_DIMM_ATA_PKT;
379 * Set up ATA packet
381 if ((tf->protocol == ATA_PROT_DMA) && (!(tf->flags & ATA_TFLAG_WRITE)))
382 buf[i++] = PDC_PKT_READ;
383 else if (tf->protocol == ATA_PROT_NODATA)
384 buf[i++] = PDC_PKT_NODATA;
385 else
386 buf[i++] = 0;
387 buf[i++] = 0; /* reserved */
388 buf[i++] = portno + 1; /* seq. id */
389 buf[i++] = 0xff; /* delay seq. id */
391 /* dimm dma S/G, and next-pkt */
392 dw = i >> 2;
393 if (tf->protocol == ATA_PROT_NODATA)
394 buf32[dw] = 0;
395 else
396 buf32[dw] = cpu_to_le32(dimm_sg);
397 buf32[dw + 1] = 0;
398 i += 8;
400 if (devno == 0)
401 dev_reg = ATA_DEVICE_OBS;
402 else
403 dev_reg = ATA_DEVICE_OBS | ATA_DEV1;
405 /* select device */
406 buf[i++] = (1 << 5) | PDC_PKT_CLEAR_BSY | ATA_REG_DEVICE;
407 buf[i++] = dev_reg;
409 /* device control register */
410 buf[i++] = (1 << 5) | PDC_REG_DEVCTL;
411 buf[i++] = tf->ctl;
413 return i;
416 static inline void pdc20621_host_pkt(struct ata_taskfile *tf, u8 *buf,
417 unsigned int portno)
419 unsigned int dw;
420 u32 tmp;
421 __le32 *buf32 = (__le32 *) buf;
423 unsigned int host_sg = PDC_20621_DIMM_BASE +
424 (PDC_DIMM_WINDOW_STEP * portno) +
425 PDC_DIMM_HOST_PRD;
426 unsigned int dimm_sg = PDC_20621_DIMM_BASE +
427 (PDC_DIMM_WINDOW_STEP * portno) +
428 PDC_DIMM_HPKT_PRD;
429 VPRINTK("ENTER, dimm_sg == 0x%x, %d\n", dimm_sg, dimm_sg);
430 VPRINTK("host_sg == 0x%x, %d\n", host_sg, host_sg);
432 dw = PDC_DIMM_HOST_PKT >> 2;
435 * Set up Host DMA packet
437 if ((tf->protocol == ATA_PROT_DMA) && (!(tf->flags & ATA_TFLAG_WRITE)))
438 tmp = PDC_PKT_READ;
439 else
440 tmp = 0;
441 tmp |= ((portno + 1 + 4) << 16); /* seq. id */
442 tmp |= (0xff << 24); /* delay seq. id */
443 buf32[dw + 0] = cpu_to_le32(tmp);
444 buf32[dw + 1] = cpu_to_le32(host_sg);
445 buf32[dw + 2] = cpu_to_le32(dimm_sg);
446 buf32[dw + 3] = 0;
448 VPRINTK("HOST PKT @ %x == (0x%x 0x%x 0x%x 0x%x)\n",
449 PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * portno) +
450 PDC_DIMM_HOST_PKT,
451 buf32[dw + 0],
452 buf32[dw + 1],
453 buf32[dw + 2],
454 buf32[dw + 3]);
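/*
 * DMA command preparation happens entirely in pp->dimm_buf: the
 * host-memory PRD table, the Host DMA packet and its DIMM-side PRD,
 * and the ATA packet with its PRD pointing at the port's DIMM data
 * buffer are all built there, then copied into the port's header
 * window in DIMM memory with memcpy_toio().
 */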
457 static void pdc20621_dma_prep(struct ata_queued_cmd *qc)
459 struct scatterlist *sg;
460 struct ata_port *ap = qc->ap;
461 struct pdc_port_priv *pp = ap->private_data;
462 void __iomem *mmio = ap->host->iomap[PDC_MMIO_BAR];
463 void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
464 unsigned int portno = ap->port_no;
465 unsigned int i, si, idx, total_len = 0, sgt_len;
466 u32 *buf = (u32 *) &pp->dimm_buf[PDC_DIMM_HEADER_SZ];
468 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
470 VPRINTK("ata%u: ENTER\n", ap->print_id);
472 /* hard-code chip #0 */
473 mmio += PDC_CHIP0_OFS;
476 * Build S/G table
478 idx = 0;
479 for_each_sg(qc->sg, sg, qc->n_elem, si) {
480 buf[idx++] = cpu_to_le32(sg_dma_address(sg));
481 buf[idx++] = cpu_to_le32(sg_dma_len(sg));
482 total_len += sg_dma_len(sg);
484 buf[idx - 1] |= cpu_to_le32(ATA_PRD_EOT);
485 sgt_len = idx * 4;
488 * Build ATA, host DMA packets
490 pdc20621_host_sg(&qc->tf, &pp->dimm_buf[0], portno, total_len);
491 pdc20621_host_pkt(&qc->tf, &pp->dimm_buf[0], portno);
493 pdc20621_ata_sg(&qc->tf, &pp->dimm_buf[0], portno, total_len);
494 i = pdc20621_ata_pkt(&qc->tf, qc->dev->devno, &pp->dimm_buf[0], portno);
496 if (qc->tf.flags & ATA_TFLAG_LBA48)
497 i = pdc_prep_lba48(&qc->tf, &pp->dimm_buf[0], i);
498 else
499 i = pdc_prep_lba28(&qc->tf, &pp->dimm_buf[0], i);
501 pdc_pkt_footer(&qc->tf, &pp->dimm_buf[0], i);
503 /* copy three S/G tables and two packets to DIMM MMIO window */
504 memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP),
505 &pp->dimm_buf, PDC_DIMM_HEADER_SZ);
506 memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP) +
507 PDC_DIMM_HOST_PRD,
508 &pp->dimm_buf[PDC_DIMM_HEADER_SZ], sgt_len);
510 /* force host FIFO dump */
511 writel(0x00000001, mmio + PDC_20621_GENERAL_CTL);
513 readl(dimm_mmio); /* MMIO PCI posting flush */
515 VPRINTK("ata pkt buf ofs %u, prd size %u, mmio copied\n", i, sgt_len);
518 static void pdc20621_nodata_prep(struct ata_queued_cmd *qc)
520 struct ata_port *ap = qc->ap;
521 struct pdc_port_priv *pp = ap->private_data;
522 void __iomem *mmio = ap->host->iomap[PDC_MMIO_BAR];
523 void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
524 unsigned int portno = ap->port_no;
525 unsigned int i;
527 VPRINTK("ata%u: ENTER\n", ap->print_id);
529 /* hard-code chip #0 */
530 mmio += PDC_CHIP0_OFS;
532 i = pdc20621_ata_pkt(&qc->tf, qc->dev->devno, &pp->dimm_buf[0], portno);
534 if (qc->tf.flags & ATA_TFLAG_LBA48)
535 i = pdc_prep_lba48(&qc->tf, &pp->dimm_buf[0], i);
536 else
537 i = pdc_prep_lba28(&qc->tf, &pp->dimm_buf[0], i);
539 pdc_pkt_footer(&qc->tf, &pp->dimm_buf[0], i);
541 /* copy three S/G tables and two packets to DIMM MMIO window */
542 memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP),
543 &pp->dimm_buf, PDC_DIMM_HEADER_SZ);
545 /* force host FIFO dump */
546 writel(0x00000001, mmio + PDC_20621_GENERAL_CTL);
548 readl(dimm_mmio); /* MMIO PCI posting flush */
550 VPRINTK("ata pkt buf ofs %u, mmio copied\n", i);
553 static void pdc20621_qc_prep(struct ata_queued_cmd *qc)
555 switch (qc->tf.protocol) {
556 case ATA_PROT_DMA:
557 pdc20621_dma_prep(qc);
558 break;
559 case ATA_PROT_NODATA:
560 pdc20621_nodata_prep(qc);
561 break;
562 default:
563 break;
567 static void __pdc20621_push_hdma(struct ata_queued_cmd *qc,
568 unsigned int seq,
569 u32 pkt_ofs)
571 struct ata_port *ap = qc->ap;
572 struct ata_host *host = ap->host;
573 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
575 /* hard-code chip #0 */
576 mmio += PDC_CHIP0_OFS;
578 writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
579 readl(mmio + PDC_20621_SEQCTL + (seq * 4)); /* flush */
581 writel(pkt_ofs, mmio + PDC_HDMA_PKT_SUBMIT);
582 readl(mmio + PDC_HDMA_PKT_SUBMIT); /* flush */
585 static void pdc20621_push_hdma(struct ata_queued_cmd *qc,
586 unsigned int seq,
587 u32 pkt_ofs)
589 struct ata_port *ap = qc->ap;
590 struct pdc_host_priv *pp = ap->host->private_data;
591 unsigned int idx = pp->hdma_prod & PDC_HDMA_Q_MASK;
593 if (!pp->doing_hdma) {
594 __pdc20621_push_hdma(qc, seq, pkt_ofs);
595 pp->doing_hdma = 1;
596 return;
599 pp->hdma[idx].qc = qc;
600 pp->hdma[idx].seq = seq;
601 pp->hdma[idx].pkt_ofs = pkt_ofs;
602 pp->hdma_prod++;
605 static void pdc20621_pop_hdma(struct ata_queued_cmd *qc)
607 struct ata_port *ap = qc->ap;
608 struct pdc_host_priv *pp = ap->host->private_data;
609 unsigned int idx = pp->hdma_cons & PDC_HDMA_Q_MASK;
611 /* if nothing on queue, we're done */
612 if (pp->hdma_prod == pp->hdma_cons) {
613 pp->doing_hdma = 0;
614 return;
617 __pdc20621_push_hdma(pp->hdma[idx].qc, pp->hdma[idx].seq,
618 pp->hdma[idx].pkt_ofs);
619 pp->hdma_cons++;
622 #ifdef ATA_VERBOSE_DEBUG
623 static void pdc20621_dump_hdma(struct ata_queued_cmd *qc)
625 struct ata_port *ap = qc->ap;
626 unsigned int port_no = ap->port_no;
627 void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
629 dimm_mmio += (port_no * PDC_DIMM_WINDOW_STEP);
630 dimm_mmio += PDC_DIMM_HOST_PKT;
632 printk(KERN_ERR "HDMA[0] == 0x%08X\n", readl(dimm_mmio));
633 printk(KERN_ERR "HDMA[1] == 0x%08X\n", readl(dimm_mmio + 4));
634 printk(KERN_ERR "HDMA[2] == 0x%08X\n", readl(dimm_mmio + 8));
635 printk(KERN_ERR "HDMA[3] == 0x%08X\n", readl(dimm_mmio + 12));
637 #else
638 static inline void pdc20621_dump_hdma(struct ata_queued_cmd *qc) { }
639 #endif /* ATA_VERBOSE_DEBUG */
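/*
 * SEQ ID convention: ATA packets for port N use SEQ ID N+1 (1-4),
 * Host DMA packets use SEQ ID N+5 (5-8).  The interrupt handler relies
 * on this split (SEQ mask bits 5-8 versus 1-4) to tell an HDMA
 * completion from an ATA completion on the same port.
 */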
641 static void pdc20621_packet_start(struct ata_queued_cmd *qc)
643 struct ata_port *ap = qc->ap;
644 struct ata_host *host = ap->host;
645 unsigned int port_no = ap->port_no;
646 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
647 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
648 u8 seq = (u8) (port_no + 1);
649 unsigned int port_ofs;
651 /* hard-code chip #0 */
652 mmio += PDC_CHIP0_OFS;
654 VPRINTK("ata%u: ENTER\n", ap->print_id);
656 wmb(); /* flush PRD, pkt writes */
658 port_ofs = PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * port_no);
660 /* if writing, we (1) DMA to DIMM, then (2) do ATA command */
661 if (rw && qc->tf.protocol == ATA_PROT_DMA) {
662 seq += 4;
664 pdc20621_dump_hdma(qc);
665 pdc20621_push_hdma(qc, seq, port_ofs + PDC_DIMM_HOST_PKT);
666 VPRINTK("queued ofs 0x%x (%u), seq %u\n",
667 port_ofs + PDC_DIMM_HOST_PKT,
668 port_ofs + PDC_DIMM_HOST_PKT,
669 seq);
670 } else {
671 writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
672 readl(mmio + PDC_20621_SEQCTL + (seq * 4)); /* flush */
674 writel(port_ofs + PDC_DIMM_ATA_PKT,
675 ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
676 readl(ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
677 VPRINTK("submitted ofs 0x%x (%u), seq %u\n",
678 port_ofs + PDC_DIMM_ATA_PKT,
679 port_ofs + PDC_DIMM_ATA_PKT,
680 seq);
684 static unsigned int pdc20621_qc_issue_prot(struct ata_queued_cmd *qc)
686 switch (qc->tf.protocol) {
687 case ATA_PROT_DMA:
688 case ATA_PROT_NODATA:
689 pdc20621_packet_start(qc);
690 return 0;
692 case ATAPI_PROT_DMA:
693 BUG();
694 break;
696 default:
697 break;
700 return ata_qc_issue_prot(qc);
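/*
 * Per-qc completion handling.  DMA commands complete in two interrupts:
 * for a READ the ATA completion triggers submission of the HDMA copy to
 * host memory and the HDMA completion finishes the qc; for a WRITE the
 * order is reversed (HDMA to DIMM first, then the ATA command).
 * NODATA commands complete in a single interrupt.
 */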
703 static inline unsigned int pdc20621_host_intr(struct ata_port *ap,
704 struct ata_queued_cmd *qc,
705 unsigned int doing_hdma,
706 void __iomem *mmio)
708 unsigned int port_no = ap->port_no;
709 unsigned int port_ofs =
710 PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * port_no);
711 u8 status;
712 unsigned int handled = 0;
714 VPRINTK("ENTER\n");
716 if ((qc->tf.protocol == ATA_PROT_DMA) && /* read */
717 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
719 /* step two - DMA from DIMM to host */
720 if (doing_hdma) {
721 VPRINTK("ata%u: read hdma, 0x%x 0x%x\n", ap->print_id,
722 readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
723 /* get drive status; clear intr; complete txn */
724 qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
725 ata_qc_complete(qc);
726 pdc20621_pop_hdma(qc);
729 /* step one - exec ATA command */
730 else {
731 u8 seq = (u8) (port_no + 1 + 4);
732 VPRINTK("ata%u: read ata, 0x%x 0x%x\n", ap->print_id,
733 readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
735 /* submit hdma pkt */
736 pdc20621_dump_hdma(qc);
737 pdc20621_push_hdma(qc, seq,
738 port_ofs + PDC_DIMM_HOST_PKT);
740 handled = 1;
742 } else if (qc->tf.protocol == ATA_PROT_DMA) { /* write */
744 /* step one - DMA from host to DIMM */
745 if (doing_hdma) {
746 u8 seq = (u8) (port_no + 1);
747 VPRINTK("ata%u: write hdma, 0x%x 0x%x\n", ap->print_id,
748 readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
750 /* submit ata pkt */
751 writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
752 readl(mmio + PDC_20621_SEQCTL + (seq * 4));
753 writel(port_ofs + PDC_DIMM_ATA_PKT,
754 ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
755 readl(ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
758 /* step two - execute ATA command */
759 else {
760 VPRINTK("ata%u: write ata, 0x%x 0x%x\n", ap->print_id,
761 readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
762 /* get drive status; clear intr; complete txn */
763 qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
764 ata_qc_complete(qc);
765 pdc20621_pop_hdma(qc);
767 handled = 1;
769 /* command completion, but no data xfer */
770 } else if (qc->tf.protocol == ATA_PROT_NODATA) {
772 status = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
773 DPRINTK("BUS_NODATA (drv_stat 0x%X)\n", status);
774 qc->err_mask |= ac_err_mask(status);
775 ata_qc_complete(qc);
776 handled = 1;
778 } else {
779 ap->stats.idle_irq++;
782 return handled;
785 static void pdc20621_irq_clear(struct ata_port *ap)
787 struct ata_host *host = ap->host;
788 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
790 mmio += PDC_CHIP0_OFS;
792 readl(mmio + PDC_20621_SEQMASK);
795 static irqreturn_t pdc20621_interrupt(int irq, void *dev_instance)
797 struct ata_host *host = dev_instance;
798 struct ata_port *ap;
799 u32 mask = 0;
800 unsigned int i, tmp, port_no;
801 unsigned int handled = 0;
802 void __iomem *mmio_base;
804 VPRINTK("ENTER\n");
806 if (!host || !host->iomap[PDC_MMIO_BAR]) {
807 VPRINTK("QUICK EXIT\n");
808 return IRQ_NONE;
811 mmio_base = host->iomap[PDC_MMIO_BAR];
813 /* reading should also clear interrupts */
814 mmio_base += PDC_CHIP0_OFS;
815 mask = readl(mmio_base + PDC_20621_SEQMASK);
816 VPRINTK("mask == 0x%x\n", mask);
818 if (mask == 0xffffffff) {
819 VPRINTK("QUICK EXIT 2\n");
820 return IRQ_NONE;
822 mask &= 0xffff; /* only 16 tags possible */
823 if (!mask) {
824 VPRINTK("QUICK EXIT 3\n");
825 return IRQ_NONE;
828 spin_lock(&host->lock);
830 for (i = 1; i < 9; i++) {
831 port_no = i - 1;
832 if (port_no > 3)
833 port_no -= 4;
834 if (port_no >= host->n_ports)
835 ap = NULL;
836 else
837 ap = host->ports[port_no];
838 tmp = mask & (1 << i);
839 VPRINTK("seq %u, port_no %u, ap %p, tmp %x\n", i, port_no, ap, tmp);
840 if (tmp && ap &&
841 !(ap->flags & ATA_FLAG_DISABLED)) {
842 struct ata_queued_cmd *qc;
844 qc = ata_qc_from_tag(ap, ap->link.active_tag);
845 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
846 handled += pdc20621_host_intr(ap, qc, (i > 4),
847 mmio_base);
851 spin_unlock(&host->lock);
853 VPRINTK("mask == 0x%x\n", mask);
855 VPRINTK("EXIT\n");
857 return IRQ_RETVAL(handled);
860 static void pdc_eng_timeout(struct ata_port *ap)
862 u8 drv_stat;
863 struct ata_host *host = ap->host;
864 struct ata_queued_cmd *qc;
865 unsigned long flags;
867 DPRINTK("ENTER\n");
869 spin_lock_irqsave(&host->lock, flags);
871 qc = ata_qc_from_tag(ap, ap->link.active_tag);
873 switch (qc->tf.protocol) {
874 case ATA_PROT_DMA:
875 case ATA_PROT_NODATA:
876 ata_port_printk(ap, KERN_ERR, "command timeout\n");
877 qc->err_mask |= __ac_err_mask(ata_wait_idle(ap));
878 break;
880 default:
881 drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
883 ata_port_printk(ap, KERN_ERR,
884 "unknown timeout, cmd 0x%x stat 0x%x\n",
885 qc->tf.command, drv_stat);
887 qc->err_mask |= ac_err_mask(drv_stat);
888 break;
891 spin_unlock_irqrestore(&host->lock, flags);
892 ata_eh_qc_complete(qc);
893 DPRINTK("EXIT\n");
896 static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
898 WARN_ON(tf->protocol == ATA_PROT_DMA ||
899 tf->protocol == ATA_PROT_NODATA);
900 ata_tf_load(ap, tf);
904 static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
906 WARN_ON(tf->protocol == ATA_PROT_DMA ||
907 tf->protocol == ATA_PROT_NODATA);
908 ata_exec_command(ap, tf);
912 static void pdc_sata_setup_port(struct ata_ioports *port, void __iomem *base)
914 port->cmd_addr = base;
915 port->data_addr = base;
916 port->feature_addr =
917 port->error_addr = base + 0x4;
918 port->nsect_addr = base + 0x8;
919 port->lbal_addr = base + 0xc;
920 port->lbam_addr = base + 0x10;
921 port->lbah_addr = base + 0x14;
922 port->device_addr = base + 0x18;
923 port->command_addr =
924 port->status_addr = base + 0x1c;
925 port->altstatus_addr =
926 port->ctl_addr = base + 0x38;
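/*
 * DIMM memory is reached through a 32K window in the PDC_DIMM_BAR
 * aperture.  The two copy helpers below step that window across the
 * DIMM via PDC_DIMM_WINDOW_CTLR so arbitrarily large regions can be
 * transferred in 32K chunks.
 */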
930 #ifdef ATA_VERBOSE_DEBUG
931 static void pdc20621_get_from_dimm(struct ata_host *host, void *psource,
932 u32 offset, u32 size)
934 u32 window_size;
935 u16 idx;
936 u8 page_mask;
937 long dist;
938 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
939 void __iomem *dimm_mmio = host->iomap[PDC_DIMM_BAR];
941 /* hard-code chip #0 */
942 mmio += PDC_CHIP0_OFS;
944 page_mask = 0x00;
945 window_size = 0x2000 * 4; /* 32K byte uchar size */
946 idx = (u16) (offset / window_size);
948 writel(0x01, mmio + PDC_GENERAL_CTLR);
949 readl(mmio + PDC_GENERAL_CTLR);
950 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
951 readl(mmio + PDC_DIMM_WINDOW_CTLR);
953 offset -= (idx * window_size);
954 idx++;
955 dist = ((long) (window_size - (offset + size))) >= 0 ? size :
956 (long) (window_size - offset);
957 memcpy_fromio((char *) psource, (char *) (dimm_mmio + offset / 4),
958 dist);
960 psource += dist;
961 size -= dist;
962 for (; (long) size >= (long) window_size ;) {
963 writel(0x01, mmio + PDC_GENERAL_CTLR);
964 readl(mmio + PDC_GENERAL_CTLR);
965 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
966 readl(mmio + PDC_DIMM_WINDOW_CTLR);
967 memcpy_fromio((char *) psource, (char *) (dimm_mmio),
968 window_size / 4);
969 psource += window_size;
970 size -= window_size;
971 idx++;
974 if (size) {
975 writel(0x01, mmio + PDC_GENERAL_CTLR);
976 readl(mmio + PDC_GENERAL_CTLR);
977 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
978 readl(mmio + PDC_DIMM_WINDOW_CTLR);
979 memcpy_fromio((char *) psource, (char *) (dimm_mmio),
980 size / 4);
983 #endif
986 static void pdc20621_put_to_dimm(struct ata_host *host, void *psource,
987 u32 offset, u32 size)
989 u32 window_size;
990 u16 idx;
991 u8 page_mask;
992 long dist;
993 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
994 void __iomem *dimm_mmio = host->iomap[PDC_DIMM_BAR];
996 /* hard-code chip #0 */
997 mmio += PDC_CHIP0_OFS;
999 page_mask = 0x00;
1000 window_size = 0x2000 * 4; /* 32K byte uchar size */
1001 idx = (u16) (offset / window_size);
1003 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1004 readl(mmio + PDC_DIMM_WINDOW_CTLR);
1005 offset -= (idx * window_size);
1006 idx++;
1007 dist = ((long)(s32)(window_size - (offset + size))) >= 0 ? size :
1008 (long) (window_size - offset);
1009 memcpy_toio(dimm_mmio + offset / 4, psource, dist);
1010 writel(0x01, mmio + PDC_GENERAL_CTLR);
1011 readl(mmio + PDC_GENERAL_CTLR);
1013 psource += dist;
1014 size -= dist;
1015 for (; (long) size >= (long) window_size ;) {
1016 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1017 readl(mmio + PDC_DIMM_WINDOW_CTLR);
1018 memcpy_toio(dimm_mmio, psource, window_size / 4);
1019 writel(0x01, mmio + PDC_GENERAL_CTLR);
1020 readl(mmio + PDC_GENERAL_CTLR);
1021 psource += window_size;
1022 size -= window_size;
1023 idx++;
1026 if (size) {
1027 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1028 readl(mmio + PDC_DIMM_WINDOW_CTLR);
1029 memcpy_toio(dimm_mmio, psource, size / 4);
1030 writel(0x01, mmio + PDC_GENERAL_CTLR);
1031 readl(mmio + PDC_GENERAL_CTLR);
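/*
 * Read one SPD byte from a DIMM over the chip's I2C master: the device
 * and sub-address are written to PDC_I2C_ADDR_DATA, the transfer is
 * started via PDC_I2C_CONTROL, and completion is polled for
 * PDC_I2C_COMPLETE; the data byte is returned in bits 15:8.
 */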
1036 static unsigned int pdc20621_i2c_read(struct ata_host *host, u32 device,
1037 u32 subaddr, u32 *pdata)
1039 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1040 u32 i2creg = 0;
1041 u32 status;
1042 u32 count = 0;
1044 /* hard-code chip #0 */
1045 mmio += PDC_CHIP0_OFS;
1047 i2creg |= device << 24;
1048 i2creg |= subaddr << 16;
1050 /* Set the device and subaddress */
1051 writel(i2creg, mmio + PDC_I2C_ADDR_DATA);
1052 readl(mmio + PDC_I2C_ADDR_DATA);
1054 /* Write Control to perform read operation, mask int */
1055 writel(PDC_I2C_READ | PDC_I2C_START | PDC_I2C_MASK_INT,
1056 mmio + PDC_I2C_CONTROL);
1058 for (count = 0; count <= 1000; count ++) {
1059 status = readl(mmio + PDC_I2C_CONTROL);
1060 if (status & PDC_I2C_COMPLETE) {
1061 status = readl(mmio + PDC_I2C_ADDR_DATA);
1062 break;
1063 } else if (count == 1000)
1064 return 0;
1067 *pdata = (status >> 8) & 0x000000ff;
1068 return 1;
1072 static int pdc20621_detect_dimm(struct ata_host *host)
1074 u32 data = 0;
1075 if (pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
1076 PDC_DIMM_SPD_SYSTEM_FREQ, &data)) {
1077 if (data == 100)
1078 return 100;
1079 } else
1080 return 0;
1082 if (pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS, 9, &data)) {
1083 if (data <= 0x75)
1084 return 133;
1085 } else
1086 return 0;
1088 return 0;
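/*
 * Program the DIMM0 Module Control Register from the module's SPD data:
 * row/column/bank counts, refresh rate, CAS latency and the precharge/
 * delay timings listed in the table below are read over I2C and packed
 * into the register along with the computed module size.
 */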
1092 static int pdc20621_prog_dimm0(struct ata_host *host)
1094 u32 spd0[50];
1095 u32 data = 0;
1096 int size, i;
1097 u8 bdimmsize;
1098 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1099 static const struct {
1100 unsigned int reg;
1101 unsigned int ofs;
1102 } pdc_i2c_read_data [] = {
1103 { PDC_DIMM_SPD_TYPE, 11 },
1104 { PDC_DIMM_SPD_FRESH_RATE, 12 },
1105 { PDC_DIMM_SPD_COLUMN_NUM, 4 },
1106 { PDC_DIMM_SPD_ATTRIBUTE, 21 },
1107 { PDC_DIMM_SPD_ROW_NUM, 3 },
1108 { PDC_DIMM_SPD_BANK_NUM, 17 },
1109 { PDC_DIMM_SPD_MODULE_ROW, 5 },
1110 { PDC_DIMM_SPD_ROW_PRE_CHARGE, 27 },
1111 { PDC_DIMM_SPD_ROW_ACTIVE_DELAY, 28 },
1112 { PDC_DIMM_SPD_RAS_CAS_DELAY, 29 },
1113 { PDC_DIMM_SPD_ACTIVE_PRECHARGE, 30 },
1114 { PDC_DIMM_SPD_CAS_LATENCY, 18 },
1117 /* hard-code chip #0 */
1118 mmio += PDC_CHIP0_OFS;
1120 for (i = 0; i < ARRAY_SIZE(pdc_i2c_read_data); i++)
1121 pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
1122 pdc_i2c_read_data[i].reg,
1123 &spd0[pdc_i2c_read_data[i].ofs]);
1125 data |= (spd0[4] - 8) | ((spd0[21] != 0) << 3) | ((spd0[3]-11) << 4);
1126 data |= ((spd0[17] / 4) << 6) | ((spd0[5] / 2) << 7) |
1127 ((((spd0[27] + 9) / 10) - 1) << 8) ;
1128 data |= (((((spd0[29] > spd0[28])
1129 ? spd0[29] : spd0[28]) + 9) / 10) - 1) << 10;
1130 data |= ((spd0[30] - spd0[29] + 9) / 10 - 2) << 12;
1132 if (spd0[18] & 0x08)
1133 data |= ((0x03) << 14);
1134 else if (spd0[18] & 0x04)
1135 data |= ((0x02) << 14);
1136 else if (spd0[18] & 0x01)
1137 data |= ((0x01) << 14);
1138 else
1139 data |= (0 << 14);
 1142    Calculate the DIMM size (bdimmsize is log2 of the byte size) and
 1143    merge it into the DIMM0 control register value.
1146 bdimmsize = spd0[4] + (spd0[5] / 2) + spd0[3] + (spd0[17] / 2) + 3;
1147 size = (1 << bdimmsize) >> 20; /* size = xxx(MB) */
1148 data |= (((size / 16) - 1) << 16);
1149 data |= (0 << 23);
1150 data |= 8;
1151 writel(data, mmio + PDC_DIMM0_CONTROL);
1152 readl(mmio + PDC_DIMM0_CONTROL);
1153 return size;
1157 static unsigned int pdc20621_prog_dimm_global(struct ata_host *host)
1159 u32 data, spd0;
1160 int error, i;
1161 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1163 /* hard-code chip #0 */
1164 mmio += PDC_CHIP0_OFS;
1167 Set To Default : DIMM Module Global Control Register (0x022259F1)
1168 DIMM Arbitration Disable (bit 20)
1169 DIMM Data/Control Output Driving Selection (bit12 - bit15)
1170 Refresh Enable (bit 17)
1173 data = 0x022259F1;
1174 writel(data, mmio + PDC_SDRAM_CONTROL);
1175 readl(mmio + PDC_SDRAM_CONTROL);
1177 /* Turn on for ECC */
1178 pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
1179 PDC_DIMM_SPD_TYPE, &spd0);
1180 if (spd0 == 0x02) {
1181 data |= (0x01 << 16);
1182 writel(data, mmio + PDC_SDRAM_CONTROL);
1183 readl(mmio + PDC_SDRAM_CONTROL);
1184 printk(KERN_ERR "Local DIMM ECC Enabled\n");
1187 /* DIMM Initialization Select/Enable (bit 18/19) */
1188 data &= (~(1<<18));
1189 data |= (1<<19);
1190 writel(data, mmio + PDC_SDRAM_CONTROL);
1192 error = 1;
1193 for (i = 1; i <= 10; i++) { /* polling ~5 secs */
1194 data = readl(mmio + PDC_SDRAM_CONTROL);
1195 if (!(data & (1<<19))) {
1196 error = 0;
1197 break;
1199 msleep(i*100);
1201 return error;
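/*
 * One-time DIMM bring-up: measure the PCI bus frequency with the
 * on-chip timer and program the PLL accordingly, detect the DIMM via
 * SPD, program the per-module and global DIMM controller registers,
 * and zero-fill the DIMM when an ECC module is present.
 */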
1205 static unsigned int pdc20621_dimm_init(struct ata_host *host)
1207 int speed, size, length;
1208 u32 addr, spd0, pci_status;
1209 u32 tmp = 0;
1210 u32 time_period = 0;
1211 u32 tcount = 0;
1212 u32 ticks = 0;
1213 u32 clock = 0;
1214 u32 fparam = 0;
1215 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1217 /* hard-code chip #0 */
1218 mmio += PDC_CHIP0_OFS;
1220 /* Initialize PLL based upon PCI Bus Frequency */
1222 /* Initialize Time Period Register */
1223 writel(0xffffffff, mmio + PDC_TIME_PERIOD);
1224 time_period = readl(mmio + PDC_TIME_PERIOD);
1225 VPRINTK("Time Period Register (0x40): 0x%x\n", time_period);
1227 /* Enable timer */
1228 writel(PDC_TIMER_DEFAULT, mmio + PDC_TIME_CONTROL);
1229 readl(mmio + PDC_TIME_CONTROL);
1231 /* Wait 3 seconds */
1232 msleep(3000);
 1235    When the timer is enabled, the counter is decremented every
 1236    internal clock cycle.
1239 tcount = readl(mmio + PDC_TIME_COUNTER);
1240 VPRINTK("Time Counter Register (0x44): 0x%x\n", tcount);
1243 If SX4 is on PCI-X bus, after 3 seconds, the timer counter
1244 register should be >= (0xffffffff - 3x10^8).
1246 if (tcount >= PCI_X_TCOUNT) {
1247 ticks = (time_period - tcount);
1248 VPRINTK("Num counters 0x%x (%d)\n", ticks, ticks);
1250 clock = (ticks / 300000);
1251 VPRINTK("10 * Internal clk = 0x%x (%d)\n", clock, clock);
1253 clock = (clock * 33);
1254 VPRINTK("10 * Internal clk * 33 = 0x%x (%d)\n", clock, clock);
1256 /* PLL F Param (bit 22:16) */
1257 fparam = (1400000 / clock) - 2;
1258 VPRINTK("PLL F Param: 0x%x (%d)\n", fparam, fparam);
1260 /* OD param = 0x2 (bit 31:30), R param = 0x5 (bit 29:25) */
1261 pci_status = (0x8a001824 | (fparam << 16));
1262 } else
1263 pci_status = PCI_PLL_INIT;
1265 /* Initialize PLL. */
1266 VPRINTK("pci_status: 0x%x\n", pci_status);
1267 writel(pci_status, mmio + PDC_CTL_STATUS);
1268 readl(mmio + PDC_CTL_STATUS);
 1271    Read the DIMM's SPD data over the I2C interface,
 1272    and program the DIMM Module Controller.
1274 if (!(speed = pdc20621_detect_dimm(host))) {
1275 printk(KERN_ERR "Detect Local DIMM Fail\n");
1276 return 1; /* DIMM error */
1278 VPRINTK("Local DIMM Speed = %d\n", speed);
1280 /* Programming DIMM0 Module Control Register (index_CID0:80h) */
1281 size = pdc20621_prog_dimm0(host);
1282 VPRINTK("Local DIMM Size = %dMB\n", size);
1284 /* Programming DIMM Module Global Control Register (index_CID0:88h) */
1285 if (pdc20621_prog_dimm_global(host)) {
1286 printk(KERN_ERR "Programming DIMM Module Global Control Register Fail\n");
1287 return 1;
1290 #ifdef ATA_VERBOSE_DEBUG
1292 u8 test_parttern1[40] =
1293 {0x55,0xAA,'P','r','o','m','i','s','e',' ',
1294 'N','o','t',' ','Y','e','t',' ',
1295 'D','e','f','i','n','e','d',' ',
1296 '1','.','1','0',
1297 '9','8','0','3','1','6','1','2',0,0};
1298 u8 test_parttern2[40] = {0};
1300 pdc20621_put_to_dimm(host, test_parttern2, 0x10040, 40);
1301 pdc20621_put_to_dimm(host, test_parttern2, 0x40, 40);
1303 pdc20621_put_to_dimm(host, test_parttern1, 0x10040, 40);
1304 pdc20621_get_from_dimm(host, test_parttern2, 0x40, 40);
1305 printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0],
1306 test_parttern2[1], &(test_parttern2[2]));
1307 pdc20621_get_from_dimm(host, test_parttern2, 0x10040,
1308 40);
1309 printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0],
1310 test_parttern2[1], &(test_parttern2[2]));
1312 pdc20621_put_to_dimm(host, test_parttern1, 0x40, 40);
1313 pdc20621_get_from_dimm(host, test_parttern2, 0x40, 40);
1314 printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0],
1315 test_parttern2[1], &(test_parttern2[2]));
1317 #endif
 1319  /* ECC initialization. */
1321 pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
1322 PDC_DIMM_SPD_TYPE, &spd0);
1323 if (spd0 == 0x02) {
1324 VPRINTK("Start ECC initialization\n");
1325 addr = 0;
1326 length = size * 1024 * 1024;
1327 while (addr < length) {
1328 pdc20621_put_to_dimm(host, (void *) &tmp, addr,
1329 sizeof(u32));
1330 addr += sizeof(u32);
1332 VPRINTK("Finish ECC initialization\n");
1334 return 0;
1338 static void pdc_20621_init(struct ata_host *host)
1340 u32 tmp;
1341 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1343 /* hard-code chip #0 */
1344 mmio += PDC_CHIP0_OFS;
1347 * Select page 0x40 for our 32k DIMM window
1349 tmp = readl(mmio + PDC_20621_DIMM_WINDOW) & 0xffff0000;
1350 tmp |= PDC_PAGE_WINDOW; /* page 40h; arbitrarily selected */
1351 writel(tmp, mmio + PDC_20621_DIMM_WINDOW);
1354 * Reset Host DMA
1356 tmp = readl(mmio + PDC_HDMA_CTLSTAT);
1357 tmp |= PDC_RESET;
1358 writel(tmp, mmio + PDC_HDMA_CTLSTAT);
1359 readl(mmio + PDC_HDMA_CTLSTAT); /* flush */
1361 udelay(10);
1363 tmp = readl(mmio + PDC_HDMA_CTLSTAT);
1364 tmp &= ~PDC_RESET;
1365 writel(tmp, mmio + PDC_HDMA_CTLSTAT);
1366 readl(mmio + PDC_HDMA_CTLSTAT); /* flush */
1369 static int pdc_sata_init_one(struct pci_dev *pdev,
1370 const struct pci_device_id *ent)
1372 static int printed_version;
1373 const struct ata_port_info *ppi[] =
1374 { &pdc_port_info[ent->driver_data], NULL };
1375 struct ata_host *host;
1376 struct pdc_host_priv *hpriv;
1377 int i, rc;
1379 if (!printed_version++)
1380 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
1382 /* allocate host */
1383 host = ata_host_alloc_pinfo(&pdev->dev, ppi, 4);
1384 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
1385 if (!host || !hpriv)
1386 return -ENOMEM;
1388 host->private_data = hpriv;
1390 /* acquire resources and fill host */
1391 rc = pcim_enable_device(pdev);
1392 if (rc)
1393 return rc;
1395 rc = pcim_iomap_regions(pdev, (1 << PDC_MMIO_BAR) | (1 << PDC_DIMM_BAR),
1396 DRV_NAME);
1397 if (rc == -EBUSY)
1398 pcim_pin_device(pdev);
1399 if (rc)
1400 return rc;
1401 host->iomap = pcim_iomap_table(pdev);
1403 for (i = 0; i < 4; i++) {
1404 struct ata_port *ap = host->ports[i];
1405 void __iomem *base = host->iomap[PDC_MMIO_BAR] + PDC_CHIP0_OFS;
1406 unsigned int offset = 0x200 + i * 0x80;
1408 pdc_sata_setup_port(&ap->ioaddr, base + offset);
1410 ata_port_pbar_desc(ap, PDC_MMIO_BAR, -1, "mmio");
1411 ata_port_pbar_desc(ap, PDC_DIMM_BAR, -1, "dimm");
1412 ata_port_pbar_desc(ap, PDC_MMIO_BAR, offset, "port");
1415 /* configure and activate */
1416 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
1417 if (rc)
1418 return rc;
1419 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
1420 if (rc)
1421 return rc;
1423 if (pdc20621_dimm_init(host))
1424 return -ENOMEM;
1425 pdc_20621_init(host);
1427 pci_set_master(pdev);
1428 return ata_host_activate(host, pdev->irq, pdc20621_interrupt,
1429 IRQF_SHARED, &pdc_sata_sht);
1433 static int __init pdc_sata_init(void)
1435 return pci_register_driver(&pdc_sata_pci_driver);
1439 static void __exit pdc_sata_exit(void)
1441 pci_unregister_driver(&pdc_sata_pci_driver);
1445 MODULE_AUTHOR("Jeff Garzik");
1446 MODULE_DESCRIPTION("Promise SATA low-level driver");
1447 MODULE_LICENSE("GPL");
1448 MODULE_DEVICE_TABLE(pci, pdc_sata_pci_tbl);
1449 MODULE_VERSION(DRV_VERSION);
1451 module_init(pdc_sata_init);
1452 module_exit(pdc_sata_exit);