/*
 *  sata_sx4.c - Promise SATA
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *		    Please ALWAYS copy linux-ide@vger.kernel.org
 *		    on emails.
 *
 *  Copyright 2003-2004 Red Hat, Inc.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Hardware documentation available under NDA.
 *
 */
/*
	Theory of operation
	-------------------

	The SX4 (PDC20621) chip features a single Host DMA (HDMA) copy
	engine, DIMM memory, and four ATA engines (one per SATA port).
	Data is copied to/from DIMM memory by the HDMA engine, before
	handing off to one (or more) of the ATA engines.  The ATA
	engines operate solely on DIMM memory.

	The SX4 behaves like a PATA chip, with no SATA controls or
	knowledge whatsoever, leading to the presumption that
	PATA<->SATA bridges exist on SX4 boards, external to the
	PDC20621 chip itself.

	The chip is quite capable, supporting an XOR engine and linked
	hardware commands (permits a string of transactions to be
	submitted and waited-on as a single unit), and an optional
	microprocessor.

	The limiting factor is largely software.  This Linux driver was
	written to multiplex the single HDMA engine to copy disk
	transactions into a fixed DIMM memory space, from where an ATA
	engine takes over.  As a result, each WRITE looks like this:

		submit HDMA packet to hardware
		hardware copies data from system memory to DIMM
		hardware raises interrupt

		submit ATA packet to hardware
		hardware executes ATA WRITE command, w/ data in DIMM
		hardware raises interrupt

	and each READ looks like this:

		submit ATA packet to hardware
		hardware executes ATA READ command, w/ data in DIMM
		hardware raises interrupt

		submit HDMA packet to hardware
		hardware copies data from DIMM to system memory
		hardware raises interrupt

	This is a very slow, lock-step way of doing things that can
	certainly be improved by motivated kernel hackers.

 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>
#include "sata_promise.h"

#define DRV_NAME	"sata_sx4"
#define DRV_VERSION	"0.12"

enum {
	PDC_MMIO_BAR		= 3,
	PDC_DIMM_BAR		= 4,

	PDC_PRD_TBL		= 0x44,	/* Direct command DMA table addr */

	PDC_PKT_SUBMIT		= 0x40, /* Command packet pointer addr */
	PDC_HDMA_PKT_SUBMIT	= 0x100, /* Host DMA packet pointer addr */
	PDC_INT_SEQMASK		= 0x40,	/* Mask of asserted SEQ INTs */
	PDC_HDMA_CTLSTAT	= 0x12C, /* Host DMA control / status */

	PDC_CTLSTAT		= 0x60,	/* IDEn control / status */

	PDC_20621_SEQCTL	= 0x400,
	PDC_20621_SEQMASK	= 0x480,
	PDC_20621_GENERAL_CTL	= 0x484,
	PDC_20621_PAGE_SIZE	= (32 * 1024),

	/* chosen, not constant, values; we design our own DIMM mem map */
	PDC_20621_DIMM_WINDOW	= 0x0C,	/* page# for 32K DIMM window */
	PDC_20621_DIMM_BASE	= 0x00200000,
	PDC_20621_DIMM_DATA	= (64 * 1024),
	PDC_DIMM_DATA_STEP	= (256 * 1024),
	PDC_DIMM_WINDOW_STEP	= (8 * 1024),
	PDC_DIMM_HOST_PRD	= (6 * 1024),
	PDC_DIMM_HOST_PKT	= (128 * 0),
	PDC_DIMM_HPKT_PRD	= (128 * 1),
	PDC_DIMM_ATA_PKT	= (128 * 2),
	PDC_DIMM_APKT_PRD	= (128 * 3),
	PDC_DIMM_HEADER_SZ	= PDC_DIMM_APKT_PRD + 128,
	PDC_PAGE_WINDOW		= 0x40,
	PDC_PAGE_DATA		= PDC_PAGE_WINDOW +
				  (PDC_20621_DIMM_DATA / PDC_20621_PAGE_SIZE),
	PDC_PAGE_SET		= PDC_DIMM_DATA_STEP / PDC_20621_PAGE_SIZE,

	PDC_CHIP0_OFS		= 0xC0000, /* offset of chip #0 */

	PDC_20621_ERR_MASK	= (1<<19) | (1<<20) | (1<<21) | (1<<22) |
				  (1<<23),

	board_20621		= 0,	/* FastTrak S150 SX4 */

	PDC_MASK_INT		= (1 << 10), /* HDMA/ATA mask int */
	PDC_RESET		= (1 << 11), /* HDMA/ATA reset */
	PDC_DMA_ENABLE		= (1 << 7),  /* DMA start/stop */

	PDC_MAX_HDMA		= 32,
	PDC_HDMA_Q_MASK		= (PDC_MAX_HDMA - 1),

	PDC_DIMM0_SPD_DEV_ADDRESS	= 0x50,
	PDC_DIMM1_SPD_DEV_ADDRESS	= 0x51,
	PDC_I2C_CONTROL			= 0x48,
	PDC_I2C_ADDR_DATA		= 0x4C,
	PDC_DIMM0_CONTROL		= 0x80,
	PDC_DIMM1_CONTROL		= 0x84,
	PDC_SDRAM_CONTROL		= 0x88,
	PDC_I2C_WRITE			= 0,		/* master -> slave */
	PDC_I2C_READ			= (1 << 6),	/* master <- slave */
	PDC_I2C_START			= (1 << 7),	/* start I2C proto */
	PDC_I2C_MASK_INT		= (1 << 5),	/* mask I2C interrupt */
	PDC_I2C_COMPLETE		= (1 << 16),	/* I2C normal compl. */
	PDC_I2C_NO_ACK			= (1 << 20),	/* slave no-ack addr */
	PDC_DIMM_SPD_SUBADDRESS_START	= 0x00,
	PDC_DIMM_SPD_SUBADDRESS_END	= 0x7F,
	PDC_DIMM_SPD_ROW_NUM		= 3,
	PDC_DIMM_SPD_COLUMN_NUM		= 4,
	PDC_DIMM_SPD_MODULE_ROW		= 5,
	PDC_DIMM_SPD_TYPE		= 11,
	PDC_DIMM_SPD_FRESH_RATE		= 12,
	PDC_DIMM_SPD_BANK_NUM		= 17,
	PDC_DIMM_SPD_CAS_LATENCY	= 18,
	PDC_DIMM_SPD_ATTRIBUTE		= 21,
	PDC_DIMM_SPD_ROW_PRE_CHARGE	= 27,
	PDC_DIMM_SPD_ROW_ACTIVE_DELAY	= 28,
	PDC_DIMM_SPD_RAS_CAS_DELAY	= 29,
	PDC_DIMM_SPD_ACTIVE_PRECHARGE	= 30,
	PDC_DIMM_SPD_SYSTEM_FREQ	= 126,
	PDC_CTL_STATUS			= 0x08,
	PDC_DIMM_WINDOW_CTLR		= 0x0C,
	PDC_TIME_CONTROL		= 0x3C,
	PDC_TIME_PERIOD			= 0x40,
	PDC_TIME_COUNTER		= 0x44,
	PDC_GENERAL_CTLR		= 0x484,
	PCI_PLL_INIT			= 0x8A531824,
	PCI_X_TCOUNT			= 0xEE1E5CFF,

	/* PDC_TIME_CONTROL bits */
	PDC_TIMER_BUZZER		= (1 << 10),
	PDC_TIMER_MODE_PERIODIC		= 0,		/* bits 9:8 == 00 */
	PDC_TIMER_MODE_ONCE		= (1 << 8),	/* bits 9:8 == 01 */
	PDC_TIMER_ENABLE		= (1 << 7),
	PDC_TIMER_MASK_INT		= (1 << 5),
	PDC_TIMER_SEQ_MASK		= 0x1f,		/* SEQ ID for timer */
	PDC_TIMER_DEFAULT		= PDC_TIMER_MODE_ONCE |
					  PDC_TIMER_ENABLE |
					  PDC_TIMER_MASK_INT,
};

struct pdc_port_priv {
	u8			dimm_buf[(ATA_PRD_SZ * ATA_MAX_PRD) + 512];
	u8			*pkt;
	dma_addr_t		pkt_dma;
};

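/*
 * Per-host private data.  Because the PDC20621 has only one HDMA copy
 * engine shared by all four ports, HDMA packet submissions that arrive
 * while an HDMA transfer is already in flight are parked in the hdma[]
 * ring below (producer/consumer indices wrapped by PDC_HDMA_Q_MASK) and
 * replayed one at a time as each transfer completes.
 */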
struct pdc_host_priv {
	unsigned int		doing_hdma;
	unsigned int		hdma_prod;
	unsigned int		hdma_cons;
	struct {
		struct ata_queued_cmd	*qc;
		unsigned int		seq;
		unsigned long		pkt_ofs;
	} hdma[32];
};

static int pdc_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
static void pdc_error_handler(struct ata_port *ap);
static void pdc_freeze(struct ata_port *ap);
static void pdc_thaw(struct ata_port *ap);
static int pdc_port_start(struct ata_port *ap);
static void pdc20621_qc_prep(struct ata_queued_cmd *qc);
static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
static unsigned int pdc20621_dimm_init(struct ata_host *host);
static int pdc20621_detect_dimm(struct ata_host *host);
static unsigned int pdc20621_i2c_read(struct ata_host *host,
				      u32 device, u32 subaddr, u32 *pdata);
static int pdc20621_prog_dimm0(struct ata_host *host);
static unsigned int pdc20621_prog_dimm_global(struct ata_host *host);
#ifdef ATA_VERBOSE_DEBUG
static void pdc20621_get_from_dimm(struct ata_host *host,
				   void *psource, u32 offset, u32 size);
#endif
static void pdc20621_put_to_dimm(struct ata_host *host,
				 void *psource, u32 offset, u32 size);
static void pdc20621_irq_clear(struct ata_port *ap);
static unsigned int pdc20621_qc_issue(struct ata_queued_cmd *qc);
static int pdc_softreset(struct ata_link *link, unsigned int *class,
			 unsigned long deadline);
static void pdc_post_internal_cmd(struct ata_queued_cmd *qc);
static int pdc_check_atapi_dma(struct ata_queued_cmd *qc);

static struct scsi_host_template pdc_sata_sht = {
	ATA_BASE_SHT(DRV_NAME),
	.sg_tablesize		= LIBATA_MAX_PRD,
	.dma_boundary		= ATA_DMA_BOUNDARY,
};

/* TODO: inherit from base port_ops after converting to new EH */
static struct ata_port_operations pdc_20621_ops = {
	.inherits		= &ata_sff_port_ops,

	.check_atapi_dma	= pdc_check_atapi_dma,
	.qc_prep		= pdc20621_qc_prep,
	.qc_issue		= pdc20621_qc_issue,

	.freeze			= pdc_freeze,
	.thaw			= pdc_thaw,
	.softreset		= pdc_softreset,
	.error_handler		= pdc_error_handler,
	.lost_interrupt		= ATA_OP_NULL,
	.post_internal_cmd	= pdc_post_internal_cmd,

	.port_start		= pdc_port_start,

	.sff_tf_load		= pdc_tf_load_mmio,
	.sff_exec_command	= pdc_exec_command_mmio,
	.sff_irq_clear		= pdc20621_irq_clear,
};

static const struct ata_port_info pdc_port_info[] = {
	/* board_20621 */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_SRST | ATA_FLAG_MMIO |
				  ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &pdc_20621_ops,
	},

};

static const struct pci_device_id pdc_sata_pci_tbl[] = {
	{ PCI_VDEVICE(PROMISE, 0x6622), board_20621 },

	{ }	/* terminate list */
};

static struct pci_driver pdc_sata_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= pdc_sata_pci_tbl,
	.probe			= pdc_sata_init_one,
	.remove			= ata_pci_remove_one,
};

static int pdc_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct pdc_port_priv *pp;
	int rc;

	rc = ata_port_start(ap);
	if (rc)
		return rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	pp->pkt = dmam_alloc_coherent(dev, 128, &pp->pkt_dma, GFP_KERNEL);
	if (!pp->pkt)
		return -ENOMEM;

	ap->private_data = pp;

	return 0;
}

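/*
 * DIMM memory layout used by the packet builders below (see the
 * PDC_DIMM_* constants above): each port owns an 8K control window at
 * PDC_DIMM_WINDOW_STEP * portno holding, in order, the Host DMA packet
 * (+0), the HDMA S/G entry (+128), the ATA packet (+256), the ATA S/G
 * entry (+384) and the host PRD table (+6K).  The actual data buffer
 * for each port sits further up, at PDC_20621_DIMM_DATA plus
 * PDC_DIMM_DATA_STEP * portno.
 */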
static inline void pdc20621_ata_sg(struct ata_taskfile *tf, u8 *buf,
				   unsigned int portno,
				   unsigned int total_len)
{
	u32 addr;
	unsigned int dw = PDC_DIMM_APKT_PRD >> 2;
	__le32 *buf32 = (__le32 *) buf;

	/* output ATA packet S/G table */
	addr = PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA +
	       (PDC_DIMM_DATA_STEP * portno);
	VPRINTK("ATA sg addr 0x%x, %d\n", addr, addr);
	buf32[dw] = cpu_to_le32(addr);
	buf32[dw + 1] = cpu_to_le32(total_len | ATA_PRD_EOT);

	VPRINTK("ATA PSG @ %x == (0x%x, 0x%x)\n",
		PDC_20621_DIMM_BASE +
		(PDC_DIMM_WINDOW_STEP * portno) +
		PDC_DIMM_APKT_PRD,
		buf32[dw], buf32[dw + 1]);
}

static inline void pdc20621_host_sg(struct ata_taskfile *tf, u8 *buf,
				    unsigned int portno,
				    unsigned int total_len)
{
	u32 addr;
	unsigned int dw = PDC_DIMM_HPKT_PRD >> 2;
	__le32 *buf32 = (__le32 *) buf;

	/* output Host DMA packet S/G table */
	addr = PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA +
	       (PDC_DIMM_DATA_STEP * portno);

	buf32[dw] = cpu_to_le32(addr);
	buf32[dw + 1] = cpu_to_le32(total_len | ATA_PRD_EOT);

	VPRINTK("HOST PSG @ %x == (0x%x, 0x%x)\n",
		PDC_20621_DIMM_BASE +
		(PDC_DIMM_WINDOW_STEP * portno) +
		PDC_DIMM_HPKT_PRD,
		buf32[dw], buf32[dw + 1]);
}

static inline unsigned int pdc20621_ata_pkt(struct ata_taskfile *tf,
					    unsigned int devno, u8 *buf,
					    unsigned int portno)
{
	unsigned int i, dw;
	__le32 *buf32 = (__le32 *) buf;
	u8 dev_reg;

	unsigned int dimm_sg = PDC_20621_DIMM_BASE +
			       (PDC_DIMM_WINDOW_STEP * portno) +
			       PDC_DIMM_APKT_PRD;
	VPRINTK("ENTER, dimm_sg == 0x%x, %d\n", dimm_sg, dimm_sg);

	i = PDC_DIMM_ATA_PKT;

	/*
	 * Set up ATA packet
	 */
	if ((tf->protocol == ATA_PROT_DMA) && (!(tf->flags & ATA_TFLAG_WRITE)))
		buf[i++] = PDC_PKT_READ;
	else if (tf->protocol == ATA_PROT_NODATA)
		buf[i++] = PDC_PKT_NODATA;
	else
		buf[i++] = 0;
	buf[i++] = 0;			/* reserved */
	buf[i++] = portno + 1;		/* seq. id */
	buf[i++] = 0xff;		/* delay seq. id */

	/* dimm dma S/G, and next-pkt */
	dw = i >> 2;
	if (tf->protocol == ATA_PROT_NODATA)
		buf32[dw] = 0;
	else
		buf32[dw] = cpu_to_le32(dimm_sg);
	buf32[dw + 1] = 0;
	i += 8;

	if (devno == 0)
		dev_reg = ATA_DEVICE_OBS;
	else
		dev_reg = ATA_DEVICE_OBS | ATA_DEV1;

	/* select device */
	buf[i++] = (1 << 5) | PDC_PKT_CLEAR_BSY | ATA_REG_DEVICE;
	buf[i++] = dev_reg;

	/* device control register */
	buf[i++] = (1 << 5) | PDC_REG_DEVCTL;
	buf[i++] = tf->ctl;

	return i;
}

static inline void pdc20621_host_pkt(struct ata_taskfile *tf, u8 *buf,
				     unsigned int portno)
{
	unsigned int dw;
	u32 tmp;
	__le32 *buf32 = (__le32 *) buf;

	unsigned int host_sg = PDC_20621_DIMM_BASE +
			       (PDC_DIMM_WINDOW_STEP * portno) +
			       PDC_DIMM_HOST_PRD;
	unsigned int dimm_sg = PDC_20621_DIMM_BASE +
			       (PDC_DIMM_WINDOW_STEP * portno) +
			       PDC_DIMM_HPKT_PRD;
	VPRINTK("ENTER, dimm_sg == 0x%x, %d\n", dimm_sg, dimm_sg);
	VPRINTK("host_sg == 0x%x, %d\n", host_sg, host_sg);

	dw = PDC_DIMM_HOST_PKT >> 2;

	/*
	 * Set up Host DMA packet
	 */
	if ((tf->protocol == ATA_PROT_DMA) && (!(tf->flags & ATA_TFLAG_WRITE)))
		tmp = PDC_PKT_READ;
	else
		tmp = 0;
	tmp |= ((portno + 1 + 4) << 16);	/* seq. id */
	tmp |= (0xff << 24);			/* delay seq. id */
	buf32[dw + 0] = cpu_to_le32(tmp);
	buf32[dw + 1] = cpu_to_le32(host_sg);
	buf32[dw + 2] = cpu_to_le32(dimm_sg);
	buf32[dw + 3] = 0;

	VPRINTK("HOST PKT @ %x == (0x%x 0x%x 0x%x 0x%x)\n",
		PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * portno) +
		PDC_DIMM_HOST_PKT,
		buf32[dw + 0],
		buf32[dw + 1],
		buf32[dw + 2],
		buf32[dw + 3]);
}

static void pdc20621_dma_prep(struct ata_queued_cmd *qc)
{
	struct scatterlist *sg;
	struct ata_port *ap = qc->ap;
	struct pdc_port_priv *pp = ap->private_data;
	void __iomem *mmio = ap->host->iomap[PDC_MMIO_BAR];
	void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
	unsigned int portno = ap->port_no;
	unsigned int i, si, idx, total_len = 0, sgt_len;
	__le32 *buf = (__le32 *) &pp->dimm_buf[PDC_DIMM_HEADER_SZ];

	WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));

	VPRINTK("ata%u: ENTER\n", ap->print_id);

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	/*
	 * Build S/G table
	 */
	idx = 0;
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		buf[idx++] = cpu_to_le32(sg_dma_address(sg));
		buf[idx++] = cpu_to_le32(sg_dma_len(sg));
		total_len += sg_dma_len(sg);
	}
	buf[idx - 1] |= cpu_to_le32(ATA_PRD_EOT);
	sgt_len = idx * 4;

	/*
	 * Build ATA, host DMA packets
	 */
	pdc20621_host_sg(&qc->tf, &pp->dimm_buf[0], portno, total_len);
	pdc20621_host_pkt(&qc->tf, &pp->dimm_buf[0], portno);

	pdc20621_ata_sg(&qc->tf, &pp->dimm_buf[0], portno, total_len);
	i = pdc20621_ata_pkt(&qc->tf, qc->dev->devno, &pp->dimm_buf[0], portno);

	if (qc->tf.flags & ATA_TFLAG_LBA48)
		i = pdc_prep_lba48(&qc->tf, &pp->dimm_buf[0], i);
	else
		i = pdc_prep_lba28(&qc->tf, &pp->dimm_buf[0], i);

	pdc_pkt_footer(&qc->tf, &pp->dimm_buf[0], i);

	/* copy three S/G tables and two packets to DIMM MMIO window */
	memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP),
		    &pp->dimm_buf, PDC_DIMM_HEADER_SZ);
	memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP) +
		    PDC_DIMM_HOST_PRD,
		    &pp->dimm_buf[PDC_DIMM_HEADER_SZ], sgt_len);

	/* force host FIFO dump */
	writel(0x00000001, mmio + PDC_20621_GENERAL_CTL);

	readl(dimm_mmio);	/* MMIO PCI posting flush */

	VPRINTK("ata pkt buf ofs %u, prd size %u, mmio copied\n", i, sgt_len);
}

static void pdc20621_nodata_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct pdc_port_priv *pp = ap->private_data;
	void __iomem *mmio = ap->host->iomap[PDC_MMIO_BAR];
	void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
	unsigned int portno = ap->port_no;
	unsigned int i;

	VPRINTK("ata%u: ENTER\n", ap->print_id);

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	i = pdc20621_ata_pkt(&qc->tf, qc->dev->devno, &pp->dimm_buf[0], portno);

	if (qc->tf.flags & ATA_TFLAG_LBA48)
		i = pdc_prep_lba48(&qc->tf, &pp->dimm_buf[0], i);
	else
		i = pdc_prep_lba28(&qc->tf, &pp->dimm_buf[0], i);

	pdc_pkt_footer(&qc->tf, &pp->dimm_buf[0], i);

	/* copy three S/G tables and two packets to DIMM MMIO window */
	memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP),
		    &pp->dimm_buf, PDC_DIMM_HEADER_SZ);

	/* force host FIFO dump */
	writel(0x00000001, mmio + PDC_20621_GENERAL_CTL);

	readl(dimm_mmio);	/* MMIO PCI posting flush */

	VPRINTK("ata pkt buf ofs %u, mmio copied\n", i);
}

static void pdc20621_qc_prep(struct ata_queued_cmd *qc)
{
	switch (qc->tf.protocol) {
	case ATA_PROT_DMA:
		pdc20621_dma_prep(qc);
		break;
	case ATA_PROT_NODATA:
		pdc20621_nodata_prep(qc);
		break;
	default:
		break;
	}
}

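/*
 * The helpers below serialize use of the single HDMA copy engine.
 * __pdc20621_push_hdma() writes a packet pointer straight to the
 * hardware; pdc20621_push_hdma() does so only when no HDMA transfer is
 * in flight, otherwise it queues the request in pdc_host_priv.hdma[];
 * pdc20621_pop_hdma() runs when a transfer finishes and either starts
 * the next queued request or clears doing_hdma.
 */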
static void __pdc20621_push_hdma(struct ata_queued_cmd *qc,
				 unsigned int seq,
				 u32 pkt_ofs)
{
	struct ata_port *ap = qc->ap;
	struct ata_host *host = ap->host;
	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
	readl(mmio + PDC_20621_SEQCTL + (seq * 4));	/* flush */

	writel(pkt_ofs, mmio + PDC_HDMA_PKT_SUBMIT);
	readl(mmio + PDC_HDMA_PKT_SUBMIT);	/* flush */
}

static void pdc20621_push_hdma(struct ata_queued_cmd *qc,
			       unsigned int seq,
			       u32 pkt_ofs)
{
	struct ata_port *ap = qc->ap;
	struct pdc_host_priv *pp = ap->host->private_data;
	unsigned int idx = pp->hdma_prod & PDC_HDMA_Q_MASK;

	if (!pp->doing_hdma) {
		__pdc20621_push_hdma(qc, seq, pkt_ofs);
		pp->doing_hdma = 1;
		return;
	}

	pp->hdma[idx].qc = qc;
	pp->hdma[idx].seq = seq;
	pp->hdma[idx].pkt_ofs = pkt_ofs;
	pp->hdma_prod++;
}

static void pdc20621_pop_hdma(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct pdc_host_priv *pp = ap->host->private_data;
	unsigned int idx = pp->hdma_cons & PDC_HDMA_Q_MASK;

	/* if nothing on queue, we're done */
	if (pp->hdma_prod == pp->hdma_cons) {
		pp->doing_hdma = 0;
		return;
	}

	__pdc20621_push_hdma(pp->hdma[idx].qc, pp->hdma[idx].seq,
			     pp->hdma[idx].pkt_ofs);
	pp->hdma_cons++;
}

#ifdef ATA_VERBOSE_DEBUG
static void pdc20621_dump_hdma(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int port_no = ap->port_no;
	void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];

	dimm_mmio += (port_no * PDC_DIMM_WINDOW_STEP);
	dimm_mmio += PDC_DIMM_HOST_PKT;

	printk(KERN_ERR "HDMA[0] == 0x%08X\n", readl(dimm_mmio));
	printk(KERN_ERR "HDMA[1] == 0x%08X\n", readl(dimm_mmio + 4));
	printk(KERN_ERR "HDMA[2] == 0x%08X\n", readl(dimm_mmio + 8));
	printk(KERN_ERR "HDMA[3] == 0x%08X\n", readl(dimm_mmio + 12));
}
#else
static inline void pdc20621_dump_hdma(struct ata_queued_cmd *qc) { }
#endif /* ATA_VERBOSE_DEBUG */

static void pdc20621_packet_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_host *host = ap->host;
	unsigned int port_no = ap->port_no;
	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
	u8 seq = (u8) (port_no + 1);
	unsigned int port_ofs;

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	VPRINTK("ata%u: ENTER\n", ap->print_id);

	wmb();	/* flush PRD, pkt writes */

	port_ofs = PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * port_no);

	/* if writing, we (1) DMA to DIMM, then (2) do ATA command */
	if (rw && qc->tf.protocol == ATA_PROT_DMA) {
		seq += 4;

		pdc20621_dump_hdma(qc);
		pdc20621_push_hdma(qc, seq, port_ofs + PDC_DIMM_HOST_PKT);
		VPRINTK("queued ofs 0x%x (%u), seq %u\n",
			port_ofs + PDC_DIMM_HOST_PKT,
			port_ofs + PDC_DIMM_HOST_PKT,
			seq);
	} else {
		writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
		readl(mmio + PDC_20621_SEQCTL + (seq * 4));	/* flush */

		writel(port_ofs + PDC_DIMM_ATA_PKT,
		       ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
		readl(ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
		VPRINTK("submitted ofs 0x%x (%u), seq %u\n",
			port_ofs + PDC_DIMM_ATA_PKT,
			port_ofs + PDC_DIMM_ATA_PKT,
			seq);
	}
}

static unsigned int pdc20621_qc_issue(struct ata_queued_cmd *qc)
{
	switch (qc->tf.protocol) {
	case ATA_PROT_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			break;
		/*FALLTHROUGH*/
	case ATA_PROT_DMA:
		pdc20621_packet_start(qc);
		return 0;

	case ATAPI_PROT_DMA:
		BUG();
		break;

	default:
		break;
	}

	return ata_sff_qc_issue(qc);
}

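/*
 * Per-port interrupt handling.  Each DMA command completes in two
 * hardware steps (see "Theory of operation" above): for a READ the ATA
 * engine interrupt arrives first and the matching HDMA copy to host
 * memory is pushed, then the HDMA interrupt completes the qc; for a
 * WRITE the HDMA interrupt arrives first and the ATA packet is
 * submitted, then the ATA interrupt completes the qc.
 */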
static inline unsigned int pdc20621_host_intr(struct ata_port *ap,
					      struct ata_queued_cmd *qc,
					      unsigned int doing_hdma,
					      void __iomem *mmio)
{
	unsigned int port_no = ap->port_no;
	unsigned int port_ofs =
		PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * port_no);
	u8 status;
	unsigned int handled = 0;

	VPRINTK("ENTER\n");

	if ((qc->tf.protocol == ATA_PROT_DMA) &&	/* read */
	    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {

		/* step two - DMA from DIMM to host */
		if (doing_hdma) {
			VPRINTK("ata%u: read hdma, 0x%x 0x%x\n", ap->print_id,
				readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
			/* get drive status; clear intr; complete txn */
			qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
			ata_qc_complete(qc);
			pdc20621_pop_hdma(qc);
		}

		/* step one - exec ATA command */
		else {
			u8 seq = (u8) (port_no + 1 + 4);
			VPRINTK("ata%u: read ata, 0x%x 0x%x\n", ap->print_id,
				readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));

			/* submit hdma pkt */
			pdc20621_dump_hdma(qc);
			pdc20621_push_hdma(qc, seq,
					   port_ofs + PDC_DIMM_HOST_PKT);
		}
		handled = 1;

	} else if (qc->tf.protocol == ATA_PROT_DMA) {	/* write */

		/* step one - DMA from host to DIMM */
		if (doing_hdma) {
			u8 seq = (u8) (port_no + 1);
			VPRINTK("ata%u: write hdma, 0x%x 0x%x\n", ap->print_id,
				readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));

			/* submit ata pkt */
			writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
			readl(mmio + PDC_20621_SEQCTL + (seq * 4));
			writel(port_ofs + PDC_DIMM_ATA_PKT,
			       ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
			readl(ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
		}

		/* step two - execute ATA command */
		else {
			VPRINTK("ata%u: write ata, 0x%x 0x%x\n", ap->print_id,
				readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
			/* get drive status; clear intr; complete txn */
			qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
			ata_qc_complete(qc);
			pdc20621_pop_hdma(qc);
		}
		handled = 1;

	/* command completion, but no data xfer */
	} else if (qc->tf.protocol == ATA_PROT_NODATA) {

		status = ata_sff_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
		DPRINTK("BUS_NODATA (drv_stat 0x%X)\n", status);
		qc->err_mask |= ac_err_mask(status);
		ata_qc_complete(qc);
		handled = 1;

	} else {
		ap->stats.idle_irq++;
	}

	return handled;
}

static void pdc20621_irq_clear(struct ata_port *ap)
{
	ioread8(ap->ioaddr.status_addr);
}

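/*
 * Top-level interrupt handler.  PDC_20621_SEQMASK reports which SEQ IDs
 * have raised interrupts; the driver uses SEQ IDs 1-4 for the four ATA
 * engines and 5-8 for the corresponding HDMA transfers (hence the
 * "i > 4" test below to tell pdc20621_host_intr() which step finished).
 */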
static irqreturn_t pdc20621_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct ata_port *ap;
	u32 mask = 0;
	unsigned int i, tmp, port_no;
	unsigned int handled = 0;
	void __iomem *mmio_base;

	VPRINTK("ENTER\n");

	if (!host || !host->iomap[PDC_MMIO_BAR]) {
		VPRINTK("QUICK EXIT\n");
		return IRQ_NONE;
	}

	mmio_base = host->iomap[PDC_MMIO_BAR];

	/* reading should also clear interrupts */
	mmio_base += PDC_CHIP0_OFS;
	mask = readl(mmio_base + PDC_20621_SEQMASK);
	VPRINTK("mask == 0x%x\n", mask);

	if (mask == 0xffffffff) {
		VPRINTK("QUICK EXIT 2\n");
		return IRQ_NONE;
	}
	mask &= 0xffff;		/* only 16 tags possible */
	if (!mask) {
		VPRINTK("QUICK EXIT 3\n");
		return IRQ_NONE;
	}

	spin_lock(&host->lock);

	for (i = 1; i < 9; i++) {
		port_no = i - 1;
		if (port_no > 3)
			port_no -= 4;
		if (port_no >= host->n_ports)
			ap = NULL;
		else
			ap = host->ports[port_no];
		tmp = mask & (1 << i);
		VPRINTK("seq %u, port_no %u, ap %p, tmp %x\n", i, port_no, ap, tmp);
		if (tmp && ap &&
		    !(ap->flags & ATA_FLAG_DISABLED)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
				handled += pdc20621_host_intr(ap, qc, (i > 4),
							      mmio_base);
		}
	}

	spin_unlock(&host->lock);

	VPRINTK("mask == 0x%x\n", mask);

	VPRINTK("EXIT\n");

	return IRQ_RETVAL(handled);
}

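/*
 * Error-handling helpers: freeze masks the port's interrupt and stops
 * DMA via PDC_CTLSTAT; thaw drops any pending interrupt by reading the
 * status register and then unmasks the interrupt again.
 */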
static void pdc_freeze(struct ata_port *ap)
{
	void __iomem *mmio = ap->ioaddr.cmd_addr;
	u32 tmp;

	/* FIXME: if all 4 ATA engines are stopped, also stop HDMA engine */

	tmp = readl(mmio + PDC_CTLSTAT);
	tmp |= PDC_MASK_INT;
	tmp &= ~PDC_DMA_ENABLE;
	writel(tmp, mmio + PDC_CTLSTAT);
	readl(mmio + PDC_CTLSTAT);	/* flush */
}

static void pdc_thaw(struct ata_port *ap)
{
	void __iomem *mmio = ap->ioaddr.cmd_addr;
	u32 tmp;

	/* FIXME: start HDMA engine, if zero ATA engines running */

	/* clear IRQ */
	ioread8(ap->ioaddr.status_addr);

	/* turn IRQ back on */
	tmp = readl(mmio + PDC_CTLSTAT);
	tmp &= ~PDC_MASK_INT;
	writel(tmp, mmio + PDC_CTLSTAT);
	readl(mmio + PDC_CTLSTAT);	/* flush */
}

static void pdc_reset_port(struct ata_port *ap)
{
	void __iomem *mmio = ap->ioaddr.cmd_addr + PDC_CTLSTAT;
	unsigned int i;
	u32 tmp;

	/* FIXME: handle HDMA copy engine */

	for (i = 11; i > 0; i--) {
		tmp = readl(mmio);
		if (tmp & PDC_RESET)
			break;

		udelay(100);
	}

	tmp |= PDC_RESET;
	writel(tmp, mmio);
	udelay(11);

	tmp &= ~PDC_RESET;
	writel(tmp, mmio);
	readl(mmio);	/* flush */
}

static int pdc_softreset(struct ata_link *link, unsigned int *class,
			 unsigned long deadline)
{
	pdc_reset_port(link->ap);
	return ata_sff_softreset(link, class, deadline);
}

static void pdc_error_handler(struct ata_port *ap)
{
	if (!(ap->pflags & ATA_PFLAG_FROZEN))
		pdc_reset_port(ap);

	ata_std_error_handler(ap);
}

static void pdc_post_internal_cmd(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* make DMA engine forget about the failed command */
	if (qc->flags & ATA_QCFLAG_FAILED)
		pdc_reset_port(ap);
}

static int pdc_check_atapi_dma(struct ata_queued_cmd *qc)
{
	u8 *scsicmd = qc->scsicmd->cmnd;
	int pio = 1; /* atapi dma off by default */

	/* Whitelist commands that may use DMA. */
	switch (scsicmd[0]) {
	case WRITE_12:
	case WRITE_10:
	case WRITE_6:
	case READ_12:
	case READ_10:
	case READ_6:
	case 0xad: /* READ_DVD_STRUCTURE */
	case 0xbe: /* READ_CD */
		pio = 0;
	}

	/* -45150 (FFFF4FA2) to -1 (FFFFFFFF) shall use PIO mode */
	if (scsicmd[0] == WRITE_10) {
		unsigned int lba =
			(scsicmd[2] << 24) |
			(scsicmd[3] << 16) |
			(scsicmd[4] << 8) |
			scsicmd[5];
		if (lba >= 0xFFFF4FA2)
			pio = 1;
	}
	return pio;
}

static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
{
	WARN_ON(tf->protocol == ATA_PROT_DMA ||
		tf->protocol == ATAPI_PROT_DMA);
	ata_sff_tf_load(ap, tf);
}


static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
{
	WARN_ON(tf->protocol == ATA_PROT_DMA ||
		tf->protocol == ATAPI_PROT_DMA);
	ata_sff_exec_command(ap, tf);
}

static void pdc_sata_setup_port(struct ata_ioports *port, void __iomem *base)
{
	port->cmd_addr		= base;
	port->data_addr		= base;
	port->feature_addr	=
	port->error_addr	= base + 0x4;
	port->nsect_addr	= base + 0x8;
	port->lbal_addr		= base + 0xc;
	port->lbam_addr		= base + 0x10;
	port->lbah_addr		= base + 0x14;
	port->device_addr	= base + 0x18;
	port->command_addr	=
	port->status_addr	= base + 0x1c;
	port->altstatus_addr	=
	port->ctl_addr		= base + 0x38;
}

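/*
 * The on-board DIMM is not mapped linearly into PCI space; it is
 * reached through a single 32K window (PDC_DIMM_BAR) that is paged via
 * PDC_DIMM_WINDOW_CTLR.  The two helpers below copy arbitrary-length
 * buffers to and from DIMM memory by stepping the window one 32K page
 * at a time.
 */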
#ifdef ATA_VERBOSE_DEBUG
static void pdc20621_get_from_dimm(struct ata_host *host, void *psource,
				   u32 offset, u32 size)
{
	u32 window_size;
	u16 idx;
	u8 page_mask;
	long dist;
	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
	void __iomem *dimm_mmio = host->iomap[PDC_DIMM_BAR];

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	page_mask = 0x00;
	window_size = 0x2000 * 4; /* 32K byte uchar size */
	idx = (u16) (offset / window_size);

	writel(0x01, mmio + PDC_GENERAL_CTLR);
	readl(mmio + PDC_GENERAL_CTLR);
	writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
	readl(mmio + PDC_DIMM_WINDOW_CTLR);

	offset -= (idx * window_size);
	idx++;
	dist = ((long) (window_size - (offset + size))) >= 0 ? size :
		(long) (window_size - offset);
	memcpy_fromio((char *) psource, (char *) (dimm_mmio + offset / 4),
		      dist);

	psource += dist;
	size -= dist;
	for (; (long) size >= (long) window_size ;) {
		writel(0x01, mmio + PDC_GENERAL_CTLR);
		readl(mmio + PDC_GENERAL_CTLR);
		writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
		readl(mmio + PDC_DIMM_WINDOW_CTLR);
		memcpy_fromio((char *) psource, (char *) (dimm_mmio),
			      window_size / 4);
		psource += window_size;
		size -= window_size;
		idx++;
	}

	if (size) {
		writel(0x01, mmio + PDC_GENERAL_CTLR);
		readl(mmio + PDC_GENERAL_CTLR);
		writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
		readl(mmio + PDC_DIMM_WINDOW_CTLR);
		memcpy_fromio((char *) psource, (char *) (dimm_mmio),
			      size / 4);
	}
}
#endif

static void pdc20621_put_to_dimm(struct ata_host *host, void *psource,
				 u32 offset, u32 size)
{
	u32 window_size;
	u16 idx;
	u8 page_mask;
	long dist;
	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
	void __iomem *dimm_mmio = host->iomap[PDC_DIMM_BAR];

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	page_mask = 0x00;
	window_size = 0x2000 * 4; /* 32K byte uchar size */
	idx = (u16) (offset / window_size);

	writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
	readl(mmio + PDC_DIMM_WINDOW_CTLR);
	offset -= (idx * window_size);
	idx++;
	dist = ((long)(s32)(window_size - (offset + size))) >= 0 ? size :
		(long) (window_size - offset);
	memcpy_toio(dimm_mmio + offset / 4, psource, dist);
	writel(0x01, mmio + PDC_GENERAL_CTLR);
	readl(mmio + PDC_GENERAL_CTLR);

	psource += dist;
	size -= dist;
	for (; (long) size >= (long) window_size ;) {
		writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
		readl(mmio + PDC_DIMM_WINDOW_CTLR);
		memcpy_toio(dimm_mmio, psource, window_size / 4);
		writel(0x01, mmio + PDC_GENERAL_CTLR);
		readl(mmio + PDC_GENERAL_CTLR);
		psource += window_size;
		size -= window_size;
		idx++;
	}

	if (size) {
		writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
		readl(mmio + PDC_DIMM_WINDOW_CTLR);
		memcpy_toio(dimm_mmio, psource, size / 4);
		writel(0x01, mmio + PDC_GENERAL_CTLR);
		readl(mmio + PDC_GENERAL_CTLR);
	}
}

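/*
 * The DIMM's SPD EEPROM (at I2C address 0x50, or 0x51 for a second
 * module) is read one byte at a time through the chip's I2C master
 * using the PDC_I2C_ADDR_DATA / PDC_I2C_CONTROL registers.
 */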
static unsigned int pdc20621_i2c_read(struct ata_host *host, u32 device,
				      u32 subaddr, u32 *pdata)
{
	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
	u32 i2creg = 0;
	u32 status;
	u32 count = 0;

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	i2creg |= device << 24;
	i2creg |= subaddr << 16;

	/* Set the device and subaddress */
	writel(i2creg, mmio + PDC_I2C_ADDR_DATA);
	readl(mmio + PDC_I2C_ADDR_DATA);

	/* Write Control to perform read operation, mask int */
	writel(PDC_I2C_READ | PDC_I2C_START | PDC_I2C_MASK_INT,
	       mmio + PDC_I2C_CONTROL);

	for (count = 0; count <= 1000; count ++) {
		status = readl(mmio + PDC_I2C_CONTROL);
		if (status & PDC_I2C_COMPLETE) {
			status = readl(mmio + PDC_I2C_ADDR_DATA);
			break;
		} else if (count == 1000)
			return 0;
	}

	*pdata = (status >> 8) & 0x000000ff;
	return 1;
}

static int pdc20621_detect_dimm(struct ata_host *host)
{
	u32 data = 0;
	if (pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
			      PDC_DIMM_SPD_SYSTEM_FREQ, &data)) {
		if (data == 100)
			return 100;
	} else
		return 0;

	if (pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS, 9, &data)) {
		if (data <= 0x75)
			return 133;
	} else
		return 0;

	return 0;
}

static int pdc20621_prog_dimm0(struct ata_host *host)
{
	u32 spd0[50];
	u32 data = 0;
	int size, i;
	u8 bdimmsize;
	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
	static const struct {
		unsigned int reg;
		unsigned int ofs;
	} pdc_i2c_read_data [] = {
		{ PDC_DIMM_SPD_TYPE, 11 },
		{ PDC_DIMM_SPD_FRESH_RATE, 12 },
		{ PDC_DIMM_SPD_COLUMN_NUM, 4 },
		{ PDC_DIMM_SPD_ATTRIBUTE, 21 },
		{ PDC_DIMM_SPD_ROW_NUM, 3 },
		{ PDC_DIMM_SPD_BANK_NUM, 17 },
		{ PDC_DIMM_SPD_MODULE_ROW, 5 },
		{ PDC_DIMM_SPD_ROW_PRE_CHARGE, 27 },
		{ PDC_DIMM_SPD_ROW_ACTIVE_DELAY, 28 },
		{ PDC_DIMM_SPD_RAS_CAS_DELAY, 29 },
		{ PDC_DIMM_SPD_ACTIVE_PRECHARGE, 30 },
		{ PDC_DIMM_SPD_CAS_LATENCY, 18 },
	};

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	for (i = 0; i < ARRAY_SIZE(pdc_i2c_read_data); i++)
		pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
				  pdc_i2c_read_data[i].reg,
				  &spd0[pdc_i2c_read_data[i].ofs]);

	data |= (spd0[4] - 8) | ((spd0[21] != 0) << 3) | ((spd0[3]-11) << 4);
	data |= ((spd0[17] / 4) << 6) | ((spd0[5] / 2) << 7) |
		((((spd0[27] + 9) / 10) - 1) << 8) ;
	data |= (((((spd0[29] > spd0[28])
		    ? spd0[29] : spd0[28]) + 9) / 10) - 1) << 10;
	data |= ((spd0[30] - spd0[29] + 9) / 10 - 2) << 12;

	if (spd0[18] & 0x08)
		data |= ((0x03) << 14);
	else if (spd0[18] & 0x04)
		data |= ((0x02) << 14);
	else if (spd0[18] & 0x01)
		data |= ((0x01) << 14);
	else
		data |= (0 << 14);

	/*
	   Calculate the size of bDIMMSize (power of 2) and
	   merge the DIMM size by program start/end address.
	*/
	bdimmsize = spd0[4] + (spd0[5] / 2) + spd0[3] + (spd0[17] / 2) + 3;
	size = (1 << bdimmsize) >> 20;	/* size = xxx(MB) */
	data |= (((size / 16) - 1) << 16);
	data |= (0 << 23);
	data |= 8;
	writel(data, mmio + PDC_DIMM0_CONTROL);
	readl(mmio + PDC_DIMM0_CONTROL);
	return size;
}

static unsigned int pdc20621_prog_dimm_global(struct ata_host *host)
{
	u32 data, spd0;
	int error, i;
	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	/*
	  Set To Default : DIMM Module Global Control Register (0x022259F1)
	  DIMM Arbitration Disable (bit 20)
	  DIMM Data/Control Output Driving Selection (bit12 - bit15)
	  Refresh Enable (bit 17)
	*/

	data = 0x022259F1;
	writel(data, mmio + PDC_SDRAM_CONTROL);
	readl(mmio + PDC_SDRAM_CONTROL);

	/* Turn on for ECC */
	pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
			  PDC_DIMM_SPD_TYPE, &spd0);
	if (spd0 == 0x02) {
		data |= (0x01 << 16);
		writel(data, mmio + PDC_SDRAM_CONTROL);
		readl(mmio + PDC_SDRAM_CONTROL);
		printk(KERN_ERR "Local DIMM ECC Enabled\n");
	}

	/* DIMM Initialization Select/Enable (bit 18/19) */
	data &= (~(1<<18));
	data |= (1<<19);
	writel(data, mmio + PDC_SDRAM_CONTROL);

	error = 1;
	for (i = 1; i <= 10; i++) {   /* polling ~5 secs */
		data = readl(mmio + PDC_SDRAM_CONTROL);
		if (!(data & (1<<19))) {
			error = 0;
			break;
		}
		msleep(i*100);
	}
	return error;
}

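/*
 * Board bring-up: measure the PCI bus clock with the chip's timer to
 * pick PLL parameters, read the DIMM's SPD data over I2C, program the
 * DIMM module and global control registers, and (for ECC modules)
 * zero-fill the DIMM to initialize ECC state.
 */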
static unsigned int pdc20621_dimm_init(struct ata_host *host)
{
	int speed, size, length;
	u32 addr, spd0, pci_status;
	u32 tmp = 0;
	u32 time_period = 0;
	u32 tcount = 0;
	u32 ticks = 0;
	u32 clock = 0;
	u32 fparam = 0;
	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	/* Initialize PLL based upon PCI Bus Frequency */

	/* Initialize Time Period Register */
	writel(0xffffffff, mmio + PDC_TIME_PERIOD);
	time_period = readl(mmio + PDC_TIME_PERIOD);
	VPRINTK("Time Period Register (0x40): 0x%x\n", time_period);

	/* Enable timer */
	writel(PDC_TIMER_DEFAULT, mmio + PDC_TIME_CONTROL);
	readl(mmio + PDC_TIME_CONTROL);

	/* Wait 3 seconds */
	msleep(3000);

	/*
	   When timer is enabled, counter is decreased every internal
	   clock cycle.
	*/
	tcount = readl(mmio + PDC_TIME_COUNTER);
	VPRINTK("Time Counter Register (0x44): 0x%x\n", tcount);

	/*
	   If SX4 is on PCI-X bus, after 3 seconds, the timer counter
	   register should be >= (0xffffffff - 3x10^8).
	*/
	if (tcount >= PCI_X_TCOUNT) {
		ticks = (time_period - tcount);
		VPRINTK("Num counters 0x%x (%d)\n", ticks, ticks);

		clock = (ticks / 300000);
		VPRINTK("10 * Internal clk = 0x%x (%d)\n", clock, clock);

		clock = (clock * 33);
		VPRINTK("10 * Internal clk * 33 = 0x%x (%d)\n", clock, clock);

		/* PLL F Param (bit 22:16) */
		fparam = (1400000 / clock) - 2;
		VPRINTK("PLL F Param: 0x%x (%d)\n", fparam, fparam);

		/* OD param = 0x2 (bit 31:30), R param = 0x5 (bit 29:25) */
		pci_status = (0x8a001824 | (fparam << 16));
	} else
		pci_status = PCI_PLL_INIT;

	/* Initialize PLL. */
	VPRINTK("pci_status: 0x%x\n", pci_status);
	writel(pci_status, mmio + PDC_CTL_STATUS);
	readl(mmio + PDC_CTL_STATUS);

	/*
	   Read SPD of DIMM by I2C interface,
	   and program the DIMM Module Controller.
	*/
	if (!(speed = pdc20621_detect_dimm(host))) {
		printk(KERN_ERR "Detect Local DIMM Fail\n");
		return 1;	/* DIMM error */
	}
	VPRINTK("Local DIMM Speed = %d\n", speed);

	/* Programming DIMM0 Module Control Register (index_CID0:80h) */
	size = pdc20621_prog_dimm0(host);
	VPRINTK("Local DIMM Size = %dMB\n", size);

	/* Programming DIMM Module Global Control Register (index_CID0:88h) */
	if (pdc20621_prog_dimm_global(host)) {
		printk(KERN_ERR "Programming DIMM Module Global Control Register Fail\n");
		return 1;
	}

#ifdef ATA_VERBOSE_DEBUG
	{
		u8 test_parttern1[40] =
			{0x55,0xAA,'P','r','o','m','i','s','e',' ',
			'N','o','t',' ','Y','e','t',' ',
			'D','e','f','i','n','e','d',' ',
			'1','.','1','0',
			'9','8','0','3','1','6','1','2',0,0};
		u8 test_parttern2[40] = {0};

		pdc20621_put_to_dimm(host, test_parttern2, 0x10040, 40);
		pdc20621_put_to_dimm(host, test_parttern2, 0x40, 40);

		pdc20621_put_to_dimm(host, test_parttern1, 0x10040, 40);
		pdc20621_get_from_dimm(host, test_parttern2, 0x40, 40);
		printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0],
		       test_parttern2[1], &(test_parttern2[2]));
		pdc20621_get_from_dimm(host, test_parttern2, 0x10040,
				       40);
		printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0],
		       test_parttern2[1], &(test_parttern2[2]));

		pdc20621_put_to_dimm(host, test_parttern1, 0x40, 40);
		pdc20621_get_from_dimm(host, test_parttern2, 0x40, 40);
		printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0],
		       test_parttern2[1], &(test_parttern2[2]));
	}
#endif

	/* ECC initialization. */

	pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
			  PDC_DIMM_SPD_TYPE, &spd0);
	if (spd0 == 0x02) {
		VPRINTK("Start ECC initialization\n");
		addr = 0;
		length = size * 1024 * 1024;
		while (addr < length) {
			pdc20621_put_to_dimm(host, (void *) &tmp, addr,
					     sizeof(u32));
			addr += sizeof(u32);
		}
		VPRINTK("Finish ECC initialization\n");
	}
	return 0;
}

static void pdc_20621_init(struct ata_host *host)
{
	u32 tmp;
	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	/*
	 * Select page 0x40 for our 32k DIMM window
	 */
	tmp = readl(mmio + PDC_20621_DIMM_WINDOW) & 0xffff0000;
	tmp |= PDC_PAGE_WINDOW;	/* page 40h; arbitrarily selected */
	writel(tmp, mmio + PDC_20621_DIMM_WINDOW);

	/*
	 * Reset Host DMA
	 */
	tmp = readl(mmio + PDC_HDMA_CTLSTAT);
	tmp |= PDC_RESET;
	writel(tmp, mmio + PDC_HDMA_CTLSTAT);
	readl(mmio + PDC_HDMA_CTLSTAT);		/* flush */

	udelay(10);

	tmp = readl(mmio + PDC_HDMA_CTLSTAT);
	tmp &= ~PDC_RESET;
	writel(tmp, mmio + PDC_HDMA_CTLSTAT);
	readl(mmio + PDC_HDMA_CTLSTAT);		/* flush */
}

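/*
 * PCI probe: map BAR 3 (chip MMIO) and BAR 4 (DIMM window), set up the
 * four port register blocks at 0x200 + port * 0x80, bring up the
 * on-board DIMM, reset the HDMA engine and activate the libata host.
 */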
static int pdc_sata_init_one(struct pci_dev *pdev,
			     const struct pci_device_id *ent)
{
	static int printed_version;
	const struct ata_port_info *ppi[] =
		{ &pdc_port_info[ent->driver_data], NULL };
	struct ata_host *host;
	struct pdc_host_priv *hpriv;
	int i, rc;

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 4);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;

	host->private_data = hpriv;

	/* acquire resources and fill host */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, (1 << PDC_MMIO_BAR) | (1 << PDC_DIMM_BAR),
				DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);

	for (i = 0; i < 4; i++) {
		struct ata_port *ap = host->ports[i];
		void __iomem *base = host->iomap[PDC_MMIO_BAR] + PDC_CHIP0_OFS;
		unsigned int offset = 0x200 + i * 0x80;

		pdc_sata_setup_port(&ap->ioaddr, base + offset);

		ata_port_pbar_desc(ap, PDC_MMIO_BAR, -1, "mmio");
		ata_port_pbar_desc(ap, PDC_DIMM_BAR, -1, "dimm");
		ata_port_pbar_desc(ap, PDC_MMIO_BAR, offset, "port");
	}

	/* configure and activate */
	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		return rc;
	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		return rc;

	if (pdc20621_dimm_init(host))
		return -ENOMEM;
	pdc_20621_init(host);

	pci_set_master(pdev);
	return ata_host_activate(host, pdev->irq, pdc20621_interrupt,
				 IRQF_SHARED, &pdc_sata_sht);
}

static int __init pdc_sata_init(void)
{
	return pci_register_driver(&pdc_sata_pci_driver);
}


static void __exit pdc_sata_exit(void)
{
	pci_unregister_driver(&pdc_sata_pci_driver);
}

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Promise SATA low-level driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, pdc_sata_pci_tbl);
MODULE_VERSION(DRV_VERSION);

module_init(pdc_sata_init);
module_exit(pdc_sata_exit);