/*
 *  sata_sx4.c - Promise SATA
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *		    Please ALWAYS copy linux-ide@vger.kernel.org
 *		    on emails.
 *
 *  Copyright 2003-2004 Red Hat, Inc.
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Hardware documentation available under NDA.
 *
 */
/*
	Theory of operation
	-------------------

	The SX4 (PDC20621) chip features a single Host DMA (HDMA) copy
	engine, DIMM memory, and four ATA engines (one per SATA port).
	Data is copied to/from DIMM memory by the HDMA engine, before
	handing off to one (or more) of the ATA engines.  The ATA
	engines operate solely on DIMM memory.

	The SX4 behaves like a PATA chip, with no SATA controls or
	knowledge whatsoever, leading to the presumption that
	PATA<->SATA bridges exist on SX4 boards, external to the
	PDC20621 chip itself.

	The chip is quite capable, supporting an XOR engine and linked
	hardware commands (permits a string of transactions to be
	submitted and waited-on as a single unit), and an optional
	microprocessor.

	The limiting factor is largely software.  This Linux driver was
	written to multiplex the single HDMA engine to copy disk
	transactions into a fixed DIMM memory space, from where an ATA
	engine takes over.  As a result, each WRITE looks like this:

		submit HDMA packet to hardware
		hardware copies data from system memory to DIMM
		hardware raises interrupt

		submit ATA packet to hardware
		hardware executes ATA WRITE command, w/ data in DIMM
		hardware raises interrupt

	and each READ looks like this:

		submit ATA packet to hardware
		hardware executes ATA READ command, w/ data in DIMM
		hardware raises interrupt

		submit HDMA packet to hardware
		hardware copies data from DIMM to system memory
		hardware raises interrupt

	This is a very slow, lock-step way of doing things that can
	certainly be improved by motivated kernel hackers.

 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>
#include "sata_promise.h"

#define DRV_NAME	"sata_sx4"
#define DRV_VERSION	"0.12"

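/*
 * DIMM memory layout used by this driver (see the PDC_20621_* and
 * PDC_DIMM_* constants below): each port owns an 8K "window" at
 * DIMM base + 8K * portno holding its host-DMA packet (offset 0),
 * host-DMA S/G entry (offset 128), ATA packet (offset 256), ATA S/G
 * entry (offset 384) and host PRD table (offset 6K), while the data
 * buffers themselves start 64K into the DIMM, spaced 256K per port.
 */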
enum {
	PDC_MMIO_BAR		= 3,
	PDC_DIMM_BAR		= 4,

	PDC_PRD_TBL		= 0x44,	/* Direct command DMA table addr */

	PDC_PKT_SUBMIT		= 0x40, /* Command packet pointer addr */
	PDC_HDMA_PKT_SUBMIT	= 0x100, /* Host DMA packet pointer addr */
	PDC_INT_SEQMASK		= 0x40,	/* Mask of asserted SEQ INTs */
	PDC_HDMA_CTLSTAT	= 0x12C, /* Host DMA control / status */

	PDC_CTLSTAT		= 0x60,	/* IDEn control / status */

	PDC_20621_SEQCTL	= 0x400,
	PDC_20621_SEQMASK	= 0x480,
	PDC_20621_GENERAL_CTL	= 0x484,
	PDC_20621_PAGE_SIZE	= (32 * 1024),

	/* chosen, not constant, values; we design our own DIMM mem map */
	PDC_20621_DIMM_WINDOW	= 0x0C,	/* page# for 32K DIMM window */
	PDC_20621_DIMM_BASE	= 0x00200000,
	PDC_20621_DIMM_DATA	= (64 * 1024),
	PDC_DIMM_DATA_STEP	= (256 * 1024),
	PDC_DIMM_WINDOW_STEP	= (8 * 1024),
	PDC_DIMM_HOST_PRD	= (6 * 1024),
	PDC_DIMM_HOST_PKT	= (128 * 0),
	PDC_DIMM_HPKT_PRD	= (128 * 1),
	PDC_DIMM_ATA_PKT	= (128 * 2),
	PDC_DIMM_APKT_PRD	= (128 * 3),
	PDC_DIMM_HEADER_SZ	= PDC_DIMM_APKT_PRD + 128,
	PDC_PAGE_WINDOW		= 0x40,
	PDC_PAGE_DATA		= PDC_PAGE_WINDOW +
				  (PDC_20621_DIMM_DATA / PDC_20621_PAGE_SIZE),
	PDC_PAGE_SET		= PDC_DIMM_DATA_STEP / PDC_20621_PAGE_SIZE,

	PDC_CHIP0_OFS		= 0xC0000, /* offset of chip #0 */

	PDC_20621_ERR_MASK	= (1<<19) | (1<<20) | (1<<21) | (1<<22) |
				  (1<<23),

	board_20621		= 0,	/* FastTrak S150 SX4 */

	PDC_MASK_INT		= (1 << 10), /* HDMA/ATA mask int */
	PDC_RESET		= (1 << 11), /* HDMA/ATA reset */
	PDC_DMA_ENABLE		= (1 << 7),  /* DMA start/stop */

	PDC_MAX_HDMA		= 32,
	PDC_HDMA_Q_MASK		= (PDC_MAX_HDMA - 1),

	PDC_DIMM0_SPD_DEV_ADDRESS	= 0x50,
	PDC_DIMM1_SPD_DEV_ADDRESS	= 0x51,
	PDC_I2C_CONTROL			= 0x48,
	PDC_I2C_ADDR_DATA		= 0x4C,
	PDC_DIMM0_CONTROL		= 0x80,
	PDC_DIMM1_CONTROL		= 0x84,
	PDC_SDRAM_CONTROL		= 0x88,
	PDC_I2C_WRITE			= 0,		/* master -> slave */
	PDC_I2C_READ			= (1 << 6),	/* master <- slave */
	PDC_I2C_START			= (1 << 7),	/* start I2C proto */
	PDC_I2C_MASK_INT		= (1 << 5),	/* mask I2C interrupt */
	PDC_I2C_COMPLETE		= (1 << 16),	/* I2C normal compl. */
	PDC_I2C_NO_ACK			= (1 << 20),	/* slave no-ack addr */
	PDC_DIMM_SPD_SUBADDRESS_START	= 0x00,
	PDC_DIMM_SPD_SUBADDRESS_END	= 0x7F,
	PDC_DIMM_SPD_ROW_NUM		= 3,
	PDC_DIMM_SPD_COLUMN_NUM		= 4,
	PDC_DIMM_SPD_MODULE_ROW		= 5,
	PDC_DIMM_SPD_TYPE		= 11,
	PDC_DIMM_SPD_FRESH_RATE		= 12,
	PDC_DIMM_SPD_BANK_NUM		= 17,
	PDC_DIMM_SPD_CAS_LATENCY	= 18,
	PDC_DIMM_SPD_ATTRIBUTE		= 21,
	PDC_DIMM_SPD_ROW_PRE_CHARGE	= 27,
	PDC_DIMM_SPD_ROW_ACTIVE_DELAY	= 28,
	PDC_DIMM_SPD_RAS_CAS_DELAY	= 29,
	PDC_DIMM_SPD_ACTIVE_PRECHARGE	= 30,
	PDC_DIMM_SPD_SYSTEM_FREQ	= 126,
	PDC_CTL_STATUS			= 0x08,
	PDC_DIMM_WINDOW_CTLR		= 0x0C,
	PDC_TIME_CONTROL		= 0x3C,
	PDC_TIME_PERIOD			= 0x40,
	PDC_TIME_COUNTER		= 0x44,
	PDC_GENERAL_CTLR		= 0x484,
	PCI_PLL_INIT			= 0x8A531824,
	PCI_X_TCOUNT			= 0xEE1E5CFF,

	/* PDC_TIME_CONTROL bits */
	PDC_TIMER_BUZZER		= (1 << 10),
	PDC_TIMER_MODE_PERIODIC		= 0,		/* bits 9:8 == 00 */
	PDC_TIMER_MODE_ONCE		= (1 << 8),	/* bits 9:8 == 01 */
	PDC_TIMER_ENABLE		= (1 << 7),
	PDC_TIMER_MASK_INT		= (1 << 5),
	PDC_TIMER_SEQ_MASK		= 0x1f,		/* SEQ ID for timer */
	PDC_TIMER_DEFAULT		= PDC_TIMER_MODE_ONCE |
					  PDC_TIMER_ENABLE |
					  PDC_TIMER_MASK_INT,
};

#define ECC_ERASE_BUF_SZ (128 * 1024)

struct pdc_port_priv {
	u8			dimm_buf[(ATA_PRD_SZ * ATA_MAX_PRD) + 512];
	u8			*pkt;
	dma_addr_t		pkt_dma;
};

struct pdc_host_priv {
	unsigned int		doing_hdma;
	unsigned int		hdma_prod;
	unsigned int		hdma_cons;
	struct {
		struct ata_queued_cmd *qc;
		unsigned int	seq;
		unsigned long	pkt_ofs;
	} hdma[32];
};

static int pdc_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
static void pdc_error_handler(struct ata_port *ap);
static void pdc_freeze(struct ata_port *ap);
static void pdc_thaw(struct ata_port *ap);
static int pdc_port_start(struct ata_port *ap);
static void pdc20621_qc_prep(struct ata_queued_cmd *qc);
static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
static unsigned int pdc20621_dimm_init(struct ata_host *host);
static int pdc20621_detect_dimm(struct ata_host *host);
static unsigned int pdc20621_i2c_read(struct ata_host *host,
				      u32 device, u32 subaddr, u32 *pdata);
static int pdc20621_prog_dimm0(struct ata_host *host);
static unsigned int pdc20621_prog_dimm_global(struct ata_host *host);
#ifdef ATA_VERBOSE_DEBUG
static void pdc20621_get_from_dimm(struct ata_host *host,
				   void *psource, u32 offset, u32 size);
#endif
static void pdc20621_put_to_dimm(struct ata_host *host,
				 void *psource, u32 offset, u32 size);
static void pdc20621_irq_clear(struct ata_port *ap);
static unsigned int pdc20621_qc_issue(struct ata_queued_cmd *qc);
static int pdc_softreset(struct ata_link *link, unsigned int *class,
			 unsigned long deadline);
static void pdc_post_internal_cmd(struct ata_queued_cmd *qc);
static int pdc_check_atapi_dma(struct ata_queued_cmd *qc);

static struct scsi_host_template pdc_sata_sht = {
	ATA_BASE_SHT(DRV_NAME),
	.sg_tablesize		= LIBATA_MAX_PRD,
	.dma_boundary		= ATA_DMA_BOUNDARY,
};

/* TODO: inherit from base port_ops after converting to new EH */
static struct ata_port_operations pdc_20621_ops = {
	.inherits		= &ata_sff_port_ops,

	.check_atapi_dma	= pdc_check_atapi_dma,
	.qc_prep		= pdc20621_qc_prep,
	.qc_issue		= pdc20621_qc_issue,

	.freeze			= pdc_freeze,
	.thaw			= pdc_thaw,
	.softreset		= pdc_softreset,
	.error_handler		= pdc_error_handler,
	.lost_interrupt		= ATA_OP_NULL,
	.post_internal_cmd	= pdc_post_internal_cmd,

	.port_start		= pdc_port_start,

	.sff_tf_load		= pdc_tf_load_mmio,
	.sff_exec_command	= pdc_exec_command_mmio,
	.sff_irq_clear		= pdc20621_irq_clear,
};

static const struct ata_port_info pdc_port_info[] = {
	/* board_20621 */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_ATAPI |
				  ATA_FLAG_PIO_POLLING,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &pdc_20621_ops,
	},
};

static const struct pci_device_id pdc_sata_pci_tbl[] = {
	{ PCI_VDEVICE(PROMISE, 0x6622), board_20621 },

	{ }	/* terminate list */
};

static struct pci_driver pdc_sata_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= pdc_sata_pci_tbl,
	.probe			= pdc_sata_init_one,
	.remove			= ata_pci_remove_one,
};

static int pdc_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct pdc_port_priv *pp;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	pp->pkt = dmam_alloc_coherent(dev, 128, &pp->pkt_dma, GFP_KERNEL);
	if (!pp->pkt)
		return -ENOMEM;

	ap->private_data = pp;

	return 0;
}

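/*
 * pdc20621_ata_sg() and pdc20621_host_sg() each emit a single-entry
 * PRD (S/G) table into the per-port header area of the DIMM buffer:
 * both entries point at the port's data region in DIMM memory
 * (DIMM base + 64K + 256K * port) and cover total_len bytes, with
 * ATA_PRD_EOT marking the end of the table.
 */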
static inline void pdc20621_ata_sg(struct ata_taskfile *tf, u8 *buf,
				   unsigned int portno,
				   unsigned int total_len)
{
	u32 addr;
	unsigned int dw = PDC_DIMM_APKT_PRD >> 2;
	__le32 *buf32 = (__le32 *) buf;

	/* output ATA packet S/G table */
	addr = PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA +
	       (PDC_DIMM_DATA_STEP * portno);
	VPRINTK("ATA sg addr 0x%x, %d\n", addr, addr);
	buf32[dw] = cpu_to_le32(addr);
	buf32[dw + 1] = cpu_to_le32(total_len | ATA_PRD_EOT);

	VPRINTK("ATA PSG @ %x == (0x%x, 0x%x)\n",
		PDC_20621_DIMM_BASE +
		       (PDC_DIMM_WINDOW_STEP * portno) +
		       PDC_DIMM_APKT_PRD,
		buf32[dw], buf32[dw + 1]);
}

static inline void pdc20621_host_sg(struct ata_taskfile *tf, u8 *buf,
				    unsigned int portno,
				    unsigned int total_len)
{
	u32 addr;
	unsigned int dw = PDC_DIMM_HPKT_PRD >> 2;
	__le32 *buf32 = (__le32 *) buf;

	/* output Host DMA packet S/G table */
	addr = PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA +
	       (PDC_DIMM_DATA_STEP * portno);

	buf32[dw] = cpu_to_le32(addr);
	buf32[dw + 1] = cpu_to_le32(total_len | ATA_PRD_EOT);

	VPRINTK("HOST PSG @ %x == (0x%x, 0x%x)\n",
		PDC_20621_DIMM_BASE +
		       (PDC_DIMM_WINDOW_STEP * portno) +
		       PDC_DIMM_HPKT_PRD,
		buf32[dw], buf32[dw + 1]);
}

static inline unsigned int pdc20621_ata_pkt(struct ata_taskfile *tf,
					    unsigned int devno, u8 *buf,
					    unsigned int portno)
{
	unsigned int i, dw;
	__le32 *buf32 = (__le32 *) buf;
	u8 dev_reg;

	unsigned int dimm_sg = PDC_20621_DIMM_BASE +
			       (PDC_DIMM_WINDOW_STEP * portno) +
			       PDC_DIMM_APKT_PRD;
	VPRINTK("ENTER, dimm_sg == 0x%x, %d\n", dimm_sg, dimm_sg);

	i = PDC_DIMM_ATA_PKT;

	/*
	 * Set up ATA packet
	 */
	if ((tf->protocol == ATA_PROT_DMA) && (!(tf->flags & ATA_TFLAG_WRITE)))
		buf[i++] = PDC_PKT_READ;
	else if (tf->protocol == ATA_PROT_NODATA)
		buf[i++] = PDC_PKT_NODATA;
	else
		buf[i++] = 0;
	buf[i++] = 0;			/* reserved */
	buf[i++] = portno + 1;		/* seq. id */
	buf[i++] = 0xff;		/* delay seq. id */

	/* dimm dma S/G, and next-pkt */
	dw = i >> 2;
	if (tf->protocol == ATA_PROT_NODATA)
		buf32[dw] = 0;
	else
		buf32[dw] = cpu_to_le32(dimm_sg);
	buf32[dw + 1] = 0;
	i += 8;

	if (devno == 0)
		dev_reg = ATA_DEVICE_OBS;
	else
		dev_reg = ATA_DEVICE_OBS | ATA_DEV1;

	/* select device */
	buf[i++] = (1 << 5) | PDC_PKT_CLEAR_BSY | ATA_REG_DEVICE;
	buf[i++] = dev_reg;

	/* device control register */
	buf[i++] = (1 << 5) | PDC_REG_DEVCTL;
	buf[i++] = tf->ctl;

	return i;
}

static inline void pdc20621_host_pkt(struct ata_taskfile *tf, u8 *buf,
				     unsigned int portno)
{
	unsigned int dw;
	u32 tmp;
	__le32 *buf32 = (__le32 *) buf;

	unsigned int host_sg = PDC_20621_DIMM_BASE +
			       (PDC_DIMM_WINDOW_STEP * portno) +
			       PDC_DIMM_HOST_PRD;
	unsigned int dimm_sg = PDC_20621_DIMM_BASE +
			       (PDC_DIMM_WINDOW_STEP * portno) +
			       PDC_DIMM_HPKT_PRD;
	VPRINTK("ENTER, dimm_sg == 0x%x, %d\n", dimm_sg, dimm_sg);
	VPRINTK("host_sg == 0x%x, %d\n", host_sg, host_sg);

	dw = PDC_DIMM_HOST_PKT >> 2;

	/*
	 * Set up Host DMA packet
	 */
	if ((tf->protocol == ATA_PROT_DMA) && (!(tf->flags & ATA_TFLAG_WRITE)))
		tmp = PDC_PKT_READ;
	else
		tmp = 0;
	tmp |= ((portno + 1 + 4) << 16);	/* seq. id */
	tmp |= (0xff << 24);			/* delay seq. id */
	buf32[dw + 0] = cpu_to_le32(tmp);
	buf32[dw + 1] = cpu_to_le32(host_sg);
	buf32[dw + 2] = cpu_to_le32(dimm_sg);
	buf32[dw + 3] = 0;

	VPRINTK("HOST PKT @ %x == (0x%x 0x%x 0x%x 0x%x)\n",
		PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * portno) +
			PDC_DIMM_HOST_PKT,
		buf32[dw + 0],
		buf32[dw + 1],
		buf32[dw + 2],
		buf32[dw + 3]);
}

static void pdc20621_dma_prep(struct ata_queued_cmd *qc)
{
	struct scatterlist *sg;
	struct ata_port *ap = qc->ap;
	struct pdc_port_priv *pp = ap->private_data;
	void __iomem *mmio = ap->host->iomap[PDC_MMIO_BAR];
	void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
	unsigned int portno = ap->port_no;
	unsigned int i, si, idx, total_len = 0, sgt_len;
	__le32 *buf = (__le32 *) &pp->dimm_buf[PDC_DIMM_HEADER_SZ];

	WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));

	VPRINTK("ata%u: ENTER\n", ap->print_id);

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	/*
	 * Build S/G table
	 */
	idx = 0;
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		buf[idx++] = cpu_to_le32(sg_dma_address(sg));
		buf[idx++] = cpu_to_le32(sg_dma_len(sg));
		total_len += sg_dma_len(sg);
	}
	buf[idx - 1] |= cpu_to_le32(ATA_PRD_EOT);
	sgt_len = idx * 4;

	/*
	 * Build ATA, host DMA packets
	 */
	pdc20621_host_sg(&qc->tf, &pp->dimm_buf[0], portno, total_len);
	pdc20621_host_pkt(&qc->tf, &pp->dimm_buf[0], portno);

	pdc20621_ata_sg(&qc->tf, &pp->dimm_buf[0], portno, total_len);
	i = pdc20621_ata_pkt(&qc->tf, qc->dev->devno, &pp->dimm_buf[0], portno);

	if (qc->tf.flags & ATA_TFLAG_LBA48)
		i = pdc_prep_lba48(&qc->tf, &pp->dimm_buf[0], i);
	else
		i = pdc_prep_lba28(&qc->tf, &pp->dimm_buf[0], i);

	pdc_pkt_footer(&qc->tf, &pp->dimm_buf[0], i);

	/* copy three S/G tables and two packets to DIMM MMIO window */
	memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP),
		    &pp->dimm_buf, PDC_DIMM_HEADER_SZ);
	memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP) +
		    PDC_DIMM_HOST_PRD,
		    &pp->dimm_buf[PDC_DIMM_HEADER_SZ], sgt_len);

	/* force host FIFO dump */
	writel(0x00000001, mmio + PDC_20621_GENERAL_CTL);

	readl(dimm_mmio);	/* MMIO PCI posting flush */

	VPRINTK("ata pkt buf ofs %u, prd size %u, mmio copied\n", i, sgt_len);
}

static void pdc20621_nodata_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct pdc_port_priv *pp = ap->private_data;
	void __iomem *mmio = ap->host->iomap[PDC_MMIO_BAR];
	void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
	unsigned int portno = ap->port_no;
	unsigned int i;

	VPRINTK("ata%u: ENTER\n", ap->print_id);

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	i = pdc20621_ata_pkt(&qc->tf, qc->dev->devno, &pp->dimm_buf[0], portno);

	if (qc->tf.flags & ATA_TFLAG_LBA48)
		i = pdc_prep_lba48(&qc->tf, &pp->dimm_buf[0], i);
	else
		i = pdc_prep_lba28(&qc->tf, &pp->dimm_buf[0], i);

	pdc_pkt_footer(&qc->tf, &pp->dimm_buf[0], i);

	/* copy three S/G tables and two packets to DIMM MMIO window */
	memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP),
		    &pp->dimm_buf, PDC_DIMM_HEADER_SZ);

	/* force host FIFO dump */
	writel(0x00000001, mmio + PDC_20621_GENERAL_CTL);

	readl(dimm_mmio);	/* MMIO PCI posting flush */

	VPRINTK("ata pkt buf ofs %u, mmio copied\n", i);
}

static void pdc20621_qc_prep(struct ata_queued_cmd *qc)
{
	switch (qc->tf.protocol) {
	case ATA_PROT_DMA:
		pdc20621_dma_prep(qc);
		break;
	case ATA_PROT_NODATA:
		pdc20621_nodata_prep(qc);
		break;
	default:
		break;
	}
}

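/*
 * The chip has a single HDMA copy engine, so host-DMA transactions are
 * serialized in software: pdc20621_push_hdma() submits a packet
 * immediately if the engine is idle, otherwise it appends the request
 * to the 32-entry hdma[] ring in pdc_host_priv.  When an HDMA
 * completion is handled, pdc20621_pop_hdma() either submits the next
 * queued packet or marks the engine idle again.
 */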
static void __pdc20621_push_hdma(struct ata_queued_cmd *qc,
				 unsigned int seq,
				 u32 pkt_ofs)
{
	struct ata_port *ap = qc->ap;
	struct ata_host *host = ap->host;
	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
	readl(mmio + PDC_20621_SEQCTL + (seq * 4));	/* flush */

	writel(pkt_ofs, mmio + PDC_HDMA_PKT_SUBMIT);
	readl(mmio + PDC_HDMA_PKT_SUBMIT);	/* flush */
}

static void pdc20621_push_hdma(struct ata_queued_cmd *qc,
			       unsigned int seq,
			       u32 pkt_ofs)
{
	struct ata_port *ap = qc->ap;
	struct pdc_host_priv *pp = ap->host->private_data;
	unsigned int idx = pp->hdma_prod & PDC_HDMA_Q_MASK;

	if (!pp->doing_hdma) {
		__pdc20621_push_hdma(qc, seq, pkt_ofs);
		pp->doing_hdma = 1;
		return;
	}

	pp->hdma[idx].qc = qc;
	pp->hdma[idx].seq = seq;
	pp->hdma[idx].pkt_ofs = pkt_ofs;
	pp->hdma_prod++;
}

static void pdc20621_pop_hdma(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct pdc_host_priv *pp = ap->host->private_data;
	unsigned int idx = pp->hdma_cons & PDC_HDMA_Q_MASK;

	/* if nothing on queue, we're done */
	if (pp->hdma_prod == pp->hdma_cons) {
		pp->doing_hdma = 0;
		return;
	}

	__pdc20621_push_hdma(pp->hdma[idx].qc, pp->hdma[idx].seq,
			     pp->hdma[idx].pkt_ofs);
	pp->hdma_cons++;
}

#ifdef ATA_VERBOSE_DEBUG
static void pdc20621_dump_hdma(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int port_no = ap->port_no;
	void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];

	dimm_mmio += (port_no * PDC_DIMM_WINDOW_STEP);
	dimm_mmio += PDC_DIMM_HOST_PKT;

	printk(KERN_ERR "HDMA[0] == 0x%08X\n", readl(dimm_mmio));
	printk(KERN_ERR "HDMA[1] == 0x%08X\n", readl(dimm_mmio + 4));
	printk(KERN_ERR "HDMA[2] == 0x%08X\n", readl(dimm_mmio + 8));
	printk(KERN_ERR "HDMA[3] == 0x%08X\n", readl(dimm_mmio + 12));
}
#else
static inline void pdc20621_dump_hdma(struct ata_queued_cmd *qc) { }
#endif /* ATA_VERBOSE_DEBUG */

static void pdc20621_packet_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_host *host = ap->host;
	unsigned int port_no = ap->port_no;
	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
	u8 seq = (u8) (port_no + 1);
	unsigned int port_ofs;

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	VPRINTK("ata%u: ENTER\n", ap->print_id);

	wmb();			/* flush PRD, pkt writes */

	port_ofs = PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * port_no);

	/* if writing, we (1) DMA to DIMM, then (2) do ATA command */
	if (rw && qc->tf.protocol == ATA_PROT_DMA) {
		seq += 4;

		pdc20621_dump_hdma(qc);
		pdc20621_push_hdma(qc, seq, port_ofs + PDC_DIMM_HOST_PKT);
		VPRINTK("queued ofs 0x%x (%u), seq %u\n",
			port_ofs + PDC_DIMM_HOST_PKT,
			port_ofs + PDC_DIMM_HOST_PKT,
			seq);
	} else {
		writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
		readl(mmio + PDC_20621_SEQCTL + (seq * 4));	/* flush */

		writel(port_ofs + PDC_DIMM_ATA_PKT,
		       ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
		readl(ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
		VPRINTK("submitted ofs 0x%x (%u), seq %u\n",
			port_ofs + PDC_DIMM_ATA_PKT,
			port_ofs + PDC_DIMM_ATA_PKT,
			seq);
	}
}

static unsigned int pdc20621_qc_issue(struct ata_queued_cmd *qc)
{
	switch (qc->tf.protocol) {
	case ATA_PROT_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			break;
		/*FALLTHROUGH*/
	case ATA_PROT_DMA:
		pdc20621_packet_start(qc);
		return 0;

	case ATAPI_PROT_DMA:
		BUG();
		break;

	default:
		break;
	}

	return ata_sff_qc_issue(qc);
}

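/*
 * pdc20621_host_intr() implements the two-step completion described in
 * the theory-of-operation comment above.  For reads, the first
 * interrupt (ATA engine done) triggers submission of the HDMA packet
 * that copies the data out of the DIMM, and the second (HDMA done)
 * completes the qc.  For writes the order is reversed: HDMA into the
 * DIMM first, then the ATA packet, then completion.  The doing_hdma
 * argument tells the handler which of the two steps just finished.
 */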
static inline unsigned int pdc20621_host_intr(struct ata_port *ap,
					      struct ata_queued_cmd *qc,
					      unsigned int doing_hdma,
					      void __iomem *mmio)
{
	unsigned int port_no = ap->port_no;
	unsigned int port_ofs =
		PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * port_no);
	u8 status;
	unsigned int handled = 0;

	VPRINTK("ENTER\n");

	if ((qc->tf.protocol == ATA_PROT_DMA) &&	/* read */
	    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {

		/* step two - DMA from DIMM to host */
		if (doing_hdma) {
			VPRINTK("ata%u: read hdma, 0x%x 0x%x\n", ap->print_id,
				readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
			/* get drive status; clear intr; complete txn */
			qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
			ata_qc_complete(qc);
			pdc20621_pop_hdma(qc);
		}

		/* step one - exec ATA command */
		else {
			u8 seq = (u8) (port_no + 1 + 4);
			VPRINTK("ata%u: read ata, 0x%x 0x%x\n", ap->print_id,
				readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));

			/* submit hdma pkt */
			pdc20621_dump_hdma(qc);
			pdc20621_push_hdma(qc, seq,
					   port_ofs + PDC_DIMM_HOST_PKT);
		}
		handled = 1;

	} else if (qc->tf.protocol == ATA_PROT_DMA) {	/* write */

		/* step one - DMA from host to DIMM */
		if (doing_hdma) {
			u8 seq = (u8) (port_no + 1);
			VPRINTK("ata%u: write hdma, 0x%x 0x%x\n", ap->print_id,
				readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));

			/* submit ata pkt */
			writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
			readl(mmio + PDC_20621_SEQCTL + (seq * 4));
			writel(port_ofs + PDC_DIMM_ATA_PKT,
			       ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
			readl(ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
		}

		/* step two - execute ATA command */
		else {
			VPRINTK("ata%u: write ata, 0x%x 0x%x\n", ap->print_id,
				readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
			/* get drive status; clear intr; complete txn */
			qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
			ata_qc_complete(qc);
			pdc20621_pop_hdma(qc);
		}
		handled = 1;

	/* command completion, but no data xfer */
	} else if (qc->tf.protocol == ATA_PROT_NODATA) {

		status = ata_sff_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
		DPRINTK("BUS_NODATA (drv_stat 0x%X)\n", status);
		qc->err_mask |= ac_err_mask(status);
		ata_qc_complete(qc);
		handled = 1;

	} else {
		ap->stats.idle_irq++;
	}

	return handled;
}

static void pdc20621_irq_clear(struct ata_port *ap)
{
	ioread8(ap->ioaddr.status_addr);
}

static irqreturn_t pdc20621_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct ata_port *ap;
	u32 mask = 0;
	unsigned int i, tmp, port_no;
	unsigned int handled = 0;
	void __iomem *mmio_base;

	VPRINTK("ENTER\n");

	if (!host || !host->iomap[PDC_MMIO_BAR]) {
		VPRINTK("QUICK EXIT\n");
		return IRQ_NONE;
	}

	mmio_base = host->iomap[PDC_MMIO_BAR];

	/* reading should also clear interrupts */
	mmio_base += PDC_CHIP0_OFS;
	mask = readl(mmio_base + PDC_20621_SEQMASK);
	VPRINTK("mask == 0x%x\n", mask);

	if (mask == 0xffffffff) {
		VPRINTK("QUICK EXIT 2\n");
		return IRQ_NONE;
	}
	mask &= 0xffff;		/* only 16 tags possible */
	if (!mask) {
		VPRINTK("QUICK EXIT 3\n");
		return IRQ_NONE;
	}

	spin_lock(&host->lock);

	for (i = 1; i < 9; i++) {
		port_no = i - 1;
		if (port_no > 3)
			port_no -= 4;
		if (port_no >= host->n_ports)
			ap = NULL;
		else
			ap = host->ports[port_no];
		tmp = mask & (1 << i);
		VPRINTK("seq %u, port_no %u, ap %p, tmp %x\n", i, port_no, ap, tmp);
		if (tmp && ap) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
				handled += pdc20621_host_intr(ap, qc, (i > 4),
							      mmio_base);
		}
	}

	spin_unlock(&host->lock);

	VPRINTK("mask == 0x%x\n", mask);

	VPRINTK("EXIT\n");

	return IRQ_RETVAL(handled);
}

static void pdc_freeze(struct ata_port *ap)
{
	void __iomem *mmio = ap->ioaddr.cmd_addr;
	u32 tmp;

	/* FIXME: if all 4 ATA engines are stopped, also stop HDMA engine */

	tmp = readl(mmio + PDC_CTLSTAT);
	tmp |= PDC_MASK_INT;
	tmp &= ~PDC_DMA_ENABLE;
	writel(tmp, mmio + PDC_CTLSTAT);
	readl(mmio + PDC_CTLSTAT);	/* flush */
}

static void pdc_thaw(struct ata_port *ap)
{
	void __iomem *mmio = ap->ioaddr.cmd_addr;
	u32 tmp;

	/* FIXME: start HDMA engine, if zero ATA engines running */

	/* clear IRQ */
	ioread8(ap->ioaddr.status_addr);

	/* turn IRQ back on */
	tmp = readl(mmio + PDC_CTLSTAT);
	tmp &= ~PDC_MASK_INT;
	writel(tmp, mmio + PDC_CTLSTAT);
	readl(mmio + PDC_CTLSTAT);	/* flush */
}

static void pdc_reset_port(struct ata_port *ap)
{
	void __iomem *mmio = ap->ioaddr.cmd_addr + PDC_CTLSTAT;
	unsigned int i;
	u32 tmp;

	/* FIXME: handle HDMA copy engine */

	for (i = 11; i > 0; i--) {
		tmp = readl(mmio);
		if (tmp & PDC_RESET)
			break;

		udelay(100);
	}

	tmp |= PDC_RESET;
	writel(tmp, mmio);
	udelay(20);

	tmp &= ~PDC_RESET;
	writel(tmp, mmio);
	readl(mmio);	/* flush */
}

static int pdc_softreset(struct ata_link *link, unsigned int *class,
			 unsigned long deadline)
{
	pdc_reset_port(link->ap);
	return ata_sff_softreset(link, class, deadline);
}

static void pdc_error_handler(struct ata_port *ap)
{
	if (!(ap->pflags & ATA_PFLAG_FROZEN))
		pdc_reset_port(ap);

	ata_sff_error_handler(ap);
}

static void pdc_post_internal_cmd(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* make DMA engine forget about the failed command */
	if (qc->flags & ATA_QCFLAG_FAILED)
		pdc_reset_port(ap);
}

static int pdc_check_atapi_dma(struct ata_queued_cmd *qc)
{
	u8 *scsicmd = qc->scsicmd->cmnd;
	int pio = 1; /* atapi dma off by default */

	/* Whitelist commands that may use DMA. */
	switch (scsicmd[0]) {
	case WRITE_12:
	case WRITE_10:
	case WRITE_6:
	case READ_12:
	case READ_10:
	case READ_6:
	case 0xad:  /* READ_DVD_STRUCTURE */
	case 0xbe:  /* READ_CD */
		pio = 0;
	}

	/* -45150 (FFFF4FA2) to -1 (FFFFFFFF) shall use PIO mode */
	if (scsicmd[0] == WRITE_10) {
		unsigned int lba =
			(scsicmd[2] << 24) |
			(scsicmd[3] << 16) |
			(scsicmd[4] << 8) |
			scsicmd[5];
		if (lba >= 0xFFFF4FA2)
			pio = 1;
	}
	return pio;
}

static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
{
	WARN_ON(tf->protocol == ATA_PROT_DMA ||
		tf->protocol == ATAPI_PROT_DMA);
	ata_sff_tf_load(ap, tf);
}

static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
{
	WARN_ON(tf->protocol == ATA_PROT_DMA ||
		tf->protocol == ATAPI_PROT_DMA);
	ata_sff_exec_command(ap, tf);
}

static void pdc_sata_setup_port(struct ata_ioports *port, void __iomem *base)
{
	port->cmd_addr		= base;
	port->data_addr		= base;
	port->feature_addr	=
	port->error_addr	= base + 0x4;
	port->nsect_addr	= base + 0x8;
	port->lbal_addr		= base + 0xc;
	port->lbam_addr		= base + 0x10;
	port->lbah_addr		= base + 0x14;
	port->device_addr	= base + 0x18;
	port->command_addr	=
	port->status_addr	= base + 0x1c;
	port->altstatus_addr	=
	port->ctl_addr		= base + 0x38;
}

#ifdef ATA_VERBOSE_DEBUG
static void pdc20621_get_from_dimm(struct ata_host *host, void *psource,
				   u32 offset, u32 size)
{
	u32 window_size;
	u16 idx;
	u8 page_mask;
	long dist;
	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
	void __iomem *dimm_mmio = host->iomap[PDC_DIMM_BAR];

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	page_mask = 0x00;
	window_size = 0x2000 * 4; /* 32K byte uchar size */
	idx = (u16) (offset / window_size);

	writel(0x01, mmio + PDC_GENERAL_CTLR);
	readl(mmio + PDC_GENERAL_CTLR);
	writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
	readl(mmio + PDC_DIMM_WINDOW_CTLR);

	offset -= (idx * window_size);
	idx++;
	dist = ((long) (window_size - (offset + size))) >= 0 ? size :
		(long) (window_size - offset);
	memcpy_fromio((char *) psource, (char *) (dimm_mmio + offset / 4),
		      dist);

	psource += dist;
	size -= dist;
	for (; (long) size >= (long) window_size ;) {
		writel(0x01, mmio + PDC_GENERAL_CTLR);
		readl(mmio + PDC_GENERAL_CTLR);
		writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
		readl(mmio + PDC_DIMM_WINDOW_CTLR);
		memcpy_fromio((char *) psource, (char *) (dimm_mmio),
			      window_size / 4);
		psource += window_size;
		size -= window_size;
		idx++;
	}

	if (size) {
		writel(0x01, mmio + PDC_GENERAL_CTLR);
		readl(mmio + PDC_GENERAL_CTLR);
		writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
		readl(mmio + PDC_DIMM_WINDOW_CTLR);
		memcpy_fromio((char *) psource, (char *) (dimm_mmio),
			      size / 4);
	}
}
#endif

static void pdc20621_put_to_dimm(struct ata_host *host, void *psource,
				 u32 offset, u32 size)
{
	u32 window_size;
	u16 idx;
	u8 page_mask;
	long dist;
	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
	void __iomem *dimm_mmio = host->iomap[PDC_DIMM_BAR];

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	page_mask = 0x00;
	window_size = 0x2000 * 4; /* 32K byte uchar size */
	idx = (u16) (offset / window_size);

	writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
	readl(mmio + PDC_DIMM_WINDOW_CTLR);
	offset -= (idx * window_size);
	idx++;
	dist = ((long)(s32)(window_size - (offset + size))) >= 0 ? size :
		(long) (window_size - offset);
	memcpy_toio(dimm_mmio + offset / 4, psource, dist);
	writel(0x01, mmio + PDC_GENERAL_CTLR);
	readl(mmio + PDC_GENERAL_CTLR);

	psource += dist;
	size -= dist;
	for (; (long) size >= (long) window_size ;) {
		writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
		readl(mmio + PDC_DIMM_WINDOW_CTLR);
		memcpy_toio(dimm_mmio, psource, window_size / 4);
		writel(0x01, mmio + PDC_GENERAL_CTLR);
		readl(mmio + PDC_GENERAL_CTLR);
		psource += window_size;
		size -= window_size;
		idx++;
	}

	if (size) {
		writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
		readl(mmio + PDC_DIMM_WINDOW_CTLR);
		memcpy_toio(dimm_mmio, psource, size / 4);
		writel(0x01, mmio + PDC_GENERAL_CTLR);
		readl(mmio + PDC_GENERAL_CTLR);
	}
}

static unsigned int pdc20621_i2c_read(struct ata_host *host, u32 device,
				      u32 subaddr, u32 *pdata)
{
	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
	u32 i2creg = 0;
	u32 status;
	u32 count = 0;

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	i2creg |= device << 24;
	i2creg |= subaddr << 16;

	/* Set the device and subaddress */
	writel(i2creg, mmio + PDC_I2C_ADDR_DATA);
	readl(mmio + PDC_I2C_ADDR_DATA);

	/* Write Control to perform read operation, mask int */
	writel(PDC_I2C_READ | PDC_I2C_START | PDC_I2C_MASK_INT,
	       mmio + PDC_I2C_CONTROL);

	for (count = 0; count <= 1000; count ++) {
		status = readl(mmio + PDC_I2C_CONTROL);
		if (status & PDC_I2C_COMPLETE) {
			status = readl(mmio + PDC_I2C_ADDR_DATA);
			break;
		} else if (count == 1000)
			return 0;
	}

	*pdata = (status >> 8) & 0x000000ff;
	return 1;
}

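/*
 * pdc20621_detect_dimm() classifies the plugged-in DIMM by reading its
 * SPD EEPROM over the I2C interface above: SPD byte 126 (system
 * frequency) equal to 100 selects 100 MHz operation; otherwise SPD
 * byte 9 (nominally the SDRAM cycle time) of 0x75 or less selects
 * 133 MHz, and anything else is treated as "no usable DIMM".
 */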
static int pdc20621_detect_dimm(struct ata_host *host)
{
	u32 data = 0;
	if (pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
			      PDC_DIMM_SPD_SYSTEM_FREQ, &data)) {
		if (data == 100)
			return 100;
	} else
		return 0;

	if (pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS, 9, &data)) {
		if (data <= 0x75)
			return 133;
	} else
		return 0;

	return 0;
}

static int pdc20621_prog_dimm0(struct ata_host *host)
{
	u32 spd0[50];
	u32 data = 0;
	int size, i;
	u8 bdimmsize;
	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
	static const struct {
		unsigned int reg;
		unsigned int ofs;
	} pdc_i2c_read_data [] = {
		{ PDC_DIMM_SPD_TYPE, 11 },
		{ PDC_DIMM_SPD_FRESH_RATE, 12 },
		{ PDC_DIMM_SPD_COLUMN_NUM, 4 },
		{ PDC_DIMM_SPD_ATTRIBUTE, 21 },
		{ PDC_DIMM_SPD_ROW_NUM, 3 },
		{ PDC_DIMM_SPD_BANK_NUM, 17 },
		{ PDC_DIMM_SPD_MODULE_ROW, 5 },
		{ PDC_DIMM_SPD_ROW_PRE_CHARGE, 27 },
		{ PDC_DIMM_SPD_ROW_ACTIVE_DELAY, 28 },
		{ PDC_DIMM_SPD_RAS_CAS_DELAY, 29 },
		{ PDC_DIMM_SPD_ACTIVE_PRECHARGE, 30 },
		{ PDC_DIMM_SPD_CAS_LATENCY, 18 },
	};

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	for (i = 0; i < ARRAY_SIZE(pdc_i2c_read_data); i++)
		pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
				  pdc_i2c_read_data[i].reg,
				  &spd0[pdc_i2c_read_data[i].ofs]);

	data |= (spd0[4] - 8) | ((spd0[21] != 0) << 3) | ((spd0[3]-11) << 4);
	data |= ((spd0[17] / 4) << 6) | ((spd0[5] / 2) << 7) |
		((((spd0[27] + 9) / 10) - 1) << 8) ;
	data |= (((((spd0[29] > spd0[28])
		    ? spd0[29] : spd0[28]) + 9) / 10) - 1) << 10;
	data |= ((spd0[30] - spd0[29] + 9) / 10 - 2) << 12;

	if (spd0[18] & 0x08)
		data |= ((0x03) << 14);
	else if (spd0[18] & 0x04)
		data |= ((0x02) << 14);
	else if (spd0[18] & 0x01)
		data |= ((0x01) << 14);
	else
		data |= (0 << 14);

	/*
	   Calculate the size of bDIMMSize (power of 2) and
	   merge the DIMM size by program start/end address.
	*/

	bdimmsize = spd0[4] + (spd0[5] / 2) + spd0[3] + (spd0[17] / 2) + 3;
	size = (1 << bdimmsize) >> 20;	/* size = xxx(MB) */
	data |= (((size / 16) - 1) << 16);
	data |= (0 << 23);
	data |= 8;
	writel(data, mmio + PDC_DIMM0_CONTROL);
	readl(mmio + PDC_DIMM0_CONTROL);
	return size;
}

static unsigned int pdc20621_prog_dimm_global(struct ata_host *host)
{
	u32 data, spd0;
	int error, i;
	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	/*
	  Set To Default : DIMM Module Global Control Register (0x022259F1)
	  DIMM Arbitration Disable (bit 20)
	  DIMM Data/Control Output Driving Selection (bit12 - bit15)
	  Refresh Enable (bit 17)
	*/

	data = 0x022259F1;
	writel(data, mmio + PDC_SDRAM_CONTROL);
	readl(mmio + PDC_SDRAM_CONTROL);

	/* Turn on for ECC */
	pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
			  PDC_DIMM_SPD_TYPE, &spd0);
	if (spd0 == 0x02) {
		data |= (0x01 << 16);
		writel(data, mmio + PDC_SDRAM_CONTROL);
		readl(mmio + PDC_SDRAM_CONTROL);
		printk(KERN_ERR "Local DIMM ECC Enabled\n");
	}

	/* DIMM Initialization Select/Enable (bit 18/19) */
	data &= (~(1<<18));
	data |= (1<<19);
	writel(data, mmio + PDC_SDRAM_CONTROL);

	error = 1;
	for (i = 1; i <= 10; i++) {   /* polling ~5 secs */
		data = readl(mmio + PDC_SDRAM_CONTROL);
		if (!(data & (1<<19))) {
			error = 0;
			break;
		}
		msleep(i*100);
	}
	return error;
}

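/*
 * pdc20621_dimm_init() first calibrates the SDRAM PLL: it loads the
 * on-chip timer with 0xffffffff, lets it run for three seconds, and
 * uses the number of ticks consumed to estimate the PCI bus clock.
 * On PCI-X (counter still above PCI_X_TCOUNT) the PLL F parameter is
 * derived from the measured clock; otherwise the fixed PCI_PLL_INIT
 * value is used.  It then reads the DIMM's SPD data to program the
 * DIMM controller and, for ECC modules, zero-fills the DIMM to
 * initialize the ECC bits.
 */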
static unsigned int pdc20621_dimm_init(struct ata_host *host)
{
	int speed, size, length;
	u32 addr, spd0, pci_status;
	u32 time_period = 0;
	u32 tcount = 0;
	u32 ticks = 0;
	u32 clock = 0;
	u32 fparam = 0;
	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	/* Initialize PLL based upon PCI Bus Frequency */

	/* Initialize Time Period Register */
	writel(0xffffffff, mmio + PDC_TIME_PERIOD);
	time_period = readl(mmio + PDC_TIME_PERIOD);
	VPRINTK("Time Period Register (0x40): 0x%x\n", time_period);

	/* Enable timer */
	writel(PDC_TIMER_DEFAULT, mmio + PDC_TIME_CONTROL);
	readl(mmio + PDC_TIME_CONTROL);

	/* Wait 3 seconds */
	msleep(3000);

	/*
	   When timer is enabled, counter is decreased every internal
	   clock cycle.
	*/

	tcount = readl(mmio + PDC_TIME_COUNTER);
	VPRINTK("Time Counter Register (0x44): 0x%x\n", tcount);

	/*
	   If SX4 is on PCI-X bus, after 3 seconds, the timer counter
	   register should be >= (0xffffffff - 3x10^8).
	*/
	if (tcount >= PCI_X_TCOUNT) {
		ticks = (time_period - tcount);
		VPRINTK("Num counters 0x%x (%d)\n", ticks, ticks);

		clock = (ticks / 300000);
		VPRINTK("10 * Internal clk = 0x%x (%d)\n", clock, clock);

		clock = (clock * 33);
		VPRINTK("10 * Internal clk * 33 = 0x%x (%d)\n", clock, clock);

		/* PLL F Param (bit 22:16) */
		fparam = (1400000 / clock) - 2;
		VPRINTK("PLL F Param: 0x%x (%d)\n", fparam, fparam);

		/* OD param = 0x2 (bit 31:30), R param = 0x5 (bit 29:25) */
		pci_status = (0x8a001824 | (fparam << 16));
	} else
		pci_status = PCI_PLL_INIT;

	/* Initialize PLL. */
	VPRINTK("pci_status: 0x%x\n", pci_status);
	writel(pci_status, mmio + PDC_CTL_STATUS);
	readl(mmio + PDC_CTL_STATUS);

	/*
	   Read SPD of DIMM by I2C interface,
	   and program the DIMM Module Controller.
	*/
	if (!(speed = pdc20621_detect_dimm(host))) {
		printk(KERN_ERR "Detect Local DIMM Fail\n");
		return 1;	/* DIMM error */
	}
	VPRINTK("Local DIMM Speed = %d\n", speed);

	/* Programming DIMM0 Module Control Register (index_CID0:80h) */
	size = pdc20621_prog_dimm0(host);
	VPRINTK("Local DIMM Size = %dMB\n", size);

	/* Programming DIMM Module Global Control Register (index_CID0:88h) */
	if (pdc20621_prog_dimm_global(host)) {
		printk(KERN_ERR "Programming DIMM Module Global Control Register Fail\n");
		return 1;
	}
#ifdef ATA_VERBOSE_DEBUG
	{
		u8 test_parttern1[40] =
			{0x55,0xAA,'P','r','o','m','i','s','e',' ',
			'N','o','t',' ','Y','e','t',' ',
			'D','e','f','i','n','e','d',' ',
			'1','.','1','0',
			'9','8','0','3','1','6','1','2',0,0};
		u8 test_parttern2[40] = {0};

		pdc20621_put_to_dimm(host, test_parttern2, 0x10040, 40);
		pdc20621_put_to_dimm(host, test_parttern2, 0x40, 40);

		pdc20621_put_to_dimm(host, test_parttern1, 0x10040, 40);
		pdc20621_get_from_dimm(host, test_parttern2, 0x40, 40);
		printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0],
		       test_parttern2[1], &(test_parttern2[2]));
		pdc20621_get_from_dimm(host, test_parttern2, 0x10040,
				       40);
		printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0],
		       test_parttern2[1], &(test_parttern2[2]));

		pdc20621_put_to_dimm(host, test_parttern1, 0x40, 40);
		pdc20621_get_from_dimm(host, test_parttern2, 0x40, 40);
		printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0],
		       test_parttern2[1], &(test_parttern2[2]));
	}
#endif

	/* ECC initialization. */

	pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
			  PDC_DIMM_SPD_TYPE, &spd0);
	if (spd0 == 0x02) {
		void *buf;
		VPRINTK("Start ECC initialization\n");
		addr = 0;
		length = size * 1024 * 1024;
		buf = kzalloc(ECC_ERASE_BUF_SZ, GFP_KERNEL);
		if (!buf)
			return 1;
		while (addr < length) {
			pdc20621_put_to_dimm(host, buf, addr,
					     ECC_ERASE_BUF_SZ);
			addr += ECC_ERASE_BUF_SZ;
		}
		kfree(buf);
		VPRINTK("Finish ECC initialization\n");
	}
	return 0;
}

static void pdc_20621_init(struct ata_host *host)
{
	u32 tmp;
	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	/*
	 * Select page 0x40 for our 32k DIMM window
	 */
	tmp = readl(mmio + PDC_20621_DIMM_WINDOW) & 0xffff0000;
	tmp |= PDC_PAGE_WINDOW;	/* page 40h; arbitrarily selected */
	writel(tmp, mmio + PDC_20621_DIMM_WINDOW);

	/*
	 * Reset Host DMA
	 */
	tmp = readl(mmio + PDC_HDMA_CTLSTAT);
	tmp |= PDC_RESET;
	writel(tmp, mmio + PDC_HDMA_CTLSTAT);
	readl(mmio + PDC_HDMA_CTLSTAT);		/* flush */

	udelay(10);

	tmp = readl(mmio + PDC_HDMA_CTLSTAT);
	tmp &= ~PDC_RESET;
	writel(tmp, mmio + PDC_HDMA_CTLSTAT);
	readl(mmio + PDC_HDMA_CTLSTAT);		/* flush */
}

static int pdc_sata_init_one(struct pci_dev *pdev,
			     const struct pci_device_id *ent)
{
	static int printed_version;
	const struct ata_port_info *ppi[] =
		{ &pdc_port_info[ent->driver_data], NULL };
	struct ata_host *host;
	struct pdc_host_priv *hpriv;
	int i, rc;

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 4);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;

	host->private_data = hpriv;

	/* acquire resources and fill host */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, (1 << PDC_MMIO_BAR) | (1 << PDC_DIMM_BAR),
				DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);

	for (i = 0; i < 4; i++) {
		struct ata_port *ap = host->ports[i];
		void __iomem *base = host->iomap[PDC_MMIO_BAR] + PDC_CHIP0_OFS;
		unsigned int offset = 0x200 + i * 0x80;

		pdc_sata_setup_port(&ap->ioaddr, base + offset);

		ata_port_pbar_desc(ap, PDC_MMIO_BAR, -1, "mmio");
		ata_port_pbar_desc(ap, PDC_DIMM_BAR, -1, "dimm");
		ata_port_pbar_desc(ap, PDC_MMIO_BAR, offset, "port");
	}

	/* configure and activate */
	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		return rc;
	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		return rc;

	if (pdc20621_dimm_init(host))
		return -ENOMEM;
	pdc_20621_init(host);

	pci_set_master(pdev);
	return ata_host_activate(host, pdev->irq, pdc20621_interrupt,
				 IRQF_SHARED, &pdc_sata_sht);
}

static int __init pdc_sata_init(void)
{
	return pci_register_driver(&pdc_sata_pci_driver);
}

static void __exit pdc_sata_exit(void)
{
	pci_unregister_driver(&pdc_sata_pci_driver);
}

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Promise SATA low-level driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, pdc_sata_pci_tbl);
MODULE_VERSION(DRV_VERSION);

module_init(pdc_sata_init);
module_exit(pdc_sata_exit);