/* sun_esp.c: ESP front-end for Sparc SBUS systems.
 *
 * Copyright (C) 2007, 2008 David S. Miller (davem@davemloft.net)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/gfp.h>

#include <asm/irq.h>
#include <asm/io.h>
#include <asm/dma.h>

#include <scsi/scsi_host.h>

#include "esp_scsi.h"

#define DRV_MODULE_NAME		"sun_esp"
#define PFX DRV_MODULE_NAME	": "
#define DRV_VERSION		"1.100"
#define DRV_MODULE_RELDATE	"August 27, 2008"

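/* Accessors for the DVMA controller's registers; both expect a local
 * "esp" pointer to be in scope at the call site.
 */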
#define dma_read32(REG) \
	sbus_readl(esp->dma_regs + (REG))
#define dma_write32(VAL, REG) \
	sbus_writel((VAL), esp->dma_regs + (REG))

/* DVMA chip revisions */
enum dvma_rev {
	dvmarev0,
	dvmaesc1,
	dvmarev1,
	dvmarev2,
	dvmarev3,
	dvmarevplus,
	dvmahme
};

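/* Remember the DVMA companion device, map its registers, and decode
 * the chip revision from the device ID field of DMA_CSR.
 */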
static int __devinit esp_sbus_setup_dma(struct esp *esp,
					struct platform_device *dma_of)
{
	esp->dma = dma_of;

	esp->dma_regs = of_ioremap(&dma_of->resource[0], 0,
				   resource_size(&dma_of->resource[0]),
				   "espdma");
	if (!esp->dma_regs)
		return -ENOMEM;

	switch (dma_read32(DMA_CSR) & DMA_DEVICE_ID) {
	case DMA_VERS0:
		esp->dmarev = dvmarev0;
		break;
	case DMA_ESCV1:
		esp->dmarev = dvmaesc1;
		break;
	case DMA_VERS1:
		esp->dmarev = dvmarev1;
		break;
	case DMA_VERS2:
		esp->dmarev = dvmarev2;
		break;
	case DMA_VERHME:
		esp->dmarev = dvmahme;
		break;
	case DMA_VERSPLUS:
		esp->dmarev = dvmarevplus;
		break;
	}

	return 0;
}

static int __devinit esp_sbus_map_regs(struct esp *esp, int hme)
{
	struct platform_device *op = esp->dev;
	struct resource *res;

	/* On HME, two reg sets exist, first is DVMA,
	 * second is ESP registers.
	 */
	if (hme)
		res = &op->resource[1];
	else
		res = &op->resource[0];

	esp->regs = of_ioremap(res, 0, SBUS_ESP_REG_SIZE, "ESP");
	if (!esp->regs)
		return -ENOMEM;

	return 0;
}

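/* Allocate the 16-byte DMA-coherent command block required by the
 * esp_scsi core.
 */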
static int __devinit esp_sbus_map_command_block(struct esp *esp)
{
	struct platform_device *op = esp->dev;

	esp->command_block = dma_alloc_coherent(&op->dev, 16,
						&esp->command_block_dma,
						GFP_ATOMIC);
	if (!esp->command_block)
		return -ENOMEM;
	return 0;
}

static int __devinit esp_sbus_register_irq(struct esp *esp)
{
	struct Scsi_Host *host = esp->host;
	struct platform_device *op = esp->dev;

	host->irq = op->archdata.irqs[0];
	return request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, "ESP", esp);
}

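/* Our initiator ID can come from several OF properties: try
 * "initiator-id" and "scsi-initiator-id" on the ESP node first,
 * then "scsi-initiator-id" on the DVMA node, defaulting to 7.
 */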
static void __devinit esp_get_scsi_id(struct esp *esp, struct platform_device *espdma)
{
	struct platform_device *op = esp->dev;
	struct device_node *dp;

	dp = op->dev.of_node;
	esp->scsi_id = of_getintprop_default(dp, "initiator-id", 0xff);
	if (esp->scsi_id != 0xff)
		goto done;

	esp->scsi_id = of_getintprop_default(dp, "scsi-initiator-id", 0xff);
	if (esp->scsi_id != 0xff)
		goto done;

	esp->scsi_id = of_getintprop_default(espdma->dev.of_node,
					     "scsi-initiator-id", 7);

done:
	esp->host->this_id = esp->scsi_id;
	esp->scsi_id_mask = (1 << esp->scsi_id);
}

static void __devinit esp_get_differential(struct esp *esp)
{
	struct platform_device *op = esp->dev;
	struct device_node *dp;

	dp = op->dev.of_node;
	if (of_find_property(dp, "differential", NULL))
		esp->flags |= ESP_FLAG_DIFFERENTIAL;
	else
		esp->flags &= ~ESP_FLAG_DIFFERENTIAL;
}

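/* The input clock is described by the "clock-frequency" property,
 * either on the ESP node itself or on its parent bus node.  The
 * value is in Hz, despite the "fmhz" variable name.
 */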
static void __devinit esp_get_clock_params(struct esp *esp)
{
	struct platform_device *op = esp->dev;
	struct device_node *bus_dp, *dp;
	int fmhz;

	dp = op->dev.of_node;
	bus_dp = dp->parent;

	fmhz = of_getintprop_default(dp, "clock-frequency", 0);
	if (fmhz == 0)
		fmhz = of_getintprop_default(bus_dp, "clock-frequency", 0);

	esp->cfreq = fmhz;
}

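/* Intersect the "burst-sizes" masks of the ESP node, the DVMA node,
 * and the DVMA's parent bus.  If no property was found, or the result
 * lacks 16- or 32-byte bursts, fall back to every size below 32 bytes.
 */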
static void __devinit esp_get_bursts(struct esp *esp, struct platform_device *dma_of)
{
	struct device_node *dma_dp = dma_of->dev.of_node;
	struct platform_device *op = esp->dev;
	struct device_node *dp;
	u8 bursts, val;

	dp = op->dev.of_node;
	bursts = of_getintprop_default(dp, "burst-sizes", 0xff);
	val = of_getintprop_default(dma_dp, "burst-sizes", 0xff);
	if (val != 0xff)
		bursts &= val;

	val = of_getintprop_default(dma_dp->parent, "burst-sizes", 0xff);
	if (val != 0xff)
		bursts &= val;

	if (bursts == 0xff ||
	    (bursts & DMA_BURST16) == 0 ||
	    (bursts & DMA_BURST32) == 0)
		bursts = (DMA_BURST32 - 1);

	esp->bursts = bursts;
}

static void __devinit esp_sbus_get_props(struct esp *esp, struct platform_device *espdma)
{
	esp_get_scsi_id(esp, espdma);
	esp_get_differential(esp);
	esp_get_clock_params(esp);
	esp_get_bursts(esp, espdma);
}

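/* ESP registers are longword-spaced on SBUS, so the register index
 * is scaled by four to form the byte offset.
 */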
static void sbus_esp_write8(struct esp *esp, u8 val, unsigned long reg)
{
	sbus_writeb(val, esp->regs + (reg * 4UL));
}

static u8 sbus_esp_read8(struct esp *esp, unsigned long reg)
{
	return sbus_readb(esp->regs + (reg * 4UL));
}

static dma_addr_t sbus_esp_map_single(struct esp *esp, void *buf,
				      size_t sz, int dir)
{
	struct platform_device *op = esp->dev;

	return dma_map_single(&op->dev, buf, sz, dir);
}

static int sbus_esp_map_sg(struct esp *esp, struct scatterlist *sg,
			   int num_sg, int dir)
{
	struct platform_device *op = esp->dev;

	return dma_map_sg(&op->dev, sg, num_sg, dir);
}

static void sbus_esp_unmap_single(struct esp *esp, dma_addr_t addr,
				  size_t sz, int dir)
{
	struct platform_device *op = esp->dev;

	dma_unmap_single(&op->dev, addr, sz, dir);
}

static void sbus_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
			      int num_sg, int dir)
{
	struct platform_device *op = esp->dev;

	dma_unmap_sg(&op->dev, sg, num_sg, dir);
}

static int sbus_esp_irq_pending(struct esp *esp)
{
	if (dma_read32(DMA_CSR) & (DMA_HNDL_INTR | DMA_HNDL_ERROR))
		return 1;
	return 0;
}

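/* Program the DVMA engine for the revision detected at probe time:
 * reset it, pick the clock setting and burst size, enable 64-bit
 * SBUS transfers on HME where available, then re-enable interrupts.
 */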
static void sbus_esp_reset_dma(struct esp *esp)
{
	int can_do_burst16, can_do_burst32, can_do_burst64;
	int can_do_sbus64, lim;
	struct platform_device *op;
	u32 val;

	can_do_burst16 = (esp->bursts & DMA_BURST16) != 0;
	can_do_burst32 = (esp->bursts & DMA_BURST32) != 0;
	can_do_burst64 = 0;
	can_do_sbus64 = 0;
	op = esp->dev;
	if (sbus_can_dma_64bit())
		can_do_sbus64 = 1;
	if (sbus_can_burst64())
		can_do_burst64 = (esp->bursts & DMA_BURST64) != 0;

	/* Put the DVMA into a known state. */
	if (esp->dmarev != dvmahme) {
		val = dma_read32(DMA_CSR);
		dma_write32(val | DMA_RST_SCSI, DMA_CSR);
		dma_write32(val & ~DMA_RST_SCSI, DMA_CSR);
	}
	switch (esp->dmarev) {
	case dvmahme:
		dma_write32(DMA_RESET_FAS366, DMA_CSR);
		dma_write32(DMA_RST_SCSI, DMA_CSR);

		esp->prev_hme_dmacsr = (DMA_PARITY_OFF | DMA_2CLKS |
					DMA_SCSI_DISAB | DMA_INT_ENAB);

		esp->prev_hme_dmacsr &= ~(DMA_ENABLE | DMA_ST_WRITE |
					  DMA_BRST_SZ);

		if (can_do_burst64)
			esp->prev_hme_dmacsr |= DMA_BRST64;
		else if (can_do_burst32)
			esp->prev_hme_dmacsr |= DMA_BRST32;

		if (can_do_sbus64) {
			esp->prev_hme_dmacsr |= DMA_SCSI_SBUS64;
			sbus_set_sbus64(&op->dev, esp->bursts);
		}

		lim = 1000;
		while (dma_read32(DMA_CSR) & DMA_PEND_READ) {
			if (--lim == 0) {
				printk(KERN_ALERT PFX "esp%d: DMA_PEND_READ "
				       "will not clear!\n",
				       esp->host->unique_id);
				break;
			}
			udelay(1);
		}

		dma_write32(0, DMA_CSR);
		dma_write32(esp->prev_hme_dmacsr, DMA_CSR);

		dma_write32(0, DMA_ADDR);
		break;

	case dvmarev2:
		if (esp->rev != ESP100) {
			val = dma_read32(DMA_CSR);
			dma_write32(val | DMA_3CLKS, DMA_CSR);
		}
		break;

	case dvmarev3:
		val = dma_read32(DMA_CSR);
		val &= ~DMA_3CLKS;
		val |= DMA_2CLKS;
		if (can_do_burst32) {
			val &= ~DMA_BRST_SZ;
			val |= DMA_BRST32;
		}
		dma_write32(val, DMA_CSR);
		break;

	case dvmaesc1:
		val = dma_read32(DMA_CSR);
		val |= DMA_ADD_ENABLE;
		val &= ~DMA_BCNT_ENAB;
		if (!can_do_burst32 && can_do_burst16) {
			val |= DMA_ESC_BURST;
		} else {
			val &= ~(DMA_ESC_BURST);
		}
		dma_write32(val, DMA_CSR);
		break;

	default:
		break;
	}

	/* Enable interrupts. */
	val = dma_read32(DMA_CSR);
	dma_write32(val | DMA_INT_ENAB, DMA_CSR);
}

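/* If the DVMA FIFO still holds data, start a drain on revisions that
 * need it kicked off manually and spin (up to ~1ms) until the
 * DMA_FIFO_ISDRAIN bit clears.  HME needs no draining at all.
 */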
static void sbus_esp_dma_drain(struct esp *esp)
{
	u32 csr;
	int lim;

	if (esp->dmarev == dvmahme)
		return;

	csr = dma_read32(DMA_CSR);
	if (!(csr & DMA_FIFO_ISDRAIN))
		return;

	if (esp->dmarev != dvmarev3 && esp->dmarev != dvmaesc1)
		dma_write32(csr | DMA_FIFO_STDRAIN, DMA_CSR);

	lim = 1000;
	while (dma_read32(DMA_CSR) & DMA_FIFO_ISDRAIN) {
		if (--lim == 0) {
			printk(KERN_ALERT PFX "esp%d: DMA will not drain!\n",
			       esp->host->unique_id);
			break;
		}
		udelay(1);
	}
}

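/* Throw away anything buffered in the DVMA.  On HME this is a SCSI
 * reset plus a reprogram of the cached CSR; on older revisions we
 * wait out any pending read and then pulse DMA_FIFO_INV.
 */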
static void sbus_esp_dma_invalidate(struct esp *esp)
{
	if (esp->dmarev == dvmahme) {
		dma_write32(DMA_RST_SCSI, DMA_CSR);

		esp->prev_hme_dmacsr = ((esp->prev_hme_dmacsr |
					 (DMA_PARITY_OFF | DMA_2CLKS |
					  DMA_SCSI_DISAB | DMA_INT_ENAB)) &
					~(DMA_ST_WRITE | DMA_ENABLE));

		dma_write32(0, DMA_CSR);
		dma_write32(esp->prev_hme_dmacsr, DMA_CSR);

		/* This is necessary to avoid having the SCSI channel
		 * engine lock up on us.
		 */
		dma_write32(0, DMA_ADDR);
	} else {
		u32 val;
		int lim;

		lim = 1000;
		while ((val = dma_read32(DMA_CSR)) & DMA_PEND_READ) {
			if (--lim == 0) {
				printk(KERN_ALERT PFX "esp%d: DMA will not "
				       "invalidate!\n", esp->host->unique_id);
				break;
			}
			udelay(1);
		}

		val &= ~(DMA_ENABLE | DMA_ST_WRITE | DMA_BCNT_ENAB);
		val |= DMA_FIFO_INV;
		dma_write32(val, DMA_CSR);
		val &= ~DMA_FIFO_INV;
		dma_write32(val, DMA_CSR);
	}
}

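/* Load the transfer count into the ESP (with a third count byte via
 * FAS_RLO on FASHME), program the DVMA address and CSR, and issue
 * the ESP command.  ESC1 wants DMA_COUNT rounded up so the transfer
 * ends on a page boundary.
 */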
static void sbus_esp_send_dma_cmd(struct esp *esp, u32 addr, u32 esp_count,
				  u32 dma_count, int write, u8 cmd)
{
	u32 csr;

	BUG_ON(!(cmd & ESP_CMD_DMA));

	sbus_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
	sbus_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
	if (esp->rev == FASHME) {
		sbus_esp_write8(esp, (esp_count >> 16) & 0xff, FAS_RLO);
		sbus_esp_write8(esp, 0, FAS_RHI);

		scsi_esp_cmd(esp, cmd);

		csr = esp->prev_hme_dmacsr;
		csr |= DMA_SCSI_DISAB | DMA_ENABLE;
		if (write)
			csr |= DMA_ST_WRITE;
		else
			csr &= ~DMA_ST_WRITE;
		esp->prev_hme_dmacsr = csr;

		dma_write32(dma_count, DMA_COUNT);
		dma_write32(addr, DMA_ADDR);
		dma_write32(csr, DMA_CSR);
	} else {
		csr = dma_read32(DMA_CSR);
		csr |= DMA_ENABLE;
		if (write)
			csr |= DMA_ST_WRITE;
		else
			csr &= ~DMA_ST_WRITE;
		dma_write32(csr, DMA_CSR);
		if (esp->dmarev == dvmaesc1) {
			u32 end = PAGE_ALIGN(addr + dma_count + 16U);
			dma_write32(end - addr, DMA_COUNT);
		}
		dma_write32(addr, DMA_ADDR);

		scsi_esp_cmd(esp, cmd);
	}
}

static int sbus_esp_dma_error(struct esp *esp)
{
	u32 csr = dma_read32(DMA_CSR);

	if (csr & DMA_HNDL_ERROR)
		return 1;

	return 0;
}

static const struct esp_driver_ops sbus_esp_ops = {
	.esp_write8	= sbus_esp_write8,
	.esp_read8	= sbus_esp_read8,
	.map_single	= sbus_esp_map_single,
	.map_sg		= sbus_esp_map_sg,
	.unmap_single	= sbus_esp_unmap_single,
	.unmap_sg	= sbus_esp_unmap_sg,
	.irq_pending	= sbus_esp_irq_pending,
	.reset_dma	= sbus_esp_reset_dma,
	.dma_drain	= sbus_esp_dma_drain,
	.dma_invalidate	= sbus_esp_dma_invalidate,
	.send_dma_cmd	= sbus_esp_send_dma_cmd,
	.dma_error	= sbus_esp_dma_error,
};

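/* Common probe path: allocate the Scsi_Host, hook up the SBUS ops,
 * map the DVMA and ESP registers, grab the IRQ, read the OF
 * properties, and hand the result to the esp_scsi core.
 */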
static int __devinit esp_sbus_probe_one(struct platform_device *op,
					struct platform_device *espdma,
					int hme)
{
	struct scsi_host_template *tpnt = &scsi_esp_template;
	struct Scsi_Host *host;
	struct esp *esp;
	int err;

	host = scsi_host_alloc(tpnt, sizeof(struct esp));

	err = -ENOMEM;
	if (!host)
		goto fail;

	host->max_id = (hme ? 16 : 8);
	esp = shost_priv(host);

	esp->host = host;
	esp->dev = op;
	esp->ops = &sbus_esp_ops;

	if (hme)
		esp->flags |= ESP_FLAG_WIDE_CAPABLE;

	err = esp_sbus_setup_dma(esp, espdma);
	if (err < 0)
		goto fail_unlink;

	err = esp_sbus_map_regs(esp, hme);
	if (err < 0)
		goto fail_unlink;

	err = esp_sbus_map_command_block(esp);
	if (err < 0)
		goto fail_unmap_regs;

	err = esp_sbus_register_irq(esp);
	if (err < 0)
		goto fail_unmap_command_block;

	esp_sbus_get_props(esp, espdma);

	/* Before we try to touch the ESP chip, ESC1 dma can
	 * come up with the reset bit set, so make sure that
	 * is clear first.
	 */
	if (esp->dmarev == dvmaesc1) {
		u32 val = dma_read32(DMA_CSR);

		dma_write32(val & ~DMA_RST_SCSI, DMA_CSR);
	}

	dev_set_drvdata(&op->dev, esp);

	err = scsi_esp_register(esp, &op->dev);
	if (err)
		goto fail_free_irq;

	return 0;

fail_free_irq:
	free_irq(host->irq, esp);
fail_unmap_command_block:
	dma_free_coherent(&op->dev, 16,
			  esp->command_block,
			  esp->command_block_dma);
fail_unmap_regs:
	of_iounmap(&op->resource[(hme ? 1 : 0)], esp->regs, SBUS_ESP_REG_SIZE);
fail_unlink:
	scsi_host_put(host);
fail:
	return err;
}

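/* Plain ESP nodes hang off an "espdma" or "dma" parent; HME parts
 * show up as "SUNW,fas" nodes that carry the DMA registers
 * themselves.
 */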
static int __devinit esp_sbus_probe(struct platform_device *op)
{
	struct device_node *dma_node = NULL;
	struct device_node *dp = op->dev.of_node;
	struct platform_device *dma_of = NULL;
	int hme = 0;

	if (dp->parent &&
	    (!strcmp(dp->parent->name, "espdma") ||
	     !strcmp(dp->parent->name, "dma")))
		dma_node = dp->parent;
	else if (!strcmp(dp->name, "SUNW,fas")) {
		dma_node = op->dev.of_node;
		hme = 1;
	}
	if (dma_node)
		dma_of = of_find_device_by_node(dma_node);
	if (!dma_of)
		return -ENODEV;

	return esp_sbus_probe_one(op, dma_of, hme);
}

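/* Tear down in the reverse order of esp_sbus_probe_one(): unregister
 * from the SCSI core, mask DVMA interrupts, then release the IRQ,
 * command block, and register mappings.
 */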
static int __devexit esp_sbus_remove(struct platform_device *op)
{
	struct esp *esp = dev_get_drvdata(&op->dev);
	struct platform_device *dma_of = esp->dma;
	unsigned int irq = esp->host->irq;
	bool is_hme;
	u32 val;

	scsi_esp_unregister(esp);

	/* Disable interrupts. */
	val = dma_read32(DMA_CSR);
	dma_write32(val & ~DMA_INT_ENAB, DMA_CSR);

	free_irq(irq, esp);

	is_hme = (esp->dmarev == dvmahme);

	dma_free_coherent(&op->dev, 16,
			  esp->command_block,
			  esp->command_block_dma);
	of_iounmap(&op->resource[(is_hme ? 1 : 0)], esp->regs,
		   SBUS_ESP_REG_SIZE);
	of_iounmap(&dma_of->resource[0], esp->dma_regs,
		   resource_size(&dma_of->resource[0]));

	scsi_host_put(esp->host);

	dev_set_drvdata(&op->dev, NULL);

	return 0;
}

static const struct of_device_id esp_match[] = {
	{
		.name = "SUNW,esp",
	},
	{
		.name = "SUNW,fas",
	},
	{
		.name = "esp",
	},
	{},
};
MODULE_DEVICE_TABLE(of, esp_match);

static struct platform_driver esp_sbus_driver = {
	.driver = {
		.name = "esp",
		.owner = THIS_MODULE,
		.of_match_table = esp_match,
	},
	.probe		= esp_sbus_probe,
	.remove		= __devexit_p(esp_sbus_remove),
};

static int __init sunesp_init(void)
{
	return platform_driver_register(&esp_sbus_driver);
}

static void __exit sunesp_exit(void)
{
	platform_driver_unregister(&esp_sbus_driver);
}

MODULE_DESCRIPTION("Sun ESP SCSI driver");
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

module_init(sunesp_init);
module_exit(sunesp_exit);