esp: move some definitions to header file
[qemu/qmp-unstable.git] / hw / esp.c
blob60c22215801d7dc6003a610993dab0a32d048622
1 /*
2 * QEMU ESP/NCR53C9x emulation
4 * Copyright (c) 2005-2006 Fabrice Bellard
5 * Copyright (c) 2012 Herve Poussineau
7 * Permission is hereby granted, free of charge, to any person obtaining a copy
8 * of this software and associated documentation files (the "Software"), to deal
9 * in the Software without restriction, including without limitation the rights
10 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11 * copies of the Software, and to permit persons to whom the Software is
12 * furnished to do so, subject to the following conditions:
14 * The above copyright notice and this permission notice shall be included in
15 * all copies or substantial portions of the Software.
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
26 #include "sysbus.h"
27 #include "pci.h"
28 #include "esp.h"
29 #include "trace.h"
30 #include "qemu-log.h"
/*
 * On Sparc32, this is the ESP (NCR53C90) part of chip STP2000 (Master I/O),
 * also produced as NCR89C100. See
 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR89C100.txt
 * and
 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR53C9X.txt
 */
40 static void esp_raise_irq(ESPState *s)
42 if (!(s->rregs[ESP_RSTAT] & STAT_INT)) {
43 s->rregs[ESP_RSTAT] |= STAT_INT;
44 qemu_irq_raise(s->irq);
45 trace_esp_raise_irq();
49 static void esp_lower_irq(ESPState *s)
51 if (s->rregs[ESP_RSTAT] & STAT_INT) {
52 s->rregs[ESP_RSTAT] &= ~STAT_INT;
53 qemu_irq_lower(s->irq);
54 trace_esp_lower_irq();
58 void esp_dma_enable(ESPState *s, int irq, int level)
60 if (level) {
61 s->dma_enabled = 1;
62 trace_esp_dma_enable();
63 if (s->dma_cb) {
64 s->dma_cb(s);
65 s->dma_cb = NULL;
67 } else {
68 trace_esp_dma_disable();
69 s->dma_enabled = 0;
73 void esp_request_cancelled(SCSIRequest *req)
75 ESPState *s = req->hba_private;
77 if (req == s->current_req) {
78 scsi_req_unref(s->current_req);
79 s->current_req = NULL;
80 s->current_dev = NULL;
84 static uint32_t get_cmd(ESPState *s, uint8_t *buf)
86 uint32_t dmalen;
87 int target;
89 target = s->wregs[ESP_WBUSID] & BUSID_DID;
90 if (s->dma) {
91 dmalen = s->rregs[ESP_TCLO] | (s->rregs[ESP_TCMID] << 8);
92 s->dma_memory_read(s->dma_opaque, buf, dmalen);
93 } else {
94 dmalen = s->ti_size;
95 memcpy(buf, s->ti_buf, dmalen);
96 buf[0] = buf[2] >> 5;
98 trace_esp_get_cmd(dmalen, target);
100 s->ti_size = 0;
101 s->ti_rptr = 0;
102 s->ti_wptr = 0;
104 if (s->current_req) {
105 /* Started a new command before the old one finished. Cancel it. */
106 scsi_req_cancel(s->current_req);
107 s->async_len = 0;
110 s->current_dev = scsi_device_find(&s->bus, 0, target, 0);
111 if (!s->current_dev) {
112 // No such drive
113 s->rregs[ESP_RSTAT] = 0;
114 s->rregs[ESP_RINTR] = INTR_DC;
115 s->rregs[ESP_RSEQ] = SEQ_0;
116 esp_raise_irq(s);
117 return 0;
119 return dmalen;
122 static void do_busid_cmd(ESPState *s, uint8_t *buf, uint8_t busid)
124 int32_t datalen;
125 int lun;
126 SCSIDevice *current_lun;
128 trace_esp_do_busid_cmd(busid);
129 lun = busid & 7;
130 current_lun = scsi_device_find(&s->bus, 0, s->current_dev->id, lun);
131 s->current_req = scsi_req_new(current_lun, 0, lun, buf, s);
132 datalen = scsi_req_enqueue(s->current_req);
133 s->ti_size = datalen;
134 if (datalen != 0) {
135 s->rregs[ESP_RSTAT] = STAT_TC;
136 s->dma_left = 0;
137 s->dma_counter = 0;
138 if (datalen > 0) {
139 s->rregs[ESP_RSTAT] |= STAT_DI;
140 } else {
141 s->rregs[ESP_RSTAT] |= STAT_DO;
143 scsi_req_continue(s->current_req);
145 s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
146 s->rregs[ESP_RSEQ] = SEQ_CD;
147 esp_raise_irq(s);
150 static void do_cmd(ESPState *s, uint8_t *buf)
152 uint8_t busid = buf[0];
154 do_busid_cmd(s, &buf[1], busid);
157 static void handle_satn(ESPState *s)
159 uint8_t buf[32];
160 int len;
162 if (s->dma && !s->dma_enabled) {
163 s->dma_cb = handle_satn;
164 return;
166 len = get_cmd(s, buf);
167 if (len)
168 do_cmd(s, buf);
171 static void handle_s_without_atn(ESPState *s)
173 uint8_t buf[32];
174 int len;
176 if (s->dma && !s->dma_enabled) {
177 s->dma_cb = handle_s_without_atn;
178 return;
180 len = get_cmd(s, buf);
181 if (len) {
182 do_busid_cmd(s, buf, 0);
186 static void handle_satn_stop(ESPState *s)
188 if (s->dma && !s->dma_enabled) {
189 s->dma_cb = handle_satn_stop;
190 return;
192 s->cmdlen = get_cmd(s, s->cmdbuf);
193 if (s->cmdlen) {
194 trace_esp_handle_satn_stop(s->cmdlen);
195 s->do_cmd = 1;
196 s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
197 s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
198 s->rregs[ESP_RSEQ] = SEQ_CD;
199 esp_raise_irq(s);
203 static void write_response(ESPState *s)
205 trace_esp_write_response(s->status);
206 s->ti_buf[0] = s->status;
207 s->ti_buf[1] = 0;
208 if (s->dma) {
209 s->dma_memory_write(s->dma_opaque, s->ti_buf, 2);
210 s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
211 s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
212 s->rregs[ESP_RSEQ] = SEQ_CD;
213 } else {
214 s->ti_size = 2;
215 s->ti_rptr = 0;
216 s->ti_wptr = 0;
217 s->rregs[ESP_RFLAGS] = 2;
219 esp_raise_irq(s);
222 static void esp_dma_done(ESPState *s)
224 s->rregs[ESP_RSTAT] |= STAT_TC;
225 s->rregs[ESP_RINTR] = INTR_BS;
226 s->rregs[ESP_RSEQ] = 0;
227 s->rregs[ESP_RFLAGS] = 0;
228 s->rregs[ESP_TCLO] = 0;
229 s->rregs[ESP_TCMID] = 0;
230 esp_raise_irq(s);
233 static void esp_do_dma(ESPState *s)
235 uint32_t len;
236 int to_device;
238 to_device = (s->ti_size < 0);
239 len = s->dma_left;
240 if (s->do_cmd) {
241 trace_esp_do_dma(s->cmdlen, len);
242 s->dma_memory_read(s->dma_opaque, &s->cmdbuf[s->cmdlen], len);
243 s->ti_size = 0;
244 s->cmdlen = 0;
245 s->do_cmd = 0;
246 do_cmd(s, s->cmdbuf);
247 return;
249 if (s->async_len == 0) {
250 /* Defer until data is available. */
251 return;
253 if (len > s->async_len) {
254 len = s->async_len;
256 if (to_device) {
257 s->dma_memory_read(s->dma_opaque, s->async_buf, len);
258 } else {
259 s->dma_memory_write(s->dma_opaque, s->async_buf, len);
261 s->dma_left -= len;
262 s->async_buf += len;
263 s->async_len -= len;
264 if (to_device)
265 s->ti_size += len;
266 else
267 s->ti_size -= len;
268 if (s->async_len == 0) {
269 scsi_req_continue(s->current_req);
270 /* If there is still data to be read from the device then
271 complete the DMA operation immediately. Otherwise defer
272 until the scsi layer has completed. */
273 if (to_device || s->dma_left != 0 || s->ti_size == 0) {
274 return;
278 /* Partially filled a scsi buffer. Complete immediately. */
279 esp_dma_done(s);
282 void esp_command_complete(SCSIRequest *req, uint32_t status,
283 size_t resid)
285 ESPState *s = req->hba_private;
287 trace_esp_command_complete();
288 if (s->ti_size != 0) {
289 trace_esp_command_complete_unexpected();
291 s->ti_size = 0;
292 s->dma_left = 0;
293 s->async_len = 0;
294 if (status) {
295 trace_esp_command_complete_fail();
297 s->status = status;
298 s->rregs[ESP_RSTAT] = STAT_ST;
299 esp_dma_done(s);
300 if (s->current_req) {
301 scsi_req_unref(s->current_req);
302 s->current_req = NULL;
303 s->current_dev = NULL;
307 void esp_transfer_data(SCSIRequest *req, uint32_t len)
309 ESPState *s = req->hba_private;
311 trace_esp_transfer_data(s->dma_left, s->ti_size);
312 s->async_len = len;
313 s->async_buf = scsi_req_get_buf(req);
314 if (s->dma_left) {
315 esp_do_dma(s);
316 } else if (s->dma_counter != 0 && s->ti_size <= 0) {
317 /* If this was the last part of a DMA transfer then the
318 completion interrupt is deferred to here. */
319 esp_dma_done(s);
323 static void handle_ti(ESPState *s)
325 uint32_t dmalen, minlen;
327 if (s->dma && !s->dma_enabled) {
328 s->dma_cb = handle_ti;
329 return;
332 dmalen = s->rregs[ESP_TCLO] | (s->rregs[ESP_TCMID] << 8);
333 if (dmalen==0) {
334 dmalen=0x10000;
336 s->dma_counter = dmalen;
338 if (s->do_cmd)
339 minlen = (dmalen < 32) ? dmalen : 32;
340 else if (s->ti_size < 0)
341 minlen = (dmalen < -s->ti_size) ? dmalen : -s->ti_size;
342 else
343 minlen = (dmalen < s->ti_size) ? dmalen : s->ti_size;
344 trace_esp_handle_ti(minlen);
345 if (s->dma) {
346 s->dma_left = minlen;
347 s->rregs[ESP_RSTAT] &= ~STAT_TC;
348 esp_do_dma(s);
349 } else if (s->do_cmd) {
350 trace_esp_handle_ti_cmd(s->cmdlen);
351 s->ti_size = 0;
352 s->cmdlen = 0;
353 s->do_cmd = 0;
354 do_cmd(s, s->cmdbuf);
355 return;
359 void esp_hard_reset(ESPState *s)
361 memset(s->rregs, 0, ESP_REGS);
362 memset(s->wregs, 0, ESP_REGS);
363 s->rregs[ESP_TCHI] = s->chip_id;
364 s->ti_size = 0;
365 s->ti_rptr = 0;
366 s->ti_wptr = 0;
367 s->dma = 0;
368 s->do_cmd = 0;
369 s->dma_cb = NULL;
371 s->rregs[ESP_CFG1] = 7;
374 static void esp_soft_reset(ESPState *s)
376 qemu_irq_lower(s->irq);
377 esp_hard_reset(s);
380 static void parent_esp_reset(ESPState *s, int irq, int level)
382 if (level) {
383 esp_soft_reset(s);
387 uint64_t esp_reg_read(ESPState *s, uint32_t saddr)
389 uint32_t old_val;
391 trace_esp_mem_readb(saddr, s->rregs[saddr]);
392 switch (saddr) {
393 case ESP_FIFO:
394 if (s->ti_size > 0) {
395 s->ti_size--;
396 if ((s->rregs[ESP_RSTAT] & STAT_PIO_MASK) == 0) {
397 /* Data out. */
398 qemu_log_mask(LOG_UNIMP,
399 "esp: PIO data read not implemented\n");
400 s->rregs[ESP_FIFO] = 0;
401 } else {
402 s->rregs[ESP_FIFO] = s->ti_buf[s->ti_rptr++];
404 esp_raise_irq(s);
406 if (s->ti_size == 0) {
407 s->ti_rptr = 0;
408 s->ti_wptr = 0;
410 break;
411 case ESP_RINTR:
412 /* Clear sequence step, interrupt register and all status bits
413 except TC */
414 old_val = s->rregs[ESP_RINTR];
415 s->rregs[ESP_RINTR] = 0;
416 s->rregs[ESP_RSTAT] &= ~STAT_TC;
417 s->rregs[ESP_RSEQ] = SEQ_CD;
418 esp_lower_irq(s);
420 return old_val;
421 default:
422 break;
424 return s->rregs[saddr];
427 void esp_reg_write(ESPState *s, uint32_t saddr, uint64_t val)
429 trace_esp_mem_writeb(saddr, s->wregs[saddr], val);
430 switch (saddr) {
431 case ESP_TCLO:
432 case ESP_TCMID:
433 s->rregs[ESP_RSTAT] &= ~STAT_TC;
434 break;
435 case ESP_FIFO:
436 if (s->do_cmd) {
437 s->cmdbuf[s->cmdlen++] = val & 0xff;
438 } else if (s->ti_size == TI_BUFSZ - 1) {
439 trace_esp_error_fifo_overrun();
440 } else {
441 s->ti_size++;
442 s->ti_buf[s->ti_wptr++] = val & 0xff;
444 break;
445 case ESP_CMD:
446 s->rregs[saddr] = val;
447 if (val & CMD_DMA) {
448 s->dma = 1;
449 /* Reload DMA counter. */
450 s->rregs[ESP_TCLO] = s->wregs[ESP_TCLO];
451 s->rregs[ESP_TCMID] = s->wregs[ESP_TCMID];
452 } else {
453 s->dma = 0;
455 switch(val & CMD_CMD) {
456 case CMD_NOP:
457 trace_esp_mem_writeb_cmd_nop(val);
458 break;
459 case CMD_FLUSH:
460 trace_esp_mem_writeb_cmd_flush(val);
461 //s->ti_size = 0;
462 s->rregs[ESP_RINTR] = INTR_FC;
463 s->rregs[ESP_RSEQ] = 0;
464 s->rregs[ESP_RFLAGS] = 0;
465 break;
466 case CMD_RESET:
467 trace_esp_mem_writeb_cmd_reset(val);
468 esp_soft_reset(s);
469 break;
470 case CMD_BUSRESET:
471 trace_esp_mem_writeb_cmd_bus_reset(val);
472 s->rregs[ESP_RINTR] = INTR_RST;
473 if (!(s->wregs[ESP_CFG1] & CFG1_RESREPT)) {
474 esp_raise_irq(s);
476 break;
477 case CMD_TI:
478 handle_ti(s);
479 break;
480 case CMD_ICCS:
481 trace_esp_mem_writeb_cmd_iccs(val);
482 write_response(s);
483 s->rregs[ESP_RINTR] = INTR_FC;
484 s->rregs[ESP_RSTAT] |= STAT_MI;
485 break;
486 case CMD_MSGACC:
487 trace_esp_mem_writeb_cmd_msgacc(val);
488 s->rregs[ESP_RINTR] = INTR_DC;
489 s->rregs[ESP_RSEQ] = 0;
490 s->rregs[ESP_RFLAGS] = 0;
491 esp_raise_irq(s);
492 break;
493 case CMD_PAD:
494 trace_esp_mem_writeb_cmd_pad(val);
495 s->rregs[ESP_RSTAT] = STAT_TC;
496 s->rregs[ESP_RINTR] = INTR_FC;
497 s->rregs[ESP_RSEQ] = 0;
498 break;
499 case CMD_SATN:
500 trace_esp_mem_writeb_cmd_satn(val);
501 break;
502 case CMD_RSTATN:
503 trace_esp_mem_writeb_cmd_rstatn(val);
504 break;
505 case CMD_SEL:
506 trace_esp_mem_writeb_cmd_sel(val);
507 handle_s_without_atn(s);
508 break;
509 case CMD_SELATN:
510 trace_esp_mem_writeb_cmd_selatn(val);
511 handle_satn(s);
512 break;
513 case CMD_SELATNS:
514 trace_esp_mem_writeb_cmd_selatns(val);
515 handle_satn_stop(s);
516 break;
517 case CMD_ENSEL:
518 trace_esp_mem_writeb_cmd_ensel(val);
519 s->rregs[ESP_RINTR] = 0;
520 break;
521 case CMD_DISSEL:
522 trace_esp_mem_writeb_cmd_dissel(val);
523 s->rregs[ESP_RINTR] = 0;
524 esp_raise_irq(s);
525 break;
526 default:
527 trace_esp_error_unhandled_command(val);
528 break;
530 break;
531 case ESP_WBUSID ... ESP_WSYNO:
532 break;
533 case ESP_CFG1:
534 s->rregs[saddr] = val;
535 break;
536 case ESP_WCCF ... ESP_WTEST:
537 break;
538 case ESP_CFG2 ... ESP_RES4:
539 s->rregs[saddr] = val;
540 break;
541 default:
542 trace_esp_error_invalid_write(val, saddr);
543 return;
545 s->wregs[saddr] = val;
548 static bool esp_mem_accepts(void *opaque, target_phys_addr_t addr,
549 unsigned size, bool is_write)
551 return (size == 1) || (is_write && size == 4);
554 const VMStateDescription vmstate_esp = {
555 .name ="esp",
556 .version_id = 3,
557 .minimum_version_id = 3,
558 .minimum_version_id_old = 3,
559 .fields = (VMStateField []) {
560 VMSTATE_BUFFER(rregs, ESPState),
561 VMSTATE_BUFFER(wregs, ESPState),
562 VMSTATE_INT32(ti_size, ESPState),
563 VMSTATE_UINT32(ti_rptr, ESPState),
564 VMSTATE_UINT32(ti_wptr, ESPState),
565 VMSTATE_BUFFER(ti_buf, ESPState),
566 VMSTATE_UINT32(status, ESPState),
567 VMSTATE_UINT32(dma, ESPState),
568 VMSTATE_BUFFER(cmdbuf, ESPState),
569 VMSTATE_UINT32(cmdlen, ESPState),
570 VMSTATE_UINT32(do_cmd, ESPState),
571 VMSTATE_UINT32(dma_left, ESPState),
572 VMSTATE_END_OF_LIST()
/* Sysbus (e.g. Sparc32 / Jazz) wrapper around the common ESP core. */
typedef struct {
    SysBusDevice busdev;
    MemoryRegion iomem;
    uint32_t it_shift;   /* log2 spacing between registers in the MMIO window */
    ESPState esp;        /* embedded chip core state */
} SysBusESPState;
583 static void sysbus_esp_mem_write(void *opaque, target_phys_addr_t addr,
584 uint64_t val, unsigned int size)
586 SysBusESPState *sysbus = opaque;
587 uint32_t saddr;
589 saddr = addr >> sysbus->it_shift;
590 esp_reg_write(&sysbus->esp, saddr, val);
593 static uint64_t sysbus_esp_mem_read(void *opaque, target_phys_addr_t addr,
594 unsigned int size)
596 SysBusESPState *sysbus = opaque;
597 uint32_t saddr;
599 saddr = addr >> sysbus->it_shift;
600 return esp_reg_read(&sysbus->esp, saddr);
603 static const MemoryRegionOps sysbus_esp_mem_ops = {
604 .read = sysbus_esp_mem_read,
605 .write = sysbus_esp_mem_write,
606 .endianness = DEVICE_NATIVE_ENDIAN,
607 .valid.accepts = esp_mem_accepts,
610 void esp_init(target_phys_addr_t espaddr, int it_shift,
611 ESPDMAMemoryReadWriteFunc dma_memory_read,
612 ESPDMAMemoryReadWriteFunc dma_memory_write,
613 void *dma_opaque, qemu_irq irq, qemu_irq *reset,
614 qemu_irq *dma_enable)
616 DeviceState *dev;
617 SysBusDevice *s;
618 SysBusESPState *sysbus;
619 ESPState *esp;
621 dev = qdev_create(NULL, "esp");
622 sysbus = DO_UPCAST(SysBusESPState, busdev.qdev, dev);
623 esp = &sysbus->esp;
624 esp->dma_memory_read = dma_memory_read;
625 esp->dma_memory_write = dma_memory_write;
626 esp->dma_opaque = dma_opaque;
627 sysbus->it_shift = it_shift;
628 /* XXX for now until rc4030 has been changed to use DMA enable signal */
629 esp->dma_enabled = 1;
630 qdev_init_nofail(dev);
631 s = sysbus_from_qdev(dev);
632 sysbus_connect_irq(s, 0, irq);
633 sysbus_mmio_map(s, 0, espaddr);
634 *reset = qdev_get_gpio_in(dev, 0);
635 *dma_enable = qdev_get_gpio_in(dev, 1);
638 static const struct SCSIBusInfo esp_scsi_info = {
639 .tcq = false,
640 .max_target = ESP_MAX_DEVS,
641 .max_lun = 7,
643 .transfer_data = esp_transfer_data,
644 .complete = esp_command_complete,
645 .cancel = esp_request_cancelled
648 static void sysbus_esp_gpio_demux(void *opaque, int irq, int level)
650 DeviceState *d = opaque;
651 SysBusESPState *sysbus = container_of(d, SysBusESPState, busdev.qdev);
652 ESPState *s = &sysbus->esp;
654 switch (irq) {
655 case 0:
656 parent_esp_reset(s, irq, level);
657 break;
658 case 1:
659 esp_dma_enable(opaque, irq, level);
660 break;
664 static int sysbus_esp_init(SysBusDevice *dev)
666 SysBusESPState *sysbus = FROM_SYSBUS(SysBusESPState, dev);
667 ESPState *s = &sysbus->esp;
669 sysbus_init_irq(dev, &s->irq);
670 assert(sysbus->it_shift != -1);
672 s->chip_id = TCHI_FAS100A;
673 memory_region_init_io(&sysbus->iomem, &sysbus_esp_mem_ops, sysbus,
674 "esp", ESP_REGS << sysbus->it_shift);
675 sysbus_init_mmio(dev, &sysbus->iomem);
677 qdev_init_gpio_in(&dev->qdev, sysbus_esp_gpio_demux, 2);
679 scsi_bus_new(&s->bus, &dev->qdev, &esp_scsi_info);
680 return scsi_bus_legacy_handle_cmdline(&s->bus);
683 static void sysbus_esp_hard_reset(DeviceState *dev)
685 SysBusESPState *sysbus = DO_UPCAST(SysBusESPState, busdev.qdev, dev);
686 esp_hard_reset(&sysbus->esp);
689 static const VMStateDescription vmstate_sysbus_esp_scsi = {
690 .name = "sysbusespscsi",
691 .version_id = 0,
692 .minimum_version_id = 0,
693 .minimum_version_id_old = 0,
694 .fields = (VMStateField[]) {
695 VMSTATE_STRUCT(esp, SysBusESPState, 0, vmstate_esp, ESPState),
696 VMSTATE_END_OF_LIST()
700 static void sysbus_esp_class_init(ObjectClass *klass, void *data)
702 DeviceClass *dc = DEVICE_CLASS(klass);
703 SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
705 k->init = sysbus_esp_init;
706 dc->reset = sysbus_esp_hard_reset;
707 dc->vmsd = &vmstate_sysbus_esp_scsi;
710 static const TypeInfo sysbus_esp_info = {
711 .name = "esp",
712 .parent = TYPE_SYS_BUS_DEVICE,
713 .instance_size = sizeof(SysBusESPState),
714 .class_init = sysbus_esp_class_init,
/* Am53c974 DMA engine: 32-bit register indices within the DMA block. */
#define DMA_CMD 0x0
#define DMA_STC 0x1
#define DMA_SPA 0x2
#define DMA_WBC 0x3
#define DMA_WAC 0x4
#define DMA_STAT 0x5
#define DMA_SMDLA 0x6
#define DMA_WMAC 0x7

/* Bits of the DMA_CMD register. */
#define DMA_CMD_MASK 0x03
#define DMA_CMD_DIAG 0x04
#define DMA_CMD_MDL 0x10
#define DMA_CMD_INTE_P 0x20
#define DMA_CMD_INTE_D 0x40
#define DMA_CMD_DIR 0x80

/* Bits of the DMA_STAT register. */
#define DMA_STAT_PWDN 0x01
#define DMA_STAT_ERROR 0x02
#define DMA_STAT_ABORT 0x04
#define DMA_STAT_DONE 0x08
#define DMA_STAT_SCSIINT 0x10
#define DMA_STAT_BCMBLT 0x20

/* SBAC register bit: when set, DMA_STAT bits are not cleared on write. */
#define SBAC_STATUS 0x1000
/* PCI (AMD Am53c974) wrapper around the common ESP core. */
typedef struct PCIESPState {
    PCIDevice dev;
    MemoryRegion io;
    uint32_t dma_regs[8];   /* DMA engine registers, indexed by DMA_* */
    uint32_t sbac;          /* SCSI bus and control register */
    ESPState esp;           /* embedded chip core state */
} PCIESPState;
750 static void esp_pci_handle_idle(PCIESPState *pci, uint32_t val)
752 trace_esp_pci_dma_idle(val);
753 esp_dma_enable(&pci->esp, 0, 0);
756 static void esp_pci_handle_blast(PCIESPState *pci, uint32_t val)
758 trace_esp_pci_dma_blast(val);
759 qemu_log_mask(LOG_UNIMP, "am53c974: cmd BLAST not implemented\n");
762 static void esp_pci_handle_abort(PCIESPState *pci, uint32_t val)
764 trace_esp_pci_dma_abort(val);
765 if (pci->esp.current_req) {
766 scsi_req_cancel(pci->esp.current_req);
770 static void esp_pci_handle_start(PCIESPState *pci, uint32_t val)
772 trace_esp_pci_dma_start(val);
774 pci->dma_regs[DMA_WBC] = pci->dma_regs[DMA_STC];
775 pci->dma_regs[DMA_WAC] = pci->dma_regs[DMA_SPA];
776 pci->dma_regs[DMA_WMAC] = pci->dma_regs[DMA_SMDLA];
778 pci->dma_regs[DMA_STAT] &= ~(DMA_STAT_BCMBLT | DMA_STAT_SCSIINT
779 | DMA_STAT_DONE | DMA_STAT_ABORT
780 | DMA_STAT_ERROR | DMA_STAT_PWDN);
782 esp_dma_enable(&pci->esp, 0, 1);
785 static void esp_pci_dma_write(PCIESPState *pci, uint32_t saddr, uint32_t val)
787 trace_esp_pci_dma_write(saddr, pci->dma_regs[saddr], val);
788 switch (saddr) {
789 case DMA_CMD:
790 pci->dma_regs[saddr] = val;
791 switch (val & DMA_CMD_MASK) {
792 case 0x0: /* IDLE */
793 esp_pci_handle_idle(pci, val);
794 break;
795 case 0x1: /* BLAST */
796 esp_pci_handle_blast(pci, val);
797 break;
798 case 0x2: /* ABORT */
799 esp_pci_handle_abort(pci, val);
800 break;
801 case 0x3: /* START */
802 esp_pci_handle_start(pci, val);
803 break;
804 default: /* can't happen */
805 abort();
807 break;
808 case DMA_STC:
809 case DMA_SPA:
810 case DMA_SMDLA:
811 pci->dma_regs[saddr] = val;
812 break;
813 case DMA_STAT:
814 if (!(pci->sbac & SBAC_STATUS)) {
815 /* clear some bits on write */
816 uint32_t mask = DMA_STAT_ERROR | DMA_STAT_ABORT | DMA_STAT_DONE;
817 pci->dma_regs[DMA_STAT] &= ~(val & mask);
819 break;
820 default:
821 trace_esp_pci_error_invalid_write_dma(val, saddr);
822 return;
826 static uint32_t esp_pci_dma_read(PCIESPState *pci, uint32_t saddr)
828 uint32_t val;
830 val = pci->dma_regs[saddr];
831 if (saddr == DMA_STAT) {
832 if (pci->esp.rregs[ESP_RSTAT] & STAT_INT) {
833 val |= DMA_STAT_SCSIINT;
835 if (pci->sbac & SBAC_STATUS) {
836 pci->dma_regs[DMA_STAT] &= ~(DMA_STAT_ERROR | DMA_STAT_ABORT |
837 DMA_STAT_DONE);
841 trace_esp_pci_dma_read(saddr, val);
842 return val;
845 static void esp_pci_io_write(void *opaque, target_phys_addr_t addr,
846 uint64_t val, unsigned int size)
848 PCIESPState *pci = opaque;
850 if (size < 4 || addr & 3) {
851 /* need to upgrade request: we only support 4-bytes accesses */
852 uint32_t current = 0, mask;
853 int shift;
855 if (addr < 0x40) {
856 current = pci->esp.wregs[addr >> 2];
857 } else if (addr < 0x60) {
858 current = pci->dma_regs[(addr - 0x40) >> 2];
859 } else if (addr < 0x74) {
860 current = pci->sbac;
863 shift = (4 - size) * 8;
864 mask = (~(uint32_t)0 << shift) >> shift;
866 shift = ((4 - (addr & 3)) & 3) * 8;
867 val <<= shift;
868 val |= current & ~(mask << shift);
869 addr &= ~3;
870 size = 4;
873 if (addr < 0x40) {
874 /* SCSI core reg */
875 esp_reg_write(&pci->esp, addr >> 2, val);
876 } else if (addr < 0x60) {
877 /* PCI DMA CCB */
878 esp_pci_dma_write(pci, (addr - 0x40) >> 2, val);
879 } else if (addr == 0x70) {
880 /* DMA SCSI Bus and control */
881 trace_esp_pci_sbac_write(pci->sbac, val);
882 pci->sbac = val;
883 } else {
884 trace_esp_pci_error_invalid_write((int)addr);
888 static uint64_t esp_pci_io_read(void *opaque, target_phys_addr_t addr,
889 unsigned int size)
891 PCIESPState *pci = opaque;
892 uint32_t ret;
894 if (addr < 0x40) {
895 /* SCSI core reg */
896 ret = esp_reg_read(&pci->esp, addr >> 2);
897 } else if (addr < 0x60) {
898 /* PCI DMA CCB */
899 ret = esp_pci_dma_read(pci, (addr - 0x40) >> 2);
900 } else if (addr == 0x70) {
901 /* DMA SCSI Bus and control */
902 trace_esp_pci_sbac_read(pci->sbac);
903 ret = pci->sbac;
904 } else {
905 /* Invalid region */
906 trace_esp_pci_error_invalid_read((int)addr);
907 ret = 0;
910 /* give only requested data */
911 ret >>= (addr & 3) * 8;
912 ret &= ~(~(uint64_t)0 << (8 * size));
914 return ret;
917 static void esp_pci_dma_memory_rw(PCIESPState *pci, uint8_t *buf, int len,
918 DMADirection dir)
920 dma_addr_t addr;
921 DMADirection expected_dir;
923 if (pci->dma_regs[DMA_CMD] & DMA_CMD_DIR) {
924 expected_dir = DMA_DIRECTION_FROM_DEVICE;
925 } else {
926 expected_dir = DMA_DIRECTION_TO_DEVICE;
929 if (dir != expected_dir) {
930 trace_esp_pci_error_invalid_dma_direction();
931 return;
934 if (pci->dma_regs[DMA_STAT] & DMA_CMD_MDL) {
935 qemu_log_mask(LOG_UNIMP, "am53c974: MDL transfer not implemented\n");
938 addr = pci->dma_regs[DMA_SPA];
939 if (pci->dma_regs[DMA_WBC] < len) {
940 len = pci->dma_regs[DMA_WBC];
943 pci_dma_rw(&pci->dev, addr, buf, len, dir);
945 /* update status registers */
946 pci->dma_regs[DMA_WBC] -= len;
947 pci->dma_regs[DMA_WAC] += len;
950 static void esp_pci_dma_memory_read(void *opaque, uint8_t *buf, int len)
952 PCIESPState *pci = opaque;
953 esp_pci_dma_memory_rw(pci, buf, len, DMA_DIRECTION_TO_DEVICE);
956 static void esp_pci_dma_memory_write(void *opaque, uint8_t *buf, int len)
958 PCIESPState *pci = opaque;
959 esp_pci_dma_memory_rw(pci, buf, len, DMA_DIRECTION_FROM_DEVICE);
962 static const MemoryRegionOps esp_pci_io_ops = {
963 .read = esp_pci_io_read,
964 .write = esp_pci_io_write,
965 .endianness = DEVICE_LITTLE_ENDIAN,
966 .impl = {
967 .min_access_size = 1,
968 .max_access_size = 4,
972 static void esp_pci_hard_reset(DeviceState *dev)
974 PCIESPState *pci = DO_UPCAST(PCIESPState, dev.qdev, dev);
975 esp_hard_reset(&pci->esp);
976 pci->dma_regs[DMA_CMD] &= ~(DMA_CMD_DIR | DMA_CMD_INTE_D | DMA_CMD_INTE_P
977 | DMA_CMD_MDL | DMA_CMD_DIAG | DMA_CMD_MASK);
978 pci->dma_regs[DMA_WBC] &= ~0xffff;
979 pci->dma_regs[DMA_WAC] = 0xffffffff;
980 pci->dma_regs[DMA_STAT] &= ~(DMA_STAT_BCMBLT | DMA_STAT_SCSIINT
981 | DMA_STAT_DONE | DMA_STAT_ABORT
982 | DMA_STAT_ERROR);
983 pci->dma_regs[DMA_WMAC] = 0xfffffffd;
986 static const VMStateDescription vmstate_esp_pci_scsi = {
987 .name = "pciespscsi",
988 .version_id = 0,
989 .minimum_version_id = 0,
990 .minimum_version_id_old = 0,
991 .fields = (VMStateField[]) {
992 VMSTATE_PCI_DEVICE(dev, PCIESPState),
993 VMSTATE_BUFFER_UNSAFE(dma_regs, PCIESPState, 0, 8 * sizeof(uint32_t)),
994 VMSTATE_STRUCT(esp, PCIESPState, 0, vmstate_esp, ESPState),
995 VMSTATE_END_OF_LIST()
999 static void esp_pci_command_complete(SCSIRequest *req, uint32_t status,
1000 size_t resid)
1002 ESPState *s = req->hba_private;
1003 PCIESPState *pci = container_of(s, PCIESPState, esp);
1005 esp_command_complete(req, status, resid);
1006 pci->dma_regs[DMA_WBC] = 0;
1007 pci->dma_regs[DMA_STAT] |= DMA_STAT_DONE;
1010 static const struct SCSIBusInfo esp_pci_scsi_info = {
1011 .tcq = false,
1012 .max_target = ESP_MAX_DEVS,
1013 .max_lun = 7,
1015 .transfer_data = esp_transfer_data,
1016 .complete = esp_pci_command_complete,
1017 .cancel = esp_request_cancelled,
1020 static int esp_pci_scsi_init(PCIDevice *dev)
1022 PCIESPState *pci = DO_UPCAST(PCIESPState, dev, dev);
1023 ESPState *s = &pci->esp;
1024 uint8_t *pci_conf;
1026 pci_conf = pci->dev.config;
1028 /* Interrupt pin A */
1029 pci_conf[PCI_INTERRUPT_PIN] = 0x01;
1031 s->dma_memory_read = esp_pci_dma_memory_read;
1032 s->dma_memory_write = esp_pci_dma_memory_write;
1033 s->dma_opaque = pci;
1034 s->chip_id = TCHI_AM53C974;
1035 memory_region_init_io(&pci->io, &esp_pci_io_ops, pci, "esp-io", 0x80);
1037 pci_register_bar(&pci->dev, 0, PCI_BASE_ADDRESS_SPACE_IO, &pci->io);
1038 s->irq = pci->dev.irq[0];
1040 scsi_bus_new(&s->bus, &dev->qdev, &esp_pci_scsi_info);
1041 if (!dev->qdev.hotplugged) {
1042 return scsi_bus_legacy_handle_cmdline(&s->bus);
1044 return 0;
1047 static void esp_pci_scsi_uninit(PCIDevice *d)
1049 PCIESPState *pci = DO_UPCAST(PCIESPState, dev, d);
1051 memory_region_destroy(&pci->io);
1054 static void esp_pci_class_init(ObjectClass *klass, void *data)
1056 DeviceClass *dc = DEVICE_CLASS(klass);
1057 PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
1059 k->init = esp_pci_scsi_init;
1060 k->exit = esp_pci_scsi_uninit;
1061 k->vendor_id = PCI_VENDOR_ID_AMD;
1062 k->device_id = PCI_DEVICE_ID_AMD_SCSI;
1063 k->revision = 0x10;
1064 k->class_id = PCI_CLASS_STORAGE_SCSI;
1065 dc->desc = "AMD Am53c974 PCscsi-PCI SCSI adapter";
1066 dc->reset = esp_pci_hard_reset;
1067 dc->vmsd = &vmstate_esp_pci_scsi;
1070 static const TypeInfo esp_pci_info = {
1071 .name = "am53c974",
1072 .parent = TYPE_PCI_DEVICE,
1073 .instance_size = sizeof(PCIESPState),
1074 .class_init = esp_pci_class_init,
1077 static void esp_register_types(void)
1079 type_register_static(&sysbus_esp_info);
1080 type_register_static(&esp_pci_info);
1083 type_init(esp_register_types)