/*
 * QEMU ESP/NCR53C9x emulation
 *
 * Copyright (c) 2005-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* Assumed includes for the sysbus, SCSI, ESP and tracing APIs used below. */
#include "sysbus.h"
#include "scsi.h"
#include "esp.h"
#include "trace.h"

/*
 * On Sparc32, this is the ESP (NCR53C90) part of chip STP2000 (Master I/O),
 * also produced as NCR89C100. See
 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR89C100.txt
 * and
 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR53C9X.txt
 */

#define ESP_ERROR(fmt, ...) \
    do { printf("ESP ERROR: %s: " fmt, __func__ , ## __VA_ARGS__); } while (0)

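/*
 * ESP_ERROR() prepends the reporting function's name, e.g.
 * ESP_ERROR("fifo overrun\n") inside esp_mem_writeb() prints
 * "ESP ERROR: esp_mem_writeb: fifo overrun".
 */
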
typedef struct ESPState ESPState;

struct ESPState {
    SysBusDevice busdev;
    uint32_t it_shift;
    qemu_irq irq;
    uint8_t rregs[ESP_REGS];
    uint8_t wregs[ESP_REGS];
    int32_t ti_size;
    uint32_t ti_rptr, ti_wptr;
    uint8_t ti_buf[TI_BUFSZ];
    uint32_t status;
    uint32_t dma;
    SCSIBus bus;
    SCSIDevice *current_dev;
    SCSIRequest *current_req;
    uint8_t cmdbuf[TI_BUFSZ];
    uint32_t cmdlen;
    uint32_t do_cmd;

    /* The amount of data left in the current DMA transfer. */
    uint32_t dma_left;
    /* The size of the current DMA transfer. Zero if no transfer is in
       progress. */
    uint32_t dma_counter;
    uint8_t *async_buf;
    uint32_t async_len;

    ESPDMAMemoryReadWriteFunc dma_memory_read;
    ESPDMAMemoryReadWriteFunc dma_memory_write;
    void *dma_opaque;
    int dma_enabled;
    void (*dma_cb)(ESPState *s);
};

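/*
 * When the DMA engine is disabled, the selection/command handlers park
 * themselves in dma_cb (see handle_satn() and friends) and are re-run once
 * the DMA-enable line is raised; dma_enabled tracks that gate.
 */
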
#define ESP_WBUSID 0x4
#define ESP_WSYNTP 0x6
#define ESP_RFLAGS 0x7

#define CMD_FLUSH 0x01
#define CMD_RESET 0x02
#define CMD_BUSRESET 0x03
#define CMD_ICCS 0x11
#define CMD_MSGACC 0x12
#define CMD_SATN 0x1a
#define CMD_SELATN 0x42
#define CMD_SELATNS 0x43
#define CMD_ENSEL 0x44

#define STAT_PIO_MASK 0x06

#define STAT_INT 0x80

#define BUSID_DID 0x07

#define INTR_RST 0x80

#define CFG1_RESREPT 0x40

#define TCHI_FAS100A 0x4

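/* STAT_INT in the read-only status register mirrors the level of the IRQ
   line; esp_raise_irq()/esp_lower_irq() below keep the two in sync. */
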
static void esp_raise_irq(ESPState *s)
{
    if (!(s->rregs[ESP_RSTAT] & STAT_INT)) {
        s->rregs[ESP_RSTAT] |= STAT_INT;
        qemu_irq_raise(s->irq);
        trace_esp_raise_irq();
    }
}

static void esp_lower_irq(ESPState *s)
{
    if (s->rregs[ESP_RSTAT] & STAT_INT) {
        s->rregs[ESP_RSTAT] &= ~STAT_INT;
        qemu_irq_lower(s->irq);
        trace_esp_lower_irq();
    }
}

static void esp_dma_enable(void *opaque, int irq, int level)
{
    DeviceState *d = opaque;
    ESPState *s = container_of(d, ESPState, busdev.qdev);

    if (level) {
        s->dma_enabled = 1;
        trace_esp_dma_enable();
        if (s->dma_cb) {
            s->dma_cb(s);
            s->dma_cb = NULL;
        }
    } else {
        trace_esp_dma_disable();
        s->dma_enabled = 0;
    }
}

static void esp_request_cancelled(SCSIRequest *req)
{
    ESPState *s = DO_UPCAST(ESPState, busdev.qdev, req->bus->qbus.parent);

    if (req == s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
    }
}

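/* Fetch the CDB for the target selected in ESP_WBUSID: via the board DMA
   callback when s->dma is set, otherwise out of the PIO buffer (ti_buf). */
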
static uint32_t get_cmd(ESPState *s, uint8_t *buf)
{
    uint32_t dmalen;
    int target;

    target = s->wregs[ESP_WBUSID] & BUSID_DID;
    if (s->dma) {
        dmalen = s->rregs[ESP_TCLO] | (s->rregs[ESP_TCMID] << 8);
        s->dma_memory_read(s->dma_opaque, buf, dmalen);
    } else {
        dmalen = 32;
        memcpy(buf, s->ti_buf, dmalen);
        buf[0] = buf[2] >> 5;
    }
    trace_esp_get_cmd(dmalen, target);

    s->ti_size = 0;
    s->ti_rptr = 0;
    s->ti_wptr = 0;

    if (s->current_req) {
        /* Started a new command before the old one finished. Cancel it. */
        scsi_req_cancel(s->current_req);
        s->async_len = 0;
    }

    if (target >= ESP_MAX_DEVS || !s->bus.devs[target]) {
        /* No such drive: report disconnect. */
        s->rregs[ESP_RSTAT] = 0;
        s->rregs[ESP_RINTR] = INTR_DC;
        s->rregs[ESP_RSEQ] = SEQ_0;
        esp_raise_irq(s);
        return 0;
    }
    s->current_dev = s->bus.devs[target];
    return dmalen;
}

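/* Hand the CDB to the SCSI layer: build a request for the current target
   and the LUN encoded in the identify byte, then flag data-in or data-out
   depending on the direction the SCSI layer reports. */
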
static void do_busid_cmd(ESPState *s, uint8_t *buf, uint8_t busid)
{
    int32_t datalen;
    int lun;

    trace_esp_do_busid_cmd(busid);
    lun = busid & 7;
    s->current_req = scsi_req_new(s->current_dev, 0, lun, buf, NULL);
    datalen = scsi_req_enqueue(s->current_req);
    s->ti_size = datalen;
    if (datalen != 0) {
        s->rregs[ESP_RSTAT] = STAT_TC;
        s->dma_left = 0;
        s->dma_counter = 0;
        if (datalen > 0) {
            s->rregs[ESP_RSTAT] |= STAT_DI;
        } else {
            s->rregs[ESP_RSTAT] |= STAT_DO;
        }
        scsi_req_continue(s->current_req);
    }
    s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
    s->rregs[ESP_RSEQ] = SEQ_CD;
    esp_raise_irq(s);
}

static void do_cmd(ESPState *s, uint8_t *buf)
{
    uint8_t busid = buf[0];

    do_busid_cmd(s, &buf[1], busid);
}

static void handle_satn(ESPState *s)
{
    uint8_t buf[32];
    int len;

    if (!s->dma_enabled) {
        s->dma_cb = handle_satn;
        return;
    }
    len = get_cmd(s, buf);
    if (len)
        do_cmd(s, buf);
}

static void handle_s_without_atn(ESPState *s)
{
    uint8_t buf[32];
    int len;

    if (!s->dma_enabled) {
        s->dma_cb = handle_s_without_atn;
        return;
    }
    len = get_cmd(s, buf);
    if (len) {
        do_busid_cmd(s, buf, 0);
    }
}

static void handle_satn_stop(ESPState *s)
{
    if (!s->dma_enabled) {
        s->dma_cb = handle_satn_stop;
        return;
    }
    s->cmdlen = get_cmd(s, s->cmdbuf);
    if (s->cmdlen) {
        trace_esp_handle_satn_stop(s->cmdlen);
        s->do_cmd = 1;
        s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
        s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        esp_raise_irq(s);
    }
}

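/* Return the completion status to the initiator: either pushed out through
   the DMA callback or staged in ti_buf for PIO reads of the FIFO. */
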
static void write_response(ESPState *s)
{
    trace_esp_write_response(s->status);
    s->ti_buf[0] = s->status;
    s->ti_buf[1] = 0;
    if (s->dma) {
        s->dma_memory_write(s->dma_opaque, s->ti_buf, 2);
        s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
        s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
    } else {
        s->ti_size = 2;
        s->ti_rptr = 0;
        s->ti_wptr = 0;
        s->rregs[ESP_RFLAGS] = 2;
    }
    esp_raise_irq(s);
}

static void esp_dma_done(ESPState *s)
{
    s->rregs[ESP_RSTAT] |= STAT_TC;
    s->rregs[ESP_RINTR] = INTR_BS;
    s->rregs[ESP_RSEQ] = 0;
    s->rregs[ESP_RFLAGS] = 0;
    s->rregs[ESP_TCLO] = 0;
    s->rregs[ESP_TCMID] = 0;
    esp_raise_irq(s);
}

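/* Data phases use a sign convention on ti_size: a negative value means the
   transfer goes to the device (write), a positive one means data comes from
   it (read). While do_cmd is set, DMA bytes are command bytes and are
   accumulated into cmdbuf instead. */
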
static void esp_do_dma(ESPState *s)
{
    uint32_t len;
    int to_device;

    to_device = (s->ti_size < 0);
    len = s->dma_left;
    if (s->do_cmd) {
        trace_esp_do_dma(s->cmdlen, len);
        s->dma_memory_read(s->dma_opaque, &s->cmdbuf[s->cmdlen], len);
        s->ti_size = 0;
        s->cmdlen = 0;
        s->do_cmd = 0;
        do_cmd(s, s->cmdbuf);
        return;
    }
    if (s->async_len == 0) {
        /* Defer until data is available. */
        return;
    }
    if (len > s->async_len) {
        len = s->async_len;
    }
    if (to_device) {
        s->dma_memory_read(s->dma_opaque, s->async_buf, len);
    } else {
        s->dma_memory_write(s->dma_opaque, s->async_buf, len);
    }
    s->dma_left -= len;
    s->async_buf += len;
    s->async_len -= len;
    if (to_device)
        s->ti_size += len;
    else
        s->ti_size -= len;
    if (s->async_len == 0) {
        scsi_req_continue(s->current_req);
        /* If there is still data to be read from the device then
           complete the DMA operation immediately. Otherwise defer
           until the scsi layer has completed. */
        if (to_device || s->dma_left != 0 || s->ti_size == 0) {
            return;
        }
    }

    /* Partially filled a scsi buffer. Complete immediately. */
    esp_dma_done(s);
}

static void esp_command_complete(SCSIRequest *req, uint32_t status)
{
    ESPState *s = DO_UPCAST(ESPState, busdev.qdev, req->bus->qbus.parent);

    trace_esp_command_complete();
    if (s->ti_size != 0) {
        trace_esp_command_complete_unexpected();
    }
    s->ti_size = 0;
    s->dma_left = 0;
    s->async_len = 0;
    if (status) {
        trace_esp_command_complete_fail();
    }
    s->status = status;
    s->rregs[ESP_RSTAT] = STAT_ST;
    esp_dma_done(s);
    if (s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
    }
}

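/* Callback from the SCSI layer delivering the next chunk of the request's
   data buffer; async_buf/async_len describe what is currently available. */
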
static void esp_transfer_data(SCSIRequest *req, uint32_t len)
{
    ESPState *s = DO_UPCAST(ESPState, busdev.qdev, req->bus->qbus.parent);

    trace_esp_transfer_data(s->dma_left, s->ti_size);
    s->async_len = len;
    s->async_buf = scsi_req_get_buf(req);
    if (s->dma_left) {
        esp_do_dma(s);
    } else if (s->dma_counter != 0 && s->ti_size <= 0) {
        /* If this was the last part of a DMA transfer then the
           completion interrupt is deferred to here. */
        esp_dma_done(s);
    }
}

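/* Transfer Information command: the 16-bit transfer counter (TCLO/TCMID)
   sets the DMA length, capped against the data still owed to or by the
   device (or against the command buffer while do_cmd is set). */
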
static void handle_ti(ESPState *s)
{
    uint32_t dmalen, minlen;

    dmalen = s->rregs[ESP_TCLO] | (s->rregs[ESP_TCMID] << 8);
    if (dmalen == 0) {
        dmalen = 0x10000;
    }
    s->dma_counter = dmalen;

    if (s->do_cmd)
        minlen = (dmalen < 32) ? dmalen : 32;
    else if (s->ti_size < 0)
        minlen = (dmalen < -s->ti_size) ? dmalen : -s->ti_size;
    else
        minlen = (dmalen < s->ti_size) ? dmalen : s->ti_size;
    trace_esp_handle_ti(minlen);
    if (s->dma) {
        s->dma_left = minlen;
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        esp_do_dma(s);
    } else if (s->do_cmd) {
        trace_esp_handle_ti_cmd(s->cmdlen);
        s->ti_size = 0;
        s->cmdlen = 0;
        s->do_cmd = 0;
        do_cmd(s, s->cmdbuf);
        return;
    }
}

static void esp_hard_reset(DeviceState *d)
{
    ESPState *s = container_of(d, ESPState, busdev.qdev);

    memset(s->rregs, 0, ESP_REGS);
    memset(s->wregs, 0, ESP_REGS);
    s->rregs[ESP_TCHI] = TCHI_FAS100A; // Indicate fas100a
    s->ti_size = 0;
    s->ti_rptr = 0;
    s->ti_wptr = 0;
    s->dma = 0;
    s->do_cmd = 0;
    s->dma_cb = NULL;

    s->rregs[ESP_CFG1] = 7;
}

static void esp_soft_reset(DeviceState *d)
{
    ESPState *s = container_of(d, ESPState, busdev.qdev);

    qemu_irq_lower(s->irq);
    esp_hard_reset(d);
}

static void parent_esp_reset(void *opaque, int irq, int level)
{
    if (level) {
        esp_soft_reset(opaque);
    }
}

static void esp_gpio_demux(void *opaque, int irq, int level)
{
    switch (irq) {
    case 0:
        parent_esp_reset(opaque, irq, level);
        break;
    case 1:
        esp_dma_enable(opaque, irq, level);
        break;
    }
}

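/* MMIO accessors: registers are byte wide and spaced by 1 << it_shift.
   Reading ESP_RINTR clears the latched interrupt/sequence state and drops
   the IRQ line. */
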
static uint32_t esp_mem_readb(void *opaque, target_phys_addr_t addr)
{
    ESPState *s = opaque;
    uint32_t saddr, old_val;

    saddr = addr >> s->it_shift;
    trace_esp_mem_readb(saddr, s->rregs[saddr]);
    switch (saddr) {
    case ESP_FIFO:
        if (s->ti_size > 0) {
            s->ti_size--;
            if ((s->rregs[ESP_RSTAT] & STAT_PIO_MASK) == 0) {
                /* Data out. */
                ESP_ERROR("PIO data read not implemented\n");
                s->rregs[ESP_FIFO] = 0;
            } else {
                s->rregs[ESP_FIFO] = s->ti_buf[s->ti_rptr++];
            }
            esp_raise_irq(s);
        }
        if (s->ti_size == 0) {
            s->ti_rptr = 0;
            s->ti_wptr = 0;
        }
        break;
    case ESP_RINTR:
        /* Clear sequence step, interrupt register and all status bits
           except TC */
        old_val = s->rregs[ESP_RINTR];
        s->rregs[ESP_RINTR] = 0;
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        esp_lower_irq(s);

        return old_val;
    default:
        break;
    }
    return s->rregs[saddr];
}

static void esp_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    ESPState *s = opaque;
    uint32_t saddr;

    saddr = addr >> s->it_shift;
    trace_esp_mem_writeb(saddr, s->wregs[saddr], val);
    switch (saddr) {
    case ESP_TCLO:
    case ESP_TCMID:
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        break;
    case ESP_FIFO:
        if (s->do_cmd) {
            s->cmdbuf[s->cmdlen++] = val & 0xff;
        } else if (s->ti_size == TI_BUFSZ - 1) {
            ESP_ERROR("fifo overrun\n");
        } else {
            s->ti_size++;
            s->ti_buf[s->ti_wptr++] = val & 0xff;
        }
        break;
    case ESP_CMD:
        s->rregs[saddr] = val;
        if (val & CMD_DMA) {
            s->dma = 1;
            /* Reload DMA counter. */
            s->rregs[ESP_TCLO] = s->wregs[ESP_TCLO];
            s->rregs[ESP_TCMID] = s->wregs[ESP_TCMID];
        } else {
            s->dma = 0;
        }
        switch (val & CMD_CMD) {
        case CMD_NOP:
            trace_esp_mem_writeb_cmd_nop(val);
            break;
        case CMD_FLUSH:
            trace_esp_mem_writeb_cmd_flush(val);
            s->rregs[ESP_RINTR] = INTR_FC;
            s->rregs[ESP_RSEQ] = 0;
            s->rregs[ESP_RFLAGS] = 0;
            break;
        case CMD_RESET:
            trace_esp_mem_writeb_cmd_reset(val);
            esp_soft_reset(&s->busdev.qdev);
            break;
        case CMD_BUSRESET:
            trace_esp_mem_writeb_cmd_bus_reset(val);
            s->rregs[ESP_RINTR] = INTR_RST;
            if (!(s->wregs[ESP_CFG1] & CFG1_RESREPT)) {
                esp_raise_irq(s);
            }
            break;
        case CMD_TI:
            handle_ti(s);
            break;
        case CMD_ICCS:
            trace_esp_mem_writeb_cmd_iccs(val);
            write_response(s);
            s->rregs[ESP_RINTR] = INTR_FC;
            s->rregs[ESP_RSTAT] |= STAT_MI;
            break;
        case CMD_MSGACC:
            trace_esp_mem_writeb_cmd_msgacc(val);
            s->rregs[ESP_RINTR] = INTR_DC;
            s->rregs[ESP_RSEQ] = 0;
            s->rregs[ESP_RFLAGS] = 0;
            esp_raise_irq(s);
            break;
        case CMD_PAD:
            trace_esp_mem_writeb_cmd_pad(val);
            s->rregs[ESP_RSTAT] = STAT_TC;
            s->rregs[ESP_RINTR] = INTR_FC;
            s->rregs[ESP_RSEQ] = 0;
            break;
        case CMD_SATN:
            trace_esp_mem_writeb_cmd_satn(val);
            break;
        case CMD_SEL:
            trace_esp_mem_writeb_cmd_sel(val);
            handle_s_without_atn(s);
            break;
        case CMD_SELATN:
            trace_esp_mem_writeb_cmd_selatn(val);
            handle_satn(s);
            break;
        case CMD_SELATNS:
            trace_esp_mem_writeb_cmd_selatns(val);
            handle_satn_stop(s);
            break;
        case CMD_ENSEL:
            trace_esp_mem_writeb_cmd_ensel(val);
            s->rregs[ESP_RINTR] = 0;
            break;
        default:
            ESP_ERROR("Unhandled ESP command (%2.2x)\n", val);
            break;
        }
        break;
    case ESP_WBUSID ... ESP_WSYNO:
        break;
    case ESP_CFG1:
        s->rregs[saddr] = val;
        break;
    case ESP_WCCF ... ESP_WTEST:
        break;
    case ESP_CFG2 ... ESP_RES4:
        s->rregs[saddr] = val;
        break;
    default:
        ESP_ERROR("invalid write of 0x%02x at [0x%x]\n", val, saddr);
        return;
    }
    s->wregs[saddr] = val;
}

static CPUReadMemoryFunc * const esp_mem_read[3] = {
    esp_mem_readb,
    NULL,
    NULL,
};

static CPUWriteMemoryFunc * const esp_mem_write[3] = {
    esp_mem_writeb,
    NULL,
    NULL,
};

static const VMStateDescription vmstate_esp = {
    .name = "esp",
    .version_id = 3,
    .minimum_version_id = 3,
    .minimum_version_id_old = 3,
    .fields = (VMStateField[]) {
        VMSTATE_BUFFER(rregs, ESPState),
        VMSTATE_BUFFER(wregs, ESPState),
        VMSTATE_INT32(ti_size, ESPState),
        VMSTATE_UINT32(ti_rptr, ESPState),
        VMSTATE_UINT32(ti_wptr, ESPState),
        VMSTATE_BUFFER(ti_buf, ESPState),
        VMSTATE_UINT32(status, ESPState),
        VMSTATE_UINT32(dma, ESPState),
        VMSTATE_BUFFER(cmdbuf, ESPState),
        VMSTATE_UINT32(cmdlen, ESPState),
        VMSTATE_UINT32(do_cmd, ESPState),
        VMSTATE_UINT32(dma_left, ESPState),
        VMSTATE_END_OF_LIST()
    }
};

void esp_init(target_phys_addr_t espaddr, int it_shift,
              ESPDMAMemoryReadWriteFunc dma_memory_read,
              ESPDMAMemoryReadWriteFunc dma_memory_write,
              void *dma_opaque, qemu_irq irq, qemu_irq *reset,
              qemu_irq *dma_enable)
{
    DeviceState *dev;
    SysBusDevice *s;
    ESPState *esp;

    dev = qdev_create(NULL, "esp");
    esp = DO_UPCAST(ESPState, busdev.qdev, dev);
    esp->dma_memory_read = dma_memory_read;
    esp->dma_memory_write = dma_memory_write;
    esp->dma_opaque = dma_opaque;
    esp->it_shift = it_shift;
    /* XXX for now until rc4030 has been changed to use DMA enable signal */
    esp->dma_enabled = 1;
    qdev_init_nofail(dev);
    s = sysbus_from_qdev(dev);
    sysbus_connect_irq(s, 0, irq);
    sysbus_mmio_map(s, 0, espaddr);
    *reset = qdev_get_gpio_in(dev, 0);
    *dma_enable = qdev_get_gpio_in(dev, 1);
}

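/*
 * Usage sketch (illustrative, not part of this file): a Sparc32 board would
 * wire the ESP roughly like this, with the DMA callbacks, opaque pointer,
 * IRQ and base address all supplied by board code (names here are
 * hypothetical placeholders):
 *
 *   qemu_irq esp_reset, dma_enable;
 *   esp_init(0x78800000ULL, 2,
 *            board_dma_read, board_dma_write, board_dma_state,
 *            esp_irq, &esp_reset, &dma_enable);
 */
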
static const struct SCSIBusOps esp_scsi_ops = {
    .transfer_data = esp_transfer_data,
    .complete = esp_command_complete,
    .cancel = esp_request_cancelled
};

static int esp_init1(SysBusDevice *dev)
{
    ESPState *s = FROM_SYSBUS(ESPState, dev);
    int esp_io_memory;

    sysbus_init_irq(dev, &s->irq);
    assert(s->it_shift != -1);

    esp_io_memory = cpu_register_io_memory(esp_mem_read, esp_mem_write, s,
                                           DEVICE_NATIVE_ENDIAN);
    sysbus_init_mmio(dev, ESP_REGS << s->it_shift, esp_io_memory);

    qdev_init_gpio_in(&dev->qdev, esp_gpio_demux, 2);

    scsi_bus_new(&s->bus, &dev->qdev, 0, ESP_MAX_DEVS, &esp_scsi_ops);
    return scsi_bus_legacy_handle_cmdline(&s->bus);
}

static SysBusDeviceInfo esp_info = {
    .init = esp_init1,
    .qdev.name  = "esp",
    .qdev.size  = sizeof(ESPState),
    .qdev.vmsd  = &vmstate_esp,
    .qdev.reset = esp_hard_reset,
    .qdev.props = (Property[]) {
        DEFINE_PROP_UINT32("it_shift", ESPState, it_shift, -1),
        DEFINE_PROP_END_OF_LIST(),
    }
};

static void esp_register_devices(void)
{
    sysbus_register_withprop(&esp_info);
}

device_init(esp_register_devices)