/*
 * QEMU ESP/NCR53C9x emulation
 *
 * Copyright (c) 2005-2006 Fabrice Bellard
 * Copyright (c) 2012 Herve Poussineau
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
26 #include "qemu/osdep.h"
27 #include "hw/sysbus.h"
28 #include "migration/vmstate.h"
30 #include "hw/scsi/esp.h"
33 #include "qemu/module.h"
/*
 * On Sparc32, this is the ESP (NCR53C90) part of chip STP2000 (Master I/O),
 * also produced as NCR89C100. See
 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR89C100.txt
 * and
 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR53C9X.txt
 *
 * On Macintosh Quadra it is a NCR53C96.
 */
45 static void esp_raise_irq(ESPState
*s
)
47 if (!(s
->rregs
[ESP_RSTAT
] & STAT_INT
)) {
48 s
->rregs
[ESP_RSTAT
] |= STAT_INT
;
49 qemu_irq_raise(s
->irq
);
50 trace_esp_raise_irq();
54 static void esp_lower_irq(ESPState
*s
)
56 if (s
->rregs
[ESP_RSTAT
] & STAT_INT
) {
57 s
->rregs
[ESP_RSTAT
] &= ~STAT_INT
;
58 qemu_irq_lower(s
->irq
);
59 trace_esp_lower_irq();
63 static void esp_raise_drq(ESPState
*s
)
65 qemu_irq_raise(s
->irq_data
);
66 trace_esp_raise_drq();
69 static void esp_lower_drq(ESPState
*s
)
71 qemu_irq_lower(s
->irq_data
);
72 trace_esp_lower_drq();
75 void esp_dma_enable(ESPState
*s
, int irq
, int level
)
79 trace_esp_dma_enable();
85 trace_esp_dma_disable();
90 void esp_request_cancelled(SCSIRequest
*req
)
92 ESPState
*s
= req
->hba_private
;
94 if (req
== s
->current_req
) {
95 scsi_req_unref(s
->current_req
);
96 s
->current_req
= NULL
;
97 s
->current_dev
= NULL
;
101 static uint32_t esp_get_tc(ESPState
*s
)
105 dmalen
= s
->rregs
[ESP_TCLO
];
106 dmalen
|= s
->rregs
[ESP_TCMID
] << 8;
107 dmalen
|= s
->rregs
[ESP_TCHI
] << 16;
112 static void esp_set_tc(ESPState
*s
, uint32_t dmalen
)
114 s
->rregs
[ESP_TCLO
] = dmalen
;
115 s
->rregs
[ESP_TCMID
] = dmalen
>> 8;
116 s
->rregs
[ESP_TCHI
] = dmalen
>> 16;
119 static uint32_t esp_get_stc(ESPState
*s
)
123 dmalen
= s
->wregs
[ESP_TCLO
];
124 dmalen
|= s
->wregs
[ESP_TCMID
] << 8;
125 dmalen
|= s
->wregs
[ESP_TCHI
] << 16;
130 static uint8_t esp_pdma_read(ESPState
*s
)
135 val
= s
->cmdbuf
[s
->cmdlen
++];
137 val
= s
->ti_buf
[s
->ti_rptr
++];
143 static void esp_pdma_write(ESPState
*s
, uint8_t val
)
145 uint32_t dmalen
= esp_get_tc(s
);
152 s
->cmdbuf
[s
->cmdlen
++] = val
;
154 s
->ti_buf
[s
->ti_wptr
++] = val
;
158 esp_set_tc(s
, dmalen
);
161 static int esp_select(ESPState
*s
)
165 target
= s
->wregs
[ESP_WBUSID
] & BUSID_DID
;
171 if (s
->current_req
) {
172 /* Started a new command before the old one finished. Cancel it. */
173 scsi_req_cancel(s
->current_req
);
177 s
->current_dev
= scsi_device_find(&s
->bus
, 0, target
, 0);
178 if (!s
->current_dev
) {
180 s
->rregs
[ESP_RSTAT
] = 0;
181 s
->rregs
[ESP_RINTR
] |= INTR_DC
;
182 s
->rregs
[ESP_RSEQ
] = SEQ_0
;
189 static uint32_t get_cmd(ESPState
*s
)
191 uint8_t *buf
= s
->cmdbuf
;
195 target
= s
->wregs
[ESP_WBUSID
] & BUSID_DID
;
197 dmalen
= esp_get_tc(s
);
198 if (dmalen
> ESP_CMDBUF_SZ
) {
201 if (s
->dma_memory_read
) {
202 s
->dma_memory_read(s
->dma_opaque
, buf
, dmalen
);
204 if (esp_select(s
) < 0) {
212 if (dmalen
> TI_BUFSZ
) {
215 memcpy(buf
, s
->ti_buf
, dmalen
);
216 buf
[0] = buf
[2] >> 5;
218 trace_esp_get_cmd(dmalen
, target
);
220 if (esp_select(s
) < 0) {
226 static void do_busid_cmd(ESPState
*s
, uint8_t *buf
, uint8_t busid
)
230 SCSIDevice
*current_lun
;
232 trace_esp_do_busid_cmd(busid
);
234 current_lun
= scsi_device_find(&s
->bus
, 0, s
->current_dev
->id
, lun
);
235 s
->current_req
= scsi_req_new(current_lun
, 0, lun
, buf
, s
);
236 datalen
= scsi_req_enqueue(s
->current_req
);
237 s
->ti_size
= datalen
;
239 s
->rregs
[ESP_RSTAT
] = STAT_TC
;
242 s
->rregs
[ESP_RSTAT
] |= STAT_DI
;
244 s
->rregs
[ESP_RSTAT
] |= STAT_DO
;
246 scsi_req_continue(s
->current_req
);
248 s
->rregs
[ESP_RINTR
] |= INTR_BS
| INTR_FC
;
249 s
->rregs
[ESP_RSEQ
] = SEQ_CD
;
254 static void do_cmd(ESPState
*s
)
256 uint8_t *buf
= s
->cmdbuf
;
257 uint8_t busid
= buf
[0];
259 do_busid_cmd(s
, &buf
[1], busid
);
262 static void satn_pdma_cb(ESPState
*s
)
270 static void handle_satn(ESPState
*s
)
274 if (s
->dma
&& !s
->dma_enabled
) {
275 s
->dma_cb
= handle_satn
;
278 s
->pdma_cb
= satn_pdma_cb
;
283 } else if (cmdlen
== 0) {
286 /* Target present, but no cmd yet - switch to command phase */
287 s
->rregs
[ESP_RSEQ
] = SEQ_CD
;
288 s
->rregs
[ESP_RSTAT
] = STAT_CD
;
292 static void s_without_satn_pdma_cb(ESPState
*s
)
296 do_busid_cmd(s
, s
->cmdbuf
, 0);
300 static void handle_s_without_atn(ESPState
*s
)
304 if (s
->dma
&& !s
->dma_enabled
) {
305 s
->dma_cb
= handle_s_without_atn
;
308 s
->pdma_cb
= s_without_satn_pdma_cb
;
312 do_busid_cmd(s
, s
->cmdbuf
, 0);
313 } else if (cmdlen
== 0) {
316 /* Target present, but no cmd yet - switch to command phase */
317 s
->rregs
[ESP_RSEQ
] = SEQ_CD
;
318 s
->rregs
[ESP_RSTAT
] = STAT_CD
;
322 static void satn_stop_pdma_cb(ESPState
*s
)
326 trace_esp_handle_satn_stop(s
->cmdlen
);
328 s
->rregs
[ESP_RSTAT
] = STAT_TC
| STAT_CD
;
329 s
->rregs
[ESP_RINTR
] |= INTR_BS
| INTR_FC
;
330 s
->rregs
[ESP_RSEQ
] = SEQ_CD
;
335 static void handle_satn_stop(ESPState
*s
)
339 if (s
->dma
&& !s
->dma_enabled
) {
340 s
->dma_cb
= handle_satn_stop
;
343 s
->pdma_cb
= satn_stop_pdma_cb
;
346 trace_esp_handle_satn_stop(s
->cmdlen
);
349 s
->rregs
[ESP_RSTAT
] = STAT_CD
;
350 s
->rregs
[ESP_RINTR
] |= INTR_BS
| INTR_FC
;
351 s
->rregs
[ESP_RSEQ
] = SEQ_CD
;
353 } else if (cmdlen
== 0) {
356 /* Target present, but no cmd yet - switch to command phase */
357 s
->rregs
[ESP_RSEQ
] = SEQ_CD
;
358 s
->rregs
[ESP_RSTAT
] = STAT_CD
;
362 static void write_response_pdma_cb(ESPState
*s
)
364 s
->rregs
[ESP_RSTAT
] = STAT_TC
| STAT_ST
;
365 s
->rregs
[ESP_RINTR
] |= INTR_BS
| INTR_FC
;
366 s
->rregs
[ESP_RSEQ
] = SEQ_CD
;
370 static void write_response(ESPState
*s
)
372 trace_esp_write_response(s
->status
);
373 s
->ti_buf
[0] = s
->status
;
376 if (s
->dma_memory_write
) {
377 s
->dma_memory_write(s
->dma_opaque
, s
->ti_buf
, 2);
378 s
->rregs
[ESP_RSTAT
] = STAT_TC
| STAT_ST
;
379 s
->rregs
[ESP_RINTR
] |= INTR_BS
| INTR_FC
;
380 s
->rregs
[ESP_RSEQ
] = SEQ_CD
;
382 s
->pdma_cb
= write_response_pdma_cb
;
390 s
->rregs
[ESP_RFLAGS
] = 2;
395 static void esp_dma_done(ESPState
*s
)
397 s
->rregs
[ESP_RSTAT
] |= STAT_TC
;
398 s
->rregs
[ESP_RINTR
] |= INTR_BS
;
399 s
->rregs
[ESP_RSEQ
] = 0;
400 s
->rregs
[ESP_RFLAGS
] = 0;
405 static void do_dma_pdma_cb(ESPState
*s
)
407 int to_device
= ((s
->rregs
[ESP_RSTAT
] & 7) == STAT_DO
);
420 /* Copy FIFO data to device */
421 len
= MIN(s
->ti_wptr
, TI_BUFSZ
);
422 memcpy(s
->async_buf
, s
->ti_buf
, len
);
428 if (s
->async_len
== 0) {
429 scsi_req_continue(s
->current_req
);
433 if (esp_get_tc(s
) == 0) {
440 if (s
->async_len
== 0) {
441 if (s
->current_req
) {
442 scsi_req_continue(s
->current_req
);
446 * If there is still data to be read from the device then
447 * complete the DMA operation immediately. Otherwise defer
448 * until the scsi layer has completed.
450 if (esp_get_tc(s
) != 0 || s
->ti_size
== 0) {
455 if (esp_get_tc(s
) != 0) {
456 /* Copy device data to FIFO */
459 len
= MIN(s
->async_len
, TI_BUFSZ
);
460 memcpy(s
->ti_buf
, s
->async_buf
, len
);
465 esp_set_tc(s
, esp_get_tc(s
) - len
);
469 /* Partially filled a scsi buffer. Complete immediately. */
475 static void esp_do_dma(ESPState
*s
)
478 int to_device
= ((s
->rregs
[ESP_RSTAT
] & 7) == STAT_DO
);
483 * handle_ti_cmd() case: esp_do_dma() is called only from
484 * handle_ti_cmd() with do_cmd != NULL (see the assert())
486 trace_esp_do_dma(s
->cmdlen
, len
);
487 assert(s
->cmdlen
<= sizeof(s
->cmdbuf
) &&
488 len
<= sizeof(s
->cmdbuf
) - s
->cmdlen
);
489 if (s
->dma_memory_read
) {
490 s
->dma_memory_read(s
->dma_opaque
, &s
->cmdbuf
[s
->cmdlen
], len
);
492 s
->pdma_cb
= do_dma_pdma_cb
;
496 trace_esp_handle_ti_cmd(s
->cmdlen
);
503 if (s
->async_len
== 0) {
504 /* Defer until data is available. */
507 if (len
> s
->async_len
) {
511 if (s
->dma_memory_read
) {
512 s
->dma_memory_read(s
->dma_opaque
, s
->async_buf
, len
);
514 s
->pdma_cb
= do_dma_pdma_cb
;
519 if (s
->dma_memory_write
) {
520 s
->dma_memory_write(s
->dma_opaque
, s
->async_buf
, len
);
522 /* Copy device data to FIFO */
523 len
= MIN(len
, TI_BUFSZ
- s
->ti_wptr
);
524 memcpy(&s
->ti_buf
[s
->ti_wptr
], s
->async_buf
, len
);
529 esp_set_tc(s
, esp_get_tc(s
) - len
);
530 s
->pdma_cb
= do_dma_pdma_cb
;
533 /* Indicate transfer to FIFO is complete */
534 s
->rregs
[ESP_RSTAT
] |= STAT_TC
;
538 esp_set_tc(s
, esp_get_tc(s
) - len
);
546 if (s
->async_len
== 0) {
547 scsi_req_continue(s
->current_req
);
549 * If there is still data to be read from the device then
550 * complete the DMA operation immediately. Otherwise defer
551 * until the scsi layer has completed.
553 if (to_device
|| esp_get_tc(s
) != 0 || s
->ti_size
== 0) {
558 /* Partially filled a scsi buffer. Complete immediately. */
563 static void esp_report_command_complete(ESPState
*s
, uint32_t status
)
565 trace_esp_command_complete();
566 if (s
->ti_size
!= 0) {
567 trace_esp_command_complete_unexpected();
572 trace_esp_command_complete_fail();
575 s
->rregs
[ESP_RSTAT
] = STAT_ST
;
578 if (s
->current_req
) {
579 scsi_req_unref(s
->current_req
);
580 s
->current_req
= NULL
;
581 s
->current_dev
= NULL
;
585 void esp_command_complete(SCSIRequest
*req
, size_t resid
)
587 ESPState
*s
= req
->hba_private
;
589 if (s
->rregs
[ESP_RSTAT
] & STAT_INT
) {
591 * Defer handling command complete until the previous
592 * interrupt has been handled.
594 trace_esp_command_complete_deferred();
595 s
->deferred_status
= req
->status
;
596 s
->deferred_complete
= true;
599 esp_report_command_complete(s
, req
->status
);
602 void esp_transfer_data(SCSIRequest
*req
, uint32_t len
)
604 ESPState
*s
= req
->hba_private
;
605 uint32_t dmalen
= esp_get_tc(s
);
608 trace_esp_transfer_data(dmalen
, s
->ti_size
);
610 s
->async_buf
= scsi_req_get_buf(req
);
613 } else if (s
->ti_size
<= 0) {
615 * If this was the last part of a DMA transfer then the
616 * completion interrupt is deferred to here.
623 static void handle_ti(ESPState
*s
)
627 if (s
->dma
&& !s
->dma_enabled
) {
628 s
->dma_cb
= handle_ti
;
632 dmalen
= esp_get_tc(s
);
634 trace_esp_handle_ti(dmalen
);
635 s
->rregs
[ESP_RSTAT
] &= ~STAT_TC
;
637 } else if (s
->do_cmd
) {
638 trace_esp_handle_ti_cmd(s
->cmdlen
);
646 void esp_hard_reset(ESPState
*s
)
648 memset(s
->rregs
, 0, ESP_REGS
);
649 memset(s
->wregs
, 0, ESP_REGS
);
658 s
->rregs
[ESP_CFG1
] = 7;
661 static void esp_soft_reset(ESPState
*s
)
663 qemu_irq_lower(s
->irq
);
664 qemu_irq_lower(s
->irq_data
);
668 static void parent_esp_reset(ESPState
*s
, int irq
, int level
)
675 uint64_t esp_reg_read(ESPState
*s
, uint32_t saddr
)
681 if ((s
->rregs
[ESP_RSTAT
] & STAT_PIO_MASK
) == 0) {
683 qemu_log_mask(LOG_UNIMP
, "esp: PIO data read not implemented\n");
684 s
->rregs
[ESP_FIFO
] = 0;
685 } else if (s
->ti_rptr
< s
->ti_wptr
) {
687 s
->rregs
[ESP_FIFO
] = s
->ti_buf
[s
->ti_rptr
++];
689 if (s
->ti_rptr
== s
->ti_wptr
) {
693 val
= s
->rregs
[ESP_FIFO
];
697 * Clear sequence step, interrupt register and all status bits
700 val
= s
->rregs
[ESP_RINTR
];
701 s
->rregs
[ESP_RINTR
] = 0;
702 s
->rregs
[ESP_RSTAT
] &= ~STAT_TC
;
703 s
->rregs
[ESP_RSEQ
] = SEQ_0
;
705 if (s
->deferred_complete
) {
706 esp_report_command_complete(s
, s
->deferred_status
);
707 s
->deferred_complete
= false;
711 /* Return the unique id if the value has never been written */
712 if (!s
->tchi_written
) {
715 val
= s
->rregs
[saddr
];
719 val
= s
->rregs
[saddr
];
723 trace_esp_mem_readb(saddr
, val
);
727 void esp_reg_write(ESPState
*s
, uint32_t saddr
, uint64_t val
)
729 trace_esp_mem_writeb(saddr
, s
->wregs
[saddr
], val
);
732 s
->tchi_written
= true;
736 s
->rregs
[ESP_RSTAT
] &= ~STAT_TC
;
740 if (s
->cmdlen
< ESP_CMDBUF_SZ
) {
741 s
->cmdbuf
[s
->cmdlen
++] = val
& 0xff;
743 trace_esp_error_fifo_overrun();
745 } else if (s
->ti_wptr
== TI_BUFSZ
- 1) {
746 trace_esp_error_fifo_overrun();
749 s
->ti_buf
[s
->ti_wptr
++] = val
& 0xff;
753 s
->rregs
[saddr
] = val
;
756 /* Reload DMA counter. */
757 if (esp_get_stc(s
) == 0) {
758 esp_set_tc(s
, 0x10000);
760 esp_set_tc(s
, esp_get_stc(s
));
765 switch (val
& CMD_CMD
) {
767 trace_esp_mem_writeb_cmd_nop(val
);
770 trace_esp_mem_writeb_cmd_flush(val
);
776 trace_esp_mem_writeb_cmd_reset(val
);
780 trace_esp_mem_writeb_cmd_bus_reset(val
);
781 if (!(s
->wregs
[ESP_CFG1
] & CFG1_RESREPT
)) {
782 s
->rregs
[ESP_RINTR
] |= INTR_RST
;
787 trace_esp_mem_writeb_cmd_ti(val
);
791 trace_esp_mem_writeb_cmd_iccs(val
);
793 s
->rregs
[ESP_RINTR
] |= INTR_FC
;
794 s
->rregs
[ESP_RSTAT
] |= STAT_MI
;
797 trace_esp_mem_writeb_cmd_msgacc(val
);
798 s
->rregs
[ESP_RINTR
] |= INTR_DC
;
799 s
->rregs
[ESP_RSEQ
] = 0;
800 s
->rregs
[ESP_RFLAGS
] = 0;
804 trace_esp_mem_writeb_cmd_pad(val
);
805 s
->rregs
[ESP_RSTAT
] = STAT_TC
;
806 s
->rregs
[ESP_RINTR
] |= INTR_FC
;
807 s
->rregs
[ESP_RSEQ
] = 0;
810 trace_esp_mem_writeb_cmd_satn(val
);
813 trace_esp_mem_writeb_cmd_rstatn(val
);
816 trace_esp_mem_writeb_cmd_sel(val
);
817 handle_s_without_atn(s
);
820 trace_esp_mem_writeb_cmd_selatn(val
);
824 trace_esp_mem_writeb_cmd_selatns(val
);
828 trace_esp_mem_writeb_cmd_ensel(val
);
829 s
->rregs
[ESP_RINTR
] = 0;
832 trace_esp_mem_writeb_cmd_dissel(val
);
833 s
->rregs
[ESP_RINTR
] = 0;
837 trace_esp_error_unhandled_command(val
);
841 case ESP_WBUSID
... ESP_WSYNO
:
844 case ESP_CFG2
: case ESP_CFG3
:
845 case ESP_RES3
: case ESP_RES4
:
846 s
->rregs
[saddr
] = val
;
848 case ESP_WCCF
... ESP_WTEST
:
851 trace_esp_error_invalid_write(val
, saddr
);
854 s
->wregs
[saddr
] = val
;
857 static bool esp_mem_accepts(void *opaque
, hwaddr addr
,
858 unsigned size
, bool is_write
,
861 return (size
== 1) || (is_write
&& size
== 4);
864 static bool esp_is_before_version_5(void *opaque
, int version_id
)
866 ESPState
*s
= ESP(opaque
);
868 version_id
= MIN(version_id
, s
->mig_version_id
);
869 return version_id
< 5;
872 static int esp_pre_save(void *opaque
)
874 ESPState
*s
= ESP(opaque
);
876 s
->mig_version_id
= vmstate_esp
.version_id
;
880 static int esp_post_load(void *opaque
, int version_id
)
882 ESPState
*s
= ESP(opaque
);
884 version_id
= MIN(version_id
, s
->mig_version_id
);
886 if (version_id
< 5) {
887 esp_set_tc(s
, s
->mig_dma_left
);
890 s
->mig_version_id
= vmstate_esp
.version_id
;
894 const VMStateDescription vmstate_esp
= {
897 .minimum_version_id
= 3,
898 .pre_save
= esp_pre_save
,
899 .post_load
= esp_post_load
,
900 .fields
= (VMStateField
[]) {
901 VMSTATE_BUFFER(rregs
, ESPState
),
902 VMSTATE_BUFFER(wregs
, ESPState
),
903 VMSTATE_INT32(ti_size
, ESPState
),
904 VMSTATE_UINT32(ti_rptr
, ESPState
),
905 VMSTATE_UINT32(ti_wptr
, ESPState
),
906 VMSTATE_BUFFER(ti_buf
, ESPState
),
907 VMSTATE_UINT32(status
, ESPState
),
908 VMSTATE_UINT32(deferred_status
, ESPState
),
909 VMSTATE_BOOL(deferred_complete
, ESPState
),
910 VMSTATE_UINT32(dma
, ESPState
),
911 VMSTATE_PARTIAL_BUFFER(cmdbuf
, ESPState
, 16),
912 VMSTATE_BUFFER_START_MIDDLE_V(cmdbuf
, ESPState
, 16, 4),
913 VMSTATE_UINT32(cmdlen
, ESPState
),
914 VMSTATE_UINT32(do_cmd
, ESPState
),
915 VMSTATE_UINT32_TEST(mig_dma_left
, ESPState
, esp_is_before_version_5
),
916 VMSTATE_END_OF_LIST()
920 static void sysbus_esp_mem_write(void *opaque
, hwaddr addr
,
921 uint64_t val
, unsigned int size
)
923 SysBusESPState
*sysbus
= opaque
;
924 ESPState
*s
= ESP(&sysbus
->esp
);
927 saddr
= addr
>> sysbus
->it_shift
;
928 esp_reg_write(s
, saddr
, val
);
931 static uint64_t sysbus_esp_mem_read(void *opaque
, hwaddr addr
,
934 SysBusESPState
*sysbus
= opaque
;
935 ESPState
*s
= ESP(&sysbus
->esp
);
938 saddr
= addr
>> sysbus
->it_shift
;
939 return esp_reg_read(s
, saddr
);
942 static const MemoryRegionOps sysbus_esp_mem_ops
= {
943 .read
= sysbus_esp_mem_read
,
944 .write
= sysbus_esp_mem_write
,
945 .endianness
= DEVICE_NATIVE_ENDIAN
,
946 .valid
.accepts
= esp_mem_accepts
,
949 static void sysbus_esp_pdma_write(void *opaque
, hwaddr addr
,
950 uint64_t val
, unsigned int size
)
952 SysBusESPState
*sysbus
= opaque
;
953 ESPState
*s
= ESP(&sysbus
->esp
);
956 trace_esp_pdma_write(size
);
960 esp_pdma_write(s
, val
);
963 esp_pdma_write(s
, val
>> 8);
964 esp_pdma_write(s
, val
);
967 dmalen
= esp_get_tc(s
);
968 if (dmalen
== 0 || (s
->ti_wptr
== TI_BUFSZ
)) {
973 static uint64_t sysbus_esp_pdma_read(void *opaque
, hwaddr addr
,
976 SysBusESPState
*sysbus
= opaque
;
977 ESPState
*s
= ESP(&sysbus
->esp
);
980 trace_esp_pdma_read(size
);
984 val
= esp_pdma_read(s
);
987 val
= esp_pdma_read(s
);
988 val
= (val
<< 8) | esp_pdma_read(s
);
991 if (s
->ti_rptr
== s
->ti_wptr
) {
999 static const MemoryRegionOps sysbus_esp_pdma_ops
= {
1000 .read
= sysbus_esp_pdma_read
,
1001 .write
= sysbus_esp_pdma_write
,
1002 .endianness
= DEVICE_NATIVE_ENDIAN
,
1003 .valid
.min_access_size
= 1,
1004 .valid
.max_access_size
= 4,
1005 .impl
.min_access_size
= 1,
1006 .impl
.max_access_size
= 2,
1009 static const struct SCSIBusInfo esp_scsi_info
= {
1011 .max_target
= ESP_MAX_DEVS
,
1014 .transfer_data
= esp_transfer_data
,
1015 .complete
= esp_command_complete
,
1016 .cancel
= esp_request_cancelled
1019 static void sysbus_esp_gpio_demux(void *opaque
, int irq
, int level
)
1021 SysBusESPState
*sysbus
= SYSBUS_ESP(opaque
);
1022 ESPState
*s
= ESP(&sysbus
->esp
);
1026 parent_esp_reset(s
, irq
, level
);
1029 esp_dma_enable(opaque
, irq
, level
);
1034 static void sysbus_esp_realize(DeviceState
*dev
, Error
**errp
)
1036 SysBusDevice
*sbd
= SYS_BUS_DEVICE(dev
);
1037 SysBusESPState
*sysbus
= SYSBUS_ESP(dev
);
1038 ESPState
*s
= ESP(&sysbus
->esp
);
1040 if (!qdev_realize(DEVICE(s
), NULL
, errp
)) {
1044 sysbus_init_irq(sbd
, &s
->irq
);
1045 sysbus_init_irq(sbd
, &s
->irq_data
);
1046 assert(sysbus
->it_shift
!= -1);
1048 s
->chip_id
= TCHI_FAS100A
;
1049 memory_region_init_io(&sysbus
->iomem
, OBJECT(sysbus
), &sysbus_esp_mem_ops
,
1050 sysbus
, "esp-regs", ESP_REGS
<< sysbus
->it_shift
);
1051 sysbus_init_mmio(sbd
, &sysbus
->iomem
);
1052 memory_region_init_io(&sysbus
->pdma
, OBJECT(sysbus
), &sysbus_esp_pdma_ops
,
1053 sysbus
, "esp-pdma", 4);
1054 sysbus_init_mmio(sbd
, &sysbus
->pdma
);
1056 qdev_init_gpio_in(dev
, sysbus_esp_gpio_demux
, 2);
1058 scsi_bus_new(&s
->bus
, sizeof(s
->bus
), dev
, &esp_scsi_info
, NULL
);
1061 static void sysbus_esp_hard_reset(DeviceState
*dev
)
1063 SysBusESPState
*sysbus
= SYSBUS_ESP(dev
);
1064 ESPState
*s
= ESP(&sysbus
->esp
);
1069 static void sysbus_esp_init(Object
*obj
)
1071 SysBusESPState
*sysbus
= SYSBUS_ESP(obj
);
1073 object_initialize_child(obj
, "esp", &sysbus
->esp
, TYPE_ESP
);
1076 static const VMStateDescription vmstate_sysbus_esp_scsi
= {
1077 .name
= "sysbusespscsi",
1079 .minimum_version_id
= 1,
1080 .fields
= (VMStateField
[]) {
1081 VMSTATE_UINT8_V(esp
.mig_version_id
, SysBusESPState
, 2),
1082 VMSTATE_STRUCT(esp
, SysBusESPState
, 0, vmstate_esp
, ESPState
),
1083 VMSTATE_END_OF_LIST()
1087 static void sysbus_esp_class_init(ObjectClass
*klass
, void *data
)
1089 DeviceClass
*dc
= DEVICE_CLASS(klass
);
1091 dc
->realize
= sysbus_esp_realize
;
1092 dc
->reset
= sysbus_esp_hard_reset
;
1093 dc
->vmsd
= &vmstate_sysbus_esp_scsi
;
1094 set_bit(DEVICE_CATEGORY_STORAGE
, dc
->categories
);
1097 static const TypeInfo sysbus_esp_info
= {
1098 .name
= TYPE_SYSBUS_ESP
,
1099 .parent
= TYPE_SYS_BUS_DEVICE
,
1100 .instance_init
= sysbus_esp_init
,
1101 .instance_size
= sizeof(SysBusESPState
),
1102 .class_init
= sysbus_esp_class_init
,
1105 static void esp_class_init(ObjectClass
*klass
, void *data
)
1107 DeviceClass
*dc
= DEVICE_CLASS(klass
);
1109 /* internal device for sysbusesp/pciespscsi, not user-creatable */
1110 dc
->user_creatable
= false;
1111 set_bit(DEVICE_CATEGORY_STORAGE
, dc
->categories
);
1114 static const TypeInfo esp_info
= {
1116 .parent
= TYPE_DEVICE
,
1117 .instance_size
= sizeof(ESPState
),
1118 .class_init
= esp_class_init
,
1121 static void esp_register_types(void)
1123 type_register_static(&sysbus_esp_info
);
1124 type_register_static(&esp_info
);
1127 type_init(esp_register_types
)