/*
 * QEMU ESP/NCR53C9x emulation
 *
 * Copyright (c) 2005-2006 Fabrice Bellard
 * Copyright (c) 2012 Herve Poussineau
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
26 #include "qemu/osdep.h"
27 #include "hw/sysbus.h"
28 #include "migration/vmstate.h"
30 #include "hw/scsi/esp.h"
33 #include "qemu/module.h"
/*
 * On Sparc32, this is the ESP (NCR53C90) part of chip STP2000 (Master I/O),
 * also produced as NCR89C100. See
 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR89C100.txt
 * and
 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR53C9X.txt
 *
 * On Macintosh Quadra it is a NCR53C96.
 */
45 static void esp_raise_irq(ESPState
*s
)
47 if (!(s
->rregs
[ESP_RSTAT
] & STAT_INT
)) {
48 s
->rregs
[ESP_RSTAT
] |= STAT_INT
;
49 qemu_irq_raise(s
->irq
);
50 trace_esp_raise_irq();
54 static void esp_lower_irq(ESPState
*s
)
56 if (s
->rregs
[ESP_RSTAT
] & STAT_INT
) {
57 s
->rregs
[ESP_RSTAT
] &= ~STAT_INT
;
58 qemu_irq_lower(s
->irq
);
59 trace_esp_lower_irq();
63 static void esp_raise_drq(ESPState
*s
)
65 qemu_irq_raise(s
->irq_data
);
66 trace_esp_raise_drq();
69 static void esp_lower_drq(ESPState
*s
)
71 qemu_irq_lower(s
->irq_data
);
72 trace_esp_lower_drq();
/*
 * Gate DMA for the chip from an external DMA controller line.
 * NOTE(review): extraction dropped the original lines around the two
 * trace calls below (braces and the level-dependent enable/disable
 * logic, original lines 76-78, 80-84 and 86-88); restore them from
 * upstream before building.
 */
75 void esp_dma_enable(ESPState
*s
, int irq
, int level
)
/* presumably the level != 0 (enable) branch -- TODO confirm upstream */
79 trace_esp_dma_enable();
/* presumably the level == 0 (disable) branch -- TODO confirm upstream */
85 trace_esp_dma_disable();
90 void esp_request_cancelled(SCSIRequest
*req
)
92 ESPState
*s
= req
->hba_private
;
94 if (req
== s
->current_req
) {
95 scsi_req_unref(s
->current_req
);
96 s
->current_req
= NULL
;
97 s
->current_dev
= NULL
;
101 static uint32_t esp_get_tc(ESPState
*s
)
105 dmalen
= s
->rregs
[ESP_TCLO
];
106 dmalen
|= s
->rregs
[ESP_TCMID
] << 8;
107 dmalen
|= s
->rregs
[ESP_TCHI
] << 16;
112 static void esp_set_tc(ESPState
*s
, uint32_t dmalen
)
114 s
->rregs
[ESP_TCLO
] = dmalen
;
115 s
->rregs
[ESP_TCMID
] = dmalen
>> 8;
116 s
->rregs
[ESP_TCHI
] = dmalen
>> 16;
119 static uint32_t esp_get_stc(ESPState
*s
)
123 dmalen
= s
->wregs
[ESP_TCLO
];
124 dmalen
|= s
->wregs
[ESP_TCMID
] << 8;
125 dmalen
|= s
->wregs
[ESP_TCHI
] << 16;
130 static void set_pdma(ESPState
*s
, enum pdma_origin_id origin
)
132 s
->pdma_origin
= origin
;
135 static uint8_t esp_pdma_read(ESPState
*s
)
137 uint32_t dmalen
= esp_get_tc(s
);
144 switch (s
->pdma_origin
) {
147 val
= s
->cmdbuf
[s
->cmdlen
++];
149 val
= s
->ti_buf
[s
->ti_rptr
++];
153 val
= s
->async_buf
[0];
154 if (s
->async_len
> 0) {
160 g_assert_not_reached();
165 esp_set_tc(s
, dmalen
);
170 static void esp_pdma_write(ESPState
*s
, uint8_t val
)
172 uint32_t dmalen
= esp_get_tc(s
);
178 switch (s
->pdma_origin
) {
181 s
->cmdbuf
[s
->cmdlen
++] = val
;
183 s
->ti_buf
[s
->ti_wptr
++] = val
;
187 s
->async_buf
[0] = val
;
188 if (s
->async_len
> 0) {
194 g_assert_not_reached();
199 esp_set_tc(s
, dmalen
);
202 static int esp_select(ESPState
*s
)
206 target
= s
->wregs
[ESP_WBUSID
] & BUSID_DID
;
212 if (s
->current_req
) {
213 /* Started a new command before the old one finished. Cancel it. */
214 scsi_req_cancel(s
->current_req
);
218 s
->current_dev
= scsi_device_find(&s
->bus
, 0, target
, 0);
219 if (!s
->current_dev
) {
221 s
->rregs
[ESP_RSTAT
] = 0;
222 s
->rregs
[ESP_RINTR
] = INTR_DC
;
223 s
->rregs
[ESP_RSEQ
] = SEQ_0
;
230 static uint32_t get_cmd(ESPState
*s
)
232 uint8_t *buf
= s
->cmdbuf
;
236 target
= s
->wregs
[ESP_WBUSID
] & BUSID_DID
;
238 dmalen
= esp_get_tc(s
);
239 if (dmalen
> ESP_CMDBUF_SZ
) {
242 if (s
->dma_memory_read
) {
243 s
->dma_memory_read(s
->dma_opaque
, buf
, dmalen
);
246 if (esp_select(s
) < 0) {
254 if (dmalen
> TI_BUFSZ
) {
257 memcpy(buf
, s
->ti_buf
, dmalen
);
258 buf
[0] = buf
[2] >> 5;
260 trace_esp_get_cmd(dmalen
, target
);
262 if (esp_select(s
) < 0) {
268 static void do_busid_cmd(ESPState
*s
, uint8_t *buf
, uint8_t busid
)
272 SCSIDevice
*current_lun
;
274 trace_esp_do_busid_cmd(busid
);
276 current_lun
= scsi_device_find(&s
->bus
, 0, s
->current_dev
->id
, lun
);
277 s
->current_req
= scsi_req_new(current_lun
, 0, lun
, buf
, s
);
278 datalen
= scsi_req_enqueue(s
->current_req
);
279 s
->ti_size
= datalen
;
281 s
->rregs
[ESP_RSTAT
] = STAT_TC
;
284 s
->rregs
[ESP_RSTAT
] |= STAT_DI
;
286 s
->rregs
[ESP_RSTAT
] |= STAT_DO
;
288 scsi_req_continue(s
->current_req
);
290 s
->rregs
[ESP_RINTR
] = INTR_BS
| INTR_FC
;
291 s
->rregs
[ESP_RSEQ
] = SEQ_CD
;
295 static void do_cmd(ESPState
*s
)
297 uint8_t *buf
= s
->cmdbuf
;
298 uint8_t busid
= buf
[0];
300 do_busid_cmd(s
, &buf
[1], busid
);
/*
 * PDMA completion callback for the "select with ATN" sequence.
 * NOTE(review): the whole body (original lines 304-309) was lost in
 * extraction; only the signature survives.  Restore from upstream.
 */
303 static void satn_pdma_cb(ESPState
*s
)
311 static void handle_satn(ESPState
*s
)
315 if (s
->dma
&& !s
->dma_enabled
) {
316 s
->dma_cb
= handle_satn
;
319 s
->pdma_cb
= satn_pdma_cb
;
324 } else if (cmdlen
== 0) {
327 /* Target present, but no cmd yet - switch to command phase */
328 s
->rregs
[ESP_RSEQ
] = SEQ_CD
;
329 s
->rregs
[ESP_RSTAT
] = STAT_CD
;
/*
 * PDMA completion callback for "select without ATN": dispatch the
 * queued CDB with a zero bus-ID message byte.
 * NOTE(review): original lines 334-336 and the closing brace are
 * missing (likely a guard and the opening brace).
 */
333 static void s_without_satn_pdma_cb(ESPState
*s
)
337 do_busid_cmd(s
, s
->cmdbuf
, 0);
341 static void handle_s_without_atn(ESPState
*s
)
345 if (s
->dma
&& !s
->dma_enabled
) {
346 s
->dma_cb
= handle_s_without_atn
;
349 s
->pdma_cb
= s_without_satn_pdma_cb
;
353 do_busid_cmd(s
, s
->cmdbuf
, 0);
354 } else if (cmdlen
== 0) {
357 /* Target present, but no cmd yet - switch to command phase */
358 s
->rregs
[ESP_RSEQ
] = SEQ_CD
;
359 s
->rregs
[ESP_RSTAT
] = STAT_CD
;
/*
 * PDMA completion callback for "select with ATN and stop": report
 * terminal count + command phase with a bus-service/function-complete
 * interrupt.
 * NOTE(review): original lines 364-366, 368 and 372+ are missing
 * (braces and surrounding control flow); restore from upstream.
 */
363 static void satn_stop_pdma_cb(ESPState
*s
)
367 trace_esp_handle_satn_stop(s
->cmdlen
);
/* Transfer count zero, bus stopped in command phase */
369 s
->rregs
[ESP_RSTAT
] = STAT_TC
| STAT_CD
;
370 s
->rregs
[ESP_RINTR
] = INTR_BS
| INTR_FC
;
371 s
->rregs
[ESP_RSEQ
] = SEQ_CD
;
376 static void handle_satn_stop(ESPState
*s
)
380 if (s
->dma
&& !s
->dma_enabled
) {
381 s
->dma_cb
= handle_satn_stop
;
384 s
->pdma_cb
= satn_stop_pdma_cb
;
387 trace_esp_handle_satn_stop(s
->cmdlen
);
390 s
->rregs
[ESP_RSTAT
] = STAT_TC
| STAT_CD
;
391 s
->rregs
[ESP_RINTR
] = INTR_BS
| INTR_FC
;
392 s
->rregs
[ESP_RSEQ
] = SEQ_CD
;
394 } else if (cmdlen
== 0) {
397 /* Target present, but no cmd yet - switch to command phase */
398 s
->rregs
[ESP_RSEQ
] = SEQ_CD
;
399 s
->rregs
[ESP_RSTAT
] = STAT_CD
;
/*
 * PDMA completion callback for the status/message-in phase of
 * write_response(): report terminal count + status phase with a
 * bus-service/function-complete interrupt.
 * NOTE(review): original lines 404 and 408-409 are missing (braces and
 * at least one trailing statement); restore from upstream.
 */
403 static void write_response_pdma_cb(ESPState
*s
)
405 s
->rregs
[ESP_RSTAT
] = STAT_TC
| STAT_ST
;
406 s
->rregs
[ESP_RINTR
] = INTR_BS
| INTR_FC
;
407 s
->rregs
[ESP_RSEQ
] = SEQ_CD
;
411 static void write_response(ESPState
*s
)
413 trace_esp_write_response(s
->status
);
414 s
->ti_buf
[0] = s
->status
;
417 if (s
->dma_memory_write
) {
418 s
->dma_memory_write(s
->dma_opaque
, s
->ti_buf
, 2);
419 s
->rregs
[ESP_RSTAT
] = STAT_TC
| STAT_ST
;
420 s
->rregs
[ESP_RINTR
] = INTR_BS
| INTR_FC
;
421 s
->rregs
[ESP_RSEQ
] = SEQ_CD
;
424 s
->pdma_cb
= write_response_pdma_cb
;
432 s
->rregs
[ESP_RFLAGS
] = 2;
/*
 * Signal DMA completion to the guest: latch terminal count, raise a
 * bus-service interrupt, and clear the sequence-step and FIFO-flags
 * registers.
 * NOTE(review): original lines 438 and 443-444 are missing (braces and
 * at least one trailing statement); restore from upstream.
 */
437 static void esp_dma_done(ESPState
*s
)
439 s
->rregs
[ESP_RSTAT
] |= STAT_TC
;
440 s
->rregs
[ESP_RINTR
] = INTR_BS
;
441 s
->rregs
[ESP_RSEQ
] = 0;
442 s
->rregs
[ESP_RFLAGS
] = 0;
447 static void do_dma_pdma_cb(ESPState
*s
)
449 int to_device
= ((s
->rregs
[ESP_RSTAT
] & 7) == STAT_DO
);
458 if (s
->async_len
== 0) {
459 scsi_req_continue(s
->current_req
);
461 * If there is still data to be read from the device then
462 * complete the DMA operation immediately. Otherwise defer
463 * until the scsi layer has completed.
465 if (to_device
|| esp_get_tc(s
) != 0 || s
->ti_size
== 0) {
470 /* Partially filled a scsi buffer. Complete immediately. */
474 static void esp_do_dma(ESPState
*s
)
477 int to_device
= ((s
->rregs
[ESP_RSTAT
] & 7) == STAT_DO
);
482 * handle_ti_cmd() case: esp_do_dma() is called only from
483 * handle_ti_cmd() with do_cmd != NULL (see the assert())
485 trace_esp_do_dma(s
->cmdlen
, len
);
486 assert(s
->cmdlen
<= sizeof(s
->cmdbuf
) &&
487 len
<= sizeof(s
->cmdbuf
) - s
->cmdlen
);
488 if (s
->dma_memory_read
) {
489 s
->dma_memory_read(s
->dma_opaque
, &s
->cmdbuf
[s
->cmdlen
], len
);
492 s
->pdma_cb
= do_dma_pdma_cb
;
496 trace_esp_handle_ti_cmd(s
->cmdlen
);
503 if (s
->async_len
== 0) {
504 /* Defer until data is available. */
507 if (len
> s
->async_len
) {
511 if (s
->dma_memory_read
) {
512 s
->dma_memory_read(s
->dma_opaque
, s
->async_buf
, len
);
515 s
->pdma_cb
= do_dma_pdma_cb
;
520 if (s
->dma_memory_write
) {
521 s
->dma_memory_write(s
->dma_opaque
, s
->async_buf
, len
);
524 s
->pdma_cb
= do_dma_pdma_cb
;
529 esp_set_tc(s
, esp_get_tc(s
) - len
);
537 if (s
->async_len
== 0) {
538 scsi_req_continue(s
->current_req
);
540 * If there is still data to be read from the device then
541 * complete the DMA operation immediately. Otherwise defer
542 * until the scsi layer has completed.
544 if (to_device
|| esp_get_tc(s
) != 0 || s
->ti_size
== 0) {
549 /* Partially filled a scsi buffer. Complete immediately. */
553 static void esp_report_command_complete(ESPState
*s
, uint32_t status
)
555 trace_esp_command_complete();
556 if (s
->ti_size
!= 0) {
557 trace_esp_command_complete_unexpected();
562 trace_esp_command_complete_fail();
565 s
->rregs
[ESP_RSTAT
] = STAT_ST
;
567 if (s
->current_req
) {
568 scsi_req_unref(s
->current_req
);
569 s
->current_req
= NULL
;
570 s
->current_dev
= NULL
;
574 void esp_command_complete(SCSIRequest
*req
, size_t resid
)
576 ESPState
*s
= req
->hba_private
;
578 if (s
->rregs
[ESP_RSTAT
] & STAT_INT
) {
580 * Defer handling command complete until the previous
581 * interrupt has been handled.
583 trace_esp_command_complete_deferred();
584 s
->deferred_status
= req
->status
;
585 s
->deferred_complete
= true;
588 esp_report_command_complete(s
, req
->status
);
591 void esp_transfer_data(SCSIRequest
*req
, uint32_t len
)
593 ESPState
*s
= req
->hba_private
;
594 uint32_t dmalen
= esp_get_tc(s
);
597 trace_esp_transfer_data(dmalen
, s
->ti_size
);
599 s
->async_buf
= scsi_req_get_buf(req
);
602 } else if (s
->ti_size
<= 0) {
604 * If this was the last part of a DMA transfer then the
605 * completion interrupt is deferred to here.
611 static void handle_ti(ESPState
*s
)
615 if (s
->dma
&& !s
->dma_enabled
) {
616 s
->dma_cb
= handle_ti
;
620 dmalen
= esp_get_tc(s
);
622 trace_esp_handle_ti(dmalen
);
623 s
->rregs
[ESP_RSTAT
] &= ~STAT_TC
;
625 } else if (s
->do_cmd
) {
626 trace_esp_handle_ti_cmd(s
->cmdlen
);
/*
 * Full chip reset: clear both register files, then set the CFG1
 * register to the default controller bus ID (7).
 * NOTE(review): original lines 635 and 638-645 are missing (braces and
 * the reset of other ESPState fields); restore from upstream.
 */
634 void esp_hard_reset(ESPState
*s
)
636 memset(s
->rregs
, 0, ESP_REGS
);
637 memset(s
->wregs
, 0, ESP_REGS
);
/* default SCSI bus ID of the controller */
646 s
->rregs
[ESP_CFG1
] = 7;
/*
 * Soft reset: drop both the interrupt and DMA-request output lines.
 * NOTE(review): original lines 650 and 653-654 are missing (braces and
 * presumably a call into the hard-reset path -- confirm upstream).
 */
649 static void esp_soft_reset(ESPState
*s
)
651 qemu_irq_lower(s
->irq
);
652 qemu_irq_lower(s
->irq_data
);
/*
 * Reset handler invoked from the GPIO demux (sysbus_esp_gpio_demux).
 * NOTE(review): the body (original lines 657-661) was lost in
 * extraction; only the signature survives.  Restore from upstream.
 */
656 static void parent_esp_reset(ESPState
*s
, int irq
, int level
)
663 uint64_t esp_reg_read(ESPState
*s
, uint32_t saddr
)
669 if ((s
->rregs
[ESP_RSTAT
] & STAT_PIO_MASK
) == 0) {
671 qemu_log_mask(LOG_UNIMP
, "esp: PIO data read not implemented\n");
672 s
->rregs
[ESP_FIFO
] = 0;
673 } else if (s
->ti_rptr
< s
->ti_wptr
) {
675 s
->rregs
[ESP_FIFO
] = s
->ti_buf
[s
->ti_rptr
++];
677 if (s
->ti_rptr
== s
->ti_wptr
) {
681 val
= s
->rregs
[ESP_FIFO
];
685 * Clear sequence step, interrupt register and all status bits
688 val
= s
->rregs
[ESP_RINTR
];
689 s
->rregs
[ESP_RINTR
] = 0;
690 s
->rregs
[ESP_RSTAT
] &= ~STAT_TC
;
691 s
->rregs
[ESP_RSEQ
] = SEQ_CD
;
693 if (s
->deferred_complete
) {
694 esp_report_command_complete(s
, s
->deferred_status
);
695 s
->deferred_complete
= false;
699 /* Return the unique id if the value has never been written */
700 if (!s
->tchi_written
) {
703 val
= s
->rregs
[saddr
];
707 val
= s
->rregs
[saddr
];
711 trace_esp_mem_readb(saddr
, val
);
715 void esp_reg_write(ESPState
*s
, uint32_t saddr
, uint64_t val
)
717 trace_esp_mem_writeb(saddr
, s
->wregs
[saddr
], val
);
720 s
->tchi_written
= true;
724 s
->rregs
[ESP_RSTAT
] &= ~STAT_TC
;
728 if (s
->cmdlen
< ESP_CMDBUF_SZ
) {
729 s
->cmdbuf
[s
->cmdlen
++] = val
& 0xff;
731 trace_esp_error_fifo_overrun();
733 } else if (s
->ti_wptr
== TI_BUFSZ
- 1) {
734 trace_esp_error_fifo_overrun();
737 s
->ti_buf
[s
->ti_wptr
++] = val
& 0xff;
741 s
->rregs
[saddr
] = val
;
744 /* Reload DMA counter. */
745 if (esp_get_stc(s
) == 0) {
746 esp_set_tc(s
, 0x10000);
748 esp_set_tc(s
, esp_get_stc(s
));
753 switch (val
& CMD_CMD
) {
755 trace_esp_mem_writeb_cmd_nop(val
);
758 trace_esp_mem_writeb_cmd_flush(val
);
760 s
->rregs
[ESP_RINTR
] = INTR_FC
;
761 s
->rregs
[ESP_RSEQ
] = 0;
762 s
->rregs
[ESP_RFLAGS
] = 0;
765 trace_esp_mem_writeb_cmd_reset(val
);
769 trace_esp_mem_writeb_cmd_bus_reset(val
);
770 s
->rregs
[ESP_RINTR
] = INTR_RST
;
771 if (!(s
->wregs
[ESP_CFG1
] & CFG1_RESREPT
)) {
776 trace_esp_mem_writeb_cmd_ti(val
);
780 trace_esp_mem_writeb_cmd_iccs(val
);
782 s
->rregs
[ESP_RINTR
] = INTR_FC
;
783 s
->rregs
[ESP_RSTAT
] |= STAT_MI
;
786 trace_esp_mem_writeb_cmd_msgacc(val
);
787 s
->rregs
[ESP_RINTR
] = INTR_DC
;
788 s
->rregs
[ESP_RSEQ
] = 0;
789 s
->rregs
[ESP_RFLAGS
] = 0;
793 trace_esp_mem_writeb_cmd_pad(val
);
794 s
->rregs
[ESP_RSTAT
] = STAT_TC
;
795 s
->rregs
[ESP_RINTR
] = INTR_FC
;
796 s
->rregs
[ESP_RSEQ
] = 0;
799 trace_esp_mem_writeb_cmd_satn(val
);
802 trace_esp_mem_writeb_cmd_rstatn(val
);
805 trace_esp_mem_writeb_cmd_sel(val
);
806 handle_s_without_atn(s
);
809 trace_esp_mem_writeb_cmd_selatn(val
);
813 trace_esp_mem_writeb_cmd_selatns(val
);
817 trace_esp_mem_writeb_cmd_ensel(val
);
818 s
->rregs
[ESP_RINTR
] = 0;
821 trace_esp_mem_writeb_cmd_dissel(val
);
822 s
->rregs
[ESP_RINTR
] = 0;
826 trace_esp_error_unhandled_command(val
);
830 case ESP_WBUSID
... ESP_WSYNO
:
833 case ESP_CFG2
: case ESP_CFG3
:
834 case ESP_RES3
: case ESP_RES4
:
835 s
->rregs
[saddr
] = val
;
837 case ESP_WCCF
... ESP_WTEST
:
840 trace_esp_error_invalid_write(val
, saddr
);
843 s
->wregs
[saddr
] = val
;
/*
 * MMIO .valid.accepts hook: permit single-byte accesses, and 32-bit
 * accesses only for writes.
 * NOTE(review): the tail of the parameter list (original line 848,
 * presumably "MemTxAttrs attrs)") and the braces are missing.
 */
846 static bool esp_mem_accepts(void *opaque
, hwaddr addr
,
847 unsigned size
, bool is_write
,
850 return (size
== 1) || (is_write
&& size
== 4);
/*
 * Migration-subsection predicate: PDMA state is only transferred when
 * no DMA memory callbacks are installed (programmed-DMA path in use).
 * NOTE(review): the tail of the return expression (original line 857)
 * is missing after the trailing "&&"; restore from upstream.
 */
853 static bool esp_pdma_needed(void *opaque
)
855 ESPState
*s
= opaque
;
856 return s
->dma_memory_read
== NULL
&& s
->dma_memory_write
== NULL
&&
/*
 * Migration subsection carrying the PDMA origin tag.
 * NOTE(review): the .name / .version_id initializers (original lines
 * 861-862) and the closing braces are missing; restore from upstream.
 */
860 static const VMStateDescription vmstate_esp_pdma
= {
863 .minimum_version_id
= 2,
864 .needed
= esp_pdma_needed
,
865 .fields
= (VMStateField
[]) {
866 VMSTATE_INT32(pdma_origin
, ESPState
),
867 VMSTATE_END_OF_LIST()
871 static bool esp_is_before_version_5(void *opaque
, int version_id
)
873 ESPState
*s
= ESP(opaque
);
875 version_id
= MIN(version_id
, s
->mig_version_id
);
876 return version_id
< 5;
/*
 * pre_save hook: record the stream format we are about to emit so the
 * destination's post_load can cap the incoming version against it.
 * NOTE(review): original lines 880, 882 and 884-885 are missing
 * (braces and the return statement, presumably "return 0;").
 */
879 static int esp_pre_save(void *opaque
)
881 ESPState
*s
= ESP(opaque
);
883 s
->mig_version_id
= vmstate_esp
.version_id
;
/*
 * post_load hook: for pre-version-5 streams the transfer counter was
 * migrated separately in mig_dma_left, so write it back into the TC
 * registers; then record the current stream format.
 * NOTE(review): original lines 888, 890, 892, 895-896 and 898-899 are
 * missing (braces and the final return, presumably "return 0;").
 */
887 static int esp_post_load(void *opaque
, int version_id
)
889 ESPState
*s
= ESP(opaque
);
/* cap at the format actually sent by the source */
891 version_id
= MIN(version_id
, s
->mig_version_id
);
893 if (version_id
< 5) {
894 esp_set_tc(s
, s
->mig_dma_left
);
897 s
->mig_version_id
= vmstate_esp
.version_id
;
901 const VMStateDescription vmstate_esp
= {
904 .minimum_version_id
= 3,
905 .pre_save
= esp_pre_save
,
906 .post_load
= esp_post_load
,
907 .fields
= (VMStateField
[]) {
908 VMSTATE_BUFFER(rregs
, ESPState
),
909 VMSTATE_BUFFER(wregs
, ESPState
),
910 VMSTATE_INT32(ti_size
, ESPState
),
911 VMSTATE_UINT32(ti_rptr
, ESPState
),
912 VMSTATE_UINT32(ti_wptr
, ESPState
),
913 VMSTATE_BUFFER(ti_buf
, ESPState
),
914 VMSTATE_UINT32(status
, ESPState
),
915 VMSTATE_UINT32(deferred_status
, ESPState
),
916 VMSTATE_BOOL(deferred_complete
, ESPState
),
917 VMSTATE_UINT32(dma
, ESPState
),
918 VMSTATE_PARTIAL_BUFFER(cmdbuf
, ESPState
, 16),
919 VMSTATE_BUFFER_START_MIDDLE_V(cmdbuf
, ESPState
, 16, 4),
920 VMSTATE_UINT32(cmdlen
, ESPState
),
921 VMSTATE_UINT32(do_cmd
, ESPState
),
922 VMSTATE_UINT32_TEST(mig_dma_left
, ESPState
, esp_is_before_version_5
),
923 VMSTATE_END_OF_LIST()
925 .subsections
= (const VMStateDescription
* []) {
931 static void sysbus_esp_mem_write(void *opaque
, hwaddr addr
,
932 uint64_t val
, unsigned int size
)
934 SysBusESPState
*sysbus
= opaque
;
935 ESPState
*s
= ESP(&sysbus
->esp
);
938 saddr
= addr
>> sysbus
->it_shift
;
939 esp_reg_write(s
, saddr
, val
);
942 static uint64_t sysbus_esp_mem_read(void *opaque
, hwaddr addr
,
945 SysBusESPState
*sysbus
= opaque
;
946 ESPState
*s
= ESP(&sysbus
->esp
);
949 saddr
= addr
>> sysbus
->it_shift
;
950 return esp_reg_read(s
, saddr
);
953 static const MemoryRegionOps sysbus_esp_mem_ops
= {
954 .read
= sysbus_esp_mem_read
,
955 .write
= sysbus_esp_mem_write
,
956 .endianness
= DEVICE_NATIVE_ENDIAN
,
957 .valid
.accepts
= esp_mem_accepts
,
960 static void sysbus_esp_pdma_write(void *opaque
, hwaddr addr
,
961 uint64_t val
, unsigned int size
)
963 SysBusESPState
*sysbus
= opaque
;
964 ESPState
*s
= ESP(&sysbus
->esp
);
967 trace_esp_pdma_write(size
);
971 esp_pdma_write(s
, val
);
974 esp_pdma_write(s
, val
>> 8);
975 esp_pdma_write(s
, val
);
978 dmalen
= esp_get_tc(s
);
979 if (dmalen
== 0 && s
->pdma_cb
) {
986 static uint64_t sysbus_esp_pdma_read(void *opaque
, hwaddr addr
,
989 SysBusESPState
*sysbus
= opaque
;
990 ESPState
*s
= ESP(&sysbus
->esp
);
991 uint32_t dmalen
= esp_get_tc(s
);
994 trace_esp_pdma_read(size
);
1001 val
= esp_pdma_read(s
);
1004 val
= esp_pdma_read(s
);
1005 val
= (val
<< 8) | esp_pdma_read(s
);
1008 dmalen
= esp_get_tc(s
);
1009 if (dmalen
== 0 && s
->pdma_cb
) {
1017 static const MemoryRegionOps sysbus_esp_pdma_ops
= {
1018 .read
= sysbus_esp_pdma_read
,
1019 .write
= sysbus_esp_pdma_write
,
1020 .endianness
= DEVICE_NATIVE_ENDIAN
,
1021 .valid
.min_access_size
= 1,
1022 .valid
.max_access_size
= 2,
1025 static const struct SCSIBusInfo esp_scsi_info
= {
1027 .max_target
= ESP_MAX_DEVS
,
1030 .transfer_data
= esp_transfer_data
,
1031 .complete
= esp_command_complete
,
1032 .cancel
= esp_request_cancelled
1035 static void sysbus_esp_gpio_demux(void *opaque
, int irq
, int level
)
1037 SysBusESPState
*sysbus
= SYSBUS_ESP(opaque
);
1038 ESPState
*s
= ESP(&sysbus
->esp
);
1042 parent_esp_reset(s
, irq
, level
);
1045 esp_dma_enable(opaque
, irq
, level
);
1050 static void sysbus_esp_realize(DeviceState
*dev
, Error
**errp
)
1052 SysBusDevice
*sbd
= SYS_BUS_DEVICE(dev
);
1053 SysBusESPState
*sysbus
= SYSBUS_ESP(dev
);
1054 ESPState
*s
= ESP(&sysbus
->esp
);
1056 if (!qdev_realize(DEVICE(s
), NULL
, errp
)) {
1060 sysbus_init_irq(sbd
, &s
->irq
);
1061 sysbus_init_irq(sbd
, &s
->irq_data
);
1062 assert(sysbus
->it_shift
!= -1);
1064 s
->chip_id
= TCHI_FAS100A
;
1065 memory_region_init_io(&sysbus
->iomem
, OBJECT(sysbus
), &sysbus_esp_mem_ops
,
1066 sysbus
, "esp-regs", ESP_REGS
<< sysbus
->it_shift
);
1067 sysbus_init_mmio(sbd
, &sysbus
->iomem
);
1068 memory_region_init_io(&sysbus
->pdma
, OBJECT(sysbus
), &sysbus_esp_pdma_ops
,
1069 sysbus
, "esp-pdma", 2);
1070 sysbus_init_mmio(sbd
, &sysbus
->pdma
);
1072 qdev_init_gpio_in(dev
, sysbus_esp_gpio_demux
, 2);
1074 scsi_bus_new(&s
->bus
, sizeof(s
->bus
), dev
, &esp_scsi_info
, NULL
);
1077 static void sysbus_esp_hard_reset(DeviceState
*dev
)
1079 SysBusESPState
*sysbus
= SYSBUS_ESP(dev
);
1080 ESPState
*s
= ESP(&sysbus
->esp
);
1085 static void sysbus_esp_init(Object
*obj
)
1087 SysBusESPState
*sysbus
= SYSBUS_ESP(obj
);
1089 object_initialize_child(obj
, "esp", &sysbus
->esp
, TYPE_ESP
);
/*
 * Top-level migration description for the sysbus ESP device: the
 * embedded ESPState plus its mig_version_id byte (added in v2).
 * NOTE(review): the .version_id initializer (original line 1094) and
 * the closing braces are missing; restore from upstream.
 */
1092 static const VMStateDescription vmstate_sysbus_esp_scsi
= {
1093 .name
= "sysbusespscsi",
1095 .minimum_version_id
= 1,
1096 .fields
= (VMStateField
[]) {
1097 VMSTATE_UINT8_V(esp
.mig_version_id
, SysBusESPState
, 2),
1098 VMSTATE_STRUCT(esp
, SysBusESPState
, 0, vmstate_esp
, ESPState
),
1099 VMSTATE_END_OF_LIST()
1103 static void sysbus_esp_class_init(ObjectClass
*klass
, void *data
)
1105 DeviceClass
*dc
= DEVICE_CLASS(klass
);
1107 dc
->realize
= sysbus_esp_realize
;
1108 dc
->reset
= sysbus_esp_hard_reset
;
1109 dc
->vmsd
= &vmstate_sysbus_esp_scsi
;
1110 set_bit(DEVICE_CATEGORY_STORAGE
, dc
->categories
);
1113 static const TypeInfo sysbus_esp_info
= {
1114 .name
= TYPE_SYSBUS_ESP
,
1115 .parent
= TYPE_SYS_BUS_DEVICE
,
1116 .instance_init
= sysbus_esp_init
,
1117 .instance_size
= sizeof(SysBusESPState
),
1118 .class_init
= sysbus_esp_class_init
,
1121 static void esp_class_init(ObjectClass
*klass
, void *data
)
1123 DeviceClass
*dc
= DEVICE_CLASS(klass
);
1125 /* internal device for sysbusesp/pciespscsi, not user-creatable */
1126 dc
->user_creatable
= false;
1127 set_bit(DEVICE_CATEGORY_STORAGE
, dc
->categories
);
/*
 * QOM type registration for the bare ESP core device.
 * NOTE(review): the .name initializer (original line 1131, presumably
 * ".name = TYPE_ESP,") and the closing "};" are missing; restore from
 * upstream.
 */
1130 static const TypeInfo esp_info
= {
1132 .parent
= TYPE_DEVICE
,
1133 .instance_size
= sizeof(ESPState
),
1134 .class_init
= esp_class_init
,
1137 static void esp_register_types(void)
1139 type_register_static(&sysbus_esp_info
);
1140 type_register_static(&esp_info
);
1143 type_init(esp_register_types
)