2 * QEMU ESP/NCR53C9x emulation
4 * Copyright (c) 2005-2006 Fabrice Bellard
5 * Copyright (c) 2012 Herve Poussineau
7 * Permission is hereby granted, free of charge, to any person obtaining a copy
8 * of this software and associated documentation files (the "Software"), to deal
9 * in the Software without restriction, including without limitation the rights
10 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11 * copies of the Software, and to permit persons to whom the Software is
12 * furnished to do so, subject to the following conditions:
14 * The above copyright notice and this permission notice shall be included in
15 * all copies or substantial portions of the Software.
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
26 #include "qemu/osdep.h"
27 #include "hw/sysbus.h"
28 #include "migration/vmstate.h"
30 #include "hw/scsi/esp.h"
33 #include "qemu/module.h"
36 * On Sparc32, this is the ESP (NCR53C90) part of chip STP2000 (Master I/O),
37 * also produced as NCR89C100. See
38 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR89C100.txt
40 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR53C9X.txt
42 * On Macintosh Quadra it is a NCR53C96.
45 static void esp_raise_irq(ESPState
*s
)
47 if (!(s
->rregs
[ESP_RSTAT
] & STAT_INT
)) {
48 s
->rregs
[ESP_RSTAT
] |= STAT_INT
;
49 qemu_irq_raise(s
->irq
);
50 trace_esp_raise_irq();
54 static void esp_lower_irq(ESPState
*s
)
56 if (s
->rregs
[ESP_RSTAT
] & STAT_INT
) {
57 s
->rregs
[ESP_RSTAT
] &= ~STAT_INT
;
58 qemu_irq_lower(s
->irq
);
59 trace_esp_lower_irq();
63 static void esp_raise_drq(ESPState
*s
)
65 qemu_irq_raise(s
->irq_data
);
66 trace_esp_raise_drq();
69 static void esp_lower_drq(ESPState
*s
)
71 qemu_irq_lower(s
->irq_data
);
72 trace_esp_lower_drq();
75 void esp_dma_enable(ESPState
*s
, int irq
, int level
)
79 trace_esp_dma_enable();
85 trace_esp_dma_disable();
90 void esp_request_cancelled(SCSIRequest
*req
)
92 ESPState
*s
= req
->hba_private
;
94 if (req
== s
->current_req
) {
95 scsi_req_unref(s
->current_req
);
96 s
->current_req
= NULL
;
97 s
->current_dev
= NULL
;
102 static void esp_fifo_push(Fifo8
*fifo
, uint8_t val
)
104 if (fifo8_num_used(fifo
) == fifo
->capacity
) {
105 trace_esp_error_fifo_overrun();
109 fifo8_push(fifo
, val
);
112 static uint8_t esp_fifo_pop(Fifo8
*fifo
)
114 if (fifo8_is_empty(fifo
)) {
118 return fifo8_pop(fifo
);
121 static uint32_t esp_fifo_pop_buf(Fifo8
*fifo
, uint8_t *dest
, int maxlen
)
130 buf
= fifo8_pop_buf(fifo
, maxlen
, &n
);
132 memcpy(dest
, buf
, n
);
138 static uint32_t esp_get_tc(ESPState
*s
)
142 dmalen
= s
->rregs
[ESP_TCLO
];
143 dmalen
|= s
->rregs
[ESP_TCMID
] << 8;
144 dmalen
|= s
->rregs
[ESP_TCHI
] << 16;
149 static void esp_set_tc(ESPState
*s
, uint32_t dmalen
)
151 s
->rregs
[ESP_TCLO
] = dmalen
;
152 s
->rregs
[ESP_TCMID
] = dmalen
>> 8;
153 s
->rregs
[ESP_TCHI
] = dmalen
>> 16;
156 static uint32_t esp_get_stc(ESPState
*s
)
160 dmalen
= s
->wregs
[ESP_TCLO
];
161 dmalen
|= s
->wregs
[ESP_TCMID
] << 8;
162 dmalen
|= s
->wregs
[ESP_TCHI
] << 16;
167 static uint8_t esp_pdma_read(ESPState
*s
)
172 val
= esp_fifo_pop(&s
->cmdfifo
);
174 val
= esp_fifo_pop(&s
->fifo
);
180 static void esp_pdma_write(ESPState
*s
, uint8_t val
)
182 uint32_t dmalen
= esp_get_tc(s
);
189 esp_fifo_push(&s
->cmdfifo
, val
);
191 esp_fifo_push(&s
->fifo
, val
);
195 esp_set_tc(s
, dmalen
);
198 static void esp_set_pdma_cb(ESPState
*s
, enum pdma_cb cb
)
203 static int esp_select(ESPState
*s
)
207 target
= s
->wregs
[ESP_WBUSID
] & BUSID_DID
;
210 fifo8_reset(&s
->fifo
);
212 s
->current_dev
= scsi_device_find(&s
->bus
, 0, target
, 0);
213 if (!s
->current_dev
) {
215 s
->rregs
[ESP_RSTAT
] = 0;
216 s
->rregs
[ESP_RINTR
] = INTR_DC
;
217 s
->rregs
[ESP_RSEQ
] = SEQ_0
;
223 * Note that we deliberately don't raise the IRQ here: this will be done
224 * either in do_command_phase() for DATA OUT transfers or by the deferred
225 * IRQ mechanism in esp_transfer_data() for DATA IN transfers
227 s
->rregs
[ESP_RINTR
] |= INTR_FC
;
228 s
->rregs
[ESP_RSEQ
] = SEQ_CD
;
232 static uint32_t get_cmd(ESPState
*s
, uint32_t maxlen
)
234 uint8_t buf
[ESP_CMDFIFO_SZ
];
238 if (s
->current_req
) {
239 /* Started a new command before the old one finished. Cancel it. */
240 scsi_req_cancel(s
->current_req
);
243 target
= s
->wregs
[ESP_WBUSID
] & BUSID_DID
;
245 dmalen
= MIN(esp_get_tc(s
), maxlen
);
249 if (s
->dma_memory_read
) {
250 s
->dma_memory_read(s
->dma_opaque
, buf
, dmalen
);
251 dmalen
= MIN(fifo8_num_free(&s
->cmdfifo
), dmalen
);
252 fifo8_push_all(&s
->cmdfifo
, buf
, dmalen
);
254 if (esp_select(s
) < 0) {
255 fifo8_reset(&s
->cmdfifo
);
259 fifo8_reset(&s
->cmdfifo
);
263 dmalen
= MIN(fifo8_num_used(&s
->fifo
), maxlen
);
267 n
= esp_fifo_pop_buf(&s
->fifo
, buf
, dmalen
);
268 n
= MIN(fifo8_num_free(&s
->cmdfifo
), n
);
269 fifo8_push_all(&s
->cmdfifo
, buf
, n
);
271 trace_esp_get_cmd(dmalen
, target
);
273 if (esp_select(s
) < 0) {
274 fifo8_reset(&s
->cmdfifo
);
280 static void do_command_phase(ESPState
*s
)
284 SCSIDevice
*current_lun
;
285 uint8_t buf
[ESP_CMDFIFO_SZ
];
287 trace_esp_do_command_phase(s
->lun
);
288 cmdlen
= fifo8_num_used(&s
->cmdfifo
);
289 if (!cmdlen
|| !s
->current_dev
) {
292 esp_fifo_pop_buf(&s
->cmdfifo
, buf
, cmdlen
);
294 current_lun
= scsi_device_find(&s
->bus
, 0, s
->current_dev
->id
, s
->lun
);
295 s
->current_req
= scsi_req_new(current_lun
, 0, s
->lun
, buf
, cmdlen
, s
);
296 datalen
= scsi_req_enqueue(s
->current_req
);
297 s
->ti_size
= datalen
;
298 fifo8_reset(&s
->cmdfifo
);
300 s
->rregs
[ESP_RSTAT
] = STAT_TC
;
301 s
->rregs
[ESP_RSEQ
] = SEQ_CD
;
306 * Switch to DATA IN phase but wait until initial data xfer is
307 * complete before raising the command completion interrupt
309 s
->data_in_ready
= false;
310 s
->rregs
[ESP_RSTAT
] |= STAT_DI
;
312 s
->rregs
[ESP_RSTAT
] |= STAT_DO
;
313 s
->rregs
[ESP_RINTR
] |= INTR_BS
| INTR_FC
;
317 scsi_req_continue(s
->current_req
);
322 static void do_message_phase(ESPState
*s
)
324 if (s
->cmdfifo_cdb_offset
) {
325 uint8_t message
= esp_fifo_pop(&s
->cmdfifo
);
327 trace_esp_do_identify(message
);
328 s
->lun
= message
& 7;
329 s
->cmdfifo_cdb_offset
--;
332 /* Ignore extended messages for now */
333 if (s
->cmdfifo_cdb_offset
) {
334 int len
= MIN(s
->cmdfifo_cdb_offset
, fifo8_num_used(&s
->cmdfifo
));
335 esp_fifo_pop_buf(&s
->cmdfifo
, NULL
, len
);
336 s
->cmdfifo_cdb_offset
= 0;
340 static void do_cmd(ESPState
*s
)
343 assert(s
->cmdfifo_cdb_offset
== 0);
347 static void satn_pdma_cb(ESPState
*s
)
349 if (!esp_get_tc(s
) && !fifo8_is_empty(&s
->cmdfifo
)) {
350 s
->cmdfifo_cdb_offset
= 1;
356 static void handle_satn(ESPState
*s
)
360 if (s
->dma
&& !s
->dma_enabled
) {
361 s
->dma_cb
= handle_satn
;
364 esp_set_pdma_cb(s
, SATN_PDMA_CB
);
365 cmdlen
= get_cmd(s
, ESP_CMDFIFO_SZ
);
367 s
->cmdfifo_cdb_offset
= 1;
370 } else if (cmdlen
== 0) {
372 /* Target present, but no cmd yet - switch to command phase */
373 s
->rregs
[ESP_RSEQ
] = SEQ_CD
;
374 s
->rregs
[ESP_RSTAT
] = STAT_CD
;
378 static void s_without_satn_pdma_cb(ESPState
*s
)
380 if (!esp_get_tc(s
) && !fifo8_is_empty(&s
->cmdfifo
)) {
381 s
->cmdfifo_cdb_offset
= 0;
387 static void handle_s_without_atn(ESPState
*s
)
391 if (s
->dma
&& !s
->dma_enabled
) {
392 s
->dma_cb
= handle_s_without_atn
;
395 esp_set_pdma_cb(s
, S_WITHOUT_SATN_PDMA_CB
);
396 cmdlen
= get_cmd(s
, ESP_CMDFIFO_SZ
);
398 s
->cmdfifo_cdb_offset
= 0;
401 } else if (cmdlen
== 0) {
403 /* Target present, but no cmd yet - switch to command phase */
404 s
->rregs
[ESP_RSEQ
] = SEQ_CD
;
405 s
->rregs
[ESP_RSTAT
] = STAT_CD
;
409 static void satn_stop_pdma_cb(ESPState
*s
)
411 if (!esp_get_tc(s
) && !fifo8_is_empty(&s
->cmdfifo
)) {
412 trace_esp_handle_satn_stop(fifo8_num_used(&s
->cmdfifo
));
414 s
->cmdfifo_cdb_offset
= 1;
415 s
->rregs
[ESP_RSTAT
] = STAT_TC
| STAT_CD
;
416 s
->rregs
[ESP_RINTR
] |= INTR_BS
| INTR_FC
;
417 s
->rregs
[ESP_RSEQ
] = SEQ_CD
;
422 static void handle_satn_stop(ESPState
*s
)
426 if (s
->dma
&& !s
->dma_enabled
) {
427 s
->dma_cb
= handle_satn_stop
;
430 esp_set_pdma_cb(s
, SATN_STOP_PDMA_CB
);
431 cmdlen
= get_cmd(s
, 1);
433 trace_esp_handle_satn_stop(fifo8_num_used(&s
->cmdfifo
));
435 s
->cmdfifo_cdb_offset
= 1;
436 s
->rregs
[ESP_RSTAT
] = STAT_MO
;
437 s
->rregs
[ESP_RINTR
] |= INTR_BS
| INTR_FC
;
438 s
->rregs
[ESP_RSEQ
] = SEQ_MO
;
440 } else if (cmdlen
== 0) {
442 /* Target present, switch to message out phase */
443 s
->rregs
[ESP_RSEQ
] = SEQ_MO
;
444 s
->rregs
[ESP_RSTAT
] = STAT_MO
;
448 static void write_response_pdma_cb(ESPState
*s
)
450 s
->rregs
[ESP_RSTAT
] = STAT_TC
| STAT_ST
;
451 s
->rregs
[ESP_RINTR
] |= INTR_BS
| INTR_FC
;
452 s
->rregs
[ESP_RSEQ
] = SEQ_CD
;
456 static void write_response(ESPState
*s
)
460 trace_esp_write_response(s
->status
);
466 if (s
->dma_memory_write
) {
467 s
->dma_memory_write(s
->dma_opaque
, buf
, 2);
468 s
->rregs
[ESP_RSTAT
] = STAT_TC
| STAT_ST
;
469 s
->rregs
[ESP_RINTR
] |= INTR_BS
| INTR_FC
;
470 s
->rregs
[ESP_RSEQ
] = SEQ_CD
;
472 esp_set_pdma_cb(s
, WRITE_RESPONSE_PDMA_CB
);
477 fifo8_reset(&s
->fifo
);
478 fifo8_push_all(&s
->fifo
, buf
, 2);
479 s
->rregs
[ESP_RFLAGS
] = 2;
484 static void esp_dma_done(ESPState
*s
)
486 s
->rregs
[ESP_RSTAT
] |= STAT_TC
;
487 s
->rregs
[ESP_RINTR
] |= INTR_BS
;
488 s
->rregs
[ESP_RFLAGS
] = 0;
493 static void do_dma_pdma_cb(ESPState
*s
)
495 int to_device
= ((s
->rregs
[ESP_RSTAT
] & 7) == STAT_DO
);
500 /* Ensure we have received complete command after SATN and stop */
501 if (esp_get_tc(s
) || fifo8_is_empty(&s
->cmdfifo
)) {
506 if ((s
->rregs
[ESP_RSTAT
] & 7) == STAT_CD
) {
507 /* No command received */
508 if (s
->cmdfifo_cdb_offset
== fifo8_num_used(&s
->cmdfifo
)) {
512 /* Command has been received */
517 * Extra message out bytes received: update cmdfifo_cdb_offset
518 * and then switch to command phase
520 s
->cmdfifo_cdb_offset
= fifo8_num_used(&s
->cmdfifo
);
521 s
->rregs
[ESP_RSTAT
] = STAT_TC
| STAT_CD
;
522 s
->rregs
[ESP_RSEQ
] = SEQ_CD
;
523 s
->rregs
[ESP_RINTR
] |= INTR_BS
;
529 if (!s
->current_req
) {
534 /* Copy FIFO data to device */
535 len
= MIN(s
->async_len
, ESP_FIFO_SZ
);
536 len
= MIN(len
, fifo8_num_used(&s
->fifo
));
537 n
= esp_fifo_pop_buf(&s
->fifo
, s
->async_buf
, len
);
543 /* Unaligned accesses can cause FIFO wraparound */
545 n
= esp_fifo_pop_buf(&s
->fifo
, s
->async_buf
, len
);
551 if (s
->async_len
== 0) {
552 scsi_req_continue(s
->current_req
);
556 if (esp_get_tc(s
) == 0) {
563 if (s
->async_len
== 0) {
564 /* Defer until the scsi layer has completed */
565 scsi_req_continue(s
->current_req
);
566 s
->data_in_ready
= false;
570 if (esp_get_tc(s
) != 0) {
571 /* Copy device data to FIFO */
572 len
= MIN(s
->async_len
, esp_get_tc(s
));
573 len
= MIN(len
, fifo8_num_free(&s
->fifo
));
574 fifo8_push_all(&s
->fifo
, s
->async_buf
, len
);
578 esp_set_tc(s
, esp_get_tc(s
) - len
);
580 if (esp_get_tc(s
) == 0) {
581 /* Indicate transfer to FIFO is complete */
582 s
->rregs
[ESP_RSTAT
] |= STAT_TC
;
587 /* Partially filled a scsi buffer. Complete immediately. */
593 static void esp_do_dma(ESPState
*s
)
595 uint32_t len
, cmdlen
;
596 int to_device
= ((s
->rregs
[ESP_RSTAT
] & 7) == STAT_DO
);
597 uint8_t buf
[ESP_CMDFIFO_SZ
];
602 * handle_ti_cmd() case: esp_do_dma() is called only from
603 * handle_ti_cmd() with do_cmd != NULL (see the assert())
605 cmdlen
= fifo8_num_used(&s
->cmdfifo
);
606 trace_esp_do_dma(cmdlen
, len
);
607 if (s
->dma_memory_read
) {
608 len
= MIN(len
, fifo8_num_free(&s
->cmdfifo
));
609 s
->dma_memory_read(s
->dma_opaque
, buf
, len
);
610 fifo8_push_all(&s
->cmdfifo
, buf
, len
);
612 esp_set_pdma_cb(s
, DO_DMA_PDMA_CB
);
616 trace_esp_handle_ti_cmd(cmdlen
);
618 if ((s
->rregs
[ESP_RSTAT
] & 7) == STAT_CD
) {
619 /* No command received */
620 if (s
->cmdfifo_cdb_offset
== fifo8_num_used(&s
->cmdfifo
)) {
624 /* Command has been received */
629 * Extra message out bytes received: update cmdfifo_cdb_offset
630 * and then switch to command phase
632 s
->cmdfifo_cdb_offset
= fifo8_num_used(&s
->cmdfifo
);
633 s
->rregs
[ESP_RSTAT
] = STAT_TC
| STAT_CD
;
634 s
->rregs
[ESP_RSEQ
] = SEQ_CD
;
635 s
->rregs
[ESP_RINTR
] |= INTR_BS
;
640 if (!s
->current_req
) {
643 if (s
->async_len
== 0) {
644 /* Defer until data is available. */
647 if (len
> s
->async_len
) {
651 if (s
->dma_memory_read
) {
652 s
->dma_memory_read(s
->dma_opaque
, s
->async_buf
, len
);
654 esp_set_pdma_cb(s
, DO_DMA_PDMA_CB
);
659 if (s
->dma_memory_write
) {
660 s
->dma_memory_write(s
->dma_opaque
, s
->async_buf
, len
);
662 /* Adjust TC for any leftover data in the FIFO */
663 if (!fifo8_is_empty(&s
->fifo
)) {
664 esp_set_tc(s
, esp_get_tc(s
) - fifo8_num_used(&s
->fifo
));
667 /* Copy device data to FIFO */
668 len
= MIN(len
, fifo8_num_free(&s
->fifo
));
669 fifo8_push_all(&s
->fifo
, s
->async_buf
, len
);
675 * MacOS toolbox uses a TI length of 16 bytes for all commands, so
676 * commands shorter than this must be padded accordingly
678 if (len
< esp_get_tc(s
) && esp_get_tc(s
) <= ESP_FIFO_SZ
) {
679 while (fifo8_num_used(&s
->fifo
) < ESP_FIFO_SZ
) {
680 esp_fifo_push(&s
->fifo
, 0);
685 esp_set_tc(s
, esp_get_tc(s
) - len
);
686 esp_set_pdma_cb(s
, DO_DMA_PDMA_CB
);
689 /* Indicate transfer to FIFO is complete */
690 s
->rregs
[ESP_RSTAT
] |= STAT_TC
;
694 esp_set_tc(s
, esp_get_tc(s
) - len
);
702 if (s
->async_len
== 0) {
703 scsi_req_continue(s
->current_req
);
705 * If there is still data to be read from the device then
706 * complete the DMA operation immediately. Otherwise defer
707 * until the scsi layer has completed.
709 if (to_device
|| esp_get_tc(s
) != 0 || s
->ti_size
== 0) {
714 /* Partially filled a scsi buffer. Complete immediately. */
719 static void esp_do_nodma(ESPState
*s
)
721 int to_device
= ((s
->rregs
[ESP_RSTAT
] & 7) == STAT_DO
);
726 cmdlen
= fifo8_num_used(&s
->cmdfifo
);
727 trace_esp_handle_ti_cmd(cmdlen
);
729 if ((s
->rregs
[ESP_RSTAT
] & 7) == STAT_CD
) {
730 /* No command received */
731 if (s
->cmdfifo_cdb_offset
== fifo8_num_used(&s
->cmdfifo
)) {
735 /* Command has been received */
740 * Extra message out bytes received: update cmdfifo_cdb_offset
741 * and then switch to command phase
743 s
->cmdfifo_cdb_offset
= fifo8_num_used(&s
->cmdfifo
);
744 s
->rregs
[ESP_RSTAT
] = STAT_TC
| STAT_CD
;
745 s
->rregs
[ESP_RSEQ
] = SEQ_CD
;
746 s
->rregs
[ESP_RINTR
] |= INTR_BS
;
752 if (!s
->current_req
) {
756 if (s
->async_len
== 0) {
757 /* Defer until data is available. */
762 len
= MIN(fifo8_num_used(&s
->fifo
), ESP_FIFO_SZ
);
763 esp_fifo_pop_buf(&s
->fifo
, s
->async_buf
, len
);
768 if (fifo8_is_empty(&s
->fifo
)) {
769 fifo8_push(&s
->fifo
, s
->async_buf
[0]);
776 if (s
->async_len
== 0) {
777 scsi_req_continue(s
->current_req
);
781 s
->rregs
[ESP_RINTR
] |= INTR_BS
;
785 static void esp_pdma_cb(ESPState
*s
)
787 switch (s
->pdma_cb
) {
791 case S_WITHOUT_SATN_PDMA_CB
:
792 s_without_satn_pdma_cb(s
);
794 case SATN_STOP_PDMA_CB
:
795 satn_stop_pdma_cb(s
);
797 case WRITE_RESPONSE_PDMA_CB
:
798 write_response_pdma_cb(s
);
804 g_assert_not_reached();
808 void esp_command_complete(SCSIRequest
*req
, size_t resid
)
810 ESPState
*s
= req
->hba_private
;
811 int to_device
= ((s
->rregs
[ESP_RSTAT
] & 7) == STAT_DO
);
813 trace_esp_command_complete();
816 * Non-DMA transfers from the target will leave the last byte in
817 * the FIFO so don't reset ti_size in this case
819 if (s
->dma
|| to_device
) {
820 if (s
->ti_size
!= 0) {
821 trace_esp_command_complete_unexpected();
828 trace_esp_command_complete_fail();
830 s
->status
= req
->status
;
833 * If the transfer is finished, switch to status phase. For non-DMA
834 * transfers from the target the last byte is still in the FIFO
836 if (s
->ti_size
== 0) {
837 s
->rregs
[ESP_RSTAT
] = STAT_TC
| STAT_ST
;
842 if (s
->current_req
) {
843 scsi_req_unref(s
->current_req
);
844 s
->current_req
= NULL
;
845 s
->current_dev
= NULL
;
849 void esp_transfer_data(SCSIRequest
*req
, uint32_t len
)
851 ESPState
*s
= req
->hba_private
;
852 int to_device
= ((s
->rregs
[ESP_RSTAT
] & 7) == STAT_DO
);
853 uint32_t dmalen
= esp_get_tc(s
);
856 trace_esp_transfer_data(dmalen
, s
->ti_size
);
858 s
->async_buf
= scsi_req_get_buf(req
);
860 if (!to_device
&& !s
->data_in_ready
) {
862 * Initial incoming data xfer is complete so raise command
863 * completion interrupt
865 s
->data_in_ready
= true;
866 s
->rregs
[ESP_RSTAT
] |= STAT_TC
;
867 s
->rregs
[ESP_RINTR
] |= INTR_BS
;
871 if (s
->ti_cmd
== 0) {
873 * Always perform the initial transfer upon reception of the next TI
874 * command to ensure the DMA/non-DMA status of the command is correct.
875 * It is not possible to use s->dma directly in the section below as
876 * some OSs send non-DMA NOP commands after a DMA transfer. Hence if the
877 * async data transfer is delayed then s->dma is set incorrectly.
882 if (s
->ti_cmd
== (CMD_TI
| CMD_DMA
)) {
885 } else if (s
->ti_size
<= 0) {
887 * If this was the last part of a DMA transfer then the
888 * completion interrupt is deferred to here.
893 } else if (s
->ti_cmd
== CMD_TI
) {
898 static void handle_ti(ESPState
*s
)
902 if (s
->dma
&& !s
->dma_enabled
) {
903 s
->dma_cb
= handle_ti
;
907 s
->ti_cmd
= s
->rregs
[ESP_CMD
];
909 dmalen
= esp_get_tc(s
);
910 trace_esp_handle_ti(dmalen
);
911 s
->rregs
[ESP_RSTAT
] &= ~STAT_TC
;
914 trace_esp_handle_ti(s
->ti_size
);
919 void esp_hard_reset(ESPState
*s
)
921 memset(s
->rregs
, 0, ESP_REGS
);
922 memset(s
->wregs
, 0, ESP_REGS
);
926 fifo8_reset(&s
->fifo
);
927 fifo8_reset(&s
->cmdfifo
);
932 s
->rregs
[ESP_CFG1
] = 7;
935 static void esp_soft_reset(ESPState
*s
)
937 qemu_irq_lower(s
->irq
);
938 qemu_irq_lower(s
->irq_data
);
942 static void esp_bus_reset(ESPState
*s
)
944 qbus_reset_all(BUS(&s
->bus
));
947 static void parent_esp_reset(ESPState
*s
, int irq
, int level
)
954 uint64_t esp_reg_read(ESPState
*s
, uint32_t saddr
)
960 if (s
->dma_memory_read
&& s
->dma_memory_write
&&
961 (s
->rregs
[ESP_RSTAT
] & STAT_PIO_MASK
) == 0) {
963 qemu_log_mask(LOG_UNIMP
, "esp: PIO data read not implemented\n");
964 s
->rregs
[ESP_FIFO
] = 0;
966 if ((s
->rregs
[ESP_RSTAT
] & 0x7) == STAT_DI
) {
971 * The last byte of a non-DMA transfer has been read out
972 * of the FIFO so switch to status phase
974 s
->rregs
[ESP_RSTAT
] = STAT_TC
| STAT_ST
;
977 s
->rregs
[ESP_FIFO
] = esp_fifo_pop(&s
->fifo
);
979 val
= s
->rregs
[ESP_FIFO
];
983 * Clear sequence step, interrupt register and all status bits
986 val
= s
->rregs
[ESP_RINTR
];
987 s
->rregs
[ESP_RINTR
] = 0;
988 s
->rregs
[ESP_RSTAT
] &= ~STAT_TC
;
990 * According to the datasheet ESP_RSEQ should be cleared, but as the
991 * emulation currently defers information transfers to the next TI
992 * command leave it for now so that pedantic guests such as the old
993 * Linux 2.6 driver see the correct flags before the next SCSI phase
996 * s->rregs[ESP_RSEQ] = SEQ_0;
1001 /* Return the unique id if the value has never been written */
1002 if (!s
->tchi_written
) {
1005 val
= s
->rregs
[saddr
];
1009 /* Bottom 5 bits indicate number of bytes in FIFO */
1010 val
= fifo8_num_used(&s
->fifo
);
1013 val
= s
->rregs
[saddr
];
1017 trace_esp_mem_readb(saddr
, val
);
1021 void esp_reg_write(ESPState
*s
, uint32_t saddr
, uint64_t val
)
1023 trace_esp_mem_writeb(saddr
, s
->wregs
[saddr
], val
);
1026 s
->tchi_written
= true;
1030 s
->rregs
[ESP_RSTAT
] &= ~STAT_TC
;
1034 esp_fifo_push(&s
->cmdfifo
, val
);
1037 * If any unexpected message out/command phase data is
1038 * transferred using non-DMA, raise the interrupt
1040 if (s
->rregs
[ESP_CMD
] == CMD_TI
) {
1041 s
->rregs
[ESP_RINTR
] |= INTR_BS
;
1045 esp_fifo_push(&s
->fifo
, val
);
1049 s
->rregs
[saddr
] = val
;
1050 if (val
& CMD_DMA
) {
1052 /* Reload DMA counter. */
1053 if (esp_get_stc(s
) == 0) {
1054 esp_set_tc(s
, 0x10000);
1056 esp_set_tc(s
, esp_get_stc(s
));
1061 switch (val
& CMD_CMD
) {
1063 trace_esp_mem_writeb_cmd_nop(val
);
1066 trace_esp_mem_writeb_cmd_flush(val
);
1067 fifo8_reset(&s
->fifo
);
1070 trace_esp_mem_writeb_cmd_reset(val
);
1074 trace_esp_mem_writeb_cmd_bus_reset(val
);
1076 if (!(s
->wregs
[ESP_CFG1
] & CFG1_RESREPT
)) {
1077 s
->rregs
[ESP_RINTR
] |= INTR_RST
;
1082 trace_esp_mem_writeb_cmd_ti(val
);
1086 trace_esp_mem_writeb_cmd_iccs(val
);
1088 s
->rregs
[ESP_RINTR
] |= INTR_FC
;
1089 s
->rregs
[ESP_RSTAT
] |= STAT_MI
;
1092 trace_esp_mem_writeb_cmd_msgacc(val
);
1093 s
->rregs
[ESP_RINTR
] |= INTR_DC
;
1094 s
->rregs
[ESP_RSEQ
] = 0;
1095 s
->rregs
[ESP_RFLAGS
] = 0;
1099 trace_esp_mem_writeb_cmd_pad(val
);
1100 s
->rregs
[ESP_RSTAT
] = STAT_TC
;
1101 s
->rregs
[ESP_RINTR
] |= INTR_FC
;
1102 s
->rregs
[ESP_RSEQ
] = 0;
1105 trace_esp_mem_writeb_cmd_satn(val
);
1108 trace_esp_mem_writeb_cmd_rstatn(val
);
1111 trace_esp_mem_writeb_cmd_sel(val
);
1112 handle_s_without_atn(s
);
1115 trace_esp_mem_writeb_cmd_selatn(val
);
1119 trace_esp_mem_writeb_cmd_selatns(val
);
1120 handle_satn_stop(s
);
1123 trace_esp_mem_writeb_cmd_ensel(val
);
1124 s
->rregs
[ESP_RINTR
] = 0;
1127 trace_esp_mem_writeb_cmd_dissel(val
);
1128 s
->rregs
[ESP_RINTR
] = 0;
1132 trace_esp_error_unhandled_command(val
);
1136 case ESP_WBUSID
... ESP_WSYNO
:
1139 case ESP_CFG2
: case ESP_CFG3
:
1140 case ESP_RES3
: case ESP_RES4
:
1141 s
->rregs
[saddr
] = val
;
1143 case ESP_WCCF
... ESP_WTEST
:
1146 trace_esp_error_invalid_write(val
, saddr
);
1149 s
->wregs
[saddr
] = val
;
1152 static bool esp_mem_accepts(void *opaque
, hwaddr addr
,
1153 unsigned size
, bool is_write
,
1156 return (size
== 1) || (is_write
&& size
== 4);
1159 static bool esp_is_before_version_5(void *opaque
, int version_id
)
1161 ESPState
*s
= ESP(opaque
);
1163 version_id
= MIN(version_id
, s
->mig_version_id
);
1164 return version_id
< 5;
1167 static bool esp_is_version_5(void *opaque
, int version_id
)
1169 ESPState
*s
= ESP(opaque
);
1171 version_id
= MIN(version_id
, s
->mig_version_id
);
1172 return version_id
>= 5;
1175 static bool esp_is_version_6(void *opaque
, int version_id
)
1177 ESPState
*s
= ESP(opaque
);
1179 version_id
= MIN(version_id
, s
->mig_version_id
);
1180 return version_id
>= 6;
1183 int esp_pre_save(void *opaque
)
1185 ESPState
*s
= ESP(object_resolve_path_component(
1186 OBJECT(opaque
), "esp"));
1188 s
->mig_version_id
= vmstate_esp
.version_id
;
1192 static int esp_post_load(void *opaque
, int version_id
)
1194 ESPState
*s
= ESP(opaque
);
1197 version_id
= MIN(version_id
, s
->mig_version_id
);
1199 if (version_id
< 5) {
1200 esp_set_tc(s
, s
->mig_dma_left
);
1202 /* Migrate ti_buf to fifo */
1203 len
= s
->mig_ti_wptr
- s
->mig_ti_rptr
;
1204 for (i
= 0; i
< len
; i
++) {
1205 fifo8_push(&s
->fifo
, s
->mig_ti_buf
[i
]);
1208 /* Migrate cmdbuf to cmdfifo */
1209 for (i
= 0; i
< s
->mig_cmdlen
; i
++) {
1210 fifo8_push(&s
->cmdfifo
, s
->mig_cmdbuf
[i
]);
1214 s
->mig_version_id
= vmstate_esp
.version_id
;
1219 * PDMA (or pseudo-DMA) is only used on the Macintosh and requires the
1220 * guest CPU to perform the transfers between the SCSI bus and memory
1221 * itself. This is indicated by the dma_memory_read and dma_memory_write
1222 * functions being NULL (in contrast to the ESP PCI device) whilst
1223 * dma_enabled is still set.
1226 static bool esp_pdma_needed(void *opaque
)
1228 ESPState
*s
= ESP(opaque
);
1230 return s
->dma_memory_read
== NULL
&& s
->dma_memory_write
== NULL
&&
1234 static const VMStateDescription vmstate_esp_pdma
= {
1237 .minimum_version_id
= 0,
1238 .needed
= esp_pdma_needed
,
1239 .fields
= (VMStateField
[]) {
1240 VMSTATE_UINT8(pdma_cb
, ESPState
),
1241 VMSTATE_END_OF_LIST()
1245 const VMStateDescription vmstate_esp
= {
1248 .minimum_version_id
= 3,
1249 .post_load
= esp_post_load
,
1250 .fields
= (VMStateField
[]) {
1251 VMSTATE_BUFFER(rregs
, ESPState
),
1252 VMSTATE_BUFFER(wregs
, ESPState
),
1253 VMSTATE_INT32(ti_size
, ESPState
),
1254 VMSTATE_UINT32_TEST(mig_ti_rptr
, ESPState
, esp_is_before_version_5
),
1255 VMSTATE_UINT32_TEST(mig_ti_wptr
, ESPState
, esp_is_before_version_5
),
1256 VMSTATE_BUFFER_TEST(mig_ti_buf
, ESPState
, esp_is_before_version_5
),
1257 VMSTATE_UINT32(status
, ESPState
),
1258 VMSTATE_UINT32_TEST(mig_deferred_status
, ESPState
,
1259 esp_is_before_version_5
),
1260 VMSTATE_BOOL_TEST(mig_deferred_complete
, ESPState
,
1261 esp_is_before_version_5
),
1262 VMSTATE_UINT32(dma
, ESPState
),
1263 VMSTATE_STATIC_BUFFER(mig_cmdbuf
, ESPState
, 0,
1264 esp_is_before_version_5
, 0, 16),
1265 VMSTATE_STATIC_BUFFER(mig_cmdbuf
, ESPState
, 4,
1266 esp_is_before_version_5
, 16,
1267 sizeof(typeof_field(ESPState
, mig_cmdbuf
))),
1268 VMSTATE_UINT32_TEST(mig_cmdlen
, ESPState
, esp_is_before_version_5
),
1269 VMSTATE_UINT32(do_cmd
, ESPState
),
1270 VMSTATE_UINT32_TEST(mig_dma_left
, ESPState
, esp_is_before_version_5
),
1271 VMSTATE_BOOL_TEST(data_in_ready
, ESPState
, esp_is_version_5
),
1272 VMSTATE_UINT8_TEST(cmdfifo_cdb_offset
, ESPState
, esp_is_version_5
),
1273 VMSTATE_FIFO8_TEST(fifo
, ESPState
, esp_is_version_5
),
1274 VMSTATE_FIFO8_TEST(cmdfifo
, ESPState
, esp_is_version_5
),
1275 VMSTATE_UINT8_TEST(ti_cmd
, ESPState
, esp_is_version_5
),
1276 VMSTATE_UINT8_TEST(lun
, ESPState
, esp_is_version_6
),
1277 VMSTATE_END_OF_LIST()
1279 .subsections
= (const VMStateDescription
* []) {
1285 static void sysbus_esp_mem_write(void *opaque
, hwaddr addr
,
1286 uint64_t val
, unsigned int size
)
1288 SysBusESPState
*sysbus
= opaque
;
1289 ESPState
*s
= ESP(&sysbus
->esp
);
1292 saddr
= addr
>> sysbus
->it_shift
;
1293 esp_reg_write(s
, saddr
, val
);
1296 static uint64_t sysbus_esp_mem_read(void *opaque
, hwaddr addr
,
1299 SysBusESPState
*sysbus
= opaque
;
1300 ESPState
*s
= ESP(&sysbus
->esp
);
1303 saddr
= addr
>> sysbus
->it_shift
;
1304 return esp_reg_read(s
, saddr
);
1307 static const MemoryRegionOps sysbus_esp_mem_ops
= {
1308 .read
= sysbus_esp_mem_read
,
1309 .write
= sysbus_esp_mem_write
,
1310 .endianness
= DEVICE_NATIVE_ENDIAN
,
1311 .valid
.accepts
= esp_mem_accepts
,
1314 static void sysbus_esp_pdma_write(void *opaque
, hwaddr addr
,
1315 uint64_t val
, unsigned int size
)
1317 SysBusESPState
*sysbus
= opaque
;
1318 ESPState
*s
= ESP(&sysbus
->esp
);
1320 trace_esp_pdma_write(size
);
1324 esp_pdma_write(s
, val
);
1327 esp_pdma_write(s
, val
>> 8);
1328 esp_pdma_write(s
, val
);
1334 static uint64_t sysbus_esp_pdma_read(void *opaque
, hwaddr addr
,
1337 SysBusESPState
*sysbus
= opaque
;
1338 ESPState
*s
= ESP(&sysbus
->esp
);
1341 trace_esp_pdma_read(size
);
1345 val
= esp_pdma_read(s
);
1348 val
= esp_pdma_read(s
);
1349 val
= (val
<< 8) | esp_pdma_read(s
);
1352 if (fifo8_num_used(&s
->fifo
) < 2) {
1358 static void *esp_load_request(QEMUFile
*f
, SCSIRequest
*req
)
1360 ESPState
*s
= container_of(req
->bus
, ESPState
, bus
);
1363 s
->current_req
= req
;
1367 static const MemoryRegionOps sysbus_esp_pdma_ops
= {
1368 .read
= sysbus_esp_pdma_read
,
1369 .write
= sysbus_esp_pdma_write
,
1370 .endianness
= DEVICE_NATIVE_ENDIAN
,
1371 .valid
.min_access_size
= 1,
1372 .valid
.max_access_size
= 4,
1373 .impl
.min_access_size
= 1,
1374 .impl
.max_access_size
= 2,
1377 static const struct SCSIBusInfo esp_scsi_info
= {
1379 .max_target
= ESP_MAX_DEVS
,
1382 .load_request
= esp_load_request
,
1383 .transfer_data
= esp_transfer_data
,
1384 .complete
= esp_command_complete
,
1385 .cancel
= esp_request_cancelled
1388 static void sysbus_esp_gpio_demux(void *opaque
, int irq
, int level
)
1390 SysBusESPState
*sysbus
= SYSBUS_ESP(opaque
);
1391 ESPState
*s
= ESP(&sysbus
->esp
);
1395 parent_esp_reset(s
, irq
, level
);
1398 esp_dma_enable(opaque
, irq
, level
);
1403 static void sysbus_esp_realize(DeviceState
*dev
, Error
**errp
)
1405 SysBusDevice
*sbd
= SYS_BUS_DEVICE(dev
);
1406 SysBusESPState
*sysbus
= SYSBUS_ESP(dev
);
1407 ESPState
*s
= ESP(&sysbus
->esp
);
1409 if (!qdev_realize(DEVICE(s
), NULL
, errp
)) {
1413 sysbus_init_irq(sbd
, &s
->irq
);
1414 sysbus_init_irq(sbd
, &s
->irq_data
);
1415 assert(sysbus
->it_shift
!= -1);
1417 s
->chip_id
= TCHI_FAS100A
;
1418 memory_region_init_io(&sysbus
->iomem
, OBJECT(sysbus
), &sysbus_esp_mem_ops
,
1419 sysbus
, "esp-regs", ESP_REGS
<< sysbus
->it_shift
);
1420 sysbus_init_mmio(sbd
, &sysbus
->iomem
);
1421 memory_region_init_io(&sysbus
->pdma
, OBJECT(sysbus
), &sysbus_esp_pdma_ops
,
1422 sysbus
, "esp-pdma", 4);
1423 sysbus_init_mmio(sbd
, &sysbus
->pdma
);
1425 qdev_init_gpio_in(dev
, sysbus_esp_gpio_demux
, 2);
1427 scsi_bus_init(&s
->bus
, sizeof(s
->bus
), dev
, &esp_scsi_info
);
1430 static void sysbus_esp_hard_reset(DeviceState
*dev
)
1432 SysBusESPState
*sysbus
= SYSBUS_ESP(dev
);
1433 ESPState
*s
= ESP(&sysbus
->esp
);
1438 static void sysbus_esp_init(Object
*obj
)
1440 SysBusESPState
*sysbus
= SYSBUS_ESP(obj
);
1442 object_initialize_child(obj
, "esp", &sysbus
->esp
, TYPE_ESP
);
1445 static const VMStateDescription vmstate_sysbus_esp_scsi
= {
1446 .name
= "sysbusespscsi",
1448 .minimum_version_id
= 1,
1449 .pre_save
= esp_pre_save
,
1450 .fields
= (VMStateField
[]) {
1451 VMSTATE_UINT8_V(esp
.mig_version_id
, SysBusESPState
, 2),
1452 VMSTATE_STRUCT(esp
, SysBusESPState
, 0, vmstate_esp
, ESPState
),
1453 VMSTATE_END_OF_LIST()
1457 static void sysbus_esp_class_init(ObjectClass
*klass
, void *data
)
1459 DeviceClass
*dc
= DEVICE_CLASS(klass
);
1461 dc
->realize
= sysbus_esp_realize
;
1462 dc
->reset
= sysbus_esp_hard_reset
;
1463 dc
->vmsd
= &vmstate_sysbus_esp_scsi
;
1464 set_bit(DEVICE_CATEGORY_STORAGE
, dc
->categories
);
1467 static const TypeInfo sysbus_esp_info
= {
1468 .name
= TYPE_SYSBUS_ESP
,
1469 .parent
= TYPE_SYS_BUS_DEVICE
,
1470 .instance_init
= sysbus_esp_init
,
1471 .instance_size
= sizeof(SysBusESPState
),
1472 .class_init
= sysbus_esp_class_init
,
1475 static void esp_finalize(Object
*obj
)
1477 ESPState
*s
= ESP(obj
);
1479 fifo8_destroy(&s
->fifo
);
1480 fifo8_destroy(&s
->cmdfifo
);
1483 static void esp_init(Object
*obj
)
1485 ESPState
*s
= ESP(obj
);
1487 fifo8_create(&s
->fifo
, ESP_FIFO_SZ
);
1488 fifo8_create(&s
->cmdfifo
, ESP_CMDFIFO_SZ
);
1491 static void esp_class_init(ObjectClass
*klass
, void *data
)
1493 DeviceClass
*dc
= DEVICE_CLASS(klass
);
1495 /* internal device for sysbusesp/pciespscsi, not user-creatable */
1496 dc
->user_creatable
= false;
1497 set_bit(DEVICE_CATEGORY_STORAGE
, dc
->categories
);
1500 static const TypeInfo esp_info
= {
1502 .parent
= TYPE_DEVICE
,
1503 .instance_init
= esp_init
,
1504 .instance_finalize
= esp_finalize
,
1505 .instance_size
= sizeof(ESPState
),
1506 .class_init
= esp_class_init
,
1509 static void esp_register_types(void)
1511 type_register_static(&sysbus_esp_info
);
1512 type_register_static(&esp_info
);
1515 type_init(esp_register_types
)