Merge tag 'pull-riscv-to-apply-20240806-2' of https://github.com/alistair23/qemu...
[qemu/kevin.git] / hw / scsi / esp.c
blobb7af82562323fddd93af9089a16e78b0ddf2749b
1 /*
2 * QEMU ESP/NCR53C9x emulation
4 * Copyright (c) 2005-2006 Fabrice Bellard
5 * Copyright (c) 2012 Herve Poussineau
6 * Copyright (c) 2023 Mark Cave-Ayland
8 * Permission is hereby granted, free of charge, to any person obtaining a copy
9 * of this software and associated documentation files (the "Software"), to deal
10 * in the Software without restriction, including without limitation the rights
11 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12 * copies of the Software, and to permit persons to whom the Software is
13 * furnished to do so, subject to the following conditions:
15 * The above copyright notice and this permission notice shall be included in
16 * all copies or substantial portions of the Software.
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
24 * THE SOFTWARE.
27 #include "qemu/osdep.h"
28 #include "hw/sysbus.h"
29 #include "migration/vmstate.h"
30 #include "hw/irq.h"
31 #include "hw/scsi/esp.h"
32 #include "trace.h"
33 #include "qemu/log.h"
34 #include "qemu/module.h"
37 * On Sparc32, this is the ESP (NCR53C90) part of chip STP2000 (Master I/O),
38 * also produced as NCR89C100. See
39 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR89C100.txt
40 * and
41 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR53C9X.txt
43 * On Macintosh Quadra it is a NCR53C96.
46 static void esp_raise_irq(ESPState *s)
48 if (!(s->rregs[ESP_RSTAT] & STAT_INT)) {
49 s->rregs[ESP_RSTAT] |= STAT_INT;
50 qemu_irq_raise(s->irq);
51 trace_esp_raise_irq();
55 static void esp_lower_irq(ESPState *s)
57 if (s->rregs[ESP_RSTAT] & STAT_INT) {
58 s->rregs[ESP_RSTAT] &= ~STAT_INT;
59 qemu_irq_lower(s->irq);
60 trace_esp_lower_irq();
64 static void esp_raise_drq(ESPState *s)
66 if (!(s->drq_state)) {
67 qemu_irq_raise(s->drq_irq);
68 trace_esp_raise_drq();
69 s->drq_state = true;
73 static void esp_lower_drq(ESPState *s)
75 if (s->drq_state) {
76 qemu_irq_lower(s->drq_irq);
77 trace_esp_lower_drq();
78 s->drq_state = false;
/* Human-readable names for the 3-bit SCSI bus phase, indexed by phase value */
static const char *esp_phase_names[8] = {
    "DATA OUT", "DATA IN", "COMMAND", "STATUS",
    "(reserved)", "(reserved)", "MESSAGE OUT", "MESSAGE IN"
};
87 static void esp_set_phase(ESPState *s, uint8_t phase)
89 s->rregs[ESP_RSTAT] &= ~7;
90 s->rregs[ESP_RSTAT] |= phase;
92 trace_esp_set_phase(esp_phase_names[phase]);
95 static uint8_t esp_get_phase(ESPState *s)
97 return s->rregs[ESP_RSTAT] & 7;
100 void esp_dma_enable(ESPState *s, int irq, int level)
102 if (level) {
103 s->dma_enabled = 1;
104 trace_esp_dma_enable();
105 if (s->dma_cb) {
106 s->dma_cb(s);
107 s->dma_cb = NULL;
109 } else {
110 trace_esp_dma_disable();
111 s->dma_enabled = 0;
/*
 * SCSI layer callback: a request was cancelled (e.g. by a new selection).
 * If it is the request we are servicing, drop our reference and clear the
 * in-flight transfer state.
 */
void esp_request_cancelled(SCSIRequest *req)
{
    ESPState *s = req->hba_private;

    if (req == s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
        s->async_len = 0;
    }
}
/*
 * Recompute the DRQ line from the current SCSI phase, transfer direction
 * and FIFO occupancy.  Called after every FIFO push/pop so that DRQ always
 * reflects whether the external DMA engine can move more data.
 */
static void esp_update_drq(ESPState *s)
{
    bool to_device;

    switch (esp_get_phase(s)) {
    case STAT_MO:
    case STAT_CD:
    case STAT_DO:
        /* Initiator -> target phases: the host fills the FIFO */
        to_device = true;
        break;

    case STAT_DI:
    case STAT_ST:
    case STAT_MI:
        /* Target -> initiator phases: the host drains the FIFO */
        to_device = false;
        break;

    default:
        /* Reserved phase values: leave DRQ untouched */
        return;
    }

    if (s->dma) {
        /* DMA request so update DRQ according to transfer direction */
        if (to_device) {
            /* 2-byte margin: PDMA accesses may be 16-bit wide */
            if (fifo8_num_free(&s->fifo) < 2) {
                esp_lower_drq(s);
            } else {
                esp_raise_drq(s);
            }
        } else {
            if (fifo8_num_used(&s->fifo) < 2) {
                esp_lower_drq(s);
            } else {
                esp_raise_drq(s);
            }
        }
    } else {
        /* Not a DMA request */
        esp_lower_drq(s);
    }
}
169 static void esp_fifo_push(ESPState *s, uint8_t val)
171 if (fifo8_num_used(&s->fifo) == s->fifo.capacity) {
172 trace_esp_error_fifo_overrun();
173 } else {
174 fifo8_push(&s->fifo, val);
177 esp_update_drq(s);
180 static void esp_fifo_push_buf(ESPState *s, uint8_t *buf, int len)
182 fifo8_push_all(&s->fifo, buf, len);
183 esp_update_drq(s);
186 static uint8_t esp_fifo_pop(ESPState *s)
188 uint8_t val;
190 if (fifo8_is_empty(&s->fifo)) {
191 val = 0;
192 } else {
193 val = fifo8_pop(&s->fifo);
196 esp_update_drq(s);
197 return val;
200 static uint32_t esp_fifo_pop_buf(ESPState *s, uint8_t *dest, int maxlen)
202 uint32_t len = fifo8_pop_buf(&s->fifo, dest, maxlen);
204 esp_update_drq(s);
205 return len;
208 static uint32_t esp_get_tc(ESPState *s)
210 uint32_t dmalen;
212 dmalen = s->rregs[ESP_TCLO];
213 dmalen |= s->rregs[ESP_TCMID] << 8;
214 dmalen |= s->rregs[ESP_TCHI] << 16;
216 return dmalen;
219 static void esp_set_tc(ESPState *s, uint32_t dmalen)
221 uint32_t old_tc = esp_get_tc(s);
223 s->rregs[ESP_TCLO] = dmalen;
224 s->rregs[ESP_TCMID] = dmalen >> 8;
225 s->rregs[ESP_TCHI] = dmalen >> 16;
227 if (old_tc && dmalen == 0) {
228 s->rregs[ESP_RSTAT] |= STAT_TC;
232 static uint32_t esp_get_stc(ESPState *s)
234 uint32_t dmalen;
236 dmalen = s->wregs[ESP_TCLO];
237 dmalen |= s->wregs[ESP_TCMID] << 8;
238 dmalen |= s->wregs[ESP_TCHI] << 16;
240 return dmalen;
243 static uint8_t esp_pdma_read(ESPState *s)
245 uint8_t val;
247 val = esp_fifo_pop(s);
248 return val;
/*
 * PDMA write of one byte from the host: push it into the FIFO and, while
 * DRQ is asserted and the counter is non-zero, count it against TC.
 */
static void esp_pdma_write(ESPState *s, uint8_t val)
{
    uint32_t dmalen = esp_get_tc(s);

    esp_fifo_push(s, val);

    if (dmalen && s->drq_state) {
        dmalen--;
        esp_set_tc(s, dmalen);
    }
}
/*
 * Begin selection of the target encoded in the WBUSID register.
 * Returns 0 on success; returns -1 and raises a disconnect interrupt if
 * no device responds at that ID.  The success IRQ is deliberately deferred.
 */
static int esp_select(ESPState *s)
{
    int target;

    target = s->wregs[ESP_WBUSID] & BUSID_DID;

    s->ti_size = 0;
    s->rregs[ESP_RSEQ] = SEQ_0;

    if (s->current_req) {
        /* Started a new command before the old one finished. Cancel it. */
        scsi_req_cancel(s->current_req);
    }

    s->current_dev = scsi_device_find(&s->bus, 0, target, 0);
    if (!s->current_dev) {
        /* No such drive */
        s->rregs[ESP_RSTAT] = 0;
        s->rregs[ESP_RINTR] = INTR_DC;
        esp_raise_irq(s);
        return -1;
    }

    /*
     * Note that we deliberately don't raise the IRQ here: this will be done
     * either in esp_transfer_data() or esp_command_complete()
     */
    return 0;
}
293 static void esp_do_dma(ESPState *s);
294 static void esp_do_nodma(ESPState *s);
/*
 * Execute the CDB accumulated in cmdfifo: resolve the LUN, create and
 * enqueue the SCSI request, and switch to the appropriate DATA phase if
 * the command transfers data.  The completion interrupt is deferred until
 * the initial data transfer happens (or the command completes).
 */
static void do_command_phase(ESPState *s)
{
    uint32_t cmdlen;
    int32_t datalen;
    SCSIDevice *current_lun;
    uint8_t buf[ESP_CMDFIFO_SZ];

    trace_esp_do_command_phase(s->lun);
    cmdlen = fifo8_num_used(&s->cmdfifo);
    if (!cmdlen || !s->current_dev) {
        return;
    }
    fifo8_pop_buf(&s->cmdfifo, buf, cmdlen);

    current_lun = scsi_device_find(&s->bus, 0, s->current_dev->id, s->lun);
    if (!current_lun) {
        /* No such drive */
        s->rregs[ESP_RSTAT] = 0;
        s->rregs[ESP_RINTR] = INTR_DC;
        s->rregs[ESP_RSEQ] = SEQ_0;
        esp_raise_irq(s);
        return;
    }

    s->current_req = scsi_req_new(current_lun, 0, s->lun, buf, cmdlen, s);
    datalen = scsi_req_enqueue(s->current_req);
    s->ti_size = datalen;
    fifo8_reset(&s->cmdfifo);
    s->data_ready = false;
    if (datalen != 0) {
        /*
         * Switch to DATA phase but wait until initial data xfer is
         * complete before raising the command completion interrupt
         */
        if (datalen > 0) {
            esp_set_phase(s, STAT_DI);
        } else {
            esp_set_phase(s, STAT_DO);
        }
        scsi_req_continue(s->current_req);
        return;
    }
}
/*
 * Consume the MESSAGE OUT bytes that precede the CDB in cmdfifo: the
 * IDENTIFY byte (whose low 3 bits select the LUN) plus any further
 * message bytes, which are dropped.
 */
static void do_message_phase(ESPState *s)
{
    if (s->cmdfifo_cdb_offset) {
        /* IDENTIFY message: low 3 bits carry the LUN */
        uint8_t message = fifo8_is_empty(&s->cmdfifo) ? 0 :
                          fifo8_pop(&s->cmdfifo);

        trace_esp_do_identify(message);
        s->lun = message & 7;
        s->cmdfifo_cdb_offset--;
    }

    /* Ignore extended messages for now */
    if (s->cmdfifo_cdb_offset) {
        int len = MIN(s->cmdfifo_cdb_offset, fifo8_num_used(&s->cmdfifo));
        fifo8_drop(&s->cmdfifo, len);
        s->cmdfifo_cdb_offset = 0;
    }
}
/* Process cmdfifo contents: message phase bytes first, then the CDB */
static void do_cmd(ESPState *s)
{
    do_message_phase(s);
    assert(s->cmdfifo_cdb_offset == 0);
    do_command_phase(s);
}
/* SELECT WITH ATN: select the target, then transfer MESSAGE OUT + CDB */
static void handle_satn(ESPState *s)
{
    if (s->dma && !s->dma_enabled) {
        /* Defer until the DMA controller enables us */
        s->dma_cb = handle_satn;
        return;
    }

    if (esp_select(s) < 0) {
        return;
    }

    esp_set_phase(s, STAT_MO);

    if (s->dma) {
        esp_do_dma(s);
    } else {
        esp_do_nodma(s);
    }
}
/* SELECT WITHOUT ATN: no message phase, go straight to COMMAND */
static void handle_s_without_atn(ESPState *s)
{
    if (s->dma && !s->dma_enabled) {
        /* Defer until the DMA controller enables us */
        s->dma_cb = handle_s_without_atn;
        return;
    }

    if (esp_select(s) < 0) {
        return;
    }

    esp_set_phase(s, STAT_CD);
    s->cmdfifo_cdb_offset = 0;

    if (s->dma) {
        esp_do_dma(s);
    } else {
        esp_do_nodma(s);
    }
}
/* SELECT WITH ATN AND STOP: pause after the first MESSAGE OUT byte */
static void handle_satn_stop(ESPState *s)
{
    if (s->dma && !s->dma_enabled) {
        /* Defer until the DMA controller enables us */
        s->dma_cb = handle_satn_stop;
        return;
    }

    if (esp_select(s) < 0) {
        return;
    }

    esp_set_phase(s, STAT_MO);
    s->cmdfifo_cdb_offset = 0;

    if (s->dma) {
        esp_do_dma(s);
    } else {
        esp_do_nodma(s);
    }
}
428 static void handle_pad(ESPState *s)
430 if (s->dma) {
431 esp_do_dma(s);
432 } else {
433 esp_do_nodma(s);
437 static void write_response(ESPState *s)
439 trace_esp_write_response(s->status);
441 if (s->dma) {
442 esp_do_dma(s);
443 } else {
444 esp_do_nodma(s);
/*
 * Return true once cmdfifo holds a complete CDB after any message bytes
 * (at cmdfifo_cdb_offset), using the CDB group code to determine the
 * expected length.
 */
static bool esp_cdb_ready(ESPState *s)
{
    int len = fifo8_num_used(&s->cmdfifo) - s->cmdfifo_cdb_offset;
    const uint8_t *pbuf;
    uint32_t n;
    int cdblen;

    if (len <= 0) {
        return false;
    }

    pbuf = fifo8_peek_bufptr(&s->cmdfifo, len, &n);
    if (n < len) {
        /*
         * In normal use the cmdfifo should never wrap, but include this check
         * to prevent a malicious guest from reading past the end of the
         * cmdfifo data buffer below
         */
        return false;
    }

    cdblen = scsi_cdb_length((uint8_t *)&pbuf[s->cmdfifo_cdb_offset]);

    /* scsi_cdb_length() reports an unrecognised group code as < 0 */
    return cdblen < 0 ? false : (len >= cdblen);
}
474 static void esp_dma_ti_check(ESPState *s)
476 if (esp_get_tc(s) == 0 && fifo8_num_used(&s->fifo) < 2) {
477 s->rregs[ESP_RINTR] |= INTR_BS;
478 esp_raise_irq(s);
/*
 * Advance a DMA transfer in the current SCSI phase.  The amount moved per
 * step is bounded by the transfer counter (TC), the relevant FIFO space
 * and, for true DMA, the dma_memory_read/write callbacks; without those
 * callbacks data is staged through the FIFO (PDMA).  May recurse to
 * consume further data in a following phase.
 */
static void esp_do_dma(ESPState *s)
{
    uint32_t len, cmdlen;
    uint8_t buf[ESP_CMDFIFO_SZ];

    len = esp_get_tc(s);

    switch (esp_get_phase(s)) {
    case STAT_MO:
        /* MESSAGE OUT: accumulate message bytes into cmdfifo */
        if (s->dma_memory_read) {
            len = MIN(len, fifo8_num_free(&s->cmdfifo));
            s->dma_memory_read(s->dma_opaque, buf, len);
            esp_set_tc(s, esp_get_tc(s) - len);
        } else {
            /* PDMA: drain the FIFO instead of reading guest memory */
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
        }

        fifo8_push_all(&s->cmdfifo, buf, len);
        s->cmdfifo_cdb_offset += len;

        switch (s->rregs[ESP_CMD]) {
        case CMD_SELATN | CMD_DMA:
            if (fifo8_num_used(&s->cmdfifo) >= 1) {
                /* First byte received, switch to command phase */
                esp_set_phase(s, STAT_CD);
                s->rregs[ESP_RSEQ] = SEQ_CD;
                s->cmdfifo_cdb_offset = 1;

                if (fifo8_num_used(&s->cmdfifo) > 1) {
                    /* Process any additional command phase data */
                    esp_do_dma(s);
                }
            }
            break;

        case CMD_SELATNS | CMD_DMA:
            if (fifo8_num_used(&s->cmdfifo) == 1) {
                /* First byte received, stop in message out phase */
                s->rregs[ESP_RSEQ] = SEQ_MO;
                s->cmdfifo_cdb_offset = 1;

                /* Raise command completion interrupt */
                s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
                esp_raise_irq(s);
            }
            break;

        case CMD_TI | CMD_DMA:
            /* ATN remains asserted until TC == 0 */
            if (esp_get_tc(s) == 0) {
                esp_set_phase(s, STAT_CD);
                s->rregs[ESP_CMD] = 0;
                s->rregs[ESP_RINTR] |= INTR_BS;
                esp_raise_irq(s);
            }
            break;
        }
        break;

    case STAT_CD:
        /* COMMAND: accumulate CDB bytes, execute once TC reaches zero */
        cmdlen = fifo8_num_used(&s->cmdfifo);
        trace_esp_do_dma(cmdlen, len);
        if (s->dma_memory_read) {
            len = MIN(len, fifo8_num_free(&s->cmdfifo));
            s->dma_memory_read(s->dma_opaque, buf, len);
            fifo8_push_all(&s->cmdfifo, buf, len);
            esp_set_tc(s, esp_get_tc(s) - len);
        } else {
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);
        }
        trace_esp_handle_ti_cmd(cmdlen);
        s->ti_size = 0;
        if (esp_get_tc(s) == 0) {
            /* Command has been received */
            do_cmd(s);
        }
        break;

    case STAT_DO:
        /* DATA OUT: move guest data into the SCSI layer's async buffer */
        if (!s->current_req) {
            return;
        }
        if (s->async_len == 0 && esp_get_tc(s)) {
            /* Defer until data is available.  */
            return;
        }
        if (len > s->async_len) {
            len = s->async_len;
        }

        switch (s->rregs[ESP_CMD]) {
        case CMD_TI | CMD_DMA:
            if (s->dma_memory_read) {
                s->dma_memory_read(s->dma_opaque, s->async_buf, len);
                esp_set_tc(s, esp_get_tc(s) - len);
            } else {
                /* Copy FIFO data to device */
                len = MIN(s->async_len, ESP_FIFO_SZ);
                len = MIN(len, fifo8_num_used(&s->fifo));
                len = esp_fifo_pop_buf(s, s->async_buf, len);
            }

            s->async_buf += len;
            s->async_len -= len;
            s->ti_size += len;
            break;

        case CMD_PAD | CMD_DMA:
            /* Copy TC zero bytes into the incoming stream */
            if (!s->dma_memory_read) {
                len = MIN(s->async_len, ESP_FIFO_SZ);
                len = MIN(len, fifo8_num_free(&s->fifo));
            }

            memset(s->async_buf, 0, len);

            s->async_buf += len;
            s->async_len -= len;
            s->ti_size += len;
            break;
        }

        if (s->async_len == 0 && fifo8_num_used(&s->fifo) < 2) {
            /* Defer until the scsi layer has completed */
            scsi_req_continue(s->current_req);
            return;
        }

        esp_dma_ti_check(s);
        break;

    case STAT_DI:
        /* DATA IN: move SCSI layer data towards the guest */
        if (!s->current_req) {
            return;
        }
        if (s->async_len == 0 && esp_get_tc(s)) {
            /* Defer until data is available.  */
            return;
        }
        if (len > s->async_len) {
            len = s->async_len;
        }

        switch (s->rregs[ESP_CMD]) {
        case CMD_TI | CMD_DMA:
            if (s->dma_memory_write) {
                s->dma_memory_write(s->dma_opaque, s->async_buf, len);
            } else {
                /* Copy device data to FIFO */
                len = MIN(len, fifo8_num_free(&s->fifo));
                esp_fifo_push_buf(s, s->async_buf, len);
            }

            s->async_buf += len;
            s->async_len -= len;
            s->ti_size -= len;
            esp_set_tc(s, esp_get_tc(s) - len);
            break;

        case CMD_PAD | CMD_DMA:
            /* Drop TC bytes from the incoming stream */
            if (!s->dma_memory_write) {
                len = MIN(len, fifo8_num_free(&s->fifo));
            }

            s->async_buf += len;
            s->async_len -= len;
            s->ti_size -= len;
            esp_set_tc(s, esp_get_tc(s) - len);
            break;
        }

        if (s->async_len == 0 && s->ti_size == 0 && esp_get_tc(s)) {
            /* If the guest underflows TC then terminate SCSI request */
            scsi_req_continue(s->current_req);
            return;
        }

        if (s->async_len == 0 && fifo8_num_used(&s->fifo) < 2) {
            /* Defer until the scsi layer has completed */
            scsi_req_continue(s->current_req);
            return;
        }

        esp_dma_ti_check(s);
        break;

    case STAT_ST:
        /* STATUS: deliver the single status byte */
        switch (s->rregs[ESP_CMD]) {
        case CMD_ICCS | CMD_DMA:
            len = MIN(len, 1);

            if (len) {
                buf[0] = s->status;

                if (s->dma_memory_write) {
                    s->dma_memory_write(s->dma_opaque, buf, len);
                } else {
                    esp_fifo_push_buf(s, buf, len);
                }

                esp_set_tc(s, esp_get_tc(s) - len);
                esp_set_phase(s, STAT_MI);

                if (esp_get_tc(s) > 0) {
                    /* Process any message in phase data */
                    esp_do_dma(s);
                }
            }
            break;

        default:
            /* Consume remaining data if the guest underflows TC */
            if (fifo8_num_used(&s->fifo) < 2) {
                s->rregs[ESP_RINTR] |= INTR_BS;
                esp_raise_irq(s);
            }
            break;
        }
        break;

    case STAT_MI:
        /* MESSAGE IN: deliver the COMMAND COMPLETE (0) byte */
        switch (s->rregs[ESP_CMD]) {
        case CMD_ICCS | CMD_DMA:
            len = MIN(len, 1);

            if (len) {
                buf[0] = 0;

                if (s->dma_memory_write) {
                    s->dma_memory_write(s->dma_opaque, buf, len);
                } else {
                    esp_fifo_push_buf(s, buf, len);
                }

                esp_set_tc(s, esp_get_tc(s) - len);

                /* Raise end of command interrupt */
                s->rregs[ESP_RINTR] |= INTR_FC;
                esp_raise_irq(s);
            }
            break;
        }
        break;
    }
}
/*
 * Complete a non-DMA TI in the DATA OUT phase: move bytes that have
 * accumulated in the FIFO into the SCSI layer's async buffer.
 */
static void esp_nodma_ti_dataout(ESPState *s)
{
    int len;

    if (!s->current_req) {
        return;
    }
    if (s->async_len == 0) {
        /* Defer until data is available.  */
        return;
    }
    len = MIN(s->async_len, ESP_FIFO_SZ);
    len = MIN(len, fifo8_num_used(&s->fifo));
    esp_fifo_pop_buf(s, s->async_buf, len);
    s->async_buf += len;
    s->async_len -= len;
    s->ti_size += len;

    if (s->async_len == 0) {
        /* Buffer filled: hand back to the SCSI layer */
        scsi_req_continue(s->current_req);
        return;
    }

    s->rregs[ESP_RINTR] |= INTR_BS;
    esp_raise_irq(s);
}
/*
 * Advance a non-DMA (programmed I/O) transfer in the current SCSI phase.
 * Data moves through the 16-byte FIFO; may recurse to process the
 * following phase's data.
 */
static void esp_do_nodma(ESPState *s)
{
    uint8_t buf[ESP_FIFO_SZ];
    uint32_t cmdlen;
    int len;

    switch (esp_get_phase(s)) {
    case STAT_MO:
        switch (s->rregs[ESP_CMD]) {
        case CMD_SELATN:
            /* Copy FIFO into cmdfifo */
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            if (fifo8_num_used(&s->cmdfifo) >= 1) {
                /* First byte received, switch to command phase */
                esp_set_phase(s, STAT_CD);
                s->rregs[ESP_RSEQ] = SEQ_CD;
                s->cmdfifo_cdb_offset = 1;

                if (fifo8_num_used(&s->cmdfifo) > 1) {
                    /* Process any additional command phase data */
                    esp_do_nodma(s);
                }
            }
            break;

        case CMD_SELATNS:
            /* Copy one byte from FIFO into cmdfifo */
            len = esp_fifo_pop_buf(s, buf,
                                   MIN(fifo8_num_used(&s->fifo), 1));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            if (fifo8_num_used(&s->cmdfifo) >= 1) {
                /* First byte received, stop in message out phase */
                s->rregs[ESP_RSEQ] = SEQ_MO;
                s->cmdfifo_cdb_offset = 1;

                /* Raise command completion interrupt */
                s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
                esp_raise_irq(s);
            }
            break;

        case CMD_TI:
            /* Copy FIFO into cmdfifo */
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            /* ATN remains asserted until FIFO empty */
            s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo);
            esp_set_phase(s, STAT_CD);
            s->rregs[ESP_CMD] = 0;
            s->rregs[ESP_RINTR] |= INTR_BS;
            esp_raise_irq(s);
            break;
        }
        break;

    case STAT_CD:
        switch (s->rregs[ESP_CMD]) {
        case CMD_TI:
            /* Copy FIFO into cmdfifo */
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            cmdlen = fifo8_num_used(&s->cmdfifo);
            trace_esp_handle_ti_cmd(cmdlen);

            /* CDB may be transferred in one or more TI commands */
            if (esp_cdb_ready(s)) {
                /* Command has been received */
                do_cmd(s);
            } else {
                /*
                 * If data was transferred from the FIFO then raise bus
                 * service interrupt to indicate transfer complete. Otherwise
                 * defer until the next FIFO write.
                 */
                if (len) {
                    /* Raise interrupt to indicate transfer complete */
                    s->rregs[ESP_RINTR] |= INTR_BS;
                    esp_raise_irq(s);
                }
            }
            break;

        case CMD_SEL | CMD_DMA:
        case CMD_SELATN | CMD_DMA:
            /* Copy FIFO into cmdfifo */
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            /* Handle when DMA transfer is terminated by non-DMA FIFO write */
            if (esp_cdb_ready(s)) {
                /* Command has been received */
                do_cmd(s);
            }
            break;

        case CMD_SEL:
        case CMD_SELATN:
            /* FIFO already contain entire CDB: copy to cmdfifo and execute */
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            do_cmd(s);
            break;
        }
        break;

    case STAT_DO:
        /* Accumulate data in FIFO until non-DMA TI is executed */
        break;

    case STAT_DI:
        if (!s->current_req) {
            return;
        }
        if (s->async_len == 0) {
            /* Defer until data is available.  */
            return;
        }
        if (fifo8_is_empty(&s->fifo)) {
            /* One byte at a time through the FIFO */
            esp_fifo_push(s, s->async_buf[0]);
            s->async_buf++;
            s->async_len--;
            s->ti_size--;
        }

        if (s->async_len == 0) {
            scsi_req_continue(s->current_req);
            return;
        }

        /* If preloading the FIFO, defer until TI command issued */
        if (s->rregs[ESP_CMD] != CMD_TI) {
            return;
        }

        s->rregs[ESP_RINTR] |= INTR_BS;
        esp_raise_irq(s);
        break;

    case STAT_ST:
        switch (s->rregs[ESP_CMD]) {
        case CMD_ICCS:
            esp_fifo_push(s, s->status);
            esp_set_phase(s, STAT_MI);

            /* Process any message in phase data */
            esp_do_nodma(s);
            break;
        }
        break;

    case STAT_MI:
        switch (s->rregs[ESP_CMD]) {
        case CMD_ICCS:
            /* COMMAND COMPLETE message byte */
            esp_fifo_push(s, 0);

            /* Raise end of command interrupt */
            s->rregs[ESP_RINTR] |= INTR_FC;
            esp_raise_irq(s);
            break;
        }
        break;
    }
}
/*
 * SCSI layer callback: the current request has finished.  Latch the status
 * byte, switch to the STATUS phase (raising bus service / function
 * complete interrupts as the issuing command requires) and release the
 * request.
 */
void esp_command_complete(SCSIRequest *req, size_t resid)
{
    ESPState *s = req->hba_private;
    int to_device = (esp_get_phase(s) == STAT_DO);

    trace_esp_command_complete();

    /*
     * Non-DMA transfers from the target will leave the last byte in
     * the FIFO so don't reset ti_size in this case
     */
    if (s->dma || to_device) {
        if (s->ti_size != 0) {
            trace_esp_command_complete_unexpected();
        }
    }

    s->async_len = 0;
    if (req->status) {
        trace_esp_command_complete_fail();
    }
    s->status = req->status;

    /*
     * Switch to status phase. For non-DMA transfers from the target the last
     * byte is still in the FIFO
     */
    s->ti_size = 0;

    switch (s->rregs[ESP_CMD]) {
    case CMD_SEL | CMD_DMA:
    case CMD_SEL:
    case CMD_SELATN | CMD_DMA:
    case CMD_SELATN:
        /*
         * No data phase for sequencer command so raise deferred bus service
         * and function complete interrupt
         */
        s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        break;

    case CMD_TI | CMD_DMA:
    case CMD_TI:
        s->rregs[ESP_CMD] = 0;
        break;
    }

    /* Raise bus service interrupt to indicate change to STATUS phase */
    esp_set_phase(s, STAT_ST);
    s->rregs[ESP_RINTR] |= INTR_BS;
    esp_raise_irq(s);

    if (s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
    }
}
/*
 * SCSI layer callback: more data is available for the current request.
 * Record the async buffer/length, raise the deferred selection-complete
 * interrupt on the first call for a command, and resume the transfer if
 * a TI command is in flight.
 */
void esp_transfer_data(SCSIRequest *req, uint32_t len)
{
    ESPState *s = req->hba_private;
    uint32_t dmalen = esp_get_tc(s);

    trace_esp_transfer_data(dmalen, s->ti_size);
    s->async_len = len;
    s->async_buf = scsi_req_get_buf(req);

    if (!s->data_ready) {
        s->data_ready = true;

        switch (s->rregs[ESP_CMD]) {
        case CMD_SEL | CMD_DMA:
        case CMD_SEL:
        case CMD_SELATN | CMD_DMA:
        case CMD_SELATN:
            /*
             * Initial incoming data xfer is complete for sequencer command
             * so raise deferred bus service and function complete interrupt
             */
            s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
            s->rregs[ESP_RSEQ] = SEQ_CD;
            break;

        case CMD_SELATNS | CMD_DMA:
        case CMD_SELATNS:
            /*
             * Initial incoming data xfer is complete so raise command
             * completion interrupt
             */
            s->rregs[ESP_RINTR] |= INTR_BS;
            s->rregs[ESP_RSEQ] = SEQ_MO;
            break;

        case CMD_TI | CMD_DMA:
        case CMD_TI:
            /*
             * Bus service interrupt raised because of initial change to
             * DATA phase
             */
            s->rregs[ESP_CMD] = 0;
            s->rregs[ESP_RINTR] |= INTR_BS;
            break;
        }

        esp_raise_irq(s);
    }

    /*
     * Always perform the initial transfer upon reception of the next TI
     * command to ensure the DMA/non-DMA status of the command is correct.
     * It is not possible to use s->dma directly in the section below as
     * some OSs send non-DMA NOP commands after a DMA transfer. Hence if the
     * async data transfer is delayed then s->dma is set incorrectly.
     */
    if (s->rregs[ESP_CMD] == (CMD_TI | CMD_DMA)) {
        /* When the SCSI layer returns more data, raise deferred INTR_BS */
        esp_dma_ti_check(s);

        esp_do_dma(s);
    } else if (s->rregs[ESP_CMD] == CMD_TI) {
        esp_do_nodma(s);
    }
}
/*
 * TRANSFER INFORMATION: start (or continue) an information transfer in the
 * current phase, deferring until the DMA controller enables us if needed.
 */
static void handle_ti(ESPState *s)
{
    uint32_t dmalen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_ti;
        return;
    }

    if (s->dma) {
        dmalen = esp_get_tc(s);
        trace_esp_handle_ti(dmalen);
        esp_do_dma(s);
    } else {
        trace_esp_handle_ti(s->ti_size);
        esp_do_nodma(s);

        /* Non-DMA DATA OUT: flush FIFO contents to the device */
        if (esp_get_phase(s) == STAT_DO) {
            esp_nodma_ti_dataout(s);
        }
    }
}
1085 void esp_hard_reset(ESPState *s)
1087 memset(s->rregs, 0, ESP_REGS);
1088 memset(s->wregs, 0, ESP_REGS);
1089 s->tchi_written = 0;
1090 s->ti_size = 0;
1091 s->async_len = 0;
1092 fifo8_reset(&s->fifo);
1093 fifo8_reset(&s->cmdfifo);
1094 s->dma = 0;
1095 s->dma_cb = NULL;
1097 s->rregs[ESP_CFG1] = 7;
/* Chip-level reset: drop both output lines, then clear all chip state */
static void esp_soft_reset(ESPState *s)
{
    qemu_irq_lower(s->irq);
    qemu_irq_lower(s->drq_irq);
    esp_hard_reset(s);
}
/* Propagate a SCSI bus reset to every device on the bus */
static void esp_bus_reset(ESPState *s)
{
    bus_cold_reset(BUS(&s->bus));
}
/* GPIO handler from the parent device: a non-zero level resets the chip */
static void parent_esp_reset(ESPState *s, int irq, int level)
{
    if (level) {
        esp_soft_reset(s);
    }
}
/*
 * Decode and execute the command just written to ESP_CMD.  For DMA
 * variants the transfer counter is first reloaded from the start transfer
 * count registers (a start count of 0 means the maximum, 0x10000).
 */
static void esp_run_cmd(ESPState *s)
{
    uint8_t cmd = s->rregs[ESP_CMD];

    if (cmd & CMD_DMA) {
        s->dma = 1;
        /* Reload DMA counter.  */
        if (esp_get_stc(s) == 0) {
            esp_set_tc(s, 0x10000);
        } else {
            esp_set_tc(s, esp_get_stc(s));
        }
    } else {
        s->dma = 0;
    }
    switch (cmd & CMD_CMD) {
    case CMD_NOP:
        trace_esp_mem_writeb_cmd_nop(cmd);
        break;
    case CMD_FLUSH:
        trace_esp_mem_writeb_cmd_flush(cmd);
        fifo8_reset(&s->fifo);
        break;
    case CMD_RESET:
        trace_esp_mem_writeb_cmd_reset(cmd);
        esp_soft_reset(s);
        break;
    case CMD_BUSRESET:
        trace_esp_mem_writeb_cmd_bus_reset(cmd);
        esp_bus_reset(s);
        /* Reset interrupt is suppressed when CFG1_RESREPT is set */
        if (!(s->wregs[ESP_CFG1] & CFG1_RESREPT)) {
            s->rregs[ESP_RINTR] |= INTR_RST;
            esp_raise_irq(s);
        }
        break;
    case CMD_TI:
        trace_esp_mem_writeb_cmd_ti(cmd);
        handle_ti(s);
        break;
    case CMD_ICCS:
        trace_esp_mem_writeb_cmd_iccs(cmd);
        write_response(s);
        break;
    case CMD_MSGACC:
        trace_esp_mem_writeb_cmd_msgacc(cmd);
        /* Message accepted: report disconnect */
        s->rregs[ESP_RINTR] |= INTR_DC;
        s->rregs[ESP_RSEQ] = 0;
        s->rregs[ESP_RFLAGS] = 0;
        esp_raise_irq(s);
        break;
    case CMD_PAD:
        trace_esp_mem_writeb_cmd_pad(cmd);
        handle_pad(s);
        break;
    case CMD_SATN:
        trace_esp_mem_writeb_cmd_satn(cmd);
        break;
    case CMD_RSTATN:
        trace_esp_mem_writeb_cmd_rstatn(cmd);
        break;
    case CMD_SEL:
        trace_esp_mem_writeb_cmd_sel(cmd);
        handle_s_without_atn(s);
        break;
    case CMD_SELATN:
        trace_esp_mem_writeb_cmd_selatn(cmd);
        handle_satn(s);
        break;
    case CMD_SELATNS:
        trace_esp_mem_writeb_cmd_selatns(cmd);
        handle_satn_stop(s);
        break;
    case CMD_ENSEL:
        trace_esp_mem_writeb_cmd_ensel(cmd);
        s->rregs[ESP_RINTR] = 0;
        break;
    case CMD_DISSEL:
        trace_esp_mem_writeb_cmd_dissel(cmd);
        s->rregs[ESP_RINTR] = 0;
        esp_raise_irq(s);
        break;
    default:
        trace_esp_error_unhandled_command(cmd);
        break;
    }
}
/*
 * Read one ESP register.  Some reads have side effects: reading the FIFO
 * pops a byte, and reading RINTR acknowledges and clears the pending
 * interrupt state.
 */
uint64_t esp_reg_read(ESPState *s, uint32_t saddr)
{
    uint32_t val;

    switch (saddr) {
    case ESP_FIFO:
        s->rregs[ESP_FIFO] = esp_fifo_pop(s);
        val = s->rregs[ESP_FIFO];
        break;
    case ESP_RINTR:
        /*
         * Clear sequence step, interrupt register and all status bits
         * except TC
         */
        val = s->rregs[ESP_RINTR];
        s->rregs[ESP_RINTR] = 0;
        esp_lower_irq(s);
        s->rregs[ESP_RSTAT] &= STAT_TC | 7;
        /*
         * According to the datasheet ESP_RSEQ should be cleared, but as the
         * emulation currently defers information transfers to the next TI
         * command leave it for now so that pedantic guests such as the old
         * Linux 2.6 driver see the correct flags before the next SCSI phase
         * transition.
         *
         * s->rregs[ESP_RSEQ] = SEQ_0;
         */
        break;
    case ESP_TCHI:
        /* Return the unique id if the value has never been written */
        if (!s->tchi_written) {
            val = s->chip_id;
        } else {
            val = s->rregs[saddr];
        }
        break;
    case ESP_RFLAGS:
        /* Bottom 5 bits indicate number of bytes in FIFO */
        val = fifo8_num_used(&s->fifo);
        break;
    default:
        val = s->rregs[saddr];
        break;
    }

    trace_esp_mem_readb(saddr, val);
    return val;
}
/*
 * Write one ESP register.  Writing a TC register clears the STAT_TC latch,
 * a FIFO write may immediately advance a non-DMA transfer, and a CMD write
 * executes the command.  The value is mirrored into wregs on success.
 */
void esp_reg_write(ESPState *s, uint32_t saddr, uint64_t val)
{
    trace_esp_mem_writeb(saddr, s->wregs[saddr], val);
    switch (saddr) {
    case ESP_TCHI:
        s->tchi_written = true;
        /* fall through */
    case ESP_TCLO:
    case ESP_TCMID:
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        break;
    case ESP_FIFO:
        if (!fifo8_is_full(&s->fifo)) {
            esp_fifo_push(s, val);
        }
        /* A FIFO write can complete an in-progress non-DMA transfer */
        esp_do_nodma(s);
        break;
    case ESP_CMD:
        s->rregs[saddr] = val;
        esp_run_cmd(s);
        break;
    case ESP_WBUSID ... ESP_WSYNO:
        break;
    case ESP_CFG1:
    case ESP_CFG2: case ESP_CFG3:
    case ESP_RES3: case ESP_RES4:
        s->rregs[saddr] = val;
        break;
    case ESP_WCCF ... ESP_WTEST:
        break;
    default:
        trace_esp_error_invalid_write(val, saddr);
        return;
    }
    s->wregs[saddr] = val;
}
1292 static bool esp_mem_accepts(void *opaque, hwaddr addr,
1293 unsigned size, bool is_write,
1294 MemTxAttrs attrs)
1296 return (size == 1) || (is_write && size == 4);
1299 static bool esp_is_before_version_5(void *opaque, int version_id)
1301 ESPState *s = ESP(opaque);
1303 version_id = MIN(version_id, s->mig_version_id);
1304 return version_id < 5;
1307 static bool esp_is_version_5(void *opaque, int version_id)
1309 ESPState *s = ESP(opaque);
1311 version_id = MIN(version_id, s->mig_version_id);
1312 return version_id >= 5;
1315 static bool esp_is_version_6(void *opaque, int version_id)
1317 ESPState *s = ESP(opaque);
1319 version_id = MIN(version_id, s->mig_version_id);
1320 return version_id >= 6;
1323 static bool esp_is_between_version_5_and_6(void *opaque, int version_id)
1325 ESPState *s = ESP(opaque);
1327 version_id = MIN(version_id, s->mig_version_id);
1328 return version_id >= 5 && version_id <= 6;
/*
 * Pre-save hook (called on the embedding device): record the current
 * vmstate version in the ESP child so the per-field "test" callbacks can
 * honour the lower of the local and incoming versions.
 */
int esp_pre_save(void *opaque)
{
    ESPState *s = ESP(object_resolve_path_component(
                      OBJECT(opaque), "esp"));

    s->mig_version_id = vmstate_esp.version_id;
    return 0;
}
/*
 * Post-load hook: convert pre-version-5 migration state (open-coded
 * ti_buf/cmdbuf buffers and dma_left counter) into the current Fifo8 and
 * TC register representation.
 */
static int esp_post_load(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);
    int len, i;

    version_id = MIN(version_id, s->mig_version_id);

    if (version_id < 5) {
        esp_set_tc(s, s->mig_dma_left);

        /* Migrate ti_buf to fifo */
        len = s->mig_ti_wptr - s->mig_ti_rptr;
        for (i = 0; i < len; i++) {
            fifo8_push(&s->fifo, s->mig_ti_buf[i]);
        }

        /* Migrate cmdbuf to cmdfifo */
        for (i = 0; i < s->mig_cmdlen; i++) {
            fifo8_push(&s->cmdfifo, s->mig_cmdbuf[i]);
        }
    }

    s->mig_version_id = vmstate_esp.version_id;
    return 0;
}
/*
 * Migration state.  Version 5 replaced the open-coded ti_buf/cmdbuf
 * transfer buffers with Fifo8s; the mig_* fields exist only to load
 * pre-5 streams and are converted in esp_post_load().
 */
const VMStateDescription vmstate_esp = {
    .name = "esp",
    .version_id = 7,
    .minimum_version_id = 3,
    .post_load = esp_post_load,
    .fields = (const VMStateField[]) {
        VMSTATE_BUFFER(rregs, ESPState),
        VMSTATE_BUFFER(wregs, ESPState),
        VMSTATE_INT32(ti_size, ESPState),
        VMSTATE_UINT32_TEST(mig_ti_rptr, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32_TEST(mig_ti_wptr, ESPState, esp_is_before_version_5),
        VMSTATE_BUFFER_TEST(mig_ti_buf, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32(status, ESPState),
        VMSTATE_UINT32_TEST(mig_deferred_status, ESPState,
                            esp_is_before_version_5),
        VMSTATE_BOOL_TEST(mig_deferred_complete, ESPState,
                          esp_is_before_version_5),
        VMSTATE_UINT32(dma, ESPState),
        /* mig_cmdbuf is sent as two slices: bytes 0-16, then the rest (v4+) */
        VMSTATE_STATIC_BUFFER(mig_cmdbuf, ESPState, 0,
                              esp_is_before_version_5, 0, 16),
        VMSTATE_STATIC_BUFFER(mig_cmdbuf, ESPState, 4,
                              esp_is_before_version_5, 16,
                              sizeof(typeof_field(ESPState, mig_cmdbuf))),
        VMSTATE_UINT32_TEST(mig_cmdlen, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32(do_cmd, ESPState),
        VMSTATE_UINT32_TEST(mig_dma_left, ESPState, esp_is_before_version_5),
        VMSTATE_BOOL_TEST(data_ready, ESPState, esp_is_version_5),
        VMSTATE_UINT8_TEST(cmdfifo_cdb_offset, ESPState, esp_is_version_5),
        VMSTATE_FIFO8_TEST(fifo, ESPState, esp_is_version_5),
        VMSTATE_FIFO8_TEST(cmdfifo, ESPState, esp_is_version_5),
        VMSTATE_UINT8_TEST(mig_ti_cmd, ESPState,
                           esp_is_between_version_5_and_6),
        VMSTATE_UINT8_TEST(lun, ESPState, esp_is_version_6),
        VMSTATE_BOOL(drq_state, ESPState),
        VMSTATE_END_OF_LIST()
    },
};
1404 static void sysbus_esp_mem_write(void *opaque, hwaddr addr,
1405 uint64_t val, unsigned int size)
1407 SysBusESPState *sysbus = opaque;
1408 ESPState *s = ESP(&sysbus->esp);
1409 uint32_t saddr;
1411 saddr = addr >> sysbus->it_shift;
1412 esp_reg_write(s, saddr, val);
1415 static uint64_t sysbus_esp_mem_read(void *opaque, hwaddr addr,
1416 unsigned int size)
1418 SysBusESPState *sysbus = opaque;
1419 ESPState *s = ESP(&sysbus->esp);
1420 uint32_t saddr;
1422 saddr = addr >> sysbus->it_shift;
1423 return esp_reg_read(s, saddr);
/*
 * Register-bank MMIO ops; esp_mem_accepts filters which accesses are
 * valid before the read/write handlers run.
 */
static const MemoryRegionOps sysbus_esp_mem_ops = {
    .read = sysbus_esp_mem_read,
    .write = sysbus_esp_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.accepts = esp_mem_accepts,
};
1433 static void sysbus_esp_pdma_write(void *opaque, hwaddr addr,
1434 uint64_t val, unsigned int size)
1436 SysBusESPState *sysbus = opaque;
1437 ESPState *s = ESP(&sysbus->esp);
1439 trace_esp_pdma_write(size);
1441 switch (size) {
1442 case 1:
1443 esp_pdma_write(s, val);
1444 break;
1445 case 2:
1446 esp_pdma_write(s, val >> 8);
1447 esp_pdma_write(s, val);
1448 break;
1450 esp_do_dma(s);
1453 static uint64_t sysbus_esp_pdma_read(void *opaque, hwaddr addr,
1454 unsigned int size)
1456 SysBusESPState *sysbus = opaque;
1457 ESPState *s = ESP(&sysbus->esp);
1458 uint64_t val = 0;
1460 trace_esp_pdma_read(size);
1462 switch (size) {
1463 case 1:
1464 val = esp_pdma_read(s);
1465 break;
1466 case 2:
1467 val = esp_pdma_read(s);
1468 val = (val << 8) | esp_pdma_read(s);
1469 break;
1471 esp_do_dma(s);
1472 return val;
1475 static void *esp_load_request(QEMUFile *f, SCSIRequest *req)
1477 ESPState *s = container_of(req->bus, ESPState, bus);
1479 scsi_req_ref(req);
1480 s->current_req = req;
1481 return s;
/*
 * PDMA region ops.  Guests may issue up to 4-byte accesses (.valid), but
 * the handlers implement at most 2 bytes (.impl), so wider accesses are
 * split by the memory core before reaching the handlers.
 */
static const MemoryRegionOps sysbus_esp_pdma_ops = {
    .read = sysbus_esp_pdma_read,
    .write = sysbus_esp_pdma_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.min_access_size = 1,
    .valid.max_access_size = 4,
    .impl.min_access_size = 1,
    .impl.max_access_size = 2,
};
/* SCSI bus callbacks wiring the generic SCSI layer to the ESP core. */
static const struct SCSIBusInfo esp_scsi_info = {
    .tcq = false,                        /* no tagged command queuing */
    .max_target = ESP_MAX_DEVS,
    .max_lun = 7,

    .load_request = esp_load_request,    /* migration: reattach request */
    .transfer_data = esp_transfer_data,
    .complete = esp_command_complete,
    .cancel = esp_request_cancelled
};
1505 static void sysbus_esp_gpio_demux(void *opaque, int irq, int level)
1507 SysBusESPState *sysbus = SYSBUS_ESP(opaque);
1508 ESPState *s = ESP(&sysbus->esp);
1510 switch (irq) {
1511 case 0:
1512 parent_esp_reset(s, irq, level);
1513 break;
1514 case 1:
1515 esp_dma_enable(s, irq, level);
1516 break;
1520 static void sysbus_esp_realize(DeviceState *dev, Error **errp)
1522 SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
1523 SysBusESPState *sysbus = SYSBUS_ESP(dev);
1524 ESPState *s = ESP(&sysbus->esp);
1526 if (!qdev_realize(DEVICE(s), NULL, errp)) {
1527 return;
1530 sysbus_init_irq(sbd, &s->irq);
1531 sysbus_init_irq(sbd, &s->drq_irq);
1532 assert(sysbus->it_shift != -1);
1534 s->chip_id = TCHI_FAS100A;
1535 memory_region_init_io(&sysbus->iomem, OBJECT(sysbus), &sysbus_esp_mem_ops,
1536 sysbus, "esp-regs", ESP_REGS << sysbus->it_shift);
1537 sysbus_init_mmio(sbd, &sysbus->iomem);
1538 memory_region_init_io(&sysbus->pdma, OBJECT(sysbus), &sysbus_esp_pdma_ops,
1539 sysbus, "esp-pdma", 4);
1540 sysbus_init_mmio(sbd, &sysbus->pdma);
1542 qdev_init_gpio_in(dev, sysbus_esp_gpio_demux, 2);
1544 scsi_bus_init(&s->bus, sizeof(s->bus), dev, &esp_scsi_info);
1547 static void sysbus_esp_hard_reset(DeviceState *dev)
1549 SysBusESPState *sysbus = SYSBUS_ESP(dev);
1550 ESPState *s = ESP(&sysbus->esp);
1552 esp_hard_reset(s);
1555 static void sysbus_esp_init(Object *obj)
1557 SysBusESPState *sysbus = SYSBUS_ESP(obj);
1559 object_initialize_child(obj, "esp", &sysbus->esp, TYPE_ESP);
/*
 * Migration description for the sysbus wrapper.  The embedded ESPState is
 * migrated as a sub-struct; mig_version_id (saved first, since stream
 * version 2, via esp_pre_save) lets esp_post_load() know which vmstate_esp
 * layout the incoming stream was produced with.
 */
static const VMStateDescription vmstate_sysbus_esp_scsi = {
    .name = "sysbusespscsi",
    .version_id = 2,
    .minimum_version_id = 1,
    .pre_save = esp_pre_save,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT8_V(esp.mig_version_id, SysBusESPState, 2),
        VMSTATE_STRUCT(esp, SysBusESPState, 0, vmstate_esp, ESPState),
        VMSTATE_END_OF_LIST()
    },
};
1574 static void sysbus_esp_class_init(ObjectClass *klass, void *data)
1576 DeviceClass *dc = DEVICE_CLASS(klass);
1578 dc->realize = sysbus_esp_realize;
1579 dc->reset = sysbus_esp_hard_reset;
1580 dc->vmsd = &vmstate_sysbus_esp_scsi;
1581 set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
1584 static void esp_finalize(Object *obj)
1586 ESPState *s = ESP(obj);
1588 fifo8_destroy(&s->fifo);
1589 fifo8_destroy(&s->cmdfifo);
1592 static void esp_init(Object *obj)
1594 ESPState *s = ESP(obj);
1596 fifo8_create(&s->fifo, ESP_FIFO_SZ);
1597 fifo8_create(&s->cmdfifo, ESP_CMDFIFO_SZ);
1600 static void esp_class_init(ObjectClass *klass, void *data)
1602 DeviceClass *dc = DEVICE_CLASS(klass);
1604 /* internal device for sysbusesp/pciespscsi, not user-creatable */
1605 dc->user_creatable = false;
1606 set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
/*
 * QOM type registration: the sysbus wrapper device and the internal ESP
 * core device it embeds.
 */
static const TypeInfo esp_info_types[] = {
    {
        .name = TYPE_SYSBUS_ESP,
        .parent = TYPE_SYS_BUS_DEVICE,
        .instance_init = sysbus_esp_init,
        .instance_size = sizeof(SysBusESPState),
        .class_init = sysbus_esp_class_init,
    },
    {
        .name = TYPE_ESP,
        .parent = TYPE_DEVICE,
        .instance_init = esp_init,
        .instance_finalize = esp_finalize,
        .instance_size = sizeof(ESPState),
        .class_init = esp_class_init,
    },
};

DEFINE_TYPES(esp_info_types)