/*
 * QEMU ESP/NCR53C9x emulation
 *
 * Copyright (c) 2005-2006 Fabrice Bellard
 * Copyright (c) 2012 Herve Poussineau
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "hw/sysbus.h"
#include "migration/vmstate.h"
#include "hw/irq.h"
#include "hw/scsi/esp.h"
#include "trace.h"
#include "qemu/log.h"
#include "qemu/module.h"

/*
 * On Sparc32, this is the ESP (NCR53C90) part of chip STP2000 (Master I/O),
 * also produced as NCR89C100. See
 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR89C100.txt
 * and
 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR53C9X.txt
 *
 * On Macintosh Quadra it is a NCR53C96.
 */
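/*
 * The emulation keeps two Fifo8 buffers: "fifo" models the chip's data FIFO
 * (ESP_FIFO_SZ bytes) and "cmdfifo" (ESP_CMDFIFO_SZ bytes) accumulates the
 * message and CDB bytes received during selection (both sizes are defined in
 * hw/scsi/esp.h and used by esp_init() below). rregs[] and wregs[] hold the
 * read-side and write-side views of the chip registers respectively.
 */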
static void esp_raise_irq(ESPState *s)
{
    if (!(s->rregs[ESP_RSTAT] & STAT_INT)) {
        s->rregs[ESP_RSTAT] |= STAT_INT;
        qemu_irq_raise(s->irq);
        trace_esp_raise_irq();
    }
}

static void esp_lower_irq(ESPState *s)
{
    if (s->rregs[ESP_RSTAT] & STAT_INT) {
        s->rregs[ESP_RSTAT] &= ~STAT_INT;
        qemu_irq_lower(s->irq);
        trace_esp_lower_irq();
    }
}

static void esp_raise_drq(ESPState *s)
{
    qemu_irq_raise(s->irq_data);
    trace_esp_raise_drq();
}

static void esp_lower_drq(ESPState *s)
{
    qemu_irq_lower(s->irq_data);
    trace_esp_lower_drq();
}
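/*
 * The STAT_INT bit in ESP_RSTAT mirrors the level of the interrupt line, so
 * the helpers above only toggle the qemu_irq when the state actually changes.
 * DRQ is a separate line (irq_data) used to handshake pseudo-DMA (PDMA)
 * transfers with the host board code.
 */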
void esp_dma_enable(ESPState *s, int irq, int level)
{
    if (level) {
        s->dma_enabled = 1;
        trace_esp_dma_enable();
        if (s->dma_cb) {
            s->dma_cb(s);
            s->dma_cb = NULL;
        }
    } else {
        trace_esp_dma_disable();
        s->dma_enabled = 0;
    }
}

void esp_request_cancelled(SCSIRequest *req)
{
    ESPState *s = req->hba_private;

    if (req == s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
    }
}
static void esp_fifo_push(ESPState *s, uint8_t val)
{
    if (fifo8_num_used(&s->fifo) == ESP_FIFO_SZ) {
        trace_esp_error_fifo_overrun();
        return;
    }

    fifo8_push(&s->fifo, val);
}

static uint8_t esp_fifo_pop(ESPState *s)
{
    if (fifo8_is_empty(&s->fifo)) {
        return 0;
    }

    return fifo8_pop(&s->fifo);
}

static void esp_cmdfifo_push(ESPState *s, uint8_t val)
{
    if (fifo8_num_used(&s->cmdfifo) == ESP_CMDFIFO_SZ) {
        trace_esp_error_fifo_overrun();
        return;
    }

    fifo8_push(&s->cmdfifo, val);
}

static uint8_t esp_cmdfifo_pop(ESPState *s)
{
    if (fifo8_is_empty(&s->cmdfifo)) {
        return 0;
    }

    return fifo8_pop(&s->cmdfifo);
}
static uint32_t esp_get_tc(ESPState *s)
{
    uint32_t dmalen;

    dmalen = s->rregs[ESP_TCLO];
    dmalen |= s->rregs[ESP_TCMID] << 8;
    dmalen |= s->rregs[ESP_TCHI] << 16;

    return dmalen;
}

static void esp_set_tc(ESPState *s, uint32_t dmalen)
{
    s->rregs[ESP_TCLO] = dmalen;
    s->rregs[ESP_TCMID] = dmalen >> 8;
    s->rregs[ESP_TCHI] = dmalen >> 16;
}

static uint32_t esp_get_stc(ESPState *s)
{
    uint32_t dmalen;

    dmalen = s->wregs[ESP_TCLO];
    dmalen |= s->wregs[ESP_TCMID] << 8;
    dmalen |= s->wregs[ESP_TCHI] << 16;

    return dmalen;
}
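/*
 * The transfer counter is a 24-bit value split across the TCLO/TCMID/TCHI
 * register bytes. esp_get_tc()/esp_set_tc() operate on the live counter in
 * rregs[], while esp_get_stc() reads the start value last written by the
 * guest into wregs[]. A DMA command reloads the live counter from the start
 * value, with zero meaning the maximum of 0x10000 (see the CMD_DMA handling
 * in esp_reg_write() below).
 */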
static uint8_t esp_pdma_read(ESPState *s)
{
    uint8_t val;

    if (s->do_cmd) {
        val = esp_cmdfifo_pop(s);
    } else {
        val = esp_fifo_pop(s);
    }

    return val;
}

static void esp_pdma_write(ESPState *s, uint8_t val)
{
    uint32_t dmalen = esp_get_tc(s);

    if (dmalen == 0) {
        return;
    }

    if (s->do_cmd) {
        esp_cmdfifo_push(s, val);
    } else {
        esp_fifo_push(s, val);
    }

    dmalen--;
    esp_set_tc(s, dmalen);
}
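/*
 * PDMA accesses are routed to cmdfifo while a command is being assembled
 * (do_cmd set) and to the data FIFO otherwise. Each byte written by the
 * host also decrements the transfer counter, so the counter reaching zero
 * marks the end of the PDMA burst (see sysbus_esp_pdma_write() below).
 */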
static int esp_select(ESPState *s)
{
    int target;

    target = s->wregs[ESP_WBUSID] & BUSID_DID;

    s->ti_size = 0;
    fifo8_reset(&s->fifo);

    if (s->current_req) {
        /* Started a new command before the old one finished. Cancel it. */
        scsi_req_cancel(s->current_req);
        s->async_len = 0;
    }

    s->current_dev = scsi_device_find(&s->bus, 0, target, 0);
    if (!s->current_dev) {
        /* No such drive */
        s->rregs[ESP_RSTAT] = 0;
        s->rregs[ESP_RINTR] |= INTR_DC;
        s->rregs[ESP_RSEQ] = SEQ_0;
        esp_raise_irq(s);
        return -1;
    }

    /*
     * Note that we deliberately don't raise the IRQ here: this will be done
     * either in do_busid_cmd() for DATA OUT transfers or by the deferred
     * IRQ mechanism in esp_transfer_data() for DATA IN transfers
     */
    s->rregs[ESP_RINTR] |= INTR_FC;
    s->rregs[ESP_RSEQ] = SEQ_CD;
    return 0;
}
static uint32_t get_cmd(ESPState *s, uint32_t maxlen)
{
    uint8_t buf[ESP_CMDFIFO_SZ];
    uint32_t dmalen, n;
    int target;

    target = s->wregs[ESP_WBUSID] & BUSID_DID;
    if (s->dma) {
        dmalen = MIN(esp_get_tc(s), maxlen);
        if (dmalen == 0) {
            return 0;
        }
        if (s->dma_memory_read) {
            s->dma_memory_read(s->dma_opaque, buf, dmalen);
            fifo8_push_all(&s->cmdfifo, buf, dmalen);
        } else {
            if (esp_select(s) < 0) {
                fifo8_reset(&s->cmdfifo);
                return -1;
            }
            esp_raise_drq(s);
            fifo8_reset(&s->cmdfifo);
            return 0;
        }
    } else {
        dmalen = MIN(fifo8_num_used(&s->fifo), maxlen);
        if (dmalen == 0) {
            return 0;
        }
        memcpy(buf, fifo8_pop_buf(&s->fifo, dmalen, &n), dmalen);
        if (dmalen >= 3) {
            buf[0] = buf[2] >> 5;
        }
        fifo8_push_all(&s->cmdfifo, buf, dmalen);
    }
    trace_esp_get_cmd(dmalen, target);

    if (esp_select(s) < 0) {
        fifo8_reset(&s->cmdfifo);
        return -1;
    }
    return dmalen;
}
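/*
 * After get_cmd() the cmdfifo holds any message bytes received during
 * selection followed by the CDB itself; cmdfifo_cdb_offset records where
 * the CDB starts so that do_cmd() can skip the message bytes before
 * handing the CDB to the SCSI layer.
 */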
static void do_busid_cmd(ESPState *s, uint8_t busid)
{
    uint32_t n, cmdlen;
    int32_t datalen;
    int lun;
    SCSIDevice *current_lun;
    uint8_t *buf;

    trace_esp_do_busid_cmd(busid);
    lun = busid & 7;
    cmdlen = fifo8_num_used(&s->cmdfifo);
    buf = (uint8_t *)fifo8_pop_buf(&s->cmdfifo, cmdlen, &n);

    current_lun = scsi_device_find(&s->bus, 0, s->current_dev->id, lun);
    s->current_req = scsi_req_new(current_lun, 0, lun, buf, s);
    datalen = scsi_req_enqueue(s->current_req);
    s->ti_size = datalen;
    fifo8_reset(&s->cmdfifo);
    if (datalen != 0) {
        s->rregs[ESP_RSTAT] = STAT_TC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        esp_set_tc(s, 0);
        if (datalen > 0) {
            /*
             * Switch to DATA IN phase but wait until initial data xfer is
             * complete before raising the command completion interrupt
             */
            s->data_in_ready = false;
            s->rregs[ESP_RSTAT] |= STAT_DI;
        } else {
            s->rregs[ESP_RSTAT] |= STAT_DO;
            s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
            esp_raise_irq(s);
            esp_lower_drq(s);
        }
        scsi_req_continue(s->current_req);
        return;
    }
}

static void do_cmd(ESPState *s)
{
    uint8_t busid = fifo8_pop(&s->cmdfifo);
    uint32_t n;

    s->cmdfifo_cdb_offset--;

    /* Ignore extended messages for now */
    if (s->cmdfifo_cdb_offset) {
        fifo8_pop_buf(&s->cmdfifo, s->cmdfifo_cdb_offset, &n);
        s->cmdfifo_cdb_offset = 0;
    }

    do_busid_cmd(s, busid);
}
static void satn_pdma_cb(ESPState *s)
{
    s->do_cmd = 0;
    if (!fifo8_is_empty(&s->cmdfifo)) {
        s->cmdfifo_cdb_offset = 1;
        do_cmd(s);
    }
}

static void handle_satn(ESPState *s)
{
    int32_t cmdlen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_satn;
        return;
    }
    s->pdma_cb = satn_pdma_cb;
    cmdlen = get_cmd(s, ESP_CMDFIFO_SZ);
    if (cmdlen > 0) {
        s->cmdfifo_cdb_offset = 1;
        do_cmd(s);
    } else if (cmdlen == 0) {
        s->do_cmd = 1;
        /* Target present, but no cmd yet - switch to command phase */
        s->rregs[ESP_RSEQ] = SEQ_CD;
        s->rregs[ESP_RSTAT] = STAT_CD;
    }
}
static void s_without_satn_pdma_cb(ESPState *s)
{
    uint32_t len;

    s->do_cmd = 0;
    len = fifo8_num_used(&s->cmdfifo);
    if (len) {
        s->cmdfifo_cdb_offset = 0;
        do_busid_cmd(s, 0);
    }
}

static void handle_s_without_atn(ESPState *s)
{
    int32_t cmdlen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_s_without_atn;
        return;
    }
    s->pdma_cb = s_without_satn_pdma_cb;
    cmdlen = get_cmd(s, ESP_CMDFIFO_SZ);
    if (cmdlen > 0) {
        s->cmdfifo_cdb_offset = 0;
        do_busid_cmd(s, 0);
    } else if (cmdlen == 0) {
        s->do_cmd = 1;
        /* Target present, but no cmd yet - switch to command phase */
        s->rregs[ESP_RSEQ] = SEQ_CD;
        s->rregs[ESP_RSTAT] = STAT_CD;
    }
}
static void satn_stop_pdma_cb(ESPState *s)
{
    s->do_cmd = 0;
    if (!fifo8_is_empty(&s->cmdfifo)) {
        trace_esp_handle_satn_stop(fifo8_num_used(&s->cmdfifo));
        s->do_cmd = 1;
        s->cmdfifo_cdb_offset = 1;
        s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
        s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        esp_raise_irq(s);
    }
}

static void handle_satn_stop(ESPState *s)
{
    int32_t cmdlen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_satn_stop;
        return;
    }
    s->pdma_cb = satn_stop_pdma_cb;
    cmdlen = get_cmd(s, 1);
    if (cmdlen > 0) {
        trace_esp_handle_satn_stop(fifo8_num_used(&s->cmdfifo));
        s->do_cmd = 1;
        s->cmdfifo_cdb_offset = 1;
        s->rregs[ESP_RSTAT] = STAT_MO;
        s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_MO;
        esp_raise_irq(s);
    } else if (cmdlen == 0) {
        s->do_cmd = 1;
        /* Target present, switch to message out phase */
        s->rregs[ESP_RSEQ] = SEQ_MO;
        s->rregs[ESP_RSTAT] = STAT_MO;
    }
}
static void write_response_pdma_cb(ESPState *s)
{
    s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
    s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
    s->rregs[ESP_RSEQ] = SEQ_CD;
    esp_raise_irq(s);
}

static void write_response(ESPState *s)
{
    uint32_t n;

    trace_esp_write_response(s->status);

    fifo8_reset(&s->fifo);
    esp_fifo_push(s, s->status);
    esp_fifo_push(s, 0);

    if (s->dma) {
        if (s->dma_memory_write) {
            s->dma_memory_write(s->dma_opaque,
                                (uint8_t *)fifo8_pop_buf(&s->fifo, 2, &n), 2);
            s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
            s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
            s->rregs[ESP_RSEQ] = SEQ_CD;
        } else {
            s->pdma_cb = write_response_pdma_cb;
            esp_raise_drq(s);
            return;
        }
    } else {
        s->ti_size = 2;
        s->rregs[ESP_RFLAGS] = 2;
    }
    esp_raise_irq(s);
}
static void esp_dma_done(ESPState *s)
{
    s->rregs[ESP_RSTAT] |= STAT_TC;
    s->rregs[ESP_RINTR] |= INTR_BS;
    s->rregs[ESP_RSEQ] = 0;
    s->rregs[ESP_RFLAGS] = 0;
    esp_set_tc(s, 0);
    esp_raise_irq(s);
}
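/*
 * esp_dma_done() reports the end of a DMA transfer to the guest: it sets
 * the terminal count (STAT_TC) and bus service (INTR_BS) bits, zeroes the
 * residual transfer counter and the FIFO flags register, and raises the
 * interrupt line.
 */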
static void do_dma_pdma_cb(ESPState *s)
{
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
    int len;
    uint32_t n;

    if (s->do_cmd) {
        s->ti_size = 0;
        s->do_cmd = 0;
        do_cmd(s);
        esp_lower_drq(s);
        return;
    }

    if (to_device) {
        /* Copy FIFO data to device */
        len = MIN(fifo8_num_used(&s->fifo), ESP_FIFO_SZ);
        memcpy(s->async_buf, fifo8_pop_buf(&s->fifo, len, &n), len);
        s->async_buf += len;
        s->async_len -= len;
        s->ti_size += len;
        if (s->async_len == 0) {
            scsi_req_continue(s->current_req);
            return;
        }

        if (esp_get_tc(s) == 0) {
            esp_lower_drq(s);
            esp_dma_done(s);
        }

        return;
    } else {
        if (s->async_len == 0) {
            if (s->current_req) {
                /* Defer until the scsi layer has completed */
                scsi_req_continue(s->current_req);
                s->data_in_ready = false;
            }
            return;
        }

        if (esp_get_tc(s) != 0) {
            /* Copy device data to FIFO */
            len = MIN(s->async_len, fifo8_num_free(&s->fifo));
            fifo8_push_all(&s->fifo, s->async_buf, len);
            s->async_buf += len;
            s->async_len -= len;
            s->ti_size -= len;
            esp_set_tc(s, esp_get_tc(s) - len);
            return;
        }

        /* Partially filled a scsi buffer. Complete immediately. */
        esp_lower_drq(s);
        esp_dma_done(s);
    }
}
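/*
 * esp_do_dma() is the central transfer routine. When the board provides
 * dma_memory_read/dma_memory_write callbacks, the transfer is performed
 * directly against host memory; otherwise the device falls back to PDMA,
 * raising DRQ and letting do_dma_pdma_cb() continue the transfer as the
 * host reads or writes the FIFO.
 */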
static void esp_do_dma(ESPState *s)
{
    uint32_t len, cmdlen;
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
    uint8_t buf[ESP_CMDFIFO_SZ];

    len = esp_get_tc(s);
    if (s->do_cmd) {
        /*
         * handle_ti_cmd() case: esp_do_dma() is called only from
         * handle_ti_cmd() with do_cmd != NULL (see the assert())
         */
        cmdlen = fifo8_num_used(&s->cmdfifo);
        trace_esp_do_dma(cmdlen, len);
        if (s->dma_memory_read) {
            s->dma_memory_read(s->dma_opaque, buf, len);
            fifo8_push_all(&s->cmdfifo, buf, len);
        } else {
            s->pdma_cb = do_dma_pdma_cb;
            esp_raise_drq(s);
            return;
        }
        trace_esp_handle_ti_cmd(cmdlen);
        s->ti_size = 0;
        if ((s->rregs[ESP_RSTAT] & 7) == STAT_CD) {
            /* No command received */
            if (s->cmdfifo_cdb_offset == fifo8_num_used(&s->cmdfifo)) {
                return;
            }

            /* Command has been received */
            s->do_cmd = 0;
            do_cmd(s);
        } else {
            /*
             * Extra message out bytes received: update cmdfifo_cdb_offset
             * and then switch to command phase
             */
            s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo);
            s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
            s->rregs[ESP_RSEQ] = SEQ_CD;
            s->rregs[ESP_RINTR] |= INTR_BS;
            esp_raise_irq(s);
        }
        return;
    }
    if (s->async_len == 0) {
        /* Defer until data is available. */
        return;
    }
    if (len > s->async_len) {
        len = s->async_len;
    }
    if (to_device) {
        if (s->dma_memory_read) {
            s->dma_memory_read(s->dma_opaque, s->async_buf, len);
        } else {
            s->pdma_cb = do_dma_pdma_cb;
            esp_raise_drq(s);
            return;
        }
    } else {
        if (s->dma_memory_write) {
            s->dma_memory_write(s->dma_opaque, s->async_buf, len);
        } else {
            /* Copy device data to FIFO */
            len = MIN(len, fifo8_num_free(&s->fifo));
            fifo8_push_all(&s->fifo, s->async_buf, len);
            s->async_buf += len;
            s->async_len -= len;
            s->ti_size -= len;
            esp_set_tc(s, esp_get_tc(s) - len);
            s->pdma_cb = do_dma_pdma_cb;
            esp_raise_drq(s);

            /* Indicate transfer to FIFO is complete */
            s->rregs[ESP_RSTAT] |= STAT_TC;
            return;
        }
    }
    esp_set_tc(s, esp_get_tc(s) - len);
    s->async_buf += len;
    s->async_len -= len;
    if (to_device) {
        s->ti_size += len;
    } else {
        s->ti_size -= len;
    }
    if (s->async_len == 0) {
        scsi_req_continue(s->current_req);
        /*
         * If there is still data to be read from the device then
         * complete the DMA operation immediately. Otherwise defer
         * until the scsi layer has completed.
         */
        if (to_device || esp_get_tc(s) != 0 || s->ti_size == 0) {
            return;
        }
    }

    /* Partially filled a scsi buffer. Complete immediately. */
    esp_dma_done(s);
    esp_lower_drq(s);
}
void esp_command_complete(SCSIRequest *req, size_t resid)
{
    ESPState *s = req->hba_private;

    trace_esp_command_complete();
    if (s->ti_size != 0) {
        trace_esp_command_complete_unexpected();
    }
    s->ti_size = 0;
    s->async_len = 0;
    if (req->status) {
        trace_esp_command_complete_fail();
    }
    s->status = req->status;
    s->rregs[ESP_RSTAT] = STAT_ST;
    esp_dma_done(s);
    esp_lower_drq(s);
    if (s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
    }
}
void esp_transfer_data(SCSIRequest *req, uint32_t len)
{
    ESPState *s = req->hba_private;
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
    uint32_t dmalen = esp_get_tc(s);

    assert(!s->do_cmd);
    trace_esp_transfer_data(dmalen, s->ti_size);
    s->async_len = len;
    s->async_buf = scsi_req_get_buf(req);

    if (!to_device && !s->data_in_ready) {
        /*
         * Initial incoming data xfer is complete so raise command
         * completion interrupt
         */
        s->data_in_ready = true;
        s->rregs[ESP_RSTAT] |= STAT_TC;
        s->rregs[ESP_RINTR] |= INTR_BS;
        esp_raise_irq(s);

        /*
         * If data is ready to transfer and the TI command has already
         * been executed, start DMA immediately. Otherwise DMA will start
         * when host sends the TI command
         */
        if (s->ti_size && (s->rregs[ESP_CMD] == (CMD_TI | CMD_DMA))) {
            esp_do_dma(s);
        }
        return;
    }

    if (dmalen) {
        esp_do_dma(s);
    } else if (s->ti_size <= 0) {
        /*
         * If this was the last part of a DMA transfer then the
         * completion interrupt is deferred to here.
         */
        esp_dma_done(s);
        esp_lower_drq(s);
    }
}
static void handle_ti(ESPState *s)
{
    uint32_t dmalen, cmdlen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_ti;
        return;
    }

    dmalen = esp_get_tc(s);
    if (s->dma) {
        trace_esp_handle_ti(dmalen);
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        esp_do_dma(s);
    } else if (s->do_cmd) {
        cmdlen = fifo8_num_used(&s->cmdfifo);
        trace_esp_handle_ti_cmd(cmdlen);
        s->ti_size = 0;
        if ((s->rregs[ESP_RSTAT] & 7) == STAT_CD) {
            /* No command received */
            if (s->cmdfifo_cdb_offset == fifo8_num_used(&s->cmdfifo)) {
                return;
            }

            /* Command has been received */
            s->do_cmd = 0;
            do_cmd(s);
        } else {
            /*
             * Extra message out bytes received: update cmdfifo_cdb_offset
             * and then switch to command phase
             */
            s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo);
            s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
            s->rregs[ESP_RSEQ] = SEQ_CD;
            s->rregs[ESP_RINTR] |= INTR_BS;
            esp_raise_irq(s);
        }
    }
}
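/*
 * Two reset levels are modelled below: esp_hard_reset() clears all register
 * and FIFO state, while esp_soft_reset() additionally drops both the IRQ and
 * DRQ lines first and is used for the CMD_RESET chip command and the reset
 * GPIO handled by parent_esp_reset().
 */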
void esp_hard_reset(ESPState *s)
{
    memset(s->rregs, 0, ESP_REGS);
    memset(s->wregs, 0, ESP_REGS);
    s->tchi_written = 0;
    s->ti_size = 0;
    fifo8_reset(&s->fifo);
    fifo8_reset(&s->cmdfifo);
    s->dma = 0;
    s->do_cmd = 0;
    s->dma_cb = NULL;

    s->rregs[ESP_CFG1] = 7;
}

static void esp_soft_reset(ESPState *s)
{
    qemu_irq_lower(s->irq);
    qemu_irq_lower(s->irq_data);
    esp_hard_reset(s);
}

static void parent_esp_reset(ESPState *s, int irq, int level)
{
    if (level) {
        esp_soft_reset(s);
    }
}
uint64_t esp_reg_read(ESPState *s, uint32_t saddr)
{
    uint32_t val;

    switch (saddr) {
    case ESP_FIFO:
        if ((s->rregs[ESP_RSTAT] & STAT_PIO_MASK) == 0) {
            /* Data out. */
            qemu_log_mask(LOG_UNIMP, "esp: PIO data read not implemented\n");
            s->rregs[ESP_FIFO] = 0;
        } else {
            s->ti_size--;
            s->rregs[ESP_FIFO] = esp_fifo_pop(s);
        }
        val = s->rregs[ESP_FIFO];
        break;
    case ESP_RINTR:
        /*
         * Clear sequence step, interrupt register and all status bits
         * except TC
         */
        val = s->rregs[ESP_RINTR];
        s->rregs[ESP_RINTR] = 0;
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        s->rregs[ESP_RSEQ] = SEQ_0;
        esp_lower_irq(s);
        break;
    case ESP_TCHI:
        /* Return the unique id if the value has never been written */
        if (!s->tchi_written) {
            val = s->chip_id;
        } else {
            val = s->rregs[saddr];
        }
        break;
    case ESP_RFLAGS:
        /* Bottom 5 bits indicate number of bytes in FIFO */
        val = fifo8_num_used(&s->fifo);
        break;
    default:
        val = s->rregs[saddr];
        break;
    }

    trace_esp_mem_readb(saddr, val);
    return val;
}
void esp_reg_write(ESPState *s, uint32_t saddr, uint64_t val)
{
    trace_esp_mem_writeb(saddr, s->wregs[saddr], val);
    switch (saddr) {
    case ESP_TCHI:
        s->tchi_written = true;
        /* fall through */
    case ESP_TCLO:
    case ESP_TCMID:
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        break;
    case ESP_FIFO:
        if (s->do_cmd) {
            esp_cmdfifo_push(s, val);
        } else {
            s->ti_size++;
            esp_fifo_push(s, val);
        }

        /* Non-DMA transfers raise an interrupt after every byte */
        if (s->rregs[ESP_CMD] == CMD_TI) {
            s->rregs[ESP_RINTR] |= INTR_FC | INTR_BS;
            esp_raise_irq(s);
        }
        break;
    case ESP_CMD:
        s->rregs[saddr] = val;
        if (val & CMD_DMA) {
            s->dma = 1;
            /* Reload DMA counter. */
            if (esp_get_stc(s) == 0) {
                esp_set_tc(s, 0x10000);
            } else {
                esp_set_tc(s, esp_get_stc(s));
            }
        } else {
            s->dma = 0;
        }
        switch (val & CMD_CMD) {
        case CMD_NOP:
            trace_esp_mem_writeb_cmd_nop(val);
            break;
        case CMD_FLUSH:
            trace_esp_mem_writeb_cmd_flush(val);
            fifo8_reset(&s->fifo);
            break;
        case CMD_RESET:
            trace_esp_mem_writeb_cmd_reset(val);
            esp_soft_reset(s);
            break;
        case CMD_BUSRESET:
            trace_esp_mem_writeb_cmd_bus_reset(val);
            if (!(s->wregs[ESP_CFG1] & CFG1_RESREPT)) {
                s->rregs[ESP_RINTR] |= INTR_RST;
                esp_raise_irq(s);
            }
            break;
        case CMD_TI:
            trace_esp_mem_writeb_cmd_ti(val);
            handle_ti(s);
            break;
        case CMD_ICCS:
            trace_esp_mem_writeb_cmd_iccs(val);
            write_response(s);
            s->rregs[ESP_RINTR] |= INTR_FC;
            s->rregs[ESP_RSTAT] |= STAT_MI;
            break;
        case CMD_MSGACC:
            trace_esp_mem_writeb_cmd_msgacc(val);
            s->rregs[ESP_RINTR] |= INTR_DC;
            s->rregs[ESP_RSEQ] = 0;
            s->rregs[ESP_RFLAGS] = 0;
            esp_raise_irq(s);
            break;
        case CMD_PAD:
            trace_esp_mem_writeb_cmd_pad(val);
            s->rregs[ESP_RSTAT] = STAT_TC;
            s->rregs[ESP_RINTR] |= INTR_FC;
            s->rregs[ESP_RSEQ] = 0;
            break;
        case CMD_SATN:
            trace_esp_mem_writeb_cmd_satn(val);
            break;
        case CMD_RSTATN:
            trace_esp_mem_writeb_cmd_rstatn(val);
            break;
        case CMD_SEL:
            trace_esp_mem_writeb_cmd_sel(val);
            handle_s_without_atn(s);
            break;
        case CMD_SELATN:
            trace_esp_mem_writeb_cmd_selatn(val);
            handle_satn(s);
            break;
        case CMD_SELATNS:
            trace_esp_mem_writeb_cmd_selatns(val);
            handle_satn_stop(s);
            break;
        case CMD_ENSEL:
            trace_esp_mem_writeb_cmd_ensel(val);
            s->rregs[ESP_RINTR] = 0;
            break;
        case CMD_DISSEL:
            trace_esp_mem_writeb_cmd_dissel(val);
            s->rregs[ESP_RINTR] = 0;
            esp_raise_irq(s);
            break;
        default:
            trace_esp_error_unhandled_command(val);
            break;
        }
        break;
    case ESP_WBUSID ... ESP_WSYNO:
        break;
    case ESP_CFG1:
    case ESP_CFG2: case ESP_CFG3:
    case ESP_RES3: case ESP_RES4:
        s->rregs[saddr] = val;
        break;
    case ESP_WCCF ... ESP_WTEST:
        break;
    default:
        trace_esp_error_invalid_write(val, saddr);
        return;
    }
    s->wregs[saddr] = val;
}
static bool esp_mem_accepts(void *opaque, hwaddr addr,
                            unsigned size, bool is_write,
                            MemTxAttrs attrs)
{
    return (size == 1) || (is_write && size == 4);
}
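/*
 * Migration: version 5 of the "esp" vmstate replaced the old ti_buf/cmdbuf
 * arrays and their read/write pointers with the Fifo8 fields. The mig_*
 * fields only exist to load streams from older versions; esp_post_load()
 * converts them into the FIFOs. The two test helpers below select which
 * fields are sent or expected based on the effective stream version.
 */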
static bool esp_is_before_version_5(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);

    version_id = MIN(version_id, s->mig_version_id);
    return version_id < 5;
}

static bool esp_is_version_5(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);

    version_id = MIN(version_id, s->mig_version_id);
    return version_id == 5;
}

static int esp_pre_save(void *opaque)
{
    ESPState *s = ESP(opaque);

    s->mig_version_id = vmstate_esp.version_id;
    return 0;
}

static int esp_post_load(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);
    int len, i;

    version_id = MIN(version_id, s->mig_version_id);

    if (version_id < 5) {
        esp_set_tc(s, s->mig_dma_left);

        /* Migrate ti_buf to fifo */
        len = s->mig_ti_wptr - s->mig_ti_rptr;
        for (i = 0; i < len; i++) {
            fifo8_push(&s->fifo, s->mig_ti_buf[i]);
        }

        /* Migrate cmdbuf to cmdfifo */
        for (i = 0; i < s->mig_cmdlen; i++) {
            fifo8_push(&s->cmdfifo, s->mig_cmdbuf[i]);
        }
    }

    s->mig_version_id = vmstate_esp.version_id;
    return 0;
}
const VMStateDescription vmstate_esp = {
    .name = "esp",
    .version_id = 5,
    .minimum_version_id = 3,
    .pre_save = esp_pre_save,
    .post_load = esp_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_BUFFER(rregs, ESPState),
        VMSTATE_BUFFER(wregs, ESPState),
        VMSTATE_INT32(ti_size, ESPState),
        VMSTATE_UINT32_TEST(mig_ti_rptr, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32_TEST(mig_ti_wptr, ESPState, esp_is_before_version_5),
        VMSTATE_BUFFER_TEST(mig_ti_buf, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32(status, ESPState),
        VMSTATE_UINT32_TEST(mig_deferred_status, ESPState,
                            esp_is_before_version_5),
        VMSTATE_BOOL_TEST(mig_deferred_complete, ESPState,
                          esp_is_before_version_5),
        VMSTATE_UINT32(dma, ESPState),
        VMSTATE_STATIC_BUFFER(mig_cmdbuf, ESPState, 0,
                              esp_is_before_version_5, 0, 16),
        VMSTATE_STATIC_BUFFER(mig_cmdbuf, ESPState, 4,
                              esp_is_before_version_5, 16,
                              sizeof(typeof_field(ESPState, mig_cmdbuf))),
        VMSTATE_UINT32_TEST(mig_cmdlen, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32(do_cmd, ESPState),
        VMSTATE_UINT32_TEST(mig_dma_left, ESPState, esp_is_before_version_5),
        VMSTATE_BOOL_TEST(data_in_ready, ESPState, esp_is_version_5),
        VMSTATE_UINT8_TEST(cmdfifo_cdb_offset, ESPState, esp_is_version_5),
        VMSTATE_FIFO8_TEST(fifo, ESPState, esp_is_version_5),
        VMSTATE_FIFO8_TEST(cmdfifo, ESPState, esp_is_version_5),
        VMSTATE_END_OF_LIST()
    },
};
static void sysbus_esp_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);
    uint32_t saddr;

    saddr = addr >> sysbus->it_shift;
    esp_reg_write(s, saddr, val);
}

static uint64_t sysbus_esp_mem_read(void *opaque, hwaddr addr,
                                    unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);
    uint32_t saddr;

    saddr = addr >> sysbus->it_shift;
    return esp_reg_read(s, saddr);
}

static const MemoryRegionOps sysbus_esp_mem_ops = {
    .read = sysbus_esp_mem_read,
    .write = sysbus_esp_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.accepts = esp_mem_accepts,
};
static void sysbus_esp_pdma_write(void *opaque, hwaddr addr,
                                  uint64_t val, unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);
    uint32_t dmalen;

    trace_esp_pdma_write(size);

    switch (size) {
    case 1:
        esp_pdma_write(s, val);
        break;
    case 2:
        esp_pdma_write(s, val >> 8);
        esp_pdma_write(s, val);
        break;
    }
    dmalen = esp_get_tc(s);
    if (dmalen == 0 || fifo8_is_full(&s->fifo)) {
        s->pdma_cb(s);
    }
}

static uint64_t sysbus_esp_pdma_read(void *opaque, hwaddr addr,
                                     unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);
    uint64_t val = 0;

    trace_esp_pdma_read(size);

    switch (size) {
    case 1:
        val = esp_pdma_read(s);
        break;
    case 2:
        val = esp_pdma_read(s);
        val = (val << 8) | esp_pdma_read(s);
        break;
    }
    if (fifo8_is_empty(&s->fifo)) {
        s->pdma_cb(s);
    }
    return val;
}

static const MemoryRegionOps sysbus_esp_pdma_ops = {
    .read = sysbus_esp_pdma_read,
    .write = sysbus_esp_pdma_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.min_access_size = 1,
    .valid.max_access_size = 4,
    .impl.min_access_size = 1,
    .impl.max_access_size = 2,
};
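/*
 * Note that .impl.max_access_size is 2 while .valid.max_access_size is 4:
 * the memory core therefore accepts 32-bit PDMA accesses from the guest
 * but delivers them to the handlers above as a pair of 16-bit operations.
 */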
static const struct SCSIBusInfo esp_scsi_info = {
    .tcq = false,
    .max_target = ESP_MAX_DEVS,
    .max_lun = 7,

    .transfer_data = esp_transfer_data,
    .complete = esp_command_complete,
    .cancel = esp_request_cancelled
};

static void sysbus_esp_gpio_demux(void *opaque, int irq, int level)
{
    SysBusESPState *sysbus = SYSBUS_ESP(opaque);
    ESPState *s = ESP(&sysbus->esp);

    switch (irq) {
    case 0:
        parent_esp_reset(s, irq, level);
        break;
    case 1:
        /* Pass the ESPState, not the opaque SysBusESPState pointer */
        esp_dma_enable(s, irq, level);
        break;
    }
}
static void sysbus_esp_realize(DeviceState *dev, Error **errp)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
    SysBusESPState *sysbus = SYSBUS_ESP(dev);
    ESPState *s = ESP(&sysbus->esp);

    if (!qdev_realize(DEVICE(s), NULL, errp)) {
        return;
    }

    sysbus_init_irq(sbd, &s->irq);
    sysbus_init_irq(sbd, &s->irq_data);
    assert(sysbus->it_shift != -1);

    s->chip_id = TCHI_FAS100A;
    memory_region_init_io(&sysbus->iomem, OBJECT(sysbus), &sysbus_esp_mem_ops,
                          sysbus, "esp-regs", ESP_REGS << sysbus->it_shift);
    sysbus_init_mmio(sbd, &sysbus->iomem);
    memory_region_init_io(&sysbus->pdma, OBJECT(sysbus), &sysbus_esp_pdma_ops,
                          sysbus, "esp-pdma", 4);
    sysbus_init_mmio(sbd, &sysbus->pdma);

    qdev_init_gpio_in(dev, sysbus_esp_gpio_demux, 2);

    scsi_bus_new(&s->bus, sizeof(s->bus), dev, &esp_scsi_info, NULL);
}

static void sysbus_esp_hard_reset(DeviceState *dev)
{
    SysBusESPState *sysbus = SYSBUS_ESP(dev);
    ESPState *s = ESP(&sysbus->esp);

    esp_hard_reset(s);
}

static void sysbus_esp_init(Object *obj)
{
    SysBusESPState *sysbus = SYSBUS_ESP(obj);

    object_initialize_child(obj, "esp", &sysbus->esp, TYPE_ESP);
}
static const VMStateDescription vmstate_sysbus_esp_scsi = {
    .name = "sysbusespscsi",
    .version_id = 2,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8_V(esp.mig_version_id, SysBusESPState, 2),
        VMSTATE_STRUCT(esp, SysBusESPState, 0, vmstate_esp, ESPState),
        VMSTATE_END_OF_LIST()
    }
};

static void sysbus_esp_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = sysbus_esp_realize;
    dc->reset = sysbus_esp_hard_reset;
    dc->vmsd = &vmstate_sysbus_esp_scsi;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}

static const TypeInfo sysbus_esp_info = {
    .name = TYPE_SYSBUS_ESP,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_init = sysbus_esp_init,
    .instance_size = sizeof(SysBusESPState),
    .class_init = sysbus_esp_class_init,
};
static void esp_finalize(Object *obj)
{
    ESPState *s = ESP(obj);

    fifo8_destroy(&s->fifo);
    fifo8_destroy(&s->cmdfifo);
}

static void esp_init(Object *obj)
{
    ESPState *s = ESP(obj);

    fifo8_create(&s->fifo, ESP_FIFO_SZ);
    fifo8_create(&s->cmdfifo, ESP_CMDFIFO_SZ);
}

static void esp_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    /* internal device for sysbusesp/pciespscsi, not user-creatable */
    dc->user_creatable = false;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}

static const TypeInfo esp_info = {
    .name = TYPE_ESP,
    .parent = TYPE_DEVICE,
    .instance_init = esp_init,
    .instance_finalize = esp_finalize,
    .instance_size = sizeof(ESPState),
    .class_init = esp_class_init,
};

static void esp_register_types(void)
{
    type_register_static(&sysbus_esp_info);
    type_register_static(&esp_info);
}

type_init(esp_register_types)