/*
 * QEMU ESP/NCR53C9x emulation
 *
 * Copyright (c) 2005-2006 Fabrice Bellard
 * Copyright (c) 2012 Herve Poussineau
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "hw/sysbus.h"
#include "migration/vmstate.h"
#include "hw/irq.h"
#include "hw/scsi/esp.h"
#include "trace.h"
#include "qemu/log.h"
#include "qemu/module.h"

/*
 * On Sparc32, this is the ESP (NCR53C90) part of chip STP2000 (Master I/O),
 * also produced as NCR89C100. See
 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR89C100.txt
 * and
 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR53C9X.txt
 *
 * On Macintosh Quadra it is a NCR53C96.
 */

static void esp_raise_irq(ESPState *s)
{
    if (!(s->rregs[ESP_RSTAT] & STAT_INT)) {
        s->rregs[ESP_RSTAT] |= STAT_INT;
        qemu_irq_raise(s->irq);
        trace_esp_raise_irq();
    }
}

static void esp_lower_irq(ESPState *s)
{
    if (s->rregs[ESP_RSTAT] & STAT_INT) {
        s->rregs[ESP_RSTAT] &= ~STAT_INT;
        qemu_irq_lower(s->irq);
        trace_esp_lower_irq();
    }
}

static void esp_raise_drq(ESPState *s)
{
    qemu_irq_raise(s->irq_data);
    trace_esp_raise_drq();
}

static void esp_lower_drq(ESPState *s)
{
    qemu_irq_lower(s->irq_data);
    trace_esp_lower_drq();
}

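/*
 * Note (editorial summary of the helpers above): the device drives two
 * distinct qemu_irq lines. s->irq is the CPU-facing interrupt, mirrored by
 * STAT_INT in ESP_RSTAT, while s->irq_data models the DRQ signal that paces
 * pseudo-DMA (PDMA) transfers through the "esp-pdma" memory region created
 * in sysbus_esp_realize() below.
 */
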
void esp_dma_enable(ESPState *s, int irq, int level)
{
    if (level) {
        s->dma_enabled = 1;
        trace_esp_dma_enable();
        if (s->dma_cb) {
            s->dma_cb(s);
            s->dma_cb = NULL;
        }
    } else {
        trace_esp_dma_disable();
        s->dma_enabled = 0;
    }
}

void esp_request_cancelled(SCSIRequest *req)
{
    ESPState *s = req->hba_private;

    if (req == s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
    }
}

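/*
 * The DMA transfer counter is a 24-bit value split across the TCLO, TCMID
 * and TCHI registers. The live count lives in rregs and is decremented as
 * data moves; wregs holds the start value (STC), which is reloaded into the
 * live counter whenever a command is issued with the CMD_DMA bit set (see
 * the ESP_CMD case in esp_reg_write()).
 */
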
static uint32_t esp_get_tc(ESPState *s)
{
    uint32_t dmalen;

    dmalen = s->rregs[ESP_TCLO];
    dmalen |= s->rregs[ESP_TCMID] << 8;
    dmalen |= s->rregs[ESP_TCHI] << 16;

    return dmalen;
}

static void esp_set_tc(ESPState *s, uint32_t dmalen)
{
    s->rregs[ESP_TCLO] = dmalen;
    s->rregs[ESP_TCMID] = dmalen >> 8;
    s->rregs[ESP_TCHI] = dmalen >> 16;
}

static uint32_t esp_get_stc(ESPState *s)
{
    uint32_t dmalen;

    dmalen = s->wregs[ESP_TCLO];
    dmalen |= s->wregs[ESP_TCMID] << 8;
    dmalen |= s->wregs[ESP_TCHI] << 16;

    return dmalen;
}

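/*
 * PDMA helpers: on boards that do not provide real DMA controller callbacks
 * (dma_memory_read/dma_memory_write are NULL), the host CPU moves data a
 * byte or two at a time through the esp-pdma MMIO region. These helpers
 * shuttle each byte into either the command buffer (while a command is
 * being accumulated) or the TI transfer buffer.
 */
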
static uint8_t esp_pdma_read(ESPState *s)
{
    uint8_t val;

    if (s->do_cmd) {
        val = s->cmdbuf[s->cmdlen++];
    } else {
        val = s->ti_buf[s->ti_rptr++];
    }

    return val;
}

static void esp_pdma_write(ESPState *s, uint8_t val)
{
    uint32_t dmalen = esp_get_tc(s);

    if (dmalen == 0) {
        return;
    }

    if (s->do_cmd) {
        s->cmdbuf[s->cmdlen++] = val;
    } else {
        s->ti_buf[s->ti_wptr++] = val;
    }

    dmalen--;
    esp_set_tc(s, dmalen);
}

static int esp_select(ESPState *s)
{
    int target;

    target = s->wregs[ESP_WBUSID] & BUSID_DID;

    s->ti_size = 0;
    s->ti_rptr = 0;
    s->ti_wptr = 0;

    if (s->current_req) {
        /* Started a new command before the old one finished. Cancel it. */
        scsi_req_cancel(s->current_req);
        s->async_len = 0;
    }

    s->current_dev = scsi_device_find(&s->bus, 0, target, 0);
    if (!s->current_dev) {
        /* No such drive */
        s->rregs[ESP_RSTAT] = 0;
        s->rregs[ESP_RINTR] |= INTR_DC;
        s->rregs[ESP_RSEQ] = SEQ_0;
        esp_raise_irq(s);
        return -1;
    }

    return 0;
}

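/*
 * Fetch the CDB from the guest, either via the board's DMA callback or out
 * of the TI buffer. Returns the number of bytes received, 0 if the command
 * is not yet available (e.g. it will arrive byte-by-byte via PDMA), or -1
 * if selection failed because no device answered at the target ID (callers
 * store the result in an int32_t to distinguish the -1 case).
 */
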
static uint32_t get_cmd(ESPState *s)
{
    uint8_t *buf = s->cmdbuf;
    uint32_t dmalen;
    int target;

    target = s->wregs[ESP_WBUSID] & BUSID_DID;
    if (s->dma) {
        dmalen = esp_get_tc(s);
        if (dmalen > ESP_CMDBUF_SZ) {
            return 0;
        }
        if (s->dma_memory_read) {
            s->dma_memory_read(s->dma_opaque, buf, dmalen);
        } else {
            if (esp_select(s) < 0) {
                return -1;
            }
            esp_raise_drq(s);
            return 0;
        }
    } else {
        dmalen = s->ti_size;
        if (dmalen > TI_BUFSZ) {
            return 0;
        }
        memcpy(buf, s->ti_buf, dmalen);
        buf[0] = buf[2] >> 5;
    }
    trace_esp_get_cmd(dmalen, target);

    if (esp_select(s) < 0) {
        return -1;
    }
    return dmalen;
}

static void do_busid_cmd(ESPState *s, uint8_t *buf, uint8_t busid)
{
    int32_t datalen;
    int lun;
    SCSIDevice *current_lun;

    trace_esp_do_busid_cmd(busid);
    lun = busid & 7;
    current_lun = scsi_device_find(&s->bus, 0, s->current_dev->id, lun);
    s->current_req = scsi_req_new(current_lun, 0, lun, buf, s);
    datalen = scsi_req_enqueue(s->current_req);
    s->ti_size = datalen;
    if (datalen != 0) {
        s->rregs[ESP_RSTAT] = STAT_TC;
        esp_set_tc(s, 0);
        if (datalen > 0) {
            s->rregs[ESP_RSTAT] |= STAT_DI;
        } else {
            s->rregs[ESP_RSTAT] |= STAT_DO;
        }
        scsi_req_continue(s->current_req);
    }
    s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
    s->rregs[ESP_RSEQ] = SEQ_CD;
    esp_raise_irq(s);
    esp_lower_drq(s);
}

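/*
 * cmdbuf[0] carries the LUN byte set up by get_cmd() (in the non-DMA path
 * it appears to be taken from the top three bits of CDB byte 1, the
 * historic SCSI-2 LUN-in-CDB encoding); the CDB proper starts at cmdbuf[1],
 * which is why do_cmd() passes &buf[1] down to do_busid_cmd().
 */
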
static void do_cmd(ESPState *s)
{
    uint8_t *buf = s->cmdbuf;
    uint8_t busid = buf[0];

    do_busid_cmd(s, &buf[1], busid);
}

static void satn_pdma_cb(ESPState *s)
{
    s->do_cmd = 0;
    if (s->cmdlen) {
        do_cmd(s);
    }
}

static void handle_satn(ESPState *s)
{
    int32_t cmdlen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_satn;
        return;
    }
    s->pdma_cb = satn_pdma_cb;
    cmdlen = get_cmd(s);
    if (cmdlen > 0) {
        s->cmdlen = cmdlen;
        do_cmd(s);
    } else if (cmdlen == 0) {
        s->cmdlen = 0;
        s->do_cmd = 1;
        /* Target present, but no cmd yet - switch to command phase */
        s->rregs[ESP_RSEQ] = SEQ_CD;
        s->rregs[ESP_RSTAT] = STAT_CD;
    }
}

static void s_without_satn_pdma_cb(ESPState *s)
{
    s->do_cmd = 0;
    if (s->cmdlen) {
        do_busid_cmd(s, s->cmdbuf, 0);
    }
}

static void handle_s_without_atn(ESPState *s)
{
    int32_t cmdlen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_s_without_atn;
        return;
    }
    s->pdma_cb = s_without_satn_pdma_cb;
    cmdlen = get_cmd(s);
    if (cmdlen > 0) {
        s->cmdlen = cmdlen;
        do_busid_cmd(s, s->cmdbuf, 0);
    } else if (cmdlen == 0) {
        s->cmdlen = 0;
        s->do_cmd = 1;
        /* Target present, but no cmd yet - switch to command phase */
        s->rregs[ESP_RSEQ] = SEQ_CD;
        s->rregs[ESP_RSTAT] = STAT_CD;
    }
}

static void satn_stop_pdma_cb(ESPState *s)
{
    s->do_cmd = 0;
    if (s->cmdlen) {
        trace_esp_handle_satn_stop(s->cmdlen);
        s->do_cmd = 1;
        s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
        s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        esp_raise_irq(s);
    }
}

static void handle_satn_stop(ESPState *s)
{
    int32_t cmdlen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_satn_stop;
        return;
    }
    s->pdma_cb = satn_stop_pdma_cb;
    cmdlen = get_cmd(s);
    if (cmdlen > 0) {
        trace_esp_handle_satn_stop(s->cmdlen);
        s->cmdlen = cmdlen;
        s->do_cmd = 1;
        s->rregs[ESP_RSTAT] = STAT_CD;
        s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        esp_raise_irq(s);
    } else if (cmdlen == 0) {
        s->cmdlen = 0;
        s->do_cmd = 1;
        /* Target present, but no cmd yet - switch to command phase */
        s->rregs[ESP_RSEQ] = SEQ_CD;
        s->rregs[ESP_RSTAT] = STAT_CD;
    }
}

static void write_response_pdma_cb(ESPState *s)
{
    s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
    s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
    s->rregs[ESP_RSEQ] = SEQ_CD;
    esp_raise_irq(s);
}

static void write_response(ESPState *s)
{
    trace_esp_write_response(s->status);
    s->ti_buf[0] = s->status;
    s->ti_buf[1] = 0;
    if (s->dma) {
        if (s->dma_memory_write) {
            s->dma_memory_write(s->dma_opaque, s->ti_buf, 2);
            s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
            s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
            s->rregs[ESP_RSEQ] = SEQ_CD;
        } else {
            s->pdma_cb = write_response_pdma_cb;
            esp_raise_drq(s);
            return;
        }
    } else {
        s->ti_size = 2;
        s->ti_rptr = 0;
        s->ti_wptr = 2;
        s->rregs[ESP_RFLAGS] = 2;
    }
    esp_raise_irq(s);
}

static void esp_dma_done(ESPState *s)
{
    s->rregs[ESP_RSTAT] |= STAT_TC;
    s->rregs[ESP_RINTR] |= INTR_BS;
    s->rregs[ESP_RSEQ] = 0;
    s->rregs[ESP_RFLAGS] = 0;
    esp_set_tc(s, 0);
    esp_raise_irq(s);
}

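/*
 * PDMA completion callback for data transfers: it drains or refills the TI
 * buffer in chunks of up to TI_BUFSZ bytes, advancing the async buffer
 * provided by the SCSI layer, and finishes the DMA operation once the
 * transfer counter reaches zero or the SCSI buffer is exhausted.
 */
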
static void do_dma_pdma_cb(ESPState *s)
{
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
    int len;

    if (s->do_cmd) {
        s->ti_size = 0;
        s->cmdlen = 0;
        s->do_cmd = 0;
        do_cmd(s);
        esp_lower_drq(s);
        return;
    }

    if (to_device) {
        /* Copy FIFO data to device */
        len = MIN(s->ti_wptr, TI_BUFSZ);
        memcpy(s->async_buf, s->ti_buf, len);
        s->ti_wptr = 0;
        s->ti_rptr = 0;
        s->async_buf += len;
        s->async_len -= len;
        s->ti_size += len;
        if (s->async_len == 0) {
            scsi_req_continue(s->current_req);
            return;
        }

        if (esp_get_tc(s) == 0) {
            esp_lower_drq(s);
            esp_dma_done(s);
        }

        return;
    } else {
        if (s->async_len == 0) {
            if (s->current_req) {
                scsi_req_continue(s->current_req);
            }

            /*
             * If there is still data to be read from the device then
             * complete the DMA operation immediately. Otherwise defer
             * until the scsi layer has completed.
             */
            if (esp_get_tc(s) != 0 || s->ti_size == 0) {
                return;
            }
        }

        if (esp_get_tc(s) != 0) {
            /* Copy device data to FIFO */
            s->ti_wptr = 0;
            s->ti_rptr = 0;
            len = MIN(s->async_len, TI_BUFSZ);
            memcpy(s->ti_buf, s->async_buf, len);
            s->ti_wptr += len;
            s->async_buf += len;
            s->async_len -= len;
            s->ti_size -= len;
            esp_set_tc(s, esp_get_tc(s) - len);
            return;
        }

        /* Partially filled a scsi buffer. Complete immediately. */
        esp_lower_drq(s);
        esp_dma_done(s);
    }
}

static void esp_do_dma(ESPState *s)
{
    uint32_t len;
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);

    len = esp_get_tc(s);
    if (s->do_cmd) {
        /*
         * handle_ti() case: esp_do_dma() is called only from handle_ti()
         * with do_cmd set (see the assert())
         */
        trace_esp_do_dma(s->cmdlen, len);
        assert(s->cmdlen <= sizeof(s->cmdbuf) &&
               len <= sizeof(s->cmdbuf) - s->cmdlen);
        if (s->dma_memory_read) {
            s->dma_memory_read(s->dma_opaque, &s->cmdbuf[s->cmdlen], len);
        } else {
            s->pdma_cb = do_dma_pdma_cb;
            esp_raise_drq(s);
            return;
        }
        trace_esp_handle_ti_cmd(s->cmdlen);
        s->ti_size = 0;
        s->cmdlen = 0;
        s->do_cmd = 0;
        do_cmd(s);
        return;
    }
    if (s->async_len == 0) {
        /* Defer until data is available. */
        return;
    }
    if (len > s->async_len) {
        len = s->async_len;
    }
    if (to_device) {
        if (s->dma_memory_read) {
            s->dma_memory_read(s->dma_opaque, s->async_buf, len);
        } else {
            s->pdma_cb = do_dma_pdma_cb;
            esp_raise_drq(s);
            return;
        }
    } else {
        if (s->dma_memory_write) {
            s->dma_memory_write(s->dma_opaque, s->async_buf, len);
        } else {
            /* Copy device data to FIFO */
            len = MIN(len, TI_BUFSZ - s->ti_wptr);
            memcpy(&s->ti_buf[s->ti_wptr], s->async_buf, len);
            s->ti_wptr += len;
            s->async_buf += len;
            s->async_len -= len;
            s->ti_size -= len;
            esp_set_tc(s, esp_get_tc(s) - len);
            s->pdma_cb = do_dma_pdma_cb;
            esp_raise_drq(s);

            /* Indicate transfer to FIFO is complete */
            s->rregs[ESP_RSTAT] |= STAT_TC;
            return;
        }
    }
    esp_set_tc(s, esp_get_tc(s) - len);
    s->async_buf += len;
    s->async_len -= len;
    if (to_device) {
        s->ti_size += len;
    } else {
        s->ti_size -= len;
    }
    if (s->async_len == 0) {
        scsi_req_continue(s->current_req);
        /*
         * If there is still data to be read from the device then
         * complete the DMA operation immediately. Otherwise defer
         * until the scsi layer has completed.
         */
        if (to_device || esp_get_tc(s) != 0 || s->ti_size == 0) {
            return;
        }
    }

    /* Partially filled a scsi buffer. Complete immediately. */
    esp_dma_done(s);
    esp_lower_drq(s);
}

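/*
 * Request completion. If the guest has not yet acknowledged a previous
 * interrupt (STAT_INT still set), esp_command_complete() stashes the status
 * in deferred_status/deferred_complete, and the report is replayed from the
 * ESP_RINTR read handler once the pending interrupt has been consumed.
 */
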
static void esp_report_command_complete(ESPState *s, uint32_t status)
{
    trace_esp_command_complete();
    if (s->ti_size != 0) {
        trace_esp_command_complete_unexpected();
    }
    s->ti_size = 0;
    s->async_len = 0;
    if (status) {
        trace_esp_command_complete_fail();
    }
    s->status = status;
    s->rregs[ESP_RSTAT] = STAT_ST;
    esp_dma_done(s);
    esp_lower_drq(s);
    if (s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
    }
}

void esp_command_complete(SCSIRequest *req, size_t resid)
{
    ESPState *s = req->hba_private;

    if (s->rregs[ESP_RSTAT] & STAT_INT) {
        /*
         * Defer handling command complete until the previous
         * interrupt has been handled.
         */
        trace_esp_command_complete_deferred();
        s->deferred_status = req->status;
        s->deferred_complete = true;
        return;
    }
    esp_report_command_complete(s, req->status);
}

void esp_transfer_data(SCSIRequest *req, uint32_t len)
{
    ESPState *s = req->hba_private;
    uint32_t dmalen = esp_get_tc(s);

    assert(!s->do_cmd);
    trace_esp_transfer_data(dmalen, s->ti_size);
    s->async_len = len;
    s->async_buf = scsi_req_get_buf(req);
    if (dmalen) {
        esp_do_dma(s);
    } else if (s->ti_size <= 0) {
        /*
         * If this was the last part of a DMA transfer then the
         * completion interrupt is deferred to here.
         */
        esp_dma_done(s);
        esp_lower_drq(s);
    }
}

static void handle_ti(ESPState *s)
{
    uint32_t dmalen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_ti;
        return;
    }

    dmalen = esp_get_tc(s);
    if (s->dma) {
        trace_esp_handle_ti(dmalen);
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        esp_do_dma(s);
    } else if (s->do_cmd) {
        trace_esp_handle_ti_cmd(s->cmdlen);
        s->ti_size = 0;
        s->cmdlen = 0;
        s->do_cmd = 0;
        do_cmd(s);
    }
}

void esp_hard_reset(ESPState *s)
{
    memset(s->rregs, 0, ESP_REGS);
    memset(s->wregs, 0, ESP_REGS);
    s->tchi_written = 0;
    s->ti_size = 0;
    s->ti_rptr = 0;
    s->ti_wptr = 0;
    s->dma = 0;
    s->do_cmd = 0;
    s->dma_cb = NULL;

    /* The low bits of CFG1 hold the chip's own SCSI bus ID; default to 7 */
    s->rregs[ESP_CFG1] = 7;
}

static void esp_soft_reset(ESPState *s)
{
    qemu_irq_lower(s->irq);
    qemu_irq_lower(s->irq_data);
    esp_hard_reset(s);
}

static void parent_esp_reset(ESPState *s, int irq, int level)
{
    if (level) {
        esp_soft_reset(s);
    }
}

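/*
 * Register reads have side effects: reading ESP_RINTR returns the interrupt
 * bits latched (OR-ed in) since the last read, then clears RINTR and RSEQ,
 * drops all status bits except TC, and deasserts the IRQ line. Reading
 * ESP_FIFO pops one byte from the TI buffer.
 */
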
uint64_t esp_reg_read(ESPState *s, uint32_t saddr)
{
    uint32_t val;

    switch (saddr) {
    case ESP_FIFO:
        if ((s->rregs[ESP_RSTAT] & STAT_PIO_MASK) == 0) {
            /* Data out. */
            qemu_log_mask(LOG_UNIMP, "esp: PIO data read not implemented\n");
            s->rregs[ESP_FIFO] = 0;
        } else if (s->ti_rptr < s->ti_wptr) {
            s->ti_size--;
            s->rregs[ESP_FIFO] = s->ti_buf[s->ti_rptr++];
        }
        if (s->ti_rptr == s->ti_wptr) {
            s->ti_rptr = 0;
            s->ti_wptr = 0;
        }
        val = s->rregs[ESP_FIFO];
        break;
    case ESP_RINTR:
        /*
         * Clear sequence step, interrupt register and all status bits
         * except TC
         */
        val = s->rregs[ESP_RINTR];
        s->rregs[ESP_RINTR] = 0;
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        s->rregs[ESP_RSEQ] = SEQ_0;
        esp_lower_irq(s);
        if (s->deferred_complete) {
            esp_report_command_complete(s, s->deferred_status);
            s->deferred_complete = false;
        }
        break;
    case ESP_TCHI:
        /* Return the unique id if the value has never been written */
        if (!s->tchi_written) {
            val = s->chip_id;
        } else {
            val = s->rregs[saddr];
        }
        break;
    default:
        val = s->rregs[saddr];
        break;
    }

    trace_esp_mem_readb(saddr, val);
    return val;
}

void esp_reg_write(ESPState *s, uint32_t saddr, uint64_t val)
{
    trace_esp_mem_writeb(saddr, s->wregs[saddr], val);
    switch (saddr) {
    case ESP_TCHI:
        s->tchi_written = true;
        /* fall through */
    case ESP_TCLO:
    case ESP_TCMID:
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        break;
    case ESP_FIFO:
        if (s->do_cmd) {
            if (s->cmdlen < ESP_CMDBUF_SZ) {
                s->cmdbuf[s->cmdlen++] = val & 0xff;
            } else {
                trace_esp_error_fifo_overrun();
            }
        } else if (s->ti_wptr == TI_BUFSZ - 1) {
            trace_esp_error_fifo_overrun();
        } else {
            s->ti_size++;
            s->ti_buf[s->ti_wptr++] = val & 0xff;
        }
        break;
    case ESP_CMD:
        s->rregs[saddr] = val;
        if (val & CMD_DMA) {
            s->dma = 1;
            /* Reload DMA counter; a start value of 0 means 0x10000 bytes */
            if (esp_get_stc(s) == 0) {
                esp_set_tc(s, 0x10000);
            } else {
                esp_set_tc(s, esp_get_stc(s));
            }
        } else {
            s->dma = 0;
        }
        switch (val & CMD_CMD) {
        case CMD_NOP:
            trace_esp_mem_writeb_cmd_nop(val);
            break;
        case CMD_FLUSH:
            trace_esp_mem_writeb_cmd_flush(val);
            /*s->ti_size = 0;*/
            s->ti_wptr = 0;
            s->ti_rptr = 0;
            break;
        case CMD_RESET:
            trace_esp_mem_writeb_cmd_reset(val);
            esp_soft_reset(s);
            break;
        case CMD_BUSRESET:
            trace_esp_mem_writeb_cmd_bus_reset(val);
            if (!(s->wregs[ESP_CFG1] & CFG1_RESREPT)) {
                s->rregs[ESP_RINTR] |= INTR_RST;
                esp_raise_irq(s);
            }
            break;
        case CMD_TI:
            trace_esp_mem_writeb_cmd_ti(val);
            handle_ti(s);
            break;
        case CMD_ICCS:
            trace_esp_mem_writeb_cmd_iccs(val);
            write_response(s);
            s->rregs[ESP_RINTR] |= INTR_FC;
            s->rregs[ESP_RSTAT] |= STAT_MI;
            break;
        case CMD_MSGACC:
            trace_esp_mem_writeb_cmd_msgacc(val);
            s->rregs[ESP_RINTR] |= INTR_DC;
            s->rregs[ESP_RSEQ] = 0;
            s->rregs[ESP_RFLAGS] = 0;
            esp_raise_irq(s);
            break;
        case CMD_PAD:
            trace_esp_mem_writeb_cmd_pad(val);
            s->rregs[ESP_RSTAT] = STAT_TC;
            s->rregs[ESP_RINTR] |= INTR_FC;
            s->rregs[ESP_RSEQ] = 0;
            break;
        case CMD_SATN:
            trace_esp_mem_writeb_cmd_satn(val);
            break;
        case CMD_RSTATN:
            trace_esp_mem_writeb_cmd_rstatn(val);
            break;
        case CMD_SEL:
            trace_esp_mem_writeb_cmd_sel(val);
            handle_s_without_atn(s);
            break;
        case CMD_SELATN:
            trace_esp_mem_writeb_cmd_selatn(val);
            handle_satn(s);
            break;
        case CMD_SELATNS:
            trace_esp_mem_writeb_cmd_selatns(val);
            handle_satn_stop(s);
            break;
        case CMD_ENSEL:
            trace_esp_mem_writeb_cmd_ensel(val);
            s->rregs[ESP_RINTR] = 0;
            break;
        case CMD_DISSEL:
            trace_esp_mem_writeb_cmd_dissel(val);
            s->rregs[ESP_RINTR] = 0;
            esp_raise_irq(s);
            break;
        default:
            trace_esp_error_unhandled_command(val);
            break;
        }
        break;
    case ESP_WBUSID ... ESP_WSYNO:
        break;
    case ESP_CFG1:
    case ESP_CFG2: case ESP_CFG3:
    case ESP_RES3: case ESP_RES4:
        s->rregs[saddr] = val;
        break;
    case ESP_WCCF ... ESP_WTEST:
        break;
    default:
        trace_esp_error_invalid_write(val, saddr);
        return;
    }
    s->wregs[saddr] = val;
}

static bool esp_mem_accepts(void *opaque, hwaddr addr,
                            unsigned size, bool is_write,
                            MemTxAttrs attrs)
{
    return (size == 1) || (is_write && size == 4);
}

static bool esp_is_before_version_5(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);

    version_id = MIN(version_id, s->mig_version_id);
    return version_id < 5;
}

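/*
 * When ESPState is embedded in another device (see vmstate_sysbus_esp_scsi
 * below) the inner section version is not transmitted on the wire, so
 * pre_save records vmstate_esp.version_id in mig_version_id and the load
 * side clamps against it; this is presumably what lets a newer QEMU still
 * accept pre-version-5 streams that carry the old mig_dma_left field.
 */
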
static int esp_pre_save(void *opaque)
{
    ESPState *s = ESP(opaque);

    s->mig_version_id = vmstate_esp.version_id;
    return 0;
}

static int esp_post_load(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);

    version_id = MIN(version_id, s->mig_version_id);

    if (version_id < 5) {
        esp_set_tc(s, s->mig_dma_left);
    }

    s->mig_version_id = vmstate_esp.version_id;
    return 0;
}

const VMStateDescription vmstate_esp = {
    .name = "esp",
    .version_id = 5,
    .minimum_version_id = 3,
    .pre_save = esp_pre_save,
    .post_load = esp_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_BUFFER(rregs, ESPState),
        VMSTATE_BUFFER(wregs, ESPState),
        VMSTATE_INT32(ti_size, ESPState),
        VMSTATE_UINT32(ti_rptr, ESPState),
        VMSTATE_UINT32(ti_wptr, ESPState),
        VMSTATE_BUFFER(ti_buf, ESPState),
        VMSTATE_UINT32(status, ESPState),
        VMSTATE_UINT32(deferred_status, ESPState),
        VMSTATE_BOOL(deferred_complete, ESPState),
        VMSTATE_UINT32(dma, ESPState),
        VMSTATE_PARTIAL_BUFFER(cmdbuf, ESPState, 16),
        VMSTATE_BUFFER_START_MIDDLE_V(cmdbuf, ESPState, 16, 4),
        VMSTATE_UINT32(cmdlen, ESPState),
        VMSTATE_UINT32(do_cmd, ESPState),
        VMSTATE_UINT32_TEST(mig_dma_left, ESPState, esp_is_before_version_5),
        VMSTATE_END_OF_LIST()
    },
};

static void sysbus_esp_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);
    uint32_t saddr;

    saddr = addr >> sysbus->it_shift;
    esp_reg_write(s, saddr, val);
}

static uint64_t sysbus_esp_mem_read(void *opaque, hwaddr addr,
                                    unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);
    uint32_t saddr;

    saddr = addr >> sysbus->it_shift;
    return esp_reg_read(s, saddr);
}

static const MemoryRegionOps sysbus_esp_mem_ops = {
    .read = sysbus_esp_mem_read,
    .write = sysbus_esp_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.accepts = esp_mem_accepts,
};

static void sysbus_esp_pdma_write(void *opaque, hwaddr addr,
                                  uint64_t val, unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);
    uint32_t dmalen;

    trace_esp_pdma_write(size);

    switch (size) {
    case 1:
        esp_pdma_write(s, val);
        break;
    case 2:
        esp_pdma_write(s, val >> 8);
        esp_pdma_write(s, val);
        break;
    }
    dmalen = esp_get_tc(s);
    if (dmalen == 0 || (s->ti_wptr == TI_BUFSZ)) {
        s->pdma_cb(s);
    }
}

static uint64_t sysbus_esp_pdma_read(void *opaque, hwaddr addr,
                                     unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);
    uint64_t val = 0;

    trace_esp_pdma_read(size);

    switch (size) {
    case 1:
        val = esp_pdma_read(s);
        break;
    case 2:
        val = esp_pdma_read(s);
        val = (val << 8) | esp_pdma_read(s);
        break;
    }
    if (s->ti_rptr == s->ti_wptr) {
        s->ti_wptr = 0;
        s->ti_rptr = 0;
        s->pdma_cb(s);
    }
    return val;
}

static const MemoryRegionOps sysbus_esp_pdma_ops = {
    .read = sysbus_esp_pdma_read,
    .write = sysbus_esp_pdma_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.min_access_size = 1,
    .valid.max_access_size = 4,
    .impl.min_access_size = 1,
    .impl.max_access_size = 2,
};

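/*
 * Guests may issue 4-byte accesses to the PDMA region, but with
 * impl.max_access_size = 2 the memory core splits them into two 2-byte
 * operations before they reach sysbus_esp_pdma_read/write, which is why
 * the handlers above only need to deal with sizes 1 and 2.
 */
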
static const struct SCSIBusInfo esp_scsi_info = {
    .tcq = false,
    .max_target = ESP_MAX_DEVS,
    .max_lun = 7,

    .transfer_data = esp_transfer_data,
    .complete = esp_command_complete,
    .cancel = esp_request_cancelled
};

static void sysbus_esp_gpio_demux(void *opaque, int irq, int level)
{
    SysBusESPState *sysbus = SYSBUS_ESP(opaque);
    ESPState *s = ESP(&sysbus->esp);

    switch (irq) {
    case 0:
        parent_esp_reset(s, irq, level);
        break;
    case 1:
        /* Pass the ESPState, not the opaque SysBusESPState pointer */
        esp_dma_enable(s, irq, level);
        break;
    }
}

static void sysbus_esp_realize(DeviceState *dev, Error **errp)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
    SysBusESPState *sysbus = SYSBUS_ESP(dev);
    ESPState *s = ESP(&sysbus->esp);

    if (!qdev_realize(DEVICE(s), NULL, errp)) {
        return;
    }

    sysbus_init_irq(sbd, &s->irq);
    sysbus_init_irq(sbd, &s->irq_data);
    assert(sysbus->it_shift != -1);

    s->chip_id = TCHI_FAS100A;
    memory_region_init_io(&sysbus->iomem, OBJECT(sysbus), &sysbus_esp_mem_ops,
                          sysbus, "esp-regs", ESP_REGS << sysbus->it_shift);
    sysbus_init_mmio(sbd, &sysbus->iomem);
    memory_region_init_io(&sysbus->pdma, OBJECT(sysbus), &sysbus_esp_pdma_ops,
                          sysbus, "esp-pdma", 4);
    sysbus_init_mmio(sbd, &sysbus->pdma);

    qdev_init_gpio_in(dev, sysbus_esp_gpio_demux, 2);

    scsi_bus_new(&s->bus, sizeof(s->bus), dev, &esp_scsi_info, NULL);
}

static void sysbus_esp_hard_reset(DeviceState *dev)
{
    SysBusESPState *sysbus = SYSBUS_ESP(dev);
    ESPState *s = ESP(&sysbus->esp);

    esp_hard_reset(s);
}

static void sysbus_esp_init(Object *obj)
{
    SysBusESPState *sysbus = SYSBUS_ESP(obj);

    object_initialize_child(obj, "esp", &sysbus->esp, TYPE_ESP);
}

static const VMStateDescription vmstate_sysbus_esp_scsi = {
    .name = "sysbusespscsi",
    .version_id = 2,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8_V(esp.mig_version_id, SysBusESPState, 2),
        VMSTATE_STRUCT(esp, SysBusESPState, 0, vmstate_esp, ESPState),
        VMSTATE_END_OF_LIST()
    }
};

static void sysbus_esp_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = sysbus_esp_realize;
    dc->reset = sysbus_esp_hard_reset;
    dc->vmsd = &vmstate_sysbus_esp_scsi;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}

static const TypeInfo sysbus_esp_info = {
    .name = TYPE_SYSBUS_ESP,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_init = sysbus_esp_init,
    .instance_size = sizeof(SysBusESPState),
    .class_init = sysbus_esp_class_init,
};

static void esp_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    /* internal device for sysbusesp/pciespscsi, not user-creatable */
    dc->user_creatable = false;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}

static const TypeInfo esp_info = {
    .name = TYPE_ESP,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(ESPState),
    .class_init = esp_class_init,
};

static void esp_register_types(void)
{
    type_register_static(&sysbus_esp_info);
    type_register_static(&esp_info);
}

type_init(esp_register_types)