/*
 * QEMU ESP/NCR53C9x emulation
 *
 * Copyright (c) 2005-2006 Fabrice Bellard
 * Copyright (c) 2012 Herve Poussineau
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "hw/sysbus.h"
#include "migration/vmstate.h"
#include "hw/irq.h"
#include "hw/scsi/esp.h"
#include "trace.h"
#include "qemu/log.h"
#include "qemu/module.h"

/*
 * On Sparc32, this is the ESP (NCR53C90) part of chip STP2000 (Master I/O),
 * also produced as NCR89C100. See
 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR89C100.txt
 * and
 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR53C9X.txt
 *
 * On Macintosh Quadra it is a NCR53C96.
 */

static void esp_raise_irq(ESPState *s)
{
    if (!(s->rregs[ESP_RSTAT] & STAT_INT)) {
        s->rregs[ESP_RSTAT] |= STAT_INT;
        qemu_irq_raise(s->irq);
        trace_esp_raise_irq();
    }
}

static void esp_lower_irq(ESPState *s)
{
    if (s->rregs[ESP_RSTAT] & STAT_INT) {
        s->rregs[ESP_RSTAT] &= ~STAT_INT;
        qemu_irq_lower(s->irq);
        trace_esp_lower_irq();
    }
}

static void esp_raise_drq(ESPState *s)
{
    qemu_irq_raise(s->irq_data);
    trace_esp_raise_drq();
}

static void esp_lower_drq(ESPState *s)
{
    qemu_irq_lower(s->irq_data);
    trace_esp_lower_drq();
}

void esp_dma_enable(ESPState *s, int irq, int level)
{
    if (level) {
        s->dma_enabled = 1;
        trace_esp_dma_enable();
        if (s->dma_cb) {
            s->dma_cb(s);
            s->dma_cb = NULL;
        }
    } else {
        trace_esp_dma_disable();
        s->dma_enabled = 0;
    }
}

void esp_request_cancelled(SCSIRequest *req)
{
    ESPState *s = req->hba_private;

    if (req == s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
        s->async_len = 0;
    }
}
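
/*
 * Wrappers around the generic Fifo8 helpers: a push to a full FIFO is
 * dropped (with an error trace) and a pop from an empty FIFO returns 0,
 * so guest-driven overruns and underruns cannot corrupt the emulated state.
 */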
static void esp_fifo_push(Fifo8 *fifo, uint8_t val)
{
    if (fifo8_num_used(fifo) == fifo->capacity) {
        trace_esp_error_fifo_overrun();
        return;
    }

    fifo8_push(fifo, val);
}

static uint8_t esp_fifo_pop(Fifo8 *fifo)
{
    if (fifo8_is_empty(fifo)) {
        return 0;
    }

    return fifo8_pop(fifo);
}

static uint32_t esp_fifo_pop_buf(Fifo8 *fifo, uint8_t *dest, int maxlen)
{
    const uint8_t *buf;
    uint32_t n;

    if (maxlen == 0) {
        return 0;
    }

    buf = fifo8_pop_buf(fifo, maxlen, &n);
    if (dest) {
        memcpy(dest, buf, n);
    }

    return n;
}
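
/*
 * The 24-bit transfer counter (TC) is split across the TCLO/TCMID/TCHI
 * registers: rregs hold the current count, wregs hold the start value
 * last programmed by the guest (STC).
 */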
static uint32_t esp_get_tc(ESPState *s)
{
    uint32_t dmalen;

    dmalen = s->rregs[ESP_TCLO];
    dmalen |= s->rregs[ESP_TCMID] << 8;
    dmalen |= s->rregs[ESP_TCHI] << 16;

    return dmalen;
}

static void esp_set_tc(ESPState *s, uint32_t dmalen)
{
    s->rregs[ESP_TCLO] = dmalen;
    s->rregs[ESP_TCMID] = dmalen >> 8;
    s->rregs[ESP_TCHI] = dmalen >> 16;
}

static uint32_t esp_get_stc(ESPState *s)
{
    uint32_t dmalen;

    dmalen = s->wregs[ESP_TCLO];
    dmalen |= s->wregs[ESP_TCMID] << 8;
    dmalen |= s->wregs[ESP_TCHI] << 16;

    return dmalen;
}
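
/*
 * PDMA (pseudo-DMA) helpers: the host CPU moves data byte by byte through
 * a separate MMIO region. Reads pop from cmdfifo or fifo depending on
 * whether a command is being assembled; writes push a byte and decrement
 * the transfer counter.
 */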
static uint8_t esp_pdma_read(ESPState *s)
{
    uint8_t val;

    if (s->do_cmd) {
        val = esp_fifo_pop(&s->cmdfifo);
    } else {
        val = esp_fifo_pop(&s->fifo);
    }

    return val;
}

static void esp_pdma_write(ESPState *s, uint8_t val)
{
    uint32_t dmalen = esp_get_tc(s);

    if (dmalen == 0) {
        return;
    }

    if (s->do_cmd) {
        esp_fifo_push(&s->cmdfifo, val);
    } else {
        esp_fifo_push(&s->fifo, val);
    }

    dmalen--;
    esp_set_tc(s, dmalen);
}

static int esp_select(ESPState *s)
{
    int target;

    target = s->wregs[ESP_WBUSID] & BUSID_DID;

    s->ti_size = 0;
    fifo8_reset(&s->fifo);

    s->current_dev = scsi_device_find(&s->bus, 0, target, 0);
    if (!s->current_dev) {
        /* No such drive */
        s->rregs[ESP_RSTAT] = 0;
        s->rregs[ESP_RINTR] = INTR_DC;
        s->rregs[ESP_RSEQ] = SEQ_0;
        esp_raise_irq(s);
        return -1;
    }

    /*
     * Note that we deliberately don't raise the IRQ here: this will be done
     * either in do_command_phase() for DATA OUT transfers or by the deferred
     * IRQ mechanism in esp_transfer_data() for DATA IN transfers
     */
    s->rregs[ESP_RINTR] |= INTR_FC;
    s->rregs[ESP_RSEQ] = SEQ_CD;
    return 0;
}
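
/*
 * Gather CDB bytes for the next command into cmdfifo, either from guest
 * memory (DMA), via PDMA, or from the chip FIFO, and perform target
 * selection. Returns the number of bytes collected, 0 if the command is
 * not yet available, or -1 if selection failed.
 */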
static uint32_t get_cmd(ESPState *s, uint32_t maxlen)
{
    uint8_t buf[ESP_CMDFIFO_SZ];
    uint32_t dmalen, n;
    int target;

    if (s->current_req) {
        /* Started a new command before the old one finished. Cancel it. */
        scsi_req_cancel(s->current_req);
    }

    target = s->wregs[ESP_WBUSID] & BUSID_DID;
    if (s->dma) {
        dmalen = MIN(esp_get_tc(s), maxlen);
        if (dmalen == 0) {
            return 0;
        }
        if (s->dma_memory_read) {
            s->dma_memory_read(s->dma_opaque, buf, dmalen);
            dmalen = MIN(fifo8_num_free(&s->cmdfifo), dmalen);
            fifo8_push_all(&s->cmdfifo, buf, dmalen);
        } else {
            if (esp_select(s) < 0) {
                fifo8_reset(&s->cmdfifo);
                return -1;
            }
            esp_raise_drq(s);
            fifo8_reset(&s->cmdfifo);
            return 0;
        }
    } else {
        dmalen = MIN(fifo8_num_used(&s->fifo), maxlen);
        if (dmalen == 0) {
            return 0;
        }
        n = esp_fifo_pop_buf(&s->fifo, buf, dmalen);
        n = MIN(fifo8_num_free(&s->cmdfifo), n);
        fifo8_push_all(&s->cmdfifo, buf, n);
    }
    trace_esp_get_cmd(dmalen, target);

    if (esp_select(s) < 0) {
        fifo8_reset(&s->cmdfifo);
        return -1;
    }
    return dmalen;
}
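
/*
 * Hand the CDB accumulated in cmdfifo to the SCSI layer and switch the
 * bus to DATA IN or DATA OUT according to the transfer direction reported
 * by scsi_req_enqueue().
 */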
static void do_command_phase(ESPState *s)
{
    uint32_t cmdlen;
    int32_t datalen;
    SCSIDevice *current_lun;
    uint8_t buf[ESP_CMDFIFO_SZ];

    trace_esp_do_command_phase(s->lun);
    cmdlen = fifo8_num_used(&s->cmdfifo);
    if (!cmdlen || !s->current_dev) {
        return;
    }
    esp_fifo_pop_buf(&s->cmdfifo, buf, cmdlen);

    current_lun = scsi_device_find(&s->bus, 0, s->current_dev->id, s->lun);
    s->current_req = scsi_req_new(current_lun, 0, s->lun, buf, s);
    datalen = scsi_req_enqueue(s->current_req);
    s->ti_size = datalen;
    fifo8_reset(&s->cmdfifo);
    if (datalen != 0) {
        s->rregs[ESP_RSTAT] = STAT_TC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        s->ti_cmd = 0;
        esp_set_tc(s, 0);
        if (datalen > 0) {
            /*
             * Switch to DATA IN phase but wait until initial data xfer is
             * complete before raising the command completion interrupt
             */
            s->data_in_ready = false;
            s->rregs[ESP_RSTAT] |= STAT_DI;
        } else {
            s->rregs[ESP_RSTAT] |= STAT_DO;
            s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
            esp_raise_irq(s);
            esp_lower_drq(s);
        }
        scsi_req_continue(s->current_req);
        return;
    }
}
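
/*
 * Consume the IDENTIFY message at the start of cmdfifo to select the LUN,
 * discarding any remaining message out bytes that precede the CDB.
 */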
static void do_message_phase(ESPState *s)
{
    if (s->cmdfifo_cdb_offset) {
        uint8_t message = esp_fifo_pop(&s->cmdfifo);

        trace_esp_do_identify(message);
        s->lun = message & 7;
        s->cmdfifo_cdb_offset--;
    }

    /* Ignore extended messages for now */
    if (s->cmdfifo_cdb_offset) {
        int len = MIN(s->cmdfifo_cdb_offset, fifo8_num_used(&s->cmdfifo));
        esp_fifo_pop_buf(&s->cmdfifo, NULL, len);
        s->cmdfifo_cdb_offset = 0;
    }
}

static void do_cmd(ESPState *s)
{
    do_message_phase(s);
    assert(s->cmdfifo_cdb_offset == 0);
    do_command_phase(s);
}
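
/*
 * PDMA completion callback for SELECT WITH ATN: once the transfer counter
 * has expired and command bytes are present in cmdfifo, process the
 * queued command.
 */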
static void satn_pdma_cb(ESPState *s)
{
    if (!esp_get_tc(s) && !fifo8_is_empty(&s->cmdfifo)) {
        s->cmdfifo_cdb_offset = 1;
        s->do_cmd = 0;
        do_cmd(s);
    }
}

static void handle_satn(ESPState *s)
{
    int32_t cmdlen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_satn;
        return;
    }
    s->pdma_cb = satn_pdma_cb;
    cmdlen = get_cmd(s, ESP_CMDFIFO_SZ);
    if (cmdlen > 0) {
        s->cmdfifo_cdb_offset = 1;
        s->do_cmd = 0;
        do_cmd(s);
    } else if (cmdlen == 0) {
        s->do_cmd = 1;
        /* Target present, but no cmd yet - switch to command phase */
        s->rregs[ESP_RSEQ] = SEQ_CD;
        s->rregs[ESP_RSTAT] = STAT_CD;
    }
}

static void s_without_satn_pdma_cb(ESPState *s)
{
    if (!esp_get_tc(s) && !fifo8_is_empty(&s->cmdfifo)) {
        s->cmdfifo_cdb_offset = 0;
        s->do_cmd = 0;
        do_cmd(s);
    }
}

static void handle_s_without_atn(ESPState *s)
{
    int32_t cmdlen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_s_without_atn;
        return;
    }
    s->pdma_cb = s_without_satn_pdma_cb;
    cmdlen = get_cmd(s, ESP_CMDFIFO_SZ);
    if (cmdlen > 0) {
        s->cmdfifo_cdb_offset = 0;
        s->do_cmd = 0;
        do_cmd(s);
    } else if (cmdlen == 0) {
        s->do_cmd = 1;
        /* Target present, but no cmd yet - switch to command phase */
        s->rregs[ESP_RSEQ] = SEQ_CD;
        s->rregs[ESP_RSTAT] = STAT_CD;
    }
}

static void satn_stop_pdma_cb(ESPState *s)
{
    if (!esp_get_tc(s) && !fifo8_is_empty(&s->cmdfifo)) {
        trace_esp_handle_satn_stop(fifo8_num_used(&s->cmdfifo));
        s->do_cmd = 1;
        s->cmdfifo_cdb_offset = 1;
        s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
        s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        esp_raise_irq(s);
    }
}

static void handle_satn_stop(ESPState *s)
{
    int32_t cmdlen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_satn_stop;
        return;
    }
    s->pdma_cb = satn_stop_pdma_cb;
    cmdlen = get_cmd(s, 1);
    if (cmdlen > 0) {
        trace_esp_handle_satn_stop(fifo8_num_used(&s->cmdfifo));
        s->do_cmd = 1;
        s->cmdfifo_cdb_offset = 1;
        s->rregs[ESP_RSTAT] = STAT_MO;
        s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_MO;
        esp_raise_irq(s);
    } else if (cmdlen == 0) {
        s->do_cmd = 1;
        /* Target present, switch to message out phase */
        s->rregs[ESP_RSEQ] = SEQ_MO;
        s->rregs[ESP_RSTAT] = STAT_MO;
    }
}

static void write_response_pdma_cb(ESPState *s)
{
    s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
    s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
    s->rregs[ESP_RSEQ] = SEQ_CD;
    esp_raise_irq(s);
}

static void write_response(ESPState *s)
{
    uint8_t buf[2];

    trace_esp_write_response(s->status);

    buf[0] = s->status;
    buf[1] = 0;

    if (s->dma) {
        if (s->dma_memory_write) {
            s->dma_memory_write(s->dma_opaque, buf, 2);
            s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
            s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
            s->rregs[ESP_RSEQ] = SEQ_CD;
        } else {
            s->pdma_cb = write_response_pdma_cb;
            esp_raise_drq(s);
            return;
        }
    } else {
        fifo8_reset(&s->fifo);
        fifo8_push_all(&s->fifo, buf, 2);
        s->rregs[ESP_RFLAGS] = 2;
    }
    esp_raise_irq(s);
}

static void esp_dma_done(ESPState *s)
{
    s->rregs[ESP_RSTAT] |= STAT_TC;
    s->rregs[ESP_RINTR] |= INTR_BS;
    s->rregs[ESP_RFLAGS] = 0;
    esp_set_tc(s, 0);
    esp_raise_irq(s);
}
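
/*
 * PDMA completion callback for TI commands: moves data between the chip
 * FIFO and the SCSI layer's async buffer, completing the DMA cycle or
 * deferring to the SCSI layer as required.
 */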
static void do_dma_pdma_cb(ESPState *s)
{
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
    int len;
    uint32_t n;

    if (s->do_cmd) {
        /* Ensure we have received complete command after SATN and stop */
        if (esp_get_tc(s) || fifo8_is_empty(&s->cmdfifo)) {
            return;
        }

        s->ti_size = 0;
        if ((s->rregs[ESP_RSTAT] & 7) == STAT_CD) {
            /* No command received */
            if (s->cmdfifo_cdb_offset == fifo8_num_used(&s->cmdfifo)) {
                return;
            }

            /* Command has been received */
            s->do_cmd = 0;
            do_cmd(s);
        } else {
            /*
             * Extra message out bytes received: update cmdfifo_cdb_offset
             * and then switch to command phase
             */
            s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo);
            s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
            s->rregs[ESP_RSEQ] = SEQ_CD;
            s->rregs[ESP_RINTR] |= INTR_BS;
            esp_raise_irq(s);
        }
        return;
    }

    if (!s->current_req) {
        return;
    }

    if (to_device) {
        /* Copy FIFO data to device */
        len = MIN(s->async_len, ESP_FIFO_SZ);
        len = MIN(len, fifo8_num_used(&s->fifo));
        n = esp_fifo_pop_buf(&s->fifo, s->async_buf, len);
        s->async_buf += n;
        s->async_len -= n;
        s->ti_size += n;

        if (n < len) {
            /* Unaligned accesses can cause FIFO wraparound */
            len = len - n;
            n = esp_fifo_pop_buf(&s->fifo, s->async_buf, len);
            s->async_buf += n;
            s->async_len -= n;
            s->ti_size += n;
        }

        if (s->async_len == 0) {
            scsi_req_continue(s->current_req);
            return;
        }

        if (esp_get_tc(s) == 0) {
            esp_lower_drq(s);
            esp_dma_done(s);
        }

        return;
    } else {
        if (s->async_len == 0) {
            /* Defer until the scsi layer has completed */
            scsi_req_continue(s->current_req);
            s->data_in_ready = false;
            return;
        }

        if (esp_get_tc(s) != 0) {
            /* Copy device data to FIFO */
            len = MIN(s->async_len, esp_get_tc(s));
            len = MIN(len, fifo8_num_free(&s->fifo));
            fifo8_push_all(&s->fifo, s->async_buf, len);
            s->async_buf += len;
            s->async_len -= len;
            s->ti_size -= len;
            esp_set_tc(s, esp_get_tc(s) - len);

            if (esp_get_tc(s) == 0) {
                /* Indicate transfer to FIFO is complete */
                s->rregs[ESP_RSTAT] |= STAT_TC;
            }
            return;
        }

        /* Partially filled a scsi buffer. Complete immediately. */
        esp_lower_drq(s);
        esp_dma_done(s);
    }
}
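
/*
 * Main DMA path for TI commands: transfer up to the current TC count
 * between guest memory (or the PDMA FIFO) and the SCSI layer buffer,
 * raising DRQ whenever the host CPU has to move the data itself.
 */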
static void esp_do_dma(ESPState *s)
{
    uint32_t len, cmdlen;
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
    uint8_t buf[ESP_CMDFIFO_SZ];

    len = esp_get_tc(s);
    if (s->do_cmd) {
        /*
         * handle_ti_cmd() case: esp_do_dma() is called only from
         * handle_ti_cmd() with do_cmd != NULL (see the assert())
         */
        cmdlen = fifo8_num_used(&s->cmdfifo);
        trace_esp_do_dma(cmdlen, len);
        if (s->dma_memory_read) {
            len = MIN(len, fifo8_num_free(&s->cmdfifo));
            s->dma_memory_read(s->dma_opaque, buf, len);
            fifo8_push_all(&s->cmdfifo, buf, len);
        } else {
            s->pdma_cb = do_dma_pdma_cb;
            esp_raise_drq(s);
            return;
        }
        trace_esp_handle_ti_cmd(cmdlen);
        s->ti_size = 0;
        if ((s->rregs[ESP_RSTAT] & 7) == STAT_CD) {
            /* No command received */
            if (s->cmdfifo_cdb_offset == fifo8_num_used(&s->cmdfifo)) {
                return;
            }

            /* Command has been received */
            s->do_cmd = 0;
            do_cmd(s);
        } else {
            /*
             * Extra message out bytes received: update cmdfifo_cdb_offset
             * and then switch to command phase
             */
            s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo);
            s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
            s->rregs[ESP_RSEQ] = SEQ_CD;
            s->rregs[ESP_RINTR] |= INTR_BS;
            esp_raise_irq(s);
        }
        return;
    }
    if (!s->current_req) {
        return;
    }
    if (s->async_len == 0) {
        /* Defer until data is available.  */
        return;
    }
    if (len > s->async_len) {
        len = s->async_len;
    }
    if (to_device) {
        if (s->dma_memory_read) {
            s->dma_memory_read(s->dma_opaque, s->async_buf, len);
        } else {
            s->pdma_cb = do_dma_pdma_cb;
            esp_raise_drq(s);
            return;
        }
    } else {
        if (s->dma_memory_write) {
            s->dma_memory_write(s->dma_opaque, s->async_buf, len);
        } else {
            /* Adjust TC for any leftover data in the FIFO */
            if (!fifo8_is_empty(&s->fifo)) {
                esp_set_tc(s, esp_get_tc(s) - fifo8_num_used(&s->fifo));
            }

            /* Copy device data to FIFO */
            len = MIN(len, fifo8_num_free(&s->fifo));
            fifo8_push_all(&s->fifo, s->async_buf, len);
            s->async_buf += len;
            s->async_len -= len;
            s->ti_size -= len;

            /*
             * MacOS toolbox uses a TI length of 16 bytes for all commands, so
             * commands shorter than this must be padded accordingly
             */
            if (len < esp_get_tc(s) && esp_get_tc(s) <= ESP_FIFO_SZ) {
                while (fifo8_num_used(&s->fifo) < ESP_FIFO_SZ) {
                    esp_fifo_push(&s->fifo, 0);
                    len++;
                }
            }

            esp_set_tc(s, esp_get_tc(s) - len);
            s->pdma_cb = do_dma_pdma_cb;
            esp_raise_drq(s);

            /* Indicate transfer to FIFO is complete */
            s->rregs[ESP_RSTAT] |= STAT_TC;
            return;
        }
    }
    esp_set_tc(s, esp_get_tc(s) - len);
    s->async_buf += len;
    s->async_len -= len;
    if (to_device) {
        s->ti_size += len;
    } else {
        s->ti_size -= len;
    }
    if (s->async_len == 0) {
        scsi_req_continue(s->current_req);
        /*
         * If there is still data to be read from the device then
         * complete the DMA operation immediately. Otherwise defer
         * until the scsi layer has completed.
         */
        if (to_device || esp_get_tc(s) != 0 || s->ti_size == 0) {
            return;
        }
    }

    /* Partially filled a scsi buffer. Complete immediately. */
    esp_dma_done(s);
    esp_lower_drq(s);
}
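
/*
 * Non-DMA TI command: move up to a FIFO's worth of data to the target, or
 * a single byte to the initiator, raising a bus service interrupt when
 * more data remains to be transferred.
 */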
static void esp_do_nodma(ESPState *s)
{
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
    uint32_t cmdlen;
    int len;

    if (s->do_cmd) {
        cmdlen = fifo8_num_used(&s->cmdfifo);
        trace_esp_handle_ti_cmd(cmdlen);
        s->ti_size = 0;
        if ((s->rregs[ESP_RSTAT] & 7) == STAT_CD) {
            /* No command received */
            if (s->cmdfifo_cdb_offset == fifo8_num_used(&s->cmdfifo)) {
                return;
            }

            /* Command has been received */
            s->do_cmd = 0;
            do_cmd(s);
        } else {
            /*
             * Extra message out bytes received: update cmdfifo_cdb_offset
             * and then switch to command phase
             */
            s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo);
            s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
            s->rregs[ESP_RSEQ] = SEQ_CD;
            s->rregs[ESP_RINTR] |= INTR_BS;
            esp_raise_irq(s);
        }
        return;
    }

    if (!s->current_req) {
        return;
    }

    if (s->async_len == 0) {
        /* Defer until data is available.  */
        return;
    }

    if (to_device) {
        len = MIN(fifo8_num_used(&s->fifo), ESP_FIFO_SZ);
        esp_fifo_pop_buf(&s->fifo, s->async_buf, len);
        s->async_buf += len;
        s->async_len -= len;
        s->ti_size += len;
    } else {
        if (fifo8_is_empty(&s->fifo)) {
            fifo8_push(&s->fifo, s->async_buf[0]);
            s->async_buf++;
            s->async_len--;
            s->ti_size--;
        }
    }

    if (s->async_len == 0) {
        scsi_req_continue(s->current_req);
        return;
    }

    s->rregs[ESP_RINTR] |= INTR_BS;
    esp_raise_irq(s);
}
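
/*
 * Called by the SCSI layer when the current request has finished: latch
 * the status byte and, once all data has been transferred, switch to the
 * STATUS phase.
 */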
void esp_command_complete(SCSIRequest *req, size_t resid)
{
    ESPState *s = req->hba_private;
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);

    trace_esp_command_complete();

    /*
     * Non-DMA transfers from the target will leave the last byte in
     * the FIFO so don't reset ti_size in this case
     */
    if (s->dma || to_device) {
        if (s->ti_size != 0) {
            trace_esp_command_complete_unexpected();
        }
        s->ti_size = 0;
    }

    s->async_len = 0;
    if (req->status) {
        trace_esp_command_complete_fail();
    }
    s->status = req->status;

    /*
     * If the transfer is finished, switch to status phase. For non-DMA
     * transfers from the target the last byte is still in the FIFO
     */
    if (s->ti_size == 0) {
        s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
        esp_dma_done(s);
        esp_lower_drq(s);
    }

    if (s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
    }
}
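
/*
 * Called by the SCSI layer when the next chunk of request data is ready
 * (or has been consumed): record the new buffer and either continue the
 * pending TI command or defer until the guest issues one.
 */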
void esp_transfer_data(SCSIRequest *req, uint32_t len)
{
    ESPState *s = req->hba_private;
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
    uint32_t dmalen = esp_get_tc(s);

    assert(!s->do_cmd);
    trace_esp_transfer_data(dmalen, s->ti_size);
    s->async_len = len;
    s->async_buf = scsi_req_get_buf(req);

    if (!to_device && !s->data_in_ready) {
        /*
         * Initial incoming data xfer is complete so raise command
         * completion interrupt
         */
        s->data_in_ready = true;
        s->rregs[ESP_RSTAT] |= STAT_TC;
        s->rregs[ESP_RINTR] |= INTR_BS;
        esp_raise_irq(s);
    }

    if (s->ti_cmd == 0) {
        /*
         * Always perform the initial transfer upon reception of the next TI
         * command to ensure the DMA/non-DMA status of the command is correct.
         * It is not possible to use s->dma directly in the section below as
         * some OSs send non-DMA NOP commands after a DMA transfer. Hence if the
         * async data transfer is delayed then s->dma is set incorrectly.
         */
        return;
    }

    if (s->ti_cmd == (CMD_TI | CMD_DMA)) {
        if (dmalen) {
            esp_do_dma(s);
        } else if (s->ti_size <= 0) {
            /*
             * If this was the last part of a DMA transfer then the
             * completion interrupt is deferred to here.
             */
            esp_dma_done(s);
            esp_lower_drq(s);
        }
    } else if (s->ti_cmd == CMD_TI) {
        esp_do_nodma(s);
    }
}

static void handle_ti(ESPState *s)
{
    uint32_t dmalen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_ti;
        return;
    }

    s->ti_cmd = s->rregs[ESP_CMD];
    if (s->dma) {
        dmalen = esp_get_tc(s);
        trace_esp_handle_ti(dmalen);
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        esp_do_dma(s);
    } else {
        trace_esp_handle_ti(s->ti_size);
        esp_do_nodma(s);
    }
}

void esp_hard_reset(ESPState *s)
{
    memset(s->rregs, 0, ESP_REGS);
    memset(s->wregs, 0, ESP_REGS);
    s->tchi_written = 0;
    s->ti_size = 0;
    s->async_len = 0;
    fifo8_reset(&s->fifo);
    fifo8_reset(&s->cmdfifo);
    s->dma = 0;
    s->do_cmd = 0;
    s->dma_cb = NULL;

    s->rregs[ESP_CFG1] = 7;
}

static void esp_soft_reset(ESPState *s)
{
    qemu_irq_lower(s->irq);
    qemu_irq_lower(s->irq_data);
    esp_hard_reset(s);
}

static void parent_esp_reset(ESPState *s, int irq, int level)
{
    if (level) {
        esp_soft_reset(s);
    }
}
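
/* Guest-visible register read dispatch (also used by the PCI variant) */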
uint64_t esp_reg_read(ESPState *s, uint32_t saddr)
{
    uint32_t val;

    switch (saddr) {
    case ESP_FIFO:
        if (s->dma_memory_read && s->dma_memory_write &&
                (s->rregs[ESP_RSTAT] & STAT_PIO_MASK) == 0) {
            /* Data out. */
            qemu_log_mask(LOG_UNIMP, "esp: PIO data read not implemented\n");
            s->rregs[ESP_FIFO] = 0;
        } else {
            if ((s->rregs[ESP_RSTAT] & 0x7) == STAT_DI) {
                if (s->ti_size) {
                    esp_do_nodma(s);
                } else {
                    /*
                     * The last byte of a non-DMA transfer has been read out
                     * of the FIFO so switch to status phase
                     */
                    s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
                }
            }
            s->rregs[ESP_FIFO] = esp_fifo_pop(&s->fifo);
        }
        val = s->rregs[ESP_FIFO];
        break;
    case ESP_RINTR:
        /*
         * Clear sequence step, interrupt register and all status bits
         * except TC
         */
        val = s->rregs[ESP_RINTR];
        s->rregs[ESP_RINTR] = 0;
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        /*
         * According to the datasheet ESP_RSEQ should be cleared, but as the
         * emulation currently defers information transfers to the next TI
         * command leave it for now so that pedantic guests such as the old
         * Linux 2.6 driver see the correct flags before the next SCSI phase
         * transition.
         *
         * s->rregs[ESP_RSEQ] = SEQ_0;
         */
        esp_lower_irq(s);
        break;
    case ESP_TCHI:
        /* Return the unique id if the value has never been written */
        if (!s->tchi_written) {
            val = s->chip_id;
        } else {
            val = s->rregs[saddr];
        }
        break;
    case ESP_RFLAGS:
        /* Bottom 5 bits indicate number of bytes in FIFO */
        val = fifo8_num_used(&s->fifo);
        break;
    default:
        val = s->rregs[saddr];
        break;
    }

    trace_esp_mem_readb(saddr, val);
    return val;
}

void esp_reg_write(ESPState *s, uint32_t saddr, uint64_t val)
{
    trace_esp_mem_writeb(saddr, s->wregs[saddr], val);
    switch (saddr) {
    case ESP_TCHI:
        s->tchi_written = true;
        /* fall through */
    case ESP_TCLO:
    case ESP_TCMID:
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        break;
    case ESP_FIFO:
        if (s->do_cmd) {
            esp_fifo_push(&s->cmdfifo, val);

            /*
             * If any unexpected message out/command phase data is
             * transferred using non-DMA, raise the interrupt
             */
            if (s->rregs[ESP_CMD] == CMD_TI) {
                s->rregs[ESP_RINTR] |= INTR_BS;
                esp_raise_irq(s);
            }
        } else {
            esp_fifo_push(&s->fifo, val);
        }
        break;
    case ESP_CMD:
        s->rregs[saddr] = val;
        if (val & CMD_DMA) {
            s->dma = 1;
            /* Reload DMA counter. */
            if (esp_get_stc(s) == 0) {
                esp_set_tc(s, 0x10000);
            } else {
                esp_set_tc(s, esp_get_stc(s));
            }
        } else {
            s->dma = 0;
        }
        switch (val & CMD_CMD) {
        case CMD_NOP:
            trace_esp_mem_writeb_cmd_nop(val);
            break;
        case CMD_FLUSH:
            trace_esp_mem_writeb_cmd_flush(val);
            fifo8_reset(&s->fifo);
            break;
        case CMD_RESET:
            trace_esp_mem_writeb_cmd_reset(val);
            esp_soft_reset(s);
            break;
        case CMD_BUSRESET:
            trace_esp_mem_writeb_cmd_bus_reset(val);
            if (!(s->wregs[ESP_CFG1] & CFG1_RESREPT)) {
                s->rregs[ESP_RINTR] |= INTR_RST;
                esp_raise_irq(s);
            }
            break;
        case CMD_TI:
            trace_esp_mem_writeb_cmd_ti(val);
            handle_ti(s);
            break;
        case CMD_ICCS:
            trace_esp_mem_writeb_cmd_iccs(val);
            write_response(s);
            s->rregs[ESP_RINTR] |= INTR_FC;
            s->rregs[ESP_RSTAT] |= STAT_MI;
            break;
        case CMD_MSGACC:
            trace_esp_mem_writeb_cmd_msgacc(val);
            s->rregs[ESP_RINTR] |= INTR_DC;
            s->rregs[ESP_RSEQ] = 0;
            s->rregs[ESP_RFLAGS] = 0;
            esp_raise_irq(s);
            break;
        case CMD_PAD:
            trace_esp_mem_writeb_cmd_pad(val);
            s->rregs[ESP_RSTAT] = STAT_TC;
            s->rregs[ESP_RINTR] |= INTR_FC;
            s->rregs[ESP_RSEQ] = 0;
            break;
        case CMD_SATN:
            trace_esp_mem_writeb_cmd_satn(val);
            break;
        case CMD_RSTATN:
            trace_esp_mem_writeb_cmd_rstatn(val);
            break;
        case CMD_SEL:
            trace_esp_mem_writeb_cmd_sel(val);
            handle_s_without_atn(s);
            break;
        case CMD_SELATN:
            trace_esp_mem_writeb_cmd_selatn(val);
            handle_satn(s);
            break;
        case CMD_SELATNS:
            trace_esp_mem_writeb_cmd_selatns(val);
            handle_satn_stop(s);
            break;
        case CMD_ENSEL:
            trace_esp_mem_writeb_cmd_ensel(val);
            s->rregs[ESP_RINTR] = 0;
            break;
        case CMD_DISSEL:
            trace_esp_mem_writeb_cmd_dissel(val);
            s->rregs[ESP_RINTR] = 0;
            esp_raise_irq(s);
            break;
        default:
            trace_esp_error_unhandled_command(val);
            break;
        }
        break;
    case ESP_WBUSID ... ESP_WSYNO:
        break;
    case ESP_CFG1:
    case ESP_CFG2: case ESP_CFG3:
    case ESP_RES3: case ESP_RES4:
        s->rregs[saddr] = val;
        break;
    case ESP_WCCF ... ESP_WTEST:
        break;
    default:
        trace_esp_error_invalid_write(val, saddr);
        return;
    }
    s->wregs[saddr] = val;
}

static bool esp_mem_accepts(void *opaque, hwaddr addr,
                            unsigned size, bool is_write,
                            MemTxAttrs attrs)
{
    return (size == 1) || (is_write && size == 4);
}
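
/*
 * Migration support: mig_version_id records the vmstate version in use so
 * that the esp_is_*() predicates can select the right subset of fields;
 * pre-version-5 ti_buf/cmdbuf contents are replayed into the Fifo8 state
 * on load.
 */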
static bool esp_is_before_version_5(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);

    version_id = MIN(version_id, s->mig_version_id);
    return version_id < 5;
}

static bool esp_is_version_5(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);

    version_id = MIN(version_id, s->mig_version_id);
    return version_id >= 5;
}

static bool esp_is_version_6(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);

    version_id = MIN(version_id, s->mig_version_id);
    return version_id >= 6;
}

int esp_pre_save(void *opaque)
{
    ESPState *s = ESP(object_resolve_path_component(
                      OBJECT(opaque), "esp"));

    s->mig_version_id = vmstate_esp.version_id;
    return 0;
}

static int esp_post_load(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);
    int len, i;

    version_id = MIN(version_id, s->mig_version_id);

    if (version_id < 5) {
        esp_set_tc(s, s->mig_dma_left);

        /* Migrate ti_buf to fifo */
        len = s->mig_ti_wptr - s->mig_ti_rptr;
        for (i = 0; i < len; i++) {
            fifo8_push(&s->fifo, s->mig_ti_buf[i]);
        }

        /* Migrate cmdbuf to cmdfifo */
        for (i = 0; i < s->mig_cmdlen; i++) {
            fifo8_push(&s->cmdfifo, s->mig_cmdbuf[i]);
        }
    }

    s->mig_version_id = vmstate_esp.version_id;
    return 0;
}

const VMStateDescription vmstate_esp = {
    .name = "esp",
    .version_id = 6,
    .minimum_version_id = 3,
    .post_load = esp_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_BUFFER(rregs, ESPState),
        VMSTATE_BUFFER(wregs, ESPState),
        VMSTATE_INT32(ti_size, ESPState),
        VMSTATE_UINT32_TEST(mig_ti_rptr, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32_TEST(mig_ti_wptr, ESPState, esp_is_before_version_5),
        VMSTATE_BUFFER_TEST(mig_ti_buf, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32(status, ESPState),
        VMSTATE_UINT32_TEST(mig_deferred_status, ESPState,
                            esp_is_before_version_5),
        VMSTATE_BOOL_TEST(mig_deferred_complete, ESPState,
                          esp_is_before_version_5),
        VMSTATE_UINT32(dma, ESPState),
        VMSTATE_STATIC_BUFFER(mig_cmdbuf, ESPState, 0,
                              esp_is_before_version_5, 0, 16),
        VMSTATE_STATIC_BUFFER(mig_cmdbuf, ESPState, 4,
                              esp_is_before_version_5, 16,
                              sizeof(typeof_field(ESPState, mig_cmdbuf))),
        VMSTATE_UINT32_TEST(mig_cmdlen, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32(do_cmd, ESPState),
        VMSTATE_UINT32_TEST(mig_dma_left, ESPState, esp_is_before_version_5),
        VMSTATE_BOOL_TEST(data_in_ready, ESPState, esp_is_version_5),
        VMSTATE_UINT8_TEST(cmdfifo_cdb_offset, ESPState, esp_is_version_5),
        VMSTATE_FIFO8_TEST(fifo, ESPState, esp_is_version_5),
        VMSTATE_FIFO8_TEST(cmdfifo, ESPState, esp_is_version_5),
        VMSTATE_UINT8_TEST(ti_cmd, ESPState, esp_is_version_5),
        VMSTATE_UINT8_TEST(lun, ESPState, esp_is_version_6),
        VMSTATE_END_OF_LIST()
    }
};

static void sysbus_esp_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);
    uint32_t saddr;

    saddr = addr >> sysbus->it_shift;
    esp_reg_write(s, saddr, val);
}

static uint64_t sysbus_esp_mem_read(void *opaque, hwaddr addr,
                                    unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);
    uint32_t saddr;

    saddr = addr >> sysbus->it_shift;
    return esp_reg_read(s, saddr);
}

static const MemoryRegionOps sysbus_esp_mem_ops = {
    .read = sysbus_esp_mem_read,
    .write = sysbus_esp_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.accepts = esp_mem_accepts,
};

static void sysbus_esp_pdma_write(void *opaque, hwaddr addr,
                                  uint64_t val, unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);

    trace_esp_pdma_write(size);

    switch (size) {
    case 1:
        esp_pdma_write(s, val);
        break;
    case 2:
        esp_pdma_write(s, val >> 8);
        esp_pdma_write(s, val);
        break;
    }
    s->pdma_cb(s);
}

static uint64_t sysbus_esp_pdma_read(void *opaque, hwaddr addr,
                                     unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);
    uint64_t val = 0;

    trace_esp_pdma_read(size);

    switch (size) {
    case 1:
        val = esp_pdma_read(s);
        break;
    case 2:
        val = esp_pdma_read(s);
        val = (val << 8) | esp_pdma_read(s);
        break;
    }
    if (fifo8_num_used(&s->fifo) < 2) {
        s->pdma_cb(s);
    }
    return val;
}

static const MemoryRegionOps sysbus_esp_pdma_ops = {
    .read = sysbus_esp_pdma_read,
    .write = sysbus_esp_pdma_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.min_access_size = 1,
    .valid.max_access_size = 4,
    .impl.min_access_size = 1,
    .impl.max_access_size = 2,
};

static const struct SCSIBusInfo esp_scsi_info = {
    .tcq = false,
    .max_target = ESP_MAX_DEVS,
    .max_lun = 7,

    .transfer_data = esp_transfer_data,
    .complete = esp_command_complete,
    .cancel = esp_request_cancelled
};

static void sysbus_esp_gpio_demux(void *opaque, int irq, int level)
{
    SysBusESPState *sysbus = SYSBUS_ESP(opaque);
    ESPState *s = ESP(&sysbus->esp);

    switch (irq) {
    case 0:
        parent_esp_reset(s, irq, level);
        break;
    case 1:
        esp_dma_enable(s, irq, level);
        break;
    }
}

static void sysbus_esp_realize(DeviceState *dev, Error **errp)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
    SysBusESPState *sysbus = SYSBUS_ESP(dev);
    ESPState *s = ESP(&sysbus->esp);

    if (!qdev_realize(DEVICE(s), NULL, errp)) {
        return;
    }

    sysbus_init_irq(sbd, &s->irq);
    sysbus_init_irq(sbd, &s->irq_data);
    assert(sysbus->it_shift != -1);

    s->chip_id = TCHI_FAS100A;
    memory_region_init_io(&sysbus->iomem, OBJECT(sysbus), &sysbus_esp_mem_ops,
                          sysbus, "esp-regs", ESP_REGS << sysbus->it_shift);
    sysbus_init_mmio(sbd, &sysbus->iomem);
    memory_region_init_io(&sysbus->pdma, OBJECT(sysbus), &sysbus_esp_pdma_ops,
                          sysbus, "esp-pdma", 4);
    sysbus_init_mmio(sbd, &sysbus->pdma);

    qdev_init_gpio_in(dev, sysbus_esp_gpio_demux, 2);

    scsi_bus_init(&s->bus, sizeof(s->bus), dev, &esp_scsi_info);
}

static void sysbus_esp_hard_reset(DeviceState *dev)
{
    SysBusESPState *sysbus = SYSBUS_ESP(dev);
    ESPState *s = ESP(&sysbus->esp);

    esp_hard_reset(s);
}

static void sysbus_esp_init(Object *obj)
{
    SysBusESPState *sysbus = SYSBUS_ESP(obj);

    object_initialize_child(obj, "esp", &sysbus->esp, TYPE_ESP);
}

static const VMStateDescription vmstate_sysbus_esp_scsi = {
    .name = "sysbusespscsi",
    .version_id = 2,
    .minimum_version_id = 1,
    .pre_save = esp_pre_save,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8_V(esp.mig_version_id, SysBusESPState, 2),
        VMSTATE_STRUCT(esp, SysBusESPState, 0, vmstate_esp, ESPState),
        VMSTATE_END_OF_LIST()
    }
};

static void sysbus_esp_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = sysbus_esp_realize;
    dc->reset = sysbus_esp_hard_reset;
    dc->vmsd = &vmstate_sysbus_esp_scsi;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}

static const TypeInfo sysbus_esp_info = {
    .name = TYPE_SYSBUS_ESP,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_init = sysbus_esp_init,
    .instance_size = sizeof(SysBusESPState),
    .class_init = sysbus_esp_class_init,
};

static void esp_finalize(Object *obj)
{
    ESPState *s = ESP(obj);

    fifo8_destroy(&s->fifo);
    fifo8_destroy(&s->cmdfifo);
}

static void esp_init(Object *obj)
{
    ESPState *s = ESP(obj);

    fifo8_create(&s->fifo, ESP_FIFO_SZ);
    fifo8_create(&s->cmdfifo, ESP_CMDFIFO_SZ);
}

static void esp_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    /* internal device for sysbusesp/pciespscsi, not user-creatable */
    dc->user_creatable = false;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}

static const TypeInfo esp_info = {
    .name = TYPE_ESP,
    .parent = TYPE_DEVICE,
    .instance_init = esp_init,
    .instance_finalize = esp_finalize,
    .instance_size = sizeof(ESPState),
    .class_init = esp_class_init,
};

static void esp_register_types(void)
{
    type_register_static(&sysbus_esp_info);
    type_register_static(&esp_info);
}

type_init(esp_register_types)