2 * QEMU ESP/NCR53C9x emulation
4 * Copyright (c) 2005-2006 Fabrice Bellard
5 * Copyright (c) 2012 Herve Poussineau
7 * Permission is hereby granted, free of charge, to any person obtaining a copy
8 * of this software and associated documentation files (the "Software"), to deal
9 * in the Software without restriction, including without limitation the rights
10 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11 * copies of the Software, and to permit persons to whom the Software is
12 * furnished to do so, subject to the following conditions:
14 * The above copyright notice and this permission notice shall be included in
15 * all copies or substantial portions of the Software.
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
33 * On Sparc32, this is the ESP (NCR53C90) part of chip STP2000 (Master I/O),
34 * also produced as NCR89C100. See
35 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR89C100.txt
37 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR53C9X.txt
40 static void esp_raise_irq(ESPState
*s
)
42 if (!(s
->rregs
[ESP_RSTAT
] & STAT_INT
)) {
43 s
->rregs
[ESP_RSTAT
] |= STAT_INT
;
44 qemu_irq_raise(s
->irq
);
45 trace_esp_raise_irq();
49 static void esp_lower_irq(ESPState
*s
)
51 if (s
->rregs
[ESP_RSTAT
] & STAT_INT
) {
52 s
->rregs
[ESP_RSTAT
] &= ~STAT_INT
;
53 qemu_irq_lower(s
->irq
);
54 trace_esp_lower_irq();
58 void esp_dma_enable(ESPState
*s
, int irq
, int level
)
62 trace_esp_dma_enable();
68 trace_esp_dma_disable();
73 void esp_request_cancelled(SCSIRequest
*req
)
75 ESPState
*s
= req
->hba_private
;
77 if (req
== s
->current_req
) {
78 scsi_req_unref(s
->current_req
);
79 s
->current_req
= NULL
;
80 s
->current_dev
= NULL
;
84 static uint32_t get_cmd(ESPState
*s
, uint8_t *buf
)
89 target
= s
->wregs
[ESP_WBUSID
] & BUSID_DID
;
91 dmalen
= s
->rregs
[ESP_TCLO
] | (s
->rregs
[ESP_TCMID
] << 8);
92 s
->dma_memory_read(s
->dma_opaque
, buf
, dmalen
);
95 memcpy(buf
, s
->ti_buf
, dmalen
);
98 trace_esp_get_cmd(dmalen
, target
);
104 if (s
->current_req
) {
105 /* Started a new command before the old one finished. Cancel it. */
106 scsi_req_cancel(s
->current_req
);
110 s
->current_dev
= scsi_device_find(&s
->bus
, 0, target
, 0);
111 if (!s
->current_dev
) {
113 s
->rregs
[ESP_RSTAT
] = 0;
114 s
->rregs
[ESP_RINTR
] = INTR_DC
;
115 s
->rregs
[ESP_RSEQ
] = SEQ_0
;
122 static void do_busid_cmd(ESPState
*s
, uint8_t *buf
, uint8_t busid
)
126 SCSIDevice
*current_lun
;
128 trace_esp_do_busid_cmd(busid
);
130 current_lun
= scsi_device_find(&s
->bus
, 0, s
->current_dev
->id
, lun
);
131 s
->current_req
= scsi_req_new(current_lun
, 0, lun
, buf
, s
);
132 datalen
= scsi_req_enqueue(s
->current_req
);
133 s
->ti_size
= datalen
;
135 s
->rregs
[ESP_RSTAT
] = STAT_TC
;
139 s
->rregs
[ESP_RSTAT
] |= STAT_DI
;
141 s
->rregs
[ESP_RSTAT
] |= STAT_DO
;
143 scsi_req_continue(s
->current_req
);
145 s
->rregs
[ESP_RINTR
] = INTR_BS
| INTR_FC
;
146 s
->rregs
[ESP_RSEQ
] = SEQ_CD
;
150 static void do_cmd(ESPState
*s
, uint8_t *buf
)
152 uint8_t busid
= buf
[0];
154 do_busid_cmd(s
, &buf
[1], busid
);
157 static void handle_satn(ESPState
*s
)
162 if (s
->dma
&& !s
->dma_enabled
) {
163 s
->dma_cb
= handle_satn
;
166 len
= get_cmd(s
, buf
);
171 static void handle_s_without_atn(ESPState
*s
)
176 if (s
->dma
&& !s
->dma_enabled
) {
177 s
->dma_cb
= handle_s_without_atn
;
180 len
= get_cmd(s
, buf
);
182 do_busid_cmd(s
, buf
, 0);
186 static void handle_satn_stop(ESPState
*s
)
188 if (s
->dma
&& !s
->dma_enabled
) {
189 s
->dma_cb
= handle_satn_stop
;
192 s
->cmdlen
= get_cmd(s
, s
->cmdbuf
);
194 trace_esp_handle_satn_stop(s
->cmdlen
);
196 s
->rregs
[ESP_RSTAT
] = STAT_TC
| STAT_CD
;
197 s
->rregs
[ESP_RINTR
] = INTR_BS
| INTR_FC
;
198 s
->rregs
[ESP_RSEQ
] = SEQ_CD
;
203 static void write_response(ESPState
*s
)
205 trace_esp_write_response(s
->status
);
206 s
->ti_buf
[0] = s
->status
;
209 s
->dma_memory_write(s
->dma_opaque
, s
->ti_buf
, 2);
210 s
->rregs
[ESP_RSTAT
] = STAT_TC
| STAT_ST
;
211 s
->rregs
[ESP_RINTR
] = INTR_BS
| INTR_FC
;
212 s
->rregs
[ESP_RSEQ
] = SEQ_CD
;
217 s
->rregs
[ESP_RFLAGS
] = 2;
222 static void esp_dma_done(ESPState
*s
)
224 s
->rregs
[ESP_RSTAT
] |= STAT_TC
;
225 s
->rregs
[ESP_RINTR
] = INTR_BS
;
226 s
->rregs
[ESP_RSEQ
] = 0;
227 s
->rregs
[ESP_RFLAGS
] = 0;
228 s
->rregs
[ESP_TCLO
] = 0;
229 s
->rregs
[ESP_TCMID
] = 0;
233 static void esp_do_dma(ESPState
*s
)
238 to_device
= (s
->ti_size
< 0);
241 trace_esp_do_dma(s
->cmdlen
, len
);
242 s
->dma_memory_read(s
->dma_opaque
, &s
->cmdbuf
[s
->cmdlen
], len
);
246 do_cmd(s
, s
->cmdbuf
);
249 if (s
->async_len
== 0) {
250 /* Defer until data is available. */
253 if (len
> s
->async_len
) {
257 s
->dma_memory_read(s
->dma_opaque
, s
->async_buf
, len
);
259 s
->dma_memory_write(s
->dma_opaque
, s
->async_buf
, len
);
268 if (s
->async_len
== 0) {
269 scsi_req_continue(s
->current_req
);
270 /* If there is still data to be read from the device then
271 complete the DMA operation immediately. Otherwise defer
272 until the scsi layer has completed. */
273 if (to_device
|| s
->dma_left
!= 0 || s
->ti_size
== 0) {
278 /* Partially filled a scsi buffer. Complete immediately. */
282 void esp_command_complete(SCSIRequest
*req
, uint32_t status
,
285 ESPState
*s
= req
->hba_private
;
287 trace_esp_command_complete();
288 if (s
->ti_size
!= 0) {
289 trace_esp_command_complete_unexpected();
295 trace_esp_command_complete_fail();
298 s
->rregs
[ESP_RSTAT
] = STAT_ST
;
300 if (s
->current_req
) {
301 scsi_req_unref(s
->current_req
);
302 s
->current_req
= NULL
;
303 s
->current_dev
= NULL
;
307 void esp_transfer_data(SCSIRequest
*req
, uint32_t len
)
309 ESPState
*s
= req
->hba_private
;
311 trace_esp_transfer_data(s
->dma_left
, s
->ti_size
);
313 s
->async_buf
= scsi_req_get_buf(req
);
316 } else if (s
->dma_counter
!= 0 && s
->ti_size
<= 0) {
317 /* If this was the last part of a DMA transfer then the
318 completion interrupt is deferred to here. */
323 static void handle_ti(ESPState
*s
)
325 uint32_t dmalen
, minlen
;
327 if (s
->dma
&& !s
->dma_enabled
) {
328 s
->dma_cb
= handle_ti
;
332 dmalen
= s
->rregs
[ESP_TCLO
] | (s
->rregs
[ESP_TCMID
] << 8);
336 s
->dma_counter
= dmalen
;
339 minlen
= (dmalen
< 32) ? dmalen
: 32;
340 else if (s
->ti_size
< 0)
341 minlen
= (dmalen
< -s
->ti_size
) ? dmalen
: -s
->ti_size
;
343 minlen
= (dmalen
< s
->ti_size
) ? dmalen
: s
->ti_size
;
344 trace_esp_handle_ti(minlen
);
346 s
->dma_left
= minlen
;
347 s
->rregs
[ESP_RSTAT
] &= ~STAT_TC
;
349 } else if (s
->do_cmd
) {
350 trace_esp_handle_ti_cmd(s
->cmdlen
);
354 do_cmd(s
, s
->cmdbuf
);
359 void esp_hard_reset(ESPState
*s
)
361 memset(s
->rregs
, 0, ESP_REGS
);
362 memset(s
->wregs
, 0, ESP_REGS
);
363 s
->rregs
[ESP_TCHI
] = s
->chip_id
;
371 s
->rregs
[ESP_CFG1
] = 7;
374 static void esp_soft_reset(ESPState
*s
)
376 qemu_irq_lower(s
->irq
);
380 static void parent_esp_reset(ESPState
*s
, int irq
, int level
)
387 uint64_t esp_reg_read(ESPState
*s
, uint32_t saddr
)
391 trace_esp_mem_readb(saddr
, s
->rregs
[saddr
]);
394 if (s
->ti_size
> 0) {
396 if ((s
->rregs
[ESP_RSTAT
] & STAT_PIO_MASK
) == 0) {
398 qemu_log_mask(LOG_UNIMP
,
399 "esp: PIO data read not implemented\n");
400 s
->rregs
[ESP_FIFO
] = 0;
402 s
->rregs
[ESP_FIFO
] = s
->ti_buf
[s
->ti_rptr
++];
406 if (s
->ti_size
== 0) {
412 /* Clear sequence step, interrupt register and all status bits
414 old_val
= s
->rregs
[ESP_RINTR
];
415 s
->rregs
[ESP_RINTR
] = 0;
416 s
->rregs
[ESP_RSTAT
] &= ~STAT_TC
;
417 s
->rregs
[ESP_RSEQ
] = SEQ_CD
;
424 return s
->rregs
[saddr
];
427 void esp_reg_write(ESPState
*s
, uint32_t saddr
, uint64_t val
)
429 trace_esp_mem_writeb(saddr
, s
->wregs
[saddr
], val
);
433 s
->rregs
[ESP_RSTAT
] &= ~STAT_TC
;
437 s
->cmdbuf
[s
->cmdlen
++] = val
& 0xff;
438 } else if (s
->ti_size
== TI_BUFSZ
- 1) {
439 trace_esp_error_fifo_overrun();
442 s
->ti_buf
[s
->ti_wptr
++] = val
& 0xff;
446 s
->rregs
[saddr
] = val
;
449 /* Reload DMA counter. */
450 s
->rregs
[ESP_TCLO
] = s
->wregs
[ESP_TCLO
];
451 s
->rregs
[ESP_TCMID
] = s
->wregs
[ESP_TCMID
];
455 switch(val
& CMD_CMD
) {
457 trace_esp_mem_writeb_cmd_nop(val
);
460 trace_esp_mem_writeb_cmd_flush(val
);
462 s
->rregs
[ESP_RINTR
] = INTR_FC
;
463 s
->rregs
[ESP_RSEQ
] = 0;
464 s
->rregs
[ESP_RFLAGS
] = 0;
467 trace_esp_mem_writeb_cmd_reset(val
);
471 trace_esp_mem_writeb_cmd_bus_reset(val
);
472 s
->rregs
[ESP_RINTR
] = INTR_RST
;
473 if (!(s
->wregs
[ESP_CFG1
] & CFG1_RESREPT
)) {
481 trace_esp_mem_writeb_cmd_iccs(val
);
483 s
->rregs
[ESP_RINTR
] = INTR_FC
;
484 s
->rregs
[ESP_RSTAT
] |= STAT_MI
;
487 trace_esp_mem_writeb_cmd_msgacc(val
);
488 s
->rregs
[ESP_RINTR
] = INTR_DC
;
489 s
->rregs
[ESP_RSEQ
] = 0;
490 s
->rregs
[ESP_RFLAGS
] = 0;
494 trace_esp_mem_writeb_cmd_pad(val
);
495 s
->rregs
[ESP_RSTAT
] = STAT_TC
;
496 s
->rregs
[ESP_RINTR
] = INTR_FC
;
497 s
->rregs
[ESP_RSEQ
] = 0;
500 trace_esp_mem_writeb_cmd_satn(val
);
503 trace_esp_mem_writeb_cmd_rstatn(val
);
506 trace_esp_mem_writeb_cmd_sel(val
);
507 handle_s_without_atn(s
);
510 trace_esp_mem_writeb_cmd_selatn(val
);
514 trace_esp_mem_writeb_cmd_selatns(val
);
518 trace_esp_mem_writeb_cmd_ensel(val
);
519 s
->rregs
[ESP_RINTR
] = 0;
522 trace_esp_mem_writeb_cmd_dissel(val
);
523 s
->rregs
[ESP_RINTR
] = 0;
527 trace_esp_error_unhandled_command(val
);
531 case ESP_WBUSID
... ESP_WSYNO
:
534 s
->rregs
[saddr
] = val
;
536 case ESP_WCCF
... ESP_WTEST
:
538 case ESP_CFG2
... ESP_RES4
:
539 s
->rregs
[saddr
] = val
;
542 trace_esp_error_invalid_write(val
, saddr
);
545 s
->wregs
[saddr
] = val
;
548 static bool esp_mem_accepts(void *opaque
, target_phys_addr_t addr
,
549 unsigned size
, bool is_write
)
551 return (size
== 1) || (is_write
&& size
== 4);
554 const VMStateDescription vmstate_esp
= {
557 .minimum_version_id
= 3,
558 .minimum_version_id_old
= 3,
559 .fields
= (VMStateField
[]) {
560 VMSTATE_BUFFER(rregs
, ESPState
),
561 VMSTATE_BUFFER(wregs
, ESPState
),
562 VMSTATE_INT32(ti_size
, ESPState
),
563 VMSTATE_UINT32(ti_rptr
, ESPState
),
564 VMSTATE_UINT32(ti_wptr
, ESPState
),
565 VMSTATE_BUFFER(ti_buf
, ESPState
),
566 VMSTATE_UINT32(status
, ESPState
),
567 VMSTATE_UINT32(dma
, ESPState
),
568 VMSTATE_BUFFER(cmdbuf
, ESPState
),
569 VMSTATE_UINT32(cmdlen
, ESPState
),
570 VMSTATE_UINT32(do_cmd
, ESPState
),
571 VMSTATE_UINT32(dma_left
, ESPState
),
572 VMSTATE_END_OF_LIST()
583 static void sysbus_esp_mem_write(void *opaque
, target_phys_addr_t addr
,
584 uint64_t val
, unsigned int size
)
586 SysBusESPState
*sysbus
= opaque
;
589 saddr
= addr
>> sysbus
->it_shift
;
590 esp_reg_write(&sysbus
->esp
, saddr
, val
);
593 static uint64_t sysbus_esp_mem_read(void *opaque
, target_phys_addr_t addr
,
596 SysBusESPState
*sysbus
= opaque
;
599 saddr
= addr
>> sysbus
->it_shift
;
600 return esp_reg_read(&sysbus
->esp
, saddr
);
603 static const MemoryRegionOps sysbus_esp_mem_ops
= {
604 .read
= sysbus_esp_mem_read
,
605 .write
= sysbus_esp_mem_write
,
606 .endianness
= DEVICE_NATIVE_ENDIAN
,
607 .valid
.accepts
= esp_mem_accepts
,
610 void esp_init(target_phys_addr_t espaddr
, int it_shift
,
611 ESPDMAMemoryReadWriteFunc dma_memory_read
,
612 ESPDMAMemoryReadWriteFunc dma_memory_write
,
613 void *dma_opaque
, qemu_irq irq
, qemu_irq
*reset
,
614 qemu_irq
*dma_enable
)
618 SysBusESPState
*sysbus
;
621 dev
= qdev_create(NULL
, "esp");
622 sysbus
= DO_UPCAST(SysBusESPState
, busdev
.qdev
, dev
);
624 esp
->dma_memory_read
= dma_memory_read
;
625 esp
->dma_memory_write
= dma_memory_write
;
626 esp
->dma_opaque
= dma_opaque
;
627 sysbus
->it_shift
= it_shift
;
628 /* XXX for now until rc4030 has been changed to use DMA enable signal */
629 esp
->dma_enabled
= 1;
630 qdev_init_nofail(dev
);
631 s
= sysbus_from_qdev(dev
);
632 sysbus_connect_irq(s
, 0, irq
);
633 sysbus_mmio_map(s
, 0, espaddr
);
634 *reset
= qdev_get_gpio_in(dev
, 0);
635 *dma_enable
= qdev_get_gpio_in(dev
, 1);
638 static const struct SCSIBusInfo esp_scsi_info
= {
640 .max_target
= ESP_MAX_DEVS
,
643 .transfer_data
= esp_transfer_data
,
644 .complete
= esp_command_complete
,
645 .cancel
= esp_request_cancelled
648 static void sysbus_esp_gpio_demux(void *opaque
, int irq
, int level
)
650 DeviceState
*d
= opaque
;
651 SysBusESPState
*sysbus
= container_of(d
, SysBusESPState
, busdev
.qdev
);
652 ESPState
*s
= &sysbus
->esp
;
656 parent_esp_reset(s
, irq
, level
);
659 esp_dma_enable(opaque
, irq
, level
);
664 static int sysbus_esp_init(SysBusDevice
*dev
)
666 SysBusESPState
*sysbus
= FROM_SYSBUS(SysBusESPState
, dev
);
667 ESPState
*s
= &sysbus
->esp
;
669 sysbus_init_irq(dev
, &s
->irq
);
670 assert(sysbus
->it_shift
!= -1);
672 s
->chip_id
= TCHI_FAS100A
;
673 memory_region_init_io(&sysbus
->iomem
, &sysbus_esp_mem_ops
, sysbus
,
674 "esp", ESP_REGS
<< sysbus
->it_shift
);
675 sysbus_init_mmio(dev
, &sysbus
->iomem
);
677 qdev_init_gpio_in(&dev
->qdev
, sysbus_esp_gpio_demux
, 2);
679 scsi_bus_new(&s
->bus
, &dev
->qdev
, &esp_scsi_info
);
680 return scsi_bus_legacy_handle_cmdline(&s
->bus
);
683 static void sysbus_esp_hard_reset(DeviceState
*dev
)
685 SysBusESPState
*sysbus
= DO_UPCAST(SysBusESPState
, busdev
.qdev
, dev
);
686 esp_hard_reset(&sysbus
->esp
);
689 static const VMStateDescription vmstate_sysbus_esp_scsi
= {
690 .name
= "sysbusespscsi",
692 .minimum_version_id
= 0,
693 .minimum_version_id_old
= 0,
694 .fields
= (VMStateField
[]) {
695 VMSTATE_STRUCT(esp
, SysBusESPState
, 0, vmstate_esp
, ESPState
),
696 VMSTATE_END_OF_LIST()
700 static void sysbus_esp_class_init(ObjectClass
*klass
, void *data
)
702 DeviceClass
*dc
= DEVICE_CLASS(klass
);
703 SysBusDeviceClass
*k
= SYS_BUS_DEVICE_CLASS(klass
);
705 k
->init
= sysbus_esp_init
;
706 dc
->reset
= sysbus_esp_hard_reset
;
707 dc
->vmsd
= &vmstate_sysbus_esp_scsi
;
710 static const TypeInfo sysbus_esp_info
= {
712 .parent
= TYPE_SYS_BUS_DEVICE
,
713 .instance_size
= sizeof(SysBusESPState
),
714 .class_init
= sysbus_esp_class_init
,
/* Am53c974 PCI DMA engine register indices (32-bit registers at BAR0+0x40).
 * NOTE(review): indices 0x0-0x5 and 0x7 reconstructed from corrupted source
 * (only DMA_SMDLA survived) — verify against upstream. */
#define DMA_CMD   0x0
#define DMA_STC   0x1
#define DMA_SPA   0x2
#define DMA_WBC   0x3
#define DMA_WAC   0x4
#define DMA_STAT  0x5
#define DMA_SMDLA 0x6
#define DMA_WMAC  0x7

/* DMA_CMD bit fields. */
#define DMA_CMD_MASK   0x03
#define DMA_CMD_DIAG   0x04
#define DMA_CMD_MDL    0x10
#define DMA_CMD_INTE_P 0x20
#define DMA_CMD_INTE_D 0x40
#define DMA_CMD_DIR    0x80

/* DMA_STAT bit fields. */
#define DMA_STAT_PWDN    0x01
#define DMA_STAT_ERROR   0x02
#define DMA_STAT_ABORT   0x04
#define DMA_STAT_DONE    0x08
#define DMA_STAT_SCSIINT 0x10
#define DMA_STAT_BCMBLT  0x20

/* SBAC register: when set, DMA_STAT error bits are cleared on read rather
 * than on write. */
#define SBAC_STATUS 0x1000
742 typedef struct PCIESPState
{
745 uint32_t dma_regs
[8];
750 static void esp_pci_handle_idle(PCIESPState
*pci
, uint32_t val
)
752 trace_esp_pci_dma_idle(val
);
753 esp_dma_enable(&pci
->esp
, 0, 0);
756 static void esp_pci_handle_blast(PCIESPState
*pci
, uint32_t val
)
758 trace_esp_pci_dma_blast(val
);
759 qemu_log_mask(LOG_UNIMP
, "am53c974: cmd BLAST not implemented\n");
762 static void esp_pci_handle_abort(PCIESPState
*pci
, uint32_t val
)
764 trace_esp_pci_dma_abort(val
);
765 if (pci
->esp
.current_req
) {
766 scsi_req_cancel(pci
->esp
.current_req
);
770 static void esp_pci_handle_start(PCIESPState
*pci
, uint32_t val
)
772 trace_esp_pci_dma_start(val
);
774 pci
->dma_regs
[DMA_WBC
] = pci
->dma_regs
[DMA_STC
];
775 pci
->dma_regs
[DMA_WAC
] = pci
->dma_regs
[DMA_SPA
];
776 pci
->dma_regs
[DMA_WMAC
] = pci
->dma_regs
[DMA_SMDLA
];
778 pci
->dma_regs
[DMA_STAT
] &= ~(DMA_STAT_BCMBLT
| DMA_STAT_SCSIINT
779 | DMA_STAT_DONE
| DMA_STAT_ABORT
780 | DMA_STAT_ERROR
| DMA_STAT_PWDN
);
782 esp_dma_enable(&pci
->esp
, 0, 1);
785 static void esp_pci_dma_write(PCIESPState
*pci
, uint32_t saddr
, uint32_t val
)
787 trace_esp_pci_dma_write(saddr
, pci
->dma_regs
[saddr
], val
);
790 pci
->dma_regs
[saddr
] = val
;
791 switch (val
& DMA_CMD_MASK
) {
793 esp_pci_handle_idle(pci
, val
);
795 case 0x1: /* BLAST */
796 esp_pci_handle_blast(pci
, val
);
798 case 0x2: /* ABORT */
799 esp_pci_handle_abort(pci
, val
);
801 case 0x3: /* START */
802 esp_pci_handle_start(pci
, val
);
804 default: /* can't happen */
811 pci
->dma_regs
[saddr
] = val
;
814 if (!(pci
->sbac
& SBAC_STATUS
)) {
815 /* clear some bits on write */
816 uint32_t mask
= DMA_STAT_ERROR
| DMA_STAT_ABORT
| DMA_STAT_DONE
;
817 pci
->dma_regs
[DMA_STAT
] &= ~(val
& mask
);
821 trace_esp_pci_error_invalid_write_dma(val
, saddr
);
826 static uint32_t esp_pci_dma_read(PCIESPState
*pci
, uint32_t saddr
)
830 val
= pci
->dma_regs
[saddr
];
831 if (saddr
== DMA_STAT
) {
832 if (pci
->esp
.rregs
[ESP_RSTAT
] & STAT_INT
) {
833 val
|= DMA_STAT_SCSIINT
;
835 if (pci
->sbac
& SBAC_STATUS
) {
836 pci
->dma_regs
[DMA_STAT
] &= ~(DMA_STAT_ERROR
| DMA_STAT_ABORT
|
841 trace_esp_pci_dma_read(saddr
, val
);
845 static void esp_pci_io_write(void *opaque
, target_phys_addr_t addr
,
846 uint64_t val
, unsigned int size
)
848 PCIESPState
*pci
= opaque
;
850 if (size
< 4 || addr
& 3) {
851 /* need to upgrade request: we only support 4-bytes accesses */
852 uint32_t current
= 0, mask
;
856 current
= pci
->esp
.wregs
[addr
>> 2];
857 } else if (addr
< 0x60) {
858 current
= pci
->dma_regs
[(addr
- 0x40) >> 2];
859 } else if (addr
< 0x74) {
863 shift
= (4 - size
) * 8;
864 mask
= (~(uint32_t)0 << shift
) >> shift
;
866 shift
= ((4 - (addr
& 3)) & 3) * 8;
868 val
|= current
& ~(mask
<< shift
);
875 esp_reg_write(&pci
->esp
, addr
>> 2, val
);
876 } else if (addr
< 0x60) {
878 esp_pci_dma_write(pci
, (addr
- 0x40) >> 2, val
);
879 } else if (addr
== 0x70) {
880 /* DMA SCSI Bus and control */
881 trace_esp_pci_sbac_write(pci
->sbac
, val
);
884 trace_esp_pci_error_invalid_write((int)addr
);
888 static uint64_t esp_pci_io_read(void *opaque
, target_phys_addr_t addr
,
891 PCIESPState
*pci
= opaque
;
896 ret
= esp_reg_read(&pci
->esp
, addr
>> 2);
897 } else if (addr
< 0x60) {
899 ret
= esp_pci_dma_read(pci
, (addr
- 0x40) >> 2);
900 } else if (addr
== 0x70) {
901 /* DMA SCSI Bus and control */
902 trace_esp_pci_sbac_read(pci
->sbac
);
906 trace_esp_pci_error_invalid_read((int)addr
);
910 /* give only requested data */
911 ret
>>= (addr
& 3) * 8;
912 ret
&= ~(~(uint64_t)0 << (8 * size
));
917 static void esp_pci_dma_memory_rw(PCIESPState
*pci
, uint8_t *buf
, int len
,
921 DMADirection expected_dir
;
923 if (pci
->dma_regs
[DMA_CMD
] & DMA_CMD_DIR
) {
924 expected_dir
= DMA_DIRECTION_FROM_DEVICE
;
926 expected_dir
= DMA_DIRECTION_TO_DEVICE
;
929 if (dir
!= expected_dir
) {
930 trace_esp_pci_error_invalid_dma_direction();
934 if (pci
->dma_regs
[DMA_STAT
] & DMA_CMD_MDL
) {
935 qemu_log_mask(LOG_UNIMP
, "am53c974: MDL transfer not implemented\n");
938 addr
= pci
->dma_regs
[DMA_SPA
];
939 if (pci
->dma_regs
[DMA_WBC
] < len
) {
940 len
= pci
->dma_regs
[DMA_WBC
];
943 pci_dma_rw(&pci
->dev
, addr
, buf
, len
, dir
);
945 /* update status registers */
946 pci
->dma_regs
[DMA_WBC
] -= len
;
947 pci
->dma_regs
[DMA_WAC
] += len
;
950 static void esp_pci_dma_memory_read(void *opaque
, uint8_t *buf
, int len
)
952 PCIESPState
*pci
= opaque
;
953 esp_pci_dma_memory_rw(pci
, buf
, len
, DMA_DIRECTION_TO_DEVICE
);
956 static void esp_pci_dma_memory_write(void *opaque
, uint8_t *buf
, int len
)
958 PCIESPState
*pci
= opaque
;
959 esp_pci_dma_memory_rw(pci
, buf
, len
, DMA_DIRECTION_FROM_DEVICE
);
962 static const MemoryRegionOps esp_pci_io_ops
= {
963 .read
= esp_pci_io_read
,
964 .write
= esp_pci_io_write
,
965 .endianness
= DEVICE_LITTLE_ENDIAN
,
967 .min_access_size
= 1,
968 .max_access_size
= 4,
972 static void esp_pci_hard_reset(DeviceState
*dev
)
974 PCIESPState
*pci
= DO_UPCAST(PCIESPState
, dev
.qdev
, dev
);
975 esp_hard_reset(&pci
->esp
);
976 pci
->dma_regs
[DMA_CMD
] &= ~(DMA_CMD_DIR
| DMA_CMD_INTE_D
| DMA_CMD_INTE_P
977 | DMA_CMD_MDL
| DMA_CMD_DIAG
| DMA_CMD_MASK
);
978 pci
->dma_regs
[DMA_WBC
] &= ~0xffff;
979 pci
->dma_regs
[DMA_WAC
] = 0xffffffff;
980 pci
->dma_regs
[DMA_STAT
] &= ~(DMA_STAT_BCMBLT
| DMA_STAT_SCSIINT
981 | DMA_STAT_DONE
| DMA_STAT_ABORT
983 pci
->dma_regs
[DMA_WMAC
] = 0xfffffffd;
986 static const VMStateDescription vmstate_esp_pci_scsi
= {
987 .name
= "pciespscsi",
989 .minimum_version_id
= 0,
990 .minimum_version_id_old
= 0,
991 .fields
= (VMStateField
[]) {
992 VMSTATE_PCI_DEVICE(dev
, PCIESPState
),
993 VMSTATE_BUFFER_UNSAFE(dma_regs
, PCIESPState
, 0, 8 * sizeof(uint32_t)),
994 VMSTATE_STRUCT(esp
, PCIESPState
, 0, vmstate_esp
, ESPState
),
995 VMSTATE_END_OF_LIST()
999 static void esp_pci_command_complete(SCSIRequest
*req
, uint32_t status
,
1002 ESPState
*s
= req
->hba_private
;
1003 PCIESPState
*pci
= container_of(s
, PCIESPState
, esp
);
1005 esp_command_complete(req
, status
, resid
);
1006 pci
->dma_regs
[DMA_WBC
] = 0;
1007 pci
->dma_regs
[DMA_STAT
] |= DMA_STAT_DONE
;
1010 static const struct SCSIBusInfo esp_pci_scsi_info
= {
1012 .max_target
= ESP_MAX_DEVS
,
1015 .transfer_data
= esp_transfer_data
,
1016 .complete
= esp_pci_command_complete
,
1017 .cancel
= esp_request_cancelled
,
1020 static int esp_pci_scsi_init(PCIDevice
*dev
)
1022 PCIESPState
*pci
= DO_UPCAST(PCIESPState
, dev
, dev
);
1023 ESPState
*s
= &pci
->esp
;
1026 pci_conf
= pci
->dev
.config
;
1028 /* Interrupt pin A */
1029 pci_conf
[PCI_INTERRUPT_PIN
] = 0x01;
1031 s
->dma_memory_read
= esp_pci_dma_memory_read
;
1032 s
->dma_memory_write
= esp_pci_dma_memory_write
;
1033 s
->dma_opaque
= pci
;
1034 s
->chip_id
= TCHI_AM53C974
;
1035 memory_region_init_io(&pci
->io
, &esp_pci_io_ops
, pci
, "esp-io", 0x80);
1037 pci_register_bar(&pci
->dev
, 0, PCI_BASE_ADDRESS_SPACE_IO
, &pci
->io
);
1038 s
->irq
= pci
->dev
.irq
[0];
1040 scsi_bus_new(&s
->bus
, &dev
->qdev
, &esp_pci_scsi_info
);
1041 if (!dev
->qdev
.hotplugged
) {
1042 return scsi_bus_legacy_handle_cmdline(&s
->bus
);
1047 static void esp_pci_scsi_uninit(PCIDevice
*d
)
1049 PCIESPState
*pci
= DO_UPCAST(PCIESPState
, dev
, d
);
1051 memory_region_destroy(&pci
->io
);
1054 static void esp_pci_class_init(ObjectClass
*klass
, void *data
)
1056 DeviceClass
*dc
= DEVICE_CLASS(klass
);
1057 PCIDeviceClass
*k
= PCI_DEVICE_CLASS(klass
);
1059 k
->init
= esp_pci_scsi_init
;
1060 k
->exit
= esp_pci_scsi_uninit
;
1061 k
->vendor_id
= PCI_VENDOR_ID_AMD
;
1062 k
->device_id
= PCI_DEVICE_ID_AMD_SCSI
;
1064 k
->class_id
= PCI_CLASS_STORAGE_SCSI
;
1065 dc
->desc
= "AMD Am53c974 PCscsi-PCI SCSI adapter";
1066 dc
->reset
= esp_pci_hard_reset
;
1067 dc
->vmsd
= &vmstate_esp_pci_scsi
;
1070 static const TypeInfo esp_pci_info
= {
1072 .parent
= TYPE_PCI_DEVICE
,
1073 .instance_size
= sizeof(PCIESPState
),
1074 .class_init
= esp_pci_class_init
,
1077 static void esp_register_types(void)
1079 type_register_static(&sysbus_esp_info
);
1080 type_register_static(&esp_pci_info
);
1083 type_init(esp_register_types
)