/*
 * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator
 *
 * PAPR Virtual SCSI, aka ibmvscsi
 *
 * Copyright (c) 2010,2011 Benjamin Herrenschmidt, IBM Corporation.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 * TODO:
 *  - Sort out better how to assign devices to VSCSI instances
 *  - Fix residual counts
 *  - Add indirect descriptors support
 *  - Maybe do autosense (PAPR seems to mandate it, linux doesn't care)
 */
#include "qemu/osdep.h"
#include "qemu/module.h"
#include "hw/scsi/scsi.h"
#include "migration/vmstate.h"
#include "scsi/constants.h"
#include "srp.h"
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_vio.h"
#include "hw/qdev-properties.h"
#include "qapi/error.h"
#include "viosrp.h"
#include "trace.h"

#include <libfdt.h>
#include "qom/object.h"
#define VSCSI_MAX_SECTORS       4096
#define VSCSI_REQ_LIMIT         24

/* Maximum size of an IU payload */
#define SRP_MAX_IU_DATA_LEN     (SRP_MAX_IU_LEN - sizeof(union srp_iu))
#define SRP_RSP_SENSE_DATA_LEN  18

#define SRP_REPORT_LUNS_WLUN    0xc10100000000000ULL
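
/*
 * Request tracking: the adapter keeps a fixed pool of VSCSI_REQ_LIMIT
 * in-flight requests.  Each guest request arrives as a 16-byte CRQ entry
 * pointing at an SRP or MAD information unit (IU) in guest memory, which
 * is copied into vscsi_req::viosrp_iu_buf before being processed.
 */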
typedef union vscsi_crq {
    struct viosrp_crq s;
    uint8_t raw[16];
} vscsi_crq;
typedef struct vscsi_req {
    vscsi_crq               crq;
    uint8_t                 viosrp_iu_buf[SRP_MAX_IU_LEN];

    /* SCSI request tracking */
    SCSIRequest             *sreq;
    uint32_t                qtag; /* qemu tag != srp tag */
    bool                    active;
    bool                    writing;
    bool                    dma_error;
    uint32_t                data_len;
    uint32_t                senselen;
    uint8_t                 sense[SCSI_SENSE_BUF_SIZE];

    /* RDMA related bits */
    uint8_t                 dma_fmt;
    uint16_t                local_desc;
    uint16_t                total_desc;
    uint16_t                cdb_offset;
    uint16_t                cur_desc_num;
    uint16_t                cur_desc_offset;
} vscsi_req;
#define TYPE_VIO_SPAPR_VSCSI_DEVICE "spapr-vscsi"
OBJECT_DECLARE_SIMPLE_TYPE(VSCSIState, VIO_SPAPR_VSCSI_DEVICE)

struct VSCSIState {
    SpaprVioDevice vdev;
    SCSIBus bus;
    vscsi_req reqs[VSCSI_REQ_LIMIT];
};
static union viosrp_iu *req_iu(vscsi_req *req)
{
    return (union viosrp_iu *)req->viosrp_iu_buf;
}
static struct vscsi_req *vscsi_get_req(VSCSIState *s)
{
    vscsi_req *req;
    int i;

    for (i = 0; i < VSCSI_REQ_LIMIT; i++) {
        req = &s->reqs[i];
        if (!req->active) {
            memset(req, 0, sizeof(*req));
            req->qtag = i;
            req->active = true;
            return req;
        }
    }
    return NULL;
}
static struct vscsi_req *vscsi_find_req(VSCSIState *s, uint64_t srp_tag)
{
    vscsi_req *req;
    int i;

    for (i = 0; i < VSCSI_REQ_LIMIT; i++) {
        req = &s->reqs[i];
        if (req_iu(req)->srp.cmd.tag == srp_tag) {
            return req;
        }
    }
    return NULL;
}
static void vscsi_put_req(vscsi_req *req)
{
    if (req->sreq != NULL) {
        scsi_req_unref(req->sreq);
    }
    req->sreq = NULL;
    req->active = false;
}
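
/*
 * Decode an SRP LUN into QEMU's (channel, id, lun) triple.  The top two
 * bits of the 64-bit LUN select the SAM addressing method (peripheral,
 * flat or logical unit addressing); the remaining fields are unpacked
 * accordingly before the device is looked up on the SCSI bus.
 */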
static SCSIDevice *vscsi_device_find(SCSIBus *bus, uint64_t srp_lun, int *lun)
{
    int channel = 0, id = 0;

retry:
    switch (srp_lun >> 62) {
    case 0:
        if ((srp_lun >> 56) != 0) {
            channel = (srp_lun >> 56) & 0x3f;
            id = (srp_lun >> 48) & 0xff;
            srp_lun <<= 16;
            goto retry;
        }
        *lun = (srp_lun >> 48) & 0xff;
        break;

    case 1:
        *lun = (srp_lun >> 48) & 0x3fff;
        break;
    case 2:
        channel = (srp_lun >> 53) & 0x7;
        id = (srp_lun >> 56) & 0x3f;
        *lun = (srp_lun >> 48) & 0x1f;
        break;
    case 3:
        *lun = -1;
        return NULL;
    default:
        abort();
    }

    return scsi_device_find(bus, channel, id, *lun);
}
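
/*
 * Deliver a response IU: DMA-write the IU from viosrp_iu_buf to the guest
 * buffer named in the request's CRQ entry, then post a response CRQ back
 * to the guest with the status and IU length filled in.
 */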
static int vscsi_send_iu(VSCSIState *s, vscsi_req *req,
                         uint64_t length, uint8_t format)
{
    long rc, rc1;

    assert(length <= SRP_MAX_IU_LEN);

    /* First copy the SRP */
    rc = spapr_vio_dma_write(&s->vdev, req->crq.s.IU_data_ptr,
                             &req->viosrp_iu_buf, length);
    if (rc) {
        fprintf(stderr, "vscsi_send_iu: DMA write failure !\n");
    }

    req->crq.s.valid = 0x80;
    req->crq.s.format = format;
    req->crq.s.reserved = 0x00;
    req->crq.s.timeout = cpu_to_be16(0x0000);
    req->crq.s.IU_length = cpu_to_be16(length);
    req->crq.s.IU_data_ptr = req_iu(req)->srp.rsp.tag; /* right byte order */

    if (rc == 0) {
        req->crq.s.status = VIOSRP_OK;
    } else {
        req->crq.s.status = VIOSRP_ADAPTER_FAIL;
    }

    rc1 = spapr_vio_send_crq(&s->vdev, req->crq.raw);
    if (rc1) {
        fprintf(stderr, "vscsi_send_iu: Error sending response\n");
        return -1;
    }

    return rc;
}
static void vscsi_makeup_sense(VSCSIState *s, vscsi_req *req,
                               uint8_t key, uint8_t asc, uint8_t ascq)
{
    req->senselen = SRP_RSP_SENSE_DATA_LEN;

    /* Valid bit and 'current errors' */
    req->sense[0] = (0x1 << 7 | 0x70);
    /* Sense key */
    req->sense[2] = key;
    /* Additional sense length */
    req->sense[7] = 0xa; /* 10 bytes */
    /* Additional sense code */
    req->sense[12] = asc;
    req->sense[13] = ascq;
}
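
/*
 * Build and send an SRP_RSP IU for a completed command: encode data-in
 * and data-out over/underruns in the residual counts and, for a non-GOOD
 * status, append whatever sense data was captured (or made up) for the
 * request.
 */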
static int vscsi_send_rsp(VSCSIState *s, vscsi_req *req,
                          uint8_t status, int32_t res_in, int32_t res_out)
{
    union viosrp_iu *iu = req_iu(req);
    uint64_t tag = iu->srp.rsp.tag;
    int total_len = sizeof(iu->srp.rsp);
    uint8_t sol_not = iu->srp.cmd.sol_not;

    trace_spapr_vscsi_send_rsp(status, res_in, res_out);

    memset(iu, 0, sizeof(struct srp_rsp));
    iu->srp.rsp.opcode = SRP_RSP;
    iu->srp.rsp.req_lim_delta = cpu_to_be32(1);
    iu->srp.rsp.tag = tag;

    /* Handle residuals */
    if (res_in < 0) {
        iu->srp.rsp.flags |= SRP_RSP_FLAG_DIUNDER;
        res_in = -res_in;
    } else if (res_in) {
        iu->srp.rsp.flags |= SRP_RSP_FLAG_DIOVER;
    }
    if (res_out < 0) {
        iu->srp.rsp.flags |= SRP_RSP_FLAG_DOUNDER;
        res_out = -res_out;
    } else if (res_out) {
        iu->srp.rsp.flags |= SRP_RSP_FLAG_DOOVER;
    }
    iu->srp.rsp.data_in_res_cnt = cpu_to_be32(res_in);
    iu->srp.rsp.data_out_res_cnt = cpu_to_be32(res_out);

    /* We don't do response data */
    /* iu->srp.rsp.flags &= ~SRP_RSP_FLAG_RSPVALID; */
    iu->srp.rsp.resp_data_len = cpu_to_be32(0);

    /* Handle success vs. failure */
    iu->srp.rsp.status = status;
    if (status) {
        iu->srp.rsp.sol_not = (sol_not & 0x04) >> 2;
        if (req->senselen) {
            int sense_data_len = MIN(req->senselen, SRP_MAX_IU_DATA_LEN);

            iu->srp.rsp.flags |= SRP_RSP_FLAG_SNSVALID;
            iu->srp.rsp.sense_data_len = cpu_to_be32(sense_data_len);
            memcpy(iu->srp.rsp.data, req->sense, sense_data_len);
            total_len += sense_data_len;
        }
    } else {
        iu->srp.rsp.sol_not = (sol_not & 0x02) >> 1;
    }

    vscsi_send_iu(s, req, total_len, VIOSRP_SRP_FORMAT);
    return 0;
}
static inline struct srp_direct_buf vscsi_swap_desc(struct srp_direct_buf desc)
{
    desc.va = be64_to_cpu(desc.va);
    desc.len = be32_to_cpu(desc.len);
    return desc;
}
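
/*
 * Fetch the n-th RDMA descriptor of a request.  Direct format carries a
 * single descriptor inline in the SRP command; indirect format carries a
 * partial list inline plus a table in guest memory that is DMA-read once
 * the inline list is exhausted.  Returns 1 when a usable descriptor was
 * produced, 0 when there is no (more) data, -1 on error.
 */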
static int vscsi_fetch_desc(VSCSIState *s, struct vscsi_req *req,
                            unsigned n, unsigned buf_offset,
                            struct srp_direct_buf *ret)
{
    struct srp_cmd *cmd = &req_iu(req)->srp.cmd;

    switch (req->dma_fmt) {
    case SRP_NO_DATA_DESC: {
        trace_spapr_vscsi_fetch_desc_no_data();
        return 0;
    }
    case SRP_DATA_DESC_DIRECT: {
        memcpy(ret, cmd->add_data + req->cdb_offset, sizeof(*ret));
        assert(req->cur_desc_num == 0);
        trace_spapr_vscsi_fetch_desc_direct();
        break;
    }
    case SRP_DATA_DESC_INDIRECT: {
        struct srp_indirect_buf *tmp = (struct srp_indirect_buf *)
                (cmd->add_data + req->cdb_offset);
        if (n < req->local_desc) {
            *ret = tmp->desc_list[n];
            trace_spapr_vscsi_fetch_desc_indirect(req->qtag, n,
                                                  req->local_desc);
        } else if (n < req->total_desc) {
            int rc;
            struct srp_direct_buf tbl_desc = vscsi_swap_desc(tmp->table_desc);
            unsigned desc_offset = n * sizeof(struct srp_direct_buf);

            if (desc_offset >= tbl_desc.len) {
                trace_spapr_vscsi_fetch_desc_out_of_range(n, desc_offset);
                return -1;
            }
            rc = spapr_vio_dma_read(&s->vdev, tbl_desc.va + desc_offset,
                                    ret, sizeof(struct srp_direct_buf));
            if (rc) {
                trace_spapr_vscsi_fetch_desc_dma_read_error(rc);
                return -1;
            }
            trace_spapr_vscsi_fetch_desc_indirect_seg_ext(req->qtag, n,
                                                          req->total_desc,
                                                          tbl_desc.va,
                                                          tbl_desc.len);
        } else {
            trace_spapr_vscsi_fetch_desc_out_of_desc();
            return 0;
        }
        break;
    }
    default:
        fprintf(stderr, "VSCSI: Unknown format %x\n", req->dma_fmt);
        return -1;
    }

    *ret = vscsi_swap_desc(*ret);
    if (buf_offset > ret->len) {
        trace_spapr_vscsi_fetch_desc_out_of_desc_boundary(buf_offset,
                                                          req->cur_desc_num,
                                                          ret->len);
        return -1;
    }
    ret->va += buf_offset;
    ret->len -= buf_offset;

    trace_spapr_vscsi_fetch_desc_done(req->cur_desc_num, req->cur_desc_offset,
                                      ret->va, ret->len);

    return ret->len ? 1 : 0;
}
static int vscsi_srp_direct_data(VSCSIState *s, vscsi_req *req,
                                 uint8_t *buf, uint32_t len)
{
    struct srp_direct_buf md;
    uint32_t llen;
    int rc = 0;

    rc = vscsi_fetch_desc(s, req, req->cur_desc_num, req->cur_desc_offset, &md);
    if (rc < 0) {
        return -1;
    } else if (rc == 0) {
        return 0;
    }

    llen = MIN(len, md.len);
    if (llen) {
        if (req->writing) { /* writing = to device = reading from memory */
            rc = spapr_vio_dma_read(&s->vdev, md.va, buf, llen);
        } else {
            rc = spapr_vio_dma_write(&s->vdev, md.va, buf, llen);
        }
    }
    if (rc) {
        return -1;
    }
    req->cur_desc_offset += llen;

    return llen;
}
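
/*
 * Indirect transfers walk the descriptor list one element at a time,
 * copying as much of the SCSI layer's buffer as the current descriptor
 * covers and advancing cur_desc_num/cur_desc_offset so the next chunk of
 * the same request resumes where this one stopped.
 */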
static int vscsi_srp_indirect_data(VSCSIState *s, vscsi_req *req,
                                   uint8_t *buf, uint32_t len)
{
    struct srp_direct_buf md;
    int rc = 0;
    uint32_t llen, total = 0;

    trace_spapr_vscsi_srp_indirect_data(len);

    /* While we have data ... */
    while (len) {
        rc = vscsi_fetch_desc(s, req, req->cur_desc_num, req->cur_desc_offset, &md);
        if (rc < 0) {
            return -1;
        } else if (rc == 0) {
            break;
        }

        /* Perform transfer */
        llen = MIN(len, md.len);
        if (req->writing) { /* writing = to device = reading from memory */
            rc = spapr_vio_dma_read(&s->vdev, md.va, buf, llen);
        } else {
            rc = spapr_vio_dma_write(&s->vdev, md.va, buf, llen);
        }
        if (rc) {
            break;
        }
        trace_spapr_vscsi_srp_indirect_data_rw(req->writing, rc);
        trace_spapr_vscsi_srp_indirect_data_buf(buf[0], buf[1], buf[2], buf[3]);

        len -= llen;
        buf += llen;
        total += llen;

        /* Update current position in the current descriptor */
        req->cur_desc_offset += llen;
        if (md.len == llen) {
            /* Go to the next descriptor if the current one finished */
            ++req->cur_desc_num;
            req->cur_desc_offset = 0;
        }
    }

    return rc ? -1 : total;
}
static int vscsi_srp_transfer_data(VSCSIState *s, vscsi_req *req,
                                   int writing, uint8_t *buf, uint32_t len)
{
    int err = 0;

    switch (req->dma_fmt) {
    case SRP_NO_DATA_DESC:
        trace_spapr_vscsi_srp_transfer_data(len);
        break;
    case SRP_DATA_DESC_DIRECT:
        err = vscsi_srp_direct_data(s, req, buf, len);
        break;
    case SRP_DATA_DESC_INDIRECT:
        err = vscsi_srp_indirect_data(s, req, buf, len);
        break;
    }
    return err;
}
/* Bits from linux srp */
static int data_out_desc_size(struct srp_cmd *cmd)
{
    int size = 0;
    uint8_t fmt = cmd->buf_fmt >> 4;

    switch (fmt) {
    case SRP_NO_DATA_DESC:
        break;
    case SRP_DATA_DESC_DIRECT:
        size = sizeof(struct srp_direct_buf);
        break;
    case SRP_DATA_DESC_INDIRECT:
        size = sizeof(struct srp_indirect_buf) +
            sizeof(struct srp_direct_buf) * cmd->data_out_desc_cnt;
        break;
    default:
        break;
    }
    return size;
}
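
/*
 * Work out, from the SRP command, where the RDMA descriptors start
 * (cdb_offset), which buffer format applies to this transfer direction
 * (dma_fmt), and how many descriptors are stored inline (local_desc)
 * versus in the external table (total_desc).
 */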
static int vscsi_preprocess_desc(vscsi_req *req)
{
    struct srp_cmd *cmd = &req_iu(req)->srp.cmd;

    req->cdb_offset = cmd->add_cdb_len & ~3;

    if (req->writing) {
        req->dma_fmt = cmd->buf_fmt >> 4;
    } else {
        req->cdb_offset += data_out_desc_size(cmd);
        req->dma_fmt = cmd->buf_fmt & ((1U << 4) - 1);
    }

    switch (req->dma_fmt) {
    case SRP_NO_DATA_DESC:
        break;
    case SRP_DATA_DESC_DIRECT:
        req->total_desc = req->local_desc = 1;
        break;
    case SRP_DATA_DESC_INDIRECT: {
        struct srp_indirect_buf *ind_tmp = (struct srp_indirect_buf *)
                (cmd->add_data + req->cdb_offset);

        req->total_desc = be32_to_cpu(ind_tmp->table_desc.len) /
                          sizeof(struct srp_direct_buf);
        req->local_desc = req->writing ? cmd->data_out_desc_cnt :
                          cmd->data_in_desc_cnt;
        break;
    }
    default:
        fprintf(stderr,
                "vscsi_preprocess_desc: Unknown format %x\n", req->dma_fmt);
        return -1;
    }

    return 0;
}
/* Callback to indicate that the SCSI layer has completed a transfer. */
static void vscsi_transfer_data(SCSIRequest *sreq, uint32_t len)
{
    VSCSIState *s = VIO_SPAPR_VSCSI_DEVICE(sreq->bus->qbus.parent);
    vscsi_req *req = sreq->hba_private;
    uint8_t *buf;
    int rc = 0;

    trace_spapr_vscsi_transfer_data(sreq->tag, len, req);
    if (req == NULL) {
        fprintf(stderr, "VSCSI: Can't find request for tag 0x%x\n", sreq->tag);
        return;
    }

    if (len) {
        buf = scsi_req_get_buf(sreq);
        rc = vscsi_srp_transfer_data(s, req, req->writing, buf, len);
    }
    if (rc < 0) {
        fprintf(stderr, "VSCSI: RDMA error rc=%d!\n", rc);
        req->dma_error = true;
        scsi_req_cancel(req->sreq);
        return;
    }

    /* Start next chunk */
    req->data_len -= rc;
    scsi_req_continue(sreq);
}
/* Callback to indicate that the SCSI layer has completed a command. */
static void vscsi_command_complete(SCSIRequest *sreq, size_t resid)
{
    VSCSIState *s = VIO_SPAPR_VSCSI_DEVICE(sreq->bus->qbus.parent);
    vscsi_req *req = sreq->hba_private;
    int32_t res_in = 0, res_out = 0;

    trace_spapr_vscsi_command_complete(sreq->tag, sreq->status, req);
    if (req == NULL) {
        fprintf(stderr, "VSCSI: Can't find request for tag 0x%x\n", sreq->tag);
        return;
    }

    if (sreq->status == CHECK_CONDITION) {
        req->senselen = scsi_req_get_sense(req->sreq, req->sense,
                                           sizeof(req->sense));
        trace_spapr_vscsi_command_complete_sense_data1(req->senselen,
                req->sense[0], req->sense[1], req->sense[2], req->sense[3],
                req->sense[4], req->sense[5], req->sense[6], req->sense[7]);
        trace_spapr_vscsi_command_complete_sense_data2(
                req->sense[8], req->sense[9], req->sense[10], req->sense[11],
                req->sense[12], req->sense[13], req->sense[14], req->sense[15]);
    }

    trace_spapr_vscsi_command_complete_status(sreq->status);
    if (sreq->status == 0) {
        /* We handle overflows, not underflows for normal commands,
         * but hopefully nobody cares
         */
        if (req->writing) {
            res_out = req->data_len;
        } else {
            res_in = req->data_len;
        }
    }
    vscsi_send_rsp(s, req, sreq->status, res_in, res_out);
    vscsi_put_req(req);
}
static void vscsi_request_cancelled(SCSIRequest *sreq)
{
    vscsi_req *req = sreq->hba_private;

    if (req->dma_error) {
        VSCSIState *s = VIO_SPAPR_VSCSI_DEVICE(sreq->bus->qbus.parent);

        vscsi_makeup_sense(s, req, HARDWARE_ERROR, 0, 0);
        vscsi_send_rsp(s, req, CHECK_CONDITION, 0, 0);
    }
    vscsi_put_req(req);
}
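
/*
 * Migration state for one in-flight request.  The RDMA cursor
 * (cur_desc_num/cur_desc_offset) is intentionally not migrated; the SCSI
 * request is restarted from the beginning on the destination.
 */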
static const VMStateDescription vmstate_spapr_vscsi_req = {
    .name = "spapr_vscsi_req",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_BUFFER(crq.raw, vscsi_req),
        VMSTATE_BUFFER(viosrp_iu_buf, vscsi_req),
        VMSTATE_UINT32(qtag, vscsi_req),
        VMSTATE_BOOL(active, vscsi_req),
        VMSTATE_UINT32(data_len, vscsi_req),
        VMSTATE_BOOL(writing, vscsi_req),
        VMSTATE_UINT32(senselen, vscsi_req),
        VMSTATE_BUFFER(sense, vscsi_req),
        VMSTATE_UINT8(dma_fmt, vscsi_req),
        VMSTATE_UINT16(local_desc, vscsi_req),
        VMSTATE_UINT16(total_desc, vscsi_req),
        VMSTATE_UINT16(cdb_offset, vscsi_req),
        /* Restart SCSI request from the beginning for now */
        /* VMSTATE_UINT16(cur_desc_num, vscsi_req),
           VMSTATE_UINT16(cur_desc_offset, vscsi_req), */
        VMSTATE_END_OF_LIST()
    },
};
static void vscsi_save_request(QEMUFile *f, SCSIRequest *sreq)
{
    vscsi_req *req = sreq->hba_private;

    vmstate_save_state(f, &vmstate_spapr_vscsi_req, req, NULL);

    trace_spapr_vscsi_save_request(req->qtag, req->cur_desc_num,
                                   req->cur_desc_offset);
}
static void *vscsi_load_request(QEMUFile *f, SCSIRequest *sreq)
{
    SCSIBus *bus = sreq->bus;
    VSCSIState *s = VIO_SPAPR_VSCSI_DEVICE(bus->qbus.parent);
    vscsi_req *req;
    int rc;

    assert(sreq->tag < VSCSI_REQ_LIMIT);
    req = &s->reqs[sreq->tag];
    assert(!req->active);

    memset(req, 0, sizeof(*req));
    rc = vmstate_load_state(f, &vmstate_spapr_vscsi_req, req, 1);
    if (rc) {
        fprintf(stderr, "VSCSI: failed loading request tag#%u\n", sreq->tag);
        return NULL;
    }

    req->sreq = scsi_req_ref(sreq);

    trace_spapr_vscsi_load_request(req->qtag, req->cur_desc_num,
                                   req->cur_desc_offset);

    return req;
}
static void vscsi_process_login(VSCSIState *s, vscsi_req *req)
{
    union viosrp_iu *iu = req_iu(req);
    struct srp_login_rsp *rsp = &iu->srp.login_rsp;
    uint64_t tag = iu->srp.rsp.tag;

    trace_spapr_vscsi_process_login();

    /* TODO handle case that requested size is wrong and
     * buffer format is wrong
     */
    memset(iu, 0, sizeof(struct srp_login_rsp));
    rsp->opcode = SRP_LOGIN_RSP;
    /* Don't advertise quite as many requests as we support to
     * keep room for management stuff etc...
     */
    rsp->req_lim_delta = cpu_to_be32(VSCSI_REQ_LIMIT - 2);
    rsp->tag = tag;
    rsp->max_it_iu_len = cpu_to_be32(SRP_MAX_IU_LEN);
    rsp->max_ti_iu_len = cpu_to_be32(SRP_MAX_IU_LEN);
    /* direct and indirect */
    rsp->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT);

    vscsi_send_iu(s, req, sizeof(*rsp), VIOSRP_SRP_FORMAT);
}
static void vscsi_inquiry_no_target(VSCSIState *s, vscsi_req *req)
{
    uint8_t *cdb = req_iu(req)->srp.cmd.cdb;
    uint8_t resp_data[36];
    int rc, len, alen;

    /* We don't do EVPD. Also check that page_code is 0 */
    if ((cdb[1] & 0x01) || cdb[2] != 0) {
        /* Send INVALID FIELD IN CDB */
        vscsi_makeup_sense(s, req, ILLEGAL_REQUEST, 0x24, 0);
        vscsi_send_rsp(s, req, CHECK_CONDITION, 0, 0);
        return;
    }

    /* Get allocation length from the CDB */
    alen = cdb[3];
    alen = (alen << 8) | cdb[4];
    len = MIN(alen, 36);

    /* Fake up inquiry using PQ=3 */
    memset(resp_data, 0, 36);
    resp_data[0] = 0x7f;   /* Not capable of supporting a device here */
    resp_data[2] = 0x06;   /* SPC-4 */
    resp_data[3] = 0x02;   /* Resp data format */
    resp_data[4] = 36 - 5; /* Additional length */
    resp_data[7] = 0x10;   /* Sync transfers */
    memcpy(&resp_data[16], "QEMU EMPTY      ", 16);
    memcpy(&resp_data[8], "QEMU    ", 8);

    vscsi_preprocess_desc(req);
    rc = vscsi_srp_transfer_data(s, req, 0, resp_data, len);
    if (rc < 0) {
        vscsi_makeup_sense(s, req, HARDWARE_ERROR, 0, 0);
        vscsi_send_rsp(s, req, CHECK_CONDITION, 0, 0);
    } else {
        vscsi_send_rsp(s, req, 0, 36 - rc, 0);
    }
}
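
/*
 * REPORT LUNS is answered by the adapter itself rather than by a single
 * SCSI device, since the response must cover every device on the virtual
 * bus.  LUN 0 is always reported, even when no device is attached there.
 */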
static void vscsi_report_luns(VSCSIState *s, vscsi_req *req)
{
    BusChild *kid;
    int i, len, n, rc;
    uint8_t *resp_data;
    bool found_lun0;

    n = 0;
    found_lun0 = false;
    QTAILQ_FOREACH(kid, &s->bus.qbus.children, sibling) {
        SCSIDevice *dev = SCSI_DEVICE(kid->child);

        n += 8;
        if (dev->channel == 0 && dev->id == 0 && dev->lun == 0) {
            found_lun0 = true;
        }
    }
    if (!found_lun0) {
        n += 8;
    }
    len = n + 8;

    resp_data = g_malloc0(len);
    stl_be_p(resp_data, n);
    i = found_lun0 ? 8 : 16;
    QTAILQ_FOREACH(kid, &s->bus.qbus.children, sibling) {
        DeviceState *qdev = kid->child;
        SCSIDevice *dev = SCSI_DEVICE(qdev);

        if (dev->id == 0 && dev->channel == 0) {
            resp_data[i] = 0;        /* Use simple LUN for 0 (SAM5 4.7.7.1) */
        } else {
            resp_data[i] = (2 << 6); /* Otherwise LUN addressing (4.7.7.4)  */
        }
        resp_data[i] |= dev->id;
        resp_data[i + 1] = (dev->channel << 5);
        resp_data[i + 1] |= dev->lun;
        i += 8;
    }

    vscsi_preprocess_desc(req);
    rc = vscsi_srp_transfer_data(s, req, 0, resp_data, len);
    g_free(resp_data);
    if (rc < 0) {
        vscsi_makeup_sense(s, req, HARDWARE_ERROR, 0, 0);
        vscsi_send_rsp(s, req, CHECK_CONDITION, 0, 0);
        return;
    }
    vscsi_send_rsp(s, req, 0, len - rc, 0);
}
static int vscsi_queue_cmd(VSCSIState *s, vscsi_req *req)
{
    union srp_iu *srp = &req_iu(req)->srp;
    SCSIDevice *sdev;
    int n, lun;
    size_t cdb_len = sizeof(srp->cmd.cdb) + (srp->cmd.add_cdb_len & ~3);

    if ((srp->cmd.lun == 0 || be64_to_cpu(srp->cmd.lun) == SRP_REPORT_LUNS_WLUN)
        && srp->cmd.cdb[0] == REPORT_LUNS) {
        vscsi_report_luns(s, req);
        return 1;
    }

    sdev = vscsi_device_find(&s->bus, be64_to_cpu(srp->cmd.lun), &lun);
    if (!sdev) {
        trace_spapr_vscsi_queue_cmd_no_drive(be64_to_cpu(srp->cmd.lun));
        if (srp->cmd.cdb[0] == INQUIRY) {
            vscsi_inquiry_no_target(s, req);
        } else {
            vscsi_makeup_sense(s, req, ILLEGAL_REQUEST, 0x24, 0x00);
            vscsi_send_rsp(s, req, CHECK_CONDITION, 0, 0);
        }
        return 1;
    }

    req->sreq = scsi_req_new(sdev, req->qtag, lun, srp->cmd.cdb, cdb_len, req);
    n = scsi_req_enqueue(req->sreq);

    trace_spapr_vscsi_queue_cmd(req->qtag, srp->cmd.cdb[0],
                                scsi_command_name(srp->cmd.cdb[0]), lun, n);

    if (n) {
        /* Transfer direction must be set before preprocessing the
         * descriptors
         */
        req->writing = (n < 1);

        /* Preprocess RDMA descriptors */
        vscsi_preprocess_desc(req);

        /* Get transfer direction and initiate transfer */
        req->data_len = n > 0 ? n : -n;
        scsi_req_continue(req->sreq);
    }
    /* Don't touch req here, it may have been recycled already */

    return 0;
}
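
/*
 * Handle an SRP task management request (abort task, LUN reset,
 * abort/clear task set, ...).  The result is reported in the 4-byte
 * response-data area of an SRP_RSP IU, with data[3] carrying the task
 * management status.
 */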
static int vscsi_process_tsk_mgmt(VSCSIState *s, vscsi_req *req)
{
    union viosrp_iu *iu = req_iu(req);
    vscsi_req *tmpreq;
    int i, lun = 0, resp = SRP_TSK_MGMT_COMPLETE;
    SCSIDevice *d;
    uint64_t tag = iu->srp.rsp.tag;
    uint8_t sol_not = iu->srp.cmd.sol_not;

    trace_spapr_vscsi_process_tsk_mgmt(iu->srp.tsk_mgmt.tsk_mgmt_func);
    d = vscsi_device_find(&s->bus,
                          be64_to_cpu(req_iu(req)->srp.tsk_mgmt.lun), &lun);
    if (!d) {
        resp = SRP_TSK_MGMT_FIELDS_INVALID;
    } else {
        switch (iu->srp.tsk_mgmt.tsk_mgmt_func) {
        case SRP_TSK_ABORT_TASK:
            if (d->lun != lun) {
                resp = SRP_TSK_MGMT_FIELDS_INVALID;
                break;
            }

            tmpreq = vscsi_find_req(s, req_iu(req)->srp.tsk_mgmt.task_tag);
            if (tmpreq && tmpreq->sreq) {
                assert(tmpreq->sreq->hba_private);
                scsi_req_cancel(tmpreq->sreq);
            }
            break;

        case SRP_TSK_LUN_RESET:
            if (d->lun != lun) {
                resp = SRP_TSK_MGMT_FIELDS_INVALID;
                break;
            }

            device_cold_reset(&d->qdev);
            break;

        case SRP_TSK_ABORT_TASK_SET:
        case SRP_TSK_CLEAR_TASK_SET:
            if (d->lun != lun) {
                resp = SRP_TSK_MGMT_FIELDS_INVALID;
                break;
            }

            for (i = 0; i < VSCSI_REQ_LIMIT; i++) {
                tmpreq = &s->reqs[i];
                if (req_iu(tmpreq)->srp.cmd.lun
                        != req_iu(req)->srp.tsk_mgmt.lun) {
                    continue;
                }
                if (!tmpreq->active || !tmpreq->sreq) {
                    continue;
                }
                assert(tmpreq->sreq->hba_private);
                scsi_req_cancel(tmpreq->sreq);
            }
            break;

        case SRP_TSK_CLEAR_ACA:
            resp = SRP_TSK_MGMT_NOT_SUPPORTED;
            break;

        default:
            resp = SRP_TSK_MGMT_FIELDS_INVALID;
            break;
        }
    }

    /* Compose the response here */
    QEMU_BUILD_BUG_ON(SRP_MAX_IU_DATA_LEN < 4);
    memset(iu, 0, sizeof(struct srp_rsp) + 4);
    iu->srp.rsp.opcode = SRP_RSP;
    iu->srp.rsp.req_lim_delta = cpu_to_be32(1);
    iu->srp.rsp.tag = tag;
    iu->srp.rsp.flags |= SRP_RSP_FLAG_RSPVALID;
    iu->srp.rsp.resp_data_len = cpu_to_be32(4);
    if (resp) {
        iu->srp.rsp.sol_not = (sol_not & 0x04) >> 2;
    } else {
        iu->srp.rsp.sol_not = (sol_not & 0x02) >> 1;
    }

    iu->srp.rsp.status = GOOD;
    iu->srp.rsp.data[3] = resp;

    vscsi_send_iu(s, req, sizeof(iu->srp.rsp) + 4, VIOSRP_SRP_FORMAT);

    return 1;
}
static int vscsi_handle_srp_req(VSCSIState *s, vscsi_req *req)
{
    union srp_iu *srp = &req_iu(req)->srp;
    int done = 1;
    uint8_t opcode = srp->rsp.opcode;

    switch (opcode) {
    case SRP_LOGIN_REQ:
        vscsi_process_login(s, req);
        break;
    case SRP_TSK_MGMT:
        done = vscsi_process_tsk_mgmt(s, req);
        break;
    case SRP_CMD:
        done = vscsi_queue_cmd(s, req);
        break;
    case SRP_LOGIN_RSP:
    case SRP_I_LOGOUT:
    case SRP_T_LOGOUT:
    case SRP_RSP:
    case SRP_CRED_REQ:
    case SRP_CRED_RSP:
    case SRP_AER_REQ:
    case SRP_AER_RSP:
        fprintf(stderr, "VSCSI: Unsupported opcode %02x\n", opcode);
        break;
    default:
        fprintf(stderr, "VSCSI: Unknown type %02x\n", opcode);
    }

    return done;
}
static int vscsi_send_adapter_info(VSCSIState *s, vscsi_req *req)
{
    struct viosrp_adapter_info *sinfo;
    struct mad_adapter_info_data info;
    int rc;

    sinfo = &req_iu(req)->mad.adapter_info;

#if 0 /* What for ? */
    rc = spapr_vio_dma_read(&s->vdev, be64_to_cpu(sinfo->buffer),
                            &info, be16_to_cpu(sinfo->common.length));
    if (rc) {
        fprintf(stderr, "vscsi_send_adapter_info: DMA read failure !\n");
    }
#endif
    memset(&info, 0, sizeof(info));
    strcpy(info.srp_version, SRP_VERSION);
    memcpy(info.partition_name, "qemu", sizeof("qemu"));
    info.partition_number = cpu_to_be32(0);
    info.mad_version = cpu_to_be32(1);
    info.os_type = cpu_to_be32(2);
    info.port_max_txu[0] = cpu_to_be32(VSCSI_MAX_SECTORS << 9);

    rc = spapr_vio_dma_write(&s->vdev, be64_to_cpu(sinfo->buffer),
                             &info, be16_to_cpu(sinfo->common.length));
    if (rc) {
        fprintf(stderr, "vscsi_send_adapter_info: DMA write failure !\n");
    }

    sinfo->common.status = rc ? cpu_to_be32(1) : 0;

    return vscsi_send_iu(s, req, sizeof(*sinfo), VIOSRP_MAD_FORMAT);
}
static int vscsi_send_capabilities(VSCSIState *s, vscsi_req *req)
{
    struct viosrp_capabilities *vcap;
    struct capabilities cap = { };
    uint16_t len, req_len;
    uint64_t buffer;
    int rc;

    vcap = &req_iu(req)->mad.capabilities;
    req_len = len = be16_to_cpu(vcap->common.length);
    buffer = be64_to_cpu(vcap->buffer);
    if (len > sizeof(cap)) {
        fprintf(stderr, "vscsi_send_capabilities: capabilities size mismatch !\n");

        /*
         * Just read and populate the structure that is known.
         * Zero rest of the structure.
         */
        len = sizeof(cap);
    }
    rc = spapr_vio_dma_read(&s->vdev, buffer, &cap, len);
    if (rc) {
        fprintf(stderr, "vscsi_send_capabilities: DMA read failure !\n");
    }

    /*
     * Current implementation does not support any migration or
     * reservation capabilities. Construct the response telling the
     * guest not to use them.
     */
    cap.migration.ecl = 0;
    cap.reserve.type = 0;
    cap.migration.common.server_support = 0;
    cap.reserve.common.server_support = 0;

    rc = spapr_vio_dma_write(&s->vdev, buffer, &cap, len);
    if (rc) {
        fprintf(stderr, "vscsi_send_capabilities: DMA write failure !\n");
    }
    if (req_len > len) {
        /*
         * Being paranoid, let's not worry about the error code
         * here. Actual write of the cap is done above.
         */
        spapr_vio_dma_set(&s->vdev, (buffer + len), 0, (req_len - len));
    }
    vcap->common.status = rc ? cpu_to_be32(1) : 0;
    return vscsi_send_iu(s, req, sizeof(*vcap), VIOSRP_MAD_FORMAT);
}
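
/*
 * MAD (management datagram) requests share the CRQ path with SRP but use
 * their own format.  Only ADAPTER_INFO and CAPABILITIES are implemented;
 * every other type is answered with a NOT_SUPPORTED status.
 */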
static int vscsi_handle_mad_req(VSCSIState *s, vscsi_req *req)
{
    union mad_iu *mad = &req_iu(req)->mad;
    bool request_handled = false;
    uint64_t retlen = 0;

    switch (be32_to_cpu(mad->empty_iu.common.type)) {
    case VIOSRP_EMPTY_IU_TYPE:
        fprintf(stderr, "Unsupported EMPTY MAD IU\n");
        retlen = sizeof(mad->empty_iu);
        break;
    case VIOSRP_ERROR_LOG_TYPE:
        fprintf(stderr, "Unsupported ERROR LOG MAD IU\n");
        retlen = sizeof(mad->error_log);
        break;
    case VIOSRP_ADAPTER_INFO_TYPE:
        vscsi_send_adapter_info(s, req);
        request_handled = true;
        break;
    case VIOSRP_HOST_CONFIG_TYPE:
        retlen = sizeof(mad->host_config);
        break;
    case VIOSRP_CAPABILITIES_TYPE:
        vscsi_send_capabilities(s, req);
        request_handled = true;
        break;
    default:
        fprintf(stderr, "VSCSI: Unknown MAD type %02x\n",
                be32_to_cpu(mad->empty_iu.common.type));
        /*
         * PAPR+ says that "The length field is set to the length
         * of the data structure(s) used in the command".
         * As we did not recognize the request type, put zero there.
         */
        retlen = 0;
    }

    if (!request_handled) {
        mad->empty_iu.common.status = cpu_to_be16(VIOSRP_MAD_NOT_SUPPORTED);
        vscsi_send_iu(s, req, retlen, VIOSRP_MAD_FORMAT);
    }

    return 1;
}
static void vscsi_got_payload(VSCSIState *s, vscsi_crq *crq)
{
    vscsi_req *req;
    int done;

    req = vscsi_get_req(s);
    if (req == NULL) {
        fprintf(stderr, "VSCSI: Failed to get a request !\n");
        return;
    }

    /* We only support a limited number of descriptors, we know
     * the ibmvscsi driver uses up to 10 max, so it should fit
     * in our 256-byte IUs. If not we'll have to increase the size
     * of the structure.
     */
    if (crq->s.IU_length > SRP_MAX_IU_LEN) {
        fprintf(stderr, "VSCSI: SRP IU too long (%d bytes) !\n",
                crq->s.IU_length);
        vscsi_put_req(req);
        return;
    }

    /* XXX Handle failure differently ? */
    if (spapr_vio_dma_read(&s->vdev, crq->s.IU_data_ptr, &req->viosrp_iu_buf,
                           crq->s.IU_length)) {
        fprintf(stderr, "vscsi_got_payload: DMA read failure !\n");
        vscsi_put_req(req);
        return;
    }
    memcpy(&req->crq, crq, sizeof(vscsi_crq));

    if (crq->s.format == VIOSRP_MAD_FORMAT) {
        done = vscsi_handle_mad_req(s, req);
    } else {
        done = vscsi_handle_srp_req(s, req);
    }

    if (done) {
        vscsi_put_req(req);
    }
}
*dev
, uint8_t *crq_data
)
1133 VSCSIState
*s
= VIO_SPAPR_VSCSI_DEVICE(dev
);
1136 memcpy(crq
.raw
, crq_data
, 16);
1137 crq
.s
.timeout
= be16_to_cpu(crq
.s
.timeout
);
1138 crq
.s
.IU_length
= be16_to_cpu(crq
.s
.IU_length
);
1139 crq
.s
.IU_data_ptr
= be64_to_cpu(crq
.s
.IU_data_ptr
);
1141 trace_spapr_vscsi_do_crq(crq
.raw
[0], crq
.raw
[1]);
1143 switch (crq
.s
.valid
) {
1144 case 0xc0: /* Init command/response */
1146 /* Respond to initialization request */
1147 if (crq
.s
.format
== 0x01) {
1148 memset(crq
.raw
, 0, 16);
1150 crq
.s
.format
= 0x02;
1151 spapr_vio_send_crq(dev
, crq
.raw
);
1154 /* Note that in hotplug cases, we might get a 0x02
1155 * as a result of us emitting the init request
1159 case 0xff: /* Link event */
1161 /* Not handled for now */
1164 case 0x80: /* Payloads */
1165 switch (crq
.s
.format
) {
1166 case VIOSRP_SRP_FORMAT
: /* AKA VSCSI request */
1167 case VIOSRP_MAD_FORMAT
: /* AKA VSCSI response */
1168 vscsi_got_payload(s
, &crq
);
1170 case VIOSRP_OS400_FORMAT
:
1171 case VIOSRP_AIX_FORMAT
:
1172 case VIOSRP_LINUX_FORMAT
:
1173 case VIOSRP_INLINE_FORMAT
:
1174 fprintf(stderr
, "vscsi_do_srq: Unsupported payload format %02x\n",
1178 fprintf(stderr
, "vscsi_do_srq: Unknown payload format %02x\n",
1183 fprintf(stderr
, "vscsi_do_crq: unknown CRQ %02x %02x ...\n",
1184 crq
.raw
[0], crq
.raw
[1]);
static const struct SCSIBusInfo vscsi_scsi_info = {
    .tcq = true,
    .max_channel = 7, /* logical unit addressing format */
    .max_target = 63,
    .max_lun = 31,

    .transfer_data = vscsi_transfer_data,
    .complete = vscsi_command_complete,
    .cancel = vscsi_request_cancelled,
    .save_request = vscsi_save_request,
    .load_request = vscsi_load_request,
};
static void spapr_vscsi_reset(SpaprVioDevice *dev)
{
    VSCSIState *s = VIO_SPAPR_VSCSI_DEVICE(dev);
    int i;

    memset(s->reqs, 0, sizeof(s->reqs));
    for (i = 0; i < VSCSI_REQ_LIMIT; i++) {
        s->reqs[i].qtag = i;
    }
}
static void spapr_vscsi_realize(SpaprVioDevice *dev, Error **errp)
{
    VSCSIState *s = VIO_SPAPR_VSCSI_DEVICE(dev);

    dev->crq.SendFunc = vscsi_do_crq;

    scsi_bus_init(&s->bus, sizeof(s->bus), DEVICE(dev), &vscsi_scsi_info);

    /* ibmvscsi SCSI bus does not allow hotplug. */
    qbus_set_hotplug_handler(BUS(&s->bus), NULL);
}
void spapr_vscsi_create(SpaprVioBus *bus)
{
    DeviceState *dev;

    dev = qdev_new("spapr-vscsi");

    qdev_realize_and_unref(dev, &bus->bus, &error_fatal);
    scsi_bus_legacy_handle_cmdline(&VIO_SPAPR_VSCSI_DEVICE(dev)->bus);
}
static int spapr_vscsi_devnode(SpaprVioDevice *dev, void *fdt, int node_off)
{
    int ret;

    ret = fdt_setprop_cell(fdt, node_off, "#address-cells", 2);
    if (ret < 0) {
        return ret;
    }

    ret = fdt_setprop_cell(fdt, node_off, "#size-cells", 0);
    if (ret < 0) {
        return ret;
    }

    return 0;
}
static Property spapr_vscsi_properties[] = {
    DEFINE_SPAPR_PROPERTIES(VSCSIState, vdev),
    DEFINE_PROP_END_OF_LIST(),
};
static const VMStateDescription vmstate_spapr_vscsi = {
    .name = "spapr_vscsi",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_SPAPR_VIO(vdev, VSCSIState),
        VMSTATE_END_OF_LIST()
    },
};
static void spapr_vscsi_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SpaprVioDeviceClass *k = VIO_SPAPR_DEVICE_CLASS(klass);

    k->realize = spapr_vscsi_realize;
    k->reset = spapr_vscsi_reset;
    k->devnode = spapr_vscsi_devnode;
    k->dt_name = "v-scsi";
    k->dt_type = "vscsi";
    k->dt_compatible = "IBM,v-scsi";
    k->signal_mask = 0x00000001;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    device_class_set_props(dc, spapr_vscsi_properties);
    k->rtce_window_size = 0x10000000;
    dc->vmsd = &vmstate_spapr_vscsi;
}
static const TypeInfo spapr_vscsi_info = {
    .name          = TYPE_VIO_SPAPR_VSCSI_DEVICE,
    .parent        = TYPE_VIO_SPAPR_DEVICE,
    .instance_size = sizeof(VSCSIState),
    .class_init    = spapr_vscsi_class_init,
};

static void spapr_vscsi_register_types(void)
{
    type_register_static(&spapr_vscsi_info);
}

type_init(spapr_vscsi_register_types)