/*
 * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator
 *
 * PAPR Virtual SCSI, aka ibmvscsi
 *
 * Copyright (c) 2010,2011 Benjamin Herrenschmidt, IBM Corporation.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 * TODO:
 *
 *  - Cleanups :-)
 *  - Sort out better how to assign devices to VSCSI instances
 *  - Fix residual counts
 *  - Add indirect descriptors support
 *  - Maybe do autosense (PAPR seems to mandate it, linux doesn't care)
 */
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "cpu.h"
#include "hw/hw.h"
#include "hw/scsi/scsi.h"
#include "block/scsi.h"
#include "srp.h"
#include "hw/qdev.h"
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_vio.h"
#include "viosrp.h"

#include <libfdt.h>

/*#define DEBUG_VSCSI*/

#ifdef DEBUG_VSCSI
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif
/*
 * Virtual SCSI device
 */

/* Random numbers */
#define VSCSI_MAX_SECTORS       4096
#define VSCSI_REQ_LIMIT         24

#define SRP_RSP_SENSE_DATA_LEN  18

#define SRP_REPORT_LUNS_WLUN    0xc10100000000000ULL

typedef union vscsi_crq {
    struct viosrp_crq s;
    uint8_t raw[16];
} vscsi_crq;

typedef struct vscsi_req {
    vscsi_crq               crq;
    union viosrp_iu         iu;

    /* SCSI request tracking */
    SCSIRequest             *sreq;
    uint32_t                qtag; /* qemu tag != srp tag */
    bool                    active;
    bool                    writing;
    bool                    dma_error;
    uint32_t                data_len;
    uint32_t                senselen;
    uint8_t                 sense[SCSI_SENSE_BUF_SIZE];

    /* RDMA related bits */
    uint8_t                 dma_fmt;
    uint16_t                local_desc;
    uint16_t                total_desc;
    uint16_t                cdb_offset;
    uint16_t                cur_desc_num;
    uint16_t                cur_desc_offset;
} vscsi_req;

#define TYPE_VIO_SPAPR_VSCSI_DEVICE "spapr-vscsi"
#define VIO_SPAPR_VSCSI_DEVICE(obj) \
     OBJECT_CHECK(VSCSIState, (obj), TYPE_VIO_SPAPR_VSCSI_DEVICE)

typedef struct {
    VIOsPAPRDevice vdev;
    SCSIBus bus;
    vscsi_req reqs[VSCSI_REQ_LIMIT];
} VSCSIState;

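/*
 * Request slot helpers: requests live in a fixed pool of VSCSI_REQ_LIMIT
 * entries indexed by a qemu-side tag (qtag) and matched back to guest
 * requests via their SRP tag.
 */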
static struct vscsi_req *vscsi_get_req(VSCSIState *s)
{
    vscsi_req *req;
    int i;

    for (i = 0; i < VSCSI_REQ_LIMIT; i++) {
        req = &s->reqs[i];
        if (!req->active) {
            memset(req, 0, sizeof(*req));
            req->qtag = i;
            req->active = 1;
            return req;
        }
    }
    return NULL;
}

static struct vscsi_req *vscsi_find_req(VSCSIState *s, uint64_t srp_tag)
{
    vscsi_req *req;
    int i;

    for (i = 0; i < VSCSI_REQ_LIMIT; i++) {
        req = &s->reqs[i];
        if (req->iu.srp.cmd.tag == srp_tag) {
            return req;
        }
    }
    return NULL;
}

static void vscsi_put_req(vscsi_req *req)
{
    if (req->sreq != NULL) {
        scsi_req_unref(req->sreq);
    }
    req->sreq = NULL;
    req->active = 0;
}

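/*
 * Decode the 64-bit SRP LUN (SAM logical unit addressing) into a
 * channel/id/lun triplet and look the device up on the SCSI bus.
 * Returns NULL if no matching device is found or the addressing
 * mode is not supported.
 */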
static SCSIDevice *vscsi_device_find(SCSIBus *bus, uint64_t srp_lun, int *lun)
{
    int channel = 0, id = 0;

retry:
    switch (srp_lun >> 62) {
    case 0:
        if ((srp_lun >> 56) != 0) {
            channel = (srp_lun >> 56) & 0x3f;
            id = (srp_lun >> 48) & 0xff;
            srp_lun <<= 16;
            goto retry;
        }
        *lun = (srp_lun >> 48) & 0xff;
        break;

    case 1:
        *lun = (srp_lun >> 48) & 0x3fff;
        break;
    case 2:
        channel = (srp_lun >> 53) & 0x7;
        id = (srp_lun >> 56) & 0x3f;
        *lun = (srp_lun >> 48) & 0x1f;
        break;
    case 3:
        *lun = -1;
        return NULL;
    default:
        abort();
    }

    return scsi_device_find(bus, channel, id, *lun);
}

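/*
 * DMA the IU back into the guest buffer pointed to by the original CRQ
 * and post a response CRQ describing it. Returns 0 on success, non-zero
 * on a DMA or CRQ delivery failure.
 */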
static int vscsi_send_iu(VSCSIState *s, vscsi_req *req,
                         uint64_t length, uint8_t format)
{
    long rc, rc1;

    /* First copy the SRP */
    rc = spapr_vio_dma_write(&s->vdev, req->crq.s.IU_data_ptr,
                             &req->iu, length);
    if (rc) {
        fprintf(stderr, "vscsi_send_iu: DMA write failure !\n");
    }

    req->crq.s.valid = 0x80;
    req->crq.s.format = format;
    req->crq.s.reserved = 0x00;
    req->crq.s.timeout = cpu_to_be16(0x0000);
    req->crq.s.IU_length = cpu_to_be16(length);
    req->crq.s.IU_data_ptr = req->iu.srp.rsp.tag; /* right byte order */

    if (rc == 0) {
        req->crq.s.status = VIOSRP_OK;
    } else {
        req->crq.s.status = VIOSRP_ADAPTER_FAIL;
    }

    rc1 = spapr_vio_send_crq(&s->vdev, req->crq.raw);
    if (rc1) {
        fprintf(stderr, "vscsi_send_iu: Error sending response\n");
        return rc1;
    }

    return rc;
}

static void vscsi_makeup_sense(VSCSIState *s, vscsi_req *req,
                               uint8_t key, uint8_t asc, uint8_t ascq)
{
    req->senselen = SRP_RSP_SENSE_DATA_LEN;

    /* Valid bit and 'current errors' */
    req->sense[0] = (0x1 << 7 | 0x70);
    /* Sense key */
    req->sense[2] = key;
    /* Additional sense length */
    req->sense[7] = 0xa; /* 10 bytes */
    /* Additional sense code */
    req->sense[12] = asc;
    req->sense[13] = ascq;
}

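/*
 * Build and send an SRP_RSP for the request: encode data-in/data-out
 * residuals (negative values mean underflow) and, on a failed status,
 * append any sense data gathered earlier.
 */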
static int vscsi_send_rsp(VSCSIState *s, vscsi_req *req,
                          uint8_t status, int32_t res_in, int32_t res_out)
{
    union viosrp_iu *iu = &req->iu;
    uint64_t tag = iu->srp.rsp.tag;
    int total_len = sizeof(iu->srp.rsp);
    uint8_t sol_not = iu->srp.cmd.sol_not;

    DPRINTF("VSCSI: Sending resp status: 0x%x, "
            "res_in: %d, res_out: %d\n", status, res_in, res_out);

    memset(iu, 0, sizeof(struct srp_rsp));
    iu->srp.rsp.opcode = SRP_RSP;
    iu->srp.rsp.req_lim_delta = cpu_to_be32(1);
    iu->srp.rsp.tag = tag;

    /* Handle residuals */
    if (res_in < 0) {
        iu->srp.rsp.flags |= SRP_RSP_FLAG_DIUNDER;
        res_in = -res_in;
    } else if (res_in) {
        iu->srp.rsp.flags |= SRP_RSP_FLAG_DIOVER;
    }
    if (res_out < 0) {
        iu->srp.rsp.flags |= SRP_RSP_FLAG_DOUNDER;
        res_out = -res_out;
    } else if (res_out) {
        iu->srp.rsp.flags |= SRP_RSP_FLAG_DOOVER;
    }
    iu->srp.rsp.data_in_res_cnt = cpu_to_be32(res_in);
    iu->srp.rsp.data_out_res_cnt = cpu_to_be32(res_out);

    /* We don't do response data */
    /* iu->srp.rsp.flags &= ~SRP_RSP_FLAG_RSPVALID; */
    iu->srp.rsp.resp_data_len = cpu_to_be32(0);

    /* Handle success vs. failure */
    iu->srp.rsp.status = status;
    if (status) {
        iu->srp.rsp.sol_not = (sol_not & 0x04) >> 2;
        if (req->senselen) {
            req->iu.srp.rsp.flags |= SRP_RSP_FLAG_SNSVALID;
            req->iu.srp.rsp.sense_data_len = cpu_to_be32(req->senselen);
            memcpy(req->iu.srp.rsp.data, req->sense, req->senselen);
            total_len += req->senselen;
        }
    } else {
        iu->srp.rsp.sol_not = (sol_not & 0x02) >> 1;
    }

    vscsi_send_iu(s, req, total_len, VIOSRP_SRP_FORMAT);
    return 0;
}

static inline struct srp_direct_buf vscsi_swap_desc(struct srp_direct_buf desc)
{
    desc.va = be64_to_cpu(desc.va);
    desc.len = be32_to_cpu(desc.len);
    return desc;
}

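/*
 * Fetch the n-th data descriptor of the request, byte-swapped and
 * advanced by buf_offset bytes into the segment. For the indirect
 * format, descriptors beyond the partial list embedded in the IU are
 * read from the external table via DMA. Returns 1 if a descriptor with
 * remaining data was fetched, 0 if there is no (more) data, -1 on error.
 */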
static int vscsi_fetch_desc(VSCSIState *s, struct vscsi_req *req,
                            unsigned n, unsigned buf_offset,
                            struct srp_direct_buf *ret)
{
    struct srp_cmd *cmd = &req->iu.srp.cmd;

    switch (req->dma_fmt) {
    case SRP_NO_DATA_DESC: {
        DPRINTF("VSCSI: no data descriptor\n");
        return 0;
    }
    case SRP_DATA_DESC_DIRECT: {
        memcpy(ret, cmd->add_data + req->cdb_offset, sizeof(*ret));
        assert(req->cur_desc_num == 0);
        DPRINTF("VSCSI: direct segment\n");
        break;
    }
    case SRP_DATA_DESC_INDIRECT: {
        struct srp_indirect_buf *tmp = (struct srp_indirect_buf *)
                (cmd->add_data + req->cdb_offset);
        if (n < req->local_desc) {
            *ret = tmp->desc_list[n];
            DPRINTF("VSCSI: indirect segment local tag=0x%x desc#%d/%d\n",
                    req->qtag, n, req->local_desc);

        } else if (n < req->total_desc) {
            int rc;
            struct srp_direct_buf tbl_desc = vscsi_swap_desc(tmp->table_desc);
            unsigned desc_offset = n * sizeof(struct srp_direct_buf);

            if (desc_offset >= tbl_desc.len) {
                DPRINTF("VSCSI: #%d is out of range (%d bytes)\n",
                        n, desc_offset);
                return -1;
            }
            rc = spapr_vio_dma_read(&s->vdev, tbl_desc.va + desc_offset,
                                    ret, sizeof(struct srp_direct_buf));
            if (rc) {
                DPRINTF("VSCSI: spapr_vio_dma_read -> %d reading ext_desc\n",
                        rc);
                return -1;
            }
            DPRINTF("VSCSI: indirect segment ext. tag=0x%x desc#%d/%d { va=%"PRIx64" len=%x }\n",
                    req->qtag, n, req->total_desc, tbl_desc.va, tbl_desc.len);
        } else {
            DPRINTF("VSCSI: Out of descriptors !\n");
            return 0;
        }
        break;
    }
    default:
        fprintf(stderr, "VSCSI: Unknown format %x\n", req->dma_fmt);
        return -1;
    }

    *ret = vscsi_swap_desc(*ret);
    if (buf_offset > ret->len) {
        DPRINTF(" offset=%x is out of a descriptor #%d boundary=%x\n",
                buf_offset, req->cur_desc_num, ret->len);
        return -1;
    }
    ret->va += buf_offset;
    ret->len -= buf_offset;

    DPRINTF(" cur=%d offs=%x ret { va=%"PRIx64" len=%x }\n",
            req->cur_desc_num, req->cur_desc_offset, ret->va, ret->len);

    return ret->len ? 1 : 0;
}

static int vscsi_srp_direct_data(VSCSIState *s, vscsi_req *req,
                                 uint8_t *buf, uint32_t len)
{
    struct srp_direct_buf md;
    uint32_t llen;
    int rc = 0;

    rc = vscsi_fetch_desc(s, req, req->cur_desc_num, req->cur_desc_offset, &md);
    if (rc < 0) {
        return -1;
    } else if (rc == 0) {
        return 0;
    }

    llen = MIN(len, md.len);
    if (llen) {
        if (req->writing) { /* writing = to device = reading from memory */
            rc = spapr_vio_dma_read(&s->vdev, md.va, buf, llen);
        } else {
            rc = spapr_vio_dma_write(&s->vdev, md.va, buf, llen);
        }
    }

    if (rc) {
        return -1;
    }
    req->cur_desc_offset += llen;

    return llen;
}

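/*
 * Transfer up to len bytes for an indirect descriptor list, walking the
 * descriptors one by one and keeping cur_desc_num/cur_desc_offset up to
 * date so the next chunk resumes where this one stopped. Returns the
 * number of bytes actually transferred, or -1 on a DMA error.
 */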
static int vscsi_srp_indirect_data(VSCSIState *s, vscsi_req *req,
                                   uint8_t *buf, uint32_t len)
{
    struct srp_direct_buf md;
    int rc = 0;
    uint32_t llen, total = 0;

    DPRINTF("VSCSI: indirect segment 0x%x bytes\n", len);

    /* While we have data ... */
    while (len) {
        rc = vscsi_fetch_desc(s, req, req->cur_desc_num, req->cur_desc_offset, &md);
        if (rc < 0) {
            return -1;
        } else if (rc == 0) {
            break;
        }

        /* Perform transfer */
        llen = MIN(len, md.len);
        if (req->writing) { /* writing = to device = reading from memory */
            rc = spapr_vio_dma_read(&s->vdev, md.va, buf, llen);
        } else {
            rc = spapr_vio_dma_write(&s->vdev, md.va, buf, llen);
        }
        if (rc) {
            DPRINTF("VSCSI: spapr_vio_dma_r/w(%d) -> %d\n", req->writing, rc);
            break;
        }
        DPRINTF("VSCSI: data: %02x %02x %02x %02x...\n",
                buf[0], buf[1], buf[2], buf[3]);

        len -= llen;
        buf += llen;

        total += llen;

        /* Update current position in the current descriptor */
        req->cur_desc_offset += llen;
        if (md.len == llen) {
            /* Go to the next descriptor if the current one finished */
            ++req->cur_desc_num;
            req->cur_desc_offset = 0;
        }
    }

    return rc ? -1 : total;
}

static int vscsi_srp_transfer_data(VSCSIState *s, vscsi_req *req,
                                   int writing, uint8_t *buf, uint32_t len)
{
    int err = 0;

    switch (req->dma_fmt) {
    case SRP_NO_DATA_DESC:
        DPRINTF("VSCSI: no data desc transfer, skipping 0x%x bytes\n", len);
        break;
    case SRP_DATA_DESC_DIRECT:
        err = vscsi_srp_direct_data(s, req, buf, len);
        break;
    case SRP_DATA_DESC_INDIRECT:
        err = vscsi_srp_indirect_data(s, req, buf, len);
        break;
    }
    return err;
}

/* Bits from linux srp */
static int data_out_desc_size(struct srp_cmd *cmd)
{
    int size = 0;
    uint8_t fmt = cmd->buf_fmt >> 4;

    switch (fmt) {
    case SRP_NO_DATA_DESC:
        break;
    case SRP_DATA_DESC_DIRECT:
        size = sizeof(struct srp_direct_buf);
        break;
    case SRP_DATA_DESC_INDIRECT:
        size = sizeof(struct srp_indirect_buf) +
                sizeof(struct srp_direct_buf) * cmd->data_out_desc_cnt;
        break;
    default:
        break;
    }
    return size;
}

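/*
 * Cache the RDMA parameters of the SRP command in the request: which
 * buffer format applies to the transfer direction, where the data
 * descriptors start after the CDB, and how many descriptors are held
 * locally in the IU vs. in the external indirect table.
 */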
static int vscsi_preprocess_desc(vscsi_req *req)
{
    struct srp_cmd *cmd = &req->iu.srp.cmd;

    req->cdb_offset = cmd->add_cdb_len & ~3;

    if (req->writing) {
        req->dma_fmt = cmd->buf_fmt >> 4;
    } else {
        req->cdb_offset += data_out_desc_size(cmd);
        req->dma_fmt = cmd->buf_fmt & ((1U << 4) - 1);
    }

    switch (req->dma_fmt) {
    case SRP_NO_DATA_DESC:
        break;
    case SRP_DATA_DESC_DIRECT:
        req->total_desc = req->local_desc = 1;
        break;
    case SRP_DATA_DESC_INDIRECT: {
        struct srp_indirect_buf *ind_tmp = (struct srp_indirect_buf *)
                (cmd->add_data + req->cdb_offset);

        req->total_desc = be32_to_cpu(ind_tmp->table_desc.len) /
                          sizeof(struct srp_direct_buf);
        req->local_desc = req->writing ? cmd->data_out_desc_cnt :
                          cmd->data_in_desc_cnt;
        break;
    }
    default:
        fprintf(stderr,
                "vscsi_preprocess_desc: Unknown format %x\n", req->dma_fmt);
        return -1;
    }

    return 0;
}

/* Callback to indicate that the SCSI layer has completed a transfer. */
static void vscsi_transfer_data(SCSIRequest *sreq, uint32_t len)
{
    VSCSIState *s = VIO_SPAPR_VSCSI_DEVICE(sreq->bus->qbus.parent);
    vscsi_req *req = sreq->hba_private;
    uint8_t *buf;
    int rc = 0;

    DPRINTF("VSCSI: SCSI xfer complete tag=0x%x len=0x%x, req=%p\n",
            sreq->tag, len, req);
    if (req == NULL) {
        fprintf(stderr, "VSCSI: Can't find request for tag 0x%x\n", sreq->tag);
        return;
    }

    if (len) {
        buf = scsi_req_get_buf(sreq);
        rc = vscsi_srp_transfer_data(s, req, req->writing, buf, len);
    }
    if (rc < 0) {
        fprintf(stderr, "VSCSI: RDMA error rc=%d!\n", rc);
        req->dma_error = true;
        scsi_req_cancel(req->sreq);
        return;
    }

    /* Start next chunk */
    req->data_len -= rc;
    scsi_req_continue(sreq);
}

/* Callback to indicate that the SCSI layer has completed a command. */
static void vscsi_command_complete(SCSIRequest *sreq, uint32_t status, size_t resid)
{
    VSCSIState *s = VIO_SPAPR_VSCSI_DEVICE(sreq->bus->qbus.parent);
    vscsi_req *req = sreq->hba_private;
    int32_t res_in = 0, res_out = 0;

    DPRINTF("VSCSI: SCSI cmd complete, tag=0x%x status=0x%x, req=%p\n",
            sreq->tag, status, req);
    if (req == NULL) {
        fprintf(stderr, "VSCSI: Can't find request for tag 0x%x\n", sreq->tag);
        return;
    }

    if (status == CHECK_CONDITION) {
        req->senselen = scsi_req_get_sense(req->sreq, req->sense,
                                           sizeof(req->sense));
        DPRINTF("VSCSI: Sense data, %d bytes:\n", req->senselen);
        DPRINTF(" %02x %02x %02x %02x %02x %02x %02x %02x\n",
                req->sense[0], req->sense[1], req->sense[2], req->sense[3],
                req->sense[4], req->sense[5], req->sense[6], req->sense[7]);
        DPRINTF(" %02x %02x %02x %02x %02x %02x %02x %02x\n",
                req->sense[8], req->sense[9], req->sense[10], req->sense[11],
                req->sense[12], req->sense[13], req->sense[14], req->sense[15]);
    }

    DPRINTF("VSCSI: Command complete err=%d\n", status);
    if (status == 0) {
        /* We handle overflows, not underflows for normal commands,
         * but hopefully nobody cares
         */
        if (req->writing) {
            res_out = req->data_len;
        } else {
            res_in = req->data_len;
        }
    }
    vscsi_send_rsp(s, req, status, res_in, res_out);
    vscsi_put_req(req);
}

static void vscsi_request_cancelled(SCSIRequest *sreq)
{
    vscsi_req *req = sreq->hba_private;

    if (req->dma_error) {
        VSCSIState *s = VIO_SPAPR_VSCSI_DEVICE(sreq->bus->qbus.parent);

        vscsi_makeup_sense(s, req, HARDWARE_ERROR, 0, 0);
        vscsi_send_rsp(s, req, CHECK_CONDITION, 0, 0);
    }
    vscsi_put_req(req);
}

static const VMStateDescription vmstate_spapr_vscsi_req = {
    .name = "spapr_vscsi_req",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_BUFFER(crq.raw, vscsi_req),
        VMSTATE_BUFFER(iu.srp.reserved, vscsi_req),
        VMSTATE_UINT32(qtag, vscsi_req),
        VMSTATE_BOOL(active, vscsi_req),
        VMSTATE_UINT32(data_len, vscsi_req),
        VMSTATE_BOOL(writing, vscsi_req),
        VMSTATE_UINT32(senselen, vscsi_req),
        VMSTATE_BUFFER(sense, vscsi_req),
        VMSTATE_UINT8(dma_fmt, vscsi_req),
        VMSTATE_UINT16(local_desc, vscsi_req),
        VMSTATE_UINT16(total_desc, vscsi_req),
        VMSTATE_UINT16(cdb_offset, vscsi_req),
        /* Restart SCSI request from the beginning for now */
        /* VMSTATE_UINT16(cur_desc_num, vscsi_req),
           VMSTATE_UINT16(cur_desc_offset, vscsi_req), */
        VMSTATE_END_OF_LIST()
    },
};

static void vscsi_save_request(QEMUFile *f, SCSIRequest *sreq)
{
    vscsi_req *req = sreq->hba_private;
    assert(req->active);

    vmstate_save_state(f, &vmstate_spapr_vscsi_req, req, NULL);

    DPRINTF("VSCSI: saving tag=%u, current desc#%d, offset=%x\n",
            req->qtag, req->cur_desc_num, req->cur_desc_offset);
}

static void *vscsi_load_request(QEMUFile *f, SCSIRequest *sreq)
{
    SCSIBus *bus = sreq->bus;
    VSCSIState *s = VIO_SPAPR_VSCSI_DEVICE(bus->qbus.parent);
    vscsi_req *req;
    int rc;

    assert(sreq->tag < VSCSI_REQ_LIMIT);
    req = &s->reqs[sreq->tag];
    assert(!req->active);

    memset(req, 0, sizeof(*req));
    rc = vmstate_load_state(f, &vmstate_spapr_vscsi_req, req, 1);
    if (rc) {
        fprintf(stderr, "VSCSI: failed loading request tag#%u\n", sreq->tag);
        return NULL;
    }
    assert(req->active);

    req->sreq = scsi_req_ref(sreq);

    DPRINTF("VSCSI: restoring tag=%u, current desc#%d, offset=%x\n",
            req->qtag, req->cur_desc_num, req->cur_desc_offset);

    return req;
}

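/*
 * Answer an SRP_LOGIN_REQ with a login response advertising our IU size,
 * the supported buffer formats and the number of request slots we expose.
 */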
static void vscsi_process_login(VSCSIState *s, vscsi_req *req)
{
    union viosrp_iu *iu = &req->iu;
    struct srp_login_rsp *rsp = &iu->srp.login_rsp;
    uint64_t tag = iu->srp.rsp.tag;

    DPRINTF("VSCSI: Got login, sending response !\n");

    /* TODO handle case that requested size is wrong and
     * buffer format is wrong
     */
    memset(iu, 0, sizeof(struct srp_login_rsp));
    rsp->opcode = SRP_LOGIN_RSP;
    /* Don't advertise quite as many requests as we support to
     * keep room for management stuff etc...
     */
    rsp->req_lim_delta = cpu_to_be32(VSCSI_REQ_LIMIT - 2);
    rsp->tag = tag;
    rsp->max_it_iu_len = cpu_to_be32(sizeof(union srp_iu));
    rsp->max_ti_iu_len = cpu_to_be32(sizeof(union srp_iu));
    /* direct and indirect */
    rsp->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT);

    vscsi_send_iu(s, req, sizeof(*rsp), VIOSRP_SRP_FORMAT);
}

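/*
 * Emulate a minimal standard INQUIRY response (peripheral qualifier 3,
 * "not capable of supporting a device here") for commands addressed to
 * a LUN that has no backing device.
 */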
static void vscsi_inquiry_no_target(VSCSIState *s, vscsi_req *req)
{
    uint8_t *cdb = req->iu.srp.cmd.cdb;
    uint8_t resp_data[36];
    int rc, len, alen;

    /* We don't do EVPD. Also check that page_code is 0 */
    if ((cdb[1] & 0x01) || cdb[2] != 0) {
        /* Send INVALID FIELD IN CDB */
        vscsi_makeup_sense(s, req, ILLEGAL_REQUEST, 0x24, 0);
        vscsi_send_rsp(s, req, CHECK_CONDITION, 0, 0);
        return;
    }
    alen = cdb[3];
    alen = (alen << 8) | cdb[4];
    len = MIN(alen, 36);

    /* Fake up inquiry using PQ=3 */
    memset(resp_data, 0, 36);
    resp_data[0] = 0x7f;   /* Not capable of supporting a device here */
    resp_data[2] = 0x06;   /* SPC-4 */
    resp_data[3] = 0x02;   /* Resp data format */
    resp_data[4] = 36 - 5; /* Additional length */
    resp_data[7] = 0x10;   /* Sync transfers */
    memcpy(&resp_data[16], "QEMU EMPTY      ", 16);
    memcpy(&resp_data[8], "QEMU    ", 8);

    req->writing = 0;
    vscsi_preprocess_desc(req);
    rc = vscsi_srp_transfer_data(s, req, 0, resp_data, len);
    if (rc < 0) {
        vscsi_makeup_sense(s, req, HARDWARE_ERROR, 0, 0);
        vscsi_send_rsp(s, req, CHECK_CONDITION, 0, 0);
    } else {
        vscsi_send_rsp(s, req, 0, 36 - rc, 0);
    }
}

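/*
 * Emulate REPORT LUNS ourselves: walk every device on the bus, always
 * report a LUN 0 even if none is configured, and use the simple LUN
 * format for channel 0 / id 0 devices and logical unit addressing
 * otherwise.
 */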
static void vscsi_report_luns(VSCSIState *s, vscsi_req *req)
{
    BusChild *kid;
    int i, len, n, rc;
    uint8_t *resp_data;
    bool found_lun0;

    n = 0;
    found_lun0 = false;
    QTAILQ_FOREACH(kid, &s->bus.qbus.children, sibling) {
        SCSIDevice *dev = SCSI_DEVICE(kid->child);

        n += 8;
        if (dev->channel == 0 && dev->id == 0 && dev->lun == 0) {
            found_lun0 = true;
        }
    }
    if (!found_lun0) {
        n += 8;
    }
    len = n + 8;

    resp_data = g_malloc0(len);
    stl_be_p(resp_data, n);
    i = found_lun0 ? 8 : 16;
    QTAILQ_FOREACH(kid, &s->bus.qbus.children, sibling) {
        DeviceState *qdev = kid->child;
        SCSIDevice *dev = SCSI_DEVICE(qdev);

        if (dev->id == 0 && dev->channel == 0) {
            resp_data[i] = 0;        /* Use simple LUN for 0 (SAM5 4.7.7.1) */
        } else {
            resp_data[i] = (2 << 6); /* Otherwise LUN addressing (4.7.7.4)  */
        }
        resp_data[i] |= dev->id;
        resp_data[i+1] = (dev->channel << 5);
        resp_data[i+1] |= dev->lun;
        i += 8;
    }

    vscsi_preprocess_desc(req);
    rc = vscsi_srp_transfer_data(s, req, 0, resp_data, len);
    g_free(resp_data);
    if (rc < 0) {
        vscsi_makeup_sense(s, req, HARDWARE_ERROR, 0, 0);
        vscsi_send_rsp(s, req, CHECK_CONDITION, 0, 0);
    } else {
        vscsi_send_rsp(s, req, 0, len - rc, 0);
    }
}

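/*
 * Queue an SRP_CMD with the QEMU SCSI layer. REPORT LUNS addressed to
 * LUN 0 or the well-known REPORT LUNS LUN is emulated locally, and
 * commands for LUNs with no backing device get a faked INQUIRY or a
 * CHECK CONDITION response.
 */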
static int vscsi_queue_cmd(VSCSIState *s, vscsi_req *req)
{
    union srp_iu *srp = &req->iu.srp;
    SCSIDevice *sdev;
    int n, lun;

    if ((srp->cmd.lun == 0 || be64_to_cpu(srp->cmd.lun) == SRP_REPORT_LUNS_WLUN)
        && srp->cmd.cdb[0] == REPORT_LUNS) {
        vscsi_report_luns(s, req);
        return 0;
    }

    sdev = vscsi_device_find(&s->bus, be64_to_cpu(srp->cmd.lun), &lun);
    if (!sdev) {
        DPRINTF("VSCSI: Command for lun %08" PRIx64 " with no drive\n",
                be64_to_cpu(srp->cmd.lun));
        if (srp->cmd.cdb[0] == INQUIRY) {
            vscsi_inquiry_no_target(s, req);
        } else {
            vscsi_makeup_sense(s, req, ILLEGAL_REQUEST, 0x24, 0x00);
            vscsi_send_rsp(s, req, CHECK_CONDITION, 0, 0);
        }
        return 1;
    }

    req->sreq = scsi_req_new(sdev, req->qtag, lun, srp->cmd.cdb, req);
    n = scsi_req_enqueue(req->sreq);

    DPRINTF("VSCSI: Queued command tag 0x%x CMD 0x%x=%s LUN %d ret: %d\n",
            req->qtag, srp->cmd.cdb[0], scsi_command_name(srp->cmd.cdb[0]),
            lun, n);

    if (n) {
        /* Transfer direction must be set before preprocessing the
         * descriptors
         */
        req->writing = (n < 1);

        /* Preprocess RDMA descriptors */
        vscsi_preprocess_desc(req);

        /* Get transfer direction and initiate transfer */
        if (n > 0) {
            req->data_len = n;
        } else if (n < 0) {
            req->data_len = -n;
        }
        scsi_req_continue(req->sreq);
    }
    /* Don't touch req here, it may have been recycled already */

    return 0;
}

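/*
 * Handle SRP task management functions (abort task, LUN reset,
 * abort/clear task set) and reply with a 4-byte response datum
 * carrying the task management status.
 */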
static int vscsi_process_tsk_mgmt(VSCSIState *s, vscsi_req *req)
{
    union viosrp_iu *iu = &req->iu;
    vscsi_req *tmpreq;
    int i, lun = 0, resp = SRP_TSK_MGMT_COMPLETE;
    SCSIDevice *d;
    uint64_t tag = iu->srp.rsp.tag;
    uint8_t sol_not = iu->srp.cmd.sol_not;

    fprintf(stderr, "vscsi_process_tsk_mgmt %02x\n",
            iu->srp.tsk_mgmt.tsk_mgmt_func);

    d = vscsi_device_find(&s->bus, be64_to_cpu(req->iu.srp.tsk_mgmt.lun), &lun);
    if (!d) {
        resp = SRP_TSK_MGMT_FIELDS_INVALID;
    } else {
        switch (iu->srp.tsk_mgmt.tsk_mgmt_func) {
        case SRP_TSK_ABORT_TASK:
            if (d->lun != lun) {
                resp = SRP_TSK_MGMT_FIELDS_INVALID;
                break;
            }

            tmpreq = vscsi_find_req(s, req->iu.srp.tsk_mgmt.task_tag);
            if (tmpreq && tmpreq->sreq) {
                assert(tmpreq->sreq->hba_private);
                scsi_req_cancel(tmpreq->sreq);
            }
            break;

        case SRP_TSK_LUN_RESET:
            if (d->lun != lun) {
                resp = SRP_TSK_MGMT_FIELDS_INVALID;
                break;
            }

            qdev_reset_all(&d->qdev);
            break;

        case SRP_TSK_ABORT_TASK_SET:
        case SRP_TSK_CLEAR_TASK_SET:
            if (d->lun != lun) {
                resp = SRP_TSK_MGMT_FIELDS_INVALID;
                break;
            }

            for (i = 0; i < VSCSI_REQ_LIMIT; i++) {
                tmpreq = &s->reqs[i];
                if (tmpreq->iu.srp.cmd.lun != req->iu.srp.tsk_mgmt.lun) {
                    continue;
                }
                if (!tmpreq->active || !tmpreq->sreq) {
                    continue;
                }
                assert(tmpreq->sreq->hba_private);
                scsi_req_cancel(tmpreq->sreq);
            }
            break;

        case SRP_TSK_CLEAR_ACA:
            resp = SRP_TSK_MGMT_NOT_SUPPORTED;
            break;

        default:
            resp = SRP_TSK_MGMT_FIELDS_INVALID;
            break;
        }
    }

    /* Compose the SRP response carrying 4 bytes of response data */
    memset(iu, 0, sizeof(struct srp_rsp) + 4);
    iu->srp.rsp.opcode = SRP_RSP;
    iu->srp.rsp.req_lim_delta = cpu_to_be32(1);
    iu->srp.rsp.tag = tag;
    iu->srp.rsp.flags |= SRP_RSP_FLAG_RSPVALID;
    iu->srp.rsp.resp_data_len = cpu_to_be32(4);
    if (resp) {
        iu->srp.rsp.sol_not = (sol_not & 0x04) >> 2;
    } else {
        iu->srp.rsp.sol_not = (sol_not & 0x02) >> 1;
    }

    iu->srp.rsp.status = GOOD;
    iu->srp.rsp.data[3] = resp;

    vscsi_send_iu(s, req, sizeof(iu->srp.rsp) + 4, VIOSRP_SRP_FORMAT);

    return 1;
}

static int vscsi_handle_srp_req(VSCSIState *s, vscsi_req *req)
{
    union srp_iu *srp = &req->iu.srp;
    int done = 1;
    uint8_t opcode = srp->rsp.opcode;

    switch (opcode) {
    case SRP_LOGIN_REQ:
        vscsi_process_login(s, req);
        break;
    case SRP_TSK_MGMT:
        done = vscsi_process_tsk_mgmt(s, req);
        break;
    case SRP_CMD:
        done = vscsi_queue_cmd(s, req);
        break;
    case SRP_LOGIN_RSP:
    case SRP_I_LOGOUT:
    case SRP_T_LOGOUT:
    case SRP_RSP:
    case SRP_CRED_REQ:
    case SRP_CRED_RSP:
    case SRP_AER_REQ:
    case SRP_AER_RSP:
        fprintf(stderr, "VSCSI: Unsupported opcode %02x\n", opcode);
        break;
    default:
        fprintf(stderr, "VSCSI: Unknown type %02x\n", opcode);
    }

    return done;
}

static int vscsi_send_adapter_info(VSCSIState *s, vscsi_req *req)
{
    struct viosrp_adapter_info *sinfo;
    struct mad_adapter_info_data info;
    int rc;

    sinfo = &req->iu.mad.adapter_info;

#if 0 /* What for ? */
    rc = spapr_vio_dma_read(&s->vdev, be64_to_cpu(sinfo->buffer),
                            &info, be16_to_cpu(sinfo->common.length));
    if (rc) {
        fprintf(stderr, "vscsi_send_adapter_info: DMA read failure !\n");
    }
#endif
    memset(&info, 0, sizeof(info));
    strcpy(info.srp_version, SRP_VERSION);
    memcpy(info.partition_name, "qemu", sizeof("qemu"));
    info.partition_number = cpu_to_be32(0);
    info.mad_version = cpu_to_be32(1);
    info.os_type = cpu_to_be32(2);
    info.port_max_txu[0] = cpu_to_be32(VSCSI_MAX_SECTORS << 9);

    rc = spapr_vio_dma_write(&s->vdev, be64_to_cpu(sinfo->buffer),
                             &info, be16_to_cpu(sinfo->common.length));
    if (rc) {
        fprintf(stderr, "vscsi_send_adapter_info: DMA write failure !\n");
    }

    sinfo->common.status = rc ? cpu_to_be32(1) : 0;

    return vscsi_send_iu(s, req, sizeof(*sinfo), VIOSRP_MAD_FORMAT);
}

static int vscsi_send_capabilities(VSCSIState *s, vscsi_req *req)
{
    struct viosrp_capabilities *vcap;
    struct capabilities cap = { };
    uint16_t len, req_len;
    uint64_t buffer;
    int rc;

    vcap = &req->iu.mad.capabilities;
    req_len = len = be16_to_cpu(vcap->common.length);
    buffer = be64_to_cpu(vcap->buffer);
    if (len > sizeof(cap)) {
        fprintf(stderr, "vscsi_send_capabilities: capabilities size mismatch !\n");

        /*
         * Just read and populate the structure that is known.
         * Zero rest of the structure.
         */
        len = sizeof(cap);
    }
    rc = spapr_vio_dma_read(&s->vdev, buffer, &cap, len);
    if (rc) {
        fprintf(stderr, "vscsi_send_capabilities: DMA read failure !\n");
    }

    /*
     * Current implementation does not support any migration or
     * reservation capabilities. Construct the response telling the
     * guest not to use them.
     */
    cap.flags = 0;
    cap.migration.ecl = 0;
    cap.reserve.type = 0;
    cap.migration.common.server_support = 0;
    cap.reserve.common.server_support = 0;

    rc = spapr_vio_dma_write(&s->vdev, buffer, &cap, len);
    if (rc) {
        fprintf(stderr, "vscsi_send_capabilities: DMA write failure !\n");
    }
    if (req_len > len) {
        /*
         * Being paranoid and let's not worry about the error code
         * here. Actual write of the cap is done above.
         */
        spapr_vio_dma_set(&s->vdev, (buffer + len), 0, (req_len - len));
    }
    vcap->common.status = rc ? cpu_to_be32(1) : 0;
    return vscsi_send_iu(s, req, sizeof(*vcap), VIOSRP_MAD_FORMAT);
}

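/*
 * Dispatch management datagram (MAD) requests. Only ADAPTER_INFO and
 * CAPABILITIES are implemented; everything else is flagged back to the
 * guest as VIOSRP_MAD_NOT_SUPPORTED.
 */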
static int vscsi_handle_mad_req(VSCSIState *s, vscsi_req *req)
{
    union mad_iu *mad = &req->iu.mad;
    bool request_handled = false;
    uint64_t retlen = 0;

    switch (be32_to_cpu(mad->empty_iu.common.type)) {
    case VIOSRP_EMPTY_IU_TYPE:
        fprintf(stderr, "Unsupported EMPTY MAD IU\n");
        retlen = sizeof(mad->empty_iu);
        break;
    case VIOSRP_ERROR_LOG_TYPE:
        fprintf(stderr, "Unsupported ERROR LOG MAD IU\n");
        retlen = sizeof(mad->error_log);
        break;
    case VIOSRP_ADAPTER_INFO_TYPE:
        vscsi_send_adapter_info(s, req);
        request_handled = true;
        break;
    case VIOSRP_HOST_CONFIG_TYPE:
        retlen = sizeof(mad->host_config);
        break;
    case VIOSRP_CAPABILITIES_TYPE:
        vscsi_send_capabilities(s, req);
        request_handled = true;
        break;
    default:
        fprintf(stderr, "VSCSI: Unknown MAD type %02x\n",
                be32_to_cpu(mad->empty_iu.common.type));
        /*
         * PAPR+ says that "The length field is set to the length
         * of the data structure(s) used in the command".
         * As we did not recognize the request type, put zero there.
         */
        retlen = 0;
    }

    if (!request_handled) {
        mad->empty_iu.common.status = cpu_to_be16(VIOSRP_MAD_NOT_SUPPORTED);
        vscsi_send_iu(s, req, retlen, VIOSRP_MAD_FORMAT);
    }

    return 1;
}

static void vscsi_got_payload(VSCSIState *s, vscsi_crq *crq)
{
    vscsi_req *req;
    int done;

    req = vscsi_get_req(s);
    if (req == NULL) {
        fprintf(stderr, "VSCSI: Failed to get a request !\n");
        return;
    }

    /* We only support a limited number of descriptors, we know
     * the ibmvscsi driver uses up to 10 max, so it should fit
     * in our 256 bytes IUs. If not we'll have to increase the size
     * of the structure.
     */
    if (crq->s.IU_length > sizeof(union viosrp_iu)) {
        fprintf(stderr, "VSCSI: SRP IU too long (%d bytes) !\n",
                crq->s.IU_length);
        vscsi_put_req(req);
        return;
    }

    /* XXX Handle failure differently ? */
    if (spapr_vio_dma_read(&s->vdev, crq->s.IU_data_ptr, &req->iu,
                           crq->s.IU_length)) {
        fprintf(stderr, "vscsi_got_payload: DMA read failure !\n");
        vscsi_put_req(req);
        return;
    }
    memcpy(&req->crq, crq, sizeof(vscsi_crq));

    if (crq->s.format == VIOSRP_MAD_FORMAT) {
        done = vscsi_handle_mad_req(s, req);
    } else {
        done = vscsi_handle_srp_req(s, req);
    }

    if (done) {
        vscsi_put_req(req);
    }
}

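/*
 * CRQ entry point: byte-swap the incoming 16-byte CRQ, answer
 * initialization requests, and route payload CRQs to the SRP or MAD
 * handlers according to their format field.
 */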
static int vscsi_do_crq(struct VIOsPAPRDevice *dev, uint8_t *crq_data)
{
    VSCSIState *s = VIO_SPAPR_VSCSI_DEVICE(dev);
    vscsi_crq crq;

    memcpy(crq.raw, crq_data, 16);
    crq.s.timeout = be16_to_cpu(crq.s.timeout);
    crq.s.IU_length = be16_to_cpu(crq.s.IU_length);
    crq.s.IU_data_ptr = be64_to_cpu(crq.s.IU_data_ptr);

    DPRINTF("VSCSI: do_crq %02x %02x ...\n", crq.raw[0], crq.raw[1]);

    switch (crq.s.valid) {
    case 0xc0: /* Init command/response */

        /* Respond to initialization request */
        if (crq.s.format == 0x01) {
            memset(crq.raw, 0, 16);
            crq.s.valid = 0xc0;
            crq.s.format = 0x02;
            spapr_vio_send_crq(dev, crq.raw);
        }

        /* Note that in hotplug cases, we might get a 0x02
         * as a result of us emitting the init request
         */
        break;
    case 0xff: /* Link event */

        /* Not handled for now */
        break;
    case 0x80: /* Payloads */
        switch (crq.s.format) {
        case VIOSRP_SRP_FORMAT: /* AKA VSCSI request */
        case VIOSRP_MAD_FORMAT: /* AKA VSCSI response */
            vscsi_got_payload(s, &crq);
            break;
        case VIOSRP_OS400_FORMAT:
        case VIOSRP_AIX_FORMAT:
        case VIOSRP_LINUX_FORMAT:
        case VIOSRP_INLINE_FORMAT:
            fprintf(stderr, "vscsi_do_crq: Unsupported payload format %02x\n",
                    crq.s.format);
            break;
        default:
            fprintf(stderr, "vscsi_do_crq: Unknown payload format %02x\n",
                    crq.s.format);
        }
        break;
    default:
        fprintf(stderr, "vscsi_do_crq: unknown CRQ %02x %02x ...\n",
                crq.raw[0], crq.raw[1]);
    }

    return 0;
}

static const struct SCSIBusInfo vscsi_scsi_info = {
    .tcq = true,
    .max_channel = 7, /* logical unit addressing format */
    .max_target = 63,
    .max_lun = 31,

    .transfer_data = vscsi_transfer_data,
    .complete = vscsi_command_complete,
    .cancel = vscsi_request_cancelled,
    .save_request = vscsi_save_request,
    .load_request = vscsi_load_request,
};

static void spapr_vscsi_reset(VIOsPAPRDevice *dev)
{
    VSCSIState *s = VIO_SPAPR_VSCSI_DEVICE(dev);
    int i;

    memset(s->reqs, 0, sizeof(s->reqs));
    for (i = 0; i < VSCSI_REQ_LIMIT; i++) {
        s->reqs[i].qtag = i;
    }
}

static void spapr_vscsi_realize(VIOsPAPRDevice *dev, Error **errp)
{
    VSCSIState *s = VIO_SPAPR_VSCSI_DEVICE(dev);

    dev->crq.SendFunc = vscsi_do_crq;

    scsi_bus_new(&s->bus, sizeof(s->bus), DEVICE(dev),
                 &vscsi_scsi_info, NULL);
    if (!dev->qdev.hotplugged) {
        scsi_bus_legacy_handle_cmdline(&s->bus, errp);
    }
}

void spapr_vscsi_create(VIOsPAPRBus *bus)
{
    DeviceState *dev;

    dev = qdev_create(&bus->bus, "spapr-vscsi");
    qdev_init_nofail(dev);
}

static int spapr_vscsi_devnode(VIOsPAPRDevice *dev, void *fdt, int node_off)
{
    int ret;

    ret = fdt_setprop_cell(fdt, node_off, "#address-cells", 2);
    if (ret < 0) {
        return ret;
    }

    ret = fdt_setprop_cell(fdt, node_off, "#size-cells", 0);
    if (ret < 0) {
        return ret;
    }

    return 0;
}

static Property spapr_vscsi_properties[] = {
    DEFINE_SPAPR_PROPERTIES(VSCSIState, vdev),
    DEFINE_PROP_END_OF_LIST(),
};

static const VMStateDescription vmstate_spapr_vscsi = {
    .name = "spapr_vscsi",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_SPAPR_VIO(vdev, VSCSIState),
        /* VSCSI state */
        /* ???? */

        VMSTATE_END_OF_LIST()
    },
};

static void spapr_vscsi_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VIOsPAPRDeviceClass *k = VIO_SPAPR_DEVICE_CLASS(klass);

    k->realize = spapr_vscsi_realize;
    k->reset = spapr_vscsi_reset;
    k->devnode = spapr_vscsi_devnode;
    k->dt_name = "v-scsi";
    k->dt_type = "vscsi";
    k->dt_compatible = "IBM,v-scsi";
    k->signal_mask = 0x00000001;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    dc->props = spapr_vscsi_properties;
    k->rtce_window_size = 0x10000000;
    dc->vmsd = &vmstate_spapr_vscsi;
}

static const TypeInfo spapr_vscsi_info = {
    .name          = TYPE_VIO_SPAPR_VSCSI_DEVICE,
    .parent        = TYPE_VIO_SPAPR_DEVICE,
    .instance_size = sizeof(VSCSIState),
    .class_init    = spapr_vscsi_class_init,
};

static void spapr_vscsi_register_types(void)
{
    type_register_static(&spapr_vscsi_info);
}

type_init(spapr_vscsi_register_types)