/*
 * SCSI Disk Emulator
 *
 * Copyright (c) 2002 Nate Lawson.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/share/examples/scsi_target/scsi_target.c,v 1.5.2.5 2003/02/18 22:07:10 njl Exp $
 * $DragonFly: src/share/examples/scsi_target/scsi_target.c,v 1.2 2003/06/17 04:36:57 dillon Exp $
 */

#include <sys/types.h>
#include <errno.h>
#include <err.h>
#include <fcntl.h>
#include <signal.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sysexits.h>
#include <unistd.h>
#include <aio.h>
#include <assert.h>
#include <sys/stat.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/param.h>
#include <cam/cam_queue.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_targetio.h>
#include <cam/scsi/scsi_message.h>
#include "scsi_target.h"

/* Maximum amount to transfer per CTIO */
#define MAX_XFER	MAXPHYS
/* Maximum number of allocated CTIOs */
#define MAX_CTIOS	32
/* Maximum sector size for emulated volume */
#define MAX_SECTOR	32768

/* Global variables */
int		debug;
u_int32_t	volume_size;
size_t		sector_size;
size_t		buf_size;

/* Local variables */
static int	targ_fd;
static int	kq_fd;
static int	file_fd;
static int	num_ctios;
static struct	ccb_queue	pending_queue;
static struct	ccb_queue	work_queue;
static struct	ioc_enable_lun	ioc_enlun = {
	CAM_BUS_WILDCARD,
	CAM_TARGET_WILDCARD,
	CAM_LUN_WILDCARD
};

/* Local functions */
static void		cleanup(void);
static int		init_ccbs(void);
static void		request_loop(void);
static void		handle_read(void);
/* static int		work_atio(struct ccb_accept_tio *); */
static void		queue_io(struct ccb_scsiio *);
static void		run_queue(struct ccb_accept_tio *);
static int		work_inot(struct ccb_immed_notify *);
static struct ccb_scsiio *
			get_ctio(void);
/* static void		free_ccb(union ccb *); */
static cam_status	get_sim_flags(u_int16_t *);
static void		rel_simq(void);
static void		abort_all_pending(void);
static void		usage(void);
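
/*
 * Emulator flow: parse options, open and size the backing store, probe
 * for working kernel AIO, claim a free /dev/targ* control device,
 * enable the requested bus:target:lun, and then enter request_loop()
 * to service ATIO/INOT/CTIO traffic until a termination signal arrives.
 */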
int
main(int argc, char *argv[])
{
	int ch, unit;
	char *file_name, targname[16];
	u_int16_t req_flags, sim_flags;
	off_t user_size;

	/* Initialize */
	debug = 0;
	req_flags = sim_flags = 0;
	user_size = 0;
	targ_fd = file_fd = kq_fd = -1;
	num_ctios = 0;
	sector_size = SECTOR_SIZE;
	buf_size = MAXPHYS;

	/* Prepare resource pools */
	TAILQ_INIT(&pending_queue);
	TAILQ_INIT(&work_queue);

	while ((ch = getopt(argc, argv, "AdSTb:c:s:W:")) != -1) {
		switch(ch) {
		case 'A':
			req_flags |= SID_Addr16;
			break;
		case 'd':
			debug = 1;
			break;
		case 'S':
			req_flags |= SID_Sync;
			break;
		case 'T':
			req_flags |= SID_CmdQue;
			break;
		case 'b':
			buf_size = atoi(optarg);
			if (buf_size < 256 || buf_size > MAX_XFER)
				errx(1, "Unreasonable buf size: %s", optarg);
			break;
		case 'c':
			sector_size = atoi(optarg);
			if (sector_size < 512 || sector_size > MAX_SECTOR)
				errx(1, "Unreasonable sector size: %s", optarg);
			break;
		case 's':
			user_size = strtoll(optarg, NULL, /*base*/10);
			if (user_size < 0)
				errx(1, "Unreasonable volume size: %s", optarg);
			break;
		case 'W':
			req_flags &= ~(SID_WBus16 | SID_WBus32);
			switch (atoi(optarg)) {
			case 8:
				/* Leave req_flags zeroed */
				break;
			case 16:
				req_flags |= SID_WBus16;
				break;
			case 32:
				req_flags |= SID_WBus32;
				break;
			default:
				warnx("Width %s not supported", optarg);
				usage();
				/* NOTREACHED */
			}
			break;
		default:
			usage();
			/* NOTREACHED */
		}
	}
	argc -= optind;
	argv += optind;

	if (argc != 2)
		usage();

	sscanf(argv[0], "%u:%u:%u", &ioc_enlun.path_id, &ioc_enlun.target_id,
	       &ioc_enlun.lun_id);
	file_name = argv[1];

	if (ioc_enlun.path_id == CAM_BUS_WILDCARD ||
	    ioc_enlun.target_id == CAM_TARGET_WILDCARD ||
	    ioc_enlun.lun_id == CAM_LUN_WILDCARD) {
		warnx("Incomplete target path specified");
		usage();
		/* NOTREACHED */
	}
	/* We don't support any vendor-specific commands */
	ioc_enlun.grp6_len = 0;
	ioc_enlun.grp7_len = 0;

	/* Open backing store for IO */
	file_fd = open(file_name, O_RDWR);
	if (file_fd < 0)
		err(1, "open backing store file");

	/* Check backing store size or use the size user gave us */
	if (user_size == 0) {
		struct stat st;

		if (fstat(file_fd, &st) < 0)
			err(1, "fstat file");
		volume_size = st.st_size / sector_size;
	} else {
		volume_size = user_size / sector_size;
	}
	if (volume_size == 0)
		errx(1, "volume must be larger than %zu", sector_size);

	{
		struct aiocb aio, *aiop;

		/* Make sure we have working AIO support */
		memset(&aio, 0, sizeof(aio));
		aio.aio_buf = malloc(sector_size);
		if (aio.aio_buf == NULL)
			err(1, "malloc");
		aio.aio_fildes = file_fd;
		aio.aio_offset = 0;
		aio.aio_nbytes = sector_size;
		signal(SIGSYS, SIG_IGN);
		if (aio_read(&aio) != 0) {
			printf("You must enable VFS_AIO in your kernel "
			       "or load the aio(4) module.\n");
			err(1, "aio_read");
		}
		if (aio_waitcomplete(&aiop, NULL) != sector_size)
			err(1, "aio_waitcomplete");
		assert(aiop == &aio);
		signal(SIGSYS, SIG_DFL);
		free((void *)aio.aio_buf);
		if (debug)
			warnx("aio support tested ok");
	}
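
	/*
	 * The SIGSYS dance above matters: on a kernel without AIO
	 * compiled in, aio_read(2) raises SIGSYS, which would kill the
	 * process by default.  With the signal ignored, the call instead
	 * fails with ENOSYS, letting us print a useful diagnostic.
	 */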

	/* Go through all the control devices and find one that isn't busy. */
	unit = 0;
	do {
		snprintf(targname, sizeof(targname), "/dev/targ%d", unit++);
		targ_fd = open(targname, O_RDWR);
	} while (targ_fd < 0 && errno == EBUSY);

	if (targ_fd < 0)
		err(1, "Tried to open %d devices, none available", unit);

	/* The first three are handled by kevent() later */
	signal(SIGHUP, SIG_IGN);
	signal(SIGINT, SIG_IGN);
	signal(SIGTERM, SIG_IGN);
	signal(SIGPROF, SIG_IGN);
	signal(SIGALRM, SIG_IGN);
	signal(SIGSTOP, SIG_IGN);
	signal(SIGTSTP, SIG_IGN);

	/* Register a cleanup handler to run when exiting */
	atexit(cleanup);

	/* Enable listening on the specified LUN */
	if (ioctl(targ_fd, TARGIOCENABLE, &ioc_enlun) != 0)
		err(1, "TARGIOCENABLE");

	/* Enable debugging if requested */
	if (debug) {
		if (ioctl(targ_fd, TARGIOCDEBUG, &debug) != 0)
			err(1, "TARGIOCDEBUG");
	}

	/* Set up inquiry data according to what the SIM supports */
	if (get_sim_flags(&sim_flags) != CAM_REQ_CMP)
		errx(1, "get_sim_flags");
	if (tcmd_init(req_flags, sim_flags) != 0)
		errx(1, "Initializing tcmd subsystem failed");

	/* Queue ATIOs and INOTs on descriptor */
	if (init_ccbs() != 0)
		errx(1, "init_ccbs failed");

	if (debug)
		warnx("main loop beginning");
	request_loop();

	exit(0);
}
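
/*
 * atexit(3) handler: disable the LUN, close the control device, and
 * release any CCBs still sitting on the pending and work queues.
 */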
static void
cleanup()
{
	struct ccb_hdr *ccb_h;

	if (debug) {
		warnx("cleanup called");
		debug = 0;
		ioctl(targ_fd, TARGIOCDEBUG, &debug);
	}
	ioctl(targ_fd, TARGIOCDISABLE, NULL);
	close(targ_fd);

	while ((ccb_h = TAILQ_FIRST(&pending_queue)) != NULL) {
		TAILQ_REMOVE(&pending_queue, ccb_h, periph_links.tqe);
		free_ccb((union ccb *)ccb_h);
	}
	while ((ccb_h = TAILQ_FIRST(&work_queue)) != NULL) {
		TAILQ_REMOVE(&work_queue, ccb_h, periph_links.tqe);
		free_ccb((union ccb *)ccb_h);
	}

	if (kq_fd != -1)
		close(kq_fd);
}

/* Allocate ATIOs/INOTs and queue on HBA */
static int
init_ccbs()
{
	int i;

	for (i = 0; i < MAX_INITIATORS; i++) {
		struct ccb_accept_tio *atio;
		struct atio_descr *a_descr;
		struct ccb_immed_notify *inot;

		atio = (struct ccb_accept_tio *)malloc(sizeof(*atio));
		if (atio == NULL) {
			warn("malloc ATIO");
			return (-1);
		}
		a_descr = (struct atio_descr *)malloc(sizeof(*a_descr));
		if (a_descr == NULL) {
			free(atio);
			warn("malloc atio_descr");
			return (-1);
		}
		atio->ccb_h.func_code = XPT_ACCEPT_TARGET_IO;
		atio->ccb_h.targ_descr = a_descr;
		send_ccb((union ccb *)atio, /*priority*/1);

		inot = (struct ccb_immed_notify *)malloc(sizeof(*inot));
		if (inot == NULL) {
			warn("malloc INOT");
			return (-1);
		}
		inot->ccb_h.func_code = XPT_IMMED_NOTIFY;
		send_ccb((union ccb *)inot, /*priority*/1);
	}

	return (0);
}

static void
request_loop()
{
	struct kevent events[MAX_EVENTS];
	struct timespec ts, *tptr;
	int quit;

	/* Register kqueue for event notification */
	if ((kq_fd = kqueue()) < 0)
		err(1, "init kqueue");

	/* Set up some default events */
	EV_SET(&events[0], SIGHUP, EVFILT_SIGNAL, EV_ADD|EV_ENABLE, 0, 0, 0);
	EV_SET(&events[1], SIGINT, EVFILT_SIGNAL, EV_ADD|EV_ENABLE, 0, 0, 0);
	EV_SET(&events[2], SIGTERM, EVFILT_SIGNAL, EV_ADD|EV_ENABLE, 0, 0, 0);
	EV_SET(&events[3], targ_fd, EVFILT_READ, EV_ADD|EV_ENABLE, 0, 0, 0);
	if (kevent(kq_fd, events, 4, NULL, 0, NULL) < 0)
		err(1, "kevent signal registration");

	ts.tv_sec = 0;
	ts.tv_nsec = 0;
	tptr = NULL;
	quit = 0;

	/* Loop until user signal */
	while (quit == 0) {
		int retval, i;
		struct ccb_hdr *ccb_h;

		/* Check for the next signal, read ready, or AIO completion */
		retval = kevent(kq_fd, NULL, 0, events, MAX_EVENTS, tptr);
		if (retval < 0) {
			if (errno == EINTR) {
				if (debug)
					warnx("EINTR, looping");
				continue;
			} else {
				err(1, "kevent failed");
			}
		} else if (retval > MAX_EVENTS) {
			errx(1, "kevent returned more events than allocated?");
		}

		/* Process all received events. */
		for (i = 0; i < retval; i++) {
			if ((events[i].flags & EV_ERROR) != 0)
				errx(1, "kevent registration failed");

			switch (events[i].filter) {
			case EVFILT_READ:
				if (debug)
					warnx("read ready");
				handle_read();
				break;
			case EVFILT_AIO:
			{
				struct ccb_scsiio *ctio;
				struct ctio_descr *c_descr;

				if (debug)
					warnx("aio ready");

				ctio = (struct ccb_scsiio *)events[i].udata;
				c_descr = (struct ctio_descr *)
					  ctio->ccb_h.targ_descr;
				c_descr->event = AIO_DONE;
				/* Queue on the appropriate ATIO */
				queue_io(ctio);
				/* Process any queued completions. */
				run_queue(c_descr->atio);
				break;
			}
			case EVFILT_SIGNAL:
				if (debug)
					warnx("signal ready, setting quit");
				quit = 1;
				break;
			default:
				warnx("unknown event %#x", events[i].filter);
				break;
			}

			if (debug)
				warnx("event done");
		}

		/* Grab the first CCB and perform one work unit. */
		if ((ccb_h = TAILQ_FIRST(&work_queue)) != NULL) {
			union ccb *ccb;

			ccb = (union ccb *)ccb_h;
			switch (ccb_h->func_code) {
			case XPT_ACCEPT_TARGET_IO:
				/* Start one more transfer. */
				retval = work_atio(&ccb->atio);
				break;
			case XPT_IMMED_NOTIFY:
				retval = work_inot(&ccb->cin);
				break;
			default:
				warnx("Unhandled ccb type %#x on workq",
				      ccb_h->func_code);
				abort();
				/* NOTREACHED */
			}

			/* Assume work function handled the exception */
			if ((ccb_h->status & CAM_DEV_QFRZN) != 0) {
				if (debug) {
					warnx("Queue frozen receiving CCB, "
					      "releasing");
				}
				rel_simq();
			}

			/* No more work needed for this command. */
			if (retval == 0) {
				TAILQ_REMOVE(&work_queue, ccb_h,
					     periph_links.tqe);
			}
		}

		/*
		 * Poll for new events (i.e. completions) while we
		 * are processing CCBs on the work_queue.  Once it's
		 * empty, use an infinite wait.
		 */
		if (!TAILQ_EMPTY(&work_queue))
			tptr = &ts;
		else
			tptr = NULL;
	}
}

/* CCBs are ready from the kernel */
static void
handle_read()
{
	union ccb *ccb_array[MAX_INITIATORS], *ccb;
	int ccb_count, i;

	ccb_count = read(targ_fd, ccb_array, sizeof(ccb_array));
	if (ccb_count <= 0) {
		warn("read ccb ptrs");
		return;
	}
	ccb_count /= sizeof(union ccb *);
	if (ccb_count < 1) {
		warnx("truncated read ccb ptr?");
		return;
	}

	for (i = 0; i < ccb_count; i++) {
		ccb = ccb_array[i];
		TAILQ_REMOVE(&pending_queue, &ccb->ccb_h, periph_links.tqe);

		switch (ccb->ccb_h.func_code) {
		case XPT_ACCEPT_TARGET_IO:
		{
			struct ccb_accept_tio *atio;
			struct atio_descr *a_descr;

			/* Initialize ATIO descr for this transaction */
			atio = &ccb->atio;
			a_descr = (struct atio_descr *)atio->ccb_h.targ_descr;
			bzero(a_descr, sizeof(*a_descr));
			TAILQ_INIT(&a_descr->cmplt_io);
			a_descr->flags = atio->ccb_h.flags &
				(CAM_DIS_DISCONNECT | CAM_TAG_ACTION_VALID);
			/* XXX add a_descr->priority */
			if ((atio->ccb_h.flags & CAM_CDB_POINTER) == 0)
				a_descr->cdb = atio->cdb_io.cdb_bytes;
			else
				a_descr->cdb = atio->cdb_io.cdb_ptr;

			/* ATIOs are processed in FIFO order */
			TAILQ_INSERT_TAIL(&work_queue, &ccb->ccb_h,
					  periph_links.tqe);
			break;
		}
		case XPT_CONT_TARGET_IO:
		{
			struct ccb_scsiio *ctio;
			struct ctio_descr *c_descr;

			ctio = &ccb->ctio;
			c_descr = (struct ctio_descr *)ctio->ccb_h.targ_descr;
			c_descr->event = CTIO_DONE;
			/* Queue on the appropriate ATIO */
			queue_io(ctio);
			/* Process any queued completions. */
			run_queue(c_descr->atio);
			break;
		}
		case XPT_IMMED_NOTIFY:
			/* INOTs are handled with priority */
			TAILQ_INSERT_HEAD(&work_queue, &ccb->ccb_h,
					  periph_links.tqe);
			break;
		default:
			warnx("Unhandled ccb type %#x in handle_read",
			      ccb->ccb_h.func_code);
			break;
		}
	}
}

/* Process an ATIO CCB from the kernel */
int
work_atio(struct ccb_accept_tio *atio)
{
	struct ccb_scsiio *ctio;
	struct atio_descr *a_descr;
	struct ctio_descr *c_descr;
	cam_status status;
	int ret;

	if (debug)
		warnx("Working on ATIO %p", atio);

	a_descr = (struct atio_descr *)atio->ccb_h.targ_descr;

	/* Get a CTIO and initialize it according to our known parameters */
	ctio = get_ctio();
	if (ctio == NULL)
		return (1);
	ret = 0;
	ctio->ccb_h.flags = a_descr->flags;
	ctio->tag_id = atio->tag_id;
	ctio->init_id = atio->init_id;
	/* XXX priority needs to be added to a_descr */
	c_descr = (struct ctio_descr *)ctio->ccb_h.targ_descr;
	c_descr->atio = atio;
	if ((a_descr->flags & CAM_DIR_IN) != 0)
		c_descr->offset = a_descr->base_off + a_descr->targ_req;
	else if ((a_descr->flags & CAM_DIR_MASK) == CAM_DIR_OUT)
		c_descr->offset = a_descr->base_off + a_descr->init_req;

	/*
	 * Return a check condition if there was an error while
	 * receiving this ATIO.
	 */
	if (atio->sense_len != 0) {
		struct scsi_sense_data *sense;

		if (debug) {
			warnx("ATIO with %u bytes sense received",
			      atio->sense_len);
		}
		sense = &atio->sense_data;
		tcmd_sense(ctio->init_id, ctio, sense->flags,
			   sense->add_sense_code, sense->add_sense_code_qual);
		send_ccb((union ccb *)ctio, /*priority*/1);
		return (0);
	}

	status = atio->ccb_h.status & CAM_STATUS_MASK;
	switch (status) {
	case CAM_CDB_RECVD:
		ret = tcmd_handle(atio, ctio, ATIO_WORK);
		break;
	case CAM_REQ_ABORTED:
		/* Requeue on HBA */
		TAILQ_REMOVE(&work_queue, &atio->ccb_h, periph_links.tqe);
		send_ccb((union ccb *)atio, /*priority*/1);
		ret = 1;
		break;
	default:
		warnx("ATIO completed with unhandled status %#x", status);
		abort();
		/* NOTREACHED */
		break;
	}

	return (ret);
}
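
/*
 * Insert a completed I/O (AIO or CTIO) on its ATIO's completion queue,
 * kept sorted by byte offset so run_queue() can retire completions
 * strictly in order.
 */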
static void
queue_io(struct ccb_scsiio *ctio)
{
	struct ccb_hdr *ccb_h;
	struct io_queue *ioq;
	struct ctio_descr *c_descr, *curr_descr;

	c_descr = (struct ctio_descr *)ctio->ccb_h.targ_descr;
	/* If the completion is for a specific ATIO, queue in order */
	if (c_descr->atio != NULL) {
		struct atio_descr *a_descr;

		a_descr = (struct atio_descr *)c_descr->atio->ccb_h.targ_descr;
		ioq = &a_descr->cmplt_io;
	} else {
		errx(1, "CTIO %p has NULL ATIO", ctio);
	}

	/* Insert in order, sorted by offset */
	if (!TAILQ_EMPTY(ioq)) {
		TAILQ_FOREACH_REVERSE(ccb_h, ioq, io_queue, periph_links.tqe) {
			curr_descr = (struct ctio_descr *)ccb_h->targ_descr;
			if (curr_descr->offset <= c_descr->offset) {
				TAILQ_INSERT_AFTER(ioq, ccb_h, &ctio->ccb_h,
						   periph_links.tqe);
				break;
			}
			if (TAILQ_PREV(ccb_h, io_queue, periph_links.tqe)
			    == NULL) {
				TAILQ_INSERT_BEFORE(ccb_h, &ctio->ccb_h,
						    periph_links.tqe);
				break;
			}
		}
	} else {
		TAILQ_INSERT_HEAD(ioq, &ctio->ccb_h, periph_links.tqe);
	}
}

/*
 * Go through all completed AIO/CTIOs for a given ATIO and advance data
 * counts, start continuation IO, etc.
 */
static void
run_queue(struct ccb_accept_tio *atio)
{
	struct atio_descr *a_descr;
	struct ccb_hdr *ccb_h;
	int sent_status, event;

	if (atio == NULL)
		return;

	a_descr = (struct atio_descr *)atio->ccb_h.targ_descr;

	while ((ccb_h = TAILQ_FIRST(&a_descr->cmplt_io)) != NULL) {
		struct ccb_scsiio *ctio;
		struct ctio_descr *c_descr;

		ctio = (struct ccb_scsiio *)ccb_h;
		c_descr = (struct ctio_descr *)ctio->ccb_h.targ_descr;

		/* If completed item is in range, call handler */
		if ((c_descr->event == AIO_DONE &&
		    c_descr->offset == a_descr->base_off + a_descr->targ_ack)
		 || (c_descr->event == CTIO_DONE &&
		    c_descr->offset == a_descr->base_off + a_descr->init_ack)) {
			sent_status = (ccb_h->flags & CAM_SEND_STATUS) != 0;
			event = c_descr->event;

			TAILQ_REMOVE(&a_descr->cmplt_io, ccb_h,
				     periph_links.tqe);
			tcmd_handle(atio, ctio, c_descr->event);

			/* If entire transfer complete, send back ATIO */
			if (sent_status != 0 && event == CTIO_DONE)
				send_ccb((union ccb *)atio, /*priority*/1);
		} else {
			/* Gap in offsets so wait until later callback */
			if (debug)
				warnx("IO %p out of order", ccb_h);
			break;
		}
	}
}
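
/*
 * Process an INOT CCB: handle bus resets and BDRs by posting unit
 * attentions and aborting outstanding work, log received messages,
 * then requeue the INOT on the SIM.
 */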
static int
work_inot(struct ccb_immed_notify *inot)
{
	cam_status status;
	int sense;

	if (debug)
		warnx("Working on INOT %p", inot);

	status = inot->ccb_h.status;
	sense = (status & CAM_AUTOSNS_VALID) != 0;
	status &= CAM_STATUS_MASK;

	switch (status) {
	case CAM_SCSI_BUS_RESET:
		tcmd_ua(CAM_TARGET_WILDCARD, UA_BUS_RESET);
		abort_all_pending();
		break;
	case CAM_BDR_SENT:
		tcmd_ua(CAM_TARGET_WILDCARD, UA_BDR);
		abort_all_pending();
		break;
	case CAM_MESSAGE_RECV:
		switch (inot->message_args[0]) {
		case MSG_TASK_COMPLETE:
		case MSG_INITIATOR_DET_ERR:
		case MSG_ABORT_TASK_SET:
		case MSG_MESSAGE_REJECT:
		case MSG_NOOP:
		case MSG_PARITY_ERROR:
		case MSG_TARGET_RESET:
		case MSG_ABORT_TASK:
		case MSG_CLEAR_TASK_SET:
		default:
			warnx("INOT message %#x", inot->message_args[0]);
			break;
		}
		break;
	case CAM_REQ_ABORTED:
		warnx("INOT %p aborted", inot);
		break;
	default:
		warnx("Unhandled INOT status %#x", status);
		break;
	}

	/* If there is sense data, use it */
	if (sense != 0) {
		struct scsi_sense_data *sense_data;

		sense_data = &inot->sense_data;
		tcmd_sense(inot->initiator_id, NULL, sense_data->flags,
			   sense_data->add_sense_code,
			   sense_data->add_sense_code_qual);
		if (debug)
			warnx("INOT has sense: %#x", sense_data->flags);
	}

	/* Requeue on SIM */
	TAILQ_REMOVE(&work_queue, &inot->ccb_h, periph_links.tqe);
	send_ccb((union ccb *)inot, /*priority*/1);

	return (1);
}
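
/*
 * Hand a CCB to the kernel target driver.  Queued CCB types are
 * tracked on pending_queue; the user/kernel hand-off is a write(2)
 * of the CCB pointer to the control device.
 */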
void
send_ccb(union ccb *ccb, int priority)
{
	if (debug)
		warnx("sending ccb (%#x)", ccb->ccb_h.func_code);
	ccb->ccb_h.pinfo.priority = priority;
	if (XPT_FC_IS_QUEUED(ccb)) {
		TAILQ_INSERT_TAIL(&pending_queue, &ccb->ccb_h,
				  periph_links.tqe);
	}
	if (write(targ_fd, &ccb, sizeof(ccb)) != sizeof(ccb)) {
		warn("write ccb");
		ccb->ccb_h.status = CAM_PROVIDE_FAIL;
	}
}

/* Allocate a CTIO/descr/buf combo; at most MAX_CTIOS may be outstanding */
static struct ccb_scsiio *
get_ctio()
{
	struct ccb_scsiio *ctio;
	struct ctio_descr *c_descr;
	struct sigevent *se;

	if (num_ctios == MAX_CTIOS)
		return (NULL);

	ctio = (struct ccb_scsiio *)malloc(sizeof(*ctio));
	if (ctio == NULL) {
		warn("malloc CTIO");
		return (NULL);
	}
	c_descr = (struct ctio_descr *)malloc(sizeof(*c_descr));
	if (c_descr == NULL) {
		free(ctio);
		warn("malloc ctio_descr");
		return (NULL);
	}
	c_descr->buf = malloc(buf_size);
	if (c_descr->buf == NULL) {
		free(c_descr);
		free(ctio);
		warn("malloc backing store");
		return (NULL);
	}
	num_ctios++;

	/* Initialize CTIO, CTIO descr, and AIO */
	ctio->ccb_h.func_code = XPT_CONT_TARGET_IO;
	ctio->ccb_h.retry_count = 2;
	ctio->ccb_h.timeout = CAM_TIME_INFINITY;
	ctio->data_ptr = c_descr->buf;
	ctio->ccb_h.targ_descr = c_descr;
	c_descr->aiocb.aio_buf = c_descr->buf;
	c_descr->aiocb.aio_fildes = file_fd;
	se = &c_descr->aiocb.aio_sigevent;
	se->sigev_notify = SIGEV_KEVENT;
	se->sigev_notify_kqueue = kq_fd;
	se->sigev_value.sival_ptr = ctio;

	return (ctio);
}
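
/*
 * Release a CCB and its descriptor.  For CTIOs, also free the data
 * buffer and decrement the outstanding count; the case labels fall
 * through deliberately.
 */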
void
free_ccb(union ccb *ccb)
{
	switch (ccb->ccb_h.func_code) {
	case XPT_CONT_TARGET_IO:
	{
		struct ctio_descr *c_descr;

		c_descr = (struct ctio_descr *)ccb->ccb_h.targ_descr;
		free(c_descr->buf);
		num_ctios--;
		/* FALLTHROUGH */
	}
	case XPT_ACCEPT_TARGET_IO:
		free(ccb->ccb_h.targ_descr);
		/* FALLTHROUGH */
	case XPT_IMMED_NOTIFY:
	default:
		free(ccb);
		break;
	}
}
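
/*
 * Issue an XPT_PATH_INQ to discover SIM capabilities and verify that
 * the HBA supports target mode; return the inquiry flags on success.
 */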
static cam_status
get_sim_flags(u_int16_t *flags)
{
	struct ccb_pathinq cpi;
	cam_status status;

	/* Find SIM capabilities */
	bzero(&cpi, sizeof(cpi));
	cpi.ccb_h.func_code = XPT_PATH_INQ;
	send_ccb((union ccb *)&cpi, /*priority*/1);
	status = cpi.ccb_h.status & CAM_STATUS_MASK;
	if (status != CAM_REQ_CMP) {
		fprintf(stderr, "CPI failed, status %#x\n", status);
		return (status);
	}

	/* Can only enable on controllers that support target mode */
	if ((cpi.target_sprt & PIT_PROCESSOR) == 0) {
		fprintf(stderr, "HBA does not support target mode\n");
		status = CAM_PATH_INVALID;
		return (status);
	}

	*flags = cpi.hba_inquiry;
	return (status);
}
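
/*
 * Ask the SIM to release a frozen device queue once it drains; called
 * whenever a CCB comes back with CAM_DEV_QFRZN set.
 */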
static void
rel_simq()
{
	struct ccb_relsim crs;

	bzero(&crs, sizeof(crs));
	crs.ccb_h.func_code = XPT_REL_SIMQ;
	crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
	crs.openings = 0;
	crs.release_timeout = 0;
	crs.qfrozen_cnt = 0;
	send_ccb((union ccb *)&crs, /*priority*/0);
}

/* Cancel all pending CCBs. */
static void
abort_all_pending()
{
	struct ccb_abort cab;
	struct ccb_hdr *ccb_h;

	if (debug)
		warnx("abort_all_pending");

	bzero(&cab, sizeof(cab));
	cab.ccb_h.func_code = XPT_ABORT;
	TAILQ_FOREACH(ccb_h, &pending_queue, periph_links.tqe) {
		if (debug)
			warnx("Aborting pending CCB %p", ccb_h);
		cab.abort_ccb = (union ccb *)ccb_h;
		send_ccb((union ccb *)&cab, /*priority*/1);
		if (cab.ccb_h.status != CAM_REQ_CMP) {
			warnx("Unable to abort CCB, status %#x",
			      cab.ccb_h.status);
		}
	}
}

static void
usage()
{
	fprintf(stderr,
		"Usage: scsi_target [-AdST] [-b bufsize] [-c sectorsize]\n"
		"\t\t[-s volsize] [-W 8,16,32]\n"
		"\t\tbus:target:lun filename\n");
	exit(1);
}
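
/*
 * Example invocation (illustrative; the device path and backing file
 * name here are assumptions, not part of this program):
 *
 *	dd if=/dev/zero of=/tmp/scsi_vol bs=512 count=204800
 *	scsi_target -d 0:1:0 /tmp/scsi_vol
 *
 * This emulates a ~100MB disk at bus 0, target 1, LUN 0 backed by
 * /tmp/scsi_vol, with debugging output enabled.
 */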