kernel/mpr: Remove unused malloc type.
[dragonfly.git] / sys / dev / raid / mpr / mpr_sas.c
blobe78fca2ce6ba745df0ffa1988c60037b58b3ff9c
1 /*-
2 * Copyright (c) 2009 Yahoo! Inc.
3 * Copyright (c) 2011-2015 LSI Corp.
4 * Copyright (c) 2013-2016 Avago Technologies
5 * All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
28 * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD
30 * $FreeBSD: head/sys/dev/mpr/mpr_sas.c 331422 2018-03-23 13:52:26Z ken $
33 /* Communications core for Avago Technologies (LSI) MPT3 */
35 /* TODO Move headers to mprvar */
36 #include <sys/types.h>
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/kernel.h>
40 #include <sys/module.h>
41 #include <sys/bus.h>
42 #include <sys/conf.h>
43 #include <sys/bio.h>
44 #include <sys/malloc.h>
45 #include <sys/uio.h>
46 #include <sys/sysctl.h>
47 #include <sys/endian.h>
48 #include <sys/queue.h>
49 #include <sys/kthread.h>
50 #include <sys/taskqueue.h>
51 #include <sys/sbuf.h>
52 #include <sys/eventhandler.h>
54 #include <sys/rman.h>
56 #include <machine/stdarg.h>
58 #include <bus/cam/cam.h>
59 #include <bus/cam/cam_ccb.h>
60 #include <bus/cam/cam_debug.h>
61 #include <bus/cam/cam_sim.h>
62 #include <bus/cam/cam_xpt_sim.h>
63 #include <bus/cam/cam_xpt_periph.h>
64 #include <bus/cam/cam_periph.h>
65 #include <bus/cam/scsi/scsi_all.h>
66 #include <bus/cam/scsi/scsi_message.h>
67 #if __FreeBSD_version >= 900026
68 #include <bus/cam/scsi/smp_all.h>
69 #endif
71 #if 0 /* XXX swildner NVMe support */
72 #include <dev/nvme/nvme.h>
73 #endif
75 #include <dev/raid/mpr/mpi/mpi2_type.h>
76 #include <dev/raid/mpr/mpi/mpi2.h>
77 #include <dev/raid/mpr/mpi/mpi2_ioc.h>
78 #include <dev/raid/mpr/mpi/mpi2_sas.h>
79 #include <dev/raid/mpr/mpi/mpi2_pci.h>
80 #include <dev/raid/mpr/mpi/mpi2_cnfg.h>
81 #include <dev/raid/mpr/mpi/mpi2_init.h>
82 #include <dev/raid/mpr/mpi/mpi2_tool.h>
83 #include <dev/raid/mpr/mpr_ioctl.h>
84 #include <dev/raid/mpr/mprvar.h>
85 #include <dev/raid/mpr/mpr_table.h>
86 #include <dev/raid/mpr/mpr_sas.h>
88 #define MPRSAS_DISCOVERY_TIMEOUT 20
89 #define MPRSAS_MAX_DISCOVERY_TIMEOUTS 10 /* 200 seconds */
92 * static array to check SCSI OpCode for EEDP protection bits
/*
 * EEDP (End-to-End Data Protection) flag lookup, indexed by SCSI CDB
 * opcode.  PRO_R marks read opcodes (check/remove protection info on the
 * way in), PRO_W marks write opcodes (insert protection info), PRO_V
 * marks verify opcodes (insert); 0 means no EEDP handling for that
 * opcode.  The value is OR'd into the SCSIIO request's EEDPFlags.
 */
94 #define PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
95 #define PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
96 #define PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
97 static uint8_t op_code_prot[256] = {
98 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
99 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
100 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
101 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
102 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
103 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
104 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
105 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
106 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
107 0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
108 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
109 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
110 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
111 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
112 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
113 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
116 static void mprsas_remove_device(struct mpr_softc *, struct mpr_command *);
117 static void mprsas_remove_complete(struct mpr_softc *, struct mpr_command *);
118 static void mprsas_action(struct cam_sim *sim, union ccb *ccb);
119 static void mprsas_poll(struct cam_sim *sim);
120 static void mprsas_scsiio_timeout(void *data);
121 static void mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *cm);
122 static void mprsas_action_scsiio(struct mprsas_softc *, union ccb *);
123 static void mprsas_scsiio_complete(struct mpr_softc *, struct mpr_command *);
124 static void mprsas_action_resetdev(struct mprsas_softc *, union ccb *);
125 static void mprsas_resetdev_complete(struct mpr_softc *, struct mpr_command *);
126 static int mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
127 struct mpr_command *cm);
128 void mprsas_rescan_callback(struct cam_periph *, union ccb *);
129 static void mprsas_async(void *callback_arg, uint32_t code,
130 struct cam_path *path, void *arg);
131 #if (__FreeBSD_version < 901503) || \
132 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
133 static void mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path,
134 struct ccb_getdev *cgd);
135 static void mprsas_read_cap_done(struct cam_periph *periph,
136 union ccb *done_ccb);
137 #endif
138 static int mprsas_send_portenable(struct mpr_softc *sc);
139 static void mprsas_portenable_complete(struct mpr_softc *sc,
140 struct mpr_command *cm);
142 #if __FreeBSD_version >= 900026
143 static void mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm);
144 static void mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb,
145 uint64_t sasaddr);
146 static void mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb);
147 #endif //FreeBSD_version >= 900026
149 struct mprsas_target *
150 mprsas_find_target_by_handle(struct mprsas_softc *sassc, int start,
151 uint16_t handle)
153 struct mprsas_target *target;
154 int i;
156 for (i = start; i < sassc->maxtargets; i++) {
157 target = &sassc->targets[i];
158 if (target->handle == handle)
159 return (target);
162 return (NULL);
165 /* we need to freeze the simq during attach and diag reset, to avoid failing
166 * commands before device handles have been found by discovery. Since
167 * discovery involves reading config pages and possibly sending commands,
168 * discovery actions may continue even after we receive the end of discovery
169 * event, so refcount discovery actions instead of assuming we can unfreeze
170 * the simq when we get the event.
172 void
173 mprsas_startup_increment(struct mprsas_softc *sassc)
175 MPR_FUNCTRACE(sassc->sc);
/* Refcounting only applies while the driver is in its startup window. */
177 if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
/* 0 -> 1 transition: freeze the SIM queue (and hold boot where supported). */
178 if (sassc->startup_refcount++ == 0) {
179 /* just starting, freeze the simq */
180 mpr_dprint(sassc->sc, MPR_INIT,
181 "%s freezing simq\n", __func__);
182 #if (__FreeBSD_version >= 1000039) || \
183 ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
184 xpt_hold_boot();
185 #endif
186 xpt_freeze_simq(sassc->sim, 1);
188 mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
189 sassc->startup_refcount);
/*
 * Release a SIM queue freeze taken under MPRSAS_QUEUE_FROZEN (used around
 * diag reset/reinit).  Clears the flag and releases the simq exactly once;
 * a no-op if the queue was not frozen.
 */
193 void
194 mprsas_release_simq_reinit(struct mprsas_softc *sassc)
196 if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
197 sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
198 xpt_release_simq(sassc->sim, 1);
199 mpr_dprint(sassc->sc, MPR_INFO, "Unfreezing SIM queue\n");
/*
 * Drop one discovery reference taken by mprsas_startup_increment().  On the
 * 1 -> 0 transition the startup window ends: clear MPRSAS_IN_STARTUP,
 * release the simq, and either release boot (newer CAM) or kick off a full
 * rescan (older CAM).
 */
203 void
204 mprsas_startup_decrement(struct mprsas_softc *sassc)
206 MPR_FUNCTRACE(sassc->sc);
208 if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
209 if (--sassc->startup_refcount == 0) {
210 /* finished all discovery-related actions, release
211 * the simq and rescan for the latest topology.
213 mpr_dprint(sassc->sc, MPR_INIT,
214 "%s releasing simq\n", __func__);
215 sassc->flags &= ~MPRSAS_IN_STARTUP;
216 xpt_release_simq(sassc->sim, 1);
217 #if (__FreeBSD_version >= 1000039) || \
218 ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
219 xpt_release_boot();
220 #else
221 mprsas_rescan_target(sassc->sc, NULL);
222 #endif
224 mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
225 sassc->startup_refcount);
/*
 * The firmware requires the host to stop issuing normal commands while
 * task management is in flight, so TM commands are drawn from the
 * dedicated high-priority pool; callers refcount them and keep the simq
 * frozen while any are in use.
 *
 * Returns a high-priority command, or NULL if the pool is exhausted.
 */
struct mpr_command *
mprsas_alloc_tm(struct mpr_softc *sc)
{
	MPR_FUNCTRACE(sc);
	return (mpr_alloc_high_priority_command(sc));
}
/*
 * Return a task-management command to the high-priority pool.  Undoes the
 * per-device devq freeze set up by mprsas_prepare_for_tm(): clears the
 * target's INRESET flag, releases the devq, and frees the path/CCB that
 * were allocated to freeze it.  Safe to call with tm == NULL.
 */
243 void
244 mprsas_free_tm(struct mpr_softc *sc, struct mpr_command *tm)
246 int target_id = 0xFFFFFFFF;
248 MPR_FUNCTRACE(sc);
249 if (tm == NULL)
250 return;
253 * For TM's the devq is frozen for the device. Unfreeze it here and
254 * free the resources used for freezing the devq. Must clear the
255 * INRESET flag as well or scsi I/O will not work.
257 if (tm->cm_targ != NULL) {
258 tm->cm_targ->flags &= ~MPRSAS_TARGET_INRESET;
259 target_id = tm->cm_targ->tid;
261 if (tm->cm_ccb) {
262 mpr_dprint(sc, MPR_INFO, "Unfreezing devq for target ID %d\n",
263 target_id);
264 xpt_release_devq(tm->cm_ccb->ccb_h.path, 1, TRUE);
265 xpt_free_path(tm->cm_ccb->ccb_h.path);
266 xpt_free_ccb(tm->cm_ccb);
269 mpr_free_high_priority_command(sc, tm);
272 void
273 mprsas_rescan_callback(struct cam_periph *periph, union ccb *ccb)
275 if (ccb->ccb_h.status != CAM_REQ_CMP)
276 kprintf("cam_scan_callback: failure status = %x\n",
277 ccb->ccb_h.status);
279 xpt_free_path(ccb->ccb_h.path);
280 xpt_free_ccb(ccb);
/*
 * Queue an asynchronous CAM bus rescan.  If 'targ' is NULL the whole bus is
 * rescanned (wildcard target); otherwise the target id is derived from the
 * target's index in the sassc->targets array.  The CCB and path allocated
 * here are freed by mprsas_rescan_callback() when the scan completes.
 */
283 void
284 mprsas_rescan_target(struct mpr_softc *sc, struct mprsas_target *targ)
286 struct mprsas_softc *sassc = sc->sassc;
287 path_id_t pathid;
288 target_id_t targetid;
289 union ccb *ccb;
291 MPR_FUNCTRACE(sc);
292 pathid = cam_sim_path(sassc->sim);
293 if (targ == NULL)
294 targetid = CAM_TARGET_WILDCARD;
295 else
296 targetid = targ - sassc->targets;
299 * Allocate a CCB and schedule a rescan.
301 ccb = xpt_alloc_ccb();
302 if (ccb == NULL) {
303 mpr_dprint(sc, MPR_ERROR, "unable to alloc CCB for rescan\n");
304 return;
307 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, pathid, targetid,
308 CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
309 mpr_dprint(sc, MPR_ERROR, "unable to create path for rescan\n");
310 xpt_free_ccb(ccb);
311 return;
314 xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, 5); /* 5 = low priority */
316 /* XXX Hardwired to scan the bus for now */
317 ccb->ccb_h.func_code = XPT_SCAN_BUS;
318 ccb->ccb_h.cbfcnp = mprsas_rescan_callback;
319 ccb->crcn.flags = CAM_FLAG_NONE;
321 mpr_dprint(sc, MPR_TRACE, "%s targetid %u\n", __func__, targetid);
322 xpt_action(ccb);
/*
 * Debug helper: format and print a description of 'cm' (CAM path, CDB and
 * transfer length when a SCSI CCB is attached, otherwise a synthetic
 * sim:bus:target:lun tag), followed by the SMID and the caller's printf-style
 * message.  Returns immediately unless 'level' is enabled in mpr_debug.
 * The sbuf uses the fixed on-stack buffer 'str', so no sbuf_delete is needed.
 */
325 static void
326 mprsas_log_command(struct mpr_command *cm, u_int level, const char *fmt, ...)
328 struct sbuf sb;
329 __va_list ap;
330 char str[192];
331 char path_str[64];
333 if (cm == NULL)
334 return;
336 /* No need to be in here if debugging isn't enabled */
337 if ((cm->cm_sc->mpr_debug & level) == 0)
338 return;
340 sbuf_new(&sb, str, sizeof(str), 0);
342 __va_start(ap, fmt);
344 if (cm->cm_ccb != NULL) {
345 xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
346 sizeof(path_str));
347 sbuf_cat(&sb, path_str);
348 if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
349 scsi_command_string(&cm->cm_ccb->csio, &sb);
350 sbuf_printf(&sb, "length %d ",
351 cm->cm_ccb->csio.dxfer_len);
353 } else {
354 sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
355 cam_sim_name(cm->cm_sc->sassc->sim),
356 cam_sim_unit(cm->cm_sc->sassc->sim),
357 cam_sim_bus(cm->cm_sc->sassc->sim),
358 cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
359 cm->cm_lun);
362 sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
363 sbuf_vprintf(&sb, fmt, ap);
364 sbuf_finish(&sb);
365 mpr_print_field(cm->cm_sc, "%s", sbuf_data(&sb));
367 __va_end(ap);
/*
 * Completion callback for the target reset issued by
 * mprsas_prepare_volume_remove().  On a NULL reply (chip reset in
 * progress) the TM is simply freed.  On success the target's bookkeeping
 * fields are cleared, but devname/sasaddr are deliberately left intact so
 * the same target id can be reused if the volume returns.  Unlike bare
 * drives, no MPI2_SAS_OP_REMOVE_DEVICE follow-up is sent for volumes.
 */
370 static void
371 mprsas_remove_volume(struct mpr_softc *sc, struct mpr_command *tm)
373 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
374 struct mprsas_target *targ;
375 uint16_t handle;
377 MPR_FUNCTRACE(sc);
379 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
380 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
381 targ = tm->cm_targ;
383 if (reply == NULL) {
384 /* XXX retry the remove after the diag reset completes? */
385 mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
386 "0x%04x\n", __func__, handle);
387 mprsas_free_tm(sc, tm);
388 return;
391 if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
392 MPI2_IOCSTATUS_SUCCESS) {
393 mpr_dprint(sc, MPR_ERROR, "IOCStatus = 0x%x while resetting "
394 "device 0x%x\n", le16toh(reply->IOCStatus), handle);
397 mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
398 le32toh(reply->TerminationCount));
/* Reply frame is done with; NULL cm_reply so mprsas_free_tm won't re-free. */
399 mpr_free_reply(sc, tm->cm_reply_data);
400 tm->cm_reply = NULL; /* Ensures the reply won't get re-freed */
402 mpr_dprint(sc, MPR_XINFO, "clearing target %u handle 0x%04x\n",
403 targ->tid, handle);
406 * Don't clear target if remove fails because things will get confusing.
407 * Leave the devname and sasaddr intact so that we know to avoid reusing
408 * this target id if possible, and so we can assign the same target id
409 * to this device if it comes back in the future.
411 if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
412 MPI2_IOCSTATUS_SUCCESS) {
413 targ = tm->cm_targ;
414 targ->handle = 0x0;
415 targ->encl_handle = 0x0;
416 targ->encl_level_valid = 0x0;
417 targ->encl_level = 0x0;
418 targ->connector_name[0] = ' ';
419 targ->connector_name[1] = ' ';
420 targ->connector_name[2] = ' ';
421 targ->connector_name[3] = ' ';
422 targ->encl_slot = 0x0;
423 targ->exp_dev_handle = 0x0;
424 targ->phy_num = 0x0;
425 targ->linkrate = 0x0;
426 targ->devinfo = 0x0;
427 targ->flags = 0x0;
428 targ->scsi_req_desc_type = 0;
431 mprsas_free_tm(sc, tm);
436 * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
437 * Otherwise Volume Delete is same as Bare Drive Removal.
439 void
440 mprsas_prepare_volume_remove(struct mprsas_softc *sassc, uint16_t handle)
442 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
443 struct mpr_softc *sc;
444 struct mpr_command *cm;
445 struct mprsas_target *targ = NULL;
447 MPR_FUNCTRACE(sassc->sc);
448 sc = sassc->sc;
450 targ = mprsas_find_target_by_handle(sassc, 0, handle);
451 if (targ == NULL) {
452 /* FIXME: what is the action? */
453 /* We don't know about this device? */
454 mpr_dprint(sc, MPR_ERROR,
455 "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
456 return;
459 targ->flags |= MPRSAS_TARGET_INREMOVAL;
461 cm = mprsas_alloc_tm(sc);
462 if (cm == NULL) {
463 mpr_dprint(sc, MPR_ERROR,
464 "%s: command alloc failure\n", __func__);
465 return;
468 mprsas_rescan_target(sc, targ);
470 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
471 req->DevHandle = targ->handle;
472 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
473 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
475 /* SAS Hard Link Reset / SATA Link Reset */
476 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
478 cm->cm_targ = targ;
479 cm->cm_data = NULL;
480 cm->cm_desc.HighPriority.RequestFlags =
481 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
482 cm->cm_complete = mprsas_remove_volume;
483 cm->cm_complete_data = (void *)(uintptr_t)handle;
485 mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
486 __func__, targ->tid);
487 mprsas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
489 mpr_map_command(sc, cm);
493 * The firmware performs debounce on the link to avoid transient link errors
494 * and false removals. When it does decide that link has been lost and a
495 * device needs to go away, it expects that the host will perform a target reset
496 * and then an op remove. The reset has the side-effect of aborting any
497 * outstanding requests for the device, which is required for the op-remove to
498 * succeed. It's not clear if the host should check for the device coming back
499 * alive after the reset.
501 void
502 mprsas_prepare_remove(struct mprsas_softc *sassc, uint16_t handle)
504 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
505 struct mpr_softc *sc;
506 struct mpr_command *cm;
507 struct mprsas_target *targ = NULL;
509 MPR_FUNCTRACE(sassc->sc);
511 sc = sassc->sc;
513 targ = mprsas_find_target_by_handle(sassc, 0, handle);
514 if (targ == NULL) {
515 /* FIXME: what is the action? */
516 /* We don't know about this device? */
517 mpr_dprint(sc, MPR_ERROR, "%s : invalid handle 0x%x \n",
518 __func__, handle);
519 return;
522 targ->flags |= MPRSAS_TARGET_INREMOVAL;
524 cm = mprsas_alloc_tm(sc);
525 if (cm == NULL) {
526 mpr_dprint(sc, MPR_ERROR, "%s: command alloc failure\n",
527 __func__);
528 return;
531 mprsas_rescan_target(sc, targ);
/* Build the target-reset TM; completion continues in mprsas_remove_device. */
533 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
534 memset(req, 0, sizeof(*req));
535 req->DevHandle = htole16(targ->handle);
536 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
537 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
539 /* SAS Hard Link Reset / SATA Link Reset */
540 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
542 cm->cm_targ = targ;
543 cm->cm_data = NULL;
544 cm->cm_desc.HighPriority.RequestFlags =
545 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
546 cm->cm_complete = mprsas_remove_device;
547 cm->cm_complete_data = (void *)(uintptr_t)handle;
549 mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
550 __func__, targ->tid);
551 mprsas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
553 mpr_map_command(sc, cm);
/*
 * Completion callback for the target reset issued by
 * mprsas_prepare_remove().  If the reset completed (reply != NULL), the
 * same command frame is reused to send the SAS_IO_UNIT_CONTROL
 * REMOVE_DEVICE operation, whose completion is mprsas_remove_complete().
 * Any commands still queued on the target are completed back to CAM with
 * CAM_DEV_NOT_THERE.
 */
556 static void
557 mprsas_remove_device(struct mpr_softc *sc, struct mpr_command *tm)
559 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
560 MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
561 struct mprsas_target *targ;
562 struct mpr_command *next_cm;
563 uint16_t handle;
565 MPR_FUNCTRACE(sc);
567 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
568 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
569 targ = tm->cm_targ;
572 * Currently there should be no way we can hit this case. It only
573 * happens when we have a failure to allocate chain frames, and
574 * task management commands don't have S/G lists.
576 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
577 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for remove of "
578 "handle %#04x! This should not happen!\n", __func__,
579 tm->cm_flags, handle);
582 if (reply == NULL) {
583 /* XXX retry the remove after the diag reset completes? */
584 mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
585 "0x%04x\n", __func__, handle);
586 mprsas_free_tm(sc, tm);
587 return;
590 if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
591 MPI2_IOCSTATUS_SUCCESS) {
592 mpr_dprint(sc, MPR_ERROR, "IOCStatus = 0x%x while resetting "
593 "device 0x%x\n", le16toh(reply->IOCStatus), handle);
596 mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
597 le32toh(reply->TerminationCount));
/* Free the reply frame now and NULL cm_reply so it is not re-freed later. */
598 mpr_free_reply(sc, tm->cm_reply_data);
599 tm->cm_reply = NULL; /* Ensures the reply won't get re-freed */
601 /* Reuse the existing command */
602 req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
603 memset(req, 0, sizeof(*req));
604 req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
605 req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
606 req->DevHandle = htole16(handle);
607 tm->cm_data = NULL;
608 tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
609 tm->cm_complete = mprsas_remove_complete;
610 tm->cm_complete_data = (void *)(uintptr_t)handle;
612 mpr_map_command(sc, tm);
614 mpr_dprint(sc, MPR_INFO, "clearing target %u handle 0x%04x\n",
615 targ->tid, handle);
616 if (targ->encl_level_valid) {
617 mpr_dprint(sc, MPR_INFO, "At enclosure level %d, slot %d, "
618 "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
619 targ->connector_name);
/* Fail back any commands still queued on the now-gone target. */
621 TAILQ_FOREACH_MUTABLE(tm, &targ->commands, cm_link, next_cm) {
622 union ccb *ccb;
624 mpr_dprint(sc, MPR_XINFO, "Completing missed command %p\n", tm);
625 ccb = tm->cm_complete_data;
626 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
627 mprsas_scsiio_complete(sc, tm);
/*
 * Final completion for a device removal: the REMOVE_DEVICE IO-unit-control
 * reply has arrived.  On success, clear the target's bookkeeping (handle,
 * enclosure info, flags) and free its per-LUN records; devname/sasaddr are
 * intentionally preserved so the target id can be matched up again if the
 * device returns.  Always frees the TM command.
 */
631 static void
632 mprsas_remove_complete(struct mpr_softc *sc, struct mpr_command *tm)
634 MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
635 uint16_t handle;
636 struct mprsas_target *targ;
637 struct mprsas_lun *lun;
639 MPR_FUNCTRACE(sc);
641 reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
642 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
645 * Currently there should be no way we can hit this case. It only
646 * happens when we have a failure to allocate chain frames, and
647 * task management commands don't have S/G lists.
649 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
650 mpr_dprint(sc, MPR_XINFO, "%s: cm_flags = %#x for remove of "
651 "handle %#04x! This should not happen!\n", __func__,
652 tm->cm_flags, handle);
653 mprsas_free_tm(sc, tm);
654 return;
657 if (reply == NULL) {
658 /* most likely a chip reset */
659 mpr_dprint(sc, MPR_FAULT, "%s NULL reply removing device "
660 "0x%04x\n", __func__, handle);
661 mprsas_free_tm(sc, tm);
662 return;
665 mpr_dprint(sc, MPR_XINFO, "%s on handle 0x%04x, IOCStatus= 0x%x\n",
666 __func__, handle, le16toh(reply->IOCStatus));
669 * Don't clear target if remove fails because things will get confusing.
670 * Leave the devname and sasaddr intact so that we know to avoid reusing
671 * this target id if possible, and so we can assign the same target id
672 * to this device if it comes back in the future.
674 if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
675 MPI2_IOCSTATUS_SUCCESS) {
676 targ = tm->cm_targ;
677 targ->handle = 0x0;
678 targ->encl_handle = 0x0;
679 targ->encl_level_valid = 0x0;
680 targ->encl_level = 0x0;
681 targ->connector_name[0] = ' ';
682 targ->connector_name[1] = ' ';
683 targ->connector_name[2] = ' ';
684 targ->connector_name[3] = ' ';
685 targ->encl_slot = 0x0;
686 targ->exp_dev_handle = 0x0;
687 targ->phy_num = 0x0;
688 targ->linkrate = 0x0;
689 targ->devinfo = 0x0;
690 targ->flags = 0x0;
691 targ->scsi_req_desc_type = 0;
/* Release all per-LUN records attached to this target. */
693 while (!SLIST_EMPTY(&targ->luns)) {
694 lun = SLIST_FIRST(&targ->luns);
695 SLIST_REMOVE_HEAD(&targ->luns, lun_link);
696 kfree(lun, M_MPR);
700 mprsas_free_tm(sc, tm);
/*
 * Build the firmware event-notification bitmask and register the SAS
 * event handler.  Gen-2.6+ IOCs additionally get the active-cable
 * exception event; Gen-3.5 IOCs also get the PCIe (NVMe) device events.
 * Always returns 0.
 */
703 static int
704 mprsas_register_events(struct mpr_softc *sc)
706 uint8_t events[16];
708 bzero(events, 16);
709 setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
710 setbit(events, MPI2_EVENT_SAS_DISCOVERY);
711 setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
712 setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
713 setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
714 setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
715 setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
716 setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
717 setbit(events, MPI2_EVENT_IR_VOLUME);
718 setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
719 setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
720 setbit(events, MPI2_EVENT_TEMP_THRESHOLD);
721 setbit(events, MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
722 if (sc->facts->MsgVersion >= MPI2_VERSION_02_06) {
723 setbit(events, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION);
724 if (sc->mpr_flags & MPR_FLAGS_GEN35_IOC) {
725 setbit(events, MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE);
726 setbit(events, MPI2_EVENT_PCIE_ENUMERATION);
727 setbit(events, MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
731 mpr_register_events(sc, events, mprsas_evt_handler, NULL,
732 &sc->sassc->mprsas_eh);
734 return (0);
/*
 * Attach the SAS/CAM front end: allocate the SAS softc and target table,
 * create and register the SIM, start the event taskqueue, freeze the simq
 * for discovery, and register async/event handlers.  Returns 0 on success
 * or an errno; on error, mpr_detach_sas() unwinds partial state.
 */
738 mpr_attach_sas(struct mpr_softc *sc)
740 struct mprsas_softc *sassc;
741 cam_status status;
742 int unit, error = 0, reqs;
744 MPR_FUNCTRACE(sc);
745 mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__);
/* NOTE(review): kmalloc with M_WAITOK sleeps rather than failing, so the
 * NULL checks below look unreachable — confirm against kmalloc(9). */
747 sassc = kmalloc(sizeof(struct mprsas_softc), M_MPR, M_WAITOK|M_ZERO);
748 if (!sassc) {
749 mpr_dprint(sc, MPR_INIT|MPR_ERROR,
750 "Cannot allocate SAS subsystem memory\n");
751 return (ENOMEM);
755 * XXX MaxTargets could change during a reinit. Since we don't
756 * resize the targets[] array during such an event, cache the value
757 * of MaxTargets here so that we don't get into trouble later. This
758 * should move into the reinit logic.
760 sassc->maxtargets = sc->facts->MaxTargets + sc->facts->MaxVolumes;
761 sassc->targets = kmalloc(sizeof(struct mprsas_target) *
762 sassc->maxtargets, M_MPR, M_WAITOK|M_ZERO);
763 if (!sassc->targets) {
764 mpr_dprint(sc, MPR_INIT|MPR_ERROR,
765 "Cannot allocate SAS target memory\n");
766 kfree(sassc, M_MPR);
767 return (ENOMEM);
769 sc->sassc = sassc;
770 sassc->sc = sc;
/* Leave room for high-priority (TM) requests plus one spare. */
772 reqs = sc->num_reqs - sc->num_prireqs - 1;
773 if ((sassc->devq = cam_simq_alloc(reqs)) == NULL) {
774 mpr_dprint(sc, MPR_INIT|MPR_ERROR, "Cannot allocate SIMQ\n");
775 error = ENOMEM;
776 goto out;
779 unit = device_get_unit(sc->mpr_dev);
780 sassc->sim = cam_sim_alloc(mprsas_action, mprsas_poll, "mpr", sassc,
781 unit, &sc->mpr_lock, reqs, reqs, sassc->devq);
782 cam_simq_release(sassc->devq);
783 if (sassc->sim == NULL) {
784 mpr_dprint(sc, MPR_INIT|MPR_ERROR, "Cannot allocate SIM\n");
785 error = EINVAL;
786 goto out;
789 TAILQ_INIT(&sassc->ev_queue);
791 /* Initialize taskqueue for Event Handling */
792 TASK_INIT(&sassc->ev_task, 0, mprsas_firmware_event_work, sc);
793 sassc->ev_tq = taskqueue_create("mpr_taskq", M_NOWAIT | M_ZERO,
794 taskqueue_thread_enqueue, &sassc->ev_tq);
795 taskqueue_start_threads(&sassc->ev_tq, 1, TDPRI_KERN_DAEMON,
796 -1, "%s taskq",
797 device_get_nameunit(sc->mpr_dev));
799 mpr_lock(sc);
802 * XXX There should be a bus for every port on the adapter, but since
803 * we're just going to fake the topology for now, we'll pretend that
804 * everything is just a target on a single bus.
806 if ((error = xpt_bus_register(sassc->sim, 0)) != 0) {
807 mpr_dprint(sc, MPR_INIT|MPR_ERROR,
808 "Error %d registering SCSI bus\n", error);
809 mpr_unlock(sc);
810 goto out;
814 * Assume that discovery events will start right away.
816 * Hold off boot until discovery is complete.
818 sassc->flags |= MPRSAS_IN_STARTUP | MPRSAS_IN_DISCOVERY;
819 sc->sassc->startup_refcount = 0;
820 mprsas_startup_increment(sassc);
822 callout_init_mp(&sassc->discovery_callout);
825 * Register for async events so we can determine the EEDP
826 * capabilities of devices.
828 status = xpt_create_path(&sassc->path, /*periph*/NULL,
829 cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
830 CAM_LUN_WILDCARD);
831 if (status != CAM_REQ_CMP) {
832 mpr_dprint(sc, MPR_INIT|MPR_ERROR,
833 "Error %#x creating sim path\n", status);
834 sassc->path = NULL;
835 } else {
836 int event;
838 #if (__FreeBSD_version >= 1000006) || \
839 ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
840 event = AC_ADVINFO_CHANGED | AC_FOUND_DEVICE;
841 #else
842 event = AC_FOUND_DEVICE;
843 #endif
846 * Prior to the CAM locking improvements, we can't call
847 * xpt_register_async() with a particular path specified.
849 * If a path isn't specified, xpt_register_async() will
850 * generate a wildcard path and acquire the XPT lock while
851 * it calls xpt_action() to execute the XPT_SASYNC_CB CCB.
852 * It will then drop the XPT lock once that is done.
854 * If a path is specified for xpt_register_async(), it will
855 * not acquire and drop the XPT lock around the call to
856 * xpt_action(). xpt_action() asserts that the caller
857 * holds the SIM lock, so the SIM lock has to be held when
858 * calling xpt_register_async() when the path is specified.
860 * But xpt_register_async calls xpt_for_all_devices(),
861 * which calls xptbustraverse(), which will acquire each
862 * SIM lock. When it traverses our particular bus, it will
863 * necessarily acquire the SIM lock, which will lead to a
864 * recursive lock acquisition.
866 * The CAM locking changes fix this problem by acquiring
867 * the XPT topology lock around bus traversal in
868 * xptbustraverse(), so the caller can hold the SIM lock
869 * and it does not cause a recursive lock acquisition.
871 * These __FreeBSD_version values are approximate, especially
872 * for stable/10, which is two months later than the actual
873 * change.
876 #if (__FreeBSD_version < 1000703) || \
877 ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
878 mpr_unlock(sc);
879 status = xpt_register_async(event, mprsas_async, sc,
880 NULL);
881 mpr_lock(sc);
882 #else
883 status = xpt_register_async(event, mprsas_async, sc,
884 sassc->path);
885 #endif
887 if (status != CAM_REQ_CMP) {
888 mpr_dprint(sc, MPR_ERROR,
889 "Error %#x registering async handler for "
890 "AC_ADVINFO_CHANGED events\n", status);
891 xpt_free_path(sassc->path);
892 sassc->path = NULL;
895 if (status != CAM_REQ_CMP) {
897 * EEDP use is the exception, not the rule.
898 * Warn the user, but do not fail to attach.
900 mpr_printf(sc, "EEDP capabilities disabled.\n");
903 mpr_unlock(sc);
905 mprsas_register_events(sc);
906 out:
907 if (error)
908 mpr_detach_sas(sc);
910 mpr_dprint(sc, MPR_INIT, "%s exit, error= %d\n", __func__, error);
911 return (error);
/*
 * Tear down the SAS/CAM front end: deregister events, drain the event
 * taskqueue (lock not held to avoid deadlock), release any remaining
 * startup simq freezes, deregister the async handler and SCSI bus, then
 * free the per-LUN records, target table, and softc.  Safe to call on a
 * partially attached instance (used as the error path of mpr_attach_sas).
 * Always returns 0.
 */
915 mpr_detach_sas(struct mpr_softc *sc)
917 struct mprsas_softc *sassc;
918 struct mprsas_lun *lun, *lun_tmp;
919 struct mprsas_target *targ;
920 int i;
922 MPR_FUNCTRACE(sc);
924 if (sc->sassc == NULL)
925 return (0);
927 sassc = sc->sassc;
928 mpr_deregister_events(sc, sassc->mprsas_eh);
931 * Drain and free the event handling taskqueue with the lock
932 * unheld so that any parallel processing tasks drain properly
933 * without deadlocking.
935 if (sassc->ev_tq != NULL)
936 taskqueue_free(sassc->ev_tq);
938 /* Make sure CAM doesn't wedge if we had to bail out early. */
939 mpr_lock(sc);
941 while (sassc->startup_refcount != 0)
942 mprsas_startup_decrement(sassc);
944 /* Deregister our async handler */
945 if (sassc->path != NULL) {
946 xpt_register_async(0, mprsas_async, sc, sassc->path);
947 xpt_free_path(sassc->path);
948 sassc->path = NULL;
951 if (sassc->flags & MPRSAS_IN_STARTUP)
952 xpt_release_simq(sassc->sim, 1);
954 if (sassc->sim != NULL) {
955 xpt_bus_deregister(cam_sim_path(sassc->sim));
956 cam_sim_free(sassc->sim);
959 mpr_unlock(sc);
961 for (i = 0; i < sassc->maxtargets; i++) {
962 targ = &sassc->targets[i];
963 SLIST_FOREACH_MUTABLE(lun, &targ->luns, lun_link, lun_tmp) {
964 kfree(lun, M_MPR);
967 kfree(sassc->targets, M_MPR);
968 kfree(sassc, M_MPR);
969 sc->sassc = NULL;
971 return (0);
/*
 * Called when firmware discovery finishes: cancel any pending discovery
 * timeout and, if mapping-event tracking is enabled, schedule the
 * missing-device check on the mapping table.
 */
974 void
975 mprsas_discovery_end(struct mprsas_softc *sassc)
977 struct mpr_softc *sc = sassc->sc;
979 MPR_FUNCTRACE(sc);
981 if (sassc->flags & MPRSAS_DISCOVERY_TIMEOUT_PENDING)
982 callout_stop(&sassc->discovery_callout);
985 * After discovery has completed, check the mapping table for any
986 * missing devices and update their missing counts. Only do this once
987 * whenever the driver is initialized so that missing counts aren't
988 * updated unnecessarily. Note that just because discovery has
989 * completed doesn't mean that events have been processed yet. The
990 * check_devices function is a callout timer that checks if ALL devices
991 * are missing. If so, it will wait a little longer for events to
992 * complete and keep resetting itself until some device in the mapping
993 * table is not missing, meaning that event processing has started.
995 if (sc->track_mapping_events) {
996 mpr_dprint(sc, MPR_XINFO | MPR_MAPPING, "Discovery has "
997 "completed. Check for missing devices in the mapping "
998 "table.\n");
999 callout_reset(&sc->device_check_callout,
1000 MPR_MISSING_CHECK_DELAY * hz, mpr_mapping_check_devices,
1001 sc);
/*
 * CAM action entry point for this SIM.  Dispatches the incoming CCB by
 * function code: path inquiry and transport-settings queries are answered
 * inline; SCSI I/O, SMP I/O, and device resets are handed off to helpers
 * that complete the CCB themselves (hence the early returns); everything
 * else is failed with CAM_FUNC_NOTAVAIL.  Runs with the controller lock
 * held (asserted below).
 */
static void
mprsas_action(struct cam_sim *sim, union ccb *ccb)
{
	struct mprsas_softc *sassc;

	sassc = cam_sim_softc(sim);

	MPR_FUNCTRACE(sassc->sc);
	mpr_dprint(sassc->sc, MPR_TRACE, "ccb func_code 0x%x\n",
	    ccb->ccb_h.func_code);
	KKASSERT(lockowned(&sassc->sc->mpr_lock));

	switch (ccb->ccb_h.func_code) {
	case XPT_PATH_INQ:
	{
		struct ccb_pathinq *cpi = &ccb->cpi;
		struct mpr_softc *sc = sassc->sc;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
#if (__FreeBSD_version >= 1000039) || \
    ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
#elif defined(__DragonFly__)
		/* DragonFly CAM lacks PIM_UNMAPPED/PIM_NOSCAN. */
		cpi->hba_misc = PIM_NOBUSRESET;
#else
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
#endif
		cpi->hba_eng_cnt = 0;
		cpi->max_target = sassc->maxtargets - 1;
		cpi->max_lun = 255;

		/*
		 * initiator_id is set here to an ID outside the set of valid
		 * target IDs (including volumes).
		 */
		cpi->initiator_id = sassc->maxtargets;
		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strlcpy(cpi->hba_vid, "Avago Tech", HBA_IDLEN);
		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = cam_sim_bus(sim);
		/*
		 * XXXSLM-I think this needs to change based on config page or
		 * something instead of hardcoded to 150000.
		 */
		cpi->base_transfer_speed = 150000;
		cpi->transport = XPORT_SAS;
		cpi->transport_version = 0;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_SPC;
		cpi->maxio = sc->maxio;
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings *cts;
		struct ccb_trans_settings_sas *sas;
		struct ccb_trans_settings_scsi *scsi;
		struct mprsas_target *targ;

		cts = &ccb->cts;
		sas = &cts->xport_specific.sas;
		scsi = &cts->proto_specific.scsi;

		KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
		    ("Target %d out of bounds in XPT_GET_TRAN_SETTINGS\n",
		    cts->ccb_h.target_id));
		/* A zero device handle means the target is gone. */
		targ = &sassc->targets[cts->ccb_h.target_id];
		if (targ->handle == 0x0) {
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			break;
		}

		cts->protocol_version = SCSI_REV_SPC2;
		cts->transport = XPORT_SAS;
		cts->transport_version = 0;

		/* Translate the IOC link-rate code into a bit rate (kb/s). */
		sas->valid = CTS_SAS_VALID_SPEED;
		switch (targ->linkrate) {
		case 0x08:
			sas->bitrate = 150000;
			break;
		case 0x09:
			sas->bitrate = 300000;
			break;
		case 0x0a:
			sas->bitrate = 600000;
			break;
		case 0x0b:
			sas->bitrate = 1200000;
			break;
		default:
			sas->valid = 0;
		}

		cts->protocol = PROTO_SCSI;
		scsi->valid = CTS_SCSI_VALID_TQ;
		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;

		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_RESET_DEV:
		mpr_dprint(sassc->sc, MPR_XINFO, "mprsas_action "
		    "XPT_RESET_DEV\n");
		/* Helper completes the CCB; do not fall through to xpt_done. */
		mprsas_action_resetdev(sassc, ccb);
		return;
	case XPT_RESET_BUS:
	case XPT_ABORT:
	case XPT_TERM_IO:
		mpr_dprint(sassc->sc, MPR_XINFO, "mprsas_action faking success "
		    "for abort or reset\n");
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_SCSI_IO:
		/* Helper completes the CCB itself. */
		mprsas_action_scsiio(sassc, ccb);
		return;
#if __FreeBSD_version >= 900026
	case XPT_SMP_IO:
		mprsas_action_smpio(sassc, ccb);
		return;
#endif
	default:
		mprsas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
		break;
	}
	xpt_done(ccb);
}
1142 static void
1143 mprsas_announce_reset(struct mpr_softc *sc, uint32_t ac_code,
1144 target_id_t target_id, lun_id_t lun_id)
1146 path_id_t path_id = cam_sim_path(sc->sassc->sim);
1147 struct cam_path *path;
1149 mpr_dprint(sc, MPR_XINFO, "%s code %x target %d lun %jx\n", __func__,
1150 ac_code, target_id, (uintmax_t)lun_id);
1152 if (xpt_create_path(&path, NULL,
1153 path_id, target_id, lun_id) != CAM_REQ_CMP) {
1154 mpr_dprint(sc, MPR_ERROR, "unable to create path for reset "
1155 "notification\n");
1156 return;
1159 xpt_async(ac_code, path, NULL);
1160 xpt_free_path(path);
/*
 * Force-complete every non-free command after a diag reset.  Each active
 * command is completed with a NULL reply: commands with a completion
 * callback get it invoked, polled/sleeping commands are woken, and any
 * command that could not be completed either way is logged (that case
 * should never happen).  Runs with the controller lock held.
 */
static void
mprsas_complete_all_commands(struct mpr_softc *sc)
{
	struct mpr_command *cm;
	int i;
	int completed;

	MPR_FUNCTRACE(sc);
	KKASSERT(lockowned(&sc->mpr_lock));

	/* complete all commands with a NULL reply */
	for (i = 1; i < sc->num_reqs; i++) {
		cm = &sc->commands[i];
		if (cm->cm_state == MPR_CM_STATE_FREE)
			continue;

		cm->cm_state = MPR_CM_STATE_BUSY;
		cm->cm_reply = NULL;
		completed = 0;

		/* Let pollers see the command as done. */
		if (cm->cm_flags & MPR_CM_FLAGS_POLLED)
			cm->cm_flags |= MPR_CM_FLAGS_COMPLETE;

		if (cm->cm_complete != NULL) {
			mprsas_log_command(cm, MPR_RECOVERY,
			    "completing cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);
			cm->cm_complete(sc, cm);
			completed = 1;
		} else if (cm->cm_flags & MPR_CM_FLAGS_WAKEUP) {
			mprsas_log_command(cm, MPR_RECOVERY,
			    "waking up cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);
			wakeup(cm);
			completed = 1;
		}

		if ((completed == 0) && (cm->cm_state != MPR_CM_STATE_FREE)) {
			/* this should never happen, but if it does, log */
			mprsas_log_command(cm, MPR_RECOVERY,
			    "cm %p state %x flags 0x%x ccb %p during diag "
			    "reset\n", cm, cm->cm_state, cm->cm_flags,
			    cm->cm_ccb);
		}
	}

	/* Nothing is outstanding after forcing completion of everything. */
	sc->io_cmds_active = 0;
}
/*
 * Re-initialization hook run after a controller diag reset: puts the SAS
 * layer back into startup/discovery mode, announces a bus reset to CAM,
 * flushes all outstanding commands, and invalidates every target's device
 * handles so they are re-learned during rediscovery.
 */
void
mprsas_handle_reinit(struct mpr_softc *sc)
{
	int i;

	/* Go back into startup mode and freeze the simq, so that CAM
	 * doesn't send any commands until after we've rediscovered all
	 * targets and found the proper device handles for them.
	 *
	 * After the reset, portenable will trigger discovery, and after all
	 * discovery-related activities have finished, the simq will be
	 * released.
	 */
	mpr_dprint(sc, MPR_INIT, "%s startup\n", __func__);
	sc->sassc->flags |= MPRSAS_IN_STARTUP;
	sc->sassc->flags |= MPRSAS_IN_DISCOVERY;
	mprsas_startup_increment(sc->sassc);

	/* notify CAM of a bus reset */
	mprsas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);

	/* complete and cleanup after all outstanding commands */
	mprsas_complete_all_commands(sc);

	mpr_dprint(sc, MPR_INIT, "%s startup %u after command completion\n",
	    __func__, sc->sassc->startup_refcount);

	/* zero all the target handles, since they may change after the
	 * reset, and we have to rediscover all the targets and use the new
	 * handles.
	 */
	for (i = 0; i < sc->sassc->maxtargets; i++) {
		if (sc->sassc->targets[i].outstanding != 0)
			mpr_dprint(sc, MPR_INIT, "target %u outstanding %u\n",
			    i, sc->sassc->targets[i].outstanding);
		sc->sassc->targets[i].handle = 0x0;
		sc->sassc->targets[i].exp_dev_handle = 0x0;
		sc->sassc->targets[i].outstanding = 0;
		sc->sassc->targets[i].flags = MPRSAS_TARGET_INDIAGRESET;
	}
}
1254 static void
1255 mprsas_tm_timeout(void *data)
1257 struct mpr_command *tm = data;
1258 struct mpr_softc *sc = tm->cm_sc;
1260 KKASSERT(lockowned(&sc->mpr_lock));
1262 mprsas_log_command(tm, MPR_INFO|MPR_RECOVERY, "task mgmt %p timed "
1263 "out\n", tm);
1265 KASSERT(tm->cm_state == MPR_CM_STATE_INQUEUE,
1266 ("command not inqueue\n"));
1268 tm->cm_state = MPR_CM_STATE_BUSY;
1269 mpr_reinit(sc);
/*
 * Completion handler for a LUN-reset task-management command.  Decides the
 * next recovery step: if the LUN has no remaining outstanding commands the
 * reset succeeded (announce it and either continue aborting other timed-out
 * commands on the target or finish recovery); otherwise escalate to a
 * target reset.  A NULL reply means the firmware never answered — clean up
 * if a diag reset is already in flight, else reinit the controller.
 */
static void
mprsas_logical_unit_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	unsigned int cm_count = 0;
	struct mpr_command *cm;
	struct mprsas_target *targ;

	/* The TM completed; its own timeout no longer applies. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_RECOVERY|MPR_ERROR,
		    "%s: cm_flags = %#x for LUN reset! "
		    "This should not happen!\n", __func__, tm->cm_flags);
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpr_dprint(sc, MPR_RECOVERY, "NULL reset reply for tm %p\n",
		    tm);
		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
			    "reset, ignoring NULL LUN reset reply\n");
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
			    "LUN reset attempt, resetting controller\n");
			mpr_reinit(sc);
		}
		return;
	}

	mpr_dprint(sc, MPR_RECOVERY,
	    "logical unit reset status 0x%x code 0x%x count %u\n",
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	/*
	 * See if there are any outstanding commands for this LUN.
	 * This could be made more efficient by using a per-LU data
	 * structure of some sort.
	 */
	TAILQ_FOREACH(cm, &targ->commands, cm_link) {
		if (cm->cm_lun == tm->cm_lun)
			cm_count++;
	}

	if (cm_count == 0) {
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "Finished recovery after LUN reset for target %u\n",
		    targ->tid);

		mprsas_announce_reset(sc, AC_SENT_BDR, targ->tid,
		    tm->cm_lun);

		/*
		 * We've finished recovery for this logical unit.  check and
		 * see if some other logical unit has a timedout command
		 * that needs to be processed.
		 */
		cm = TAILQ_FIRST(&targ->timedout_commands);
		if (cm) {
			mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
			    "More commands to abort for target %u\n", targ->tid);
			/* Reuse this TM to abort the next timed-out command. */
			mprsas_send_abort(sc, tm, cm);
		} else {
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		}
	} else {
		/* if we still have commands for this LUN, the reset
		 * effectively failed, regardless of the status reported.
		 * Escalate to a target reset.
		 */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "logical unit reset complete for target %u, but still "
		    "have %u command(s), sending target reset\n", targ->tid,
		    cm_count);
		mprsas_send_reset(sc, tm,
		    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
	}
}
/*
 * Completion handler for a target-reset task-management command.  If the
 * target has no outstanding commands the reset worked: announce it and
 * finish recovery.  If commands remain, the reset effectively failed and
 * the only remaining escalation is a full controller reinit.  A NULL
 * reply is handled the same way as in the LUN-reset completion above.
 */
static void
mprsas_target_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *targ;

	/* The TM completed; cancel its timeout. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for target "
		    "reset! This should not happen!\n", __func__, tm->cm_flags);
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpr_dprint(sc, MPR_RECOVERY,
		    "NULL target reset reply for tm %p TaskMID %u\n",
		    tm, le16toh(req->TaskMID));
		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
			    "reset, ignoring NULL target reset reply\n");
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
			    "target reset attempt, resetting controller\n");
			mpr_reinit(sc);
		}
		return;
	}

	mpr_dprint(sc, MPR_RECOVERY,
	    "target reset status 0x%x code 0x%x count %u\n",
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	if (targ->outstanding == 0) {
		/*
		 * We've finished recovery for this target and all
		 * of its logical units.
		 */
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "Finished reset recovery for target %u\n", targ->tid);

		mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
		    CAM_LUN_WILDCARD);

		targ->tm = NULL;
		mprsas_free_tm(sc, tm);
	} else {
		/*
		 * After a target reset, if this target still has
		 * outstanding commands, the reset effectively failed,
		 * regardless of the status reported.  escalate.
		 */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "Target reset complete for target %u, but still have %u "
		    "command(s), resetting controller\n", targ->tid,
		    targ->outstanding);
		mpr_reinit(sc);
	}
}
/* Seconds a task-management reset may run before mprsas_tm_timeout fires. */
#define MPR_RESET_TIMEOUT 30

/*
 * Build and issue a LUN-reset or target-reset task-management request
 * using the pre-allocated TM command 'tm'.  Sets the matching completion
 * handler, freezes/queues I/O for the affected scope via
 * mprsas_prepare_for_tm(), arms the TM timeout, and maps the command to
 * the hardware.  Returns the mpr_map_command() result, or -1 for an
 * invalid device handle or unknown reset type.
 *
 * NOTE(review): the return-type line is not visible in this chunk; 'int'
 * matches the callers' use of the return value — confirm against the
 * full file.
 */
int
mprsas_send_reset(struct mpr_softc *sc, struct mpr_command *tm, uint8_t type)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *target;
	int err;

	target = tm->cm_targ;
	if (target->handle == 0) {
		mpr_dprint(sc, MPR_ERROR, "%s null devhandle for target_id "
		    "%d\n", __func__, target->tid);
		return -1;
	}

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(target->handle);
	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	req->TaskType = type;

	if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
		/* XXX Need to handle invalid LUNs */
		MPR_SET_LUN(req->LUN, tm->cm_lun);
		tm->cm_targ->logical_unit_resets++;
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "Sending logical unit reset to target %u lun %d\n",
		    target->tid, tm->cm_lun);
		tm->cm_complete = mprsas_logical_unit_reset_complete;
		mprsas_prepare_for_tm(sc, tm, target, tm->cm_lun);
	} else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
		/*
		 * Target reset method =
		 * SAS Hard Link Reset / SATA Link Reset
		 */
		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
		tm->cm_targ->target_resets++;
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "Sending target reset to target %u\n", target->tid);
		tm->cm_complete = mprsas_target_reset_complete;
		mprsas_prepare_for_tm(sc, tm, target, CAM_LUN_WILDCARD);
	}
	else {
		mpr_dprint(sc, MPR_ERROR, "unexpected reset type 0x%x\n", type);
		return -1;
	}

	/* Extra locating info for enclosure-aware targets. */
	if (target->encl_level_valid) {
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "At enclosure level %d, slot %d, connector name (%4s)\n",
		    target->encl_level, target->encl_slot,
		    target->connector_name);
	}

	/* TM requests carry no data and go on the high-priority queue. */
	tm->cm_data = NULL;
	tm->cm_desc.HighPriority.RequestFlags =
	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	tm->cm_complete_data = (void *)tm;

	callout_reset(&tm->cm_callout, MPR_RESET_TIMEOUT * hz,
	    mprsas_tm_timeout, tm);

	err = mpr_map_command(sc, tm);
	if (err)
		mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
		    "error %d sending reset type %u\n", err, type);

	return err;
}
/*
 * Completion handler for an ABORT_TASK task-management command.  Inspects
 * the target's timed-out command list to decide the next step: nothing
 * left means recovery is done; a different command at the head means this
 * abort succeeded and the next one is issued; the same command still at
 * the head means the abort failed, so escalate to a logical unit reset.
 */
static void
mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	struct mpr_command *cm;
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *targ;

	/* The TM completed; cancel its timeout. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_RECOVERY|MPR_ERROR,
		    "cm_flags = %#x for abort %p TaskMID %u!\n",
		    tm->cm_flags, tm, le16toh(req->TaskMID));
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpr_dprint(sc, MPR_RECOVERY,
		    "NULL abort reply for tm %p TaskMID %u\n",
		    tm, le16toh(req->TaskMID));
		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
			    "reset, ignoring NULL abort reply\n");
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		} else {
			/* we should have gotten a reply. */
			mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
			    "abort attempt, resetting controller\n");
			mpr_reinit(sc);
		}
		return;
	}

	mpr_dprint(sc, MPR_RECOVERY,
	    "abort TaskMID %u status 0x%x code 0x%x count %u\n",
	    le16toh(req->TaskMID),
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
	if (cm == NULL) {
		/*
		 * if there are no more timedout commands, we're done with
		 * error recovery for this target.
		 */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "Finished abort recovery for target %u\n", targ->tid);
		targ->tm = NULL;
		mprsas_free_tm(sc, tm);
	} else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
		/* abort success, but we have more timedout commands to abort */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "Continuing abort recovery for target %u\n", targ->tid);
		mprsas_send_abort(sc, tm, cm);
	} else {
		/*
		 * we didn't get a command completion, so the abort
		 * failed as far as we're concerned.  escalate.
		 */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "Abort failed for target %u, sending logical unit reset\n",
		    targ->tid);

		mprsas_send_reset(sc, tm,
		    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
	}
}
/* Seconds an ABORT_TASK TM may run before mprsas_tm_timeout fires. */
#define MPR_ABORT_TIMEOUT 5

/*
 * Build and issue an ABORT_TASK task-management request for the timed-out
 * command 'cm', reusing the TM command 'tm'.  Points the TM at cm's
 * target/LUN, arms the (shorter) abort timeout, prepares the target for
 * TM processing, and maps the request.  Returns the mpr_map_command()
 * result, or -1 if the target has no valid device handle.
 */
static int
mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
    struct mpr_command *cm)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *targ;
	int err;

	targ = cm->cm_targ;
	if (targ->handle == 0) {
		mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
		    "%s null devhandle for target_id %d\n",
		    __func__, cm->cm_ccb->ccb_h.target_id);
		return -1;
	}

	mprsas_log_command(cm, MPR_RECOVERY|MPR_INFO,
	    "Aborting command %p\n", cm);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;

	/* XXX Need to handle invalid LUNs */
	MPR_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);

	/* TaskMID identifies which in-flight command to abort. */
	req->TaskMID = htole16(cm->cm_desc.Default.SMID);

	/* TM requests carry no data and go on the high-priority queue. */
	tm->cm_data = NULL;
	tm->cm_desc.HighPriority.RequestFlags =
	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	tm->cm_complete = mprsas_abort_complete;
	tm->cm_complete_data = (void *)tm;
	tm->cm_targ = cm->cm_targ;
	tm->cm_lun = cm->cm_lun;

	callout_reset(&tm->cm_callout, MPR_ABORT_TIMEOUT * hz,
	    mprsas_tm_timeout, tm);

	targ->aborts++;

	mprsas_prepare_for_tm(sc, tm, targ, tm->cm_lun);

	err = mpr_map_command(sc, tm);
	if (err)
		mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
		    "error %d sending abort for cm %p SMID %u\n",
		    err, cm, req->TaskMID);
	return err;
}
/*
 * Callout handler fired when a SCSI I/O command exceeds its CAM timeout.
 * First polls the interrupt handler in case the completion is merely
 * pending; if the command is genuinely stuck it is marked TIMEDOUT,
 * queued on the target's timed-out list, and recovery is started by
 * sending an abort (one TM per target at a time).
 */
static void
mprsas_scsiio_timeout(void *data)
{
#if 0 /* XXX swildner: sbintime */
	sbintime_t elapsed, now;
#endif
	union ccb *ccb;
	struct mpr_softc *sc;
	struct mpr_command *cm;
	struct mprsas_target *targ;

	cm = (struct mpr_command *)data;
	sc = cm->cm_sc;
	ccb = cm->cm_ccb;
#if 0 /* XXX swildner: sbintime */
	now = sbinuptime();
#endif

	MPR_FUNCTRACE(sc);
	KKASSERT(lockowned(&sc->mpr_lock));

	mpr_dprint(sc, MPR_XINFO|MPR_RECOVERY, "Timeout checking cm %p\n", cm);

	/*
	 * Run the interrupt handler to make sure it's not pending.  This
	 * isn't perfect because the command could have already completed
	 * and been re-used, though this is unlikely.
	 */
	mpr_intr_locked(sc);
	if (cm->cm_state != MPR_CM_STATE_INQUEUE) {
		/* The poll above completed it — no recovery needed. */
		mprsas_log_command(cm, MPR_XINFO,
		    "SCSI command %p almost timed out\n", cm);
		return;
	}

	if (cm->cm_ccb == NULL) {
		mpr_dprint(sc, MPR_ERROR, "command timeout with NULL ccb\n");
		return;
	}

	targ = cm->cm_targ;
	targ->timeouts++;

#if 0 /* XXX swildner: sbintime */
	elapsed = now - ccb->ccb_h.qos.sim_data;
	mprsas_log_command(cm, MPR_INFO|MPR_RECOVERY,
	    "Command timeout on target %u(0x%04x), %d set, %d.%d elapsed\n",
	    targ->tid, targ->handle, ccb->ccb_h.timeout,
	    sbintime_getsec(elapsed), elapsed & 0xffffffff);
#endif
	if (targ->encl_level_valid) {
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "At enclosure level %d, slot %d, connector name (%4s)\n",
		    targ->encl_level, targ->encl_slot, targ->connector_name);
	}

	/* XXX first, check the firmware state, to see if it's still
	 * operational.  if not, do a diag reset.
	 */
	mprsas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT);
	cm->cm_state = MPR_CM_STATE_TIMEDOUT;
	TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);

	if (targ->tm != NULL) {
		/* target already in recovery, just queue up another
		 * timedout command to be processed later.
		 */
		mpr_dprint(sc, MPR_RECOVERY, "queued timedout cm %p for "
		    "processing by tm %p\n", cm, targ->tm);
	}
	else if ((targ->tm = mprsas_alloc_tm(sc)) != NULL) {

		/* start recovery by aborting the first timedout command */
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "Sending abort to target %u for SMID %d\n", targ->tid,
		    cm->cm_desc.Default.SMID);
		mpr_dprint(sc, MPR_RECOVERY, "timedout cm %p allocated tm %p\n",
		    cm, targ->tm);
		mprsas_send_abort(sc, targ->tm, cm);
	}
	else {
		/* XXX queue this target up for recovery once a TM becomes
		 * available.  The firmware only has a limited number of
		 * HighPriority credits for the high priority requests used
		 * for task management, and we ran out.
		 *
		 * Isilon: don't worry about this for now, since we have
		 * more credits than disks in an enclosure, and limit
		 * ourselves to one TM per target for recovery.
		 */
		mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
		    "timedout cm %p failed to allocate a tm\n", cm);
	}
}
#if 0 /* XXX swildner: NVMe support */
/**
 * mprsas_build_nvme_unmap - Build Native NVMe DSM command equivalent
 * to SCSI Unmap.
 * Return 0 - for success,
 * 1 - to immediately return back the command with success status to CAM
 * negative value - to fallback to firmware path i.e. issue scsi unmap
 * to FW without any translation.
 */
static int
mprsas_build_nvme_unmap(struct mpr_softc *sc, struct mpr_command *cm,
    union ccb *ccb, struct mprsas_target *targ)
{
	Mpi26NVMeEncapsulatedRequest_t *req = NULL;
	struct ccb_scsiio *csio;
	struct unmap_parm_list *plist;
	struct nvme_dsm_range *nvme_dsm_ranges = NULL;
	struct nvme_command *c;
	int i, res;
	uint16_t ndesc, list_len, data_length;
	struct mpr_prp_page *prp_page_info;
	uint64_t nvme_dsm_ranges_dma_handle;

	csio = &ccb->csio;
	/* UNMAP CDB bytes 7-8 hold the big-endian parameter list length. */
#if __FreeBSD_version >= 1100103
	list_len = (scsiio_cdb_ptr(csio)[7] << 8 | scsiio_cdb_ptr(csio)[8]);
#else
	if (csio->ccb_h.flags & CAM_CDB_POINTER) {
		list_len = (ccb->csio.cdb_io.cdb_ptr[7] << 8 |
		    ccb->csio.cdb_io.cdb_ptr[8]);
	} else {
		list_len = (ccb->csio.cdb_io.cdb_bytes[7] << 8 |
		    ccb->csio.cdb_io.cdb_bytes[8]);
	}
#endif
	if (!list_len) {
		mpr_dprint(sc, MPR_ERROR, "Parameter list length is Zero\n");
		return -EINVAL;
	}

	plist = kmalloc(csio->dxfer_len, M_MPR, M_ZERO|M_NOWAIT);
	if (!plist) {
		mpr_dprint(sc, MPR_ERROR, "Unable to allocate memory to "
		    "save UNMAP data\n");
		return -ENOMEM;
	}

	/* Copy SCSI unmap data to a local buffer */
	bcopy(csio->data_ptr, plist, csio->dxfer_len);

	/* return back the unmap command to CAM with success status,
	 * if number of descripts is zero.
	 */
	ndesc = be16toh(plist->unmap_blk_desc_data_len) >> 4;
	if (!ndesc) {
		mpr_dprint(sc, MPR_XINFO, "Number of descriptors in "
		    "UNMAP cmd is Zero\n");
		res = 1;
		goto out;
	}

	data_length = ndesc * sizeof(struct nvme_dsm_range);
	if (data_length > targ->MDTS) {
		mpr_dprint(sc, MPR_ERROR, "data length: %d is greater than "
		    "Device's MDTS: %d\n", data_length, targ->MDTS);
		res = -EINVAL;
		goto out;
	}

	prp_page_info = mpr_alloc_prp_page(sc);
	KASSERT(prp_page_info != NULL, ("%s: There is no PRP Page for "
	    "UNMAP command.\n", __func__));

	/*
	 * Insert the allocated PRP page into the command's PRP page list. This
	 * will be freed when the command is freed.
	 */
	TAILQ_INSERT_TAIL(&cm->cm_prp_page_list, prp_page_info, prp_page_link);

	nvme_dsm_ranges = (struct nvme_dsm_range *)prp_page_info->prp_page;
	nvme_dsm_ranges_dma_handle = prp_page_info->prp_page_busaddr;

	bzero(nvme_dsm_ranges, data_length);

	/* Convert SCSI unmap's descriptor data to NVMe DSM specific Range data
	 * for each descriptors contained in SCSI UNMAP data.
	 */
	for (i = 0; i < ndesc; i++) {
		nvme_dsm_ranges[i].length =
		    htole32(be32toh(plist->desc[i].nlb));
		nvme_dsm_ranges[i].starting_lba =
		    htole64(be64toh(plist->desc[i].slba));
		nvme_dsm_ranges[i].attributes = 0;
	}

	/* Build MPI2.6's NVMe Encapsulated Request Message */
	req = (Mpi26NVMeEncapsulatedRequest_t *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_NVME_ENCAPSULATED;
	req->Flags = MPI26_NVME_FLAGS_WRITE;
	req->ErrorResponseBaseAddress.High =
	    htole32((uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32));
	req->ErrorResponseBaseAddress.Low =
	    htole32(cm->cm_sense_busaddr);
	req->ErrorResponseAllocationLength =
	    htole16(sizeof(struct nvme_completion));
	req->EncapsulatedCommandLength =
	    htole16(sizeof(struct nvme_command));
	req->DataLength = htole32(data_length);

	/* Build NVMe DSM command */
	c = (struct nvme_command *) req->NVMe_Command;
	c->opc_fuse = NVME_CMD_SET_OPC(NVME_OPC_DATASET_MANAGEMENT);
	c->nsid = htole32(csio->ccb_h.target_lun + 1);
	c->cdw10 = htole32(ndesc - 1);
	c->cdw11 = htole32(NVME_DSM_ATTR_DEALLOCATE);

	cm->cm_length = data_length;
	cm->cm_data = NULL;

	cm->cm_complete = mprsas_scsiio_complete;
	cm->cm_complete_data = ccb;
	cm->cm_targ = targ;
	cm->cm_lun = csio->ccb_h.target_lun;
	cm->cm_ccb = ccb;

	cm->cm_desc.Default.RequestFlags =
	    MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED;

	csio->ccb_h.qos.sim_data = sbinuptime();
#if __FreeBSD_version >= 1000029
	callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
	    mprsas_scsiio_timeout, cm, 0);
#else //__FreeBSD_version < 1000029
	callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
	    mprsas_scsiio_timeout, cm);
#endif //__FreeBSD_version >= 1000029

	targ->issued++;
	targ->outstanding++;
	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
	ccb->ccb_h.status |= CAM_SIM_QUEUED;

	mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n",
	    __func__, cm, ccb, targ->outstanding);

	mpr_build_nvme_prp(sc, cm, req,
	    (void *)(uintptr_t)nvme_dsm_ranges_dma_handle, 0, data_length);
	mpr_map_command(sc, cm);

	/*
	 * Request was issued to the hardware; report success.  The error
	 * paths above jump to "out" with res already set (1 or -EINVAL).
	 */
	res = 0;

out:
	kfree(plist, M_MPR);
	/*
	 * Bug fix: this previously returned 0 unconditionally, discarding
	 * the res values set on the goto-out error paths — the caller would
	 * then believe an NVMe request had been issued and never complete
	 * the CCB.  Return the tracked result instead, matching the
	 * function's documented contract.
	 */
	return res;
}
#endif
1905 static void
1906 mprsas_action_scsiio(struct mprsas_softc *sassc, union ccb *ccb)
1908 MPI2_SCSI_IO_REQUEST *req;
1909 struct ccb_scsiio *csio;
1910 struct mpr_softc *sc;
1911 struct mprsas_target *targ;
1912 struct mprsas_lun *lun;
1913 struct mpr_command *cm;
1914 uint8_t i, lba_byte, *ref_tag_addr;
1915 #if 0 /* XXX swildner: NVMe support */
1916 uint8_t scsi_opcode;
1917 #endif
1918 uint16_t eedp_flags;
1919 uint32_t mpi_control;
1920 #if 0 /* XXX swildner: NVMe support */
1921 int rc;
1922 #endif
1924 sc = sassc->sc;
1925 MPR_FUNCTRACE(sc);
1926 KKASSERT(lockowned(&sc->mpr_lock));
1928 csio = &ccb->csio;
1929 KASSERT(csio->ccb_h.target_id < sassc->maxtargets,
1930 ("Target %d out of bounds in XPT_SCSI_IO\n",
1931 csio->ccb_h.target_id));
1932 targ = &sassc->targets[csio->ccb_h.target_id];
1933 mpr_dprint(sc, MPR_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
1934 if (targ->handle == 0x0) {
1935 mpr_dprint(sc, MPR_ERROR, "%s NULL handle for target %u\n",
1936 __func__, csio->ccb_h.target_id);
1937 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1938 xpt_done(ccb);
1939 return;
1941 if (targ->flags & MPR_TARGET_FLAGS_RAID_COMPONENT) {
1942 mpr_dprint(sc, MPR_ERROR, "%s Raid component no SCSI IO "
1943 "supported %u\n", __func__, csio->ccb_h.target_id);
1944 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1945 xpt_done(ccb);
1946 return;
1949 * Sometimes, it is possible to get a command that is not "In
1950 * Progress" and was actually aborted by the upper layer. Check for
1951 * this here and complete the command without error.
1953 if (mprsas_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
1954 mpr_dprint(sc, MPR_TRACE, "%s Command is not in progress for "
1955 "target %u\n", __func__, csio->ccb_h.target_id);
1956 xpt_done(ccb);
1957 return;
1960 * If devinfo is 0 this will be a volume. In that case don't tell CAM
1961 * that the volume has timed out. We want volumes to be enumerated
1962 * until they are deleted/removed, not just failed.
1964 if (targ->flags & MPRSAS_TARGET_INREMOVAL) {
1965 if (targ->devinfo == 0)
1966 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
1967 else
1968 mprsas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
1969 xpt_done(ccb);
1970 return;
1973 if ((sc->mpr_flags & MPR_FLAGS_SHUTDOWN) != 0) {
1974 mpr_dprint(sc, MPR_INFO, "%s shutting down\n", __func__);
1975 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1976 xpt_done(ccb);
1977 return;
1981 * If target has a reset in progress, freeze the devq and return. The
1982 * devq will be released when the TM reset is finished.
1984 if (targ->flags & MPRSAS_TARGET_INRESET) {
1985 ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN;
1986 mpr_dprint(sc, MPR_INFO, "%s: Freezing devq for target ID %d\n",
1987 __func__, targ->tid);
1988 xpt_freeze_devq(ccb->ccb_h.path, 1);
1989 xpt_done(ccb);
1990 return;
1993 cm = mpr_alloc_command(sc);
1994 if (cm == NULL || (sc->mpr_flags & MPR_FLAGS_DIAGRESET)) {
1995 if (cm != NULL) {
1996 mpr_free_command(sc, cm);
1998 if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
1999 xpt_freeze_simq(sassc->sim, 1);
2000 sassc->flags |= MPRSAS_QUEUE_FROZEN;
2002 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2003 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
2004 xpt_done(ccb);
2005 return;
2008 #if 0 /* XXX swildner: NVMe support */
2009 /* For NVME device's issue UNMAP command directly to NVME drives by
2010 * constructing equivalent native NVMe DataSetManagement command.
2012 #if __FreeBSD_version >= 1100103
2013 scsi_opcode = scsiio_cdb_ptr(csio)[0];
2014 #else
2015 if (csio->ccb_h.flags & CAM_CDB_POINTER)
2016 scsi_opcode = csio->cdb_io.cdb_ptr[0];
2017 else
2018 scsi_opcode = csio->cdb_io.cdb_bytes[0];
2019 #endif
2020 if (scsi_opcode == UNMAP &&
2021 targ->is_nvme &&
2022 (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) {
2023 rc = mprsas_build_nvme_unmap(sc, cm, ccb, targ);
2024 if (rc == 1) { /* return command to CAM with success status */
2025 mpr_free_command(sc, cm);
2026 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2027 xpt_done(ccb);
2028 return;
2029 } else if (!rc) /* Issued NVMe Encapsulated Request Message */
2030 return;
2032 #endif
2034 req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
2035 bzero(req, sizeof(*req));
2036 req->DevHandle = htole16(targ->handle);
2037 req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
2038 req->MsgFlags = 0;
2039 req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
2040 req->SenseBufferLength = MPR_SENSE_LEN;
2041 req->SGLFlags = 0;
2042 req->ChainOffset = 0;
2043 req->SGLOffset0 = 24; /* 32bit word offset to the SGL */
2044 req->SGLOffset1= 0;
2045 req->SGLOffset2= 0;
2046 req->SGLOffset3= 0;
2047 req->SkipCount = 0;
2048 req->DataLength = htole32(csio->dxfer_len);
2049 req->BidirectionalDataLength = 0;
2050 req->IoFlags = htole16(csio->cdb_len);
2051 req->EEDPFlags = 0;
2053 /* Note: BiDirectional transfers are not supported */
2054 switch (csio->ccb_h.flags & CAM_DIR_MASK) {
2055 case CAM_DIR_IN:
2056 mpi_control = MPI2_SCSIIO_CONTROL_READ;
2057 cm->cm_flags |= MPR_CM_FLAGS_DATAIN;
2058 break;
2059 case CAM_DIR_OUT:
2060 mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
2061 cm->cm_flags |= MPR_CM_FLAGS_DATAOUT;
2062 break;
2063 case CAM_DIR_NONE:
2064 default:
2065 mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
2066 break;
2069 if (csio->cdb_len == 32)
2070 mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
2072 * It looks like the hardware doesn't require an explicit tag
2073 * number for each transaction. SAM Task Management not supported
2074 * at the moment.
2076 switch (csio->tag_action) {
2077 case MSG_HEAD_OF_Q_TAG:
2078 mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
2079 break;
2080 case MSG_ORDERED_Q_TAG:
2081 mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
2082 break;
2083 case MSG_ACA_TASK:
2084 mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
2085 break;
2086 case CAM_TAG_ACTION_NONE:
2087 case MSG_SIMPLE_Q_TAG:
2088 default:
2089 mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
2090 break;
2092 mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
2093 req->Control = htole32(mpi_control);
2095 if (MPR_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
2096 mpr_free_command(sc, cm);
2097 mprsas_set_ccbstatus(ccb, CAM_LUN_INVALID);
2098 xpt_done(ccb);
2099 return;
2102 if (csio->ccb_h.flags & CAM_CDB_POINTER)
2103 bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
2104 else {
2105 KASSERT(csio->cdb_len <= IOCDBLEN,
2106 ("cdb_len %d is greater than IOCDBLEN but CAM_CDB_POINTER "
2107 "is not set", csio->cdb_len));
2108 bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
2110 req->IoFlags = htole16(csio->cdb_len);
2113 * Check if EEDP is supported and enabled. If it is then check if the
2114 * SCSI opcode could be using EEDP. If so, make sure the LUN exists and
2115 * is formatted for EEDP support. If all of this is true, set CDB up
2116 * for EEDP transfer.
2118 eedp_flags = op_code_prot[req->CDB.CDB32[0]];
2119 if (sc->eedp_enabled && eedp_flags) {
2120 SLIST_FOREACH(lun, &targ->luns, lun_link) {
2121 if (lun->lun_id == csio->ccb_h.target_lun) {
2122 break;
2126 if ((lun != NULL) && (lun->eedp_formatted)) {
2127 req->EEDPBlockSize = htole16(lun->eedp_block_size);
2128 eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
2129 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
2130 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
2131 if (sc->mpr_flags & MPR_FLAGS_GEN35_IOC) {
2132 eedp_flags |=
2133 MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE;
2135 req->EEDPFlags = htole16(eedp_flags);
2138 * If CDB less than 32, fill in Primary Ref Tag with
2139 * low 4 bytes of LBA. If CDB is 32, tag stuff is
2140 * already there. Also, set protection bit. FreeBSD
2141 * currently does not support CDBs bigger than 16, but
2142 * the code doesn't hurt, and will be here for the
2143 * future.
2145 if (csio->cdb_len != 32) {
2146 lba_byte = (csio->cdb_len == 16) ? 6 : 2;
2147 ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
2148 PrimaryReferenceTag;
2149 for (i = 0; i < 4; i++) {
2150 *ref_tag_addr =
2151 req->CDB.CDB32[lba_byte + i];
2152 ref_tag_addr++;
2154 req->CDB.EEDP32.PrimaryReferenceTag =
2155 htole32(req->
2156 CDB.EEDP32.PrimaryReferenceTag);
2157 req->CDB.EEDP32.PrimaryApplicationTagMask =
2158 0xFFFF;
2159 req->CDB.CDB32[1] =
2160 (req->CDB.CDB32[1] & 0x1F) | 0x20;
2161 } else {
2162 eedp_flags |=
2163 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
2164 req->EEDPFlags = htole16(eedp_flags);
2165 req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
2166 0x1F) | 0x20;
2171 cm->cm_length = csio->dxfer_len;
2172 if (cm->cm_length != 0) {
2173 cm->cm_data = ccb;
2174 cm->cm_flags |= MPR_CM_FLAGS_USE_CCB;
2175 } else {
2176 cm->cm_data = NULL;
2178 cm->cm_sge = &req->SGL;
2179 cm->cm_sglsize = (32 - 24) * 4;
2180 cm->cm_complete = mprsas_scsiio_complete;
2181 cm->cm_complete_data = ccb;
2182 cm->cm_targ = targ;
2183 cm->cm_lun = csio->ccb_h.target_lun;
2184 cm->cm_ccb = ccb;
2186 * If using FP desc type, need to set a bit in IoFlags (SCSI IO is 0)
2187 * and set descriptor type.
2189 if (targ->scsi_req_desc_type ==
2190 MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO) {
2191 req->IoFlags |= MPI25_SCSIIO_IOFLAGS_FAST_PATH;
2192 cm->cm_desc.FastPathSCSIIO.RequestFlags =
2193 MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
2194 if (!sc->atomic_desc_capable) {
2195 cm->cm_desc.FastPathSCSIIO.DevHandle =
2196 htole16(targ->handle);
2198 } else {
2199 cm->cm_desc.SCSIIO.RequestFlags =
2200 MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
2201 if (!sc->atomic_desc_capable)
2202 cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
2205 #if 0 /* XXX swildner sbintime */
2206 csio->ccb_h.qos.sim_data = sbinuptime();
2207 #endif
2208 #if __FreeBSD_version >= 1000029
2209 callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
2210 mprsas_scsiio_timeout, cm, 0);
2211 #else //__FreeBSD_version < 1000029
2212 callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
2213 mprsas_scsiio_timeout, cm);
2214 #endif //__FreeBSD_version >= 1000029
2216 targ->issued++;
2217 targ->outstanding++;
2218 TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
2219 ccb->ccb_h.status |= CAM_SIM_QUEUED;
2221 mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n",
2222 __func__, cm, ccb, targ->outstanding);
2224 mpr_map_command(sc, cm);
2225 return;
2229 * mpr_sc_failed_io_info - translated non-succesfull SCSI_IO request
2231 static void
2232 mpr_sc_failed_io_info(struct mpr_softc *sc, struct ccb_scsiio *csio,
2233 Mpi2SCSIIOReply_t *mpi_reply, struct mprsas_target *targ)
2235 u32 response_info;
2236 u8 *response_bytes;
2237 u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
2238 MPI2_IOCSTATUS_MASK;
2239 u8 scsi_state = mpi_reply->SCSIState;
2240 u8 scsi_status = mpi_reply->SCSIStatus;
2241 char *desc_ioc_state = NULL;
2242 char *desc_scsi_status = NULL;
2243 u32 log_info = le32toh(mpi_reply->IOCLogInfo);
2245 if (log_info == 0x31170000)
2246 return;
2248 desc_ioc_state = mpr_describe_table(mpr_iocstatus_string,
2249 ioc_status);
2250 desc_scsi_status = mpr_describe_table(mpr_scsi_status_string,
2251 scsi_status);
2253 mpr_dprint(sc, MPR_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
2254 le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
2255 if (targ->encl_level_valid) {
2256 mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, "
2257 "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
2258 targ->connector_name);
2262 * We can add more detail about underflow data here
2263 * TO-DO
2265 mpr_dprint(sc, MPR_XINFO, "\tscsi_status(%s)(0x%02x), "
2266 "scsi_state %pb%i\n", desc_scsi_status, scsi_status,
2267 "\20" "\1AutosenseValid" "\2AutosenseFailed"
2268 "\3NoScsiStatus" "\4Terminated" "\5Response InfoValid",
2269 scsi_state);
2271 if (sc->mpr_debug & MPR_XINFO &&
2272 scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2273 mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : Start :\n");
2274 scsi_sense_print(csio);
2275 mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : End :\n");
2278 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
2279 response_info = le32toh(mpi_reply->ResponseInfo);
2280 response_bytes = (u8 *)&response_info;
2281 mpr_dprint(sc, MPR_XINFO, "response code(0x%01x): %s\n",
2282 response_bytes[0],
2283 mpr_describe_table(mpr_scsi_taskmgmt_string,
2284 response_bytes[0]));
#if 0 /* XXX swildner: NVMe support */
/** mprsas_nvme_trans_status_code
 *
 * Convert Native NVMe command error status to
 * equivalent SCSI error status.
 *
 * Returns appropriate scsi_status
 */
static u8
mprsas_nvme_trans_status_code(uint16_t nvme_status,
    struct mpr_command *cm)
{
	/*
	 * Translation table from NVMe (status code type, status code) pairs
	 * to the equivalent SCSI status plus fixed-sense triple.  Any pair
	 * that is not listed falls back to the defaults set below
	 * (CHECK CONDITION / ILLEGAL REQUEST / NO SENSE).
	 */
	static const struct {
		uint8_t	sct;	/* NVMe status code type */
		uint8_t	sc;	/* NVMe status code */
		u8	status;	/* SCSI status */
		int	skey;	/* sense key */
		int	asc;	/* additional sense code */
		int	ascq;	/* additional sense code qualifier */
	} nvme_to_scsi[] = {
		{ NVME_SCT_GENERIC, NVME_SC_SUCCESS,
		    MPI2_SCSI_STATUS_GOOD, SSD_KEY_NO_SENSE,
		    SCSI_ASC_NO_SENSE, SCSI_ASCQ_CAUSE_NOT_REPORTABLE },
		{ NVME_SCT_GENERIC, NVME_SC_INVALID_OPCODE,
		    MPI2_SCSI_STATUS_CHECK_CONDITION, SSD_KEY_ILLEGAL_REQUEST,
		    SCSI_ASC_ILLEGAL_COMMAND, SCSI_ASCQ_CAUSE_NOT_REPORTABLE },
		{ NVME_SCT_GENERIC, NVME_SC_INVALID_FIELD,
		    MPI2_SCSI_STATUS_CHECK_CONDITION, SSD_KEY_ILLEGAL_REQUEST,
		    SCSI_ASC_INVALID_CDB, SCSI_ASCQ_CAUSE_NOT_REPORTABLE },
		{ NVME_SCT_GENERIC, NVME_SC_DATA_TRANSFER_ERROR,
		    MPI2_SCSI_STATUS_CHECK_CONDITION, SSD_KEY_MEDIUM_ERROR,
		    SCSI_ASC_NO_SENSE, SCSI_ASCQ_CAUSE_NOT_REPORTABLE },
		{ NVME_SCT_GENERIC, NVME_SC_ABORTED_POWER_LOSS,
		    MPI2_SCSI_STATUS_TASK_ABORTED, SSD_KEY_ABORTED_COMMAND,
		    SCSI_ASC_WARNING, SCSI_ASCQ_POWER_LOSS_EXPECTED },
		{ NVME_SCT_GENERIC, NVME_SC_INTERNAL_DEVICE_ERROR,
		    MPI2_SCSI_STATUS_CHECK_CONDITION, SSD_KEY_HARDWARE_ERROR,
		    SCSI_ASC_INTERNAL_TARGET_FAILURE,
		    SCSI_ASCQ_CAUSE_NOT_REPORTABLE },
		{ NVME_SCT_GENERIC, NVME_SC_ABORTED_BY_REQUEST,
		    MPI2_SCSI_STATUS_TASK_ABORTED, SSD_KEY_ABORTED_COMMAND,
		    SCSI_ASC_NO_SENSE, SCSI_ASCQ_CAUSE_NOT_REPORTABLE },
		{ NVME_SCT_GENERIC, NVME_SC_ABORTED_SQ_DELETION,
		    MPI2_SCSI_STATUS_TASK_ABORTED, SSD_KEY_ABORTED_COMMAND,
		    SCSI_ASC_NO_SENSE, SCSI_ASCQ_CAUSE_NOT_REPORTABLE },
		{ NVME_SCT_GENERIC, NVME_SC_ABORTED_FAILED_FUSED,
		    MPI2_SCSI_STATUS_TASK_ABORTED, SSD_KEY_ABORTED_COMMAND,
		    SCSI_ASC_NO_SENSE, SCSI_ASCQ_CAUSE_NOT_REPORTABLE },
		{ NVME_SCT_GENERIC, NVME_SC_ABORTED_MISSING_FUSED,
		    MPI2_SCSI_STATUS_TASK_ABORTED, SSD_KEY_ABORTED_COMMAND,
		    SCSI_ASC_NO_SENSE, SCSI_ASCQ_CAUSE_NOT_REPORTABLE },
		{ NVME_SCT_GENERIC, NVME_SC_INVALID_NAMESPACE_OR_FORMAT,
		    MPI2_SCSI_STATUS_CHECK_CONDITION, SSD_KEY_ILLEGAL_REQUEST,
		    SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID,
		    SCSI_ASCQ_INVALID_LUN_ID },
		{ NVME_SCT_GENERIC, NVME_SC_LBA_OUT_OF_RANGE,
		    MPI2_SCSI_STATUS_CHECK_CONDITION, SSD_KEY_ILLEGAL_REQUEST,
		    SCSI_ASC_ILLEGAL_BLOCK, SCSI_ASCQ_CAUSE_NOT_REPORTABLE },
		{ NVME_SCT_GENERIC, NVME_SC_CAPACITY_EXCEEDED,
		    MPI2_SCSI_STATUS_CHECK_CONDITION, SSD_KEY_MEDIUM_ERROR,
		    SCSI_ASC_NO_SENSE, SCSI_ASCQ_CAUSE_NOT_REPORTABLE },
		{ NVME_SCT_GENERIC, NVME_SC_NAMESPACE_NOT_READY,
		    MPI2_SCSI_STATUS_CHECK_CONDITION, SSD_KEY_NOT_READY,
		    SCSI_ASC_LUN_NOT_READY, SCSI_ASCQ_CAUSE_NOT_REPORTABLE },
		{ NVME_SCT_COMMAND_SPECIFIC, NVME_SC_INVALID_FORMAT,
		    MPI2_SCSI_STATUS_CHECK_CONDITION, SSD_KEY_ILLEGAL_REQUEST,
		    SCSI_ASC_FORMAT_COMMAND_FAILED,
		    SCSI_ASCQ_FORMAT_COMMAND_FAILED },
		{ NVME_SCT_COMMAND_SPECIFIC, NVME_SC_CONFLICTING_ATTRIBUTES,
		    MPI2_SCSI_STATUS_CHECK_CONDITION, SSD_KEY_ILLEGAL_REQUEST,
		    SCSI_ASC_INVALID_CDB, SCSI_ASCQ_CAUSE_NOT_REPORTABLE },
		{ NVME_SCT_MEDIA_ERROR, NVME_SC_WRITE_FAULTS,
		    MPI2_SCSI_STATUS_CHECK_CONDITION, SSD_KEY_MEDIUM_ERROR,
		    SCSI_ASC_PERIPHERAL_DEV_WRITE_FAULT,
		    SCSI_ASCQ_CAUSE_NOT_REPORTABLE },
		{ NVME_SCT_MEDIA_ERROR, NVME_SC_UNRECOVERED_READ_ERROR,
		    MPI2_SCSI_STATUS_CHECK_CONDITION, SSD_KEY_MEDIUM_ERROR,
		    SCSI_ASC_UNRECOVERED_READ_ERROR,
		    SCSI_ASCQ_CAUSE_NOT_REPORTABLE },
		{ NVME_SCT_MEDIA_ERROR, NVME_SC_GUARD_CHECK_ERROR,
		    MPI2_SCSI_STATUS_CHECK_CONDITION, SSD_KEY_MEDIUM_ERROR,
		    SCSI_ASC_LOG_BLOCK_GUARD_CHECK_FAILED,
		    SCSI_ASCQ_LOG_BLOCK_GUARD_CHECK_FAILED },
		{ NVME_SCT_MEDIA_ERROR, NVME_SC_APPLICATION_TAG_CHECK_ERROR,
		    MPI2_SCSI_STATUS_CHECK_CONDITION, SSD_KEY_MEDIUM_ERROR,
		    SCSI_ASC_LOG_BLOCK_APPTAG_CHECK_FAILED,
		    SCSI_ASCQ_LOG_BLOCK_APPTAG_CHECK_FAILED },
		{ NVME_SCT_MEDIA_ERROR, NVME_SC_REFERENCE_TAG_CHECK_ERROR,
		    MPI2_SCSI_STATUS_CHECK_CONDITION, SSD_KEY_MEDIUM_ERROR,
		    SCSI_ASC_LOG_BLOCK_REFTAG_CHECK_FAILED,
		    SCSI_ASCQ_LOG_BLOCK_REFTAG_CHECK_FAILED },
		{ NVME_SCT_MEDIA_ERROR, NVME_SC_COMPARE_FAILURE,
		    MPI2_SCSI_STATUS_CHECK_CONDITION, SSD_KEY_MISCOMPARE,
		    SCSI_ASC_MISCOMPARE_DURING_VERIFY,
		    SCSI_ASCQ_CAUSE_NOT_REPORTABLE },
		{ NVME_SCT_MEDIA_ERROR, NVME_SC_ACCESS_DENIED,
		    MPI2_SCSI_STATUS_CHECK_CONDITION, SSD_KEY_ILLEGAL_REQUEST,
		    SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID,
		    SCSI_ASCQ_INVALID_LUN_ID },
	};
	u8 status;
	int skey, asc, ascq;
	union ccb *ccb = cm->cm_complete_data;
	int returned_sense_len;
	uint8_t sct, sc;
	size_t i;

	sct = NVME_STATUS_GET_SCT(nvme_status);
	sc = NVME_STATUS_GET_SC(nvme_status);

	/* Defaults for any (sct, sc) pair not present in the table. */
	status = MPI2_SCSI_STATUS_CHECK_CONDITION;
	skey = SSD_KEY_ILLEGAL_REQUEST;
	asc = SCSI_ASC_NO_SENSE;
	ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;

	for (i = 0; i < sizeof(nvme_to_scsi) / sizeof(nvme_to_scsi[0]); i++) {
		if (nvme_to_scsi[i].sct == sct && nvme_to_scsi[i].sc == sc) {
			status = nvme_to_scsi[i].status;
			skey = nvme_to_scsi[i].skey;
			asc = nvme_to_scsi[i].asc;
			ascq = nvme_to_scsi[i].ascq;
			break;
		}
	}

	/* Build fixed-format sense data in the CCB from the translation. */
	returned_sense_len = sizeof(struct scsi_sense_data);
	if (returned_sense_len < ccb->csio.sense_len)
		ccb->csio.sense_resid = ccb->csio.sense_len -
		    returned_sense_len;
	else
		ccb->csio.sense_resid = 0;

	scsi_set_sense_data(&ccb->csio.sense_data, SSD_TYPE_FIXED,
	    1, skey, asc, ascq, SSD_ELEM_NONE);
	ccb->ccb_h.status |= CAM_AUTOSNS_VALID;

	return status;
}

/** mprsas_complete_nvme_unmap
 *
 * Complete native NVMe command issued using NVMe Encapsulated
 * Request Message.
 */
static u8
mprsas_complete_nvme_unmap(struct mpr_softc *sc, struct mpr_command *cm)
{
	Mpi26NVMeEncapsulatedErrorReply_t *mpi_reply;
	struct nvme_completion *nvme_completion;
	u8 scsi_status;

	scsi_status = MPI2_SCSI_STATUS_GOOD;
	mpi_reply = (Mpi26NVMeEncapsulatedErrorReply_t *)cm->cm_reply;
	/*
	 * A non-zero error-response count means the drive returned an NVMe
	 * completion with error status; translate it to a SCSI status and
	 * synthesized sense data.
	 */
	if (le16toh(mpi_reply->ErrorResponseCount)) {
		nvme_completion = (struct nvme_completion *)cm->cm_sense;
		scsi_status = mprsas_nvme_trans_status_code(
		    nvme_completion->status, cm);
	}
	return scsi_status;
}
#endif
2488 static void
2489 mprsas_scsiio_complete(struct mpr_softc *sc, struct mpr_command *cm)
2491 MPI2_SCSI_IO_REPLY *rep;
2492 union ccb *ccb;
2493 struct ccb_scsiio *csio;
2494 struct mprsas_softc *sassc;
2495 struct scsi_vpd_supported_page_list *vpd_list = NULL;
2496 u8 *TLR_bits, TLR_on, *scsi_cdb;
2497 int dir = 0, i;
2498 u16 alloc_len;
2499 struct mprsas_target *target;
2500 target_id_t target_id;
2502 MPR_FUNCTRACE(sc);
2503 mpr_dprint(sc, MPR_TRACE,
2504 "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
2505 cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
2506 cm->cm_targ->outstanding);
2508 callout_stop(&cm->cm_callout);
2509 KKASSERT(lockowned(&sc->mpr_lock));
2511 sassc = sc->sassc;
2512 ccb = cm->cm_complete_data;
2513 csio = &ccb->csio;
2514 target_id = csio->ccb_h.target_id;
2515 rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
2517 * XXX KDM if the chain allocation fails, does it matter if we do
2518 * the sync and unload here? It is simpler to do it in every case,
2519 * assuming it doesn't cause problems.
2521 if (cm->cm_data != NULL) {
2522 if (cm->cm_flags & MPR_CM_FLAGS_DATAIN)
2523 dir = BUS_DMASYNC_POSTREAD;
2524 else if (cm->cm_flags & MPR_CM_FLAGS_DATAOUT)
2525 dir = BUS_DMASYNC_POSTWRITE;
2526 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
2527 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2530 cm->cm_targ->completed++;
2531 cm->cm_targ->outstanding--;
2532 TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
2533 ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);
2535 if (cm->cm_state == MPR_CM_STATE_TIMEDOUT) {
2536 TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
2537 cm->cm_state = MPR_CM_STATE_BUSY;
2538 if (cm->cm_reply != NULL)
2539 mprsas_log_command(cm, MPR_RECOVERY,
2540 "completed timedout cm %p ccb %p during recovery "
2541 "ioc %x scsi %x state %x xfer %u\n", cm, cm->cm_ccb,
2542 le16toh(rep->IOCStatus), rep->SCSIStatus,
2543 rep->SCSIState, le32toh(rep->TransferCount));
2544 else
2545 mprsas_log_command(cm, MPR_RECOVERY,
2546 "completed timedout cm %p ccb %p during recovery\n",
2547 cm, cm->cm_ccb);
2548 } else if (cm->cm_targ->tm != NULL) {
2549 if (cm->cm_reply != NULL)
2550 mprsas_log_command(cm, MPR_RECOVERY,
2551 "completed cm %p ccb %p during recovery "
2552 "ioc %x scsi %x state %x xfer %u\n",
2553 cm, cm->cm_ccb, le16toh(rep->IOCStatus),
2554 rep->SCSIStatus, rep->SCSIState,
2555 le32toh(rep->TransferCount));
2556 else
2557 mprsas_log_command(cm, MPR_RECOVERY,
2558 "completed cm %p ccb %p during recovery\n",
2559 cm, cm->cm_ccb);
2560 } else if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
2561 mprsas_log_command(cm, MPR_RECOVERY,
2562 "reset completed cm %p ccb %p\n", cm, cm->cm_ccb);
2565 if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
2567 * We ran into an error after we tried to map the command,
2568 * so we're getting a callback without queueing the command
2569 * to the hardware. So we set the status here, and it will
2570 * be retained below. We'll go through the "fast path",
2571 * because there can be no reply when we haven't actually
2572 * gone out to the hardware.
2574 mprsas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
2577 * Currently the only error included in the mask is
2578 * MPR_CM_FLAGS_CHAIN_FAILED, which means we're out of
2579 * chain frames. We need to freeze the queue until we get
2580 * a command that completed without this error, which will
2581 * hopefully have some chain frames attached that we can
2582 * use. If we wanted to get smarter about it, we would
2583 * only unfreeze the queue in this condition when we're
2584 * sure that we're getting some chain frames back. That's
2585 * probably unnecessary.
2587 if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
2588 xpt_freeze_simq(sassc->sim, 1);
2589 sassc->flags |= MPRSAS_QUEUE_FROZEN;
2590 mpr_dprint(sc, MPR_XINFO, "Error sending command, "
2591 "freezing SIM queue\n");
2596 * Point to the SCSI CDB, which is dependent on the CAM_CDB_POINTER
2597 * flag, and use it in a few places in the rest of this function for
2598 * convenience. Use the macro if available.
2600 #if __FreeBSD_version >= 1100103
2601 scsi_cdb = scsiio_cdb_ptr(csio);
2602 #else
2603 if (csio->ccb_h.flags & CAM_CDB_POINTER)
2604 scsi_cdb = csio->cdb_io.cdb_ptr;
2605 else
2606 scsi_cdb = csio->cdb_io.cdb_bytes;
2607 #endif
2610 * If this is a Start Stop Unit command and it was issued by the driver
2611 * during shutdown, decrement the refcount to account for all of the
2612 * commands that were sent. All SSU commands should be completed before
2613 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
2614 * is TRUE.
2616 if (sc->SSU_started && (scsi_cdb[0] == START_STOP_UNIT)) {
2617 mpr_dprint(sc, MPR_INFO, "Decrementing SSU count.\n");
2618 sc->SSU_refcount--;
2621 /* Take the fast path to completion */
2622 if (cm->cm_reply == NULL) {
2623 if (mprsas_get_ccbstatus(ccb) == CAM_REQ_INPROG) {
2624 if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0)
2625 mprsas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET);
2626 else {
2627 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2628 csio->scsi_status = SCSI_STATUS_OK;
2630 if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
2631 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2632 sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
2633 mpr_dprint(sc, MPR_XINFO,
2634 "Unfreezing SIM queue\n");
2639 * There are two scenarios where the status won't be
2640 * CAM_REQ_CMP. The first is if MPR_CM_FLAGS_ERROR_MASK is
2641 * set, the second is in the MPR_FLAGS_DIAGRESET above.
2643 if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2645 * Freeze the dev queue so that commands are
2646 * executed in the correct order after error
2647 * recovery.
2649 ccb->ccb_h.status |= CAM_DEV_QFRZN;
2650 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2652 mpr_free_command(sc, cm);
2653 xpt_done(ccb);
2654 return;
2657 #if 0 /* XXX swildner: NVMe support */
2658 target = &sassc->targets[target_id];
2659 if (scsi_cdb[0] == UNMAP &&
2660 target->is_nvme &&
2661 (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) {
2662 rep->SCSIStatus = mprsas_complete_nvme_unmap(sc, cm);
2663 csio->scsi_status = rep->SCSIStatus;
2665 #endif
2667 mprsas_log_command(cm, MPR_XINFO,
2668 "ioc %x scsi %x state %x xfer %u\n",
2669 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2670 le32toh(rep->TransferCount));
2672 switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
2673 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
2674 csio->resid = cm->cm_length - le32toh(rep->TransferCount);
2675 /* FALLTHROUGH */
2676 case MPI2_IOCSTATUS_SUCCESS:
2677 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
2678 if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
2679 MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
2680 mprsas_log_command(cm, MPR_XINFO, "recovered error\n");
2682 /* Completion failed at the transport level. */
2683 if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
2684 MPI2_SCSI_STATE_TERMINATED)) {
2685 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2686 break;
2689 /* In a modern packetized environment, an autosense failure
2690 * implies that there's not much else that can be done to
2691 * recover the command.
2693 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
2694 mprsas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
2695 break;
2699 * CAM doesn't care about SAS Response Info data, but if this is
2700 * the state check if TLR should be done. If not, clear the
2701 * TLR_bits for the target.
2703 if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
2704 ((le32toh(rep->ResponseInfo) & MPI2_SCSI_RI_MASK_REASONCODE)
2705 == MPR_SCSI_RI_INVALID_FRAME)) {
2706 sc->mapping_table[target_id].TLR_bits =
2707 (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2711 * Intentionally override the normal SCSI status reporting
2712 * for these two cases. These are likely to happen in a
2713 * multi-initiator environment, and we want to make sure that
2714 * CAM retries these commands rather than fail them.
2716 if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
2717 (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
2718 mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2719 break;
2722 /* Handle normal status and sense */
2723 csio->scsi_status = rep->SCSIStatus;
2724 if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
2725 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2726 else
2727 mprsas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);
2729 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2730 int sense_len, returned_sense_len;
2732 returned_sense_len = min(le32toh(rep->SenseCount),
2733 sizeof(struct scsi_sense_data));
2734 if (returned_sense_len < csio->sense_len)
2735 csio->sense_resid = csio->sense_len -
2736 returned_sense_len;
2737 else
2738 csio->sense_resid = 0;
2740 sense_len = min(returned_sense_len,
2741 csio->sense_len - csio->sense_resid);
2742 bzero(&csio->sense_data, sizeof(csio->sense_data));
2743 bcopy(cm->cm_sense, &csio->sense_data, sense_len);
2744 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2748 * Check if this is an INQUIRY command. If it's a VPD inquiry,
2749 * and it's page code 0 (Supported Page List), and there is
2750 * inquiry data, and this is for a sequential access device, and
2751 * the device is an SSP target, and TLR is supported by the
2752 * controller, turn the TLR_bits value ON if page 0x90 is
2753 * supported.
2755 if ((scsi_cdb[0] == INQUIRY) &&
2756 (scsi_cdb[1] & SI_EVPD) &&
2757 (scsi_cdb[2] == SVPD_SUPPORTED_PAGE_LIST) &&
2758 #if 0 /* XXX swildner */
2759 ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
2760 #endif
2761 (csio->data_ptr != NULL) &&
2762 ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
2763 (sc->control_TLR) &&
2764 (sc->mapping_table[target_id].device_info &
2765 MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
2766 vpd_list = (struct scsi_vpd_supported_page_list *)
2767 csio->data_ptr;
2768 TLR_bits = &sc->mapping_table[target_id].TLR_bits;
2769 *TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2770 TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
2771 alloc_len = ((u16)scsi_cdb[3] << 8) + scsi_cdb[4];
2772 alloc_len -= csio->resid;
2773 for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
2774 if (vpd_list->list[i] == 0x90) {
2775 *TLR_bits = TLR_on;
2776 break;
2782 * If this is a SATA direct-access end device, mark it so that
2783 * a SCSI StartStopUnit command will be sent to it when the
2784 * driver is being shutdown.
2786 if ((scsi_cdb[0] == INQUIRY) &&
2787 (csio->data_ptr != NULL) &&
2788 ((csio->data_ptr[0] & 0x1f) == T_DIRECT) &&
2789 (sc->mapping_table[target_id].device_info &
2790 MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
2791 ((sc->mapping_table[target_id].device_info &
2792 MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
2793 MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
2794 target = &sassc->targets[target_id];
2795 target->supports_SSU = TRUE;
2796 mpr_dprint(sc, MPR_XINFO, "Target %d supports SSU\n",
2797 target_id);
2799 break;
2800 case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
2801 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2803 * If devinfo is 0 this will be a volume. In that case don't
2804 * tell CAM that the volume is not there. We want volumes to
2805 * be enumerated until they are deleted/removed, not just
2806 * failed.
2808 if (cm->cm_targ->devinfo == 0)
2809 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2810 else
2811 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2812 break;
2813 case MPI2_IOCSTATUS_INVALID_SGL:
2814 mpr_print_scsiio_cmd(sc, cm);
2815 mprsas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
2816 break;
2817 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2819 * This is one of the responses that comes back when an I/O
2820 * has been aborted. If it is because of a timeout that we
2821 * initiated, just set the status to CAM_CMD_TIMEOUT.
2822 * Otherwise set it to CAM_REQ_ABORTED. The effect on the
2823 * command is the same (it gets retried, subject to the
2824 * retry counter), the only difference is what gets printed
2825 * on the console.
2827 if (cm->cm_state == MPR_CM_STATE_TIMEDOUT)
2828 mprsas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT);
2829 else
2830 mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2831 break;
2832 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
2833 /* resid is ignored for this condition */
2834 csio->resid = 0;
2835 mprsas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
2836 break;
2837 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2838 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2840 * These can sometimes be transient transport-related
2841 * errors, and sometimes persistent drive-related errors.
2842 * We used to retry these without decrementing the retry
2843 * count by returning CAM_REQUEUE_REQ. Unfortunately, if
2844 * we hit a persistent drive problem that returns one of
2845 * these error codes, we would retry indefinitely. So,
2846 * return CAM_REQ_CMP_ERROR so that we decrement the retry
2847 * count and avoid infinite retries. We're taking the
2848 * potential risk of flagging false failures in the event
2849 * of a topology-related error (e.g. a SAS expander problem
2850 * causes a command addressed to a drive to fail), but
2851 * avoiding getting into an infinite retry loop.
2853 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2854 mpr_dprint(sc, MPR_INFO,
2855 "Controller reported %s tgt %u SMID %u loginfo %x\n",
2856 mpr_describe_table(mpr_iocstatus_string,
2857 le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK),
2858 target_id, cm->cm_desc.Default.SMID,
2859 le32toh(rep->IOCLogInfo));
2860 mpr_dprint(sc, MPR_XINFO,
2861 "SCSIStatus %x SCSIState %x xfercount %u\n",
2862 rep->SCSIStatus, rep->SCSIState,
2863 le32toh(rep->TransferCount));
2864 break;
2865 case MPI2_IOCSTATUS_INVALID_FUNCTION:
2866 case MPI2_IOCSTATUS_INTERNAL_ERROR:
2867 case MPI2_IOCSTATUS_INVALID_VPID:
2868 case MPI2_IOCSTATUS_INVALID_FIELD:
2869 case MPI2_IOCSTATUS_INVALID_STATE:
2870 case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
2871 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2872 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2873 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2874 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2875 default:
2876 mprsas_log_command(cm, MPR_XINFO,
2877 "completed ioc %x loginfo %x scsi %x state %x xfer %u\n",
2878 le16toh(rep->IOCStatus), le32toh(rep->IOCLogInfo),
2879 rep->SCSIStatus, rep->SCSIState,
2880 le32toh(rep->TransferCount));
2881 csio->resid = cm->cm_length;
2883 #if 0 /* XXX swildner: NVMe support */
2884 if (scsi_cdb[0] == UNMAP &&
2885 target->is_nvme &&
2886 (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR)
2887 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2888 else
2889 #endif
2890 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2892 break;
2895 mpr_sc_failed_io_info(sc, csio, rep, cm->cm_targ);
2897 if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
2898 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2899 sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
2900 mpr_dprint(sc, MPR_XINFO, "Command completed, unfreezing SIM "
2901 "queue\n");
2904 if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2905 ccb->ccb_h.status |= CAM_DEV_QFRZN;
2906 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2909 mpr_free_command(sc, cm);
2910 xpt_done(ccb);
2913 #if __FreeBSD_version >= 900026
2914 static void
2915 mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm)
2917 MPI2_SMP_PASSTHROUGH_REPLY *rpl;
2918 MPI2_SMP_PASSTHROUGH_REQUEST *req;
2919 uint64_t sasaddr;
2920 union ccb *ccb;
2922 ccb = cm->cm_complete_data;
2925 * Currently there should be no way we can hit this case. It only
2926 * happens when we have a failure to allocate chain frames, and SMP
2927 * commands require two S/G elements only. That should be handled
2928 * in the standard request size.
2930 if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
2931 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x on SMP "
2932 "request!\n", __func__, cm->cm_flags);
2933 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2934 goto bailout;
2937 rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
2938 if (rpl == NULL) {
2939 mpr_dprint(sc, MPR_ERROR, "%s: NULL cm_reply!\n", __func__);
2940 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2941 goto bailout;
2944 req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2945 sasaddr = le32toh(req->SASAddress.Low);
2946 sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;
2948 if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
2949 MPI2_IOCSTATUS_SUCCESS ||
2950 rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
2951 mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
2952 __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
2953 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2954 goto bailout;
2957 mpr_dprint(sc, MPR_XINFO, "%s: SMP request to SAS address %#jx "
2958 "completed successfully\n", __func__, (uintmax_t)sasaddr);
2960 if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
2961 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2962 else
2963 mprsas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR);
2965 bailout:
2967 * We sync in both directions because we had DMAs in the S/G list
2968 * in both directions.
2970 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
2971 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2972 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2973 mpr_free_command(sc, cm);
2974 xpt_done(ccb);
/*
 * Build and send an SMP passthrough request to the device at 'sasaddr' on
 * behalf of an XPT_SMP_IO CCB.  The request and response buffers are mapped
 * through a two-entry uio so that a single mpr_map_command() call covers
 * both directions.  Completion is handled by mprsas_smpio_complete(), which
 * frees the command and completes the CCB.
 */
static void
mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
{
	struct mpr_command *cm;
	uint8_t *request, *response;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	struct mpr_softc *sc;
	struct sglist *sg;
	int error;

	sc = sassc->sc;
	sg = NULL;
	error = 0;

#if (__FreeBSD_version >= 1000028) || \
    ((__FreeBSD_version >= 902001) && (__FreeBSD_version < 1000000))
	switch (ccb->ccb_h.flags & CAM_DATA_MASK) {
	case CAM_DATA_PADDR:
	case CAM_DATA_SG_PADDR:
		/*
		 * XXX We don't yet support physical addresses here.
		 */
		mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not "
		    "supported\n", __func__);
		mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	case CAM_DATA_SG:
		/*
		 * The chip does not support more than one buffer for the
		 * request or response.
		 */
		if ((ccb->smpio.smp_request_sglist_cnt > 1)
		    || (ccb->smpio.smp_response_sglist_cnt > 1)) {
			mpr_dprint(sc, MPR_ERROR, "%s: multiple request or "
			    "response buffer segments not supported for SMP\n",
			    __func__);
			mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
			xpt_done(ccb);
			return;
		}

		/*
		 * The CAM_SCATTER_VALID flag was originally implemented
		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
		 * We have two.  So, just take that flag to mean that we
		 * might have S/G lists, and look at the S/G segment count
		 * to figure out whether that is the case for each individual
		 * buffer.
		 */
		if (ccb->smpio.smp_request_sglist_cnt != 0) {
			bus_dma_segment_t *req_sg;

			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
		} else
			request = ccb->smpio.smp_request;

		if (ccb->smpio.smp_response_sglist_cnt != 0) {
			bus_dma_segment_t *rsp_sg;

			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
		} else
			response = ccb->smpio.smp_response;
		break;
	case CAM_DATA_VADDR:
		request = ccb->smpio.smp_request;
		response = ccb->smpio.smp_response;
		break;
	default:
		mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	}
#else /* __FreeBSD_version < 1000028 */
	/*
	 * XXX We don't yet support physical addresses here.
	 */
	if (ccb->ccb_h.flags & (CAM_DATA_PHYS|CAM_SG_LIST_PHYS)) {
		mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not "
		    "supported\n", __func__);
		mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	}

	/*
	 * If the user wants to send an S/G list, check to make sure they
	 * have single buffers.
	 */
	if (ccb->ccb_h.flags & CAM_SCATTER_VALID) {
		/*
		 * The chip does not support more than one buffer for the
		 * request or response.
		 */
		if ((ccb->smpio.smp_request_sglist_cnt > 1)
		    || (ccb->smpio.smp_response_sglist_cnt > 1)) {
			mpr_dprint(sc, MPR_ERROR, "%s: multiple request or "
			    "response buffer segments not supported for SMP\n",
			    __func__);
			mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
			xpt_done(ccb);
			return;
		}

		/*
		 * The CAM_SCATTER_VALID flag was originally implemented
		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
		 * We have two.  So, just take that flag to mean that we
		 * might have S/G lists, and look at the S/G segment count
		 * to figure out whether that is the case for each individual
		 * buffer.
		 */
		if (ccb->smpio.smp_request_sglist_cnt != 0) {
			bus_dma_segment_t *req_sg;

			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
		} else
			request = ccb->smpio.smp_request;

		if (ccb->smpio.smp_response_sglist_cnt != 0) {
			bus_dma_segment_t *rsp_sg;

			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
		} else
			response = ccb->smpio.smp_response;
	} else {
		request = ccb->smpio.smp_request;
		response = ccb->smpio.smp_response;
	}
#endif /* __FreeBSD_version < 1000028 */

	cm = mpr_alloc_command(sc);
	if (cm == NULL) {
		mpr_dprint(sc, MPR_ERROR, "%s: cannot allocate command\n",
		    __func__);
		mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
		xpt_done(ccb);
		return;
	}

	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;

	/* Allow the chip to use any route to this SAS address. */
	req->PhysicalPort = 0xff;

	req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
	req->SGLFlags =
	    MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;

	mpr_dprint(sc, MPR_XINFO, "%s: sending SMP request to SAS address "
	    "%#jx\n", __func__, (uintmax_t)sasaddr);

	mpr_init_sge(cm, req, &req->SGL);

	/*
	 * Set up a uio to pass into mpr_map_command().  This allows us to
	 * do one map command, and one busdma call in there.
	 */
	cm->cm_uio.uio_iov = cm->cm_iovec;
	cm->cm_uio.uio_iovcnt = 2;
	cm->cm_uio.uio_segflg = UIO_SYSSPACE;

	/*
	 * The read/write flag isn't used by busdma, but set it just in
	 * case.  This isn't exactly accurate, either, since we're going in
	 * both directions.
	 */
	cm->cm_uio.uio_rw = UIO_WRITE;

	cm->cm_iovec[0].iov_base = request;
	cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
	cm->cm_iovec[1].iov_base = response;
	cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;

	cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
	    cm->cm_iovec[1].iov_len;

	/*
	 * Trigger a warning message in mpr_data_cb() for the user if we
	 * wind up exceeding two S/G segments.  The chip expects one
	 * segment for the request and another for the response.
	 */
	cm->cm_max_segs = 2;

	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	cm->cm_complete = mprsas_smpio_complete;
	cm->cm_complete_data = ccb;

	/*
	 * Tell the mapping code that we're using a uio, and that this is
	 * an SMP passthrough request.  There is a little special-case
	 * logic there (in mpr_data_cb()) to handle the bidirectional
	 * transfer.
	 */
	cm->cm_flags |= MPR_CM_FLAGS_USE_UIO | MPR_CM_FLAGS_SMP_PASS |
	    MPR_CM_FLAGS_DATAIN | MPR_CM_FLAGS_DATAOUT;

	/* The chip data format is little endian. */
	req->SASAddress.High = htole32(sasaddr >> 32);
	req->SASAddress.Low = htole32(sasaddr);

	/*
	 * XXX Note that we don't have a timeout/abort mechanism here.
	 * From the manual, it looks like task management requests only
	 * work for SCSI IO and SATA passthrough requests.  We may need to
	 * have a mechanism to retry requests in the event of a chip reset
	 * at least.  Hopefully the chip will insure that any errors short
	 * of that are relayed back to the driver.
	 */
	error = mpr_map_command(sc, cm);
	if ((error != 0) && (error != EINPROGRESS)) {
		mpr_dprint(sc, MPR_ERROR, "%s: error %d returned from "
		    "mpr_map_command()\n", __func__, error);
		goto bailout_error;
	}

	return;

bailout_error:
	mpr_free_command(sc, cm);
	mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
	xpt_done(ccb);
	return;
}
/*
 * Handle an XPT_SMP_IO CCB: resolve the SAS address of the SMP target to
 * talk to (the device itself if it embeds an SMP target, otherwise its
 * parent expander) and hand the request to mprsas_send_smpcmd().
 */
static void
mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb)
{
	struct mpr_softc *sc;
	struct mprsas_target *targ;
	uint64_t sasaddr = 0;

	sc = sassc->sc;

	/*
	 * Make sure the target exists.
	 */
	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
	targ = &sassc->targets[ccb->ccb_h.target_id];
	if (targ->handle == 0x0) {
		mpr_dprint(sc, MPR_ERROR, "%s: target %d does not exist!\n",
		    __func__, ccb->ccb_h.target_id);
		mprsas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
		xpt_done(ccb);
		return;
	}

	/*
	 * If this device has an embedded SMP target, we'll talk to it
	 * directly.
	 * figure out what the expander's address is.
	 */
	if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
		sasaddr = targ->sasaddr;

	/*
	 * If we don't have a SAS address for the expander yet, try
	 * grabbing it from the page 0x83 information cached in the
	 * transport layer for this target.  LSI expanders report the
	 * expander SAS address as the port-associated SAS address in
	 * Inquiry VPD page 0x83.  Maxim expanders don't report it in page
	 * 0x83.
	 *
	 * XXX KDM disable this for now, but leave it commented out so that
	 * it is obvious that this is another possible way to get the SAS
	 * address.
	 *
	 * The parent handle method below is a little more reliable, and
	 * the other benefit is that it works for devices other than SES
	 * devices.  So you can send a SMP request to a da(4) device and it
	 * will get routed to the expander that device is attached to.
	 * (Assuming the da(4) device doesn't contain an SMP target...)
	 */
#if 0
	if (sasaddr == 0)
		sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
#endif

	/*
	 * If we still don't have a SAS address for the expander, look for
	 * the parent device of this device, which is probably the expander.
	 */
	if (sasaddr == 0) {
#ifdef OLD_MPR_PROBE
		struct mprsas_target *parent_target;
#endif

		if (targ->parent_handle == 0x0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
			    "a valid parent handle!\n", __func__, targ->handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}
#ifdef OLD_MPR_PROBE
		parent_target = mprsas_find_target_by_handle(sassc, 0,
		    targ->parent_handle);

		if (parent_target == NULL) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
			    "a valid parent target!\n", __func__, targ->handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		if ((parent_target->devinfo &
		    MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
			    "does not have an SMP target!\n", __func__,
			    targ->handle, parent_target->handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		sasaddr = parent_target->sasaddr;
#else /* OLD_MPR_PROBE */
		if ((targ->parent_devinfo &
		    MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
			    "does not have an SMP target!\n", __func__,
			    targ->handle, targ->parent_handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}
		if (targ->parent_sasaddr == 0x0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent handle "
			    "%d does not have a valid SAS address!\n", __func__,
			    targ->handle, targ->parent_handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		sasaddr = targ->parent_sasaddr;
#endif /* OLD_MPR_PROBE */
	}

	if (sasaddr == 0) {
		mpr_dprint(sc, MPR_INFO, "%s: unable to find SAS address for "
		    "handle %d\n", __func__, targ->handle);
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		goto bailout;
	}
	mprsas_send_smpcmd(sassc, ccb, sasaddr);

	return;

bailout:
	xpt_done(ccb);
}
3335 #endif //__FreeBSD_version >= 900026
3337 static void
3338 mprsas_action_resetdev(struct mprsas_softc *sassc, union ccb *ccb)
3340 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3341 struct mpr_softc *sc;
3342 struct mpr_command *tm;
3343 struct mprsas_target *targ;
3345 MPR_FUNCTRACE(sassc->sc);
3346 KKASSERT(lockowned(&sassc->sc->mpr_lock));
3348 KASSERT(ccb->ccb_h.target_id < sassc->maxtargets, ("Target %d out of "
3349 "bounds in XPT_RESET_DEV\n", ccb->ccb_h.target_id));
3350 sc = sassc->sc;
3351 tm = mpr_alloc_command(sc);
3352 if (tm == NULL) {
3353 mpr_dprint(sc, MPR_ERROR, "command alloc failure in "
3354 "mprsas_action_resetdev\n");
3355 mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
3356 xpt_done(ccb);
3357 return;
3360 targ = &sassc->targets[ccb->ccb_h.target_id];
3361 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3362 req->DevHandle = htole16(targ->handle);
3363 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
3364 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
3366 /* SAS Hard Link Reset / SATA Link Reset */
3367 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3369 tm->cm_data = NULL;
3370 tm->cm_desc.HighPriority.RequestFlags =
3371 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
3372 tm->cm_complete = mprsas_resetdev_complete;
3373 tm->cm_complete_data = ccb;
3375 mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
3376 __func__, targ->tid);
3377 tm->cm_targ = targ;
3378 targ->flags |= MPRSAS_TARGET_INRESET;
3380 mpr_map_command(sc, tm);
3383 static void
3384 mprsas_resetdev_complete(struct mpr_softc *sc, struct mpr_command *tm)
3386 MPI2_SCSI_TASK_MANAGE_REPLY *resp;
3387 union ccb *ccb;
3389 MPR_FUNCTRACE(sc);
3390 KKASSERT(lockowned(&sc->mpr_lock));
3392 resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
3393 ccb = tm->cm_complete_data;
3396 * Currently there should be no way we can hit this case. It only
3397 * happens when we have a failure to allocate chain frames, and
3398 * task management commands don't have S/G lists.
3400 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
3401 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3403 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3405 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for reset of "
3406 "handle %#04x! This should not happen!\n", __func__,
3407 tm->cm_flags, req->DevHandle);
3408 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3409 goto bailout;
3412 mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n",
3413 __func__, le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));
3415 if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
3416 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
3417 mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
3418 CAM_LUN_WILDCARD);
3420 else
3421 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3423 bailout:
3425 mprsas_free_tm(sc, tm);
3426 xpt_done(ccb);
3429 static void
3430 mprsas_poll(struct cam_sim *sim)
3432 struct mprsas_softc *sassc;
3434 sassc = cam_sim_softc(sim);
3436 if (sassc->sc->mpr_debug & MPR_TRACE) {
3437 /* frequent debug messages during a panic just slow
3438 * everything down too much.
3440 mpr_dprint(sassc->sc, MPR_XINFO, "%s clearing MPR_TRACE\n",
3441 __func__);
3442 sassc->sc->mpr_debug &= ~MPR_TRACE;
3445 mpr_intr_locked(sassc->sc);
/*
 * CAM async event callback registered in mpr_attach_sas().  Two events are
 * of interest: AC_ADVINFO_CHANGED (re-read the long READ CAPACITY data to
 * refresh the per-LUN EEDP state) and AC_FOUND_DEVICE (on older FreeBSD
 * versions, probe the new device for EEDP support).
 */
static void
mprsas_async(void *callback_arg, uint32_t code, struct cam_path *path,
    void *arg)
{
	struct mpr_softc *sc;

	sc = (struct mpr_softc *)callback_arg;

	switch (code) {
#if (__FreeBSD_version >= 1000006) || \
    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
	case AC_ADVINFO_CHANGED: {
		struct mprsas_target *target;
		struct mprsas_softc *sassc;
		struct scsi_read_capacity_data_long rcap_buf;
		struct ccb_dev_advinfo cdai;
		struct mprsas_lun *lun;
		lun_id_t lunid;
		int found_lun;
		uintptr_t buftype;

		buftype = (uintptr_t)arg;

		found_lun = 0;
		sassc = sc->sassc;

		/*
		 * We're only interested in read capacity data changes.
		 */
		if (buftype != CDAI_TYPE_RCAPLONG)
			break;

		/*
		 * See the comment in mpr_attach_sas() for a detailed
		 * explanation.  In these versions of FreeBSD we register
		 * for all events and filter out the events that don't
		 * apply to us.
		 */
#if (__FreeBSD_version < 1000703) || \
    ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
		if (xpt_path_path_id(path) != sassc->sim->path_id)
			break;
#endif

		/*
		 * We should have a handle for this, but check to make sure.
		 */
		KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
		    ("Target %d out of bounds in mprsas_async\n",
		    xpt_path_target_id(path)));
		target = &sassc->targets[xpt_path_target_id(path)];
		if (target->handle == 0)
			break;

		lunid = xpt_path_lun_id(path);

		/* Look the LUN up in this target's list; create it if new. */
		SLIST_FOREACH(lun, &target->luns, lun_link) {
			if (lun->lun_id == lunid) {
				found_lun = 1;
				break;
			}
		}

		if (found_lun == 0) {
			lun = kmalloc(sizeof(struct mprsas_lun), M_MPR,
			    M_NOWAIT | M_ZERO);
			if (lun == NULL) {
				mpr_dprint(sc, MPR_ERROR, "Unable to alloc "
				    "LUN for EEDP support.\n");
				break;
			}
			lun->lun_id = lunid;
			SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
		}

		/*
		 * Fetch the cached long READ CAPACITY data via an
		 * XPT_DEV_ADVINFO CCB and derive the EEDP state from the
		 * protection-type fields.
		 */
		bzero(&rcap_buf, sizeof(rcap_buf));
		xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
		cdai.ccb_h.flags = CAM_DIR_IN;
		cdai.buftype = CDAI_TYPE_RCAPLONG;
#if (__FreeBSD_version >= 1100061) || \
    ((__FreeBSD_version >= 1001510) && (__FreeBSD_version < 1100000))
		cdai.flags = CDAI_FLAG_NONE;
#else
		cdai.flags = 0;
#endif
		cdai.bufsiz = sizeof(rcap_buf);
		cdai.buf = (uint8_t *)&rcap_buf;
		xpt_action((union ccb *)&cdai);
		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);

		if ((mprsas_get_ccbstatus((union ccb *)&cdai) == CAM_REQ_CMP)
		    && (rcap_buf.prot & SRC16_PROT_EN)) {
			switch (rcap_buf.prot & SRC16_P_TYPE) {
			case SRC16_PTYPE_1:
			case SRC16_PTYPE_3:
				lun->eedp_formatted = TRUE;
				lun->eedp_block_size =
				    scsi_4btoul(rcap_buf.length);
				break;
			case SRC16_PTYPE_2:
			default:
				lun->eedp_formatted = FALSE;
				lun->eedp_block_size = 0;
				break;
			}
		} else {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
		}
		break;
	}
#endif
	case AC_FOUND_DEVICE: {
		struct ccb_getdev *cgd;

		/*
		 * See the comment in mpr_attach_sas() for a detailed
		 * explanation.  In these versions of FreeBSD we register
		 * for all events and filter out the events that don't
		 * apply to us.
		 */
#if (__FreeBSD_version < 1000703) || \
    ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
		if (xpt_path_path_id(path) != sc->sassc->sim->path_id)
			break;
#endif

		cgd = arg;
#if (__FreeBSD_version < 901503) || \
    ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
		mprsas_check_eedp(sc, path, cgd);
#endif
		break;
	}
	default:
		break;
	}
}
3589 #if (__FreeBSD_version < 901503) || \
3590 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
3591 static void
3592 mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path,
3593 struct ccb_getdev *cgd)
3595 struct mprsas_softc *sassc = sc->sassc;
3596 struct ccb_scsiio *csio;
3597 struct scsi_read_capacity_16 *scsi_cmd;
3598 struct scsi_read_capacity_eedp *rcap_buf;
3599 path_id_t pathid;
3600 target_id_t targetid;
3601 lun_id_t lunid;
3602 union ccb *ccb;
3603 struct cam_path *local_path;
3604 struct mprsas_target *target;
3605 struct mprsas_lun *lun;
3606 uint8_t found_lun;
3607 char path_str[64];
3609 pathid = cam_sim_path(sassc->sim);
3610 targetid = xpt_path_target_id(path);
3611 lunid = xpt_path_lun_id(path);
3613 KASSERT(targetid < sassc->maxtargets, ("Target %d out of bounds in "
3614 "mprsas_check_eedp\n", targetid));
3615 target = &sassc->targets[targetid];
3616 if (target->handle == 0x0)
3617 return;
3620 * Determine if the device is EEDP capable.
3622 * If this flag is set in the inquiry data, the device supports
3623 * protection information, and must support the 16 byte read capacity
3624 * command, otherwise continue without sending read cap 16.
3626 if ((cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) == 0)
3627 return;
3630 * Issue a READ CAPACITY 16 command. This info is used to determine if
3631 * the LUN is formatted for EEDP support.
3633 ccb = xpt_alloc_ccb();
3634 if (ccb == NULL) {
3635 mpr_dprint(sc, MPR_ERROR, "Unable to alloc CCB for EEDP "
3636 "support.\n");
3637 return;
3640 if (xpt_create_path(&local_path, xpt_periph, pathid, targetid, lunid) !=
3641 CAM_REQ_CMP) {
3642 mpr_dprint(sc, MPR_ERROR, "Unable to create path for EEDP "
3643 "support.\n");
3644 xpt_free_ccb(ccb);
3645 return;
3649 * If LUN is already in list, don't create a new one.
3651 found_lun = FALSE;
3652 SLIST_FOREACH(lun, &target->luns, lun_link) {
3653 if (lun->lun_id == lunid) {
3654 found_lun = TRUE;
3655 break;
3658 if (!found_lun) {
3659 lun = kmalloc(sizeof(struct mprsas_lun), M_MPR,
3660 M_NOWAIT | M_ZERO);
3661 if (lun == NULL) {
3662 mpr_dprint(sc, MPR_ERROR, "Unable to alloc LUN for "
3663 "EEDP support.\n");
3664 xpt_free_path(local_path);
3665 xpt_free_ccb(ccb);
3666 return;
3668 lun->lun_id = lunid;
3669 SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
3672 xpt_path_string(local_path, path_str, sizeof(path_str));
3673 mpr_dprint(sc, MPR_INFO, "Sending read cap: path %s handle %d\n",
3674 path_str, target->handle);
3677 * Issue a READ CAPACITY 16 command for the LUN. The
3678 * mprsas_read_cap_done function will load the read cap info into the
3679 * LUN struct.
3681 rcap_buf = kmalloc(sizeof(struct scsi_read_capacity_eedp), M_MPR,
3682 M_NOWAIT | M_ZERO);
3683 if (rcap_buf == NULL) {
3684 mpr_dprint(sc, MPR_ERROR, "Unable to alloc read capacity "
3685 "buffer for EEDP support.\n");
3686 xpt_free_path(ccb->ccb_h.path);
3687 xpt_free_ccb(ccb);
3688 return;
3690 xpt_setup_ccb(&ccb->ccb_h, local_path, CAM_PRIORITY_NORMAL);
3691 csio = &ccb->csio;
3692 csio->ccb_h.func_code = XPT_SCSI_IO;
3693 csio->ccb_h.flags = CAM_DIR_IN;
3694 csio->ccb_h.retry_count = 4;
3695 csio->ccb_h.cbfcnp = mprsas_read_cap_done;
3696 csio->ccb_h.timeout = 60000;
3697 csio->data_ptr = (uint8_t *)rcap_buf;
3698 csio->dxfer_len = sizeof(struct scsi_read_capacity_eedp);
3699 csio->sense_len = MPR_SENSE_LEN;
3700 csio->cdb_len = sizeof(*scsi_cmd);
3701 csio->tag_action = MSG_SIMPLE_Q_TAG;
3703 scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes;
3704 bzero(scsi_cmd, sizeof(*scsi_cmd));
3705 scsi_cmd->opcode = 0x9E;
3706 scsi_cmd->service_action = SRC16_SERVICE_ACTION;
3707 ((uint8_t *)scsi_cmd)[13] = sizeof(struct scsi_read_capacity_eedp);
3709 ccb->ccb_h.ppriv_ptr1 = sassc;
3710 xpt_action(ccb);
3713 static void
3714 mprsas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
3716 struct mprsas_softc *sassc;
3717 struct mprsas_target *target;
3718 struct mprsas_lun *lun;
3719 struct scsi_read_capacity_eedp *rcap_buf;
3721 if (done_ccb == NULL)
3722 return;
3724 /* Driver need to release devq, it Scsi command is
3725 * generated by driver internally.
3726 * Currently there is a single place where driver
3727 * calls scsi command internally. In future if driver
3728 * calls more scsi command internally, it needs to release
3729 * devq internally, since those command will not go back to
3730 * cam_periph.
3732 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) {
3733 done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
3734 xpt_release_devq(done_ccb->ccb_h.path,
3735 /*count*/ 1, /*run_queue*/TRUE);
3738 rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;
3741 * Get the LUN ID for the path and look it up in the LUN list for the
3742 * target.
3744 sassc = (struct mprsas_softc *)done_ccb->ccb_h.ppriv_ptr1;
3745 KASSERT(done_ccb->ccb_h.target_id < sassc->maxtargets, ("Target %d out "
3746 "of bounds in mprsas_read_cap_done\n", done_ccb->ccb_h.target_id));
3747 target = &sassc->targets[done_ccb->ccb_h.target_id];
3748 SLIST_FOREACH(lun, &target->luns, lun_link) {
3749 if (lun->lun_id != done_ccb->ccb_h.target_lun)
3750 continue;
3753 * Got the LUN in the target's LUN list. Fill it in with EEDP
3754 * info. If the READ CAP 16 command had some SCSI error (common
3755 * if command is not supported), mark the lun as not supporting
3756 * EEDP and set the block size to 0.
3758 if ((mprsas_get_ccbstatus(done_ccb) != CAM_REQ_CMP) ||
3759 (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
3760 lun->eedp_formatted = FALSE;
3761 lun->eedp_block_size = 0;
3762 break;
3765 if (rcap_buf->protect & 0x01) {
3766 mpr_dprint(sassc->sc, MPR_INFO, "LUN %d for target ID "
3767 "%d is formatted for EEDP support.\n",
3768 done_ccb->ccb_h.target_lun,
3769 done_ccb->ccb_h.target_id);
3770 lun->eedp_formatted = TRUE;
3771 lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
3773 break;
3776 // Finished with this CCB and path.
3777 kfree(rcap_buf, M_MPR);
3778 xpt_free_path(done_ccb->ccb_h.path);
3779 xpt_free_ccb(done_ccb);
3781 #endif /* (__FreeBSD_version < 901503) || \
3782 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */
3784 void
3785 mprsas_prepare_for_tm(struct mpr_softc *sc, struct mpr_command *tm,
3786 struct mprsas_target *target, lun_id_t lun_id)
3788 union ccb *ccb;
3789 path_id_t path_id;
3792 * Set the INRESET flag for this target so that no I/O will be sent to
3793 * the target until the reset has completed. If an I/O request does
3794 * happen, the devq will be frozen. The CCB holds the path which is
3795 * used to release the devq. The devq is released and the CCB is freed
3796 * when the TM completes.
3798 ccb = xpt_alloc_ccb();
3799 if (ccb) {
3800 path_id = cam_sim_path(sc->sassc->sim);
3801 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, path_id,
3802 target->tid, lun_id) != CAM_REQ_CMP) {
3803 xpt_free_ccb(ccb);
3804 } else {
3805 tm->cm_ccb = ccb;
3806 tm->cm_targ = target;
3807 target->flags |= MPRSAS_TARGET_INRESET;
/*
 * Kick off discovery by sending the port enable request.  Always returns 0.
 */
int
mprsas_startup(struct mpr_softc *sc)
{
	/*
	 * Send the port enable message and set the wait_for_port_enable flag.
	 * This flag helps to keep the simq frozen until all discovery events
	 * are processed.
	 */
	sc->wait_for_port_enable = 1;
	mprsas_send_portenable(sc);
	return (0);
}
3825 static int
3826 mprsas_send_portenable(struct mpr_softc *sc)
3828 MPI2_PORT_ENABLE_REQUEST *request;
3829 struct mpr_command *cm;
3831 MPR_FUNCTRACE(sc);
3833 if ((cm = mpr_alloc_command(sc)) == NULL)
3834 return (EBUSY);
3835 request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3836 request->Function = MPI2_FUNCTION_PORT_ENABLE;
3837 request->MsgFlags = 0;
3838 request->VP_ID = 0;
3839 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3840 cm->cm_complete = mprsas_portenable_complete;
3841 cm->cm_data = NULL;
3842 cm->cm_sge = NULL;
3844 mpr_map_command(sc, cm);
3845 mpr_dprint(sc, MPR_XINFO,
3846 "mpr_send_portenable finished cm %p req %p complete %p\n",
3847 cm, cm->cm_req, cm->cm_complete);
3848 return (0);
3851 static void
3852 mprsas_portenable_complete(struct mpr_softc *sc, struct mpr_command *cm)
3854 MPI2_PORT_ENABLE_REPLY *reply;
3855 struct mprsas_softc *sassc;
3857 MPR_FUNCTRACE(sc);
3858 sassc = sc->sassc;
3861 * Currently there should be no way we can hit this case. It only
3862 * happens when we have a failure to allocate chain frames, and
3863 * port enable commands don't have S/G lists.
3865 if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
3866 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for port enable! "
3867 "This should not happen!\n", __func__, cm->cm_flags);
3870 reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3871 if (reply == NULL)
3872 mpr_dprint(sc, MPR_FAULT, "Portenable NULL reply\n");
3873 else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
3874 MPI2_IOCSTATUS_SUCCESS)
3875 mpr_dprint(sc, MPR_FAULT, "Portenable failed\n");
3877 mpr_free_command(sc, cm);
3879 * Done waiting for port enable to complete. Decrement the refcount.
3880 * If refcount is 0, discovery is complete and a rescan of the bus can
3881 * take place.
3883 sc->wait_for_port_enable = 0;
3884 sc->port_enable_complete = 1;
3885 wakeup(&sc->port_enable_complete);
3886 mprsas_startup_decrement(sassc);
/*
 * Return 1 if target 'id' appears in the exclude_ids tunable (a
 * comma-separated list of target IDs to skip during discovery), 0
 * otherwise.
 *
 * NOTE(review): strsep() is destructive -- each comma it consumes in
 * sc->exclude_ids is overwritten with a NUL, so a later call will only
 * see the list up to the first separator.  Verify that callers tolerate
 * this, or that the list is re-populated between calls.
 */
static int
mprsas_check_id(struct mprsas_softc *sassc, int id)
{
	struct mpr_softc *sc = sassc->sc;
	char *ids;
	char *name;

	ids = &sc->exclude_ids[0];
	while((name = strsep(&ids, ",")) != NULL) {
		/* Skip empty tokens (leading/trailing/doubled commas). */
		if (name[0] == '\0')
			continue;
		if (strtol(name, NULL, 0) == (long)id)
			return (1);
	}

	return (0);
}
3907 void
3908 mprsas_realloc_targets(struct mpr_softc *sc, int maxtargets)
3910 struct mprsas_softc *sassc;
3911 struct mprsas_lun *lun, *lun_tmp;
3912 struct mprsas_target *targ;
3913 int i;
3915 sassc = sc->sassc;
3917 * The number of targets is based on IOC Facts, so free all of
3918 * the allocated LUNs for each target and then the target buffer
3919 * itself.
3921 for (i=0; i< maxtargets; i++) {
3922 targ = &sassc->targets[i];
3923 SLIST_FOREACH_MUTABLE(lun, &targ->luns, lun_link, lun_tmp) {
3924 kfree(lun, M_MPR);
3927 kfree(sassc->targets, M_MPR);
3929 sassc->targets = kmalloc(sizeof(struct mprsas_target) * maxtargets,
3930 M_MPR, M_WAITOK|M_ZERO);
3931 if (!sassc->targets) {
3932 panic("%s failed to alloc targets with error %d\n",
3933 __func__, ENOMEM);