1 /*-
2 * Routines for handling the integrated RAID features LSI MPT Fusion adapters.
4 * Copyright (c) 2005, WHEEL Sp. z o.o.
5 * Copyright (c) 2005 Justin T. Gibbs.
6 * All rights reserved.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are
10 * met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
14 * substantially similar to the "NO WARRANTY" disclaimer below
15 * ("Disclaimer") and any redistribution must be conditioned upon including
16 * a substantially similar Disclaimer requirement for further binary
17 * redistribution.
18 * 3. Neither the names of the above listed copyright holders nor the names
19 * of any contributors may be used to endorse or promote products derived
20 * from this software without specific prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
32 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 /*-
35 * Some Breakage and Bug Fixing added later.
36 * Copyright (c) 2006, by Matthew Jacob
37 * All Rights Reserved
39 * Support from LSI-Logic has also gone a great deal toward making this a
40 * workable subsystem and is gratefully acknowledged.
41 * $FreeBSD: src/sys/dev/mpt/mpt_raid.c,v 1.20 2009/05/21 12:36:40 jhb Exp $
44 #include <sys/cdefs.h>
46 #include <dev/disk/mpt/mpt.h>
47 #include <dev/disk/mpt/mpt_raid.h>
49 #include "dev/disk/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
50 #include "dev/disk/mpt/mpilib/mpi_raid.h"
52 #include <bus/cam/cam.h>
53 #include <bus/cam/cam_ccb.h>
54 #include <bus/cam/cam_sim.h>
55 #include <bus/cam/cam_xpt_periph.h>
56 #include <bus/cam/cam_xpt_sim.h>
58 #if __FreeBSD_version < 500000
59 #include <sys/devicestat.h>
60 #define GIANT_REQUIRED
61 #endif
62 #include <bus/cam/cam_periph.h>
64 #include <sys/callout.h>
65 #include <sys/kthread.h>
66 #include <sys/sysctl.h>
68 #include <machine/stdarg.h>
70 struct mpt_raid_action_result
72 union {
73 MPI_RAID_VOL_INDICATOR indicator_struct;
74 uint32_t new_settings;
75 uint8_t phys_disk_num;
76 } action_data;
77 uint16_t action_status;
80 #define REQ_TO_RAID_ACTION_RESULT(req) ((struct mpt_raid_action_result *) \
81 (((MSG_RAID_ACTION_REQUEST *)(req->req_vbuf)) + 1))
83 #define REQ_IOCSTATUS(req) ((req)->IOCStatus & MPI_IOCSTATUS_MASK)
86 static mpt_probe_handler_t mpt_raid_probe;
87 static mpt_attach_handler_t mpt_raid_attach;
88 static mpt_enable_handler_t mpt_raid_enable;
89 static mpt_event_handler_t mpt_raid_event;
90 static mpt_shutdown_handler_t mpt_raid_shutdown;
91 static mpt_reset_handler_t mpt_raid_ioc_reset;
92 static mpt_detach_handler_t mpt_raid_detach;
94 static struct mpt_personality mpt_raid_personality =
96 .name = "mpt_raid",
97 .probe = mpt_raid_probe,
98 .attach = mpt_raid_attach,
99 .enable = mpt_raid_enable,
100 .event = mpt_raid_event,
101 .reset = mpt_raid_ioc_reset,
102 .shutdown = mpt_raid_shutdown,
103 .detach = mpt_raid_detach,
106 DECLARE_MPT_PERSONALITY(mpt_raid, SI_ORDER_THIRD);
107 MPT_PERSONALITY_DEPEND(mpt_raid, mpt_cam, 1, 1, 1);
109 static mpt_reply_handler_t mpt_raid_reply_handler;
110 static int mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
111 MSG_DEFAULT_REPLY *reply_frame);
112 static int mpt_spawn_raid_thread(struct mpt_softc *mpt);
113 static void mpt_terminate_raid_thread(struct mpt_softc *mpt);
114 static void mpt_raid_thread(void *arg);
115 static timeout_t mpt_raid_timer;
116 #if 0
117 static void mpt_enable_vol(struct mpt_softc *mpt,
118 struct mpt_raid_volume *mpt_vol, int enable);
119 #endif
120 static void mpt_verify_mwce(struct mpt_softc *, struct mpt_raid_volume *);
121 static void mpt_adjust_queue_depth(struct mpt_softc *, struct mpt_raid_volume *,
122 struct cam_path *);
123 #if __FreeBSD_version < 500000
124 #define mpt_raid_sysctl_attach(x) do { } while (0)
125 #else
126 static void mpt_raid_sysctl_attach(struct mpt_softc *);
127 #endif
129 static uint32_t raid_handler_id = MPT_HANDLER_ID_NONE;
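/*
 * Translate firmware volume type, volume state, and physical disk state
 * codes into the human-readable strings used by the status reporting
 * code below.
 */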
131 const char *
132 mpt_vol_type(struct mpt_raid_volume *vol)
134 switch (vol->config_page->VolumeType) {
135 case MPI_RAID_VOL_TYPE_IS:
136 return ("RAID-0");
137 case MPI_RAID_VOL_TYPE_IME:
138 return ("RAID-1E");
139 case MPI_RAID_VOL_TYPE_IM:
140 return ("RAID-1");
141 default:
142 return ("Unknown");
146 const char *
147 mpt_vol_state(struct mpt_raid_volume *vol)
149 switch (vol->config_page->VolumeStatus.State) {
150 case MPI_RAIDVOL0_STATUS_STATE_OPTIMAL:
151 return ("Optimal");
152 case MPI_RAIDVOL0_STATUS_STATE_DEGRADED:
153 return ("Degraded");
154 case MPI_RAIDVOL0_STATUS_STATE_FAILED:
155 return ("Failed");
156 default:
157 return ("Unknown");
161 const char *
162 mpt_disk_state(struct mpt_raid_disk *disk)
164 switch (disk->config_page.PhysDiskStatus.State) {
165 case MPI_PHYSDISK0_STATUS_ONLINE:
166 return ("Online");
167 case MPI_PHYSDISK0_STATUS_MISSING:
168 return ("Missing");
169 case MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE:
170 return ("Incompatible");
171 case MPI_PHYSDISK0_STATUS_FAILED:
172 return ("Failed");
173 case MPI_PHYSDISK0_STATUS_INITIALIZING:
174 return ("Initializing");
175 case MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED:
176 return ("Offline Requested");
177 case MPI_PHYSDISK0_STATUS_FAILED_REQUESTED:
178 return ("Failed per Host Request");
179 case MPI_PHYSDISK0_STATUS_OTHER_OFFLINE:
180 return ("Offline");
181 default:
182 return ("Unknown");
186 void
187 mpt_vol_prt(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
188 const char *fmt, ...)
190 __va_list ap;
192 kprintf("%s:vol%d(%s:%d:%d): ", device_get_nameunit(mpt->dev),
193 (u_int)(vol - mpt->raid_volumes), device_get_nameunit(mpt->dev),
194 vol->config_page->VolumeBus, vol->config_page->VolumeID);
195 __va_start(ap, fmt);
196 kvprintf(fmt, ap);
197 __va_end(ap);
200 void
201 mpt_disk_prt(struct mpt_softc *mpt, struct mpt_raid_disk *disk,
202 const char *fmt, ...)
204 __va_list ap;
206 if (disk->volume != NULL) {
207 kprintf("(%s:vol%d:%d): ",
208 device_get_nameunit(mpt->dev),
209 disk->volume->config_page->VolumeID,
210 disk->member_number);
211 } else {
212 kprintf("(%s:%d:%d): ", device_get_nameunit(mpt->dev),
213 disk->config_page.PhysDiskBus,
214 disk->config_page.PhysDiskID);
216 __va_start(ap, fmt);
217 kvprintf(fmt, ap);
218 __va_end(ap);
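/*
 * CAM async callback: when a device is discovered on a target that
 * corresponds to an active RAID volume, re-apply our queue depth
 * setting to that device.
 */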
221 static void
222 mpt_raid_async(void *callback_arg, u_int32_t code,
223 struct cam_path *path, void *arg)
225 struct mpt_softc *mpt;
227 mpt = (struct mpt_softc*)callback_arg;
228 switch (code) {
229 case AC_FOUND_DEVICE:
231 struct ccb_getdev *cgd;
232 struct mpt_raid_volume *mpt_vol;
234 cgd = (struct ccb_getdev *)arg;
235 if (cgd == NULL) {
236 break;
239 mpt_lprt(mpt, MPT_PRT_DEBUG, "Callback for %d\n",
240 cgd->ccb_h.target_id);
242 RAID_VOL_FOREACH(mpt, mpt_vol) {
243 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
244 continue;
246 if (mpt_vol->config_page->VolumeID
247 == cgd->ccb_h.target_id) {
248 mpt_adjust_queue_depth(mpt, mpt_vol, path);
249 break;
253 default:
254 break;
259 mpt_raid_probe(struct mpt_softc *mpt)
261 if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
262 return (ENODEV);
264 return (0);
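/*
 * Attach the RAID personality: start the monitoring thread, register
 * our reply handler, hook the CAM async callback for device discovery,
 * and create the sysctl tuning knobs.
 */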
268 mpt_raid_attach(struct mpt_softc *mpt)
270 struct ccb_setasync csa;
271 mpt_handler_t handler;
272 int error;
274 mpt_callout_init(&mpt->raid_timer);
276 error = mpt_spawn_raid_thread(mpt);
277 if (error != 0) {
278 mpt_prt(mpt, "Unable to spawn RAID thread!\n");
279 goto cleanup;
282 MPT_LOCK(mpt);
283 handler.reply_handler = mpt_raid_reply_handler;
284 error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
285 &raid_handler_id);
286 if (error != 0) {
287 mpt_prt(mpt, "Unable to register RAID handler!\n");
288 goto cleanup;
291 xpt_setup_ccb(&csa.ccb_h, mpt->path, 5);
292 csa.ccb_h.func_code = XPT_SASYNC_CB;
293 csa.event_enable = AC_FOUND_DEVICE;
294 csa.callback = mpt_raid_async;
295 csa.callback_arg = mpt;
296 xpt_action((union ccb *)&csa);
297 if (csa.ccb_h.status != CAM_REQ_CMP) {
298 mpt_prt(mpt, "mpt_raid_attach: Unable to register "
299 "CAM async handler.\n");
301 MPT_UNLOCK(mpt);
303 mpt_raid_sysctl_attach(mpt);
304 return (0);
305 cleanup:
306 MPT_UNLOCK(mpt);
307 mpt_raid_detach(mpt);
308 return (error);
312 mpt_raid_enable(struct mpt_softc *mpt)
314 return (0);
317 void
318 mpt_raid_detach(struct mpt_softc *mpt)
320 struct ccb_setasync csa;
321 mpt_handler_t handler;
323 callout_stop(&mpt->raid_timer);
324 MPT_LOCK(mpt);
325 mpt_terminate_raid_thread(mpt);
327 handler.reply_handler = mpt_raid_reply_handler;
328 mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
329 raid_handler_id);
330 xpt_setup_ccb(&csa.ccb_h, mpt->path, /*priority*/5);
331 csa.ccb_h.func_code = XPT_SASYNC_CB;
332 csa.event_enable = 0;
333 csa.callback = mpt_raid_async;
334 csa.callback_arg = mpt;
335 xpt_action((union ccb *)&csa);
336 MPT_UNLOCK(mpt);
339 static void
340 mpt_raid_ioc_reset(struct mpt_softc *mpt, int type)
342 /* Nothing to do yet. */
345 static const char *raid_event_txt[] =
347 "Volume Created",
348 "Volume Deleted",
349 "Volume Settings Changed",
350 "Volume Status Changed",
351 "Volume Physical Disk Membership Changed",
352 "Physical Disk Created",
353 "Physical Disk Deleted",
354 "Physical Disk Settings Changed",
355 "Physical Disk Status Changed",
356 "Domain Validation Required",
357 "SMART Data Received",
358 "Replace Action Started",
361 static int
362 mpt_raid_event(struct mpt_softc *mpt, request_t *req,
363 MSG_EVENT_NOTIFY_REPLY *msg)
365 EVENT_DATA_RAID *raid_event;
366 struct mpt_raid_volume *mpt_vol;
367 struct mpt_raid_disk *mpt_disk;
368 CONFIG_PAGE_RAID_VOL_0 *vol_pg;
369 int i;
370 int print_event;
372 if (msg->Event != MPI_EVENT_INTEGRATED_RAID) {
373 return (0);
376 raid_event = (EVENT_DATA_RAID *)&msg->Data;
378 mpt_vol = NULL;
379 vol_pg = NULL;
380 if (mpt->raid_volumes != NULL && mpt->ioc_page2 != NULL) {
381 for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
382 mpt_vol = &mpt->raid_volumes[i];
383 vol_pg = mpt_vol->config_page;
385 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
386 continue;
388 if (vol_pg->VolumeID == raid_event->VolumeID
389 && vol_pg->VolumeBus == raid_event->VolumeBus)
390 break;
392 if (i >= mpt->ioc_page2->MaxVolumes) {
393 mpt_vol = NULL;
394 vol_pg = NULL;
398 mpt_disk = NULL;
399 if (raid_event->PhysDiskNum != 0xFF && mpt->raid_disks != NULL) {
400 mpt_disk = mpt->raid_disks + raid_event->PhysDiskNum;
401 if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0) {
402 mpt_disk = NULL;
406 print_event = 1;
407 switch(raid_event->ReasonCode) {
408 case MPI_EVENT_RAID_RC_VOLUME_CREATED:
409 case MPI_EVENT_RAID_RC_VOLUME_DELETED:
410 break;
411 case MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED:
412 if (mpt_vol != NULL) {
413 if ((mpt_vol->flags & MPT_RVF_UP2DATE) != 0) {
414 mpt_vol->flags &= ~MPT_RVF_UP2DATE;
415 } else {
417 * Coalesce status messages into one
418 * per background run of our RAID thread.
419 * This removes "spurious" status messages
420 * from our output.
422 print_event = 0;
425 break;
426 case MPI_EVENT_RAID_RC_VOLUME_SETTINGS_CHANGED:
427 case MPI_EVENT_RAID_RC_VOLUME_PHYSDISK_CHANGED:
428 mpt->raid_rescan++;
429 if (mpt_vol != NULL) {
430 mpt_vol->flags &= ~(MPT_RVF_UP2DATE|MPT_RVF_ANNOUNCED);
432 break;
433 case MPI_EVENT_RAID_RC_PHYSDISK_CREATED:
434 case MPI_EVENT_RAID_RC_PHYSDISK_DELETED:
435 mpt->raid_rescan++;
436 break;
437 case MPI_EVENT_RAID_RC_PHYSDISK_SETTINGS_CHANGED:
438 case MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED:
439 mpt->raid_rescan++;
440 if (mpt_disk != NULL) {
441 mpt_disk->flags &= ~MPT_RDF_UP2DATE;
443 break;
444 case MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED:
445 mpt->raid_rescan++;
446 break;
447 case MPI_EVENT_RAID_RC_SMART_DATA:
448 case MPI_EVENT_RAID_RC_REPLACE_ACTION_STARTED:
449 break;
452 if (print_event) {
453 if (mpt_disk != NULL) {
454 mpt_disk_prt(mpt, mpt_disk, "");
455 } else if (mpt_vol != NULL) {
456 mpt_vol_prt(mpt, mpt_vol, "");
457 } else {
458 mpt_prt(mpt, "Volume(%d:%d", raid_event->VolumeBus,
459 raid_event->VolumeID);
461 if (raid_event->PhysDiskNum != 0xFF)
462 mpt_prtc(mpt, ":%d): ",
463 raid_event->PhysDiskNum);
464 else
465 mpt_prtc(mpt, "): ");
468 if (raid_event->ReasonCode >= NUM_ELEMENTS(raid_event_txt))
469 mpt_prtc(mpt, "Unhandled RaidEvent %#x\n",
470 raid_event->ReasonCode);
471 else
472 mpt_prtc(mpt, "%s\n",
473 raid_event_txt[raid_event->ReasonCode]);
476 if (raid_event->ReasonCode == MPI_EVENT_RAID_RC_SMART_DATA) {
477 /* XXX Use CAM's print sense for this... */
478 if (mpt_disk != NULL)
479 mpt_disk_prt(mpt, mpt_disk, "");
480 else
481 mpt_prt(mpt, "Volume(%d:%d:%d: ",
482 raid_event->VolumeBus, raid_event->VolumeID,
483 raid_event->PhysDiskNum);
484 mpt_prtc(mpt, "ASC 0x%x, ASCQ 0x%x)\n",
485 raid_event->ASC, raid_event->ASCQ);
488 mpt_raid_wakeup(mpt);
489 return (1);
492 static void
493 mpt_raid_shutdown(struct mpt_softc *mpt)
495 struct mpt_raid_volume *mpt_vol;
497 if (mpt->raid_mwce_setting != MPT_RAID_MWCE_REBUILD_ONLY) {
498 return;
501 mpt->raid_mwce_setting = MPT_RAID_MWCE_OFF;
502 RAID_VOL_FOREACH(mpt, mpt_vol) {
503 mpt_verify_mwce(mpt, mpt_vol);
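/*
 * Reply handler for RAID action requests.  Record the completion,
 * remove the request from the pending list, and either wake a waiter
 * or free the request.
 */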
507 static int
508 mpt_raid_reply_handler(struct mpt_softc *mpt, request_t *req,
509 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
511 int free_req;
513 if (req == NULL)
514 return (TRUE);
516 free_req = TRUE;
517 if (reply_frame != NULL)
518 free_req = mpt_raid_reply_frame_handler(mpt, req, reply_frame);
519 #ifdef NOTYET
520 else if (req->ccb != NULL) {
521 /* Complete Quiesce CCB with error... */
523 #endif
525 req->state &= ~REQ_STATE_QUEUED;
526 req->state |= REQ_STATE_DONE;
527 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
529 if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
530 wakeup(req);
531 } else if (free_req) {
532 mpt_free_request(mpt, req);
535 return (TRUE);
539 * Parse additional completion information in the reply
540 * frame for RAID I/O requests.
542 static int
543 mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
544 MSG_DEFAULT_REPLY *reply_frame)
546 MSG_RAID_ACTION_REPLY *reply;
547 struct mpt_raid_action_result *action_result;
548 MSG_RAID_ACTION_REQUEST *rap;
550 reply = (MSG_RAID_ACTION_REPLY *)reply_frame;
551 req->IOCStatus = le16toh(reply->IOCStatus);
552 rap = (MSG_RAID_ACTION_REQUEST *)req->req_vbuf;
554 switch (rap->Action) {
555 case MPI_RAID_ACTION_QUIESCE_PHYS_IO:
556 mpt_prt(mpt, "QUIESCE PHYSIO DONE\n");
557 break;
558 case MPI_RAID_ACTION_ENABLE_PHYS_IO:
559 mpt_prt(mpt, "ENABLY PHYSIO DONE\n");
560 break;
561 default:
562 break;
564 action_result = REQ_TO_RAID_ACTION_RESULT(req);
565 memcpy(&action_result->action_data, &reply->ActionData,
566 sizeof(action_result->action_data));
567 action_result->action_status = le16toh(reply->ActionStatus);
568 return (TRUE);
572 * Utility routine to perform a RAID action command.
575 mpt_issue_raid_req(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
576 struct mpt_raid_disk *disk, request_t *req, u_int Action,
577 uint32_t ActionDataWord, bus_addr_t addr, bus_size_t len,
578 int write, int wait)
580 MSG_RAID_ACTION_REQUEST *rap;
581 SGE_SIMPLE32 *se;
583 rap = req->req_vbuf;
584 memset(rap, 0, sizeof *rap);
585 rap->Action = Action;
586 rap->ActionDataWord = htole32(ActionDataWord);
587 rap->Function = MPI_FUNCTION_RAID_ACTION;
588 rap->VolumeID = vol->config_page->VolumeID;
589 rap->VolumeBus = vol->config_page->VolumeBus;
590 if (disk != NULL)
591 rap->PhysDiskNum = disk->config_page.PhysDiskNum;
592 else
593 rap->PhysDiskNum = 0xFF;
594 se = (SGE_SIMPLE32 *)&rap->ActionDataSGE;
595 se->Address = htole32(addr);
596 MPI_pSGE_SET_LENGTH(se, len);
597 MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
598 MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
599 MPI_SGE_FLAGS_END_OF_LIST |
600 (write ? MPI_SGE_FLAGS_HOST_TO_IOC : MPI_SGE_FLAGS_IOC_TO_HOST)));
601 se->FlagsLength = htole32(se->FlagsLength);
602 rap->MsgContext = htole32(req->index | raid_handler_id);
604 mpt_check_doorbell(mpt);
605 mpt_send_cmd(mpt, req);
607 if (wait) {
608 return (mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
609 /*sleep_ok*/FALSE, /*time_ms*/2000));
610 } else {
611 return (0);
615 /*************************** RAID Status Monitoring ***************************/
616 static int
617 mpt_spawn_raid_thread(struct mpt_softc *mpt)
619 int error;
622 * Freeze out any CAM transactions until our thread
623 * is able to run at least once. We need to update
624 * our RAID pages before accepting I/O or we may
625 * reject I/O to an ID we later determine is for a
626 * hidden physdisk.
628 MPT_LOCK(mpt);
629 xpt_freeze_simq(mpt->phydisk_sim, 1);
630 MPT_UNLOCK(mpt);
631 error = mpt_kthread_create(mpt_raid_thread, mpt,
632 &mpt->raid_thread, /*flags*/0, /*altstack*/0,
633 "mpt_raid%d", mpt->unit);
634 if (error != 0) {
635 MPT_LOCK(mpt);
636 xpt_release_simq(mpt->phydisk_sim, /*run_queue*/FALSE);
637 MPT_UNLOCK(mpt);
639 return (error);
642 static void
643 mpt_terminate_raid_thread(struct mpt_softc *mpt)
646 if (mpt->raid_thread == NULL) {
647 return;
649 mpt->shutdwn_raid = 1;
650 wakeup(mpt->raid_volumes);
652 * Sleep on a slightly different location
653 * for this interlock just for added safety.
655 mpt_sleep(mpt, &mpt->raid_thread, PUSER, "thtrm", 0);
658 static void
659 mpt_cam_rescan_callback(struct cam_periph *periph, union ccb *ccb)
662 xpt_free_path(ccb->ccb_h.path);
663 xpt_free_ccb(ccb);
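/*
 * Main loop of the RAID monitoring kthread.  On each wakeup it
 * refreshes the RAID configuration pages, releases the physical disk
 * SIM queue after the first successful pass, and rescans the bus when
 * volume or disk membership has changed.
 */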
666 static void
667 mpt_raid_thread(void *arg)
669 struct mpt_softc *mpt;
670 int firstrun;
672 mpt = (struct mpt_softc *)arg;
673 firstrun = 1;
674 MPT_LOCK(mpt);
675 while (mpt->shutdwn_raid == 0) {
677 if (mpt->raid_wakeup == 0) {
678 mpt_sleep(mpt, &mpt->raid_volumes, PUSER, "idle", 0);
679 continue;
682 mpt->raid_wakeup = 0;
684 if (mpt_refresh_raid_data(mpt)) {
685 mpt_schedule_raid_refresh(mpt); /* XX NOT QUITE RIGHT */
686 continue;
690 * Now that we have our first snapshot of RAID data,
691 * allow CAM to access our physical disk bus.
693 if (firstrun) {
694 firstrun = 0;
695 MPTLOCK_2_CAMLOCK(mpt);
696 xpt_release_simq(mpt->phydisk_sim, TRUE);
697 CAMLOCK_2_MPTLOCK(mpt);
700 if (mpt->raid_rescan != 0) {
701 union ccb *ccb;
702 struct cam_path *path;
703 int error;
705 mpt->raid_rescan = 0;
706 MPT_UNLOCK(mpt);
708 ccb = xpt_alloc_ccb();
710 MPT_LOCK(mpt);
711 error = xpt_create_path(&path, xpt_periph,
712 cam_sim_path(mpt->phydisk_sim),
713 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
714 if (error != CAM_REQ_CMP) {
715 xpt_free_ccb(ccb);
716 mpt_prt(mpt, "Unable to rescan RAID Bus!\n");
717 } else {
718 xpt_setup_ccb(&ccb->ccb_h, path, 5);
719 ccb->ccb_h.func_code = XPT_SCAN_BUS;
720 ccb->ccb_h.cbfcnp = mpt_cam_rescan_callback;
721 ccb->crcn.flags = CAM_FLAG_NONE;
722 MPTLOCK_2_CAMLOCK(mpt);
723 xpt_action(ccb);
724 CAMLOCK_2_MPTLOCK(mpt);
728 mpt->raid_thread = NULL;
729 wakeup(&mpt->raid_thread);
730 MPT_UNLOCK(mpt);
731 mpt_kthread_exit(0);
734 #if 0
735 static void
736 mpt_raid_quiesce_timeout(void *arg)
738 /* Complete the CCB with error */
739 /* COWWWW */
742 static timeout_t mpt_raid_quiesce_timeout;
743 cam_status
744 mpt_raid_quiesce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk,
745 request_t *req)
747 union ccb *ccb;
749 ccb = req->ccb;
750 if ((mpt_disk->flags & MPT_RDF_QUIESCED) != 0)
751 return (CAM_REQ_CMP);
753 if ((mpt_disk->flags & MPT_RDF_QUIESCING) == 0) {
754 int rv;
756 mpt_disk->flags |= MPT_RDF_QUIESCING;
757 xpt_freeze_devq(ccb->ccb_h.path, 1);
759 rv = mpt_issue_raid_req(mpt, mpt_disk->volume, mpt_disk, req,
760 MPI_RAID_ACTION_QUIESCE_PHYS_IO,
761 /*ActionData*/0, /*addr*/0,
762 /*len*/0, /*write*/FALSE,
763 /*wait*/FALSE);
764 if (rv != 0)
765 return (CAM_REQ_CMP_ERR);
767 mpt_req_timeout(req, mpt_raid_quiesce_timeout, ccb, 5 * hz);
768 #if 0
769 if (rv == ETIMEDOUT) {
770 mpt_disk_prt(mpt, mpt_disk, "mpt_raid_quiesce_disk: "
771 "Quiece Timed-out\n");
772 xpt_release_devq(ccb->ccb_h.path, 1, /*run*/0);
773 return (CAM_REQ_CMP_ERR);
776 ar = REQ_TO_RAID_ACTION_RESULT(req);
777 if (rv != 0
778 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
779 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
780 mpt_disk_prt(mpt, mpt_disk, "Quiece Failed"
781 "%d:%x:%x\n", rv, req->IOCStatus,
782 ar->action_status);
783 xpt_release_devq(ccb->ccb_h.path, 1, /*run*/0);
784 return (CAM_REQ_CMP_ERR);
786 #endif
787 return (CAM_REQ_INPROG);
789 return (CAM_REQUEUE_REQ);
791 #endif
793 /* XXX Ignores that there may be multiple busses/IOCs involved. */
794 cam_status
795 mpt_map_physdisk(struct mpt_softc *mpt, union ccb *ccb, u_int *tgt)
797 struct mpt_raid_disk *mpt_disk;
799 mpt_disk = mpt->raid_disks + ccb->ccb_h.target_id;
800 if (ccb->ccb_h.target_id < mpt->raid_max_disks
801 && (mpt_disk->flags & MPT_RDF_ACTIVE) != 0) {
802 *tgt = mpt_disk->config_page.PhysDiskID;
803 return (0);
805 mpt_lprt(mpt, MPT_PRT_DEBUG1, "mpt_map_physdisk(%d) - Not Active\n",
806 ccb->ccb_h.target_id);
807 return (-1);
810 /* XXX Ignores that there may be multiple busses/IOCs involved. */
812 mpt_is_raid_volume(struct mpt_softc *mpt, int tgt)
814 CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol;
815 CONFIG_PAGE_IOC_2_RAID_VOL *ioc_last_vol;
817 if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
818 return (0);
820 ioc_vol = mpt->ioc_page2->RaidVolume;
821 ioc_last_vol = ioc_vol + mpt->ioc_page2->NumActiveVolumes;
822 for (;ioc_vol != ioc_last_vol; ioc_vol++) {
823 if (ioc_vol->VolumeID == tgt) {
824 return (1);
827 return (0);
830 #if 0
831 static void
832 mpt_enable_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
833 int enable)
835 request_t *req;
836 struct mpt_raid_action_result *ar;
837 CONFIG_PAGE_RAID_VOL_0 *vol_pg;
838 int enabled;
839 int rv;
841 vol_pg = mpt_vol->config_page;
842 enabled = vol_pg->VolumeStatus.Flags & MPI_RAIDVOL0_STATUS_FLAG_ENABLED;
845 * If the setting matches the configuration,
846 * there is nothing to do.
848 if ((enabled && enable)
849 || (!enabled && !enable))
850 return;
852 req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
853 if (req == NULL) {
854 mpt_vol_prt(mpt, mpt_vol,
855 "mpt_enable_vol: Get request failed!\n");
856 return;
859 rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
860 enable ? MPI_RAID_ACTION_ENABLE_VOLUME
861 : MPI_RAID_ACTION_DISABLE_VOLUME,
862 /*data*/0, /*addr*/0, /*len*/0,
863 /*write*/FALSE, /*wait*/TRUE);
864 if (rv == ETIMEDOUT) {
865 mpt_vol_prt(mpt, mpt_vol, "mpt_enable_vol: "
866 "%s Volume Timed-out\n",
867 enable ? "Enable" : "Disable");
868 return;
870 ar = REQ_TO_RAID_ACTION_RESULT(req);
871 if (rv != 0
872 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
873 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
874 mpt_vol_prt(mpt, mpt_vol, "%s Volume Failed: %d:%x:%x\n",
875 enable ? "Enable" : "Disable",
876 rv, req->IOCStatus, ar->action_status);
879 mpt_free_request(mpt, req);
881 #endif
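/*
 * Bring a volume's member write cache enable (MWCE) setting in line
 * with the driver's policy, issuing a CHANGE_VOLUME_SETTINGS action
 * when the two disagree.
 */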
883 static void
884 mpt_verify_mwce(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
886 request_t *req;
887 struct mpt_raid_action_result *ar;
888 CONFIG_PAGE_RAID_VOL_0 *vol_pg;
889 uint32_t data;
890 int rv;
891 int resyncing;
892 int mwce;
894 vol_pg = mpt_vol->config_page;
895 resyncing = vol_pg->VolumeStatus.Flags
896 & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS;
897 mwce = vol_pg->VolumeSettings.Settings
898 & MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
901 * If the setting matches the configuration,
902 * there is nothing to do.
904 switch (mpt->raid_mwce_setting) {
905 case MPT_RAID_MWCE_REBUILD_ONLY:
906 if ((resyncing && mwce) || (!resyncing && !mwce)) {
907 return;
909 mpt_vol->flags ^= MPT_RVF_WCE_CHANGED;
910 if ((mpt_vol->flags & MPT_RVF_WCE_CHANGED) == 0) {
912 * Wait one more status update to see if
913 * resyncing gets enabled. It gets disabled
914 * temporarily when WCE is changed.
916 return;
918 break;
919 case MPT_RAID_MWCE_ON:
920 if (mwce)
921 return;
922 break;
923 case MPT_RAID_MWCE_OFF:
924 if (!mwce)
925 return;
926 break;
927 case MPT_RAID_MWCE_NC:
928 return;
931 req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
932 if (req == NULL) {
933 mpt_vol_prt(mpt, mpt_vol,
934 "mpt_verify_mwce: Get request failed!\n");
935 return;
938 vol_pg->VolumeSettings.Settings ^=
939 MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
940 memcpy(&data, &vol_pg->VolumeSettings, sizeof(data));
941 vol_pg->VolumeSettings.Settings ^=
942 MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
943 rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
944 MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS,
945 data, /*addr*/0, /*len*/0,
946 /*write*/FALSE, /*wait*/TRUE);
947 if (rv == ETIMEDOUT) {
948 mpt_vol_prt(mpt, mpt_vol, "mpt_verify_mwce: "
949 "Write Cache Enable Timed-out\n");
950 return;
952 ar = REQ_TO_RAID_ACTION_RESULT(req);
953 if (rv != 0
954 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
955 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
956 mpt_vol_prt(mpt, mpt_vol, "Write Cache Enable Failed: "
957 "%d:%x:%x\n", rv, req->IOCStatus,
958 ar->action_status);
959 } else {
960 vol_pg->VolumeSettings.Settings ^=
961 MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
963 mpt_free_request(mpt, req);
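/*
 * Push the configured resync rate and resync priority to the
 * controller when volume page 0 disagrees with the driver setting.
 */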
966 static void
967 mpt_verify_resync_rate(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
969 request_t *req;
970 struct mpt_raid_action_result *ar;
971 CONFIG_PAGE_RAID_VOL_0 *vol_pg;
972 u_int prio;
973 int rv;
975 vol_pg = mpt_vol->config_page;
977 if (mpt->raid_resync_rate == MPT_RAID_RESYNC_RATE_NC)
978 return;
981 * If the current RAID resync rate does not
982 * match our configured rate, update it.
984 prio = vol_pg->VolumeSettings.Settings
985 & MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
986 if (vol_pg->ResyncRate != 0
987 && vol_pg->ResyncRate != mpt->raid_resync_rate) {
989 req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
990 if (req == NULL) {
991 mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
992 "Get request failed!\n");
993 return;
996 rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
997 MPI_RAID_ACTION_SET_RESYNC_RATE,
998 mpt->raid_resync_rate, /*addr*/0,
999 /*len*/0, /*write*/FALSE, /*wait*/TRUE);
1000 if (rv == ETIMEDOUT) {
1001 mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_data: "
1002 "Resync Rate Setting Timed-out\n");
1003 return;
1006 ar = REQ_TO_RAID_ACTION_RESULT(req);
1007 if (rv != 0
1008 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
1009 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
1010 mpt_vol_prt(mpt, mpt_vol, "Resync Rate Setting Failed: "
1011 "%d:%x:%x\n", rv, req->IOCStatus,
1012 ar->action_status);
1013 } else
1014 vol_pg->ResyncRate = mpt->raid_resync_rate;
1015 mpt_free_request(mpt, req);
1016 } else if ((prio && mpt->raid_resync_rate < 128)
1017 || (!prio && mpt->raid_resync_rate >= 128)) {
1018 uint32_t data;
1020 req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
1021 if (req == NULL) {
1022 mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
1023 "Get request failed!\n");
1024 return;
1027 vol_pg->VolumeSettings.Settings ^=
1028 MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1029 memcpy(&data, &vol_pg->VolumeSettings, sizeof(data));
1030 vol_pg->VolumeSettings.Settings ^=
1031 MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1032 rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
1033 MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS,
1034 data, /*addr*/0, /*len*/0,
1035 /*write*/FALSE, /*wait*/TRUE);
1036 if (rv == ETIMEDOUT) {
1037 mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_data: "
1038 "Resync Rate Setting Timed-out\n");
1039 return;
1041 ar = REQ_TO_RAID_ACTION_RESULT(req);
1042 if (rv != 0
1043 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
1044 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
1045 mpt_vol_prt(mpt, mpt_vol, "Resync Rate Setting Failed: "
1046 "%d:%x:%x\n", rv, req->IOCStatus,
1047 ar->action_status);
1048 } else {
1049 vol_pg->VolumeSettings.Settings ^=
1050 MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1053 mpt_free_request(mpt, req);
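/*
 * Limit the number of CAM openings for a RAID volume to the driver's
 * configured RAID queue depth.
 */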
1057 static void
1058 mpt_adjust_queue_depth(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
1059 struct cam_path *path)
1061 struct ccb_relsim crs;
1063 xpt_setup_ccb(&crs.ccb_h, path, /*priority*/5);
1064 crs.ccb_h.func_code = XPT_REL_SIMQ;
1065 crs.release_flags = RELSIM_ADJUST_OPENINGS;
1066 crs.openings = mpt->raid_queue_depth;
1067 xpt_action((union ccb *)&crs);
1068 if (crs.ccb_h.status != CAM_REQ_CMP)
1069 mpt_vol_prt(mpt, mpt_vol, "mpt_adjust_queue_depth failed "
1070 "with CAM status %#x\n", crs.ccb_h.status);
1073 static void
1074 mpt_announce_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
1076 CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1077 u_int i;
1079 vol_pg = mpt_vol->config_page;
1080 mpt_vol_prt(mpt, mpt_vol, "Settings (");
1081 for (i = 1; i <= 0x8000; i <<= 1) {
1082 switch (vol_pg->VolumeSettings.Settings & i) {
1083 case MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE:
1084 mpt_prtc(mpt, " Member-WCE");
1085 break;
1086 case MPI_RAIDVOL0_SETTING_OFFLINE_ON_SMART:
1087 mpt_prtc(mpt, " Offline-On-SMART-Err");
1088 break;
1089 case MPI_RAIDVOL0_SETTING_AUTO_CONFIGURE:
1090 mpt_prtc(mpt, " Hot-Plug-Spares");
1091 break;
1092 case MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC:
1093 mpt_prtc(mpt, " High-Priority-ReSync");
1094 break;
1095 default:
1096 break;
1099 mpt_prtc(mpt, " )\n");
1100 if (vol_pg->VolumeSettings.HotSparePool != 0) {
1101 mpt_vol_prt(mpt, mpt_vol, "Using Spare Pool%s",
1102 powerof2(vol_pg->VolumeSettings.HotSparePool)
1103 ? ":" : "s:");
1104 for (i = 0; i < 8; i++) {
1105 u_int mask;
1107 mask = 0x1 << i;
1108 if ((vol_pg->VolumeSettings.HotSparePool & mask) == 0)
1109 continue;
1110 mpt_prtc(mpt, " %d", i);
1112 mpt_prtc(mpt, "\n");
1114 mpt_vol_prt(mpt, mpt_vol, "%d Members:\n", vol_pg->NumPhysDisks);
1115 for (i = 0; i < vol_pg->NumPhysDisks; i++){
1116 struct mpt_raid_disk *mpt_disk;
1117 CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1118 int pt_bus = cam_sim_bus(mpt->phydisk_sim);
1119 U8 f, s;
1121 mpt_disk = mpt->raid_disks + vol_pg->PhysDisk[i].PhysDiskNum;
1122 disk_pg = &mpt_disk->config_page;
1123 mpt_prtc(mpt, " ");
1124 mpt_prtc(mpt, "(%s:%d:%d:0): ", device_get_nameunit(mpt->dev),
1125 pt_bus, disk_pg->PhysDiskID);
1126 if (vol_pg->VolumeType == MPI_RAID_VOL_TYPE_IM) {
1127 mpt_prtc(mpt, "%s", mpt_disk->member_number == 0?
1128 "Primary" : "Secondary");
1129 } else {
1130 mpt_prtc(mpt, "Stripe Position %d",
1131 mpt_disk->member_number);
1133 f = disk_pg->PhysDiskStatus.Flags;
1134 s = disk_pg->PhysDiskStatus.State;
1135 if (f & MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC) {
1136 mpt_prtc(mpt, " Out of Sync");
1138 if (f & MPI_PHYSDISK0_STATUS_FLAG_QUIESCED) {
1139 mpt_prtc(mpt, " Quiesced");
1141 if (f & MPI_PHYSDISK0_STATUS_FLAG_INACTIVE_VOLUME) {
1142 mpt_prtc(mpt, " Inactive");
1144 if (f & MPI_PHYSDISK0_STATUS_FLAG_OPTIMAL_PREVIOUS) {
1145 mpt_prtc(mpt, " Was Optimal");
1147 if (f & MPI_PHYSDISK0_STATUS_FLAG_NOT_OPTIMAL_PREVIOUS) {
1148 mpt_prtc(mpt, " Was Non-Optimal");
1150 switch (s) {
1151 case MPI_PHYSDISK0_STATUS_ONLINE:
1152 mpt_prtc(mpt, " Online");
1153 break;
1154 case MPI_PHYSDISK0_STATUS_MISSING:
1155 mpt_prtc(mpt, " Missing");
1156 break;
1157 case MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE:
1158 mpt_prtc(mpt, " Incompatible");
1159 break;
1160 case MPI_PHYSDISK0_STATUS_FAILED:
1161 mpt_prtc(mpt, " Failed");
1162 break;
1163 case MPI_PHYSDISK0_STATUS_INITIALIZING:
1164 mpt_prtc(mpt, " Initializing");
1165 break;
1166 case MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED:
1167 mpt_prtc(mpt, " Requested Offline");
1168 break;
1169 case MPI_PHYSDISK0_STATUS_FAILED_REQUESTED:
1170 mpt_prtc(mpt, " Requested Failed");
1171 break;
1172 case MPI_PHYSDISK0_STATUS_OTHER_OFFLINE:
1173 default:
1174 mpt_prtc(mpt, " Offline Other (%x)", s);
1175 break;
1177 mpt_prtc(mpt, "\n");
1181 static void
1182 mpt_announce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk)
1184 CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1185 int rd_bus = cam_sim_bus(mpt->sim);
1186 int pt_bus = cam_sim_bus(mpt->phydisk_sim);
1187 u_int i;
1189 disk_pg = &mpt_disk->config_page;
1190 mpt_disk_prt(mpt, mpt_disk,
1191 "Physical (%s:%d:%d:0), Pass-thru (%s:%d:%d:0)\n",
1192 device_get_nameunit(mpt->dev), rd_bus,
1193 disk_pg->PhysDiskID, device_get_nameunit(mpt->dev),
1194 pt_bus, mpt_disk - mpt->raid_disks);
1195 if (disk_pg->PhysDiskSettings.HotSparePool == 0)
1196 return;
1197 mpt_disk_prt(mpt, mpt_disk, "Member of Hot Spare Pool%s",
1198 powerof2(disk_pg->PhysDiskSettings.HotSparePool)
1199 ? ":" : "s:");
1200 for (i = 0; i < 8; i++) {
1201 u_int mask;
1203 mask = 0x1 << i;
1204 if ((disk_pg->PhysDiskSettings.HotSparePool & mask) == 0)
1205 continue;
1206 mpt_prtc(mpt, " %d", i);
1208 mpt_prtc(mpt, "\n");
1211 static void
1212 mpt_refresh_raid_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk,
1213 IOC_3_PHYS_DISK *ioc_disk)
1215 int rv;
1217 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_PHYSDISK,
1218 /*PageNumber*/0, ioc_disk->PhysDiskNum,
1219 &mpt_disk->config_page.Header,
1220 /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1221 if (rv != 0) {
1222 mpt_prt(mpt, "mpt_refresh_raid_disk: "
1223 "Failed to read RAID Disk Hdr(%d)\n",
1224 ioc_disk->PhysDiskNum);
1225 return;
1227 rv = mpt_read_cur_cfg_page(mpt, ioc_disk->PhysDiskNum,
1228 &mpt_disk->config_page.Header,
1229 sizeof(mpt_disk->config_page),
1230 /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1231 if (rv != 0)
1232 mpt_prt(mpt, "mpt_refresh_raid_disk: "
1233 "Failed to read RAID Disk Page(%d)\n",
1234 ioc_disk->PhysDiskNum);
1235 mpt2host_config_page_raid_phys_disk_0(&mpt_disk->config_page);
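/*
 * Refresh RAID volume page 0 for one volume, update the member disk
 * back-pointers, and fetch the resync progress indicator if a resync
 * is in progress.
 */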
1238 static void
1239 mpt_refresh_raid_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
1240 CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol)
1242 CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1243 struct mpt_raid_action_result *ar;
1244 request_t *req;
1245 int rv;
1246 int i;
1248 vol_pg = mpt_vol->config_page;
1249 mpt_vol->flags &= ~MPT_RVF_UP2DATE;
1251 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_VOLUME, 0,
1252 ioc_vol->VolumePageNumber, &vol_pg->Header, TRUE, 5000);
1253 if (rv != 0) {
1254 mpt_vol_prt(mpt, mpt_vol,
1255 "mpt_refresh_raid_vol: Failed to read RAID Vol Hdr(%d)\n",
1256 ioc_vol->VolumePageNumber);
1257 return;
1260 rv = mpt_read_cur_cfg_page(mpt, ioc_vol->VolumePageNumber,
1261 &vol_pg->Header, mpt->raid_page0_len, TRUE, 5000);
1262 if (rv != 0) {
1263 mpt_vol_prt(mpt, mpt_vol,
1264 "mpt_refresh_raid_vol: Failed to read RAID Vol Page(%d)\n",
1265 ioc_vol->VolumePageNumber);
1266 return;
1268 mpt2host_config_page_raid_vol_0(vol_pg);
1270 mpt_vol->flags |= MPT_RVF_ACTIVE;
1272 /* Update disk entry array data. */
1273 for (i = 0; i < vol_pg->NumPhysDisks; i++) {
1274 struct mpt_raid_disk *mpt_disk;
1275 mpt_disk = mpt->raid_disks + vol_pg->PhysDisk[i].PhysDiskNum;
1276 mpt_disk->volume = mpt_vol;
1277 mpt_disk->member_number = vol_pg->PhysDisk[i].PhysDiskMap;
1278 if (vol_pg->VolumeType == MPI_RAID_VOL_TYPE_IM) {
1279 mpt_disk->member_number--;
1283 if ((vol_pg->VolumeStatus.Flags
1284 & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) == 0)
1285 return;
1287 req = mpt_get_request(mpt, TRUE);
1288 if (req == NULL) {
1289 mpt_vol_prt(mpt, mpt_vol,
1290 "mpt_refresh_raid_vol: Get request failed!\n");
1291 return;
1293 rv = mpt_issue_raid_req(mpt, mpt_vol, NULL, req,
1294 MPI_RAID_ACTION_INDICATOR_STRUCT, 0, 0, 0, FALSE, TRUE);
1295 if (rv == ETIMEDOUT) {
1296 mpt_vol_prt(mpt, mpt_vol,
1297 "mpt_refresh_raid_vol: Progress Indicator fetch timeout\n");
1298 mpt_free_request(mpt, req);
1299 return;
1302 ar = REQ_TO_RAID_ACTION_RESULT(req);
1303 if (rv == 0
1304 && ar->action_status == MPI_RAID_ACTION_ASTATUS_SUCCESS
1305 && REQ_IOCSTATUS(req) == MPI_IOCSTATUS_SUCCESS) {
1306 memcpy(&mpt_vol->sync_progress,
1307 &ar->action_data.indicator_struct,
1308 sizeof(mpt_vol->sync_progress));
1309 mpt2host_mpi_raid_vol_indicator(&mpt_vol->sync_progress);
1310 } else {
1311 mpt_vol_prt(mpt, mpt_vol,
1312 "mpt_refresh_raid_vol: Progress indicator fetch failed!\n");
1314 mpt_free_request(mpt, req);
1318 * Update in-core information about RAID support. We update any entries
1319 * that didn't previously exist or have been marked as needing to
1320 * be updated by our event handler. Interesting changes are displayed
1321 * to the console.
1324 mpt_refresh_raid_data(struct mpt_softc *mpt)
1326 CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol;
1327 CONFIG_PAGE_IOC_2_RAID_VOL *ioc_last_vol;
1328 IOC_3_PHYS_DISK *ioc_disk;
1329 IOC_3_PHYS_DISK *ioc_last_disk;
1330 CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1331 size_t len;
1332 int rv;
1333 int i;
1334 u_int nonopt_volumes;
1336 if (mpt->ioc_page2 == NULL || mpt->ioc_page3 == NULL) {
1337 return (0);
1341 * Mark all items as unreferenced by the configuration.
1342 * This allows us to find, report, and discard stale
1343 * entries.
1345 for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
1346 mpt->raid_disks[i].flags &= ~MPT_RDF_REFERENCED;
1348 for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
1349 mpt->raid_volumes[i].flags &= ~MPT_RVF_REFERENCED;
1353 * Get Physical Disk information.
1355 len = mpt->ioc_page3->Header.PageLength * sizeof(uint32_t);
1356 rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1357 &mpt->ioc_page3->Header, len,
1358 /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1359 if (rv) {
1360 mpt_prt(mpt,
1361 "mpt_refresh_raid_data: Failed to read IOC Page 3\n");
1362 return (-1);
1364 mpt2host_config_page_ioc3(mpt->ioc_page3);
1366 ioc_disk = mpt->ioc_page3->PhysDisk;
1367 ioc_last_disk = ioc_disk + mpt->ioc_page3->NumPhysDisks;
1368 for (; ioc_disk != ioc_last_disk; ioc_disk++) {
1369 struct mpt_raid_disk *mpt_disk;
1371 mpt_disk = mpt->raid_disks + ioc_disk->PhysDiskNum;
1372 mpt_disk->flags |= MPT_RDF_REFERENCED;
1373 if ((mpt_disk->flags & (MPT_RDF_ACTIVE|MPT_RDF_UP2DATE))
1374 != (MPT_RDF_ACTIVE|MPT_RDF_UP2DATE)) {
1376 mpt_refresh_raid_disk(mpt, mpt_disk, ioc_disk);
1379 mpt_disk->flags |= MPT_RDF_ACTIVE;
1380 mpt->raid_rescan++;
1384 * Refresh volume data.
1386 len = mpt->ioc_page2->Header.PageLength * sizeof(uint32_t);
1387 rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1388 &mpt->ioc_page2->Header, len,
1389 /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1390 if (rv) {
1391 mpt_prt(mpt, "mpt_refresh_raid_data: "
1392 "Failed to read IOC Page 2\n");
1393 return (-1);
1395 mpt2host_config_page_ioc2(mpt->ioc_page2);
1397 ioc_vol = mpt->ioc_page2->RaidVolume;
1398 ioc_last_vol = ioc_vol + mpt->ioc_page2->NumActiveVolumes;
1399 for (;ioc_vol != ioc_last_vol; ioc_vol++) {
1400 struct mpt_raid_volume *mpt_vol;
1402 mpt_vol = mpt->raid_volumes + ioc_vol->VolumePageNumber;
1403 mpt_vol->flags |= MPT_RVF_REFERENCED;
1404 vol_pg = mpt_vol->config_page;
1405 if (vol_pg == NULL)
1406 continue;
1407 if (((mpt_vol->flags & (MPT_RVF_ACTIVE|MPT_RVF_UP2DATE))
1408 != (MPT_RVF_ACTIVE|MPT_RVF_UP2DATE))
1409 || (vol_pg->VolumeStatus.Flags
1410 & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) != 0) {
1412 mpt_refresh_raid_vol(mpt, mpt_vol, ioc_vol);
1414 mpt_vol->flags |= MPT_RVF_ACTIVE;
1417 nonopt_volumes = 0;
1418 for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
1419 struct mpt_raid_volume *mpt_vol;
1420 uint64_t total;
1421 uint64_t left;
1422 int m;
1423 u_int prio;
1425 mpt_vol = &mpt->raid_volumes[i];
1427 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0) {
1428 continue;
1431 vol_pg = mpt_vol->config_page;
1432 if ((mpt_vol->flags & (MPT_RVF_REFERENCED|MPT_RVF_ANNOUNCED))
1433 == MPT_RVF_ANNOUNCED) {
1434 mpt_vol_prt(mpt, mpt_vol, "No longer configured\n");
1435 mpt_vol->flags = 0;
1436 continue;
1439 if ((mpt_vol->flags & MPT_RVF_ANNOUNCED) == 0) {
1440 mpt_announce_vol(mpt, mpt_vol);
1441 mpt_vol->flags |= MPT_RVF_ANNOUNCED;
1444 if (vol_pg->VolumeStatus.State !=
1445 MPI_RAIDVOL0_STATUS_STATE_OPTIMAL)
1446 nonopt_volumes++;
1448 if ((mpt_vol->flags & MPT_RVF_UP2DATE) != 0)
1449 continue;
1451 mpt_vol->flags |= MPT_RVF_UP2DATE;
1452 mpt_vol_prt(mpt, mpt_vol, "%s - %s\n",
1453 mpt_vol_type(mpt_vol), mpt_vol_state(mpt_vol));
1454 mpt_verify_mwce(mpt, mpt_vol);
1456 if (vol_pg->VolumeStatus.Flags == 0) {
1457 continue;
1460 mpt_vol_prt(mpt, mpt_vol, "Status (");
1461 for (m = 1; m <= 0x80; m <<= 1) {
1462 switch (vol_pg->VolumeStatus.Flags & m) {
1463 case MPI_RAIDVOL0_STATUS_FLAG_ENABLED:
1464 mpt_prtc(mpt, " Enabled");
1465 break;
1466 case MPI_RAIDVOL0_STATUS_FLAG_QUIESCED:
1467 mpt_prtc(mpt, " Quiesced");
1468 break;
1469 case MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS:
1470 mpt_prtc(mpt, " Re-Syncing");
1471 break;
1472 case MPI_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE:
1473 mpt_prtc(mpt, " Inactive");
1474 break;
1475 default:
1476 break;
1479 mpt_prtc(mpt, " )\n");
1481 if ((vol_pg->VolumeStatus.Flags
1482 & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) == 0)
1483 continue;
1485 mpt_verify_resync_rate(mpt, mpt_vol);
1487 left = MPT_U64_2_SCALAR(mpt_vol->sync_progress.BlocksRemaining);
1488 total = MPT_U64_2_SCALAR(mpt_vol->sync_progress.TotalBlocks);
1489 if (vol_pg->ResyncRate != 0) {
1491 prio = ((u_int)vol_pg->ResyncRate * 100000) / 0xFF;
1492 mpt_vol_prt(mpt, mpt_vol, "Rate %d.%d%%\n",
1493 prio / 1000, prio % 1000);
1494 } else {
1495 prio = vol_pg->VolumeSettings.Settings
1496 & MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1497 mpt_vol_prt(mpt, mpt_vol, "%s Priority Re-Sync\n",
1498 prio ? "High" : "Low");
1500 #if __FreeBSD_version >= 500000
1501 mpt_vol_prt(mpt, mpt_vol, "%ju of %ju "
1502 "blocks remaining\n", (uintmax_t)left,
1503 (uintmax_t)total);
1504 #else
1505 mpt_vol_prt(mpt, mpt_vol, "%llu of %llu "
1506 "blocks remaining\n", (uint64_t)left,
1507 (uint64_t)total);
1508 #endif
1510 /* Periodically report on sync progress. */
1511 mpt_schedule_raid_refresh(mpt);
1514 for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
1515 struct mpt_raid_disk *mpt_disk;
1516 CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1517 int m;
1519 mpt_disk = &mpt->raid_disks[i];
1520 disk_pg = &mpt_disk->config_page;
1522 if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0)
1523 continue;
1525 if ((mpt_disk->flags & (MPT_RDF_REFERENCED|MPT_RDF_ANNOUNCED))
1526 == MPT_RDF_ANNOUNCED) {
1527 mpt_disk_prt(mpt, mpt_disk, "No longer configured\n");
1528 mpt_disk->flags = 0;
1529 mpt->raid_rescan++;
1530 continue;
1533 if ((mpt_disk->flags & MPT_RDF_ANNOUNCED) == 0) {
1535 mpt_announce_disk(mpt, mpt_disk);
1536 mpt_disk->flags |= MPT_RDF_ANNOUNCED;
1539 if ((mpt_disk->flags & MPT_RDF_UP2DATE) != 0)
1540 continue;
1542 mpt_disk->flags |= MPT_RDF_UP2DATE;
1543 mpt_disk_prt(mpt, mpt_disk, "%s\n", mpt_disk_state(mpt_disk));
1544 if (disk_pg->PhysDiskStatus.Flags == 0)
1545 continue;
1547 mpt_disk_prt(mpt, mpt_disk, "Status (");
1548 for (m = 1; m <= 0x80; m <<= 1) {
1549 switch (disk_pg->PhysDiskStatus.Flags & m) {
1550 case MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC:
1551 mpt_prtc(mpt, " Out-Of-Sync");
1552 break;
1553 case MPI_PHYSDISK0_STATUS_FLAG_QUIESCED:
1554 mpt_prtc(mpt, " Quiesced");
1555 break;
1556 default:
1557 break;
1560 mpt_prtc(mpt, " )\n");
1563 mpt->raid_nonopt_volumes = nonopt_volumes;
1564 return (0);
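/*
 * Callout handler used by mpt_schedule_raid_refresh() to periodically
 * wake the RAID thread so it refreshes status and reports resync
 * progress.
 */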
1567 static void
1568 mpt_raid_timer(void *arg)
1570 struct mpt_softc *mpt;
1572 mpt = (struct mpt_softc *)arg;
1573 MPT_LOCK(mpt);
1574 mpt_raid_wakeup(mpt);
1575 MPT_UNLOCK(mpt);
1578 void
1579 mpt_schedule_raid_refresh(struct mpt_softc *mpt)
1581 callout_reset(&mpt->raid_timer, MPT_RAID_SYNC_REPORT_INTERVAL,
1582 mpt_raid_timer, mpt);
1585 void
1586 mpt_raid_free_mem(struct mpt_softc *mpt)
1589 if (mpt->raid_volumes) {
1590 struct mpt_raid_volume *mpt_raid;
1591 int i;
1592 for (i = 0; i < mpt->raid_max_volumes; i++) {
1593 mpt_raid = &mpt->raid_volumes[i];
1594 if (mpt_raid->config_page) {
1595 kfree(mpt_raid->config_page, M_DEVBUF);
1596 mpt_raid->config_page = NULL;
1599 kfree(mpt->raid_volumes, M_DEVBUF);
1600 mpt->raid_volumes = NULL;
1602 if (mpt->raid_disks) {
1603 kfree(mpt->raid_disks, M_DEVBUF);
1604 mpt->raid_disks = NULL;
1606 if (mpt->ioc_page2) {
1607 kfree(mpt->ioc_page2, M_DEVBUF);
1608 mpt->ioc_page2 = NULL;
1610 if (mpt->ioc_page3) {
1611 kfree(mpt->ioc_page3, M_DEVBUF);
1612 mpt->ioc_page3 = NULL;
1614 mpt->raid_max_volumes = 0;
1615 mpt->raid_max_disks = 0;
1618 #if __FreeBSD_version >= 500000
1619 static int
1620 mpt_raid_set_vol_resync_rate(struct mpt_softc *mpt, u_int rate)
1622 struct mpt_raid_volume *mpt_vol;
1624 if ((rate > MPT_RAID_RESYNC_RATE_MAX
1625 || rate < MPT_RAID_RESYNC_RATE_MIN)
1626 && rate != MPT_RAID_RESYNC_RATE_NC)
1627 return (EINVAL);
1629 MPT_LOCK(mpt);
1630 mpt->raid_resync_rate = rate;
1631 RAID_VOL_FOREACH(mpt, mpt_vol) {
1632 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0) {
1633 continue;
1635 mpt_verify_resync_rate(mpt, mpt_vol);
1637 MPT_UNLOCK(mpt);
1638 return (0);
1641 static int
1642 mpt_raid_set_vol_queue_depth(struct mpt_softc *mpt, u_int vol_queue_depth)
1644 struct mpt_raid_volume *mpt_vol;
1646 if (vol_queue_depth > 255 || vol_queue_depth < 1)
1647 return (EINVAL);
1649 MPT_LOCK(mpt);
1650 mpt->raid_queue_depth = vol_queue_depth;
1651 RAID_VOL_FOREACH(mpt, mpt_vol) {
1652 struct cam_path *path;
1653 int error;
1655 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
1656 continue;
1658 mpt->raid_rescan = 0;
1660 MPTLOCK_2_CAMLOCK(mpt);
1661 error = xpt_create_path(&path, xpt_periph,
1662 cam_sim_path(mpt->sim),
1663 mpt_vol->config_page->VolumeID,
1664 /*lun*/0);
1665 if (error != CAM_REQ_CMP) {
1666 CAMLOCK_2_MPTLOCK(mpt);
1667 mpt_vol_prt(mpt, mpt_vol, "Unable to allocate path!\n");
1668 continue;
1670 mpt_adjust_queue_depth(mpt, mpt_vol, path);
1671 xpt_free_path(path);
1672 CAMLOCK_2_MPTLOCK(mpt);
1674 MPT_UNLOCK(mpt);
1675 return (0);
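/*
 * Apply a new member write cache policy to all active volumes.  The
 * first setting of the policy also checks for member caches left
 * enabled by an unclean shutdown and warns that a full resync may be
 * needed.
 */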
1678 static int
1679 mpt_raid_set_vol_mwce(struct mpt_softc *mpt, mpt_raid_mwce_t mwce)
1681 struct mpt_raid_volume *mpt_vol;
1682 int force_full_resync;
1684 MPT_LOCK(mpt);
1685 if (mwce == mpt->raid_mwce_setting) {
1686 MPT_UNLOCK(mpt);
1687 return (0);
1691 * Catch MWCE being left on due to a failed shutdown. Since
1692 * sysctls cannot be set by the loader, we treat the first
1693 * setting of this variable specially and force a full volume
1694 * resync if MWCE is enabled and a resync is in progress.
1696 force_full_resync = 0;
1697 if (mpt->raid_mwce_set == 0
1698 && mpt->raid_mwce_setting == MPT_RAID_MWCE_NC
1699 && mwce == MPT_RAID_MWCE_REBUILD_ONLY)
1700 force_full_resync = 1;
1702 mpt->raid_mwce_setting = mwce;
1703 RAID_VOL_FOREACH(mpt, mpt_vol) {
1704 CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1705 int resyncing;
1706 int mwce;
1708 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
1709 continue;
1711 vol_pg = mpt_vol->config_page;
1712 resyncing = vol_pg->VolumeStatus.Flags
1713 & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS;
1714 mwce = vol_pg->VolumeSettings.Settings
1715 & MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
1716 if (force_full_resync && resyncing && mwce) {
1719 * XXX disable/enable volume should force a resync,
1720 * but we'll need to quiesce, drain, and restart
1721 * I/O to do that.
1723 mpt_vol_prt(mpt, mpt_vol, "WARNING - Unsafe shutdown "
1724 "detected. Suggest full resync.\n");
1726 mpt_verify_mwce(mpt, mpt_vol);
1728 mpt->raid_mwce_set = 1;
1729 MPT_UNLOCK(mpt);
1730 return (0);
1732 const char *mpt_vol_mwce_strs[] =
1734 "On",
1735 "Off",
1736 "On-During-Rebuild",
1737 "NC"
1740 static int
1741 mpt_raid_sysctl_vol_member_wce(SYSCTL_HANDLER_ARGS)
1743 char inbuf[20];
1744 struct mpt_softc *mpt;
1745 const char *str;
1746 int error;
1747 u_int size;
1748 u_int i;
1750 GIANT_REQUIRED;
1752 mpt = (struct mpt_softc *)arg1;
1753 str = mpt_vol_mwce_strs[mpt->raid_mwce_setting];
1754 error = SYSCTL_OUT(req, str, strlen(str) + 1);
1755 if (error || !req->newptr) {
1756 return (error);
1759 size = req->newlen - req->newidx;
1760 if (size >= sizeof(inbuf)) {
1761 return (EINVAL);
1764 error = SYSCTL_IN(req, inbuf, size);
1765 if (error) {
1766 return (error);
1768 inbuf[size] = '\0';
1769 for (i = 0; i < NUM_ELEMENTS(mpt_vol_mwce_strs); i++) {
1770 if (strcmp(mpt_vol_mwce_strs[i], inbuf) == 0) {
1771 return (mpt_raid_set_vol_mwce(mpt, i));
1774 return (EINVAL);
1777 static int
1778 mpt_raid_sysctl_vol_resync_rate(SYSCTL_HANDLER_ARGS)
1780 struct mpt_softc *mpt;
1781 u_int raid_resync_rate;
1782 int error;
1784 GIANT_REQUIRED;
1786 mpt = (struct mpt_softc *)arg1;
1787 raid_resync_rate = mpt->raid_resync_rate;
1789 error = sysctl_handle_int(oidp, &raid_resync_rate, 0, req);
1790 if (error || !req->newptr) {
1791 return (error);
1794 return (mpt_raid_set_vol_resync_rate(mpt, raid_resync_rate));
1797 static int
1798 mpt_raid_sysctl_vol_queue_depth(SYSCTL_HANDLER_ARGS)
1800 struct mpt_softc *mpt;
1801 u_int raid_queue_depth;
1802 int error;
1804 GIANT_REQUIRED;
1806 mpt = (struct mpt_softc *)arg1;
1807 raid_queue_depth = mpt->raid_queue_depth;
1809 error = sysctl_handle_int(oidp, &raid_queue_depth, 0, req);
1810 if (error || !req->newptr) {
1811 return (error);
1814 return (mpt_raid_set_vol_queue_depth(mpt, raid_queue_depth));
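/*
 * Create the RAID tuning sysctls (vol_member_wce, vol_queue_depth,
 * vol_resync_rate, nonoptimal_volumes) under the device's sysctl tree.
 */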
1817 static void
1818 mpt_raid_sysctl_attach(struct mpt_softc *mpt)
1820 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev);
1821 struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev);
1823 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1824 "vol_member_wce", CTLTYPE_STRING | CTLFLAG_RW, mpt, 0,
1825 mpt_raid_sysctl_vol_member_wce, "A",
1826 "volume member WCE(On,Off,On-During-Rebuild,NC)");
1828 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1829 "vol_queue_depth", CTLTYPE_INT | CTLFLAG_RW, mpt, 0,
1830 mpt_raid_sysctl_vol_queue_depth, "I",
1831 "default volume queue depth");
1833 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1834 "vol_resync_rate", CTLTYPE_INT | CTLFLAG_RW, mpt, 0,
1835 mpt_raid_sysctl_vol_resync_rate, "I",
1836 "volume resync priority (0 == NC, 1 - 255)");
1837 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1838 "nonoptimal_volumes", CTLFLAG_RD,
1839 &mpt->raid_nonopt_volumes, 0,
1840 "number of nonoptimal volumes");
1842 #endif