5017 use 64bit DMA in mpt_sas
[illumos-gate.git] usr/src/uts/common/io/scsi/adapters/mpt_sas/mptsas_raid.c
blob 371db950e5df518d01865312bb1b68b2822801ba

/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 * Copyright (c) 2014, Tegile Systems Inc. All rights reserved.
 */

/*
 * Copyright (c) 2000 to 2010, LSI Corporation.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms of all code within
 * this file that is exclusively owned by LSI, with or without
 * modification, is permitted provided that, in addition to the CDDL 1.0
 * License requirements, the following conditions are met:
 *
 * Neither the name of the author nor the names of its contributors may be
 * used to endorse or promote products derived from this software without
 * specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

/*
 * mptsas_raid - This file contains all the RAID related functions for the
 * MPT interface.
 */

#if defined(lint) || defined(DEBUG)
#define MPTSAS_DEBUG
#endif

#define MPI_RAID_VOL_PAGE_0_PHYSDISK_MAX	2

/*
 * standard header files
 */
#include <sys/note.h>
#include <sys/scsi/scsi.h>
#include <sys/byteorder.h>
#include <sys/raidioctl.h>

#pragma pack(1)

#include <sys/scsi/adapters/mpt_sas/mpi/mpi2_type.h>
#include <sys/scsi/adapters/mpt_sas/mpi/mpi2.h>
#include <sys/scsi/adapters/mpt_sas/mpi/mpi2_cnfg.h>
#include <sys/scsi/adapters/mpt_sas/mpi/mpi2_init.h>
#include <sys/scsi/adapters/mpt_sas/mpi/mpi2_ioc.h>
#include <sys/scsi/adapters/mpt_sas/mpi/mpi2_raid.h>
#include <sys/scsi/adapters/mpt_sas/mpi/mpi2_tool.h>

#pragma pack()

/*
 * private header files.
 */
#include <sys/scsi/adapters/mpt_sas/mptsas_var.h>

static int mptsas_get_raid_wwid(mptsas_t *mpt, mptsas_raidvol_t *raidvol);

extern int mptsas_check_dma_handle(ddi_dma_handle_t handle);
extern int mptsas_check_acc_handle(ddi_acc_handle_t handle);
extern mptsas_target_t *mptsas_tgt_alloc(mptsas_t *, uint16_t,
    uint64_t, uint32_t, mptsas_phymask_t, uint8_t);
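
/*
 * Callback for RAID Configuration Page 0, invoked from
 * mptsas_access_config_page().  The variable arguments carry a pointer to
 * the caller's confignum (updated here so the next read fetches the next
 * configuration) and the index into mpt->m_raidconfig to fill in.
 */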
static int
mptsas_raidconf_page_0_cb(mptsas_t *mpt, caddr_t page_memp,
    ddi_acc_handle_t accessp, uint16_t iocstatus, uint32_t iocloginfo,
    va_list ap)
{
#ifndef __lock_lint
    _NOTE(ARGUNUSED(ap))
#endif
    pMpi2RaidConfigurationPage0_t raidconfig_page0;
    pMpi2RaidConfig0ConfigElement_t element;
    uint32_t *confignum;
    int rval = DDI_SUCCESS, i;
    uint8_t numelements, vol, disk;
    uint16_t elementtype, voldevhandle;
    uint16_t etype_vol, etype_pd, etype_hs;
    uint16_t etype_oce;
    m_raidconfig_t *raidconfig;
    uint64_t raidwwn;
    uint32_t native;
    mptsas_target_t *ptgt;
    uint32_t configindex;

    if (iocstatus == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE) {
        return (DDI_FAILURE);
    }

    if (iocstatus != MPI2_IOCSTATUS_SUCCESS) {
        mptsas_log(mpt, CE_WARN, "mptsas_get_raid_conf_page0 "
            "config: IOCStatus=0x%x, IOCLogInfo=0x%x",
            iocstatus, iocloginfo);
        rval = DDI_FAILURE;
        return (rval);
    }
    confignum = va_arg(ap, uint32_t *);
    configindex = va_arg(ap, uint32_t);
    raidconfig_page0 = (pMpi2RaidConfigurationPage0_t)page_memp;

    /*
     * Get all RAID configurations.
     */
    etype_vol = MPI2_RAIDCONFIG0_EFLAGS_VOLUME_ELEMENT;
    etype_pd = MPI2_RAIDCONFIG0_EFLAGS_VOL_PHYS_DISK_ELEMENT;
    etype_hs = MPI2_RAIDCONFIG0_EFLAGS_HOT_SPARE_ELEMENT;
    etype_oce = MPI2_RAIDCONFIG0_EFLAGS_OCE_ELEMENT;

    /*
     * Set up page address for next time through.
     */
    *confignum = ddi_get8(accessp,
        &raidconfig_page0->ConfigNum);

    /*
     * Point to the right config in the structure.
     * Increment the number of valid RAID configs.
     */
    raidconfig = &mpt->m_raidconfig[configindex];
    mpt->m_num_raid_configs++;

    /*
     * Set the native flag if this is not a foreign
     * configuration.
     */
    native = ddi_get32(accessp, &raidconfig_page0->Flags);
    if (native & MPI2_RAIDCONFIG0_FLAG_FOREIGN_CONFIG) {
        native = FALSE;
    } else {
        native = TRUE;
    }
    raidconfig->m_native = (uint8_t)native;

    /*
     * Get volume information for the volumes in the
     * config.
     */
    numelements = ddi_get8(accessp, &raidconfig_page0->NumElements);
    vol = 0;
    disk = 0;
    element = (pMpi2RaidConfig0ConfigElement_t)
        &raidconfig_page0->ConfigElement;

    for (i = 0; ((i < numelements) && native); i++, element++) {
        /*
         * Get the element type.  Could be Volume,
         * PhysDisk, Hot Spare, or Online Capacity
         * Expansion PhysDisk.
         */
        elementtype = ddi_get16(accessp, &element->ElementFlags);
        elementtype &= MPI2_RAIDCONFIG0_EFLAGS_MASK_ELEMENT_TYPE;

        /*
         * For volumes, get the RAID settings and the
         * WWID.
         */
        if (elementtype == etype_vol) {
            voldevhandle = ddi_get16(accessp,
                &element->VolDevHandle);
            raidconfig->m_raidvol[vol].m_israid = 1;
            raidconfig->m_raidvol[vol].
                m_raidhandle = voldevhandle;
            /*
             * Get the settings for the raid
             * volume.  This includes the
             * DevHandles for the disks making up
             * the raid volume.
             */
            if (mptsas_get_raid_settings(mpt,
                &raidconfig->m_raidvol[vol]))
                continue;

            /*
             * Get the WWID of the RAID volume for
             * SAS HBA
             */
            if (mptsas_get_raid_wwid(mpt,
                &raidconfig->m_raidvol[vol]))
                continue;

            raidwwn = raidconfig->m_raidvol[vol].
                m_raidwwid;

            /*
             * RAID uses phymask of 0.
             */
            ptgt = mptsas_tgt_alloc(mpt,
                voldevhandle, raidwwn, 0, 0, 0);

            raidconfig->m_raidvol[vol].m_raidtgt =
                ptgt;

            /*
             * Increment volume index within this
             * raid config.
             */
            vol++;
        } else if ((elementtype == etype_pd) ||
            (elementtype == etype_hs) ||
            (elementtype == etype_oce)) {
            /*
             * For all other element types, put
             * their DevHandles in the phys disk
             * list of the config.  These are all
             * some variation of a Phys Disk and
             * this list is used to keep these
             * disks from going online.
             */
            raidconfig->m_physdisk_devhdl[disk] = ddi_get16(accessp,
                &element->PhysDiskDevHandle);

            /*
             * Increment disk index within this
             * raid config.
             */
            disk++;
        }
    }

    return (rval);
}

int
mptsas_get_raid_info(mptsas_t *mpt)
{
    int rval = DDI_SUCCESS;
    uint32_t confignum, pageaddress;
    uint8_t configindex;

    ASSERT(mutex_owned(&mpt->m_mutex));

    /*
     * Clear all RAID info before starting.
     */
    bzero(mpt->m_raidconfig, sizeof (mpt->m_raidconfig));
    mpt->m_num_raid_configs = 0;

    configindex = 0;
    confignum = 0xff;
    pageaddress = MPI2_RAID_PGAD_FORM_GET_NEXT_CONFIGNUM | confignum;
    while (rval == DDI_SUCCESS) {
        /*
         * Get the header and config page.  reply contains the reply
         * frame, which holds status info for the request.
         */
        rval = mptsas_access_config_page(mpt,
            MPI2_CONFIG_ACTION_PAGE_READ_CURRENT,
            MPI2_CONFIG_EXTPAGETYPE_RAID_CONFIG, 0, pageaddress,
            mptsas_raidconf_page_0_cb, &confignum, configindex);
        configindex++;
        pageaddress = MPI2_RAID_PGAD_FORM_GET_NEXT_CONFIGNUM |
            confignum;
    }

    return (rval);
}
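
/*
 * Callback for RAID Volume Page 0.  The single variable argument is the
 * mptsas_raidvol_t to populate with the volume's state, settings, RAID
 * level, and member disk numbers.
 */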
static int
mptsas_raidvol_page_0_cb(mptsas_t *mpt, caddr_t page_memp,
    ddi_acc_handle_t accessp, uint16_t iocstatus, uint32_t iocloginfo,
    va_list ap)
{
#ifndef __lock_lint
    _NOTE(ARGUNUSED(ap))
#endif
    pMpi2RaidVolPage0_t raidpage;
    int rval = DDI_SUCCESS, i;
    mptsas_raidvol_t *raidvol;
    uint8_t numdisks, volstate, voltype, physdisknum;
    uint32_t volsetting;
    uint32_t statusflags, resync_flag;

    if (iocstatus == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
        return (DDI_FAILURE);

    if (iocstatus != MPI2_IOCSTATUS_SUCCESS) {
        mptsas_log(mpt, CE_WARN, "mptsas_raidvol_page0_cb "
            "config: IOCStatus=0x%x, IOCLogInfo=0x%x",
            iocstatus, iocloginfo);
        rval = DDI_FAILURE;
        return (rval);
    }

    raidvol = va_arg(ap, mptsas_raidvol_t *);

    raidpage = (pMpi2RaidVolPage0_t)page_memp;
    volstate = ddi_get8(accessp, &raidpage->VolumeState);
    volsetting = ddi_get32(accessp,
        (uint32_t *)(void *)&raidpage->VolumeSettings);
    statusflags = ddi_get32(accessp, &raidpage->VolumeStatusFlags);
    voltype = ddi_get8(accessp, &raidpage->VolumeType);

    raidvol->m_state = volstate;
    raidvol->m_statusflags = statusflags;
    /*
     * Volume size is not used right now. Set to 0.
     */
    raidvol->m_raidsize = 0;
    raidvol->m_settings = volsetting;
    raidvol->m_raidlevel = voltype;

    if (statusflags & MPI2_RAIDVOL0_STATUS_FLAG_QUIESCED) {
        mptsas_log(mpt, CE_NOTE, "?Volume %d is quiesced\n",
            raidvol->m_raidhandle);
    }

    if (statusflags &
        MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) {
        mptsas_log(mpt, CE_NOTE, "?Volume %d is resyncing\n",
            raidvol->m_raidhandle);
    }

    resync_flag = MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS;
    switch (volstate) {
    case MPI2_RAID_VOL_STATE_OPTIMAL:
        mptsas_log(mpt, CE_NOTE, "?Volume %d is "
            "optimal\n", raidvol->m_raidhandle);
        break;
    case MPI2_RAID_VOL_STATE_DEGRADED:
        if ((statusflags & resync_flag) == 0) {
            mptsas_log(mpt, CE_WARN, "Volume %d "
                "is degraded\n",
                raidvol->m_raidhandle);
        }
        break;
    case MPI2_RAID_VOL_STATE_FAILED:
        mptsas_log(mpt, CE_WARN, "Volume %d is "
            "failed\n", raidvol->m_raidhandle);
        break;
    case MPI2_RAID_VOL_STATE_MISSING:
        mptsas_log(mpt, CE_WARN, "Volume %d is "
            "missing\n", raidvol->m_raidhandle);
        break;
    default:
        break;
    }

    numdisks = raidpage->NumPhysDisks;
    raidvol->m_ndisks = numdisks;
    for (i = 0; i < numdisks; i++) {
        physdisknum = raidpage->PhysDisk[i].PhysDiskNum;
        raidvol->m_disknum[i] = physdisknum;
        if (mptsas_get_physdisk_settings(mpt, raidvol,
            physdisknum))
            break;
    }

    return (rval);
}

int
mptsas_get_raid_settings(mptsas_t *mpt, mptsas_raidvol_t *raidvol)
{
    int rval = DDI_SUCCESS;
    uint32_t page_address;

    ASSERT(mutex_owned(&mpt->m_mutex));

    /*
     * Get the header and config page.  reply contains the reply frame,
     * which holds status info for the request.
     */
    page_address = (MPI2_RAID_VOLUME_PGAD_FORM_MASK &
        MPI2_RAID_VOLUME_PGAD_FORM_HANDLE) | raidvol->m_raidhandle;
    rval = mptsas_access_config_page(mpt,
        MPI2_CONFIG_ACTION_PAGE_READ_CURRENT,
        MPI2_CONFIG_PAGETYPE_RAID_VOLUME, 0, page_address,
        mptsas_raidvol_page_0_cb, raidvol);

    return (rval);
}
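
/*
 * Callback for RAID Volume Page 1.  The variable argument is a uint64_t
 * pointer that receives the volume WWID; the WWID is copied out a byte at
 * a time and then converted with LE_64() since the page data is
 * little-endian.
 */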
static int
mptsas_raidvol_page_1_cb(mptsas_t *mpt, caddr_t page_memp,
    ddi_acc_handle_t accessp, uint16_t iocstatus, uint32_t iocloginfo,
    va_list ap)
{
#ifndef __lock_lint
    _NOTE(ARGUNUSED(ap))
#endif
    pMpi2RaidVolPage1_t raidpage;
    int rval = DDI_SUCCESS, i;
    uint8_t *sas_addr = NULL;
    uint8_t tmp_sas_wwn[SAS_WWN_BYTE_SIZE];
    uint64_t *sas_wwn;

    if (iocstatus != MPI2_IOCSTATUS_SUCCESS) {
        mptsas_log(mpt, CE_WARN, "mptsas_raidvol_page_1_cb "
            "config: IOCStatus=0x%x, IOCLogInfo=0x%x",
            iocstatus, iocloginfo);
        rval = DDI_FAILURE;
        return (rval);
    }
    sas_wwn = va_arg(ap, uint64_t *);

    raidpage = (pMpi2RaidVolPage1_t)page_memp;
    sas_addr = (uint8_t *)(&raidpage->WWID);
    for (i = 0; i < SAS_WWN_BYTE_SIZE; i++) {
        tmp_sas_wwn[i] = ddi_get8(accessp, sas_addr + i);
    }
    bcopy(tmp_sas_wwn, sas_wwn, SAS_WWN_BYTE_SIZE);
    *sas_wwn = LE_64(*sas_wwn);
    return (rval);
}
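
/*
 * Read RAID Volume Page 1 to obtain the volume WWID and store it in the
 * raidvol structure.  The top nibble of the WWID is replaced via
 * MPTSAS_RAID_WWID() for OBP, as noted below.
 */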
static int
mptsas_get_raid_wwid(mptsas_t *mpt, mptsas_raidvol_t *raidvol)
{
    int rval = DDI_SUCCESS;
    uint32_t page_address;
    uint64_t sas_wwn;

    ASSERT(mutex_owned(&mpt->m_mutex));

    /*
     * Get the header and config page.  reply contains the reply frame,
     * which holds status info for the request.
     */
    page_address = (MPI2_RAID_VOLUME_PGAD_FORM_MASK &
        MPI2_RAID_VOLUME_PGAD_FORM_HANDLE) | raidvol->m_raidhandle;
    rval = mptsas_access_config_page(mpt,
        MPI2_CONFIG_ACTION_PAGE_READ_CURRENT,
        MPI2_CONFIG_PAGETYPE_RAID_VOLUME, 1, page_address,
        mptsas_raidvol_page_1_cb, &sas_wwn);

    /*
     * Get the required information from the page.
     */
    if (rval == DDI_SUCCESS) {
        /*
         * replace top nibble of WWID of RAID to '3' for OBP
         */
        sas_wwn = MPTSAS_RAID_WWID(sas_wwn);
        raidvol->m_raidwwid = sas_wwn;
    }

    return (rval);
}

static int
mptsas_raidphydsk_page_0_cb(mptsas_t *mpt, caddr_t page_memp,
    ddi_acc_handle_t accessp, uint16_t iocstatus, uint32_t iocloginfo,
    va_list ap)
{
#ifndef __lock_lint
    _NOTE(ARGUNUSED(ap))
#endif
    pMpi2RaidPhysDiskPage0_t diskpage;
    int rval = DDI_SUCCESS;
    uint16_t *devhdl;
    uint8_t *state;

    if (iocstatus == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
        return (DDI_FAILURE);

    if (iocstatus != MPI2_IOCSTATUS_SUCCESS) {
        mptsas_log(mpt, CE_WARN, "mptsas_raidphydsk_page0_cb "
            "config: IOCStatus=0x%x, IOCLogInfo=0x%x",
            iocstatus, iocloginfo);
        rval = DDI_FAILURE;
        return (rval);
    }
    devhdl = va_arg(ap, uint16_t *);
    state = va_arg(ap, uint8_t *);
    diskpage = (pMpi2RaidPhysDiskPage0_t)page_memp;
    *devhdl = ddi_get16(accessp, &diskpage->DevHandle);
    *state = ddi_get8(accessp, &diskpage->PhysDiskState);
    return (rval);
}
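
/*
 * Read RAID Physical Disk Page 0 for the given physical disk number and
 * record its DevHandle and a coarse status in the matching slot of the
 * volume's per-disk arrays: OFFLINE maps to RAID_DISKSTATUS_FAILED, while
 * online/optimal/degraded/rebuilding map to RAID_DISKSTATUS_GOOD.
 */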
int
mptsas_get_physdisk_settings(mptsas_t *mpt, mptsas_raidvol_t *raidvol,
    uint8_t physdisknum)
{
    int rval = DDI_SUCCESS, i;
    uint8_t state;
    uint16_t devhdl;
    uint32_t page_address;

    ASSERT(mutex_owned(&mpt->m_mutex));

    /*
     * Get the header and config page.  reply contains the reply frame,
     * which holds status info for the request.
     */
    page_address = (MPI2_PHYSDISK_PGAD_FORM_MASK &
        MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM) | physdisknum;
    rval = mptsas_access_config_page(mpt,
        MPI2_CONFIG_ACTION_PAGE_READ_CURRENT,
        MPI2_CONFIG_PAGETYPE_RAID_PHYSDISK, 0, page_address,
        mptsas_raidphydsk_page_0_cb, &devhdl, &state);

    /*
     * Get the required information from the page.
     */
    if (rval == DDI_SUCCESS) {
        for (i = 0; i < MPTSAS_MAX_DISKS_IN_VOL; i++) {
            /* find the correct position in the arrays */
            if (raidvol->m_disknum[i] == physdisknum)
                break;
        }
        raidvol->m_devhdl[i] = devhdl;

        switch (state) {
        case MPI2_RAID_PD_STATE_OFFLINE:
            raidvol->m_diskstatus[i] =
                RAID_DISKSTATUS_FAILED;
            break;

        case MPI2_RAID_PD_STATE_HOT_SPARE:
        case MPI2_RAID_PD_STATE_NOT_CONFIGURED:
        case MPI2_RAID_PD_STATE_NOT_COMPATIBLE:
            break;

        case MPI2_RAID_PD_STATE_DEGRADED:
        case MPI2_RAID_PD_STATE_OPTIMAL:
        case MPI2_RAID_PD_STATE_REBUILDING:
        case MPI2_RAID_PD_STATE_ONLINE:
        default:
            raidvol->m_diskstatus[i] =
                RAID_DISKSTATUS_GOOD;
            break;
        }
    }
    return (rval);
}

/*
 * RAID Action for System Shutdown.  This request uses the dedicated TM slot to
 * avoid a call to mptsas_save_cmd.  Since Solaris requires that the mutex is
 * not held during the mptsas_quiesce function, this RAID action must not use
 * the normal code path of requests and replies.
 */
void
mptsas_raid_action_system_shutdown(mptsas_t *mpt)
{
    pMpi2RaidActionRequest_t action;
    uint8_t ir_active = FALSE, reply_type;
    uint8_t function, found_reply = FALSE;
    uint16_t SMID, action_type;
    mptsas_slots_t *slots = mpt->m_active;
    int config, vol;
    mptsas_cmd_t *cmd;
    uint32_t reply_addr;
    uint64_t request_desc;
    int cnt;
    pMpi2ReplyDescriptorsUnion_t reply_desc_union;
    pMPI2DefaultReply_t reply;
    pMpi2AddressReplyDescriptor_t address_reply;

    /*
     * Before doing the system shutdown RAID Action, make sure that the IOC
     * supports IR and make sure there is a valid volume for the request.
     */
    if (mpt->m_ir_capable) {
        for (config = 0; (config < mpt->m_num_raid_configs) &&
            (!ir_active); config++) {
            for (vol = 0; vol < MPTSAS_MAX_RAIDVOLS; vol++) {
                if (mpt->m_raidconfig[config].m_raidvol[vol].
                    m_israid) {
                    ir_active = TRUE;
                    break;
                }
            }
        }
    }
    if (!ir_active) {
        return;
    }

    /*
     * If TM slot is already being used (highly unlikely), show message and
     * don't issue the RAID action.
     */
    if (slots->m_slot[MPTSAS_TM_SLOT(mpt)] != NULL) {
        mptsas_log(mpt, CE_WARN, "RAID Action slot in use.  Cancelling"
            " System Shutdown RAID Action.\n");
        return;
    }

    /*
     * Create the cmd and put it in the dedicated TM slot.
     */
    cmd = &(mpt->m_event_task_mgmt.m_event_cmd);
    bzero((caddr_t)cmd, sizeof (*cmd));
    cmd->cmd_pkt = NULL;
    cmd->cmd_slot = MPTSAS_TM_SLOT(mpt);
    slots->m_slot[MPTSAS_TM_SLOT(mpt)] = cmd;

    /*
     * Form message for raid action.
     */
    action = (pMpi2RaidActionRequest_t)(mpt->m_req_frame +
        (mpt->m_req_frame_size * cmd->cmd_slot));
    bzero(action, mpt->m_req_frame_size);
    action->Function = MPI2_FUNCTION_RAID_ACTION;
    action->Action = MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED;

    /*
     * Send RAID Action.
     */
    (void) ddi_dma_sync(mpt->m_dma_req_frame_hdl, 0, 0,
        DDI_DMA_SYNC_FORDEV);
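
    /*
     * Per the MPI2 default request descriptor layout, the descriptor type
     * flags occupy the low byte and the SMID occupies bits 16..31, which
     * is why the TM slot number is shifted left by 16 before being
     * combined with MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE.
     */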
    request_desc = (cmd->cmd_slot << 16) +
        MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
    MPTSAS_START_CMD(mpt, request_desc);

    /*
     * Even though reply does not matter because the system is shutting
     * down, wait no more than 5 seconds here to get the reply just because
     * we don't want to leave it hanging if it's coming.  Poll because
     * interrupts are disabled when this function is called.
     */
    for (cnt = 0; cnt < 5000; cnt++) {
        /*
         * Check for a reply.
         */
        (void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
            DDI_DMA_SYNC_FORCPU);

        reply_desc_union = (pMpi2ReplyDescriptorsUnion_t)
            MPTSAS_GET_NEXT_REPLY(mpt, mpt->m_post_index);

        if (ddi_get32(mpt->m_acc_post_queue_hdl,
            &reply_desc_union->Words.Low) == 0xFFFFFFFF ||
            ddi_get32(mpt->m_acc_post_queue_hdl,
            &reply_desc_union->Words.High) == 0xFFFFFFFF) {
            drv_usecwait(1000);
            continue;
        }

        /*
         * There is a reply.  If it's not an address reply, ignore it.
         */
        reply_type = ddi_get8(mpt->m_acc_post_queue_hdl,
            &reply_desc_union->Default.ReplyFlags);
        reply_type &= MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
        if (reply_type != MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
            goto clear_and_continue;
        }

        /*
         * SMID must be the TM slot since that's what we're using for
         * this RAID action.  If not, ignore this reply.
         */
        address_reply =
            (pMpi2AddressReplyDescriptor_t)reply_desc_union;
        SMID = ddi_get16(mpt->m_acc_post_queue_hdl,
            &address_reply->SMID);
        if (SMID != MPTSAS_TM_SLOT(mpt)) {
            goto clear_and_continue;
        }

        /*
         * If reply frame is not in the proper range ignore it.
         */
        reply_addr = ddi_get32(mpt->m_acc_post_queue_hdl,
            &address_reply->ReplyFrameAddress);
        if ((reply_addr < mpt->m_reply_frame_dma_addr) ||
            (reply_addr >= (mpt->m_reply_frame_dma_addr +
            (mpt->m_reply_frame_size * mpt->m_free_queue_depth))) ||
            ((reply_addr - mpt->m_reply_frame_dma_addr) %
            mpt->m_reply_frame_size != 0)) {
            goto clear_and_continue;
        }

        /*
         * If not a RAID action reply ignore it.
         */
        (void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
            DDI_DMA_SYNC_FORCPU);
        reply = (pMPI2DefaultReply_t)(mpt->m_reply_frame +
            (reply_addr - mpt->m_reply_frame_dma_addr));
        function = ddi_get8(mpt->m_acc_reply_frame_hdl,
            &reply->Function);
        if (function != MPI2_FUNCTION_RAID_ACTION) {
            goto clear_and_continue;
        }

        /*
         * Finally, make sure this is the System Shutdown RAID action.
         * If not, ignore reply.
         */
        action_type = ddi_get16(mpt->m_acc_reply_frame_hdl,
            &reply->FunctionDependent1);
        if (action_type !=
            MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED) {
            goto clear_and_continue;
        }
        found_reply = TRUE;

clear_and_continue:
        /*
         * Clear the reply descriptor for re-use and increment index.
         */
        ddi_put64(mpt->m_acc_post_queue_hdl,
            &((uint64_t *)(void *)mpt->m_post_queue)[mpt->m_post_index],
            0xFFFFFFFFFFFFFFFF);
        (void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
            DDI_DMA_SYNC_FORDEV);

        /*
         * Update the global reply index and keep looking for the
         * reply if not found yet.
         */
        if (++mpt->m_post_index == mpt->m_post_queue_depth) {
            mpt->m_post_index = 0;
        }
        ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyPostHostIndex,
            mpt->m_post_index);
        if (!found_reply) {
            continue;
        }

        break;
    }

    /*
     * clear the used slot as the last step.
     */
    slots->m_slot[MPTSAS_TM_SLOT(mpt)] = NULL;
}
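
/*
 * Clear the driver's cached state for the RAID volume with the given
 * DevHandle.  Returns 0 on success or -1 if no known volume matches volid.
 */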
int
mptsas_delete_volume(mptsas_t *mpt, uint16_t volid)
{
    int config, i = 0, vol = (-1);

    for (config = 0; (config < mpt->m_num_raid_configs) && (vol != i);
        config++) {
        for (i = 0; i < MPTSAS_MAX_RAIDVOLS; i++) {
            if (mpt->m_raidconfig[config].m_raidvol[i].
                m_raidhandle == volid) {
                vol = i;
                break;
            }
        }
    }

    if (vol < 0) {
        mptsas_log(mpt, CE_WARN, "raid doesn't exist at specified "
            "target.");
        return (-1);
    }

    mpt->m_raidconfig[config].m_raidvol[vol].m_israid = 0;
    mpt->m_raidconfig[config].m_raidvol[vol].m_ndisks = 0;
    for (i = 0; i < MPTSAS_MAX_DISKS_IN_VOL; i++) {
        mpt->m_raidconfig[config].m_raidvol[vol].m_disknum[i] = 0;
        mpt->m_raidconfig[config].m_raidvol[vol].m_devhdl[i] = 0;
    }

    return (0);
}