/*
 * mr_sas_tbolt.c: source for mr_sas driver for New Generation.
 * i.e. Thunderbolt and Invader
 *
 * Solaris MegaRAID device driver for SAS2.0 controllers
 * Copyright (c) 2008-2012, LSI Logic Corporation.
 * All rights reserved.
 *
 * Version:
 * Author:
 *		Swaminathan K S
 *		Arun Chandrashekhar
 *		Manju R
 *		Rasheed
 *		Shakeel Bukhari
 */

/*
 * Copyright 2013 Nexenta Systems, Inc. All rights reserved.
 * Copyright 2015, 2017 Citrus IT Limited. All rights reserved.
 * Copyright 2015 Garrett D'Amore <garrett@damore.org>
 */
#include <sys/types.h>
#include <sys/file.h>
#include <sys/atomic.h>
#include <sys/scsi/scsi.h>
#include <sys/byteorder.h>
#include <sys/sdt.h>
#include "ld_pd_map.h"
#include "mr_sas.h"
#include "fusion.h"

/*
 * FMA header files
 */
#include <sys/ddifm.h>
#include <sys/fm/protocol.h>
#include <sys/fm/util.h>
#include <sys/fm/io/ddi.h>
/* Pre-TB command size and TB command size. */
#define	MR_COMMAND_SIZE	(64*20)	/* 1280 bytes */
MR_LD_RAID *MR_LdRaidGet(U32 ld, MR_FW_RAID_MAP_ALL *map);
U16 MR_TargetIdToLdGet(U32 ldTgtId, MR_FW_RAID_MAP_ALL *map);
U16 MR_GetLDTgtId(U32 ld, MR_FW_RAID_MAP_ALL *map);
U16 get_updated_dev_handle(PLD_LOAD_BALANCE_INFO, struct IO_REQUEST_INFO *);
extern ddi_dma_attr_t mrsas_generic_dma_attr;
extern uint32_t mrsas_tbolt_max_cap_maxxfer;
extern struct ddi_device_acc_attr endian_attr;
extern int debug_level_g;
extern unsigned int enable_fp;
volatile int dump_io_wait_time = 90;
extern volatile int debug_timeout_g;
extern int mrsas_issue_pending_cmds(struct mrsas_instance *);
extern int mrsas_complete_pending_cmds(struct mrsas_instance *instance);
extern void push_pending_mfi_pkt(struct mrsas_instance *,
    struct mrsas_cmd *);
extern U8 MR_BuildRaidContext(struct mrsas_instance *, struct IO_REQUEST_INFO *,
    MPI2_SCSI_IO_VENDOR_UNIQUE *, MR_FW_RAID_MAP_ALL *);

/* Local static prototypes. */
static struct mrsas_cmd *mrsas_tbolt_build_cmd(struct mrsas_instance *,
    struct scsi_address *, struct scsi_pkt *, uchar_t *);
static void mrsas_tbolt_set_pd_lba(U8 cdb[], uint8_t *cdb_len_ptr,
    U64 start_blk, U32 num_blocks);
static int mrsas_tbolt_check_map_info(struct mrsas_instance *);
static int mrsas_tbolt_sync_map_info(struct mrsas_instance *);
static int mrsas_tbolt_prepare_pkt(struct scsa_cmd *);
static int mrsas_tbolt_ioc_init(struct mrsas_instance *, dma_obj_t *);
static void mrsas_tbolt_get_pd_info(struct mrsas_instance *,
    struct mrsas_tbolt_pd_info *, int);

static int mrsas_debug_tbolt_fw_faults_after_ocr = 0;

/*
 * destroy_mfi_mpi_frame_pool
 */
void
destroy_mfi_mpi_frame_pool(struct mrsas_instance *instance)
{
	int	i;
	struct mrsas_cmd	*cmd;

	/* return all mfi frames to pool */
	for (i = 0; i < MRSAS_APP_RESERVED_CMDS; i++) {
		cmd = instance->cmd_list[i];
		if (cmd->frame_dma_obj_status == DMA_OBJ_ALLOCATED) {
			(void) mrsas_free_dma_obj(instance,
			    cmd->frame_dma_obj);
		}
		cmd->frame_dma_obj_status = DMA_OBJ_FREED;
	}
}

/*
 * destroy_mpi2_frame_pool
 */
void
destroy_mpi2_frame_pool(struct mrsas_instance *instance)
{
	if (instance->mpi2_frame_pool_dma_obj.status == DMA_OBJ_ALLOCATED) {
		(void) mrsas_free_dma_obj(instance,
		    instance->mpi2_frame_pool_dma_obj);
		instance->mpi2_frame_pool_dma_obj.status |= DMA_OBJ_FREED;
	}
}

/*
 * mrsas_tbolt_free_additional_dma_buffer
 */
void
mrsas_tbolt_free_additional_dma_buffer(struct mrsas_instance *instance)
{
	int i;

	if (instance->mfi_internal_dma_obj.status == DMA_OBJ_ALLOCATED) {
		(void) mrsas_free_dma_obj(instance,
		    instance->mfi_internal_dma_obj);
		instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
	}
	if (instance->mfi_evt_detail_obj.status == DMA_OBJ_ALLOCATED) {
		(void) mrsas_free_dma_obj(instance,
		    instance->mfi_evt_detail_obj);
		instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED;
	}

	for (i = 0; i < 2; i++) {
		if (instance->ld_map_obj[i].status == DMA_OBJ_ALLOCATED) {
			(void) mrsas_free_dma_obj(instance,
			    instance->ld_map_obj[i]);
			instance->ld_map_obj[i].status = DMA_OBJ_FREED;
		}
	}
}

/*
 * free_req_rep_desc_pool
 */
void
free_req_rep_desc_pool(struct mrsas_instance *instance)
{
	if (instance->request_desc_dma_obj.status == DMA_OBJ_ALLOCATED) {
		(void) mrsas_free_dma_obj(instance,
		    instance->request_desc_dma_obj);
		instance->request_desc_dma_obj.status = DMA_OBJ_FREED;
	}

	if (instance->reply_desc_dma_obj.status == DMA_OBJ_ALLOCATED) {
		(void) mrsas_free_dma_obj(instance,
		    instance->reply_desc_dma_obj);
		instance->reply_desc_dma_obj.status = DMA_OBJ_FREED;
	}
}

/*
 * ThunderBolt(TB) Request Message Frame Pool
 */
static int
create_mpi2_frame_pool(struct mrsas_instance *instance)
{
	int		i = 0;
	uint16_t	max_cmd;
	uint32_t	sgl_sz;
	uint32_t	raid_msg_size;
	uint32_t	total_size;
	uint32_t	offset;
	uint32_t	io_req_base_phys;
	uint8_t		*io_req_base;
	struct mrsas_cmd	*cmd;

	max_cmd = instance->max_fw_cmds;

	sgl_sz = 1024;
	raid_msg_size = MRSAS_THUNDERBOLT_MSG_SIZE;

	/* Allocating additional 256 bytes to accommodate SMID 0. */
	total_size = MRSAS_THUNDERBOLT_MSG_SIZE + (max_cmd * raid_msg_size) +
	    (max_cmd * sgl_sz) + (max_cmd * SENSE_LENGTH);

	con_log(CL_ANN1, (CE_NOTE, "create_mpi2_frame_pool: "
	    "max_cmd %x", max_cmd));

	con_log(CL_DLEVEL3, (CE_NOTE, "create_mpi2_frame_pool: "
	    "request message frame pool size %x", total_size));

	/*
	 * ThunderBolt(TB): We need to create a single chunk of DMA'able
	 * memory and then split it into message frames, one per command
	 * (up to 1024 commands). Each frame holds a RAID MESSAGE FRAME
	 * which can embed an MFI_FRAME within it. See alloc_req_rep_desc(),
	 * where the request/reply descriptor queues are allocated the same
	 * way, for comparison.
	 */
	instance->mpi2_frame_pool_dma_obj.size = total_size;
	instance->mpi2_frame_pool_dma_obj.dma_attr = mrsas_generic_dma_attr;
	instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_addr_hi =
	    0xFFFFFFFFU;
	instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_count_max =
	    0xFFFFFFFFU;
	instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_sgllen = 1;
	instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_align = 256;

	if (mrsas_alloc_dma_obj(instance, &instance->mpi2_frame_pool_dma_obj,
	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
		dev_err(instance->dip, CE_WARN,
		    "could not alloc mpi2 frame pool");
		return (DDI_FAILURE);
	}

	bzero(instance->mpi2_frame_pool_dma_obj.buffer, total_size);
	instance->mpi2_frame_pool_dma_obj.status |= DMA_OBJ_ALLOCATED;

	instance->io_request_frames =
	    (uint8_t *)instance->mpi2_frame_pool_dma_obj.buffer;
	instance->io_request_frames_phy =
	    (uint32_t)
	    instance->mpi2_frame_pool_dma_obj.dma_cookie[0].dmac_address;

	con_log(CL_DLEVEL3, (CE_NOTE, "io_request_frames 0x%p",
	    (void *)instance->io_request_frames));

	con_log(CL_DLEVEL3, (CE_NOTE, "io_request_frames_phy 0x%x",
	    instance->io_request_frames_phy));

	io_req_base = (uint8_t *)instance->io_request_frames +
	    MRSAS_THUNDERBOLT_MSG_SIZE;
	io_req_base_phys = instance->io_request_frames_phy +
	    MRSAS_THUNDERBOLT_MSG_SIZE;

	con_log(CL_DLEVEL3, (CE_NOTE,
	    "io req_base_phys 0x%x", io_req_base_phys));

	for (i = 0; i < max_cmd; i++) {
		cmd = instance->cmd_list[i];

		offset = i * MRSAS_THUNDERBOLT_MSG_SIZE;

		cmd->scsi_io_request = (Mpi2RaidSCSIIORequest_t *)
		    ((uint8_t *)io_req_base + offset);
		cmd->scsi_io_request_phys_addr = io_req_base_phys + offset;

		cmd->sgl = (Mpi2SGEIOUnion_t *)((uint8_t *)io_req_base +
		    (max_cmd * raid_msg_size) + i * sgl_sz);

		cmd->sgl_phys_addr = (io_req_base_phys +
		    (max_cmd * raid_msg_size) + i * sgl_sz);

		cmd->sense1 = (uint8_t *)((uint8_t *)io_req_base +
		    (max_cmd * raid_msg_size) + (max_cmd * sgl_sz) +
		    (i * SENSE_LENGTH));

		cmd->sense_phys_addr1 = (io_req_base_phys +
		    (max_cmd * raid_msg_size) + (max_cmd * sgl_sz) +
		    (i * SENSE_LENGTH));

		cmd->SMID = i + 1;

		con_log(CL_DLEVEL3, (CE_NOTE, "Frame Pool Addr [%x]0x%p",
		    cmd->index, (void *)cmd->scsi_io_request));

		con_log(CL_DLEVEL3, (CE_NOTE, "Frame Pool Phys Addr [%x]0x%x",
		    cmd->index, cmd->scsi_io_request_phys_addr));

		con_log(CL_DLEVEL3, (CE_NOTE, "Sense Addr [%x]0x%p",
		    cmd->index, (void *)cmd->sense1));

		con_log(CL_DLEVEL3, (CE_NOTE, "Sense Addr Phys [%x]0x%x",
		    cmd->index, cmd->sense_phys_addr1));

		con_log(CL_DLEVEL3, (CE_NOTE, "SGL buffers [%x]0x%p",
		    cmd->index, (void *)cmd->sgl));

		con_log(CL_DLEVEL3, (CE_NOTE, "SGL buffers phys [%x]0x%x",
		    cmd->index, cmd->sgl_phys_addr));
	}

	return (DDI_SUCCESS);
}
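
/*
 * A sketch of the resulting pool layout (offsets follow the arithmetic
 * above; the leading frame keeps SMID 0 out of the per-command range):
 *
 *	io_request_frames_phy -> +--------------------------------+
 *	                         | frame reserved for SMID 0      |
 *	io_req_base_phys ------> +--------------------------------+
 *	                         | max_cmd message frames         |
 *	                         +--------------------------------+
 *	                         | max_cmd chained SGL buffers    |
 *	                         | (sgl_sz = 1024 bytes each)     |
 *	                         +--------------------------------+
 *	                         | max_cmd sense buffers          |
 *	                         +--------------------------------+
 */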

/*
 * alloc_additional_dma_buffer for AEN
 */
int
mrsas_tbolt_alloc_additional_dma_buffer(struct mrsas_instance *instance)
{
	uint32_t	internal_buf_size = PAGESIZE*2;
	int		i;

	/* Initialize buffer status as free */
	instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
	instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED;
	instance->ld_map_obj[0].status = DMA_OBJ_FREED;
	instance->ld_map_obj[1].status = DMA_OBJ_FREED;

	instance->mfi_internal_dma_obj.size = internal_buf_size;
	instance->mfi_internal_dma_obj.dma_attr = mrsas_generic_dma_attr;
	instance->mfi_internal_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
	instance->mfi_internal_dma_obj.dma_attr.dma_attr_count_max =
	    0xFFFFFFFFU;
	instance->mfi_internal_dma_obj.dma_attr.dma_attr_sgllen = 1;

	if (mrsas_alloc_dma_obj(instance, &instance->mfi_internal_dma_obj,
	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
		dev_err(instance->dip, CE_WARN,
		    "could not alloc reply queue");
		return (DDI_FAILURE);
	}

	bzero(instance->mfi_internal_dma_obj.buffer, internal_buf_size);

	instance->mfi_internal_dma_obj.status |= DMA_OBJ_ALLOCATED;
	instance->internal_buf =
	    (caddr_t)(((unsigned long)instance->mfi_internal_dma_obj.buffer));
	instance->internal_buf_dmac_add =
	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address;
	instance->internal_buf_size = internal_buf_size;

	/* allocate evt_detail */
	instance->mfi_evt_detail_obj.size = sizeof (struct mrsas_evt_detail);
	instance->mfi_evt_detail_obj.dma_attr = mrsas_generic_dma_attr;
	instance->mfi_evt_detail_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
	instance->mfi_evt_detail_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
	instance->mfi_evt_detail_obj.dma_attr.dma_attr_sgllen = 1;
	instance->mfi_evt_detail_obj.dma_attr.dma_attr_align = 8;

	if (mrsas_alloc_dma_obj(instance, &instance->mfi_evt_detail_obj,
	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
		dev_err(instance->dip, CE_WARN,
		    "mrsas_tbolt_alloc_additional_dma_buffer: "
		    "could not allocate data transfer buffer.");
		goto fail_tbolt_additional_buff;
	}

	bzero(instance->mfi_evt_detail_obj.buffer,
	    sizeof (struct mrsas_evt_detail));

	instance->mfi_evt_detail_obj.status |= DMA_OBJ_ALLOCATED;

	instance->size_map_info = sizeof (MR_FW_RAID_MAP) +
	    (sizeof (MR_LD_SPAN_MAP) * (MAX_LOGICAL_DRIVES - 1));

	for (i = 0; i < 2; i++) {
		/* allocate the data transfer buffer */
		instance->ld_map_obj[i].size = instance->size_map_info;
		instance->ld_map_obj[i].dma_attr = mrsas_generic_dma_attr;
		instance->ld_map_obj[i].dma_attr.dma_attr_addr_hi =
		    0xFFFFFFFFU;
		instance->ld_map_obj[i].dma_attr.dma_attr_count_max =
		    0xFFFFFFFFU;
		instance->ld_map_obj[i].dma_attr.dma_attr_sgllen = 1;
		instance->ld_map_obj[i].dma_attr.dma_attr_align = 1;

		if (mrsas_alloc_dma_obj(instance, &instance->ld_map_obj[i],
		    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
			dev_err(instance->dip, CE_WARN,
			    "could not allocate data transfer buffer.");
			goto fail_tbolt_additional_buff;
		}

		instance->ld_map_obj[i].status |= DMA_OBJ_ALLOCATED;

		bzero(instance->ld_map_obj[i].buffer, instance->size_map_info);

		instance->ld_map[i] =
		    (MR_FW_RAID_MAP_ALL *)instance->ld_map_obj[i].buffer;
		instance->ld_map_phy[i] = (uint32_t)instance->
		    ld_map_obj[i].dma_cookie[0].dmac_address;

		con_log(CL_DLEVEL3, (CE_NOTE,
		    "ld_map Addr Phys 0x%x", instance->ld_map_phy[i]));

		con_log(CL_DLEVEL3, (CE_NOTE,
		    "size_map_info 0x%x", instance->size_map_info));
	}

	return (DDI_SUCCESS);

fail_tbolt_additional_buff:
	mrsas_tbolt_free_additional_dma_buffer(instance);

	return (DDI_FAILURE);
}

MRSAS_REQUEST_DESCRIPTOR_UNION *
mr_sas_get_request_descriptor(struct mrsas_instance *instance, uint16_t index)
{
	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;

	if (index > instance->max_fw_cmds) {
		con_log(CL_ANN1, (CE_NOTE,
		    "Invalid SMID 0x%x request for descriptor", index));
		con_log(CL_ANN1, (CE_NOTE,
		    "max_fw_cmds : 0x%x", instance->max_fw_cmds));
		return (NULL);
	}

	req_desc = (MRSAS_REQUEST_DESCRIPTOR_UNION *)
	    ((char *)instance->request_message_pool +
	    (sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION) * index));

	con_log(CL_ANN1, (CE_NOTE,
	    "request descriptor : 0x%08lx", (unsigned long)req_desc));

	con_log(CL_ANN1, (CE_NOTE,
	    "request descriptor base phy : 0x%08lx",
	    (unsigned long)instance->request_message_pool_phy));

	return ((MRSAS_REQUEST_DESCRIPTOR_UNION *)req_desc);
}
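
/*
 * A worked example of the indexing above (assuming the 8-byte
 * MRSAS_REQUEST_DESCRIPTOR_UNION implied by the "8 * max_fw_cmds"
 * request queue sizing in alloc_req_rep_desc()): cmd_list[i] carries
 * SMID i + 1 but is looked up here by its cmd->index == i, so the
 * descriptor for SMID s lives at request_message_pool + 8 * (s - 1).
 */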

/*
 * Allocate Request and Reply Queue Descriptors.
 */
static int
alloc_req_rep_desc(struct mrsas_instance *instance)
{
	uint32_t	request_q_sz, reply_q_sz;
	int		i, max_reply_q_sz;
	MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;

	/*
	 * ThunderBolt(TB): There is no longer a producer/consumer mechanism.
	 * Once we get an interrupt we scan through the list of reply
	 * descriptors and process them accordingly. We need to allocate
	 * memory for up to 1024 reply descriptors.
	 */

	/* Allocate Reply Descriptors */
	con_log(CL_ANN1, (CE_NOTE, " reply q desc len = %x",
	    (uint_t)sizeof (MPI2_REPLY_DESCRIPTORS_UNION)));

	/* reply queue size should be multiple of 16 */
	max_reply_q_sz = ((instance->max_fw_cmds + 1 + 15)/16)*16;

	reply_q_sz = 8 * max_reply_q_sz;

	instance->reply_desc_dma_obj.size = reply_q_sz;
	instance->reply_desc_dma_obj.dma_attr = mrsas_generic_dma_attr;
	instance->reply_desc_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
	instance->reply_desc_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
	instance->reply_desc_dma_obj.dma_attr.dma_attr_sgllen = 1;
	instance->reply_desc_dma_obj.dma_attr.dma_attr_align = 16;

	if (mrsas_alloc_dma_obj(instance, &instance->reply_desc_dma_obj,
	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
		dev_err(instance->dip, CE_WARN, "could not alloc reply queue");
		return (DDI_FAILURE);
	}

	bzero(instance->reply_desc_dma_obj.buffer, reply_q_sz);
	instance->reply_desc_dma_obj.status |= DMA_OBJ_ALLOCATED;

	/* virtual address of reply queue */
	instance->reply_frame_pool = (MPI2_REPLY_DESCRIPTORS_UNION *)(
	    instance->reply_desc_dma_obj.buffer);

	instance->reply_q_depth = max_reply_q_sz;

	con_log(CL_ANN1, (CE_NOTE, "[reply queue depth]0x%x",
	    instance->reply_q_depth));

	con_log(CL_ANN1, (CE_NOTE, "[reply queue virt addr]0x%p",
	    (void *)instance->reply_frame_pool));

	/* initializing reply address to 0xFFFFFFFF */
	reply_desc = instance->reply_frame_pool;

	for (i = 0; i < instance->reply_q_depth; i++) {
		reply_desc->Words = (uint64_t)~0;
		reply_desc++;
	}

	instance->reply_frame_pool_phy =
	    (uint32_t)instance->reply_desc_dma_obj.dma_cookie[0].dmac_address;

	con_log(CL_ANN1, (CE_NOTE,
	    "[reply queue phys addr]0x%x", instance->reply_frame_pool_phy));

	instance->reply_pool_limit_phy = (instance->reply_frame_pool_phy +
	    reply_q_sz);

	con_log(CL_ANN1, (CE_NOTE, "[reply pool limit phys addr]0x%x",
	    instance->reply_pool_limit_phy));

	/* Allocate Request Descriptors */
	con_log(CL_ANN1, (CE_NOTE, " request q desc len = %x",
	    (int)sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION)));

	request_q_sz = 8 *
	    (instance->max_fw_cmds);

	instance->request_desc_dma_obj.size = request_q_sz;
	instance->request_desc_dma_obj.dma_attr = mrsas_generic_dma_attr;
	instance->request_desc_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
	instance->request_desc_dma_obj.dma_attr.dma_attr_count_max =
	    0xFFFFFFFFU;
	instance->request_desc_dma_obj.dma_attr.dma_attr_sgllen = 1;
	instance->request_desc_dma_obj.dma_attr.dma_attr_align = 16;

	if (mrsas_alloc_dma_obj(instance, &instance->request_desc_dma_obj,
	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
		dev_err(instance->dip, CE_WARN,
		    "could not alloc request queue desc");
		goto fail_undo_reply_queue;
	}

	bzero(instance->request_desc_dma_obj.buffer, request_q_sz);
	instance->request_desc_dma_obj.status |= DMA_OBJ_ALLOCATED;

	/* virtual address of request queue desc */
	instance->request_message_pool = (MRSAS_REQUEST_DESCRIPTOR_UNION *)
	    (instance->request_desc_dma_obj.buffer);

	instance->request_message_pool_phy =
	    (uint32_t)instance->request_desc_dma_obj.dma_cookie[0].dmac_address;

	return (DDI_SUCCESS);

fail_undo_reply_queue:
	if (instance->reply_desc_dma_obj.status == DMA_OBJ_ALLOCATED) {
		(void) mrsas_free_dma_obj(instance,
		    instance->reply_desc_dma_obj);
		instance->reply_desc_dma_obj.status = DMA_OBJ_FREED;
	}

	return (DDI_FAILURE);
}
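
/*
 * A worked example of the reply queue sizing: with max_fw_cmds capped
 * to 1007 by mrsas_init_adapter_tbolt() below,
 * ((1007 + 1 + 15)/16)*16 = 1008 descriptors, i.e. reply_q_sz =
 * 8 * 1008 = 8064 bytes; one more slot than commands the driver may
 * post, rounded up to a multiple of 16.
 */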

/*
 * mrsas_alloc_cmd_pool_tbolt
 *
 * TODO: merge tbolt-specific code into mrsas_alloc_cmd_pool() to have a
 * single routine
 */
static int
mrsas_alloc_cmd_pool_tbolt(struct mrsas_instance *instance)
{
	int		i;
	int		count;
	uint32_t	max_cmd;
	uint32_t	reserve_cmd;
	size_t		sz;

	struct mrsas_cmd	*cmd;

	max_cmd = instance->max_fw_cmds;
	con_log(CL_ANN1, (CE_NOTE, "mrsas_alloc_cmd_pool: "
	    "max_cmd %x", max_cmd));

	sz = sizeof (struct mrsas_cmd *) * max_cmd;

	/*
	 * instance->cmd_list is an array of struct mrsas_cmd pointers.
	 * Allocate the dynamic array first and then allocate individual
	 * commands.
	 */
	instance->cmd_list = kmem_zalloc(sz, KM_SLEEP);

	/* create a frame pool and assign one frame to each cmd */
	for (count = 0; count < max_cmd; count++) {
		instance->cmd_list[count] =
		    kmem_zalloc(sizeof (struct mrsas_cmd), KM_SLEEP);
	}

	/* add all the commands to command pool */

	INIT_LIST_HEAD(&instance->cmd_pool_list);
	INIT_LIST_HEAD(&instance->cmd_pend_list);
	INIT_LIST_HEAD(&instance->cmd_app_pool_list);

	reserve_cmd = MRSAS_APP_RESERVED_CMDS;

	/* cmd index 0 is reserved for IOC INIT */
	for (i = 1; i < reserve_cmd; i++) {
		cmd = instance->cmd_list[i];
		cmd->index = i;
		mlist_add_tail(&cmd->list, &instance->cmd_app_pool_list);
	}

	for (i = reserve_cmd; i < max_cmd; i++) {
		cmd = instance->cmd_list[i];
		cmd->index = i;
		mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
	}

	return (DDI_SUCCESS);

mrsas_undo_cmds:
	if (count > 0) {
		/* free each cmd */
		for (i = 0; i < count; i++) {
			if (instance->cmd_list[i] != NULL) {
				kmem_free(instance->cmd_list[i],
				    sizeof (struct mrsas_cmd));
			}
			instance->cmd_list[i] = NULL;
		}
	}

mrsas_undo_cmd_list:
	if (instance->cmd_list != NULL)
		kmem_free(instance->cmd_list, sz);
	instance->cmd_list = NULL;

	return (DDI_FAILURE);
}
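
/*
 * The resulting partitioning of cmd_list[]: index 0 is held back for
 * IOC INIT, indices 1 .. MRSAS_APP_RESERVED_CMDS - 1 service
 * application/pass-through traffic via cmd_app_pool_list, and the
 * remainder up to max_fw_cmds - 1 feed regular I/O via cmd_pool_list.
 */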

/*
 * free_space_for_mpi2
 */
void
free_space_for_mpi2(struct mrsas_instance *instance)
{
	/* already freed */
	if (instance->cmd_list == NULL) {
		return;
	}

	/* First free the additional DMA buffer */
	mrsas_tbolt_free_additional_dma_buffer(instance);

	/* Free the request/reply descriptor pool */
	free_req_rep_desc_pool(instance);

	/* Free the MPI message pool */
	destroy_mpi2_frame_pool(instance);

	/* Free the MFI frame pool */
	destroy_mfi_frame_pool(instance);

	/* Free all the commands in the cmd_list */
	/* Free the cmd_list buffer itself */
	mrsas_free_cmd_pool(instance);
}

/*
 * ThunderBolt(TB) memory allocations for commands/messages/frames.
 */
int
alloc_space_for_mpi2(struct mrsas_instance *instance)
{
	/* Allocate command pool (memory for cmd_list & individual commands) */
	if (mrsas_alloc_cmd_pool_tbolt(instance)) {
		dev_err(instance->dip, CE_WARN, "Error creating cmd pool");
		return (DDI_FAILURE);
	}

	/* Initialize single reply size and Message size */
	instance->reply_size = MRSAS_THUNDERBOLT_REPLY_SIZE;
	instance->raid_io_msg_size = MRSAS_THUNDERBOLT_MSG_SIZE;

	instance->max_sge_in_main_msg = (MRSAS_THUNDERBOLT_MSG_SIZE -
	    (sizeof (MPI2_RAID_SCSI_IO_REQUEST) -
	    sizeof (MPI2_SGE_IO_UNION)))/ sizeof (MPI2_SGE_IO_UNION);
	instance->max_sge_in_chain = (MR_COMMAND_SIZE -
	    MRSAS_THUNDERBOLT_MSG_SIZE) / sizeof (MPI2_SGE_IO_UNION);

	/* Reduce SG count by 1 to take care of group cmds feature in FW */
	instance->max_num_sge = (instance->max_sge_in_main_msg +
	    instance->max_sge_in_chain - 2);
	instance->chain_offset_mpt_msg =
	    offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 16;
	instance->chain_offset_io_req = (MRSAS_THUNDERBOLT_MSG_SIZE -
	    sizeof (MPI2_SGE_IO_UNION)) / 16;
	instance->reply_read_index = 0;

	/* Allocate Request and Reply descriptors Array */
	/* Make sure the buffer is aligned to 8 for req/rep descriptor Pool */
	if (alloc_req_rep_desc(instance)) {
		dev_err(instance->dip, CE_WARN,
		    "Error, allocating memory for descriptor pool");
		goto mpi2_undo_cmd_pool;
	}
	con_log(CL_ANN1, (CE_NOTE, "[request message pool phys addr]0x%x",
	    instance->request_message_pool_phy));

	/* Allocate MFI Frame pool - for MPI-MFI passthru commands */
	if (create_mfi_frame_pool(instance)) {
		dev_err(instance->dip, CE_WARN,
		    "Error, allocating memory for MFI frame pool");
		goto mpi2_undo_descriptor_pool;
	}

	/* Allocate MPI2 Message pool */
	/*
	 * Make sure the buffer is aligned to 256 for the RAID message packet;
	 * create an io request pool and assign one frame to each cmd.
	 */
	if (create_mpi2_frame_pool(instance)) {
		dev_err(instance->dip, CE_WARN,
		    "Error, allocating memory for MPI2 message pool");
		goto mpi2_undo_mfi_frame_pool;
	}

#ifdef DEBUG
	con_log(CL_ANN1, (CE_CONT, "[max_sge_in_main_msg]0x%x",
	    instance->max_sge_in_main_msg));
	con_log(CL_ANN1, (CE_CONT, "[max_sge_in_chain]0x%x",
	    instance->max_sge_in_chain));
	con_log(CL_ANN1, (CE_CONT,
	    "[max_sge]0x%x", instance->max_num_sge));
	con_log(CL_ANN1, (CE_CONT, "[chain_offset_mpt_msg]0x%x",
	    instance->chain_offset_mpt_msg));
	con_log(CL_ANN1, (CE_CONT, "[chain_offset_io_req]0x%x",
	    instance->chain_offset_io_req));
#endif

	/* Allocate additional dma buffer */
	if (mrsas_tbolt_alloc_additional_dma_buffer(instance)) {
		dev_err(instance->dip, CE_WARN,
		    "Error, allocating tbolt additional DMA buffer");
		goto mpi2_undo_message_pool;
	}

	return (DDI_SUCCESS);

mpi2_undo_message_pool:
	destroy_mpi2_frame_pool(instance);

mpi2_undo_mfi_frame_pool:
	destroy_mfi_frame_pool(instance);

mpi2_undo_descriptor_pool:
	free_req_rep_desc_pool(instance);

mpi2_undo_cmd_pool:
	mrsas_free_cmd_pool(instance);

	return (DDI_FAILURE);
}
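
/*
 * A worked example of the SGE budget above, assuming the usual
 * MRSAS_THUNDERBOLT_MSG_SIZE of 256 bytes and 16-byte
 * MPI2_SGE_IO_UNION elements: max_sge_in_chain =
 * (1280 - 256) / 16 = 64 chained SGEs, and chain_offset_io_req =
 * (256 - 16) / 16 = 15, i.e. the chain element occupies the last
 * 16-byte slot of the message frame.
 */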

/*
 * mrsas_init_adapter_tbolt - Initialize fusion interface adapter.
 */
int
mrsas_init_adapter_tbolt(struct mrsas_instance *instance)
{
	/*
	 * Reduce the max supported cmds by 1. This is to ensure that the
	 * reply_q_sz (1 more than the max cmd that driver may send)
	 * does not exceed max cmds that the FW can support
	 */
	if (instance->max_fw_cmds > 1008) {
		instance->max_fw_cmds = 1008;
		instance->max_fw_cmds = instance->max_fw_cmds - 1;
	}

	con_log(CL_ANN, (CE_NOTE, "mrsas_init_adapter_tbolt: "
	    "instance->max_fw_cmds 0x%X.", instance->max_fw_cmds));

	/* create a pool of commands */
	if (alloc_space_for_mpi2(instance) != DDI_SUCCESS) {
		dev_err(instance->dip, CE_WARN,
		    "alloc_space_for_mpi2() failed.");

		return (DDI_FAILURE);
	}

	/* Send ioc init message */
	/* NOTE: the issue_init call does FMA checking already. */
	if (mrsas_issue_init_mpi2(instance) != DDI_SUCCESS) {
		dev_err(instance->dip, CE_WARN,
		    "mrsas_issue_init_mpi2() failed.");

		goto fail_init_fusion;
	}

	instance->unroll.alloc_space_mpi2 = 1;

	con_log(CL_ANN, (CE_NOTE,
	    "mrsas_init_adapter_tbolt: SUCCESSFUL"));

	return (DDI_SUCCESS);

fail_init_fusion:
	free_space_for_mpi2(instance);

	return (DDI_FAILURE);
}
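
/*
 * With the cap above, a controller advertising more than 1008 commands
 * ends up with max_fw_cmds = 1007, so the reply queue sized in
 * alloc_req_rep_desc() (1007 + 1, rounded up to a multiple of 16, i.e.
 * 1008 descriptors) stays within what the firmware supports.
 */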

/*
 * init_mpi2
 */
int
mrsas_issue_init_mpi2(struct mrsas_instance *instance)
{
	dma_obj_t init2_dma_obj;
	int ret_val = DDI_SUCCESS;

	/* allocate DMA buffer for IOC INIT message */
	init2_dma_obj.size = sizeof (Mpi2IOCInitRequest_t);
	init2_dma_obj.dma_attr = mrsas_generic_dma_attr;
	init2_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
	init2_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
	init2_dma_obj.dma_attr.dma_attr_sgllen = 1;
	init2_dma_obj.dma_attr.dma_attr_align = 256;

	if (mrsas_alloc_dma_obj(instance, &init2_dma_obj,
	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
		dev_err(instance->dip, CE_WARN, "mr_sas_issue_init_mpi2 "
		    "could not allocate data transfer buffer.");
		return (DDI_FAILURE);
	}
	(void) memset(init2_dma_obj.buffer, 2, sizeof (Mpi2IOCInitRequest_t));

	con_log(CL_ANN1, (CE_NOTE,
	    "mrsas_issue_init_mpi2 phys addr: %x",
	    init2_dma_obj.dma_cookie[0].dmac_address));

	/* Initialize and send ioc init message */
	ret_val = mrsas_tbolt_ioc_init(instance, &init2_dma_obj);
	if (ret_val == DDI_FAILURE) {
		con_log(CL_ANN1, (CE_WARN,
		    "mrsas_issue_init_mpi2: Failed"));
		goto fail_init_mpi2;
	}

	/* free IOC init DMA buffer */
	if (mrsas_free_dma_obj(instance, init2_dma_obj)
	    != DDI_SUCCESS) {
		con_log(CL_ANN1, (CE_WARN,
		    "mrsas_issue_init_mpi2: Free Failed"));
		return (DDI_FAILURE);
	}

	/* Get/Check and sync ld_map info */
	instance->map_id = 0;
	if (mrsas_tbolt_check_map_info(instance) == DDI_SUCCESS)
		(void) mrsas_tbolt_sync_map_info(instance);

	/* No mrsas_cmd to send, so send NULL. */
	if (mrsas_common_check(instance, NULL) != DDI_SUCCESS)
		goto fail_init_mpi2;

	con_log(CL_ANN, (CE_NOTE,
	    "mrsas_issue_init_mpi2: SUCCESSFUL"));

	return (DDI_SUCCESS);

fail_init_mpi2:
	(void) mrsas_free_dma_obj(instance, init2_dma_obj);

	return (DDI_FAILURE);
}

static int
mrsas_tbolt_ioc_init(struct mrsas_instance *instance, dma_obj_t *mpi2_dma_obj)
{
	int		numbytes;
	uint16_t	flags;
	struct mrsas_init_frame2	*mfiFrameInit2;
	struct mrsas_header		*frame_hdr;
	Mpi2IOCInitRequest_t		*init;
	struct mrsas_cmd		*cmd = NULL;
	struct mrsas_drv_ver		drv_ver_info;
	MRSAS_REQUEST_DESCRIPTOR_UNION	req_desc;
	uint32_t			timeout;

	con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

#ifdef DEBUG
	con_log(CL_ANN1, (CE_CONT, " mfiFrameInit2 len = %x\n",
	    (int)sizeof (*mfiFrameInit2)));
	con_log(CL_ANN1, (CE_CONT, " MPI len = %x\n", (int)sizeof (*init)));
	con_log(CL_ANN1, (CE_CONT, " mfiFrameInit2 len = %x\n",
	    (int)sizeof (struct mrsas_init_frame2)));
	con_log(CL_ANN1, (CE_CONT, " MPI len = %x\n",
	    (int)sizeof (Mpi2IOCInitRequest_t)));
#endif

	init = (Mpi2IOCInitRequest_t *)mpi2_dma_obj->buffer;
	numbytes = sizeof (*init);
	bzero(init, numbytes);

	ddi_put8(mpi2_dma_obj->acc_handle, &init->Function,
	    MPI2_FUNCTION_IOC_INIT);

	ddi_put8(mpi2_dma_obj->acc_handle, &init->WhoInit,
	    MPI2_WHOINIT_HOST_DRIVER);

	/* set MsgVersion and HeaderVersion host driver was built with */
	ddi_put16(mpi2_dma_obj->acc_handle, &init->MsgVersion,
	    MPI2_VERSION);

	ddi_put16(mpi2_dma_obj->acc_handle, &init->HeaderVersion,
	    MPI2_HEADER_VERSION);

	ddi_put16(mpi2_dma_obj->acc_handle, &init->SystemRequestFrameSize,
	    instance->raid_io_msg_size / 4);

	ddi_put16(mpi2_dma_obj->acc_handle, &init->ReplyFreeQueueDepth,
	    0);

	ddi_put16(mpi2_dma_obj->acc_handle,
	    &init->ReplyDescriptorPostQueueDepth,
	    instance->reply_q_depth);

	/*
	 * These addresses are set using the DMA cookie addresses from when
	 * the memory was allocated. Sense buffer hi address should be 0.
	 * ddi_put32(accessp, &init->SenseBufferAddressHigh, 0);
	 */

	ddi_put32(mpi2_dma_obj->acc_handle,
	    &init->SenseBufferAddressHigh, 0);

	ddi_put64(mpi2_dma_obj->acc_handle,
	    (uint64_t *)&init->SystemRequestFrameBaseAddress,
	    instance->io_request_frames_phy);

	ddi_put64(mpi2_dma_obj->acc_handle,
	    &init->ReplyDescriptorPostQueueAddress,
	    instance->reply_frame_pool_phy);

	ddi_put64(mpi2_dma_obj->acc_handle,
	    &init->ReplyFreeQueueAddress, 0);

	cmd = instance->cmd_list[0];
	if (cmd == NULL) {
		return (DDI_FAILURE);
	}
	cmd->retry_count_for_ocr = 0;
	cmd->pkt = NULL;
	cmd->drv_pkt_time = 0;

	mfiFrameInit2 = (struct mrsas_init_frame2 *)cmd->scsi_io_request;
	con_log(CL_ANN1, (CE_CONT, "[mfi vaddr]%p", (void *)mfiFrameInit2));

	frame_hdr = &cmd->frame->hdr;

	ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status,
	    MFI_CMD_STATUS_POLL_MODE);

	flags = ddi_get16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags);

	flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags);

	con_log(CL_ANN, (CE_CONT,
	    "mrsas_tbolt_ioc_init: SMID:%x\n", cmd->SMID));

	/* Init the MFI Header */
	ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
	    &mfiFrameInit2->cmd, MFI_CMD_OP_INIT);

	con_log(CL_ANN1, (CE_CONT, "[CMD]%x", mfiFrameInit2->cmd));

	ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
	    &mfiFrameInit2->cmd_status,
	    MFI_STAT_INVALID_STATUS);

	con_log(CL_ANN1, (CE_CONT, "[Status]%x", mfiFrameInit2->cmd_status));

	ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
	    &mfiFrameInit2->queue_info_new_phys_addr_lo,
	    mpi2_dma_obj->dma_cookie[0].dmac_address);

	ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
	    &mfiFrameInit2->data_xfer_len,
	    sizeof (Mpi2IOCInitRequest_t));

	con_log(CL_ANN1, (CE_CONT, "[reply q desc addr]%x",
	    (int)init->ReplyDescriptorPostQueueAddress));

	/* fill driver version information */
	fill_up_drv_ver(&drv_ver_info);

	/* allocate the driver version data transfer buffer */
	instance->drv_ver_dma_obj.size = sizeof (drv_ver_info.drv_ver);
	instance->drv_ver_dma_obj.dma_attr = mrsas_generic_dma_attr;
	instance->drv_ver_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
	instance->drv_ver_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
	instance->drv_ver_dma_obj.dma_attr.dma_attr_sgllen = 1;
	instance->drv_ver_dma_obj.dma_attr.dma_attr_align = 1;

	if (mrsas_alloc_dma_obj(instance, &instance->drv_ver_dma_obj,
	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
		dev_err(instance->dip, CE_WARN,
		    "fusion init: Could not allocate driver version buffer.");
		return (DDI_FAILURE);
	}
	/* copy driver version to dma buffer */
	bzero(instance->drv_ver_dma_obj.buffer, sizeof (drv_ver_info.drv_ver));
	ddi_rep_put8(cmd->frame_dma_obj.acc_handle,
	    (uint8_t *)drv_ver_info.drv_ver,
	    (uint8_t *)instance->drv_ver_dma_obj.buffer,
	    sizeof (drv_ver_info.drv_ver), DDI_DEV_AUTOINCR);

	/* send driver version physical address to firmware */
	ddi_put64(cmd->frame_dma_obj.acc_handle, &mfiFrameInit2->driverversion,
	    instance->drv_ver_dma_obj.dma_cookie[0].dmac_address);

	con_log(CL_ANN1, (CE_CONT, "[MPIINIT2 frame Phys addr ]0x%x len = %x",
	    mfiFrameInit2->queue_info_new_phys_addr_lo,
	    (int)sizeof (Mpi2IOCInitRequest_t)));

	con_log(CL_ANN1, (CE_CONT, "[Length]%x", mfiFrameInit2->data_xfer_len));

	con_log(CL_ANN1, (CE_CONT, "[MFI frame Phys Address]%x len = %x",
	    cmd->scsi_io_request_phys_addr,
	    (int)sizeof (struct mrsas_init_frame2)));

	/* disable interrupts before sending INIT2 frame */
	instance->func_ptr->disable_intr(instance);

	req_desc.Words = cmd->scsi_io_request_phys_addr;
	req_desc.MFAIo.RequestFlags =
	    (MPI2_REQ_DESCRIPT_FLAGS_MFA << MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);

	cmd->request_desc = &req_desc;

	/* issue the init frame */

	mutex_enter(&instance->reg_write_mtx);
	WR_IB_LOW_QPORT((uint32_t)(req_desc.Words), instance);
	WR_IB_HIGH_QPORT((uint32_t)(req_desc.Words >> 32), instance);
	mutex_exit(&instance->reg_write_mtx);

	con_log(CL_ANN1, (CE_CONT, "[cmd = %d] ", frame_hdr->cmd));
	con_log(CL_ANN1, (CE_CONT, "[cmd Status= %x] ",
	    frame_hdr->cmd_status));

	timeout = drv_usectohz(MFI_POLL_TIMEOUT_SECS * MICROSEC);
	do {
		if (ddi_get8(cmd->frame_dma_obj.acc_handle,
		    &mfiFrameInit2->cmd_status) != MFI_CMD_STATUS_POLL_MODE)
			break;
		delay(1);
		timeout--;
	} while (timeout > 0);

	if (ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle,
	    &mfiFrameInit2->cmd_status) == 0) {
		con_log(CL_ANN, (CE_NOTE, "INIT2 Success"));
	} else {
		con_log(CL_ANN, (CE_WARN, "INIT2 Fail"));
		mrsas_dump_reply_desc(instance);
		goto fail_ioc_init;
	}

	mrsas_dump_reply_desc(instance);

	instance->unroll.verBuff = 1;

	con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_ioc_init: SUCCESSFUL"));

	return (DDI_SUCCESS);

fail_ioc_init:

	(void) mrsas_free_dma_obj(instance, instance->drv_ver_dma_obj);

	return (DDI_FAILURE);
}

static int
wait_for_outstanding_poll_io(struct mrsas_instance *instance)
{
	int i;
	uint32_t wait_time = dump_io_wait_time;
	for (i = 0; i < wait_time; i++) {
		/*
		 * Check For Outstanding poll Commands
		 * except ldsync command and aen command
		 */
		if (instance->fw_outstanding <= 2) {
			break;
		}
		drv_usecwait(10*MILLISEC);
		/* complete commands from reply queue */
		(void) mr_sas_tbolt_process_outstanding_cmd(instance);
	}
	if (instance->fw_outstanding > 2) {
		return (1);
	}
	return (0);
}
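
/*
 * Each iteration above busy-waits 10 ms (drv_usecwait() takes
 * microseconds, so 10*MILLISEC is 10000 us) and then drains the reply
 * queue, so the default dump_io_wait_time of 90 gives roughly a
 * 0.9 second polling budget; up to two outstanding commands (the
 * ldsync and AEN commands) are tolerated.
 */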

/*
 * scsi_pkt handling
 *
 * Visible to the external world via the transport structure.
 */

int
mrsas_tbolt_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	struct mrsas_instance	*instance = ADDR2MR(ap);
	struct scsa_cmd		*acmd = PKT2CMD(pkt);
	struct mrsas_cmd	*cmd = NULL;
	uchar_t			cmd_done = 0;

	con_log(CL_DLEVEL1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
	if (instance->deadadapter == 1) {
		dev_err(instance->dip, CE_WARN,
		    "mrsas_tran_start: TBOLT return TRAN_FATAL_ERROR "
		    "for IO, as the HBA doesn't take any more IOs");
		if (pkt) {
			pkt->pkt_reason = CMD_DEV_GONE;
			pkt->pkt_statistics = STAT_DISCON;
		}
		return (TRAN_FATAL_ERROR);
	}
	if (instance->adapterresetinprogress) {
		con_log(CL_ANN, (CE_NOTE, "Reset flag set, "
		    "returning mfi_pkt and setting TRAN_BUSY\n"));
		return (TRAN_BUSY);
	}
	(void) mrsas_tbolt_prepare_pkt(acmd);

	cmd = mrsas_tbolt_build_cmd(instance, ap, pkt, &cmd_done);

	/*
	 * Check if the command is already completed by the mrsas_build_cmd()
	 * routine. In which case the busy_flag would be clear and scb will be
	 * NULL and appropriate reason provided in pkt_reason field
	 */
	if (cmd_done) {
		pkt->pkt_reason = CMD_CMPLT;
		pkt->pkt_scbp[0] = STATUS_GOOD;
		pkt->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET
		    | STATE_SENT_CMD;
		if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && pkt->pkt_comp) {
			(*pkt->pkt_comp)(pkt);
		}
		return (TRAN_ACCEPT);
	}

	if (cmd == NULL) {
		return (TRAN_BUSY);
	}

	if ((pkt->pkt_flags & FLAG_NOINTR) == 0) {
		if (instance->fw_outstanding > instance->max_fw_cmds) {
			dev_err(instance->dip, CE_WARN,
			    "Command Queue Full... Returning BUSY");
			DTRACE_PROBE2(tbolt_start_tran_err,
			    uint16_t, instance->fw_outstanding,
			    uint16_t, instance->max_fw_cmds);
			return_raid_msg_pkt(instance, cmd);
			return (TRAN_BUSY);
		}

		/* Synchronize the Cmd frame for the controller */
		(void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 0, 0,
		    DDI_DMA_SYNC_FORDEV);

		con_log(CL_ANN, (CE_CONT, "tbolt_issue_cmd: SCSI CDB[0]=0x%x "
		    "cmd->index:0x%x SMID 0x%x\n", pkt->pkt_cdbp[0],
		    cmd->index, cmd->SMID));

		instance->func_ptr->issue_cmd(cmd, instance);
	} else {
		instance->func_ptr->issue_cmd(cmd, instance);
		(void) wait_for_outstanding_poll_io(instance);
		(void) mrsas_common_check(instance, cmd);
		DTRACE_PROBE2(tbolt_start_nointr_done,
		    uint8_t, cmd->frame->hdr.cmd,
		    uint8_t, cmd->frame->hdr.cmd_status);
	}

	return (TRAN_ACCEPT);
}

/*
 * prepare the pkt:
 * the pkt may have been resubmitted or just reused so
 * initialize some fields and do some checks.
 */
static int
mrsas_tbolt_prepare_pkt(struct scsa_cmd *acmd)
{
	struct scsi_pkt	*pkt = CMD2PKT(acmd);

	/*
	 * Reinitialize some fields that need it; the packet may
	 * have been resubmitted
	 */
	pkt->pkt_reason = CMD_CMPLT;
	pkt->pkt_state = 0;
	pkt->pkt_statistics = 0;
	pkt->pkt_resid = 0;

	/*
	 * zero status byte.
	 */
	*(pkt->pkt_scbp) = 0;

	return (0);
}

int
mr_sas_tbolt_build_sgl(struct mrsas_instance *instance,
    struct scsa_cmd *acmd,
    struct mrsas_cmd *cmd,
    Mpi2RaidSCSIIORequest_t *scsi_raid_io,
    uint32_t *datalen)
{
	uint32_t		MaxSGEs;
	int			sg_to_process;
	uint32_t		i, j;
	uint32_t		numElements, endElement;
	Mpi25IeeeSgeChain64_t	*ieeeChainElement = NULL;
	Mpi25IeeeSgeChain64_t	*scsi_raid_io_sgl_ieee = NULL;
	ddi_acc_handle_t acc_handle =
	    instance->mpi2_frame_pool_dma_obj.acc_handle;

	con_log(CL_ANN1, (CE_NOTE,
	    "chkpnt: Building Chained SGL :%d", __LINE__));

	/* Calculate SGE size in number of Words(32bit) */
	/* Clear the datalen before updating it. */
	*datalen = 0;

	MaxSGEs = instance->max_sge_in_main_msg;

	ddi_put16(acc_handle, &scsi_raid_io->SGLFlags,
	    MPI2_SGE_FLAGS_64_BIT_ADDRESSING);

	/* set data transfer flag. */
	if (acmd->cmd_flags & CFLAG_DMASEND) {
		ddi_put32(acc_handle, &scsi_raid_io->Control,
		    MPI2_SCSIIO_CONTROL_WRITE);
	} else {
		ddi_put32(acc_handle, &scsi_raid_io->Control,
		    MPI2_SCSIIO_CONTROL_READ);
	}

	numElements = acmd->cmd_cookiecnt;

	con_log(CL_DLEVEL1, (CE_NOTE, "[SGE Count]:%x", numElements));

	if (numElements > instance->max_num_sge) {
		con_log(CL_ANN, (CE_NOTE,
		    "[Max SGE Count Exceeded]:%x", numElements));
		return (numElements);
	}

	ddi_put8(acc_handle, &scsi_raid_io->RaidContext.numSGE,
	    (uint8_t)numElements);

	/* set end element in main message frame */
	endElement = (numElements <= MaxSGEs) ? numElements : (MaxSGEs - 1);

	/* prepare the scatter-gather list for the firmware */
	scsi_raid_io_sgl_ieee =
	    (Mpi25IeeeSgeChain64_t *)&scsi_raid_io->SGL.IeeeChain;

	if (instance->gen3) {
		Mpi25IeeeSgeChain64_t *sgl_ptr_end = scsi_raid_io_sgl_ieee;
		sgl_ptr_end += instance->max_sge_in_main_msg - 1;

		ddi_put8(acc_handle, &sgl_ptr_end->Flags, 0);
	}

	for (i = 0; i < endElement; i++, scsi_raid_io_sgl_ieee++) {
		ddi_put64(acc_handle, &scsi_raid_io_sgl_ieee->Address,
		    acmd->cmd_dmacookies[i].dmac_laddress);

		ddi_put32(acc_handle, &scsi_raid_io_sgl_ieee->Length,
		    acmd->cmd_dmacookies[i].dmac_size);

		ddi_put8(acc_handle, &scsi_raid_io_sgl_ieee->Flags, 0);

		if (instance->gen3) {
			if (i == (numElements - 1)) {
				ddi_put8(acc_handle,
				    &scsi_raid_io_sgl_ieee->Flags,
				    IEEE_SGE_FLAGS_END_OF_LIST);
			}
		}

		*datalen += acmd->cmd_dmacookies[i].dmac_size;

#ifdef DEBUG
		con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Address]: %" PRIx64,
		    scsi_raid_io_sgl_ieee->Address));
		con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Length]:%x",
		    scsi_raid_io_sgl_ieee->Length));
		con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Flags]:%x",
		    scsi_raid_io_sgl_ieee->Flags));
#endif
	}

	ddi_put8(acc_handle, &scsi_raid_io->ChainOffset, 0);

	/* check if chained SGL required */
	if (i < numElements) {

		con_log(CL_ANN1, (CE_NOTE, "[Chain Element index]:%x", i));

		if (instance->gen3) {
			uint16_t ioFlags =
			    ddi_get16(acc_handle, &scsi_raid_io->IoFlags);

			if ((ioFlags &
			    MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) !=
			    MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) {
				ddi_put8(acc_handle, &scsi_raid_io->ChainOffset,
				    (U8)instance->chain_offset_io_req);
			} else {
				ddi_put8(acc_handle,
				    &scsi_raid_io->ChainOffset, 0);
			}
		} else {
			ddi_put8(acc_handle, &scsi_raid_io->ChainOffset,
			    (U8)instance->chain_offset_io_req);
		}

		/* prepare physical chain element */
		ieeeChainElement = scsi_raid_io_sgl_ieee;

		ddi_put8(acc_handle, &ieeeChainElement->NextChainOffset, 0);

		if (instance->gen3) {
			ddi_put8(acc_handle, &ieeeChainElement->Flags,
			    IEEE_SGE_FLAGS_CHAIN_ELEMENT);
		} else {
			ddi_put8(acc_handle, &ieeeChainElement->Flags,
			    (IEEE_SGE_FLAGS_CHAIN_ELEMENT |
			    MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR));
		}

		ddi_put32(acc_handle, &ieeeChainElement->Length,
		    (sizeof (MPI2_SGE_IO_UNION) * (numElements - i)));

		ddi_put64(acc_handle, &ieeeChainElement->Address,
		    (U64)cmd->sgl_phys_addr);

		sg_to_process = numElements - i;

		con_log(CL_ANN1, (CE_NOTE,
		    "[Additional SGE Count]:%x", endElement));

		/* point to the chained SGL buffer */
		scsi_raid_io_sgl_ieee = (Mpi25IeeeSgeChain64_t *)cmd->sgl;

		/* build rest of the SGL in chained buffer */
		for (j = 0; j < sg_to_process; j++, scsi_raid_io_sgl_ieee++) {
			con_log(CL_DLEVEL3, (CE_NOTE, "[remaining SGL]:%x", i));

			ddi_put64(acc_handle, &scsi_raid_io_sgl_ieee->Address,
			    acmd->cmd_dmacookies[i].dmac_laddress);

			ddi_put32(acc_handle, &scsi_raid_io_sgl_ieee->Length,
			    acmd->cmd_dmacookies[i].dmac_size);

			ddi_put8(acc_handle, &scsi_raid_io_sgl_ieee->Flags, 0);

			if (instance->gen3) {
				if (i == (numElements - 1)) {
					ddi_put8(acc_handle,
					    &scsi_raid_io_sgl_ieee->Flags,
					    IEEE_SGE_FLAGS_END_OF_LIST);
				}
			}

			*datalen += acmd->cmd_dmacookies[i].dmac_size;

#ifdef DEBUG
			con_log(CL_DLEVEL1, (CE_NOTE,
			    "[SGL Address]: %" PRIx64,
			    scsi_raid_io_sgl_ieee->Address));
			con_log(CL_DLEVEL1, (CE_NOTE,
			    "[SGL Length]:%x", scsi_raid_io_sgl_ieee->Length));
			con_log(CL_DLEVEL1, (CE_NOTE,
			    "[SGL Flags]:%x", scsi_raid_io_sgl_ieee->Flags));
#endif

			i++;
		}
	}

	return (0);
} /* end of mr_sas_tbolt_build_sgl */
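
/*
 * In short: when the cookie count fits in the main frame, all SGEs are
 * inlined; otherwise the main frame takes MaxSGEs - 1 data SGEs, the
 * next slot becomes the chain element pointing at cmd->sgl, and the
 * remaining numElements - (MaxSGEs - 1) SGEs are built in that chained
 * buffer (1024 bytes per command, carved out in create_mpi2_frame_pool()).
 */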

/*
 * build_cmd
 */
static struct mrsas_cmd *
mrsas_tbolt_build_cmd(struct mrsas_instance *instance, struct scsi_address *ap,
    struct scsi_pkt *pkt, uchar_t *cmd_done)
{
	uint8_t		fp_possible = 0;
	uint32_t	index;
	uint32_t	lba_count = 0;
	uint32_t	start_lba_hi = 0;
	uint32_t	start_lba_lo = 0;
	ddi_acc_handle_t acc_handle =
	    instance->mpi2_frame_pool_dma_obj.acc_handle;
	struct mrsas_cmd		*cmd = NULL;
	struct scsa_cmd			*acmd = PKT2CMD(pkt);
	MRSAS_REQUEST_DESCRIPTOR_UNION	*ReqDescUnion;
	Mpi2RaidSCSIIORequest_t		*scsi_raid_io;
	uint32_t			datalen;
	struct IO_REQUEST_INFO io_info;
	MR_FW_RAID_MAP_ALL *local_map_ptr;
	uint16_t pd_cmd_cdblen;

	con_log(CL_DLEVEL1, (CE_NOTE,
	    "chkpnt: Entered mrsas_tbolt_build_cmd:%d", __LINE__));

	/* find out if this is logical or physical drive command. */
	acmd->islogical = MRDRV_IS_LOGICAL(ap);
	acmd->device_id = MAP_DEVICE_ID(instance, ap);

	*cmd_done = 0;

	/* get the command packet */
	if (!(cmd = get_raid_msg_pkt(instance))) {
		DTRACE_PROBE2(tbolt_build_cmd_mfi_err, uint16_t,
		    instance->fw_outstanding, uint16_t, instance->max_fw_cmds);
		return (NULL);
	}

	index = cmd->index;
	ReqDescUnion = mr_sas_get_request_descriptor(instance, index);
	ReqDescUnion->Words = 0;
	ReqDescUnion->SCSIIO.SMID = cmd->SMID;
	ReqDescUnion->SCSIIO.RequestFlags =
	    (MPI2_REQ_DESCRIPT_FLAGS_LD_IO <<
	    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);

	cmd->request_desc = ReqDescUnion;
	cmd->pkt = pkt;
	cmd->cmd = acmd;

	DTRACE_PROBE4(tbolt_build_cmd, uint8_t, pkt->pkt_cdbp[0],
	    ulong_t, acmd->cmd_dmacount, ulong_t, acmd->cmd_dma_len,
	    uint16_t, acmd->device_id);

	/* lets get the command directions */
	if (acmd->cmd_flags & CFLAG_DMASEND) {
		if (acmd->cmd_flags & CFLAG_CONSISTENT) {
			(void) ddi_dma_sync(acmd->cmd_dmahandle,
			    acmd->cmd_dma_offset, acmd->cmd_dma_len,
			    DDI_DMA_SYNC_FORDEV);
		}
	} else if (acmd->cmd_flags & ~CFLAG_DMASEND) {
		if (acmd->cmd_flags & CFLAG_CONSISTENT) {
			(void) ddi_dma_sync(acmd->cmd_dmahandle,
			    acmd->cmd_dma_offset, acmd->cmd_dma_len,
			    DDI_DMA_SYNC_FORCPU);
		}
	} else {
		con_log(CL_ANN, (CE_NOTE, "NO DMA"));
	}

	/* get SCSI_IO raid message frame pointer */
	scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;

	/* zero out SCSI_IO raid message frame */
	bzero(scsi_raid_io, sizeof (Mpi2RaidSCSIIORequest_t));

	/* Set the ldTargetId set by BuildRaidContext() */
	ddi_put16(acc_handle, &scsi_raid_io->RaidContext.ldTargetId,
	    acmd->device_id);

	/* Copy CDB to scsi_io_request message frame */
	ddi_rep_put8(acc_handle,
	    (uint8_t *)pkt->pkt_cdbp, (uint8_t *)scsi_raid_io->CDB.CDB32,
	    acmd->cmd_cdblen, DDI_DEV_AUTOINCR);

	/*
	 * Just the CDB length, rest of the Flags are zero
	 * This will be modified later.
	 */
	ddi_put16(acc_handle, &scsi_raid_io->IoFlags, acmd->cmd_cdblen);

	pd_cmd_cdblen = acmd->cmd_cdblen;

	if (acmd->islogical) {

		switch (pkt->pkt_cdbp[0]) {
		case SCMD_READ:
		case SCMD_WRITE:
		case SCMD_READ_G1:
		case SCMD_WRITE_G1:
		case SCMD_READ_G4:
		case SCMD_WRITE_G4:
		case SCMD_READ_G5:
		case SCMD_WRITE_G5:

			/* Initialize sense Information */
			if (cmd->sense1 == NULL) {
				con_log(CL_ANN, (CE_NOTE, "tbolt_build_cmd: "
				    "Sense buffer ptr NULL "));
			}
			bzero(cmd->sense1, SENSE_LENGTH);
			con_log(CL_DLEVEL2, (CE_NOTE, "tbolt_build_cmd "
			    "CDB[0] = %x\n", pkt->pkt_cdbp[0]));

			if (acmd->cmd_cdblen == CDB_GROUP0) {
				/* 6-byte cdb */
				lba_count = (uint16_t)(pkt->pkt_cdbp[4]);
				start_lba_lo = ((uint32_t)(pkt->pkt_cdbp[3]) |
				    ((uint32_t)(pkt->pkt_cdbp[2]) << 8) |
				    ((uint32_t)((pkt->pkt_cdbp[1]) & 0x1F)
				    << 16));
			} else if (acmd->cmd_cdblen == CDB_GROUP1) {
				/* 10-byte cdb */
				lba_count =
				    (((uint16_t)(pkt->pkt_cdbp[8])) |
				    ((uint16_t)(pkt->pkt_cdbp[7]) << 8));

				start_lba_lo =
				    (((uint32_t)(pkt->pkt_cdbp[5])) |
				    ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
				    ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
				    ((uint32_t)(pkt->pkt_cdbp[2]) << 24));

			} else if (acmd->cmd_cdblen == CDB_GROUP5) {
				/* 12-byte cdb */
				lba_count = (
				    ((uint32_t)(pkt->pkt_cdbp[9])) |
				    ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
				    ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
				    ((uint32_t)(pkt->pkt_cdbp[6]) << 24));

				start_lba_lo =
				    (((uint32_t)(pkt->pkt_cdbp[5])) |
				    ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
				    ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
				    ((uint32_t)(pkt->pkt_cdbp[2]) << 24));

			} else if (acmd->cmd_cdblen == CDB_GROUP4) {
				/* 16-byte cdb */
				lba_count = (
				    ((uint32_t)(pkt->pkt_cdbp[13])) |
				    ((uint32_t)(pkt->pkt_cdbp[12]) << 8) |
				    ((uint32_t)(pkt->pkt_cdbp[11]) << 16) |
				    ((uint32_t)(pkt->pkt_cdbp[10]) << 24));

				start_lba_lo = (
				    ((uint32_t)(pkt->pkt_cdbp[9])) |
				    ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
				    ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
				    ((uint32_t)(pkt->pkt_cdbp[6]) << 24));

				start_lba_hi = (
				    ((uint32_t)(pkt->pkt_cdbp[5])) |
				    ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
				    ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
				    ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
			}
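
			/*
			 * For example, a 10-byte READ(10) CDB of
			 * 28 00 00 10 00 00 00 00 08 00 decodes per the
			 * CDB_GROUP1 branch above to
			 * start_lba_lo = 0x00100000 and lba_count = 8.
			 */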
			if (instance->tbolt &&
			    ((lba_count * 512) > mrsas_tbolt_max_cap_maxxfer)) {
				dev_err(instance->dip, CE_WARN,
				    "IO SECTOR COUNT exceeds "
				    "controller limit 0x%x sectors",
				    lba_count);
			}

			bzero(&io_info, sizeof (struct IO_REQUEST_INFO));
			io_info.ldStartBlock = ((uint64_t)start_lba_hi << 32) |
			    start_lba_lo;
			io_info.numBlocks = lba_count;
			io_info.ldTgtId = acmd->device_id;

			if (acmd->cmd_flags & CFLAG_DMASEND)
				io_info.isRead = 0;
			else
				io_info.isRead = 1;

			/* Acquire SYNC MAP UPDATE lock */
			mutex_enter(&instance->sync_map_mtx);

			local_map_ptr =
			    instance->ld_map[(instance->map_id & 1)];

			if ((MR_TargetIdToLdGet(
			    acmd->device_id, local_map_ptr) >=
			    MAX_LOGICAL_DRIVES) || !instance->fast_path_io) {
				dev_err(instance->dip, CE_NOTE,
				    "Fast Path NOT Possible, "
				    "targetId >= MAX_LOGICAL_DRIVES || "
				    "!instance->fast_path_io");
				fp_possible = 0;
				/* Set Regionlock flags to BYPASS */
				/* io_request->RaidContext.regLockFlags = 0; */
				ddi_put8(acc_handle,
				    &scsi_raid_io->RaidContext.regLockFlags, 0);
			} else {
				if (MR_BuildRaidContext(instance, &io_info,
				    &scsi_raid_io->RaidContext, local_map_ptr))
					fp_possible = io_info.fpOkForIo;
			}

			if (!enable_fp)
				fp_possible = 0;

			con_log(CL_ANN1, (CE_NOTE, "enable_fp %d "
			    "instance->fast_path_io %d fp_possible %d",
			    enable_fp, instance->fast_path_io, fp_possible));

			if (fp_possible) {

				/* Check for DIF enabled LD */
				if (MR_CheckDIF(acmd->device_id,
				    local_map_ptr)) {
					/* Prepare 32 Byte CDB for DIF capable Disk */
					mrsas_tbolt_prepare_cdb(instance,
					    scsi_raid_io->CDB.CDB32,
					    &io_info, scsi_raid_io,
					    start_lba_lo);
				} else {
					mrsas_tbolt_set_pd_lba(
					    scsi_raid_io->CDB.CDB32,
					    (uint8_t *)&pd_cmd_cdblen,
					    io_info.pdBlock, io_info.numBlocks);
					ddi_put16(acc_handle,
					    &scsi_raid_io->IoFlags,
					    pd_cmd_cdblen);
				}

				ddi_put8(acc_handle, &scsi_raid_io->Function,
				    MPI2_FUNCTION_SCSI_IO_REQUEST);

				ReqDescUnion->SCSIIO.RequestFlags =
				    (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
				    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);

				if (instance->gen3) {
					uint8_t regLockFlags = ddi_get8(
					    acc_handle,
					    &scsi_raid_io->RaidContext.
					    regLockFlags);
					uint16_t IoFlags = ddi_get16(acc_handle,
					    &scsi_raid_io->IoFlags);

					if (regLockFlags == REGION_TYPE_UNUSED)
						ReqDescUnion->
						    SCSIIO.RequestFlags =
						    (MPI2_REQ_DESCRIPT_FLAGS_NO_LOCK <<
						    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);

					IoFlags |=
					    MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
					regLockFlags |=
					    (MR_RL_FLAGS_GRANT_DESTINATION_CUDA |
					    MR_RL_FLAGS_SEQ_NUM_ENABLE);

					ddi_put8(acc_handle,
					    &scsi_raid_io->ChainOffset, 0);
					ddi_put8(acc_handle,
					    &scsi_raid_io->RaidContext.nsegType,
					    ((0x01 << MPI2_NSEG_FLAGS_SHIFT) |
					    MPI2_TYPE_CUDA));
					ddi_put8(acc_handle,
					    &scsi_raid_io->RaidContext.
					    regLockFlags, regLockFlags);
					ddi_put16(acc_handle,
					    &scsi_raid_io->IoFlags, IoFlags);
				}

				if ((instance->load_balance_info[
				    acmd->device_id].loadBalanceFlag) &&
				    (io_info.isRead)) {
					io_info.devHandle =
					    get_updated_dev_handle(&instance->
					    load_balance_info[acmd->device_id],
					    &io_info);
					cmd->load_balance_flag |=
					    MEGASAS_LOAD_BALANCE_FLAG;
				} else {
					cmd->load_balance_flag &=
					    ~MEGASAS_LOAD_BALANCE_FLAG;
				}

				ReqDescUnion->SCSIIO.DevHandle =
				    io_info.devHandle;
				ddi_put16(acc_handle, &scsi_raid_io->DevHandle,
				    io_info.devHandle);

			} else { /* FP Not Possible */

				ddi_put8(acc_handle, &scsi_raid_io->Function,
				    MPI2_FUNCTION_LD_IO_REQUEST);

				ddi_put16(acc_handle,
				    &scsi_raid_io->DevHandle, acmd->device_id);

				ReqDescUnion->SCSIIO.RequestFlags =
				    (MPI2_REQ_DESCRIPT_FLAGS_LD_IO <<
				    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);

				ddi_put16(acc_handle,
				    &scsi_raid_io->RaidContext.timeoutValue,
				    local_map_ptr->raidMap.fpPdIoTimeoutSec);

				if (instance->gen3) {
					uint8_t regLockFlags = ddi_get8(
					    acc_handle,
					    &scsi_raid_io->RaidContext.
					    regLockFlags);

					if (regLockFlags ==
					    REGION_TYPE_UNUSED) {
						ReqDescUnion->
						    SCSIIO.RequestFlags =
						    (MPI2_REQ_DESCRIPT_FLAGS_NO_LOCK <<
						    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
					}

					regLockFlags |=
					    (MR_RL_FLAGS_GRANT_DESTINATION_CPU0 |
					    MR_RL_FLAGS_SEQ_NUM_ENABLE);

					ddi_put8(acc_handle,
					    &scsi_raid_io->RaidContext.nsegType,
					    ((0x01 << MPI2_NSEG_FLAGS_SHIFT) |
					    MPI2_TYPE_CUDA));
					ddi_put8(acc_handle,
					    &scsi_raid_io->RaidContext.
					    regLockFlags, regLockFlags);
				}
			} /* Not FP */

			/* Release SYNC MAP UPDATE lock */
			mutex_exit(&instance->sync_map_mtx);

			break;

		case 0x35: { /* SCMD_SYNCHRONIZE_CACHE */
			return_raid_msg_pkt(instance, cmd);
			*cmd_done = 1;
			return (NULL);
		}

		case SCMD_MODE_SENSE:
		case SCMD_MODE_SENSE_G1: {
			union scsi_cdb	*cdbp;
			uint16_t	page_code;

			cdbp = (void *)pkt->pkt_cdbp;
			page_code = (uint16_t)cdbp->cdb_un.sg.scsi[0];
			switch (page_code) {
			case 0x3:
			case 0x4:
				(void) mrsas_mode_sense_build(pkt);
				return_raid_msg_pkt(instance, cmd);
				*cmd_done = 1;
				return (NULL);
			}
			return (cmd);
		}

		default:
			/* Pass-through command to logical drive */
			ddi_put8(acc_handle, &scsi_raid_io->Function,
			    MPI2_FUNCTION_LD_IO_REQUEST);
			ddi_put8(acc_handle, &scsi_raid_io->LUN[1], acmd->lun);
			ddi_put16(acc_handle, &scsi_raid_io->DevHandle,
			    acmd->device_id);
			ReqDescUnion->SCSIIO.RequestFlags =
			    (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
			    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
			break;
		}
	} else { /* Physical */
		/* Pass-through command to physical drive */

		/* Acquire SYNC MAP UPDATE lock */
		mutex_enter(&instance->sync_map_mtx);

		local_map_ptr = instance->ld_map[instance->map_id & 1];

		ddi_put8(acc_handle, &scsi_raid_io->Function,
		    MPI2_FUNCTION_SCSI_IO_REQUEST);

		ReqDescUnion->SCSIIO.RequestFlags =
		    (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
		    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);

		ddi_put16(acc_handle, &scsi_raid_io->DevHandle,
		    local_map_ptr->raidMap.
		    devHndlInfo[acmd->device_id].curDevHdl);

		/* Set regLockFlags to REGION_TYPE_BYPASS */
		ddi_put8(acc_handle,
		    &scsi_raid_io->RaidContext.regLockFlags, 0);
		ddi_put64(acc_handle,
		    &scsi_raid_io->RaidContext.regLockRowLBA, 0);
		ddi_put32(acc_handle,
		    &scsi_raid_io->RaidContext.regLockLength, 0);
		ddi_put8(acc_handle,
		    &scsi_raid_io->RaidContext.RAIDFlags,
		    MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD <<
		    MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT);
		ddi_put16(acc_handle,
		    &scsi_raid_io->RaidContext.timeoutValue,
		    local_map_ptr->raidMap.fpPdIoTimeoutSec);
		ddi_put16(acc_handle,
		    &scsi_raid_io->RaidContext.ldTargetId,
		    acmd->device_id);
		ddi_put8(acc_handle,
		    &scsi_raid_io->LUN[1], acmd->lun);

		if (instance->fast_path_io && instance->gen3) {
			uint16_t IoFlags = ddi_get16(acc_handle,
			    &scsi_raid_io->IoFlags);
			IoFlags |= MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
			ddi_put16(acc_handle, &scsi_raid_io->IoFlags, IoFlags);
		}
		ddi_put16(acc_handle, &ReqDescUnion->SCSIIO.DevHandle,
		    local_map_ptr->raidMap.
		    devHndlInfo[acmd->device_id].curDevHdl);

		/* Release SYNC MAP UPDATE lock */
		mutex_exit(&instance->sync_map_mtx);
	}

	/* Set sense buffer physical address/length in scsi_io_request. */
	ddi_put32(acc_handle, &scsi_raid_io->SenseBufferLowAddress,
	    cmd->sense_phys_addr1);
	ddi_put8(acc_handle, &scsi_raid_io->SenseBufferLength, SENSE_LENGTH);

	/* Construct SGL */
	ddi_put8(acc_handle, &scsi_raid_io->SGLOffset0,
	    offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4);

	(void) mr_sas_tbolt_build_sgl(instance, acmd, cmd,
	    scsi_raid_io, &datalen);

	ddi_put32(acc_handle, &scsi_raid_io->DataLength, datalen);

	con_log(CL_ANN, (CE_CONT,
	    "tbolt_build_cmd CDB[0] =%x, TargetID =%x\n",
	    pkt->pkt_cdbp[0], acmd->device_id));
	con_log(CL_DLEVEL1, (CE_CONT,
	    "data length = %x\n",
	    scsi_raid_io->DataLength));
	con_log(CL_DLEVEL1, (CE_CONT,
	    "cdb length = %x\n",
	    acmd->cmd_cdblen));

	return (cmd);
}
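
/*
 * To summarize the descriptor-type selection above: fast-path-capable
 * LD reads/writes and physical pass-through go out as
 * MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY (downgraded to NO_LOCK on gen3
 * when the region lock is unused), non-fast-path LD reads/writes as
 * MPI2_REQ_DESCRIPT_FLAGS_LD_IO, and other LD pass-through commands as
 * MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO.
 */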
1881 uint32_t
1882 tbolt_read_fw_status_reg(struct mrsas_instance *instance)
1884 return ((uint32_t)RD_OB_SCRATCH_PAD_0(instance));
1887 void
1888 tbolt_issue_cmd(struct mrsas_cmd *cmd, struct mrsas_instance *instance)
1890 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
1891 atomic_inc_16(&instance->fw_outstanding);
1893 struct scsi_pkt *pkt;
1895 con_log(CL_ANN1,
1896 (CE_NOTE, "tbolt_issue_cmd: cmd->[SMID]=0x%X", cmd->SMID));
1898 con_log(CL_DLEVEL1, (CE_CONT,
1899 " [req desc Words] %" PRIx64 " \n", req_desc->Words));
1900 con_log(CL_DLEVEL1, (CE_CONT,
1901 " [req desc low part] %x \n",
1902 (uint_t)(req_desc->Words & 0xffffffffff)));
1903 con_log(CL_DLEVEL1, (CE_CONT,
1904 " [req desc high part] %x \n", (uint_t)(req_desc->Words >> 32)));
1905 pkt = cmd->pkt;
1907 if (pkt) {
1908 con_log(CL_ANN1, (CE_CONT, "%llx :TBOLT issue_cmd_ppc: "
1909 "ISSUED CMD TO FW : called : cmd"
1910 ": %p instance : %p pkt : %p pkt_time : %x\n",
1911 gethrtime(), (void *)cmd, (void *)instance,
1912 (void *)pkt, cmd->drv_pkt_time));
1913 if (instance->adapterresetinprogress) {
1914 cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
1915 con_log(CL_ANN, (CE_NOTE,
1916 "TBOLT Reset the scsi_pkt timer"));
1917 } else {
1918 push_pending_mfi_pkt(instance, cmd);
1921 } else {
1922 con_log(CL_ANN1, (CE_CONT, "%llx :TBOLT issue_cmd_ppc: "
1923 "ISSUED CMD TO FW : called : cmd : %p, instance: %p "
1924 "(NO PKT)\n", gethrtime(), (void *)cmd, (void *)instance));
1927 /* Issue the command to the FW */
1928 mutex_enter(&instance->reg_write_mtx);
1929 WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
1930 WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
1931 mutex_exit(&instance->reg_write_mtx);
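/*
 * Note on the queue-port write above: the inbound queue port is exposed
 * as a pair of 32-bit registers (WR_IB_LOW_QPORT/WR_IB_HIGH_QPORT), so
 * the 64-bit request descriptor has to be written in two halves.
 * Holding reg_write_mtx across both writes keeps the low/high words of
 * concurrent submissions from interleaving; this is an inference from
 * the pattern repeated at every queue-port write in this file.
 */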
1935 * tbolt_issue_cmd_in_sync_mode
1938 tbolt_issue_cmd_in_sync_mode(struct mrsas_instance *instance,
1939 struct mrsas_cmd *cmd)
1941 int i;
1942 uint32_t msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
1943 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
1945 struct mrsas_header *hdr;
1946 hdr = (struct mrsas_header *)&cmd->frame->hdr;
1948 con_log(CL_ANN,
1949 (CE_NOTE, "tbolt_issue_cmd_in_sync_mode: cmd->[SMID]=0x%X",
1950 cmd->SMID));
1953 if (instance->adapterresetinprogress) {
1954 cmd->drv_pkt_time = ddi_get16
1955 (cmd->frame_dma_obj.acc_handle, &hdr->timeout);
1956 if (cmd->drv_pkt_time < debug_timeout_g)
1957 cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
1958 con_log(CL_ANN, (CE_NOTE, "tbolt_issue_cmd_in_sync_mode: "
1959 "RESET-IN-PROGRESS, issue cmd & return."));
1961 mutex_enter(&instance->reg_write_mtx);
1962 WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
1963 WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
1964 mutex_exit(&instance->reg_write_mtx);
1966 return (DDI_SUCCESS);
1967 } else {
1968 con_log(CL_ANN1, (CE_NOTE,
1969 "tbolt_issue_cmd_in_sync_mode: pushing the pkt"));
1970 push_pending_mfi_pkt(instance, cmd);
1973 con_log(CL_DLEVEL2, (CE_NOTE,
1974 "HighQport offset :%p",
1975 (void *)((uintptr_t)(instance)->regmap + IB_HIGH_QPORT)));
1976 con_log(CL_DLEVEL2, (CE_NOTE,
1977 "LowQport offset :%p",
1978 (void *)((uintptr_t)(instance)->regmap + IB_LOW_QPORT)));
1980 cmd->sync_cmd = MRSAS_TRUE;
1981 cmd->cmd_status = ENODATA;
1984 mutex_enter(&instance->reg_write_mtx);
1985 WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
1986 WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
1987 mutex_exit(&instance->reg_write_mtx);
1989 con_log(CL_ANN1, (CE_NOTE,
1990 " req desc high part %x", (uint_t)(req_desc->Words >> 32)));
1991 con_log(CL_ANN1, (CE_NOTE, " req desc low part %x",
1992 (uint_t)(req_desc->Words & 0xffffffff)));
1994 mutex_enter(&instance->int_cmd_mtx);
1995 for (i = 0; i < msecs && (cmd->cmd_status == ENODATA); i++) {
1996 cv_wait(&instance->int_cmd_cv, &instance->int_cmd_mtx);
1998 mutex_exit(&instance->int_cmd_mtx);
2001 if (i < (msecs - 1)) {
2002 return (DDI_SUCCESS);
2003 } else {
2004 return (DDI_FAILURE);
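/*
 * The wait loop above counts cv_wait() wakeups rather than wall-clock
 * time; the completion side (tbolt_complete_cmd_in_sync_mode(), later
 * in this file) overwrites cmd_status and broadcasts int_cmd_cv, which
 * is what ends the wait. A condensed sketch of the handshake
 * (illustrative only, not driver code):
 *
 *	waiter:					completer:
 *	cmd->cmd_status = ENODATA;
 *	...issue command to FW...		cmd->cmd_status = <fw status>;
 *	while (cmd->cmd_status == ENODATA)	cv_broadcast(&int_cmd_cv);
 *		cv_wait(&int_cmd_cv, &int_cmd_mtx);
 */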
2009 * tbolt_issue_cmd_in_poll_mode
2012 tbolt_issue_cmd_in_poll_mode(struct mrsas_instance *instance,
2013 struct mrsas_cmd *cmd)
2015 int i;
2016 uint16_t flags;
2017 uint32_t msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
2018 struct mrsas_header *frame_hdr;
2020 con_log(CL_ANN,
2021 (CE_NOTE, "tbolt_issue_cmd_in_poll_mode: cmd->[SMID]=0x%X",
2022 cmd->SMID));
2024 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
2026 frame_hdr = (struct mrsas_header *)&cmd->frame->hdr;
2027 ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status,
2028 MFI_CMD_STATUS_POLL_MODE);
2029 flags = ddi_get16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags);
2030 flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2031 ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags);
2033 con_log(CL_ANN1, (CE_NOTE, " req desc low part %x",
2034 (uint_t)(req_desc->Words & 0xffffffff)));
2035 con_log(CL_ANN1, (CE_NOTE,
2036 " req desc high part %x", (uint_t)(req_desc->Words >> 32)));
2038 /* issue the frame using inbound queue port */
2039 mutex_enter(&instance->reg_write_mtx);
2040 WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
2041 WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
2042 mutex_exit(&instance->reg_write_mtx);
2044 for (i = 0; i < msecs && (
2045 ddi_get8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status)
2046 == MFI_CMD_STATUS_POLL_MODE); i++) {
2047 /* wait for cmd_status to change from 0xFF */
2048 drv_usecwait(MILLISEC); /* wait for 1000 usecs */
2051 DTRACE_PROBE1(tbolt_complete_poll_cmd, uint8_t, i);
2053 if (ddi_get8(cmd->frame_dma_obj.acc_handle,
2054 &frame_hdr->cmd_status) == MFI_CMD_STATUS_POLL_MODE) {
2055 con_log(CL_ANN1, (CE_NOTE,
2056 " cmd failed %" PRIx64, (req_desc->Words)));
2057 return (DDI_FAILURE);
2060 return (DDI_SUCCESS);
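/*
 * Poll mode, summarized: cmd_status is poisoned with
 * MFI_CMD_STATUS_POLL_MODE (0xFF) before issue, and the loop above
 * spins until the FW DMAs a real status over it.
 * MFI_FRAME_DONT_POST_IN_REPLY_QUEUE keeps the completion out of the
 * reply ring so no interrupt path races the poll; if the poison byte
 * is still present after MFI_POLL_TIMEOUT_SECS, the command is
 * declared failed.
 */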
2063 void
2064 tbolt_enable_intr(struct mrsas_instance *instance)
2066 /* TODO: For Thunderbolt/Invader also clear intr on enable */
2067 /* writel(~0, &regs->outbound_intr_status); */
2068 /* readl(&regs->outbound_intr_status); */
2070 WR_OB_INTR_MASK(~(MFI_FUSION_ENABLE_INTERRUPT_MASK), instance);
2072 /* dummy read to force PCI flush */
2073 (void) RD_OB_INTR_MASK(instance);
2077 void
2078 tbolt_disable_intr(struct mrsas_instance *instance)
2080 uint32_t mask = 0xFFFFFFFF;
2082 WR_OB_INTR_MASK(mask, instance);
2084 /* Dummy readl to force pci flush */
2086 (void) RD_OB_INTR_MASK(instance);
2091 tbolt_intr_ack(struct mrsas_instance *instance)
2093 uint32_t status;
2095 /* check if it is our interrupt */
2096 status = RD_OB_INTR_STATUS(instance);
2097 con_log(CL_ANN1, (CE_NOTE,
2098 "chkpnt: Entered tbolt_intr_ack status = %d", status));
2100 if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK)) {
2101 return (DDI_INTR_UNCLAIMED);
2104 if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
2105 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
2106 return (DDI_INTR_UNCLAIMED);
2109 if ((status & 1) || (status & MFI_FUSION_ENABLE_INTERRUPT_MASK)) {
2110 /* clear the interrupt by writing back the same value */
2111 WR_OB_INTR_STATUS(status, instance);
2112 /* dummy READ */
2113 (void) RD_OB_INTR_STATUS(instance);
2115 return (DDI_INTR_CLAIMED);
2119 * get_raid_msg_pkt : Get a command from the free pool
2120 * After successful allocation, the caller of this routine
2121 * must clear the frame buffer (memset to zero) before
2122 * using the packet further.
2124 * ***** Note *****
2125 * After clearing the frame buffer, the context id of the
2126 * frame buffer SHOULD be restored.
2129 struct mrsas_cmd *
2130 get_raid_msg_pkt(struct mrsas_instance *instance)
2132 mlist_t *head = &instance->cmd_pool_list;
2133 struct mrsas_cmd *cmd = NULL;
2135 mutex_enter(&instance->cmd_pool_mtx);
2136 ASSERT(mutex_owned(&instance->cmd_pool_mtx));
2139 if (!mlist_empty(head)) {
2140 cmd = mlist_entry(head->next, struct mrsas_cmd, list);
2141 mlist_del_init(head->next);
2143 if (cmd != NULL) {
2144 cmd->pkt = NULL;
2145 cmd->retry_count_for_ocr = 0;
2146 cmd->drv_pkt_time = 0;
2148 mutex_exit(&instance->cmd_pool_mtx);
2150 if (cmd != NULL)
2151 bzero(cmd->scsi_io_request,
2152 sizeof (Mpi2RaidSCSIIORequest_t));
2153 return (cmd);
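/*
 * A minimal sketch of the caller-side frame reuse pattern the note
 * above refers to (the same sequence appears verbatim in
 * mrsas_tbolt_sync_map_info() later in this file):
 *
 *	cmd = get_raid_msg_pkt(instance);
 *	bzero((char *)&cmd->frame[0], sizeof (union mrsas_frame));
 *	ddi_put32(cmd->frame_dma_obj.acc_handle,
 *	    &cmd->frame->hdr.context, cmd->index);
 */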
2156 struct mrsas_cmd *
2157 get_raid_msg_mfi_pkt(struct mrsas_instance *instance)
2159 mlist_t *head = &instance->cmd_app_pool_list;
2160 struct mrsas_cmd *cmd = NULL;
2162 mutex_enter(&instance->cmd_app_pool_mtx);
2163 ASSERT(mutex_owned(&instance->cmd_app_pool_mtx));
2165 if (!mlist_empty(head)) {
2166 cmd = mlist_entry(head->next, struct mrsas_cmd, list);
2167 mlist_del_init(head->next);
2169 if (cmd != NULL) {
2170 cmd->retry_count_for_ocr = 0;
2171 cmd->drv_pkt_time = 0;
2172 cmd->pkt = NULL;
2173 cmd->request_desc = NULL;
2177 mutex_exit(&instance->cmd_app_pool_mtx);
2179 if (cmd != NULL) {
2180 bzero(cmd->scsi_io_request,
2181 sizeof (Mpi2RaidSCSIIORequest_t));
2184 return (cmd);
2188 * return_raid_msg_pkt : Return a cmd to free command pool
2190 void
2191 return_raid_msg_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
2193 mutex_enter(&instance->cmd_pool_mtx);
2194 ASSERT(mutex_owned(&instance->cmd_pool_mtx));
2197 mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
2199 mutex_exit(&instance->cmd_pool_mtx);
2202 void
2203 return_raid_msg_mfi_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
2205 mutex_enter(&instance->cmd_app_pool_mtx);
2206 ASSERT(mutex_owned(&instance->cmd_app_pool_mtx));
2208 mlist_add_tail(&cmd->list, &instance->cmd_app_pool_list);
2210 mutex_exit(&instance->cmd_app_pool_mtx);
2214 void
2215 mr_sas_tbolt_build_mfi_cmd(struct mrsas_instance *instance,
2216 struct mrsas_cmd *cmd)
2218 Mpi2RaidSCSIIORequest_t *scsi_raid_io;
2219 Mpi25IeeeSgeChain64_t *scsi_raid_io_sgl_ieee;
2220 MRSAS_REQUEST_DESCRIPTOR_UNION *ReqDescUnion;
2221 uint32_t index;
2222 ddi_acc_handle_t acc_handle =
2223 instance->mpi2_frame_pool_dma_obj.acc_handle;
2225 if (!instance->tbolt) {
2226 con_log(CL_ANN, (CE_NOTE, "Not MFA enabled."));
2227 return;
2230 index = cmd->index;
2232 ReqDescUnion = mr_sas_get_request_descriptor(instance, index);
2234 if (!ReqDescUnion) {
2235 con_log(CL_ANN1, (CE_NOTE, "[NULL REQDESC]"));
2236 return;
2239 con_log(CL_ANN1, (CE_NOTE, "[SMID]%x", cmd->SMID));
2241 ReqDescUnion->Words = 0;
2243 ReqDescUnion->SCSIIO.RequestFlags =
2244 (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
2245 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
2247 ReqDescUnion->SCSIIO.SMID = cmd->SMID;
2249 cmd->request_desc = ReqDescUnion;
2251 /* get raid message frame pointer */
2252 scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;
2254 if (instance->gen3) {
2255 Mpi25IeeeSgeChain64_t *sgl_ptr_end = (Mpi25IeeeSgeChain64_t *)
2256 &scsi_raid_io->SGL.IeeeChain;
2257 sgl_ptr_end += instance->max_sge_in_main_msg - 1;
2258 ddi_put8(acc_handle, &sgl_ptr_end->Flags, 0);
2261 ddi_put8(acc_handle, &scsi_raid_io->Function,
2262 MPI2_FUNCTION_PASSTHRU_IO_REQUEST);
2264 ddi_put8(acc_handle, &scsi_raid_io->SGLOffset0,
2265 offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4);
2267 ddi_put8(acc_handle, &scsi_raid_io->ChainOffset,
2268 (U8)offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 16);
2270 ddi_put32(acc_handle, &scsi_raid_io->SenseBufferLowAddress,
2271 cmd->sense_phys_addr1);
2274 scsi_raid_io_sgl_ieee =
2275 (Mpi25IeeeSgeChain64_t *)&scsi_raid_io->SGL.IeeeChain;
2277 ddi_put64(acc_handle, &scsi_raid_io_sgl_ieee->Address,
2278 (U64)cmd->frame_phys_addr);
2280 ddi_put8(acc_handle,
2281 &scsi_raid_io_sgl_ieee->Flags, (IEEE_SGE_FLAGS_CHAIN_ELEMENT |
2282 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR));
2283 /* LSI hardcoded 1024 here instead of using MEGASAS_MAX_SZ_CHAIN_FRAME. */
2284 ddi_put32(acc_handle, &scsi_raid_io_sgl_ieee->Length, 1024);
2286 con_log(CL_ANN1, (CE_NOTE,
2287 "[MFI CMD PHY ADDRESS]:%" PRIx64,
2288 scsi_raid_io_sgl_ieee->Address));
2289 con_log(CL_ANN1, (CE_NOTE,
2290 "[SGL Length]:%x", scsi_raid_io_sgl_ieee->Length));
2291 con_log(CL_ANN1, (CE_NOTE, "[SGL Flags]:%x",
2292 scsi_raid_io_sgl_ieee->Flags));
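/*
 * Summary of the wrapping done above: a legacy MFI frame cannot be
 * posted to a Thunderbolt controller directly, so it is carried as the
 * payload of an MPI2 pass-through request whose single IEEE chain SGE
 * points at the MFI frame's physical address
 * (cmd->frame_phys_addr). The FW follows the chain and interprets the
 * frame as an ordinary MFI command.
 */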
2296 void
2297 tbolt_complete_cmd(struct mrsas_instance *instance,
2298 struct mrsas_cmd *cmd)
2300 uint8_t status;
2301 uint8_t extStatus;
2302 uint8_t function;
2303 uint8_t arm;
2304 struct scsa_cmd *acmd;
2305 struct scsi_pkt *pkt;
2306 struct scsi_arq_status *arqstat;
2307 Mpi2RaidSCSIIORequest_t *scsi_raid_io;
2308 LD_LOAD_BALANCE_INFO *lbinfo;
2309 ddi_acc_handle_t acc_handle =
2310 instance->mpi2_frame_pool_dma_obj.acc_handle;
2312 scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;
2314 status = ddi_get8(acc_handle, &scsi_raid_io->RaidContext.status);
2315 extStatus = ddi_get8(acc_handle, &scsi_raid_io->RaidContext.extStatus);
2317 con_log(CL_DLEVEL3, (CE_NOTE, "status %x", status));
2318 con_log(CL_DLEVEL3, (CE_NOTE, "extStatus %x", extStatus));
2320 if (status != MFI_STAT_OK) {
2321 con_log(CL_ANN, (CE_WARN,
2322 "IO Cmd Failed SMID %x", cmd->SMID));
2323 } else {
2324 con_log(CL_ANN, (CE_NOTE,
2325 "IO Cmd Success SMID %x", cmd->SMID));
2328 /* regular commands */
2330 function = ddi_get8(acc_handle, &scsi_raid_io->Function);
2331 DTRACE_PROBE3(tbolt_complete_cmd, uint8_t, function,
2332 uint8_t, status, uint8_t, extStatus);
2334 switch (function) {
2336 case MPI2_FUNCTION_SCSI_IO_REQUEST : /* Fast Path IO. */
2337 acmd = (struct scsa_cmd *)cmd->cmd;
2338 lbinfo = &instance->load_balance_info[acmd->device_id];
2340 if (cmd->load_balance_flag & MEGASAS_LOAD_BALANCE_FLAG) {
2341 arm = lbinfo->raid1DevHandle[0] ==
2342 scsi_raid_io->DevHandle ? 0 : 1;
2344 lbinfo->scsi_pending_cmds[arm]--;
2345 cmd->load_balance_flag &= ~MEGASAS_LOAD_BALANCE_FLAG;
2347 con_log(CL_DLEVEL3, (CE_NOTE,
2348 "FastPath IO Completion Success "));
2349 /* FALLTHRU */
2351 case MPI2_FUNCTION_LD_IO_REQUEST : { /* Regular Path IO. */
2352 acmd = (struct scsa_cmd *)cmd->cmd;
2353 pkt = (struct scsi_pkt *)CMD2PKT(acmd);
2355 if (acmd->cmd_flags & CFLAG_DMAVALID) {
2356 if (acmd->cmd_flags & CFLAG_CONSISTENT) {
2357 (void) ddi_dma_sync(acmd->cmd_dmahandle,
2358 acmd->cmd_dma_offset, acmd->cmd_dma_len,
2359 DDI_DMA_SYNC_FORCPU);
2363 pkt->pkt_reason = CMD_CMPLT;
2364 pkt->pkt_statistics = 0;
2365 pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
2366 STATE_SENT_CMD | STATE_XFERRED_DATA | STATE_GOT_STATUS;
2368 con_log(CL_ANN, (CE_CONT, " CDB[0] = %x completed for %s: "
2369 "size %lx SMID %x cmd_status %x", pkt->pkt_cdbp[0],
2370 ((acmd->islogical) ? "LD" : "PD"),
2371 acmd->cmd_dmacount, cmd->SMID, status));
2373 if (pkt->pkt_cdbp[0] == SCMD_INQUIRY) {
2374 struct scsi_inquiry *inq;
2376 if (acmd->cmd_dmacount != 0) {
2377 bp_mapin(acmd->cmd_buf);
2378 inq = (struct scsi_inquiry *)
2379 acmd->cmd_buf->b_un.b_addr;
2381 /* don't expose physical drives to OS */
2382 if (acmd->islogical &&
2383 (status == MFI_STAT_OK)) {
2384 display_scsi_inquiry((caddr_t)inq);
2385 } else if ((status == MFI_STAT_OK) &&
2386 inq->inq_dtype == DTYPE_DIRECT) {
2387 display_scsi_inquiry((caddr_t)inq);
2388 } else {
2389 /* for physical disk */
2390 status = MFI_STAT_DEVICE_NOT_FOUND;
2395 switch (status) {
2396 case MFI_STAT_OK:
2397 pkt->pkt_scbp[0] = STATUS_GOOD;
2398 break;
2399 case MFI_STAT_LD_CC_IN_PROGRESS:
2400 case MFI_STAT_LD_RECON_IN_PROGRESS:
2401 pkt->pkt_scbp[0] = STATUS_GOOD;
2402 break;
2403 case MFI_STAT_LD_INIT_IN_PROGRESS:
2404 pkt->pkt_reason = CMD_TRAN_ERR;
2405 break;
2406 case MFI_STAT_SCSI_IO_FAILED:
2407 dev_err(instance->dip, CE_WARN,
2408 "tbolt_complete_cmd: scsi_io failed");
2409 pkt->pkt_reason = CMD_TRAN_ERR;
2410 break;
2411 case MFI_STAT_SCSI_DONE_WITH_ERROR:
2412 con_log(CL_ANN, (CE_WARN,
2413 "tbolt_complete_cmd: scsi_done with error"));
2415 pkt->pkt_reason = CMD_CMPLT;
2416 ((struct scsi_status *)pkt->pkt_scbp)->sts_chk = 1;
2418 if (pkt->pkt_cdbp[0] == SCMD_TEST_UNIT_READY) {
2419 con_log(CL_ANN,
2420 (CE_WARN, "TEST_UNIT_READY fail"));
2421 } else {
2422 pkt->pkt_state |= STATE_ARQ_DONE;
2423 arqstat = (void *)(pkt->pkt_scbp);
2424 arqstat->sts_rqpkt_reason = CMD_CMPLT;
2425 arqstat->sts_rqpkt_resid = 0;
2426 arqstat->sts_rqpkt_state |=
2427 STATE_GOT_BUS | STATE_GOT_TARGET
2428 | STATE_SENT_CMD
2429 | STATE_XFERRED_DATA;
2430 *(uint8_t *)&arqstat->sts_rqpkt_status =
2431 STATUS_GOOD;
2432 con_log(CL_ANN1,
2433 (CE_NOTE, "Copying Sense data %x",
2434 cmd->SMID));
2436 ddi_rep_get8(acc_handle,
2437 (uint8_t *)&(arqstat->sts_sensedata),
2438 cmd->sense1,
2439 sizeof (struct scsi_extended_sense),
2440 DDI_DEV_AUTOINCR);
2443 break;
2444 case MFI_STAT_LD_OFFLINE:
2445 dev_err(instance->dip, CE_WARN,
2446 "tbolt_complete_cmd: ld offline "
2447 "CDB[0]=0x%x targetId=0x%x devhandle=0x%x",
2448 /* UNDO: */
2449 ddi_get8(acc_handle, &scsi_raid_io->CDB.CDB32[0]),
2451 ddi_get16(acc_handle,
2452 &scsi_raid_io->RaidContext.ldTargetId),
2454 ddi_get16(acc_handle, &scsi_raid_io->DevHandle));
2456 pkt->pkt_reason = CMD_DEV_GONE;
2457 pkt->pkt_statistics = STAT_DISCON;
2458 break;
2459 case MFI_STAT_DEVICE_NOT_FOUND:
2460 con_log(CL_ANN, (CE_CONT,
2461 "tbolt_complete_cmd: device not found error"));
2462 pkt->pkt_reason = CMD_DEV_GONE;
2463 pkt->pkt_statistics = STAT_DISCON;
2464 break;
2466 case MFI_STAT_LD_LBA_OUT_OF_RANGE:
2467 pkt->pkt_state |= STATE_ARQ_DONE;
2468 pkt->pkt_reason = CMD_CMPLT;
2469 ((struct scsi_status *)pkt->pkt_scbp)->sts_chk = 1;
2471 arqstat = (void *)(pkt->pkt_scbp);
2472 arqstat->sts_rqpkt_reason = CMD_CMPLT;
2473 arqstat->sts_rqpkt_resid = 0;
2474 arqstat->sts_rqpkt_state |= STATE_GOT_BUS
2475 | STATE_GOT_TARGET | STATE_SENT_CMD
2476 | STATE_XFERRED_DATA;
2477 *(uint8_t *)&arqstat->sts_rqpkt_status = STATUS_GOOD;
2479 arqstat->sts_sensedata.es_valid = 1;
2480 arqstat->sts_sensedata.es_key = KEY_ILLEGAL_REQUEST;
2481 arqstat->sts_sensedata.es_class = CLASS_EXTENDED_SENSE;
2484 * LOGICAL BLOCK ADDRESS OUT OF RANGE:
2485 * ASC: 0x21h; ASCQ: 0x00h;
2487 arqstat->sts_sensedata.es_add_code = 0x21;
2488 arqstat->sts_sensedata.es_qual_code = 0x00;
2489 break;
2490 case MFI_STAT_INVALID_CMD:
2491 case MFI_STAT_INVALID_DCMD:
2492 case MFI_STAT_INVALID_PARAMETER:
2493 case MFI_STAT_INVALID_SEQUENCE_NUMBER:
2494 default:
2495 dev_err(instance->dip, CE_WARN,
2496 "tbolt_complete_cmd: Unknown status!");
2497 pkt->pkt_reason = CMD_TRAN_ERR;
2499 break;
2502 atomic_add_16(&instance->fw_outstanding, (-1));
2504 (void) mrsas_common_check(instance, cmd);
2505 if (acmd->cmd_dmahandle) {
2506 if (mrsas_check_dma_handle(acmd->cmd_dmahandle) !=
2507 DDI_SUCCESS) {
2508 ddi_fm_service_impact(instance->dip,
2509 DDI_SERVICE_UNAFFECTED);
2510 pkt->pkt_reason = CMD_TRAN_ERR;
2511 pkt->pkt_statistics = 0;
2515 /* Call the callback routine */
2516 if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && pkt->pkt_comp)
2517 (*pkt->pkt_comp)(pkt);
2519 con_log(CL_ANN1, (CE_NOTE, "Free smid %x", cmd->SMID));
2521 ddi_put8(acc_handle, &scsi_raid_io->RaidContext.status, 0);
2523 ddi_put8(acc_handle, &scsi_raid_io->RaidContext.extStatus, 0);
2525 return_raid_msg_pkt(instance, cmd);
2526 break;
2528 case MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /* MFA command. */
2530 if (cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO &&
2531 cmd->frame->dcmd.mbox.b[1] == 1) {
2533 mutex_enter(&instance->sync_map_mtx);
2535 con_log(CL_ANN, (CE_NOTE,
2536 "LDMAP sync command SMID RECEIVED 0x%X",
2537 cmd->SMID));
2538 if (cmd->frame->hdr.cmd_status != 0) {
2539 dev_err(instance->dip, CE_WARN,
2540 "map sync failed, status = 0x%x.",
2541 cmd->frame->hdr.cmd_status);
2542 } else {
2543 instance->map_id++;
2544 con_log(CL_ANN1, (CE_NOTE,
2545 "map sync received, switched map_id to %"
2546 PRIu64, instance->map_id));
2549 if (MR_ValidateMapInfo(
2550 instance->ld_map[instance->map_id & 1],
2551 instance->load_balance_info)) {
2552 instance->fast_path_io = 1;
2553 } else {
2554 instance->fast_path_io = 0;
2557 con_log(CL_ANN, (CE_NOTE,
2558 "instance->fast_path_io %d",
2559 instance->fast_path_io));
2561 instance->unroll.syncCmd = 0;
2563 if (instance->map_update_cmd == cmd) {
2564 return_raid_msg_pkt(instance, cmd);
2565 atomic_add_16(&instance->fw_outstanding, (-1));
2566 (void) mrsas_tbolt_sync_map_info(instance);
2569 con_log(CL_ANN1, (CE_NOTE,
2570 "LDMAP sync completed, ldcount=%d",
2571 instance->ld_map[instance->map_id & 1]
2572 ->raidMap.ldCount));
2573 mutex_exit(&instance->sync_map_mtx);
2574 break;
2577 if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT) {
2578 con_log(CL_ANN1, (CE_CONT,
2579 "AEN command SMID RECEIVED 0x%X",
2580 cmd->SMID));
2581 if ((instance->aen_cmd == cmd) &&
2582 (instance->aen_cmd->abort_aen)) {
2583 con_log(CL_ANN, (CE_WARN, "mrsas_softintr: "
2584 "aborted_aen returned"));
2585 } else {
2586 atomic_add_16(&instance->fw_outstanding, (-1));
2587 service_mfi_aen(instance, cmd);
2591 if (cmd->sync_cmd == MRSAS_TRUE) {
2592 con_log(CL_ANN1, (CE_CONT,
2593 "Sync-mode Command Response SMID RECEIVED 0x%X",
2594 cmd->SMID));
2596 tbolt_complete_cmd_in_sync_mode(instance, cmd);
2597 } else {
2598 con_log(CL_ANN, (CE_CONT,
2599 "tbolt_complete_cmd: Wrong SMID RECEIVED 0x%X",
2600 cmd->SMID));
2602 break;
2603 default:
2604 mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
2605 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
2607 /* free message */
2608 con_log(CL_ANN,
2609 (CE_NOTE, "tbolt_complete_cmd: Unknown Type!!!!!!!!"));
2610 break;
2614 uint_t
2615 mr_sas_tbolt_process_outstanding_cmd(struct mrsas_instance *instance)
2617 uint8_t replyType;
2618 Mpi2SCSIIOSuccessReplyDescriptor_t *replyDesc;
2619 Mpi2ReplyDescriptorsUnion_t *desc;
2620 uint16_t smid;
2621 union desc_value d_val;
2622 struct mrsas_cmd *cmd;
2624 struct mrsas_header *hdr;
2625 struct scsi_pkt *pkt;
2627 (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2628 0, 0, DDI_DMA_SYNC_FORDEV);
2630 (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2631 0, 0, DDI_DMA_SYNC_FORCPU);
2633 desc = instance->reply_frame_pool;
2634 desc += instance->reply_read_index;
2636 replyDesc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
2637 replyType = replyDesc->ReplyFlags &
2638 MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
2640 if (replyType == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
2641 return (DDI_INTR_UNCLAIMED);
2643 if (mrsas_check_dma_handle(instance->mfi_internal_dma_obj.dma_handle)
2644 != DDI_SUCCESS) {
2645 mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
2646 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
2647 con_log(CL_ANN1,
2648 (CE_WARN, "mr_sas_tbolt_process_outstanding_cmd(): "
2649 "FMA check, returning DDI_INTR_UNCLAIMED"));
2650 return (DDI_INTR_CLAIMED);
2653 con_log(CL_ANN1, (CE_NOTE, "Reply Desc = %p Words = %" PRIx64,
2654 (void *)desc, desc->Words));
2656 d_val.word = desc->Words;
2659 /* Read Reply descriptor */
2660 while ((d_val.u1.low != 0xffffffff) &&
2661 (d_val.u1.high != 0xffffffff)) {
2663 (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2664 0, 0, DDI_DMA_SYNC_FORCPU);
2666 smid = replyDesc->SMID;
2668 if (!smid || smid > instance->max_fw_cmds + 1) {
2669 con_log(CL_ANN1, (CE_NOTE,
2670 "Reply Desc at Break = %p Words = %" PRIx64,
2671 (void *)desc, desc->Words));
2672 break;
2675 cmd = instance->cmd_list[smid - 1];
2676 if (!cmd) {
2677 con_log(CL_ANN1, (CE_NOTE, "mr_sas_tbolt_process_"
2678 "outstanding_cmd: Invalid command "
2679 " or Poll commad Received in completion path"));
2680 } else {
2681 mutex_enter(&instance->cmd_pend_mtx);
2682 if (cmd->sync_cmd == MRSAS_TRUE) {
2683 hdr = (struct mrsas_header *)&cmd->frame->hdr;
2684 if (hdr) {
2685 con_log(CL_ANN1, (CE_NOTE, "mr_sas_"
2686 "tbolt_process_outstanding_cmd:"
2687 " mlist_del_init(&cmd->list)."));
2688 mlist_del_init(&cmd->list);
2690 } else {
2691 pkt = cmd->pkt;
2692 if (pkt) {
2693 con_log(CL_ANN1, (CE_NOTE, "mr_sas_"
2694 "tbolt_process_outstanding_cmd:"
2695 "mlist_del_init(&cmd->list)."));
2696 mlist_del_init(&cmd->list);
2700 mutex_exit(&instance->cmd_pend_mtx);
2702 tbolt_complete_cmd(instance, cmd);
2704 /* set it back to all 1s. */
2705 desc->Words = -1LL;
2707 instance->reply_read_index++;
2709 if (instance->reply_read_index >= (instance->reply_q_depth)) {
2710 con_log(CL_ANN1, (CE_NOTE, "wrap around"));
2711 instance->reply_read_index = 0;
2714 /* Get the next reply descriptor */
2715 if (!instance->reply_read_index)
2716 desc = instance->reply_frame_pool;
2717 else
2718 desc++;
2720 replyDesc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
2722 d_val.word = desc->Words;
2724 con_log(CL_ANN1, (CE_NOTE,
2725 "Next Reply Desc = %p Words = %" PRIx64,
2726 (void *)desc, desc->Words));
2728 replyType = replyDesc->ReplyFlags &
2729 MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
2731 if (replyType == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
2732 break;
2734 } /* End of while loop. */
2736 /* update replyIndex to FW */
2737 WR_MPI2_REPLY_POST_INDEX(instance->reply_read_index, instance);
2740 (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2741 0, 0, DDI_DMA_SYNC_FORDEV);
2743 (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2744 0, 0, DDI_DMA_SYNC_FORCPU);
2745 return (DDI_INTR_CLAIMED);
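/*
 * Reply-ring bookkeeping, for reference: the descriptors are 64-bit
 * slots pre-filled with all 1s (see mrsas_reset_reply_desc() below).
 * The FW posts completions by overwriting slots in order, so the loop
 * above consumes entries until it sees the all-1s/UNUSED marker,
 * resets each consumed slot to -1LL, wraps at reply_q_depth, and
 * finally publishes the new read index to the FW via
 * WR_MPI2_REPLY_POST_INDEX.
 */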
2752 * tbolt_complete_cmd_in_sync_mode - Completes an internal command
2753 * @instance: Adapter soft state
2754 * @cmd: Command to be completed
2756 * tbolt_issue_cmd_in_sync_mode() waits for a command to complete
2757 * after it issues the command. This function wakes up that waiting
2758 * routine by broadcasting on the int_cmd_cv condition variable.
2760 void
2761 tbolt_complete_cmd_in_sync_mode(struct mrsas_instance *instance,
2762 struct mrsas_cmd *cmd)
2765 cmd->cmd_status = ddi_get8(cmd->frame_dma_obj.acc_handle,
2766 &cmd->frame->io.cmd_status);
2768 cmd->sync_cmd = MRSAS_FALSE;
2770 mutex_enter(&instance->int_cmd_mtx);
2771 if (cmd->cmd_status == ENODATA) {
2772 cmd->cmd_status = 0;
2774 cv_broadcast(&instance->int_cmd_cv);
2775 mutex_exit(&instance->int_cmd_mtx);
2780 * mrsas_tbolt_get_ld_map_info - Returns the ld_map structure
2781 * instance: Adapter soft state
2783 * Issues an internal command (DCMD) to read the FW's RAID map
2784 * structure. This information is mainly used to decide whether
2785 * fast-path I/O can be used.
2788 mrsas_tbolt_get_ld_map_info(struct mrsas_instance *instance)
2790 int ret = 0;
2791 struct mrsas_cmd *cmd = NULL;
2792 struct mrsas_dcmd_frame *dcmd;
2793 MR_FW_RAID_MAP_ALL *ci;
2794 uint32_t ci_h = 0;
2795 U32 size_map_info;
2797 cmd = get_raid_msg_pkt(instance);
2799 if (cmd == NULL) {
2800 dev_err(instance->dip, CE_WARN,
2801 "Failed to get a cmd from free-pool in get_ld_map_info()");
2802 return (DDI_FAILURE);
2805 dcmd = &cmd->frame->dcmd;
2807 size_map_info = sizeof (MR_FW_RAID_MAP) +
2808 (sizeof (MR_LD_SPAN_MAP) *
2809 (MAX_LOGICAL_DRIVES - 1));
2811 con_log(CL_ANN, (CE_NOTE,
2812 "size_map_info : 0x%x", size_map_info));
2814 ci = instance->ld_map[instance->map_id & 1];
2815 ci_h = instance->ld_map_phy[instance->map_id & 1];
2817 if (!ci) {
2818 dev_err(instance->dip, CE_WARN,
2819 "Failed to alloc mem for ld_map_info");
2820 return_raid_msg_pkt(instance, cmd);
2821 return (-1);
2824 bzero(ci, sizeof (*ci));
2825 bzero(dcmd->mbox.b, DCMD_MBOX_SZ);
2827 dcmd->cmd = MFI_CMD_OP_DCMD;
2828 dcmd->cmd_status = 0xFF;
2829 dcmd->sge_count = 1;
2830 dcmd->flags = MFI_FRAME_DIR_READ;
2831 dcmd->timeout = 0;
2832 dcmd->pad_0 = 0;
2833 dcmd->data_xfer_len = size_map_info;
2834 dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
2835 dcmd->sgl.sge32[0].phys_addr = ci_h;
2836 dcmd->sgl.sge32[0].length = size_map_info;
2839 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
2841 if (!instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
2842 ret = 0;
2843 con_log(CL_ANN1, (CE_NOTE, "Get LD Map Info success"));
2844 } else {
2845 dev_err(instance->dip, CE_WARN, "Get LD Map Info failed");
2846 ret = -1;
2849 return_raid_msg_pkt(instance, cmd);
2851 return (ret);
2854 void
2855 mrsas_dump_reply_desc(struct mrsas_instance *instance)
2857 uint32_t i;
2858 MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
2859 union desc_value d_val;
2861 reply_desc = instance->reply_frame_pool;
2863 for (i = 0; i < instance->reply_q_depth; i++, reply_desc++) {
2864 d_val.word = reply_desc->Words;
2865 con_log(CL_DLEVEL3, (CE_NOTE,
2866 "i=%d, %x:%x",
2867 i, d_val.u1.high, d_val.u1.low));
2872 * mrsas_tbolt_command_create - Create command for fast path.
2873 * @io_info: MegaRAID IO request packet pointer.
2874 * @ref_tag: Reference tag for RD/WRPROTECT
2876 * Create the command for fast path.
2878 void
2879 mrsas_tbolt_prepare_cdb(struct mrsas_instance *instance, U8 cdb[],
2880 struct IO_REQUEST_INFO *io_info, Mpi2RaidSCSIIORequest_t *scsi_io_request,
2881 U32 ref_tag)
2883 uint16_t EEDPFlags;
2884 uint32_t Control;
2885 ddi_acc_handle_t acc_handle =
2886 instance->mpi2_frame_pool_dma_obj.acc_handle;
2888 /* Prepare 32-byte CDB if DIF is supported on this device */
2889 con_log(CL_ANN, (CE_NOTE, "Prepare DIF CDB"));
2891 bzero(cdb, 32);
2893 cdb[0] = MRSAS_SCSI_VARIABLE_LENGTH_CMD;
2896 cdb[7] = MRSAS_SCSI_ADDL_CDB_LEN;
2898 if (io_info->isRead)
2899 cdb[9] = MRSAS_SCSI_SERVICE_ACTION_READ32;
2900 else
2901 cdb[9] = MRSAS_SCSI_SERVICE_ACTION_WRITE32;
2903 /* Verified against the Linux driver; set to MEGASAS_RD_WR_PROTECT_CHECK_ALL. */
2904 cdb[10] = MRSAS_RD_WR_PROTECT;
2906 /* LOGICAL BLOCK ADDRESS */
2907 cdb[12] = (U8)(((io_info->pdBlock) >> 56) & 0xff);
2908 cdb[13] = (U8)(((io_info->pdBlock) >> 48) & 0xff);
2909 cdb[14] = (U8)(((io_info->pdBlock) >> 40) & 0xff);
2910 cdb[15] = (U8)(((io_info->pdBlock) >> 32) & 0xff);
2911 cdb[16] = (U8)(((io_info->pdBlock) >> 24) & 0xff);
2912 cdb[17] = (U8)(((io_info->pdBlock) >> 16) & 0xff);
2913 cdb[18] = (U8)(((io_info->pdBlock) >> 8) & 0xff);
2914 cdb[19] = (U8)((io_info->pdBlock) & 0xff);
2916 /* Logical block reference tag */
2917 ddi_put32(acc_handle, &scsi_io_request->CDB.EEDP32.PrimaryReferenceTag,
2918 BE_32(ref_tag));
2920 ddi_put16(acc_handle,
2921 &scsi_io_request->CDB.EEDP32.PrimaryApplicationTagMask, 0xffff);
2923 ddi_put32(acc_handle, &scsi_io_request->DataLength,
2924 ((io_info->numBlocks) * 512));
2925 /* Specify 32-byte cdb */
2926 ddi_put16(acc_handle, &scsi_io_request->IoFlags, 32);
2928 /* Transfer length */
2929 cdb[28] = (U8)(((io_info->numBlocks) >> 24) & 0xff);
2930 cdb[29] = (U8)(((io_info->numBlocks) >> 16) & 0xff);
2931 cdb[30] = (U8)(((io_info->numBlocks) >> 8) & 0xff);
2932 cdb[31] = (U8)((io_info->numBlocks) & 0xff);
2934 /* set SCSI IO EEDPFlags */
2935 EEDPFlags = ddi_get16(acc_handle, &scsi_io_request->EEDPFlags);
2936 Control = ddi_get32(acc_handle, &scsi_io_request->Control);
2938 /* set SCSI IO EEDPFlags bits */
2939 if (io_info->isRead) {
2941 * For READ commands, the EEDPFlags shall be set to specify to
2942 * Increment the Primary Reference Tag, to Check the Reference
2943 * Tag, and to Check and Remove the Protection Information
2944 * fields.
2946 EEDPFlags = MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
2947 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
2948 MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP |
2949 MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG |
2950 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
2951 } else {
2953 * For WRITE commands, the EEDPFlags shall be set to specify to
2954 * Increment the Primary Reference Tag, and to Insert
2955 * Protection Information fields.
2957 EEDPFlags = MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
2958 MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
2960 Control |= (0x4 << 26);
2962 ddi_put16(acc_handle, &scsi_io_request->EEDPFlags, EEDPFlags);
2963 ddi_put32(acc_handle, &scsi_io_request->Control, Control);
2964 ddi_put32(acc_handle,
2965 &scsi_io_request->EEDPBlockSize, MRSAS_EEDPBLOCKSIZE);
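/*
 * The 32-byte variable-length CDB built above follows the SPC-4
 * READ(32)/WRITE(32) layout: byte 0 is the variable-length opcode,
 * byte 7 the additional CDB length, bytes 8-9 the service action,
 * byte 10 the RD/WRPROTECT flags, bytes 12-19 the 64-bit LBA, and
 * bytes 28-31 the transfer length. The EEDPFlags tell the IOC to
 * check or insert T10 DIF protection information; the extra (0x4 << 26)
 * bits merged into Control are carried over from LSI's reference
 * implementation (their exact meaning is defined by the MPI2 headers,
 * not by this driver).
 */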
2970 * mrsas_tbolt_set_pd_lba - Sets PD LBA
2971 * @cdb: CDB
2972 * @cdb_len: cdb length
2973 * @start_blk: Start block of IO
2975 * Used to set the PD LBA in CDB for FP IOs
2977 static void
2978 mrsas_tbolt_set_pd_lba(U8 cdb[], uint8_t *cdb_len_ptr, U64 start_blk,
2979 U32 num_blocks)
2981 U8 cdb_len = *cdb_len_ptr;
2982 U8 flagvals = 0, opcode = 0, groupnum = 0, control = 0;
2984 /* Some drives don't support 16/12 byte CDB's, convert to 10 */
2985 if (((cdb_len == 12) || (cdb_len == 16)) &&
2986 (start_blk <= 0xffffffff)) {
2987 if (cdb_len == 16) {
2988 con_log(CL_ANN,
2989 (CE_NOTE, "Converting READ/WRITE(16) to READ10"));
2990 opcode = cdb[0] == READ_16 ? READ_10 : WRITE_10;
2991 flagvals = cdb[1];
2992 groupnum = cdb[14];
2993 control = cdb[15];
2994 } else {
2995 con_log(CL_ANN,
2996 (CE_NOTE, "Converting READ/WRITE(12) to READ10"));
2997 opcode = cdb[0] == READ_12 ? READ_10 : WRITE_10;
2998 flagvals = cdb[1];
2999 groupnum = cdb[10];
3000 control = cdb[11];
3003 bzero(cdb, 32); /* sizeof (cdb) is pointer-sized here; zero all 32 bytes */
3005 cdb[0] = opcode;
3006 cdb[1] = flagvals;
3007 cdb[6] = groupnum;
3008 cdb[9] = control;
3009 /* Set transfer length */
3010 cdb[8] = (U8)(num_blocks & 0xff);
3011 cdb[7] = (U8)((num_blocks >> 8) & 0xff);
3012 cdb_len = 10;
3013 } else if ((cdb_len < 16) && (start_blk > 0xffffffff)) {
3014 /* Convert to 16 byte CDB for large LBA's */
3015 con_log(CL_ANN,
3016 (CE_NOTE, "Converting 6/10/12 CDB to 16 byte CDB"));
3017 switch (cdb_len) {
3018 case 6:
3019 opcode = cdb[0] == READ_6 ? READ_16 : WRITE_16;
3020 control = cdb[5];
3021 break;
3022 case 10:
3023 opcode = cdb[0] == READ_10 ? READ_16 : WRITE_16;
3024 flagvals = cdb[1];
3025 groupnum = cdb[6];
3026 control = cdb[9];
3027 break;
3028 case 12:
3029 opcode = cdb[0] == READ_12 ? READ_16 : WRITE_16;
3030 flagvals = cdb[1];
3031 groupnum = cdb[10];
3032 control = cdb[11];
3033 break;
3036 bzero(cdb, 32); /* sizeof (cdb) is pointer-sized here; zero all 32 bytes */
3038 cdb[0] = opcode;
3039 cdb[1] = flagvals;
3040 cdb[14] = groupnum;
3041 cdb[15] = control;
3043 /* Transfer length */
3044 cdb[13] = (U8)(num_blocks & 0xff);
3045 cdb[12] = (U8)((num_blocks >> 8) & 0xff);
3046 cdb[11] = (U8)((num_blocks >> 16) & 0xff);
3047 cdb[10] = (U8)((num_blocks >> 24) & 0xff);
3049 /* Specify 16-byte cdb */
3050 cdb_len = 16;
3051 } else if ((cdb_len == 6) && (start_blk > 0x1fffff)) {
3052 /* convert to 10 byte CDB */
3053 opcode = cdb[0] == READ_6 ? READ_10 : WRITE_10;
3054 control = cdb[5];
3056 bzero(cdb, 32); /* sizeof (cdb) is pointer-sized here; zero all 32 bytes */
3057 cdb[0] = opcode;
3058 cdb[9] = control;
3060 /* Set transfer length */
3061 cdb[8] = (U8)(num_blocks & 0xff);
3062 cdb[7] = (U8)((num_blocks >> 8) & 0xff);
3064 /* Specify 10-byte cdb */
3065 cdb_len = 10;
3069 /* All paths fall through to the normal case; just load the LBA here. */
3070 switch (cdb_len) {
3071 case 6:
3073 U8 val = cdb[1] & 0xE0;
3074 cdb[3] = (U8)(start_blk & 0xff);
3075 cdb[2] = (U8)((start_blk >> 8) & 0xff);
3076 cdb[1] = val | ((U8)(start_blk >> 16) & 0x1f);
3077 break;
3079 case 10:
3080 cdb[5] = (U8)(start_blk & 0xff);
3081 cdb[4] = (U8)((start_blk >> 8) & 0xff);
3082 cdb[3] = (U8)((start_blk >> 16) & 0xff);
3083 cdb[2] = (U8)((start_blk >> 24) & 0xff);
3084 break;
3085 case 12:
3086 cdb[5] = (U8)(start_blk & 0xff);
3087 cdb[4] = (U8)((start_blk >> 8) & 0xff);
3088 cdb[3] = (U8)((start_blk >> 16) & 0xff);
3089 cdb[2] = (U8)((start_blk >> 24) & 0xff);
3090 break;
3092 case 16:
3093 cdb[9] = (U8)(start_blk & 0xff);
3094 cdb[8] = (U8)((start_blk >> 8) & 0xff);
3095 cdb[7] = (U8)((start_blk >> 16) & 0xff);
3096 cdb[6] = (U8)((start_blk >> 24) & 0xff);
3097 cdb[5] = (U8)((start_blk >> 32) & 0xff);
3098 cdb[4] = (U8)((start_blk >> 40) & 0xff);
3099 cdb[3] = (U8)((start_blk >> 48) & 0xff);
3100 cdb[2] = (U8)((start_blk >> 56) & 0xff);
3101 break;
3104 *cdb_len_ptr = cdb_len;
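/*
 * Worked example of the big-endian packing above (hypothetical values):
 * a READ(10) for start_blk 0x00234567 and num_blocks 0x0102 ends up as
 *
 *	cdb[2..5] = 00 23 45 67	(LBA, MSB first)
 *	cdb[7..8] = 01 02	(transfer length, MSB first)
 *
 * i.e. SCSI CDB fields are always big-endian regardless of host byte
 * order.
 */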
3108 static int
3109 mrsas_tbolt_check_map_info(struct mrsas_instance *instance)
3111 MR_FW_RAID_MAP_ALL *ld_map;
3113 if (!mrsas_tbolt_get_ld_map_info(instance)) {
3115 ld_map = instance->ld_map[instance->map_id & 1];
3117 con_log(CL_ANN1, (CE_NOTE, "ldCount=%d, map size=%d",
3118 ld_map->raidMap.ldCount, ld_map->raidMap.totalSize));
3120 if (MR_ValidateMapInfo(
3121 instance->ld_map[instance->map_id & 1],
3122 instance->load_balance_info)) {
3123 con_log(CL_ANN,
3124 (CE_CONT, "MR_ValidateMapInfo success"));
3126 instance->fast_path_io = 1;
3127 con_log(CL_ANN,
3128 (CE_NOTE, "instance->fast_path_io %d",
3129 instance->fast_path_io));
3131 return (DDI_SUCCESS);
3136 instance->fast_path_io = 0;
3137 dev_err(instance->dip, CE_WARN, "MR_ValidateMapInfo failed");
3138 con_log(CL_ANN, (CE_NOTE,
3139 "instance->fast_path_io %d", instance->fast_path_io));
3141 return (DDI_FAILURE);
3145 * Marks the HBA as bad. This is called either when an
3146 * IO packet times out even after 3 FW resets, or when the
3147 * FW is found to be faulted even after 3 consecutive resets.
3150 void
3151 mrsas_tbolt_kill_adapter(struct mrsas_instance *instance)
3153 dev_err(instance->dip, CE_NOTE, "TBOLT Kill adapter called");
3155 if (instance->deadadapter == 1)
3156 return;
3158 con_log(CL_ANN1, (CE_NOTE, "tbolt_kill_adapter: "
3159 "Writing to doorbell with MFI_STOP_ADP "));
3160 mutex_enter(&instance->ocr_flags_mtx);
3161 instance->deadadapter = 1;
3162 mutex_exit(&instance->ocr_flags_mtx);
3163 instance->func_ptr->disable_intr(instance);
3164 WR_RESERVED0_REGISTER(MFI_STOP_ADP, instance);
3165 /* Flush */
3166 (void) RD_RESERVED0_REGISTER(instance);
3168 (void) mrsas_print_pending_cmds(instance);
3169 (void) mrsas_complete_pending_cmds(instance);
3172 void
3173 mrsas_reset_reply_desc(struct mrsas_instance *instance)
3175 int i;
3176 MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
3177 instance->reply_read_index = 0;
3179 /* initializing reply address to 0xFFFFFFFF */
3180 reply_desc = instance->reply_frame_pool;
3182 for (i = 0; i < instance->reply_q_depth; i++) {
3183 reply_desc->Words = (uint64_t)~0;
3184 reply_desc++;
3189 mrsas_tbolt_reset_ppc(struct mrsas_instance *instance)
3191 uint32_t status = 0x00;
3192 uint32_t retry = 0;
3193 uint32_t cur_abs_reg_val;
3194 uint32_t fw_state;
3195 uint32_t abs_state;
3196 uint32_t i;
3198 if (instance->deadadapter == 1) {
3199 dev_err(instance->dip, CE_WARN, "mrsas_tbolt_reset_ppc: "
3200 "no more resets as HBA has been marked dead");
3201 return (DDI_FAILURE);
3204 mutex_enter(&instance->ocr_flags_mtx);
3205 instance->adapterresetinprogress = 1;
3206 mutex_exit(&instance->ocr_flags_mtx);
3208 instance->func_ptr->disable_intr(instance);
3210 /* Add delay in order to complete the ioctl & io cmds in-flight */
3211 for (i = 0; i < 3000; i++)
3212 ddi_msleep(1);
3214 instance->reply_read_index = 0;
3216 retry_reset:
3217 con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: Resetting TBOLT"));
3219 /* Flush */
3220 WR_TBOLT_IB_WRITE_SEQ(0x0, instance);
3221 /* Write magic number */
3222 WR_TBOLT_IB_WRITE_SEQ(0xF, instance);
3223 WR_TBOLT_IB_WRITE_SEQ(0x4, instance);
3224 WR_TBOLT_IB_WRITE_SEQ(0xb, instance);
3225 WR_TBOLT_IB_WRITE_SEQ(0x2, instance);
3226 WR_TBOLT_IB_WRITE_SEQ(0x7, instance);
3227 WR_TBOLT_IB_WRITE_SEQ(0xd, instance);
3229 con_log(CL_ANN1, (CE_NOTE,
3230 "mrsas_tbolt_reset_ppc: magic number written "
3231 "to write sequence register"));
3233 /* Wait for the diag write enable (DRWE) bit to be set */
3234 retry = 0;
3235 status = RD_TBOLT_HOST_DIAG(instance);
3236 while (!(status & DIAG_WRITE_ENABLE)) {
3237 ddi_msleep(100);
3238 status = RD_TBOLT_HOST_DIAG(instance);
3239 if (retry++ >= 100) {
3240 dev_err(instance->dip, CE_WARN,
3241 "%s(): timeout waiting for DRWE.", __func__);
3242 return (DDI_FAILURE);
3246 /* Send reset command */
3247 WR_TBOLT_HOST_DIAG(status | DIAG_TBOLT_RESET_ADAPTER, instance);
3248 ddi_msleep(100);
3250 /* Wait for reset bit to clear */
3251 retry = 0;
3252 status = RD_TBOLT_HOST_DIAG(instance);
3253 while ((status & DIAG_TBOLT_RESET_ADAPTER)) {
3254 ddi_msleep(100);
3255 status = RD_TBOLT_HOST_DIAG(instance);
3256 if (retry++ == 100) {
3257 /* Don't call kill adapter here; */
3258 /* the adapter RESET bit is cleared by firmware. */
3259 /* mrsas_tbolt_kill_adapter(instance); */
3260 dev_err(instance->dip, CE_WARN,
3261 "%s(): RESET FAILED; return failure!!!", __func__);
3262 return (DDI_FAILURE);
3266 con_log(CL_ANN,
3267 (CE_NOTE, "mrsas_tbolt_reset_ppc: Adapter reset complete"));
3269 abs_state = instance->func_ptr->read_fw_status_reg(instance);
3270 retry = 0;
3271 while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
3272 ddi_msleep(100);
3273 abs_state = instance->func_ptr->read_fw_status_reg(instance);
3275 if (abs_state <= MFI_STATE_FW_INIT) {
3276 dev_err(instance->dip, CE_WARN,
3277 "mrsas_tbolt_reset_ppc: firmware state < MFI_STATE_FW_INIT"
3278 "state = 0x%x, RETRY RESET.", abs_state);
3279 goto retry_reset;
3282 /* Mark HBA as bad, if FW is fault after 3 continuous resets */
3283 if (mfi_state_transition_to_ready(instance) ||
3284 mrsas_debug_tbolt_fw_faults_after_ocr == 1) {
3285 cur_abs_reg_val =
3286 instance->func_ptr->read_fw_status_reg(instance);
3287 fw_state = cur_abs_reg_val & MFI_STATE_MASK;
3289 con_log(CL_ANN1, (CE_NOTE,
3290 "mrsas_tbolt_reset_ppc :before fake: FW is not ready "
3291 "FW state = 0x%x", fw_state));
3292 if (mrsas_debug_tbolt_fw_faults_after_ocr == 1)
3293 fw_state = MFI_STATE_FAULT;
3295 con_log(CL_ANN,
3296 (CE_NOTE, "mrsas_tbolt_reset_ppc : FW is not ready "
3297 "FW state = 0x%x", fw_state));
3299 if (fw_state == MFI_STATE_FAULT) {
3300 /* increment the count */
3301 instance->fw_fault_count_after_ocr++;
3302 if (instance->fw_fault_count_after_ocr
3303 < MAX_FW_RESET_COUNT) {
3304 dev_err(instance->dip, CE_WARN,
3305 "mrsas_tbolt_reset_ppc: "
3306 "FW is in fault after OCR count %d "
3307 "Retry Reset",
3308 instance->fw_fault_count_after_ocr);
3309 goto retry_reset;
3311 } else {
3312 dev_err(instance->dip, CE_WARN, "%s: "
3313 "Max reset count exceeded (>%d); "
3314 "marking HBA as bad, KILL adapter",
3315 __func__, MAX_FW_RESET_COUNT);
3317 mrsas_tbolt_kill_adapter(instance);
3318 return (DDI_FAILURE);
3323 /* reset the counter as FW is up after OCR */
3324 instance->fw_fault_count_after_ocr = 0;
3326 mrsas_reset_reply_desc(instance);
3328 abs_state = mrsas_issue_init_mpi2(instance);
3329 if (abs_state == (uint32_t)DDI_FAILURE) {
3330 dev_err(instance->dip, CE_WARN, "mrsas_tbolt_reset_ppc: "
3331 "INIT failed Retrying Reset");
3332 goto retry_reset;
3335 (void) mrsas_print_pending_cmds(instance);
3337 instance->func_ptr->enable_intr(instance);
3338 instance->fw_outstanding = 0;
3340 (void) mrsas_issue_pending_cmds(instance);
3342 instance->aen_cmd->retry_count_for_ocr = 0;
3343 instance->aen_cmd->drv_pkt_time = 0;
3345 instance->func_ptr->issue_cmd(instance->aen_cmd, instance);
3347 mutex_enter(&instance->ocr_flags_mtx);
3348 instance->adapterresetinprogress = 0;
3349 mutex_exit(&instance->ocr_flags_mtx);
3351 dev_err(instance->dip, CE_NOTE, "TBOLT adapter reset successfully");
3353 return (DDI_SUCCESS);
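/*
 * For reference, the reset above is the same diag-reset handshake used
 * by other Fusion-class drivers: write the 0xF,4,b,2,7,d magic sequence
 * to the write-sequence register to unlock the host diagnostic
 * register, wait for DRWE (diag write enable), set the reset-adapter
 * bit, wait for the FW to clear it, then bring the FW back to READY
 * and re-run MPI2 IOC init (mrsas_issue_init_mpi2) before replaying
 * any pending commands.
 */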
3357 * mrsas_tbolt_sync_map_info - Keeps the FW's ld_map in sync
3358 * @instance: Adapter soft state
3360 * Issues an internal pended command (DCMD) that sends the driver's
3361 * per-LD sequence numbers to the FW. The FW completes the command
3362 * when the RAID map changes, prompting the driver to fetch a new map.
3365 static int
3366 mrsas_tbolt_sync_map_info(struct mrsas_instance *instance)
3368 int ret = 0, i;
3369 struct mrsas_cmd *cmd = NULL;
3370 struct mrsas_dcmd_frame *dcmd;
3371 uint32_t size_sync_info, num_lds;
3372 LD_TARGET_SYNC *ci = NULL;
3373 MR_FW_RAID_MAP_ALL *map;
3374 MR_LD_RAID *raid;
3375 LD_TARGET_SYNC *ld_sync;
3376 uint32_t ci_h = 0;
3377 uint32_t size_map_info;
3379 cmd = get_raid_msg_pkt(instance);
3381 if (cmd == NULL) {
3382 dev_err(instance->dip, CE_WARN,
3383 "Failed to get a cmd from free-pool in "
3384 "mrsas_tbolt_sync_map_info().");
3385 return (DDI_FAILURE);
3388 /* Clear the frame buffer and assign back the context id */
3389 bzero((char *)&cmd->frame[0], sizeof (union mrsas_frame));
3390 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3391 cmd->index);
3392 bzero(cmd->scsi_io_request, sizeof (Mpi2RaidSCSIIORequest_t));
3395 map = instance->ld_map[instance->map_id & 1];
3397 num_lds = map->raidMap.ldCount;
3399 dcmd = &cmd->frame->dcmd;
3401 size_sync_info = sizeof (LD_TARGET_SYNC) * num_lds;
3403 con_log(CL_ANN, (CE_NOTE, "size_sync_info =0x%x ; ld count = 0x%x",
3404 size_sync_info, num_lds));
3406 ci = (LD_TARGET_SYNC *)instance->ld_map[(instance->map_id - 1) & 1];
3408 bzero(ci, sizeof (MR_FW_RAID_MAP_ALL));
3409 ci_h = instance->ld_map_phy[(instance->map_id - 1) & 1];
3411 bzero(dcmd->mbox.b, DCMD_MBOX_SZ);
3413 ld_sync = (LD_TARGET_SYNC *)ci;
3415 for (i = 0; i < num_lds; i++, ld_sync++) {
3416 raid = MR_LdRaidGet(i, map);
3418 con_log(CL_ANN1,
3419 (CE_NOTE, "i : 0x%x, Seq Num : 0x%x, Sync Reqd : 0x%x",
3420 i, raid->seqNum, raid->flags.ldSyncRequired));
3422 ld_sync->ldTargetId = MR_GetLDTgtId(i, map);
3424 con_log(CL_ANN1, (CE_NOTE, "i : 0x%x, tgt : 0x%x",
3425 i, ld_sync->ldTargetId));
3427 ld_sync->seqNum = raid->seqNum;
3431 size_map_info = sizeof (MR_FW_RAID_MAP) +
3432 (sizeof (MR_LD_SPAN_MAP) * (MAX_LOGICAL_DRIVES - 1));
3434 dcmd->cmd = MFI_CMD_OP_DCMD;
3435 dcmd->cmd_status = 0xFF;
3436 dcmd->sge_count = 1;
3437 dcmd->flags = MFI_FRAME_DIR_WRITE;
3438 dcmd->timeout = 0;
3439 dcmd->pad_0 = 0;
3440 dcmd->data_xfer_len = size_map_info;
3441 ASSERT(num_lds <= 255);
3442 dcmd->mbox.b[0] = (U8)num_lds;
3443 dcmd->mbox.b[1] = 1; /* Pend */
3444 dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
3445 dcmd->sgl.sge32[0].phys_addr = ci_h;
3446 dcmd->sgl.sge32[0].length = size_map_info;
3449 instance->map_update_cmd = cmd;
3450 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3452 instance->func_ptr->issue_cmd(cmd, instance);
3454 instance->unroll.syncCmd = 1;
3455 con_log(CL_ANN1, (CE_NOTE, "sync cmd issued. [SMID]:%x", cmd->SMID));
3457 return (ret);
3461 * abort_syncmap_cmd
3464 abort_syncmap_cmd(struct mrsas_instance *instance,
3465 struct mrsas_cmd *cmd_to_abort)
3467 int ret = 0;
3469 struct mrsas_cmd *cmd;
3470 struct mrsas_abort_frame *abort_fr;
3472 con_log(CL_ANN1, (CE_NOTE, "chkpnt: abort_ldsync:%d", __LINE__));
3474 cmd = get_raid_msg_mfi_pkt(instance);
3476 if (!cmd) {
3477 dev_err(instance->dip, CE_WARN,
3478 "Failed to get a cmd from free-pool abort_syncmap_cmd().");
3479 return (DDI_FAILURE);
3481 /* Clear the frame buffer and assign back the context id */
3482 bzero((char *)&cmd->frame[0], sizeof (union mrsas_frame));
3483 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3484 cmd->index);
3486 abort_fr = &cmd->frame->abort;
3488 /* prepare and issue the abort frame */
3489 ddi_put8(cmd->frame_dma_obj.acc_handle,
3490 &abort_fr->cmd, MFI_CMD_OP_ABORT);
3491 ddi_put8(cmd->frame_dma_obj.acc_handle, &abort_fr->cmd_status,
3492 MFI_CMD_STATUS_SYNC_MODE);
3493 ddi_put16(cmd->frame_dma_obj.acc_handle, &abort_fr->flags, 0);
3494 ddi_put32(cmd->frame_dma_obj.acc_handle, &abort_fr->abort_context,
3495 cmd_to_abort->index);
3496 ddi_put32(cmd->frame_dma_obj.acc_handle,
3497 &abort_fr->abort_mfi_phys_addr_lo, cmd_to_abort->frame_phys_addr);
3498 ddi_put32(cmd->frame_dma_obj.acc_handle,
3499 &abort_fr->abort_mfi_phys_addr_hi, 0);
3501 cmd->frame_count = 1;
3503 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3505 if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
3506 con_log(CL_ANN1, (CE_WARN,
3507 "abort_ldsync_cmd: issue_cmd_in_poll_mode failed"));
3508 ret = -1;
3509 } else {
3510 ret = 0;
3513 return_raid_msg_mfi_pkt(instance, cmd);
3515 atomic_add_16(&instance->fw_outstanding, (-1));
3517 return (ret);
3521 * Even though these functions were originally intended for 2208 only, it
3522 * turns out they're useful for "Skinny" support as well. In a perfect world,
3523 * these two functions would be either in mr_sas.c, or in their own new source
3524 * file. Since this driver needs some cleanup anyway, keep this portion in
3525 * mind as well.
3529 mrsas_tbolt_config_pd(struct mrsas_instance *instance, uint16_t tgt,
3530 uint8_t lun, dev_info_t **ldip)
3532 struct scsi_device *sd;
3533 dev_info_t *child;
3534 int rval, dtype;
3535 struct mrsas_tbolt_pd_info *pds = NULL;
3537 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_config_pd: t = %d l = %d",
3538 tgt, lun));
3540 if ((child = mrsas_find_child(instance, tgt, lun)) != NULL) {
3541 if (ldip) {
3542 *ldip = child;
3544 if (instance->mr_tbolt_pd_list[tgt].flag != MRDRV_TGT_VALID) {
3545 rval = mrsas_service_evt(instance, tgt, 1,
3546 MRSAS_EVT_UNCONFIG_TGT, (uintptr_t)NULL);
3547 con_log(CL_ANN1, (CE_WARN,
3548 "mr_sas:DELETING STALE ENTRY rval = %d "
3549 "tgt id = %d", rval, tgt));
3550 return (NDI_FAILURE);
3552 return (NDI_SUCCESS);
3555 pds = (struct mrsas_tbolt_pd_info *)
3556 kmem_zalloc(sizeof (struct mrsas_tbolt_pd_info), KM_SLEEP);
3557 mrsas_tbolt_get_pd_info(instance, pds, tgt);
3558 dtype = pds->scsiDevType;
3560 /* Check for Disk */
3561 if (dtype == DTYPE_DIRECT) {
3562 /* Only expose PDs that the FW marks as SYSTEM. */
3563 if (LE_16(pds->fwState) != PD_SYSTEM) {
3564 kmem_free(pds, sizeof (struct mrsas_tbolt_pd_info));
3565 return (NDI_FAILURE);
3567 sd = kmem_zalloc(sizeof (struct scsi_device), KM_SLEEP);
3568 sd->sd_address.a_hba_tran = instance->tran;
3569 sd->sd_address.a_target = (uint16_t)tgt;
3570 sd->sd_address.a_lun = (uint8_t)lun;
3572 if (scsi_hba_probe(sd, NULL) == SCSIPROBE_EXISTS) {
3573 rval = mrsas_config_scsi_device(instance, sd, ldip);
3574 dev_err(instance->dip, CE_CONT,
3575 "?Phys. device found: tgt %d dtype %d: %s\n",
3576 tgt, dtype, sd->sd_inq->inq_vid);
3577 } else {
3578 rval = NDI_FAILURE;
3579 con_log(CL_DLEVEL1, (CE_NOTE, "Phys. device Not found "
3580 "scsi_hba_probe Failed: tgt %d dtype %d: %s",
3581 tgt, dtype, sd->sd_inq->inq_vid));
3584 /* sd_unprobe is a no-op; free the inquiry buffer manually. */
3585 if (sd->sd_inq) {
3586 kmem_free(sd->sd_inq, SUN_INQSIZE);
3587 sd->sd_inq = NULL;
3589 kmem_free(sd, sizeof (struct scsi_device));
3590 } else {
3591 con_log(CL_ANN1, (CE_NOTE,
3592 "?Device not supported: tgt %d lun %d dtype %d",
3593 tgt, lun, dtype));
3594 rval = NDI_FAILURE;
3597 kmem_free(pds, sizeof (struct mrsas_tbolt_pd_info));
3598 con_log(CL_ANN1, (CE_NOTE, "mrsas_config_pd: return rval = %d",
3599 rval));
3600 return (rval);
3603 static void
3604 mrsas_tbolt_get_pd_info(struct mrsas_instance *instance,
3605 struct mrsas_tbolt_pd_info *pds, int tgt)
3607 struct mrsas_cmd *cmd;
3608 struct mrsas_dcmd_frame *dcmd;
3609 dma_obj_t dcmd_dma_obj;
3611 ASSERT(instance->tbolt || instance->skinny);
3613 if (instance->tbolt)
3614 cmd = get_raid_msg_pkt(instance);
3615 else
3616 cmd = mrsas_get_mfi_pkt(instance);
3618 if (!cmd) {
3619 con_log(CL_ANN1,
3620 (CE_WARN, "Failed to get a cmd for get pd info"));
3621 return;
3624 /* Clear the frame buffer and assign back the context id */
3625 bzero((char *)&cmd->frame[0], sizeof (union mrsas_frame));
3626 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3627 cmd->index);
3630 dcmd = &cmd->frame->dcmd;
3631 dcmd_dma_obj.size = sizeof (struct mrsas_tbolt_pd_info);
3632 dcmd_dma_obj.dma_attr = mrsas_generic_dma_attr;
3633 dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xffffffff;
3634 dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xffffffff;
3635 dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1;
3636 dcmd_dma_obj.dma_attr.dma_attr_align = 1;
3638 (void) mrsas_alloc_dma_obj(instance, &dcmd_dma_obj,
3639 DDI_STRUCTURE_LE_ACC);
3640 bzero(dcmd_dma_obj.buffer, sizeof (struct mrsas_tbolt_pd_info));
3641 bzero(dcmd->mbox.b, 12);
3642 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
3643 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0);
3644 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
3645 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
3646 MFI_FRAME_DIR_READ);
3647 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
3648 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
3649 sizeof (struct mrsas_tbolt_pd_info));
3650 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
3651 MR_DCMD_PD_GET_INFO);
3652 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.w[0], tgt);
3653 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length,
3654 sizeof (struct mrsas_tbolt_pd_info));
3655 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr,
3656 dcmd_dma_obj.dma_cookie[0].dmac_address);
3658 cmd->sync_cmd = MRSAS_TRUE;
3659 cmd->frame_count = 1;
3661 if (instance->tbolt)
3662 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3664 instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd);
3666 ddi_rep_get8(cmd->frame_dma_obj.acc_handle, (uint8_t *)pds,
3667 (uint8_t *)dcmd_dma_obj.buffer, sizeof (struct mrsas_tbolt_pd_info),
3668 DDI_DEV_AUTOINCR);
3669 (void) mrsas_free_dma_obj(instance, dcmd_dma_obj);
3671 if (instance->tbolt)
3672 return_raid_msg_pkt(instance, cmd);
3673 else
3674 mrsas_return_mfi_pkt(instance, cmd);
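/*
 * mrsas_tbolt_get_pd_info() above is a template for the
 * DCMD-with-DMA-buffer pattern used throughout this file: allocate a
 * little-endian DMA object sized for the result, point sge32[0] at its
 * cookie, issue the frame synchronously, then copy the result out
 * through the access handle with ddi_rep_get8() so DDI handles
 * endianness. A condensed sketch (illustrative; error handling
 * omitted):
 *
 *	(void) mrsas_alloc_dma_obj(instance, &obj, DDI_STRUCTURE_LE_ACC);
 *	ddi_put32(h, &dcmd->sgl.sge32[0].phys_addr,
 *	    obj.dma_cookie[0].dmac_address);
 *	instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd);
 *	ddi_rep_get8(h, (uint8_t *)out, (uint8_t *)obj.buffer,
 *	    sizeof (*out), DDI_DEV_AUTOINCR);
 *	(void) mrsas_free_dma_obj(instance, obj);
 */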