/*
 * mr_sas_tbolt.c: source for mr_sas driver for New Generation.
 * i.e. Thunderbolt and Invader
 *
 * Solaris MegaRAID device driver for SAS2.0 controllers
 * Copyright (c) 2008-2012, LSI Logic Corporation.
 *
 * Copyright 2018 Nexenta Systems, Inc.
 * Copyright 2015, 2017 Citrus IT Limited. All rights reserved.
 * Copyright 2015 Garrett D'Amore <garrett@damore.org>
 */
#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/scsi/scsi.h>
#include <sys/byteorder.h>
#include "ld_pd_map.h"
#include <sys/ddifm.h>
#include <sys/fm/protocol.h>
#include <sys/fm/util.h>
#include <sys/fm/io/ddi.h>
/* Pre-TB command size and TB command size. */
#define	MR_COMMAND_SIZE	(64*20)	/* 1280 bytes */
MR_LD_RAID *MR_LdRaidGet(U32 ld, MR_FW_RAID_MAP_ALL *map);
U16 MR_TargetIdToLdGet(U32 ldTgtId, MR_FW_RAID_MAP_ALL *map);
U16 MR_GetLDTgtId(U32 ld, MR_FW_RAID_MAP_ALL *map);
U16 get_updated_dev_handle(PLD_LOAD_BALANCE_INFO, struct IO_REQUEST_INFO *);
extern ddi_dma_attr_t mrsas_generic_dma_attr;
extern uint32_t mrsas_tbolt_max_cap_maxxfer;
extern struct ddi_device_acc_attr endian_attr;
extern int	debug_level_g;
extern unsigned int	enable_fp;
volatile int dump_io_wait_time = 900;
extern volatile int debug_timeout_g;

extern int mrsas_issue_pending_cmds(struct mrsas_instance *);
extern int mrsas_complete_pending_cmds(struct mrsas_instance *instance);
extern void push_pending_mfi_pkt(struct mrsas_instance *,
    struct mrsas_cmd *);
extern U8 MR_BuildRaidContext(struct mrsas_instance *, struct IO_REQUEST_INFO *,
    MPI2_SCSI_IO_VENDOR_UNIQUE *, MR_FW_RAID_MAP_ALL *);
/* Local static prototypes. */
static struct mrsas_cmd	*mrsas_tbolt_build_cmd(struct mrsas_instance *,
    struct scsi_address *, struct scsi_pkt *, uchar_t *);
static void	mrsas_tbolt_set_pd_lba(U8 *, size_t, uint8_t *, U64, U32);
static int	mrsas_tbolt_check_map_info(struct mrsas_instance *);
static int	mrsas_tbolt_sync_map_info(struct mrsas_instance *);
static int	mrsas_tbolt_prepare_pkt(struct scsa_cmd *);
static int	mrsas_tbolt_ioc_init(struct mrsas_instance *, dma_obj_t *);
static void	mrsas_tbolt_get_pd_info(struct mrsas_instance *,
    struct mrsas_tbolt_pd_info *, int);

static int mrsas_debug_tbolt_fw_faults_after_ocr = 0;
/*
 * destroy_mfi_mpi_frame_pool
 */
void
destroy_mfi_mpi_frame_pool(struct mrsas_instance *instance)
{
	int	i;
	struct mrsas_cmd	*cmd;

	/* return all mfi frames to pool */
	for (i = 0; i < MRSAS_APP_RESERVED_CMDS; i++) {
		cmd = instance->cmd_list[i];
		if (cmd->frame_dma_obj_status == DMA_OBJ_ALLOCATED) {
			(void) mrsas_free_dma_obj(instance,
			    cmd->frame_dma_obj);
		}
		cmd->frame_dma_obj_status = DMA_OBJ_FREED;
	}
}
/*
 * destroy_mpi2_frame_pool
 */
void
destroy_mpi2_frame_pool(struct mrsas_instance *instance)
{
	if (instance->mpi2_frame_pool_dma_obj.status == DMA_OBJ_ALLOCATED) {
		(void) mrsas_free_dma_obj(instance,
		    instance->mpi2_frame_pool_dma_obj);
		instance->mpi2_frame_pool_dma_obj.status |= DMA_OBJ_FREED;
	}
}
/*
 * mrsas_tbolt_free_additional_dma_buffer
 */
void
mrsas_tbolt_free_additional_dma_buffer(struct mrsas_instance *instance)
{
	int	i;

	if (instance->mfi_internal_dma_obj.status == DMA_OBJ_ALLOCATED) {
		(void) mrsas_free_dma_obj(instance,
		    instance->mfi_internal_dma_obj);
		instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
	}
	if (instance->mfi_evt_detail_obj.status == DMA_OBJ_ALLOCATED) {
		(void) mrsas_free_dma_obj(instance,
		    instance->mfi_evt_detail_obj);
		instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED;
	}

	for (i = 0; i < 2; i++) {
		if (instance->ld_map_obj[i].status == DMA_OBJ_ALLOCATED) {
			(void) mrsas_free_dma_obj(instance,
			    instance->ld_map_obj[i]);
			instance->ld_map_obj[i].status = DMA_OBJ_FREED;
		}
	}
}
/*
 * free_req_rep_desc_pool
 */
void
free_req_rep_desc_pool(struct mrsas_instance *instance)
{
	if (instance->request_desc_dma_obj.status == DMA_OBJ_ALLOCATED) {
		(void) mrsas_free_dma_obj(instance,
		    instance->request_desc_dma_obj);
		instance->request_desc_dma_obj.status = DMA_OBJ_FREED;
	}

	if (instance->reply_desc_dma_obj.status == DMA_OBJ_ALLOCATED) {
		(void) mrsas_free_dma_obj(instance,
		    instance->reply_desc_dma_obj);
		instance->reply_desc_dma_obj.status = DMA_OBJ_FREED;
	}
}
/*
 * ThunderBolt(TB) Request Message Frame Pool
 */
int
create_mpi2_frame_pool(struct mrsas_instance *instance)
{
	int		i;
	uint32_t	max_cmd;
	uint32_t	sgl_sz;
	uint32_t	raid_msg_size;
	uint32_t	total_size;
	uint32_t	offset;
	uint32_t	io_req_base_phys;
	uint8_t		*io_req_base;
	struct mrsas_cmd	*cmd;

	max_cmd = instance->max_fw_cmds;

	sgl_sz = 1024;
	raid_msg_size = MRSAS_THUNDERBOLT_MSG_SIZE;

	/* Allocating additional 256 bytes to accommodate SMID 0. */
	total_size = MRSAS_THUNDERBOLT_MSG_SIZE + (max_cmd * raid_msg_size) +
	    (max_cmd * sgl_sz) + (max_cmd * SENSE_LENGTH);
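	/*
	 * Sketch of the resulting pool layout (offsets follow directly from
	 * the total_size arithmetic above):
	 *
	 *   [0]                       one message-sized slot, kept for SMID 0
	 *   [io_req_base]             max_cmd * raid_msg_size  IO frames
	 *   [.. + max_cmd * msg]      max_cmd * sgl_sz         chain SGL bufs
	 *   [.. + max_cmd * sgl]      max_cmd * SENSE_LENGTH   sense buffers
	 *
	 * The extra leading slot exists because SMID 0 is never handed out,
	 * so usable frames start one message size into the chunk.
	 */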
	con_log(CL_ANN1, (CE_NOTE, "create_mpi2_frame_pool: "
	    "max_cmd %x", max_cmd));

	con_log(CL_DLEVEL3, (CE_NOTE, "create_mpi2_frame_pool: "
	    "request message frame pool size %x", total_size));

	/*
	 * ThunderBolt(TB) We need to create a single chunk of DMA'ble memory
	 * and then split the memory to 1024 commands. Each command should be
	 * able to contain a RAID MESSAGE FRAME which will embed a MFI_FRAME
	 * within it. Further refer the "alloc_req_rep_desc" function where
	 * we allocate request/reply descriptors queues for a clue.
	 */

	instance->mpi2_frame_pool_dma_obj.size = total_size;
	instance->mpi2_frame_pool_dma_obj.dma_attr = mrsas_generic_dma_attr;
	instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_addr_hi =
	    0xFFFFFFFFU;
	instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_count_max =
	    0xFFFFFFFFU;
	instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_sgllen = 1;
	instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_align = 256;

	if (mrsas_alloc_dma_obj(instance, &instance->mpi2_frame_pool_dma_obj,
	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
		dev_err(instance->dip, CE_WARN,
		    "could not alloc mpi2 frame pool");
		return (DDI_FAILURE);
	}

	bzero(instance->mpi2_frame_pool_dma_obj.buffer, total_size);
	instance->mpi2_frame_pool_dma_obj.status |= DMA_OBJ_ALLOCATED;

	instance->io_request_frames =
	    (uint8_t *)instance->mpi2_frame_pool_dma_obj.buffer;
	instance->io_request_frames_phy =
	    (uint32_t)
	    instance->mpi2_frame_pool_dma_obj.dma_cookie[0].dmac_address;

	con_log(CL_DLEVEL3, (CE_NOTE, "io_request_frames 0x%p",
	    (void *)instance->io_request_frames));

	con_log(CL_DLEVEL3, (CE_NOTE, "io_request_frames_phy 0x%x",
	    instance->io_request_frames_phy));

	io_req_base = (uint8_t *)instance->io_request_frames +
	    MRSAS_THUNDERBOLT_MSG_SIZE;
	io_req_base_phys = instance->io_request_frames_phy +
	    MRSAS_THUNDERBOLT_MSG_SIZE;

	con_log(CL_DLEVEL3, (CE_NOTE,
	    "io req_base_phys 0x%x", io_req_base_phys));

	for (i = 0; i < max_cmd; i++) {
		cmd = instance->cmd_list[i];

		offset = i * MRSAS_THUNDERBOLT_MSG_SIZE;

		cmd->scsi_io_request = (Mpi2RaidSCSIIORequest_t *)
		    ((uint8_t *)io_req_base + offset);
		cmd->scsi_io_request_phys_addr = io_req_base_phys + offset;

		cmd->sgl = (Mpi2SGEIOUnion_t *)((uint8_t *)io_req_base +
		    (max_cmd * raid_msg_size) + i * sgl_sz);

		cmd->sgl_phys_addr = (io_req_base_phys +
		    (max_cmd * raid_msg_size) + i * sgl_sz);

		cmd->sense1 = (uint8_t *)((uint8_t *)io_req_base +
		    (max_cmd * raid_msg_size) + (max_cmd * sgl_sz) +
		    (i * SENSE_LENGTH));

		cmd->sense_phys_addr1 = (io_req_base_phys +
		    (max_cmd * raid_msg_size) + (max_cmd * sgl_sz) +
		    (i * SENSE_LENGTH));

		con_log(CL_DLEVEL3, (CE_NOTE, "Frame Pool Addr [%x]0x%p",
		    cmd->index, (void *)cmd->scsi_io_request));

		con_log(CL_DLEVEL3, (CE_NOTE, "Frame Pool Phys Addr [%x]0x%x",
		    cmd->index, cmd->scsi_io_request_phys_addr));

		con_log(CL_DLEVEL3, (CE_NOTE, "Sense Addr [%x]0x%p",
		    cmd->index, (void *)cmd->sense1));

		con_log(CL_DLEVEL3, (CE_NOTE, "Sense Addr Phys [%x]0x%x",
		    cmd->index, cmd->sense_phys_addr1));

		con_log(CL_DLEVEL3, (CE_NOTE, "Sgl buffers [%x]0x%p",
		    cmd->index, (void *)cmd->sgl));

		con_log(CL_DLEVEL3, (CE_NOTE, "Sgl buffers phys [%x]0x%x",
		    cmd->index, cmd->sgl_phys_addr));
	}

	return (DDI_SUCCESS);
}
/*
 * alloc_additional_dma_buffer for AEN
 */
int
mrsas_tbolt_alloc_additional_dma_buffer(struct mrsas_instance *instance)
{
	uint32_t	internal_buf_size = PAGESIZE*2;
	int		i;

	/* Initialize buffer status as free */
	instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
	instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED;
	instance->ld_map_obj[0].status = DMA_OBJ_FREED;
	instance->ld_map_obj[1].status = DMA_OBJ_FREED;

	instance->mfi_internal_dma_obj.size = internal_buf_size;
	instance->mfi_internal_dma_obj.dma_attr = mrsas_generic_dma_attr;
	instance->mfi_internal_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
	instance->mfi_internal_dma_obj.dma_attr.dma_attr_count_max =
	    0xFFFFFFFFU;
	instance->mfi_internal_dma_obj.dma_attr.dma_attr_sgllen = 1;

	if (mrsas_alloc_dma_obj(instance, &instance->mfi_internal_dma_obj,
	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
		dev_err(instance->dip, CE_WARN,
		    "could not alloc reply queue");
		return (DDI_FAILURE);
	}

	bzero(instance->mfi_internal_dma_obj.buffer, internal_buf_size);

	instance->mfi_internal_dma_obj.status |= DMA_OBJ_ALLOCATED;
	instance->internal_buf =
	    (caddr_t)(((unsigned long)instance->mfi_internal_dma_obj.buffer));
	instance->internal_buf_dmac_add =
	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address;
	instance->internal_buf_size = internal_buf_size;

	/* allocate evt_detail */
	instance->mfi_evt_detail_obj.size = sizeof (struct mrsas_evt_detail);
	instance->mfi_evt_detail_obj.dma_attr = mrsas_generic_dma_attr;
	instance->mfi_evt_detail_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
	instance->mfi_evt_detail_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
	instance->mfi_evt_detail_obj.dma_attr.dma_attr_sgllen = 1;
	instance->mfi_evt_detail_obj.dma_attr.dma_attr_align = 8;

	if (mrsas_alloc_dma_obj(instance, &instance->mfi_evt_detail_obj,
	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
		dev_err(instance->dip, CE_WARN,
		    "mrsas_tbolt_alloc_additional_dma_buffer: "
		    "could not allocate data transfer buffer.");
		goto fail_tbolt_additional_buff;
	}

	bzero(instance->mfi_evt_detail_obj.buffer,
	    sizeof (struct mrsas_evt_detail));

	instance->mfi_evt_detail_obj.status |= DMA_OBJ_ALLOCATED;

	instance->size_map_info = sizeof (MR_FW_RAID_MAP) +
	    (sizeof (MR_LD_SPAN_MAP) * (MAX_LOGICAL_DRIVES - 1));
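	/*
	 * Sizing note (assumes the standard MegaRAID map layout):
	 * MR_FW_RAID_MAP already embeds one MR_LD_SPAN_MAP, so only
	 * (MAX_LOGICAL_DRIVES - 1) additional span maps are added to cover
	 * every logical drive.  Two map buffers are allocated below and the
	 * active copy is selected with (instance->map_id & 1), so one copy
	 * can be refreshed from firmware while the other is in use.
	 */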
	for (i = 0; i < 2; i++) {
		/* allocate the data transfer buffer */
		instance->ld_map_obj[i].size = instance->size_map_info;
		instance->ld_map_obj[i].dma_attr = mrsas_generic_dma_attr;
		instance->ld_map_obj[i].dma_attr.dma_attr_addr_hi =
		    0xFFFFFFFFU;
		instance->ld_map_obj[i].dma_attr.dma_attr_count_max =
		    0xFFFFFFFFU;
		instance->ld_map_obj[i].dma_attr.dma_attr_sgllen = 1;
		instance->ld_map_obj[i].dma_attr.dma_attr_align = 1;

		if (mrsas_alloc_dma_obj(instance, &instance->ld_map_obj[i],
		    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
			dev_err(instance->dip, CE_WARN,
			    "could not allocate data transfer buffer.");
			goto fail_tbolt_additional_buff;
		}

		instance->ld_map_obj[i].status |= DMA_OBJ_ALLOCATED;

		bzero(instance->ld_map_obj[i].buffer, instance->size_map_info);

		instance->ld_map[i] =
		    (MR_FW_RAID_MAP_ALL *)instance->ld_map_obj[i].buffer;
		instance->ld_map_phy[i] = (uint32_t)instance->
		    ld_map_obj[i].dma_cookie[0].dmac_address;

		con_log(CL_DLEVEL3, (CE_NOTE,
		    "ld_map Addr Phys 0x%x", instance->ld_map_phy[i]));

		con_log(CL_DLEVEL3, (CE_NOTE,
		    "size_map_info 0x%x", instance->size_map_info));
	}

	return (DDI_SUCCESS);

fail_tbolt_additional_buff:
	mrsas_tbolt_free_additional_dma_buffer(instance);

	return (DDI_FAILURE);
}
MRSAS_REQUEST_DESCRIPTOR_UNION *
mr_sas_get_request_descriptor(struct mrsas_instance *instance, uint16_t index)
{
	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;

	if (index > instance->max_fw_cmds) {
		con_log(CL_ANN1, (CE_NOTE,
		    "Invalid SMID 0x%x request for descriptor", index));
		con_log(CL_ANN1, (CE_NOTE,
		    "max_fw_cmds : 0x%x", instance->max_fw_cmds));
		return (NULL);
	}

	req_desc = (MRSAS_REQUEST_DESCRIPTOR_UNION *)
	    ((char *)instance->request_message_pool +
	    (sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION) * index));

	con_log(CL_ANN1, (CE_NOTE,
	    "request descriptor : 0x%08lx", (unsigned long)req_desc));

	con_log(CL_ANN1, (CE_NOTE,
	    "request descriptor base phy : 0x%08lx",
	    (unsigned long)instance->request_message_pool_phy));

	return ((MRSAS_REQUEST_DESCRIPTOR_UNION *)req_desc);
}
/*
 * Allocate Request and Reply Queue Descriptors.
 */
int
alloc_req_rep_desc(struct mrsas_instance *instance)
{
	uint32_t	request_q_sz, reply_q_sz;
	int		i, max_reply_q_sz;
	MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;

	/*
	 * ThunderBolt(TB) There's no longer a producer consumer mechanism.
	 * Once we have an interrupt we are supposed to scan through the list
	 * of reply descriptors and process them accordingly. We need to
	 * allocate memory for 1024 reply descriptors.
	 */

	/* Allocate Reply Descriptors */
	con_log(CL_ANN1, (CE_NOTE, " reply q desc len = %x",
	    (uint_t)sizeof (MPI2_REPLY_DESCRIPTORS_UNION)));

	/* reply queue size should be multiple of 16 */
	max_reply_q_sz = ((instance->max_fw_cmds + 1 + 15)/16)*16;

	reply_q_sz = 8 * max_reply_q_sz;
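	/*
	 * Each MPI2 reply descriptor is 8 bytes, hence reply_q_sz =
	 * 8 * max_reply_q_sz.  The "+ 1" leaves room for the unused SMID 0
	 * slot and "(... + 15)/16)*16" rounds the depth up to a multiple of
	 * 16.  Worked example (hypothetical value): max_fw_cmds = 1007 gives
	 * ((1007 + 1 + 15) / 16) * 16 = 63 * 16 = 1008 descriptors, i.e. an
	 * 8064-byte ring.
	 */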
	con_log(CL_ANN1, (CE_NOTE, " reply q desc len = %x",
	    (uint_t)sizeof (MPI2_REPLY_DESCRIPTORS_UNION)));

	instance->reply_desc_dma_obj.size = reply_q_sz;
	instance->reply_desc_dma_obj.dma_attr = mrsas_generic_dma_attr;
	instance->reply_desc_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
	instance->reply_desc_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
	instance->reply_desc_dma_obj.dma_attr.dma_attr_sgllen = 1;
	instance->reply_desc_dma_obj.dma_attr.dma_attr_align = 16;

	if (mrsas_alloc_dma_obj(instance, &instance->reply_desc_dma_obj,
	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
		dev_err(instance->dip, CE_WARN, "could not alloc reply queue");
		return (DDI_FAILURE);
	}

	bzero(instance->reply_desc_dma_obj.buffer, reply_q_sz);
	instance->reply_desc_dma_obj.status |= DMA_OBJ_ALLOCATED;

	/* virtual address of reply queue */
	instance->reply_frame_pool = (MPI2_REPLY_DESCRIPTORS_UNION *)(
	    instance->reply_desc_dma_obj.buffer);

	instance->reply_q_depth = max_reply_q_sz;

	con_log(CL_ANN1, (CE_NOTE, "[reply queue depth]0x%x",
	    instance->reply_q_depth));

	con_log(CL_ANN1, (CE_NOTE, "[reply queue virt addr]0x%p",
	    (void *)instance->reply_frame_pool));

	/* initializing reply address to 0xFFFFFFFF */
	reply_desc = instance->reply_frame_pool;

	for (i = 0; i < instance->reply_q_depth; i++) {
		reply_desc->Words = (uint64_t)~0;
		reply_desc++;
	}

	instance->reply_frame_pool_phy =
	    (uint32_t)instance->reply_desc_dma_obj.dma_cookie[0].dmac_address;

	con_log(CL_ANN1, (CE_NOTE,
	    "[reply queue phys addr]0x%x", instance->reply_frame_pool_phy));

	instance->reply_pool_limit_phy = (instance->reply_frame_pool_phy +
	    reply_q_sz);

	con_log(CL_ANN1, (CE_NOTE, "[reply pool limit phys addr]0x%x",
	    instance->reply_pool_limit_phy));

	con_log(CL_ANN1, (CE_NOTE, " request q desc len = %x",
	    (int)sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION)));

	/* Allocate Request Descriptors */
	con_log(CL_ANN1, (CE_NOTE, " request q desc len = %x",
	    (int)sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION)));

	request_q_sz = 8 *
	    (instance->max_fw_cmds);

	instance->request_desc_dma_obj.size = request_q_sz;
	instance->request_desc_dma_obj.dma_attr = mrsas_generic_dma_attr;
	instance->request_desc_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
	instance->request_desc_dma_obj.dma_attr.dma_attr_count_max =
	    0xFFFFFFFFU;
	instance->request_desc_dma_obj.dma_attr.dma_attr_sgllen = 1;
	instance->request_desc_dma_obj.dma_attr.dma_attr_align = 16;

	if (mrsas_alloc_dma_obj(instance, &instance->request_desc_dma_obj,
	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
		dev_err(instance->dip, CE_WARN,
		    "could not alloc request queue desc");
		goto fail_undo_reply_queue;
	}

	bzero(instance->request_desc_dma_obj.buffer, request_q_sz);
	instance->request_desc_dma_obj.status |= DMA_OBJ_ALLOCATED;

	/* virtual address of request queue desc */
	instance->request_message_pool = (MRSAS_REQUEST_DESCRIPTOR_UNION *)
	    (instance->request_desc_dma_obj.buffer);

	instance->request_message_pool_phy =
	    (uint32_t)instance->request_desc_dma_obj.dma_cookie[0].dmac_address;

	return (DDI_SUCCESS);

fail_undo_reply_queue:
	if (instance->reply_desc_dma_obj.status == DMA_OBJ_ALLOCATED) {
		(void) mrsas_free_dma_obj(instance,
		    instance->reply_desc_dma_obj);
		instance->reply_desc_dma_obj.status = DMA_OBJ_FREED;
	}

	return (DDI_FAILURE);
}
/*
 * mrsas_alloc_cmd_pool_tbolt
 *
 * TODO: merge tbolt-specific code into mrsas_alloc_cmd_pool() to have a
 * single routine.
 */
int
mrsas_alloc_cmd_pool_tbolt(struct mrsas_instance *instance)
{
	int		i;
	int		count;
	uint32_t	max_cmd;
	uint32_t	reserve_cmd;
	size_t		sz;

	struct mrsas_cmd	*cmd;

	max_cmd = instance->max_fw_cmds;
	con_log(CL_ANN1, (CE_NOTE, "mrsas_alloc_cmd_pool: "
	    "max_cmd %x", max_cmd));

	sz = sizeof (struct mrsas_cmd *) * max_cmd;

	/*
	 * instance->cmd_list is an array of struct mrsas_cmd pointers.
	 * Allocate the dynamic array first and then allocate individual
	 * commands.
	 */
	instance->cmd_list = kmem_zalloc(sz, KM_SLEEP);

	/* create a frame pool and assign one frame to each cmd */
	for (count = 0; count < max_cmd; count++) {
		instance->cmd_list[count] =
		    kmem_zalloc(sizeof (struct mrsas_cmd), KM_SLEEP);
	}

	/* add all the commands to command pool */

	INIT_LIST_HEAD(&instance->cmd_pool_list);
	INIT_LIST_HEAD(&instance->cmd_pend_list);
	INIT_LIST_HEAD(&instance->cmd_app_pool_list);

	reserve_cmd = MRSAS_APP_RESERVED_CMDS;
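	/*
	 * The command list is carved into three groups: index 0 is held back
	 * for the IOC INIT command, indices 1 .. (reserve_cmd - 1) go to the
	 * application/ioctl pool, and the remainder feed the regular I/O
	 * pool.
	 */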
	/* cmd index 0 reserved for IOC INIT */
	for (i = 1; i < reserve_cmd; i++) {
		cmd = instance->cmd_list[i];
		cmd->index = i;
		mlist_add_tail(&cmd->list, &instance->cmd_app_pool_list);
	}

	for (i = reserve_cmd; i < max_cmd; i++) {
		cmd = instance->cmd_list[i];
		cmd->index = i;
		mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
	}

	return (DDI_SUCCESS);

	for (i = 0; i < count; i++) {
		if (instance->cmd_list[i] != NULL) {
			kmem_free(instance->cmd_list[i],
			    sizeof (struct mrsas_cmd));
		}
		instance->cmd_list[i] = NULL;
	}

	if (instance->cmd_list != NULL)
		kmem_free(instance->cmd_list, sz);
	instance->cmd_list = NULL;

	return (DDI_FAILURE);
}
/*
 * free_space_for_mpi2
 */
void
free_space_for_mpi2(struct mrsas_instance *instance)
{
	if (instance->cmd_list == NULL) {
		return;
	}

	/* First free the additional DMA buffer */
	mrsas_tbolt_free_additional_dma_buffer(instance);

	/* Free the request/reply descriptor pool */
	free_req_rep_desc_pool(instance);

	/* Free the MPI message pool */
	destroy_mpi2_frame_pool(instance);

	/* Free the MFI frame pool */
	destroy_mfi_frame_pool(instance);

	/* Free all the commands in the cmd_list */
	/* Free the cmd_list buffer itself */
	mrsas_free_cmd_pool(instance);
}
/*
 * ThunderBolt(TB) memory allocations for commands/messages/frames.
 */
int
alloc_space_for_mpi2(struct mrsas_instance *instance)
{
	/* Allocate command pool (memory for cmd_list & individual commands) */
	if (mrsas_alloc_cmd_pool_tbolt(instance)) {
		dev_err(instance->dip, CE_WARN, "Error creating cmd pool");
		return (DDI_FAILURE);
	}

	/* Initialize single reply size and Message size */
	instance->reply_size = MRSAS_THUNDERBOLT_REPLY_SIZE;
	instance->raid_io_msg_size = MRSAS_THUNDERBOLT_MSG_SIZE;

	instance->max_sge_in_main_msg = (MRSAS_THUNDERBOLT_MSG_SIZE -
	    (sizeof (MPI2_RAID_SCSI_IO_REQUEST) -
	    sizeof (MPI2_SGE_IO_UNION))) / sizeof (MPI2_SGE_IO_UNION);
	instance->max_sge_in_chain = (MR_COMMAND_SIZE -
	    MRSAS_THUNDERBOLT_MSG_SIZE) / sizeof (MPI2_SGE_IO_UNION);

	/* Reduce SG count by 1 to take care of group cmds feature in FW */
	instance->max_num_sge = (instance->max_sge_in_main_msg +
	    instance->max_sge_in_chain - 2);
	instance->chain_offset_mpt_msg =
	    offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 16;
	instance->chain_offset_io_req = (MRSAS_THUNDERBOLT_MSG_SIZE -
	    sizeof (MPI2_SGE_IO_UNION)) / 16;
	instance->reply_read_index = 0;
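	/*
	 * SGE budget sketch (the exact numbers depend on the MPI2 structure
	 * sizes, so treat them as illustrative): the main frame holds the
	 * SGEs that fit in MRSAS_THUNDERBOLT_MSG_SIZE after the fixed part
	 * of MPI2_RAID_SCSI_IO_REQUEST, and the per-command chain buffer
	 * (MR_COMMAND_SIZE - MRSAS_THUNDERBOLT_MSG_SIZE bytes) holds the
	 * rest.  E.g. with a 256-byte message, the 1280-byte MR_COMMAND_SIZE
	 * and 16-byte SGEs the chain adds (1280 - 256) / 16 = 64 entries.
	 * One main-frame entry is consumed by the chain element itself and
	 * one more is held back for the firmware group-commands feature,
	 * hence the "- 2" above.
	 */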
	/* Allocate Request and Reply descriptors Array */
	/* Make sure the buffer is aligned to 8 for req/rep descriptor Pool */
	if (alloc_req_rep_desc(instance)) {
		dev_err(instance->dip, CE_WARN,
		    "Error, allocating memory for descriptor-pool");
		goto mpi2_undo_cmd_pool;
	}
	con_log(CL_ANN1, (CE_NOTE, "[request message pool phys addr]0x%x",
	    instance->request_message_pool_phy));

	/* Allocate MFI Frame pool - for MPI-MFI passthru commands */
	if (create_mfi_frame_pool(instance)) {
		dev_err(instance->dip, CE_WARN,
		    "Error, allocating memory for MFI frame-pool");
		goto mpi2_undo_descripter_pool;
	}

	/* Allocate MPI2 Message pool */

	/*
	 * Make sure the buffer is aligned to 256 for raid message packet
	 * create a io request pool and assign one frame to each cmd
	 */
	if (create_mpi2_frame_pool(instance)) {
		dev_err(instance->dip, CE_WARN,
		    "Error, allocating memory for MPI2 Message-pool");
		goto mpi2_undo_mfi_frame_pool;
	}

	con_log(CL_ANN1, (CE_CONT, "[max_sge_in_main_msg]0x%x",
	    instance->max_sge_in_main_msg));
	con_log(CL_ANN1, (CE_CONT, "[max_sge_in_chain]0x%x",
	    instance->max_sge_in_chain));
	con_log(CL_ANN1, (CE_CONT,
	    "[max_sge]0x%x", instance->max_num_sge));
	con_log(CL_ANN1, (CE_CONT, "[chain_offset_mpt_msg]0x%x",
	    instance->chain_offset_mpt_msg));
	con_log(CL_ANN1, (CE_CONT, "[chain_offset_io_req]0x%x",
	    instance->chain_offset_io_req));

	/* Allocate additional dma buffer */
	if (mrsas_tbolt_alloc_additional_dma_buffer(instance)) {
		dev_err(instance->dip, CE_WARN,
		    "Error, allocating tbolt additional DMA buffer");
		goto mpi2_undo_message_pool;
	}

	return (DDI_SUCCESS);

mpi2_undo_message_pool:
	destroy_mpi2_frame_pool(instance);

mpi2_undo_mfi_frame_pool:
	destroy_mfi_frame_pool(instance);

mpi2_undo_descripter_pool:
	free_req_rep_desc_pool(instance);

mpi2_undo_cmd_pool:
	mrsas_free_cmd_pool(instance);

	return (DDI_FAILURE);
}
/*
 * mrsas_init_adapter_tbolt - Initialize fusion interface adapter.
 */
int
mrsas_init_adapter_tbolt(struct mrsas_instance *instance)
{
	/*
	 * Reduce the max supported cmds by 1. This is to ensure that the
	 * reply_q_sz (1 more than the max cmd that driver may send)
	 * does not exceed max cmds that the FW can support
	 */
	if (instance->max_fw_cmds > 1008) {
		instance->max_fw_cmds = 1008;
		instance->max_fw_cmds = instance->max_fw_cmds - 1;
	}

	con_log(CL_ANN, (CE_NOTE, "mrsas_init_adapter_tbolt: "
	    "instance->max_fw_cmds 0x%X.", instance->max_fw_cmds));

	/* create a pool of commands */
	if (alloc_space_for_mpi2(instance) != DDI_SUCCESS) {
		dev_err(instance->dip, CE_WARN,
		    "alloc_space_for_mpi2() failed.");

		return (DDI_FAILURE);
	}

	/* Send ioc init message */
	/* NOTE: the issue_init call does FMA checking already. */
	if (mrsas_issue_init_mpi2(instance) != DDI_SUCCESS) {
		dev_err(instance->dip, CE_WARN,
		    "mrsas_issue_init_mpi2() failed.");

		goto fail_init_fusion;
	}

	instance->unroll.alloc_space_mpi2 = 1;

	con_log(CL_ANN, (CE_NOTE,
	    "mrsas_init_adapter_tbolt: SUCCESSFUL"));

	return (DDI_SUCCESS);

fail_init_fusion:
	free_space_for_mpi2(instance);

	return (DDI_FAILURE);
}
int
mrsas_issue_init_mpi2(struct mrsas_instance *instance)
{
	dma_obj_t init2_dma_obj;
	int ret_val = DDI_SUCCESS;

	/* allocate DMA buffer for IOC INIT message */
	init2_dma_obj.size = sizeof (Mpi2IOCInitRequest_t);
	init2_dma_obj.dma_attr = mrsas_generic_dma_attr;
	init2_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
	init2_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
	init2_dma_obj.dma_attr.dma_attr_sgllen = 1;
	init2_dma_obj.dma_attr.dma_attr_align = 256;

	if (mrsas_alloc_dma_obj(instance, &init2_dma_obj,
	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
		dev_err(instance->dip, CE_WARN, "mr_sas_issue_init_mpi2 "
		    "could not allocate data transfer buffer.");
		return (DDI_FAILURE);
	}
	(void) memset(init2_dma_obj.buffer, 2, sizeof (Mpi2IOCInitRequest_t));

	con_log(CL_ANN1, (CE_NOTE,
	    "mrsas_issue_init_mpi2 _phys adr: %x",
	    init2_dma_obj.dma_cookie[0].dmac_address));

	/* Initialize and send ioc init message */
	ret_val = mrsas_tbolt_ioc_init(instance, &init2_dma_obj);
	if (ret_val == DDI_FAILURE) {
		con_log(CL_ANN1, (CE_WARN,
		    "mrsas_issue_init_mpi2: Failed"));
		goto fail_init_mpi2;
	}

	/* free IOC init DMA buffer */
	if (mrsas_free_dma_obj(instance, init2_dma_obj)
	    != DDI_SUCCESS) {
		con_log(CL_ANN1, (CE_WARN,
		    "mrsas_issue_init_mpi2: Free Failed"));
		return (DDI_FAILURE);
	}

	/* Get/Check and sync ld_map info */
	instance->map_id = 0;
	if (mrsas_tbolt_check_map_info(instance) == DDI_SUCCESS)
		(void) mrsas_tbolt_sync_map_info(instance);

	/* No mrsas_cmd to send, so send NULL. */
	if (mrsas_common_check(instance, NULL) != DDI_SUCCESS)
		goto fail_init_mpi2;

	con_log(CL_ANN, (CE_NOTE,
	    "mrsas_issue_init_mpi2: SUCCESSFUL"));

	return (DDI_SUCCESS);

fail_init_mpi2:
	(void) mrsas_free_dma_obj(instance, init2_dma_obj);

	return (DDI_FAILURE);
}
static int
mrsas_tbolt_ioc_init(struct mrsas_instance *instance, dma_obj_t *mpi2_dma_obj)
{
	int		numbytes;
	uint16_t	flags;
	clock_t		timeout;
	struct mrsas_init_frame2	*mfiFrameInit2;
	struct mrsas_header		*frame_hdr;
	Mpi2IOCInitRequest_t		*init;
	struct mrsas_cmd		*cmd = NULL;
	struct mrsas_drv_ver		drv_ver_info;
	MRSAS_REQUEST_DESCRIPTOR_UNION	req_desc;

	con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	con_log(CL_ANN1, (CE_CONT, " mfiFrameInit2 len = %x\n",
	    (int)sizeof (*mfiFrameInit2)));
	con_log(CL_ANN1, (CE_CONT, " MPI len = %x\n", (int)sizeof (*init)));
	con_log(CL_ANN1, (CE_CONT, " mfiFrameInit2 len = %x\n",
	    (int)sizeof (struct mrsas_init_frame2)));
	con_log(CL_ANN1, (CE_CONT, " MPI len = %x\n",
	    (int)sizeof (Mpi2IOCInitRequest_t)));

	init = (Mpi2IOCInitRequest_t *)mpi2_dma_obj->buffer;
	numbytes = sizeof (*init);
	bzero(init, numbytes);

	ddi_put8(mpi2_dma_obj->acc_handle, &init->Function,
	    MPI2_FUNCTION_IOC_INIT);

	ddi_put8(mpi2_dma_obj->acc_handle, &init->WhoInit,
	    MPI2_WHOINIT_HOST_DRIVER);

	/* set MsgVersion and HeaderVersion host driver was built with */
	ddi_put16(mpi2_dma_obj->acc_handle, &init->MsgVersion,
	    MPI2_VERSION);

	ddi_put16(mpi2_dma_obj->acc_handle, &init->HeaderVersion,
	    MPI2_HEADER_VERSION);

	ddi_put16(mpi2_dma_obj->acc_handle, &init->SystemRequestFrameSize,
	    instance->raid_io_msg_size / 4);

	ddi_put16(mpi2_dma_obj->acc_handle, &init->ReplyFreeQueueDepth,
	    0);

	ddi_put16(mpi2_dma_obj->acc_handle,
	    &init->ReplyDescriptorPostQueueDepth,
	    instance->reply_q_depth);

	/*
	 * These addresses are set using the DMA cookie addresses from when
	 * the memory was allocated. Sense buffer hi address should be 0.
	 * ddi_put32(accessp, &init->SenseBufferAddressHigh, 0);
	 */
	ddi_put32(mpi2_dma_obj->acc_handle,
	    &init->SenseBufferAddressHigh, 0);

	ddi_put64(mpi2_dma_obj->acc_handle,
	    (uint64_t *)&init->SystemRequestFrameBaseAddress,
	    instance->io_request_frames_phy);

	ddi_put64(mpi2_dma_obj->acc_handle,
	    &init->ReplyDescriptorPostQueueAddress,
	    instance->reply_frame_pool_phy);

	ddi_put64(mpi2_dma_obj->acc_handle,
	    &init->ReplyFreeQueueAddress, 0);

	cmd = instance->cmd_list[0];
	if (cmd == NULL)
		return (DDI_FAILURE);
	cmd->retry_count_for_ocr = 0;
	cmd->drv_pkt_time = 0;

	mfiFrameInit2 = (struct mrsas_init_frame2 *)cmd->scsi_io_request;
	con_log(CL_ANN1, (CE_CONT, "[mfi vaddr]%p", (void *)mfiFrameInit2));

	frame_hdr = &cmd->frame->hdr;

	ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status,
	    MFI_CMD_STATUS_POLL_MODE);

	flags = ddi_get16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags);

	flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags);

	con_log(CL_ANN, (CE_CONT,
	    "mrsas_tbolt_ioc_init: SMID:%x\n", cmd->SMID));

	/* Init the MFI Header */
	ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
	    &mfiFrameInit2->cmd, MFI_CMD_OP_INIT);

	con_log(CL_ANN1, (CE_CONT, "[CMD]%x", mfiFrameInit2->cmd));

	ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
	    &mfiFrameInit2->cmd_status,
	    MFI_STAT_INVALID_STATUS);

	con_log(CL_ANN1, (CE_CONT, "[Status]%x", mfiFrameInit2->cmd_status));

	ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
	    &mfiFrameInit2->queue_info_new_phys_addr_lo,
	    mpi2_dma_obj->dma_cookie[0].dmac_address);

	ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
	    &mfiFrameInit2->data_xfer_len,
	    sizeof (Mpi2IOCInitRequest_t));

	con_log(CL_ANN1, (CE_CONT, "[reply q desc addr]%x",
	    (int)init->ReplyDescriptorPostQueueAddress));

	/* fill driver version information */
	fill_up_drv_ver(&drv_ver_info);

	/* allocate the driver version data transfer buffer */
	instance->drv_ver_dma_obj.size = sizeof (drv_ver_info.drv_ver);
	instance->drv_ver_dma_obj.dma_attr = mrsas_generic_dma_attr;
	instance->drv_ver_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
	instance->drv_ver_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
	instance->drv_ver_dma_obj.dma_attr.dma_attr_sgllen = 1;
	instance->drv_ver_dma_obj.dma_attr.dma_attr_align = 1;

	if (mrsas_alloc_dma_obj(instance, &instance->drv_ver_dma_obj,
	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
		dev_err(instance->dip, CE_WARN,
		    "fusion init: Could not allocate driver version buffer.");
		return (DDI_FAILURE);
	}
	/* copy driver version to dma buffer */
	bzero(instance->drv_ver_dma_obj.buffer, sizeof (drv_ver_info.drv_ver));
	ddi_rep_put8(cmd->frame_dma_obj.acc_handle,
	    (uint8_t *)drv_ver_info.drv_ver,
	    (uint8_t *)instance->drv_ver_dma_obj.buffer,
	    sizeof (drv_ver_info.drv_ver), DDI_DEV_AUTOINCR);

	/* send driver version physical address to firmware */
	ddi_put64(cmd->frame_dma_obj.acc_handle, &mfiFrameInit2->driverversion,
	    instance->drv_ver_dma_obj.dma_cookie[0].dmac_address);

	con_log(CL_ANN1, (CE_CONT, "[MPIINIT2 frame Phys addr ]0x%x len = %x",
	    mfiFrameInit2->queue_info_new_phys_addr_lo,
	    (int)sizeof (Mpi2IOCInitRequest_t)));

	con_log(CL_ANN1, (CE_CONT, "[Length]%x", mfiFrameInit2->data_xfer_len));

	con_log(CL_ANN1, (CE_CONT, "[MFI frame Phys Address]%x len = %x",
	    cmd->scsi_io_request_phys_addr,
	    (int)sizeof (struct mrsas_init_frame2)));

	/* disable interrupts before sending INIT2 frame */
	instance->func_ptr->disable_intr(instance);

	req_desc.Words = cmd->scsi_io_request_phys_addr;
	req_desc.MFAIo.RequestFlags =
	    (MPI2_REQ_DESCRIPT_FLAGS_MFA << MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);

	cmd->request_desc = &req_desc;

	/* issue the init frame */

	mutex_enter(&instance->reg_write_mtx);
	WR_IB_LOW_QPORT((uint32_t)(req_desc.Words), instance);
	WR_IB_HIGH_QPORT((uint32_t)(req_desc.Words >> 32), instance);
	mutex_exit(&instance->reg_write_mtx);
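	/*
	 * The 64-bit request descriptor is delivered as two 32-bit MMIO
	 * stores (low queue port, then high queue port).  reg_write_mtx
	 * keeps the pair atomic with respect to other threads issuing
	 * descriptors, so the low/high halves of two descriptors cannot
	 * interleave at the hardware queue ports.
	 */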
	con_log(CL_ANN1, (CE_CONT, "[cmd = %d] ", frame_hdr->cmd));
	con_log(CL_ANN1, (CE_CONT, "[cmd Status= %x] ",
	    frame_hdr->cmd_status));

	timeout = drv_usectohz(MFI_POLL_TIMEOUT_SECS * MICROSEC);
	do {
		if (ddi_get8(cmd->frame_dma_obj.acc_handle,
		    &mfiFrameInit2->cmd_status) != MFI_CMD_STATUS_POLL_MODE)
			break;
		delay(1);
		timeout--;
	} while (timeout > 0);

	if (ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle,
	    &mfiFrameInit2->cmd_status) == 0) {
		con_log(CL_ANN, (CE_NOTE, "INIT2 Success"));
	} else {
		con_log(CL_ANN, (CE_WARN, "INIT2 Fail"));
		mrsas_dump_reply_desc(instance);
		goto fail_ioc_init;
	}

	mrsas_dump_reply_desc(instance);

	instance->unroll.verBuff = 1;

	con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_ioc_init: SUCCESSFUL"));

	return (DDI_SUCCESS);

fail_ioc_init:
	(void) mrsas_free_dma_obj(instance, instance->drv_ver_dma_obj);

	return (DDI_FAILURE);
}
static int
wait_for_outstanding_poll_io(struct mrsas_instance *instance)
{
	int i;
	uint32_t wait_time = dump_io_wait_time;

	for (i = 0; i < wait_time; i++) {
		/*
		 * Check For Outstanding poll Commands
		 * except ldsync command and aen command
		 */
		if (instance->fw_outstanding <= 2) {
			break;
		}
		drv_usecwait(MILLISEC);
		/* complete commands from reply queue */
		(void) mr_sas_tbolt_process_outstanding_cmd(instance);
	}
	if (instance->fw_outstanding > 2) {
		return (1);
	}
	return (0);
}
/*
 * Visible to the external world via the transport structure.
 */
int
mrsas_tbolt_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	struct mrsas_instance	*instance = ADDR2MR(ap);
	struct scsa_cmd		*acmd = PKT2CMD(pkt);
	struct mrsas_cmd	*cmd = NULL;
	uchar_t			cmd_done = 0;

	con_log(CL_DLEVEL1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
	if (instance->deadadapter == 1) {
		dev_err(instance->dip, CE_WARN,
		    "mrsas_tran_start:TBOLT return TRAN_FATAL_ERROR "
		    "for IO, as the HBA doesn't take any more IOs");
		if (pkt) {
			pkt->pkt_reason = CMD_DEV_GONE;
			pkt->pkt_statistics = STAT_DISCON;
		}
		return (TRAN_FATAL_ERROR);
	}
	if (instance->adapterresetinprogress) {
		con_log(CL_ANN, (CE_NOTE, "Reset flag set, "
		    "returning mfi_pkt and setting TRAN_BUSY\n"));
		return (TRAN_BUSY);
	}
	(void) mrsas_tbolt_prepare_pkt(acmd);

	cmd = mrsas_tbolt_build_cmd(instance, ap, pkt, &cmd_done);

	/*
	 * Check if the command is already completed by the mrsas_build_cmd()
	 * routine. In which case the busy_flag would be clear and scb will be
	 * NULL and appropriate reason provided in pkt_reason field
	 */
	if (cmd_done) {
		pkt->pkt_reason = CMD_CMPLT;
		pkt->pkt_scbp[0] = STATUS_GOOD;
		pkt->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET
		    | STATE_SENT_CMD;
		if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && pkt->pkt_comp) {
			(*pkt->pkt_comp)(pkt);
		}

		return (TRAN_ACCEPT);
	}

	if (cmd == NULL) {
		return (TRAN_BUSY);
	}

	if ((pkt->pkt_flags & FLAG_NOINTR) == 0) {
		if (instance->fw_outstanding > instance->max_fw_cmds) {
			dev_err(instance->dip, CE_WARN,
			    "Command Queue Full... Returning BUSY");
			DTRACE_PROBE2(tbolt_start_tran_err,
			    uint16_t, instance->fw_outstanding,
			    uint16_t, instance->max_fw_cmds);
			return_raid_msg_pkt(instance, cmd);
			return (TRAN_BUSY);
		}

		/* Synchronize the Cmd frame for the controller */
		(void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 0, 0,
		    DDI_DMA_SYNC_FORDEV);

		con_log(CL_ANN, (CE_CONT, "tbolt_issue_cmd: SCSI CDB[0]=0x%x "
		    "cmd->index:0x%x SMID 0x%x\n", pkt->pkt_cdbp[0],
		    cmd->index, cmd->SMID));

		instance->func_ptr->issue_cmd(cmd, instance);
	} else {
		instance->func_ptr->issue_cmd(cmd, instance);
		(void) wait_for_outstanding_poll_io(instance);
		(void) mrsas_common_check(instance, cmd);
		DTRACE_PROBE2(tbolt_start_nointr_done,
		    uint8_t, cmd->frame->hdr.cmd,
		    uint8_t, cmd->frame->hdr.cmd_status);
	}

	return (TRAN_ACCEPT);
}
/*
 * mrsas_tbolt_prepare_pkt:
 * the pkt may have been resubmitted or just reused so
 * initialize some fields and do some checks.
 */
static int
mrsas_tbolt_prepare_pkt(struct scsa_cmd *acmd)
{
	struct scsi_pkt	*pkt = CMD2PKT(acmd);

	/*
	 * Reinitialize some fields that need it; the packet may
	 * have been resubmitted
	 */
	pkt->pkt_reason = CMD_CMPLT;
	pkt->pkt_state = 0;
	pkt->pkt_statistics = 0;
	pkt->pkt_resid = 0;

	/*
	 * zero status byte
	 */
	*(pkt->pkt_scbp) = 0;

	return (0);
}
static uint32_t
mr_sas_tbolt_build_sgl(struct mrsas_instance *instance,
    struct scsa_cmd *acmd,
    struct mrsas_cmd *cmd,
    Mpi2RaidSCSIIORequest_t *scsi_raid_io,
    uint32_t *datalen)
{
	uint32_t	i, j;
	uint32_t	numElements, endElement;
	uint32_t	sg_to_process;
	uint16_t	MaxSGEs;
	Mpi25IeeeSgeChain64_t	*ieeeChainElement = NULL;
	Mpi25IeeeSgeChain64_t	*scsi_raid_io_sgl_ieee = NULL;
	ddi_acc_handle_t acc_handle =
	    instance->mpi2_frame_pool_dma_obj.acc_handle;

	con_log(CL_ANN1, (CE_NOTE,
	    "chkpnt: Building Chained SGL :%d", __LINE__));

	/* Calculate SGE size in number of Words(32bit) */
	/* Clear the datalen before updating it. */
	*datalen = 0;

	MaxSGEs = instance->max_sge_in_main_msg;

	ddi_put16(acc_handle, &scsi_raid_io->SGLFlags,
	    MPI2_SGE_FLAGS_64_BIT_ADDRESSING);

	/* set data transfer flag. */
	if (acmd->cmd_flags & CFLAG_DMASEND) {
		ddi_put32(acc_handle, &scsi_raid_io->Control,
		    MPI2_SCSIIO_CONTROL_WRITE);
	} else {
		ddi_put32(acc_handle, &scsi_raid_io->Control,
		    MPI2_SCSIIO_CONTROL_READ);
	}

	numElements = acmd->cmd_cookiecnt;

	con_log(CL_DLEVEL1, (CE_NOTE, "[SGE Count]:%x", numElements));

	if (numElements > instance->max_num_sge) {
		con_log(CL_ANN, (CE_NOTE,
		    "[Max SGE Count Exceeded]:%x", numElements));
		return (numElements);
	}

	ddi_put8(acc_handle, &scsi_raid_io->RaidContext.numSGE,
	    (uint8_t)numElements);

	/* set end element in main message frame */
	endElement = (numElements <= MaxSGEs) ? numElements : (MaxSGEs - 1);

	/* prepare the scatter-gather list for the firmware */
	scsi_raid_io_sgl_ieee =
	    (Mpi25IeeeSgeChain64_t *)&scsi_raid_io->SGL.IeeeChain;

	if (instance->gen3) {
		Mpi25IeeeSgeChain64_t *sgl_ptr_end = scsi_raid_io_sgl_ieee;
		sgl_ptr_end += instance->max_sge_in_main_msg - 1;

		ddi_put8(acc_handle, &sgl_ptr_end->Flags, 0);
	}

	for (i = 0; i < endElement; i++, scsi_raid_io_sgl_ieee++) {
		ddi_put64(acc_handle, &scsi_raid_io_sgl_ieee->Address,
		    acmd->cmd_dmacookies[i].dmac_laddress);

		ddi_put32(acc_handle, &scsi_raid_io_sgl_ieee->Length,
		    acmd->cmd_dmacookies[i].dmac_size);

		ddi_put8(acc_handle, &scsi_raid_io_sgl_ieee->Flags, 0);

		if (instance->gen3) {
			if (i == (numElements - 1)) {
				ddi_put8(acc_handle,
				    &scsi_raid_io_sgl_ieee->Flags,
				    IEEE_SGE_FLAGS_END_OF_LIST);
			}
		}

		*datalen += acmd->cmd_dmacookies[i].dmac_size;

		con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Address]: %" PRIx64,
		    scsi_raid_io_sgl_ieee->Address));
		con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Length]:%x",
		    scsi_raid_io_sgl_ieee->Length));
		con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Flags]:%x",
		    scsi_raid_io_sgl_ieee->Flags));
	}

	ddi_put8(acc_handle, &scsi_raid_io->ChainOffset, 0);

	/* check if chained SGL required */
	if (i < numElements) {

		con_log(CL_ANN1, (CE_NOTE, "[Chain Element index]:%x", i));

		if (instance->gen3) {
			uint16_t ioFlags =
			    ddi_get16(acc_handle, &scsi_raid_io->IoFlags);

			if ((ioFlags &
			    MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) !=
			    MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) {
				ddi_put8(acc_handle, &scsi_raid_io->ChainOffset,
				    (U8)instance->chain_offset_io_req);
			} else {
				ddi_put8(acc_handle,
				    &scsi_raid_io->ChainOffset, 0);
			}
		} else {
			ddi_put8(acc_handle, &scsi_raid_io->ChainOffset,
			    (U8)instance->chain_offset_io_req);
		}

		/* prepare physical chain element */
		ieeeChainElement = scsi_raid_io_sgl_ieee;

		ddi_put8(acc_handle, &ieeeChainElement->NextChainOffset, 0);

		if (instance->gen3) {
			ddi_put8(acc_handle, &ieeeChainElement->Flags,
			    IEEE_SGE_FLAGS_CHAIN_ELEMENT);
		} else {
			ddi_put8(acc_handle, &ieeeChainElement->Flags,
			    (IEEE_SGE_FLAGS_CHAIN_ELEMENT |
			    MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR));
		}

		ddi_put32(acc_handle, &ieeeChainElement->Length,
		    (sizeof (MPI2_SGE_IO_UNION) * (numElements - i)));

		ddi_put64(acc_handle, &ieeeChainElement->Address,
		    (U64)cmd->sgl_phys_addr);

		sg_to_process = numElements - i;

		con_log(CL_ANN1, (CE_NOTE,
		    "[Additional SGE Count]:%x", endElement));

		/* point to the chained SGL buffer */
		scsi_raid_io_sgl_ieee = (Mpi25IeeeSgeChain64_t *)cmd->sgl;

		/* build rest of the SGL in chained buffer */
		for (j = 0; j < sg_to_process; j++, scsi_raid_io_sgl_ieee++) {
			con_log(CL_DLEVEL3, (CE_NOTE, "[remaining SGL]:%x", i));

			ddi_put64(acc_handle, &scsi_raid_io_sgl_ieee->Address,
			    acmd->cmd_dmacookies[i].dmac_laddress);

			ddi_put32(acc_handle, &scsi_raid_io_sgl_ieee->Length,
			    acmd->cmd_dmacookies[i].dmac_size);

			ddi_put8(acc_handle, &scsi_raid_io_sgl_ieee->Flags, 0);

			if (instance->gen3) {
				if (i == (numElements - 1)) {
					ddi_put8(acc_handle,
					    &scsi_raid_io_sgl_ieee->Flags,
					    IEEE_SGE_FLAGS_END_OF_LIST);
				}
			}

			*datalen += acmd->cmd_dmacookies[i].dmac_size;

			con_log(CL_DLEVEL1, (CE_NOTE,
			    "[SGL Address]: %" PRIx64,
			    scsi_raid_io_sgl_ieee->Address));
			con_log(CL_DLEVEL1, (CE_NOTE,
			    "[SGL Length]:%x", scsi_raid_io_sgl_ieee->Length));
			con_log(CL_DLEVEL1, (CE_NOTE,
			    "[SGL Flags]:%x", scsi_raid_io_sgl_ieee->Flags));

			i++;
		}
	}

	return (0);
} /* end of BuildScatterGather */
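/*
 * SGL layout recap (follows from the code above): up to endElement IEEE
 * SGEs are placed inline in the message frame; when the cookie count
 * exceeds what fits, the last inline slot becomes a chain element whose
 * Address points at cmd->sgl, the per-command chain buffer carved out of
 * the MPI2 frame pool, and the remaining cookies are written there.  On
 * gen3 controllers the final SGE is additionally tagged with
 * IEEE_SGE_FLAGS_END_OF_LIST.
 */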
static struct mrsas_cmd *
mrsas_tbolt_build_cmd(struct mrsas_instance *instance, struct scsi_address *ap,
    struct scsi_pkt *pkt, uchar_t *cmd_done)
{
	uint8_t		fp_possible = 0;
	uint32_t	index;
	uint32_t	lba_count = 0;
	uint32_t	start_lba_hi = 0;
	uint32_t	start_lba_lo = 0;
	uint32_t	datalen;
	ddi_acc_handle_t acc_handle =
	    instance->mpi2_frame_pool_dma_obj.acc_handle;
	struct mrsas_cmd		*cmd = NULL;
	struct scsa_cmd			*acmd = PKT2CMD(pkt);
	MRSAS_REQUEST_DESCRIPTOR_UNION	*ReqDescUnion;
	Mpi2RaidSCSIIORequest_t		*scsi_raid_io;
	struct IO_REQUEST_INFO		io_info;
	MR_FW_RAID_MAP_ALL		*local_map_ptr;
	uint16_t			pd_cmd_cdblen;

	con_log(CL_DLEVEL1, (CE_NOTE,
	    "chkpnt: Entered mrsas_tbolt_build_cmd:%d", __LINE__));

	/* find out if this is logical or physical drive command.  */
	acmd->islogical = MRDRV_IS_LOGICAL(ap);
	acmd->device_id = MAP_DEVICE_ID(instance, ap);

	*cmd_done = 0;

	/* get the command packet */
	if (!(cmd = get_raid_msg_pkt(instance))) {
		DTRACE_PROBE2(tbolt_build_cmd_mfi_err, uint16_t,
		    instance->fw_outstanding, uint16_t, instance->max_fw_cmds);
		return (NULL);
	}

	index = cmd->index;
	ReqDescUnion = mr_sas_get_request_descriptor(instance, index);
	ReqDescUnion->Words = 0;
	ReqDescUnion->SCSIIO.SMID = cmd->SMID;
	ReqDescUnion->SCSIIO.RequestFlags =
	    (MPI2_REQ_DESCRIPT_FLAGS_LD_IO <<
	    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);

	cmd->request_desc = ReqDescUnion;
	cmd->pkt = pkt;
	cmd->cmd = acmd;

	DTRACE_PROBE4(tbolt_build_cmd, uint8_t, pkt->pkt_cdbp[0],
	    ulong_t, acmd->cmd_dmacount, ulong_t, acmd->cmd_dma_len,
	    uint16_t, acmd->device_id);

	/* lets get the command directions */
	if (acmd->cmd_flags & CFLAG_DMASEND) {
		if (acmd->cmd_flags & CFLAG_CONSISTENT) {
			(void) ddi_dma_sync(acmd->cmd_dmahandle,
			    acmd->cmd_dma_offset, acmd->cmd_dma_len,
			    DDI_DMA_SYNC_FORDEV);
		}
	} else if (acmd->cmd_flags & ~CFLAG_DMASEND) {
		if (acmd->cmd_flags & CFLAG_CONSISTENT) {
			(void) ddi_dma_sync(acmd->cmd_dmahandle,
			    acmd->cmd_dma_offset, acmd->cmd_dma_len,
			    DDI_DMA_SYNC_FORCPU);
		}
	} else {
		con_log(CL_ANN, (CE_NOTE, "NO DMA"));
	}

	/* get SCSI_IO raid message frame pointer */
	scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;

	/* zero out SCSI_IO raid message frame */
	bzero(scsi_raid_io, sizeof (Mpi2RaidSCSIIORequest_t));

	/* Set the ldTargetId set by BuildRaidContext() */
	ddi_put16(acc_handle, &scsi_raid_io->RaidContext.ldTargetId,
	    acmd->device_id);

	/* Copy CDB to scsi_io_request message frame */
	ddi_rep_put8(acc_handle,
	    (uint8_t *)pkt->pkt_cdbp, (uint8_t *)scsi_raid_io->CDB.CDB32,
	    acmd->cmd_cdblen, DDI_DEV_AUTOINCR);

	/*
	 * Just the CDB length, rest of the Flags are zero
	 * This will be modified later.
	 */
	ddi_put16(acc_handle, &scsi_raid_io->IoFlags, acmd->cmd_cdblen);

	pd_cmd_cdblen = acmd->cmd_cdblen;

	if (acmd->islogical) {

		switch (pkt->pkt_cdbp[0]) {
		case SCMD_READ:
		case SCMD_WRITE:
		case SCMD_READ_G1:
		case SCMD_WRITE_G1:
		case SCMD_READ_G4:
		case SCMD_WRITE_G4:
		case SCMD_READ_G5:
		case SCMD_WRITE_G5:

			/* Initialize sense Information */
			if (cmd->sense1 == NULL) {
				con_log(CL_ANN, (CE_NOTE, "tbolt_build_cmd: "
				    "Sense buffer ptr NULL "));
			}
			bzero(cmd->sense1, SENSE_LENGTH);
			con_log(CL_DLEVEL2, (CE_NOTE, "tbolt_build_cmd "
			    "CDB[0] = %x\n", pkt->pkt_cdbp[0]));

			if (acmd->cmd_cdblen == CDB_GROUP0) {
				lba_count = (uint16_t)(pkt->pkt_cdbp[4]);
				start_lba_lo = ((uint32_t)(pkt->pkt_cdbp[3]) |
				    ((uint32_t)(pkt->pkt_cdbp[2]) << 8) |
				    ((uint32_t)((pkt->pkt_cdbp[1]) & 0x1F)
				    << 16));
			} else if (acmd->cmd_cdblen == CDB_GROUP1) {
				lba_count =
				    (((uint16_t)(pkt->pkt_cdbp[8])) |
				    ((uint16_t)(pkt->pkt_cdbp[7]) << 8));

				start_lba_lo =
				    (((uint32_t)(pkt->pkt_cdbp[5])) |
				    ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
				    ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
				    ((uint32_t)(pkt->pkt_cdbp[2]) << 24));

			} else if (acmd->cmd_cdblen == CDB_GROUP5) {
				lba_count = (
				    ((uint32_t)(pkt->pkt_cdbp[9])) |
				    ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
				    ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
				    ((uint32_t)(pkt->pkt_cdbp[6]) << 24));

				start_lba_lo =
				    (((uint32_t)(pkt->pkt_cdbp[5])) |
				    ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
				    ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
				    ((uint32_t)(pkt->pkt_cdbp[2]) << 24));

			} else if (acmd->cmd_cdblen == CDB_GROUP4) {
				lba_count = (
				    ((uint32_t)(pkt->pkt_cdbp[13])) |
				    ((uint32_t)(pkt->pkt_cdbp[12]) << 8) |
				    ((uint32_t)(pkt->pkt_cdbp[11]) << 16) |
				    ((uint32_t)(pkt->pkt_cdbp[10]) << 24));

				start_lba_lo = (
				    ((uint32_t)(pkt->pkt_cdbp[9])) |
				    ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
				    ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
				    ((uint32_t)(pkt->pkt_cdbp[6]) << 24));

				start_lba_hi = (
				    ((uint32_t)(pkt->pkt_cdbp[5])) |
				    ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
				    ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
				    ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
			}
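			/*
			 * Example of the decoding above (illustrative
			 * values): for a READ(10) CDB
			 * 28 00 00 00 12 34 00 00 08 00, bytes 2-5 give
			 * start_lba_lo = 0x00001234 and bytes 7-8 give
			 * lba_count = 0x0008; start_lba_hi stays 0 because
			 * only 16-byte CDBs carry the upper 32 bits of the
			 * LBA.
			 */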
			if (instance->tbolt &&
			    ((lba_count * 512) > mrsas_tbolt_max_cap_maxxfer)) {
				dev_err(instance->dip, CE_WARN,
				    "IO SECTOR COUNT exceeds "
				    "controller limit 0x%x sectors",
				    lba_count);
			}

			bzero(&io_info, sizeof (struct IO_REQUEST_INFO));
			io_info.ldStartBlock = ((uint64_t)start_lba_hi << 32) |
			    start_lba_lo;
			io_info.numBlocks = lba_count;
			io_info.ldTgtId = acmd->device_id;

			if (acmd->cmd_flags & CFLAG_DMASEND)
				io_info.isRead = 0;
			else
				io_info.isRead = 1;

			/* Acquire SYNC MAP UPDATE lock */
			mutex_enter(&instance->sync_map_mtx);

			local_map_ptr =
			    instance->ld_map[(instance->map_id & 1)];

			if ((MR_TargetIdToLdGet(
			    acmd->device_id, local_map_ptr) >=
			    MAX_LOGICAL_DRIVES) || !instance->fast_path_io) {
				dev_err(instance->dip, CE_NOTE,
				    "Fast Path NOT Possible, "
				    "targetId >= MAX_LOGICAL_DRIVES || "
				    "!instance->fast_path_io");
				fp_possible = 0;
				/* Set Regionlock flags to BYPASS */
				/* io_request->RaidContext.regLockFlags = 0; */
				ddi_put8(acc_handle,
				    &scsi_raid_io->RaidContext.regLockFlags, 0);
			} else {
				if (MR_BuildRaidContext(instance, &io_info,
				    &scsi_raid_io->RaidContext, local_map_ptr))
					fp_possible = io_info.fpOkForIo;
			}

			if (!enable_fp)
				fp_possible = 0;

			con_log(CL_ANN1, (CE_NOTE, "enable_fp %d "
			    "instance->fast_path_io %d fp_possible %d",
			    enable_fp, instance->fast_path_io, fp_possible));

			if (fp_possible) {

				/* Check for DIF enabled LD */
				if (MR_CheckDIF(acmd->device_id,
				    local_map_ptr)) {
					/* Prepare 32 Byte CDB for DIF
					    capable Disk */
					mrsas_tbolt_prepare_cdb(instance,
					    scsi_raid_io->CDB.CDB32,
					    &io_info, scsi_raid_io,
					    start_lba_lo);
				} else {
					mrsas_tbolt_set_pd_lba(
					    scsi_raid_io->CDB.CDB32,
					    sizeof (scsi_raid_io->CDB.CDB32),
					    (uint8_t *)&pd_cmd_cdblen,
					    io_info.pdBlock, io_info.numBlocks);
					ddi_put16(acc_handle,
					    &scsi_raid_io->IoFlags,
					    pd_cmd_cdblen);
				}

				ddi_put8(acc_handle, &scsi_raid_io->Function,
				    MPI2_FUNCTION_SCSI_IO_REQUEST);

				ReqDescUnion->SCSIIO.RequestFlags =
				    (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
				    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);

				if (instance->gen3) {
					uint8_t regLockFlags =
					    ddi_get8(acc_handle,
					    &scsi_raid_io->
					    RaidContext.regLockFlags);
					uint16_t IoFlags =
					    ddi_get16(acc_handle,
					    &scsi_raid_io->IoFlags);

					if (regLockFlags == REGION_TYPE_UNUSED)
						ReqDescUnion->
						    SCSIIO.RequestFlags =
						    (MPI2_REQ_DESCRIPT_FLAGS_NO_LOCK <<
						    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);

					IoFlags |=
					    MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
					regLockFlags |=
					    (MR_RL_FLAGS_GRANT_DESTINATION_CUDA |
					    MR_RL_FLAGS_SEQ_NUM_ENABLE);

					ddi_put8(acc_handle,
					    &scsi_raid_io->ChainOffset, 0);
					ddi_put8(acc_handle,
					    &scsi_raid_io->RaidContext.nsegType,
					    ((0x01 << MPI2_NSEG_FLAGS_SHIFT) |
					    MPI2_TYPE_CUDA));
					ddi_put8(acc_handle,
					    &scsi_raid_io->
					    RaidContext.regLockFlags,
					    regLockFlags);
					ddi_put16(acc_handle,
					    &scsi_raid_io->IoFlags, IoFlags);
				}

				if ((instance->load_balance_info[
				    acmd->device_id].loadBalanceFlag) &&
				    (io_info.isRead)) {
					io_info.devHandle =
					    get_updated_dev_handle(&instance->
					    load_balance_info[acmd->device_id],
					    &io_info);
					cmd->load_balance_flag |=
					    MEGASAS_LOAD_BALANCE_FLAG;
				} else {
					cmd->load_balance_flag &=
					    ~MEGASAS_LOAD_BALANCE_FLAG;
				}

				ReqDescUnion->SCSIIO.DevHandle =
				    io_info.devHandle;
				ddi_put16(acc_handle, &scsi_raid_io->DevHandle,
				    io_info.devHandle);

			} else { /* FP Not Possible */

				ddi_put8(acc_handle, &scsi_raid_io->Function,
				    MPI2_FUNCTION_LD_IO_REQUEST);

				ddi_put16(acc_handle,
				    &scsi_raid_io->DevHandle, acmd->device_id);

				ReqDescUnion->SCSIIO.RequestFlags =
				    (MPI2_REQ_DESCRIPT_FLAGS_LD_IO <<
				    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);

				ddi_put16(acc_handle,
				    &scsi_raid_io->RaidContext.timeoutValue,
				    local_map_ptr->raidMap.fpPdIoTimeoutSec);

				if (instance->gen3) {
					uint8_t regLockFlags =
					    ddi_get8(acc_handle,
					    &scsi_raid_io->
					    RaidContext.regLockFlags);

					if (regLockFlags ==
					    REGION_TYPE_UNUSED) {
						ReqDescUnion->
						    SCSIIO.RequestFlags =
						    (MPI2_REQ_DESCRIPT_FLAGS_NO_LOCK <<
						    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
					}

					regLockFlags |=
					    (MR_RL_FLAGS_GRANT_DESTINATION_CPU0 |
					    MR_RL_FLAGS_SEQ_NUM_ENABLE);

					ddi_put8(acc_handle,
					    &scsi_raid_io->RaidContext.nsegType,
					    ((0x01 << MPI2_NSEG_FLAGS_SHIFT) |
					    MPI2_TYPE_CUDA));
					ddi_put8(acc_handle,
					    &scsi_raid_io->
					    RaidContext.regLockFlags,
					    regLockFlags);
				}
			} /* Not FP */

			/* Release SYNC MAP UPDATE lock */
			mutex_exit(&instance->sync_map_mtx);

			break;

		case 0x35: { /* SCMD_SYNCHRONIZE_CACHE */
			return_raid_msg_pkt(instance, cmd);
			*cmd_done = 1;
			return (NULL);
		}

		case SCMD_MODE_SENSE:
		case SCMD_MODE_SENSE_G1: {
			union scsi_cdb	*cdbp;
			uint16_t	page_code;

			cdbp = (void *)pkt->pkt_cdbp;
			page_code = (uint16_t)cdbp->cdb_un.sg.scsi[0];
			switch (page_code) {
			case 0x3:
			case 0x4:
				(void) mrsas_mode_sense_build(pkt);
				return_raid_msg_pkt(instance, cmd);
				*cmd_done = 1;
				return (NULL);
			}
			/* FALLTHROUGH */
		}

		default:
			/* Pass-through command to logical drive */
			ddi_put8(acc_handle, &scsi_raid_io->Function,
			    MPI2_FUNCTION_LD_IO_REQUEST);
			ddi_put8(acc_handle, &scsi_raid_io->LUN[1], acmd->lun);
			ddi_put16(acc_handle, &scsi_raid_io->DevHandle,
			    acmd->device_id);
			ReqDescUnion->SCSIIO.RequestFlags =
			    (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
			    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
			break;
		}
	} else { /* Physical */
		/* Pass-through command to physical drive */

		/* Acquire SYNC MAP UPDATE lock */
		mutex_enter(&instance->sync_map_mtx);

		local_map_ptr = instance->ld_map[instance->map_id & 1];

		ddi_put8(acc_handle, &scsi_raid_io->Function,
		    MPI2_FUNCTION_SCSI_IO_REQUEST);

		ReqDescUnion->SCSIIO.RequestFlags =
		    (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
		    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);

		ddi_put16(acc_handle, &scsi_raid_io->DevHandle,
		    local_map_ptr->raidMap.
		    devHndlInfo[acmd->device_id].curDevHdl);

		/* Set regLockFlags to REGION_TYPE_BYPASS */
		ddi_put8(acc_handle,
		    &scsi_raid_io->RaidContext.regLockFlags, 0);
		ddi_put64(acc_handle,
		    &scsi_raid_io->RaidContext.regLockRowLBA, 0);
		ddi_put32(acc_handle,
		    &scsi_raid_io->RaidContext.regLockLength, 0);
		ddi_put8(acc_handle,
		    &scsi_raid_io->RaidContext.RAIDFlags,
		    MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD <<
		    MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT);
		ddi_put16(acc_handle,
		    &scsi_raid_io->RaidContext.timeoutValue,
		    local_map_ptr->raidMap.fpPdIoTimeoutSec);
		ddi_put16(acc_handle,
		    &scsi_raid_io->RaidContext.ldTargetId,
		    acmd->device_id);
		ddi_put8(acc_handle,
		    &scsi_raid_io->LUN[1], acmd->lun);

		if (instance->fast_path_io && instance->gen3) {
			uint16_t IoFlags = ddi_get16(acc_handle,
			    &scsi_raid_io->IoFlags);
			IoFlags |= MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
			ddi_put16(acc_handle, &scsi_raid_io->IoFlags, IoFlags);
		}
		ddi_put16(acc_handle, &ReqDescUnion->SCSIIO.DevHandle,
		    local_map_ptr->raidMap.
		    devHndlInfo[acmd->device_id].curDevHdl);

		/* Release SYNC MAP UPDATE lock */
		mutex_exit(&instance->sync_map_mtx);
	}

	/* Set sense buffer physical address/length in scsi_io_request. */
	ddi_put32(acc_handle, &scsi_raid_io->SenseBufferLowAddress,
	    cmd->sense_phys_addr1);
	ddi_put8(acc_handle, &scsi_raid_io->SenseBufferLength, SENSE_LENGTH);

	/* Construct SGL */
	ddi_put8(acc_handle, &scsi_raid_io->SGLOffset0,
	    offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4);

	(void) mr_sas_tbolt_build_sgl(instance, acmd, cmd,
	    scsi_raid_io, &datalen);

	ddi_put32(acc_handle, &scsi_raid_io->DataLength, datalen);

	con_log(CL_ANN, (CE_CONT,
	    "tbolt_build_cmd CDB[0] =%x, TargetID =%x\n",
	    pkt->pkt_cdbp[0], acmd->device_id));
	con_log(CL_DLEVEL1, (CE_CONT,
	    "data length = %x\n",
	    scsi_raid_io->DataLength));
	con_log(CL_DLEVEL1, (CE_CONT,
	    "cdb length = %x\n",
	    acmd->cmd_cdblen));

	return (cmd);
}
uint32_t
tbolt_read_fw_status_reg(struct mrsas_instance *instance)
{
	return ((uint32_t)RD_OB_SCRATCH_PAD_0(instance));
}
void
tbolt_issue_cmd(struct mrsas_cmd *cmd, struct mrsas_instance *instance)
{
	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
	struct scsi_pkt *pkt;

	atomic_inc_16(&instance->fw_outstanding);

	con_log(CL_ANN1,
	    (CE_NOTE, "tbolt_issue_cmd: cmd->[SMID]=0x%X", cmd->SMID));

	con_log(CL_DLEVEL1, (CE_CONT,
	    " [req desc Words] %" PRIx64 " \n", req_desc->Words));
	con_log(CL_DLEVEL1, (CE_CONT,
	    " [req desc low part] %x \n",
	    (uint_t)(req_desc->Words & 0xffffffff)));
	con_log(CL_DLEVEL1, (CE_CONT,
	    " [req desc high part] %x \n", (uint_t)(req_desc->Words >> 32)));

	pkt = cmd->pkt;
	if (pkt) {
		con_log(CL_ANN1, (CE_CONT, "%llx :TBOLT issue_cmd_ppc:"
		    "ISSUED CMD TO FW : called : cmd:"
		    ": %p instance : %p pkt : %p pkt_time : %x\n",
		    gethrtime(), (void *)cmd, (void *)instance,
		    (void *)pkt, cmd->drv_pkt_time));
		if (instance->adapterresetinprogress) {
			cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
			con_log(CL_ANN, (CE_NOTE,
			    "TBOLT Reset the scsi_pkt timer"));
		} else {
			push_pending_mfi_pkt(instance, cmd);
		}

	} else {
		con_log(CL_ANN1, (CE_CONT, "%llx :TBOLT issue_cmd_ppc:"
		    "ISSUED CMD TO FW : called : cmd : %p, instance: %p"
		    "(NO PKT)\n", gethrtime(), (void *)cmd, (void *)instance));
	}

	/* Issue the command to the FW */
	mutex_enter(&instance->reg_write_mtx);
	WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
	WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
	mutex_exit(&instance->reg_write_mtx);
}
/*
 * issue_cmd_in_sync_mode
 */
int
tbolt_issue_cmd_in_sync_mode(struct mrsas_instance *instance,
    struct mrsas_cmd *cmd)
{
	int i;
	uint32_t msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
	struct mrsas_header *hdr;

	hdr = (struct mrsas_header *)&cmd->frame->hdr;

	con_log(CL_ANN1,
	    (CE_NOTE, "tbolt_issue_cmd_in_sync_mode: cmd->[SMID]=0x%X",
	    cmd->SMID));

	if (instance->adapterresetinprogress) {
		cmd->drv_pkt_time = ddi_get16(
		    cmd->frame_dma_obj.acc_handle, &hdr->timeout);
		if (cmd->drv_pkt_time < debug_timeout_g)
			cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
		con_log(CL_ANN, (CE_NOTE, "tbolt_issue_cmd_in_sync_mode:"
		    "RESET-IN-PROGRESS, issue cmd & return."));

		mutex_enter(&instance->reg_write_mtx);
		WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
		WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
		mutex_exit(&instance->reg_write_mtx);

		return (DDI_SUCCESS);
	} else {
		con_log(CL_ANN1, (CE_NOTE,
		    "tbolt_issue_cmd_in_sync_mode: pushing the pkt"));
		push_pending_mfi_pkt(instance, cmd);
	}

	con_log(CL_DLEVEL2, (CE_NOTE,
	    "HighQport offset :%p",
	    (void *)((uintptr_t)(instance)->regmap + IB_HIGH_QPORT)));
	con_log(CL_DLEVEL2, (CE_NOTE,
	    "LowQport offset :%p",
	    (void *)((uintptr_t)(instance)->regmap + IB_LOW_QPORT)));

	cmd->sync_cmd = MRSAS_TRUE;
	cmd->cmd_status = ENODATA;

	mutex_enter(&instance->reg_write_mtx);
	WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
	WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
	mutex_exit(&instance->reg_write_mtx);

	con_log(CL_ANN1, (CE_NOTE,
	    " req desc high part %x", (uint_t)(req_desc->Words >> 32)));
	con_log(CL_ANN1, (CE_NOTE, " req desc low part %x",
	    (uint_t)(req_desc->Words & 0xffffffff)));

	mutex_enter(&instance->int_cmd_mtx);
	for (i = 0; i < msecs && (cmd->cmd_status == ENODATA); i++) {
		cv_wait(&instance->int_cmd_cv, &instance->int_cmd_mtx);
	}
	mutex_exit(&instance->int_cmd_mtx);

	if (i < (msecs - 1)) {
		return (DDI_SUCCESS);
	}
	return (DDI_FAILURE);
}
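
/*
 * Polled issue path.  The frame's cmd_status is preset to
 * MFI_CMD_STATUS_POLL_MODE (0xFF) and the frame is flagged
 * MFI_FRAME_DONT_POST_IN_REPLY_QUEUE, so completion is detected by
 * watching the firmware overwrite cmd_status in host memory rather
 * than by taking an interrupt through the reply queue.
 */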
/*
 * issue_cmd_in_poll_mode
 */
int
tbolt_issue_cmd_in_poll_mode(struct mrsas_instance *instance,
    struct mrsas_cmd *cmd)
{
	int i;
	uint16_t flags;
	uint32_t msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
	struct mrsas_header *frame_hdr;
	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;

	con_log(CL_ANN1,
	    (CE_NOTE, "tbolt_issue_cmd_in_poll_mode: cmd->[SMID]=0x%X",
	    cmd->SMID));

	frame_hdr = (struct mrsas_header *)&cmd->frame->hdr;
	ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status,
	    MFI_CMD_STATUS_POLL_MODE);
	flags = ddi_get16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags);
	flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
	ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags);

	con_log(CL_ANN1, (CE_NOTE, " req desc low part %x",
	    (uint_t)(req_desc->Words & 0xffffffff)));
	con_log(CL_ANN1, (CE_NOTE,
	    " req desc high part %x", (uint_t)(req_desc->Words >> 32)));

	/* issue the frame using inbound queue port */
	mutex_enter(&instance->reg_write_mtx);
	WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
	WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
	mutex_exit(&instance->reg_write_mtx);

	for (i = 0; i < msecs && (
	    ddi_get8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status)
	    == MFI_CMD_STATUS_POLL_MODE); i++) {
		/* wait for cmd_status to change from 0xFF */
		drv_usecwait(MILLISEC); /* wait for 1000 usecs */
	}

	DTRACE_PROBE1(tbolt_complete_poll_cmd, uint8_t, i);

	if (ddi_get8(cmd->frame_dma_obj.acc_handle,
	    &frame_hdr->cmd_status) == MFI_CMD_STATUS_POLL_MODE) {
		con_log(CL_ANN1, (CE_NOTE,
		    " cmd failed %" PRIx64, (req_desc->Words)));
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}
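
/*
 * Interrupt mask helpers.  The outbound interrupt mask register acts
 * as a bit mask of disabled sources: writing the complement of
 * MFI_FUSION_ENABLE_INTERRUPT_MASK unmasks the reply interrupt, and
 * writing all 1s masks everything.  Each write is followed by a dummy
 * read so the posted PCI write reaches the chip before we proceed.
 */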
void
tbolt_enable_intr(struct mrsas_instance *instance)
{
	/* TODO: For Thunderbolt/Invader also clear intr on enable */
	/* writel(~0, &regs->outbound_intr_status); */
	/* readl(&regs->outbound_intr_status); */

	WR_OB_INTR_MASK(~(MFI_FUSION_ENABLE_INTERRUPT_MASK), instance);

	/* dummy read to force PCI flush */
	(void) RD_OB_INTR_MASK(instance);
}
void
tbolt_disable_intr(struct mrsas_instance *instance)
{
	uint32_t mask = 0xFFFFFFFF;

	WR_OB_INTR_MASK(mask, instance);

	/* Dummy readl to force pci flush */
	(void) RD_OB_INTR_MASK(instance);
}
int
tbolt_intr_ack(struct mrsas_instance *instance)
{
	uint32_t status;

	/* check if it is our interrupt */
	status = RD_OB_INTR_STATUS(instance);
	con_log(CL_ANN1, (CE_NOTE,
	    "chkpnt: Entered tbolt_intr_ack status = %d", status));

	if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK)) {
		return (DDI_INTR_UNCLAIMED);
	}

	if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
		ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
		return (DDI_INTR_UNCLAIMED);
	}

	if ((status & 1) || (status & MFI_FUSION_ENABLE_INTERRUPT_MASK)) {
		/* clear the interrupt by writing back the same value */
		WR_OB_INTR_STATUS(status, instance);
		/* dummy READ */
		(void) RD_OB_INTR_STATUS(instance);
	}

	return (DDI_INTR_CLAIMED);
}
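
/*
 * Command pool accessors.  The driver keeps two free lists:
 * cmd_pool_list feeds regular I/O and internal RAID-message commands,
 * while cmd_app_pool_list is a small reserve (MRSAS_APP_RESERVED_CMDS)
 * kept apart for application/MFI pass-through frames, so ioctl traffic
 * does not compete with the I/O path for command slots.
 */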
/*
 * get_raid_msg_pkt : Get a command from the free pool
 * After successful allocation, the caller of this routine
 * must clear the frame buffer (memset to zero) before
 * using the packet further.
 *
 * ***** Note *****
 * After clearing the frame buffer the context id of the
 * frame buffer SHOULD be restored back.
 */
struct mrsas_cmd *
get_raid_msg_pkt(struct mrsas_instance *instance)
{
	mlist_t *head = &instance->cmd_pool_list;
	struct mrsas_cmd *cmd = NULL;

	mutex_enter(&instance->cmd_pool_mtx);
	ASSERT(mutex_owned(&instance->cmd_pool_mtx));

	if (!mlist_empty(head)) {
		cmd = mlist_entry(head->next, struct mrsas_cmd, list);
		mlist_del_init(head->next);
	}
	if (cmd != NULL) {
		cmd->pkt = NULL;
		cmd->retry_count_for_ocr = 0;
		cmd->drv_pkt_time = 0;
	}
	mutex_exit(&instance->cmd_pool_mtx);

	if (cmd != NULL)
		bzero(cmd->scsi_io_request,
		    sizeof (Mpi2RaidSCSIIORequest_t));

	return (cmd);
}
struct mrsas_cmd *
get_raid_msg_mfi_pkt(struct mrsas_instance *instance)
{
	mlist_t *head = &instance->cmd_app_pool_list;
	struct mrsas_cmd *cmd = NULL;

	mutex_enter(&instance->cmd_app_pool_mtx);
	ASSERT(mutex_owned(&instance->cmd_app_pool_mtx));

	if (!mlist_empty(head)) {
		cmd = mlist_entry(head->next, struct mrsas_cmd, list);
		mlist_del_init(head->next);
	}
	if (cmd != NULL) {
		cmd->pkt = NULL;
		cmd->retry_count_for_ocr = 0;
		cmd->drv_pkt_time = 0;
		cmd->request_desc = NULL;
	}

	mutex_exit(&instance->cmd_app_pool_mtx);

	if (cmd != NULL) {
		bzero(cmd->scsi_io_request,
		    sizeof (Mpi2RaidSCSIIORequest_t));
	}

	return (cmd);
}
/*
 * return_raid_msg_pkt : Return a cmd to free command pool
 */
void
return_raid_msg_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
{
	mutex_enter(&instance->cmd_pool_mtx);
	ASSERT(mutex_owned(&instance->cmd_pool_mtx));

	mlist_add_tail(&cmd->list, &instance->cmd_pool_list);

	mutex_exit(&instance->cmd_pool_mtx);
}
void
return_raid_msg_mfi_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
{
	mutex_enter(&instance->cmd_app_pool_mtx);
	ASSERT(mutex_owned(&instance->cmd_app_pool_mtx));

	mlist_add_tail(&cmd->list, &instance->cmd_app_pool_list);

	mutex_exit(&instance->cmd_app_pool_mtx);
}
void
mr_sas_tbolt_build_mfi_cmd(struct mrsas_instance *instance,
    struct mrsas_cmd *cmd)
{
	uint16_t index;
	Mpi2RaidSCSIIORequest_t *scsi_raid_io;
	Mpi25IeeeSgeChain64_t *scsi_raid_io_sgl_ieee;
	MRSAS_REQUEST_DESCRIPTOR_UNION *ReqDescUnion;
	ddi_acc_handle_t acc_handle =
	    instance->mpi2_frame_pool_dma_obj.acc_handle;

	if (!instance->tbolt) {
		con_log(CL_ANN, (CE_NOTE, "Not MFA enabled."));
		return;
	}

	index = cmd->index;

	ReqDescUnion = mr_sas_get_request_descriptor(instance, index);

	if (!ReqDescUnion) {
		con_log(CL_ANN1, (CE_NOTE, "[NULL REQDESC]"));
		return;
	}

	con_log(CL_ANN1, (CE_NOTE, "[SMID]%x", cmd->SMID));

	ReqDescUnion->Words = 0;

	ReqDescUnion->SCSIIO.RequestFlags =
	    (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
	    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);

	ReqDescUnion->SCSIIO.SMID = cmd->SMID;

	cmd->request_desc = ReqDescUnion;

	/* get raid message frame pointer */
	scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;

	if (instance->gen3) {
		Mpi25IeeeSgeChain64_t *sgl_ptr_end = (Mpi25IeeeSgeChain64_t *)
		    &scsi_raid_io->SGL.IeeeChain;
		sgl_ptr_end += instance->max_sge_in_main_msg - 1;
		ddi_put8(acc_handle, &sgl_ptr_end->Flags, 0);
	}

	ddi_put8(acc_handle, &scsi_raid_io->Function,
	    MPI2_FUNCTION_PASSTHRU_IO_REQUEST);

	ddi_put8(acc_handle, &scsi_raid_io->SGLOffset0,
	    offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4);

	ddi_put8(acc_handle, &scsi_raid_io->ChainOffset,
	    (U8)offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 16);

	ddi_put32(acc_handle, &scsi_raid_io->SenseBufferLowAddress,
	    cmd->sense_phys_addr1);

	scsi_raid_io_sgl_ieee =
	    (Mpi25IeeeSgeChain64_t *)&scsi_raid_io->SGL.IeeeChain;

	ddi_put64(acc_handle, &scsi_raid_io_sgl_ieee->Address,
	    (U64)cmd->frame_phys_addr);

	ddi_put8(acc_handle,
	    &scsi_raid_io_sgl_ieee->Flags, (IEEE_SGE_FLAGS_CHAIN_ELEMENT |
	    MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR));
	/* LSI put hardcoded 1024 instead of MEGASAS_MAX_SZ_CHAIN_FRAME. */
	ddi_put32(acc_handle, &scsi_raid_io_sgl_ieee->Length, 1024);

	con_log(CL_ANN1, (CE_NOTE,
	    "[MFI CMD PHY ADDRESS]:%" PRIx64,
	    scsi_raid_io_sgl_ieee->Address));
	con_log(CL_ANN1, (CE_NOTE,
	    "[SGL Length]:%x", scsi_raid_io_sgl_ieee->Length));
	con_log(CL_ANN1, (CE_NOTE, "[SGL Flags]:%x",
	    scsi_raid_io_sgl_ieee->Flags));
}
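
/*
 * Completion is dispatched on the Function byte of the request frame:
 * MPI2_FUNCTION_SCSI_IO_REQUEST (fast-path) falls through into the
 * MPI2_FUNCTION_LD_IO_REQUEST handling after releasing any RAID-1
 * load-balance accounting, and MPI2_FUNCTION_PASSTHRU_IO_REQUEST
 * covers the MFI frames (map sync, AEN and internal sync-mode cmds).
 */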
static void
tbolt_complete_cmd(struct mrsas_instance *instance,
    struct mrsas_cmd *cmd)
{
	uint8_t status;
	uint8_t extStatus;
	uint8_t function;
	uint8_t arm;
	struct scsa_cmd *acmd;
	struct scsi_pkt *pkt;
	struct scsi_arq_status *arqstat;
	Mpi2RaidSCSIIORequest_t *scsi_raid_io;
	LD_LOAD_BALANCE_INFO *lbinfo;
	ddi_acc_handle_t acc_handle =
	    instance->mpi2_frame_pool_dma_obj.acc_handle;

	scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;

	status = ddi_get8(acc_handle, &scsi_raid_io->RaidContext.status);
	extStatus = ddi_get8(acc_handle, &scsi_raid_io->RaidContext.extStatus);

	con_log(CL_DLEVEL3, (CE_NOTE, "status %x", status));
	con_log(CL_DLEVEL3, (CE_NOTE, "extStatus %x", extStatus));

	if (status != MFI_STAT_OK) {
		con_log(CL_ANN, (CE_WARN,
		    "IO Cmd Failed SMID %x", cmd->SMID));
	} else {
		con_log(CL_ANN, (CE_NOTE,
		    "IO Cmd Success SMID %x", cmd->SMID));
	}

	/* regular commands */

	function = ddi_get8(acc_handle, &scsi_raid_io->Function);
	DTRACE_PROBE3(tbolt_complete_cmd, uint8_t, function,
	    uint8_t, status, uint8_t, extStatus);

	switch (function) {

	case MPI2_FUNCTION_SCSI_IO_REQUEST:	/* Fast Path IO. */
		acmd = (struct scsa_cmd *)cmd->cmd;
		lbinfo = &instance->load_balance_info[acmd->device_id];

		if (cmd->load_balance_flag & MEGASAS_LOAD_BALANCE_FLAG) {
			arm = lbinfo->raid1DevHandle[0] ==
			    scsi_raid_io->DevHandle ? 0 : 1;

			lbinfo->scsi_pending_cmds[arm]--;
			cmd->load_balance_flag &= ~MEGASAS_LOAD_BALANCE_FLAG;
		}
		con_log(CL_DLEVEL3, (CE_NOTE,
		    "FastPath IO Completion Success "));
		/* FALLTHRU */

	case MPI2_FUNCTION_LD_IO_REQUEST: {	/* Regular Path IO. */
		acmd = (struct scsa_cmd *)cmd->cmd;
		pkt = (struct scsi_pkt *)CMD2PKT(acmd);

		if (acmd->cmd_flags & CFLAG_DMAVALID) {
			if (acmd->cmd_flags & CFLAG_CONSISTENT) {
				(void) ddi_dma_sync(acmd->cmd_dmahandle,
				    acmd->cmd_dma_offset, acmd->cmd_dma_len,
				    DDI_DMA_SYNC_FORCPU);
			}
		}

		pkt->pkt_reason = CMD_CMPLT;
		pkt->pkt_statistics = 0;
		pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
		    STATE_SENT_CMD | STATE_XFERRED_DATA | STATE_GOT_STATUS;

		con_log(CL_ANN, (CE_CONT, " CDB[0] = %x completed for %s: "
		    "size %lx SMID %x cmd_status %x", pkt->pkt_cdbp[0],
		    ((acmd->islogical) ? "LD" : "PD"),
		    acmd->cmd_dmacount, cmd->SMID, status));

		if (pkt->pkt_cdbp[0] == SCMD_INQUIRY) {
			struct scsi_inquiry *inq;

			if (acmd->cmd_dmacount != 0) {
				bp_mapin(acmd->cmd_buf);
				inq = (struct scsi_inquiry *)
				    acmd->cmd_buf->b_un.b_addr;

				/* don't expose physical drives to OS */
				if (acmd->islogical &&
				    (status == MFI_STAT_OK)) {
					display_scsi_inquiry((caddr_t)inq);
				} else if ((status == MFI_STAT_OK) &&
				    inq->inq_dtype == DTYPE_DIRECT) {
					display_scsi_inquiry((caddr_t)inq);
				} else {
					/* for physical disk */
					status = MFI_STAT_DEVICE_NOT_FOUND;
				}
			}
		}

		switch (status) {
		case MFI_STAT_OK:
			pkt->pkt_scbp[0] = STATUS_GOOD;
			break;
		case MFI_STAT_LD_CC_IN_PROGRESS:
		case MFI_STAT_LD_RECON_IN_PROGRESS:
			pkt->pkt_scbp[0] = STATUS_GOOD;
			break;
		case MFI_STAT_LD_INIT_IN_PROGRESS:
			pkt->pkt_reason = CMD_TRAN_ERR;
			break;
		case MFI_STAT_SCSI_IO_FAILED:
			dev_err(instance->dip, CE_WARN,
			    "tbolt_complete_cmd: scsi_io failed");
			pkt->pkt_reason = CMD_TRAN_ERR;
			break;
		case MFI_STAT_SCSI_DONE_WITH_ERROR:
			con_log(CL_ANN, (CE_WARN,
			    "tbolt_complete_cmd: scsi_done with error"));

			pkt->pkt_reason = CMD_CMPLT;
			((struct scsi_status *)pkt->pkt_scbp)->sts_chk = 1;

			if (pkt->pkt_cdbp[0] == SCMD_TEST_UNIT_READY) {
				con_log(CL_ANN,
				    (CE_WARN, "TEST_UNIT_READY fail"));
			} else {
				pkt->pkt_state |= STATE_ARQ_DONE;
				arqstat = (void *)(pkt->pkt_scbp);
				arqstat->sts_rqpkt_reason = CMD_CMPLT;
				arqstat->sts_rqpkt_resid = 0;
				arqstat->sts_rqpkt_state |=
				    STATE_GOT_BUS | STATE_GOT_TARGET |
				    STATE_SENT_CMD |
				    STATE_XFERRED_DATA;
				*(uint8_t *)&arqstat->sts_rqpkt_status =
				    STATUS_GOOD;
				con_log(CL_ANN1,
				    (CE_NOTE, "Copying Sense data %x",
				    cmd->SMID));

				ddi_rep_get8(acc_handle,
				    (uint8_t *)&(arqstat->sts_sensedata),
				    cmd->sense1,
				    sizeof (struct scsi_extended_sense),
				    DDI_DEV_AUTOINCR);
			}
			break;
		case MFI_STAT_LD_OFFLINE:
			dev_err(instance->dip, CE_WARN,
			    "tbolt_complete_cmd: ld offline "
			    "CDB[0]=0x%x targetId=0x%x devhandle=0x%x",
			    ddi_get8(acc_handle, &scsi_raid_io->CDB.CDB32[0]),
			    ddi_get16(acc_handle,
			    &scsi_raid_io->RaidContext.ldTargetId),
			    ddi_get16(acc_handle, &scsi_raid_io->DevHandle));

			pkt->pkt_reason = CMD_DEV_GONE;
			pkt->pkt_statistics = STAT_DISCON;
			break;
		case MFI_STAT_DEVICE_NOT_FOUND:
			con_log(CL_ANN, (CE_CONT,
			    "tbolt_complete_cmd: device not found error"));
			pkt->pkt_reason = CMD_DEV_GONE;
			pkt->pkt_statistics = STAT_DISCON;
			break;
		case MFI_STAT_LD_LBA_OUT_OF_RANGE:
			pkt->pkt_state |= STATE_ARQ_DONE;
			pkt->pkt_reason = CMD_CMPLT;
			((struct scsi_status *)pkt->pkt_scbp)->sts_chk = 1;

			arqstat = (void *)(pkt->pkt_scbp);
			arqstat->sts_rqpkt_reason = CMD_CMPLT;
			arqstat->sts_rqpkt_resid = 0;
			arqstat->sts_rqpkt_state |= STATE_GOT_BUS
			    | STATE_GOT_TARGET | STATE_SENT_CMD
			    | STATE_XFERRED_DATA;
			*(uint8_t *)&arqstat->sts_rqpkt_status = STATUS_GOOD;

			arqstat->sts_sensedata.es_valid = 1;
			arqstat->sts_sensedata.es_key = KEY_ILLEGAL_REQUEST;
			arqstat->sts_sensedata.es_class = CLASS_EXTENDED_SENSE;

			/*
			 * LOGICAL BLOCK ADDRESS OUT OF RANGE:
			 * ASC: 0x21h; ASCQ: 0x00h;
			 */
			arqstat->sts_sensedata.es_add_code = 0x21;
			arqstat->sts_sensedata.es_qual_code = 0x00;
			break;
		case MFI_STAT_INVALID_CMD:
		case MFI_STAT_INVALID_DCMD:
		case MFI_STAT_INVALID_PARAMETER:
		case MFI_STAT_INVALID_SEQUENCE_NUMBER:
		default:
			dev_err(instance->dip, CE_WARN,
			    "tbolt_complete_cmd: Unknown status!");
			pkt->pkt_reason = CMD_TRAN_ERR;
			break;
		}

		atomic_add_16(&instance->fw_outstanding, (-1));

		(void) mrsas_common_check(instance, cmd);
		if (acmd->cmd_dmahandle) {
			if (mrsas_check_dma_handle(acmd->cmd_dmahandle) !=
			    DDI_SUCCESS) {
				ddi_fm_service_impact(instance->dip,
				    DDI_SERVICE_UNAFFECTED);
				pkt->pkt_reason = CMD_TRAN_ERR;
				pkt->pkt_statistics = 0;
			}
		}

		/* Call the callback routine */
		if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && pkt->pkt_comp)
			(*pkt->pkt_comp)(pkt);

		con_log(CL_ANN1, (CE_NOTE, "Free smid %x", cmd->SMID));

		ddi_put8(acc_handle, &scsi_raid_io->RaidContext.status, 0);
		ddi_put8(acc_handle, &scsi_raid_io->RaidContext.extStatus, 0);

		return_raid_msg_pkt(instance, cmd);
		break;
	}
	case MPI2_FUNCTION_PASSTHRU_IO_REQUEST:	/* MFA command. */

		if (cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO &&
		    cmd->frame->dcmd.mbox.b[1] == 1) {

			mutex_enter(&instance->sync_map_mtx);

			con_log(CL_ANN, (CE_NOTE,
			    "LDMAP sync command SMID RECEIVED 0x%X",
			    cmd->SMID));
			if (cmd->frame->hdr.cmd_status != 0) {
				dev_err(instance->dip, CE_WARN,
				    "map sync failed, status = 0x%x.",
				    cmd->frame->hdr.cmd_status);
			} else {
				instance->map_id++;
				con_log(CL_ANN1, (CE_NOTE,
				    "map sync received, switched map_id to %"
				    PRIu64, instance->map_id));
			}

			if (MR_ValidateMapInfo(
			    instance->ld_map[instance->map_id & 1],
			    instance->load_balance_info)) {
				instance->fast_path_io = 1;
			} else {
				instance->fast_path_io = 0;
			}

			con_log(CL_ANN, (CE_NOTE,
			    "instance->fast_path_io %d",
			    instance->fast_path_io));

			instance->unroll.syncCmd = 0;

			if (instance->map_update_cmd == cmd) {
				return_raid_msg_pkt(instance, cmd);
				atomic_add_16(&instance->fw_outstanding, (-1));
				(void) mrsas_tbolt_sync_map_info(instance);
			}

			con_log(CL_ANN1, (CE_NOTE,
			    "LDMAP sync completed, ldcount=%d",
			    instance->ld_map[instance->map_id & 1]
			    ->raidMap.ldCount));
			mutex_exit(&instance->sync_map_mtx);
			break;
		}

		if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT) {
			con_log(CL_ANN1, (CE_CONT,
			    "AEN command SMID RECEIVED 0x%X",
			    cmd->SMID));
			if ((instance->aen_cmd == cmd) &&
			    (instance->aen_cmd->abort_aen)) {
				con_log(CL_ANN, (CE_WARN, "mrsas_softintr: "
				    "aborted_aen returned"));
			} else {
				atomic_add_16(&instance->fw_outstanding, (-1));
				service_mfi_aen(instance, cmd);
			}
		}

		if (cmd->sync_cmd == MRSAS_TRUE) {
			con_log(CL_ANN1, (CE_CONT,
			    "Sync-mode Command Response SMID RECEIVED 0x%X",
			    cmd->SMID));

			tbolt_complete_cmd_in_sync_mode(instance, cmd);
		} else {
			con_log(CL_ANN, (CE_CONT,
			    "tbolt_complete_cmd: Wrong SMID RECEIVED 0x%X",
			    cmd->SMID));
		}
		break;
	default:
		mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
		ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);

		con_log(CL_ANN,
		    (CE_NOTE, "tbolt_complete_cmd: Unknown Type!!!!!!!!"));
		break;
	}
}
uint_t
mr_sas_tbolt_process_outstanding_cmd(struct mrsas_instance *instance)
{
	uint8_t replyType;
	Mpi2SCSIIOSuccessReplyDescriptor_t *replyDesc;
	Mpi2ReplyDescriptorsUnion_t *desc;
	uint16_t smid;
	union desc_value d_val;
	struct mrsas_cmd *cmd;
	struct mrsas_header *hdr;
	struct scsi_pkt *pkt;

	(void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
	    0, 0, DDI_DMA_SYNC_FORDEV);

	(void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
	    0, 0, DDI_DMA_SYNC_FORCPU);

	desc = instance->reply_frame_pool;
	desc += instance->reply_read_index;

	replyDesc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
	replyType = replyDesc->ReplyFlags &
	    MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;

	if (replyType == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
		return (DDI_INTR_UNCLAIMED);

	if (mrsas_check_dma_handle(instance->mfi_internal_dma_obj.dma_handle)
	    != DDI_SUCCESS) {
		mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
		ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
		con_log(CL_ANN1,
		    (CE_WARN, "mr_sas_tbolt_process_outstanding_cmd(): "
		    "FMA check, returning DDI_INTR_UNCLAIMED"));
		return (DDI_INTR_CLAIMED);
	}

	con_log(CL_ANN1, (CE_NOTE, "Reply Desc = %p Words = %" PRIx64,
	    (void *)desc, desc->Words));

	d_val.word = desc->Words;

	/* Read Reply descriptor */
	while ((d_val.u1.low != 0xffffffff) &&
	    (d_val.u1.high != 0xffffffff)) {

		(void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
		    0, 0, DDI_DMA_SYNC_FORCPU);

		smid = replyDesc->SMID;

		if (!smid || smid > instance->max_fw_cmds + 1) {
			con_log(CL_ANN1, (CE_NOTE,
			    "Reply Desc at Break = %p Words = %" PRIx64,
			    (void *)desc, desc->Words));
			break;
		}

		cmd = instance->cmd_list[smid - 1];
		if (!cmd) {
			con_log(CL_ANN1, (CE_NOTE, "mr_sas_tbolt_process_"
			    "outstanding_cmd: Invalid command "
			    "or Poll command received in completion path"));
		} else {
			mutex_enter(&instance->cmd_pend_mtx);
			if (cmd->sync_cmd == MRSAS_TRUE) {
				hdr = (struct mrsas_header *)&cmd->frame->hdr;
				if (hdr) {
					con_log(CL_ANN1, (CE_NOTE, "mr_sas_"
					    "tbolt_process_outstanding_cmd:"
					    " mlist_del_init(&cmd->list)."));
					mlist_del_init(&cmd->list);
				}
			} else {
				pkt = cmd->pkt;
				if (pkt) {
					con_log(CL_ANN1, (CE_NOTE, "mr_sas_"
					    "tbolt_process_outstanding_cmd:"
					    "mlist_del_init(&cmd->list)."));
					mlist_del_init(&cmd->list);
				}
			}

			mutex_exit(&instance->cmd_pend_mtx);

			tbolt_complete_cmd(instance, cmd);
		}

		/* set it back to all 1s. */
		desc->Words = -1LL;

		instance->reply_read_index++;

		if (instance->reply_read_index >= (instance->reply_q_depth)) {
			con_log(CL_ANN1, (CE_NOTE, "wrap around"));
			instance->reply_read_index = 0;
		}

		/* Get the next reply descriptor */
		if (!instance->reply_read_index)
			desc = instance->reply_frame_pool;
		else
			desc++;

		replyDesc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;

		d_val.word = desc->Words;

		con_log(CL_ANN1, (CE_NOTE,
		    "Next Reply Desc = %p Words = %" PRIx64,
		    (void *)desc, desc->Words));

		replyType = replyDesc->ReplyFlags &
		    MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;

		if (replyType == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
			break;

	} /* End of while loop. */

	/* update replyIndex to FW */
	WR_MPI2_REPLY_POST_INDEX(instance->reply_read_index, instance);

	(void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
	    0, 0, DDI_DMA_SYNC_FORDEV);

	(void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
	    0, 0, DDI_DMA_SYNC_FORCPU);

	return (DDI_INTR_CLAIMED);
}
/*
 * complete_cmd_in_sync_mode -	Completes an internal command
 * @instance:			Adapter soft state
 * @cmd:			Command to be completed
 *
 * The issue_cmd_in_sync_mode() function waits for a command to complete
 * after it issues a command.  This function wakes up that waiting routine
 * by broadcasting on the int_cmd_cv condition variable.
 */
void
tbolt_complete_cmd_in_sync_mode(struct mrsas_instance *instance,
    struct mrsas_cmd *cmd)
{
	cmd->cmd_status = ddi_get8(cmd->frame_dma_obj.acc_handle,
	    &cmd->frame->io.cmd_status);

	cmd->sync_cmd = MRSAS_FALSE;

	mutex_enter(&instance->int_cmd_mtx);
	if (cmd->cmd_status == ENODATA) {
		cmd->cmd_status = 0;
	}
	cv_broadcast(&instance->int_cmd_cv);
	mutex_exit(&instance->int_cmd_mtx);
}
/*
 * mrsas_tbolt_get_ld_map_info -	Fetches the FW's ld_map structure
 * instance:				Adapter soft state
 *
 * Issues an internal command (DCMD) to read the FW's logical-drive RAID
 * map.  The map is later validated and used to decide whether fast-path
 * I/O can be used for a given target.
 */
int
mrsas_tbolt_get_ld_map_info(struct mrsas_instance *instance)
{
	int ret = 0;
	struct mrsas_cmd *cmd = NULL;
	struct mrsas_dcmd_frame *dcmd;
	MR_FW_RAID_MAP_ALL *ci;
	uint32_t ci_h = 0;
	U32 size_map_info;

	cmd = get_raid_msg_pkt(instance);

	if (cmd == NULL) {
		dev_err(instance->dip, CE_WARN,
		    "Failed to get a cmd from free-pool in get_ld_map_info()");
		return (DDI_FAILURE);
	}

	dcmd = &cmd->frame->dcmd;

	size_map_info = sizeof (MR_FW_RAID_MAP) +
	    (sizeof (MR_LD_SPAN_MAP) *
	    (MAX_LOGICAL_DRIVES - 1));

	con_log(CL_ANN, (CE_NOTE,
	    "size_map_info : 0x%x", size_map_info));

	ci = instance->ld_map[instance->map_id & 1];
	ci_h = instance->ld_map_phy[instance->map_id & 1];

	if (!ci) {
		dev_err(instance->dip, CE_WARN,
		    "Failed to alloc mem for ld_map_info");
		return_raid_msg_pkt(instance, cmd);
		return (DDI_FAILURE);
	}

	bzero(ci, sizeof (*ci));
	bzero(dcmd->mbox.b, DCMD_MBOX_SZ);

	dcmd->cmd = MFI_CMD_OP_DCMD;
	dcmd->cmd_status = 0xFF;
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_READ;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = size_map_info;
	dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
	dcmd->sgl.sge32[0].phys_addr = ci_h;
	dcmd->sgl.sge32[0].length = size_map_info;

	mr_sas_tbolt_build_mfi_cmd(instance, cmd);

	if (!instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
		ret = DDI_SUCCESS;
		con_log(CL_ANN1, (CE_NOTE, "Get LD Map Info success"));
	} else {
		dev_err(instance->dip, CE_WARN, "Get LD Map Info failed");
		ret = DDI_FAILURE;
	}

	return_raid_msg_pkt(instance, cmd);

	return (ret);
}
void
mrsas_dump_reply_desc(struct mrsas_instance *instance)
{
	uint32_t i;
	MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
	union desc_value d_val;

	reply_desc = instance->reply_frame_pool;

	for (i = 0; i < instance->reply_q_depth; i++, reply_desc++) {
		d_val.word = reply_desc->Words;
		con_log(CL_DLEVEL3, (CE_NOTE,
		    "i=%d, %x:%x",
		    i, d_val.u1.high, d_val.u1.low));
	}
}
/*
 * mrsas_tbolt_prepare_cdb -	Build a 32-byte CDB for a fast-path
 *				command on a DIF-capable device.
 * @io_info:	MegaRAID IO request packet pointer.
 * @ref_tag:	Reference tag for RD/WRPROTECT
 */
void
mrsas_tbolt_prepare_cdb(struct mrsas_instance *instance, U8 cdb[],
    struct IO_REQUEST_INFO *io_info, Mpi2RaidSCSIIORequest_t *scsi_io_request,
    U32 ref_tag)
{
	uint16_t EEDPFlags;
	uint32_t Control;
	ddi_acc_handle_t acc_handle =
	    instance->mpi2_frame_pool_dma_obj.acc_handle;

	/* Prepare 32-byte CDB if DIF is supported on this device */
	con_log(CL_ANN, (CE_NOTE, "Prepare DIF CDB"));

	bzero(cdb, 32);

	cdb[0] = MRSAS_SCSI_VARIABLE_LENGTH_CMD;

	cdb[7] = MRSAS_SCSI_ADDL_CDB_LEN;

	if (io_info->isRead)
		cdb[9] = MRSAS_SCSI_SERVICE_ACTION_READ32;
	else
		cdb[9] = MRSAS_SCSI_SERVICE_ACTION_WRITE32;

	/* Verify within linux driver, set to MEGASAS_RD_WR_PROTECT_CHECK_ALL */
	cdb[10] = MRSAS_RD_WR_PROTECT;

	/* LOGICAL BLOCK ADDRESS */
	cdb[12] = (U8)(((io_info->pdBlock) >> 56) & 0xff);
	cdb[13] = (U8)(((io_info->pdBlock) >> 48) & 0xff);
	cdb[14] = (U8)(((io_info->pdBlock) >> 40) & 0xff);
	cdb[15] = (U8)(((io_info->pdBlock) >> 32) & 0xff);
	cdb[16] = (U8)(((io_info->pdBlock) >> 24) & 0xff);
	cdb[17] = (U8)(((io_info->pdBlock) >> 16) & 0xff);
	cdb[18] = (U8)(((io_info->pdBlock) >> 8) & 0xff);
	cdb[19] = (U8)((io_info->pdBlock) & 0xff);

	/* Logical block reference tag */
	ddi_put32(acc_handle, &scsi_io_request->CDB.EEDP32.PrimaryReferenceTag,
	    BE_32(ref_tag));

	ddi_put16(acc_handle,
	    &scsi_io_request->CDB.EEDP32.PrimaryApplicationTagMask, 0xffff);

	ddi_put32(acc_handle, &scsi_io_request->DataLength,
	    ((io_info->numBlocks)*512));
	/* Specify 32-byte cdb */
	ddi_put16(acc_handle, &scsi_io_request->IoFlags, 32);

	/* Transfer length */
	cdb[28] = (U8)(((io_info->numBlocks) >> 24) & 0xff);
	cdb[29] = (U8)(((io_info->numBlocks) >> 16) & 0xff);
	cdb[30] = (U8)(((io_info->numBlocks) >> 8) & 0xff);
	cdb[31] = (U8)((io_info->numBlocks) & 0xff);

	/* set SCSI IO EEDPFlags */
	EEDPFlags = ddi_get16(acc_handle, &scsi_io_request->EEDPFlags);
	Control = ddi_get32(acc_handle, &scsi_io_request->Control);

	/* set SCSI IO EEDPFlags bits */
	if (io_info->isRead) {
		/*
		 * For READ commands, the EEDPFlags shall be set to specify to
		 * Increment the Primary Reference Tag, to Check the Reference
		 * Tag, and to Check and Remove the Protection Information
		 * fields.
		 */
		EEDPFlags = MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
		    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
		    MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP |
		    MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG |
		    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
	} else {
		/*
		 * For WRITE commands, the EEDPFlags shall be set to specify to
		 * Increment the Primary Reference Tag, and to Insert
		 * Protection Information fields.
		 */
		EEDPFlags = MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
		    MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
	}
	Control |= (0x4 << 26);

	ddi_put16(acc_handle, &scsi_io_request->EEDPFlags, EEDPFlags);
	ddi_put32(acc_handle, &scsi_io_request->Control, Control);
	ddi_put32(acc_handle,
	    &scsi_io_request->EEDPBlockSize, MRSAS_EEDPBLOCKSIZE);
}
/*
 * mrsas_tbolt_set_pd_lba -	Sets PD LBA
 * @cdb:		CDB
 * @cdb_size:		CDB size
 * @cdb_len_ptr:	cdb length
 * @start_blk:		Start block of IO
 * @num_blocks:		Number of blocks
 *
 * Used to set the PD LBA in CDB for FP IOs
 */
static void
mrsas_tbolt_set_pd_lba(U8 *cdb, size_t cdb_size, uint8_t *cdb_len_ptr,
    U64 start_blk, U32 num_blocks)
{
	U8 cdb_len = *cdb_len_ptr;
	U8 flagvals = 0, opcode = 0, groupnum = 0, control = 0;

	/* Some drives don't support 16/12 byte CDB's, convert to 10 */
	if (((cdb_len == 12) || (cdb_len == 16)) &&
	    (start_blk <= 0xffffffff)) {
		if (cdb_len == 16) {
			con_log(CL_ANN,
			    (CE_NOTE, "Converting READ/WRITE(16) to READ10"));
			opcode = cdb[0] == READ_16 ? READ_10 : WRITE_10;
			flagvals = cdb[1];
			groupnum = cdb[14];
			control = cdb[15];
		} else {
			con_log(CL_ANN,
			    (CE_NOTE, "Converting READ/WRITE(12) to READ10"));
			opcode = cdb[0] == READ_12 ? READ_10 : WRITE_10;
			flagvals = cdb[1];
			groupnum = cdb[10];
			control = cdb[11];
		}

		bzero(cdb, cdb_size);

		cdb[0] = opcode;
		cdb[1] = flagvals;
		cdb[6] = groupnum;
		cdb[9] = control;

		/* Set transfer length */
		cdb[8] = (U8)(num_blocks & 0xff);
		cdb[7] = (U8)((num_blocks >> 8) & 0xff);

		cdb_len = 10;
	} else if ((cdb_len < 16) && (start_blk > 0xffffffff)) {
		/* Convert to 16 byte CDB for large LBA's */
		con_log(CL_ANN,
		    (CE_NOTE, "Converting 6/10/12 CDB to 16 byte CDB"));
		switch (cdb_len) {
		case 6:
			opcode = cdb[0] == READ_6 ? READ_16 : WRITE_16;
			control = cdb[5];
			break;
		case 10:
			opcode = cdb[0] == READ_10 ? READ_16 : WRITE_16;
			flagvals = cdb[1];
			groupnum = cdb[6];
			control = cdb[9];
			break;
		case 12:
			opcode = cdb[0] == READ_12 ? READ_16 : WRITE_16;
			flagvals = cdb[1];
			groupnum = cdb[10];
			control = cdb[11];
			break;
		}

		bzero(cdb, cdb_size);

		cdb[0] = opcode;
		cdb[1] = flagvals;
		cdb[14] = groupnum;
		cdb[15] = control;

		/* Transfer length */
		cdb[13] = (U8)(num_blocks & 0xff);
		cdb[12] = (U8)((num_blocks >> 8) & 0xff);
		cdb[11] = (U8)((num_blocks >> 16) & 0xff);
		cdb[10] = (U8)((num_blocks >> 24) & 0xff);

		/* Specify 16-byte cdb */
		cdb_len = 16;
	} else if ((cdb_len == 6) && (start_blk > 0x1fffff)) {
		/* convert to 10 byte CDB */
		opcode = cdb[0] == READ_6 ? READ_10 : WRITE_10;
		control = cdb[5];

		bzero(cdb, cdb_size);
		cdb[0] = opcode;
		cdb[9] = control;

		/* Set transfer length */
		cdb[8] = (U8)(num_blocks & 0xff);
		cdb[7] = (U8)((num_blocks >> 8) & 0xff);

		/* Specify 10-byte cdb */
		cdb_len = 10;
	}

	/* Fall through Normal case, just load LBA here */
	switch (cdb_len) {
	case 6:
	{
		U8 val = cdb[1] & 0xE0;
		cdb[3] = (U8)(start_blk & 0xff);
		cdb[2] = (U8)((start_blk >> 8) & 0xff);
		cdb[1] = val | ((U8)(start_blk >> 16) & 0x1f);
		break;
	}
	case 10:
		cdb[5] = (U8)(start_blk & 0xff);
		cdb[4] = (U8)((start_blk >> 8) & 0xff);
		cdb[3] = (U8)((start_blk >> 16) & 0xff);
		cdb[2] = (U8)((start_blk >> 24) & 0xff);
		break;
	case 12:
		cdb[5] = (U8)(start_blk & 0xff);
		cdb[4] = (U8)((start_blk >> 8) & 0xff);
		cdb[3] = (U8)((start_blk >> 16) & 0xff);
		cdb[2] = (U8)((start_blk >> 24) & 0xff);
		break;
	case 16:
		cdb[9] = (U8)(start_blk & 0xff);
		cdb[8] = (U8)((start_blk >> 8) & 0xff);
		cdb[7] = (U8)((start_blk >> 16) & 0xff);
		cdb[6] = (U8)((start_blk >> 24) & 0xff);
		cdb[5] = (U8)((start_blk >> 32) & 0xff);
		cdb[4] = (U8)((start_blk >> 40) & 0xff);
		cdb[3] = (U8)((start_blk >> 48) & 0xff);
		cdb[2] = (U8)((start_blk >> 56) & 0xff);
		break;
	}

	*cdb_len_ptr = cdb_len;
}
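
/*
 * Example of the conversion above: a READ(10) whose start_blk no
 * longer fits in 32 bits is rebuilt as a READ(16).  The opcode, flag
 * bits, group number and control byte are carried over, num_blocks is
 * stored in CDB bytes 10-13, start_blk in bytes 2-9, and *cdb_len_ptr
 * comes back as 16 so the caller can advertise the new CDB length.
 */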
static int
mrsas_tbolt_check_map_info(struct mrsas_instance *instance)
{
	MR_FW_RAID_MAP_ALL *ld_map;

	if (!mrsas_tbolt_get_ld_map_info(instance)) {

		ld_map = instance->ld_map[instance->map_id & 1];

		con_log(CL_ANN1, (CE_NOTE, "ldCount=%d, map size=%d",
		    ld_map->raidMap.ldCount, ld_map->raidMap.totalSize));

		if (MR_ValidateMapInfo(
		    instance->ld_map[instance->map_id & 1],
		    instance->load_balance_info)) {
			con_log(CL_ANN,
			    (CE_CONT, "MR_ValidateMapInfo success"));

			instance->fast_path_io = 1;
			con_log(CL_ANN,
			    (CE_NOTE, "instance->fast_path_io %d",
			    instance->fast_path_io));

			return (DDI_SUCCESS);
		}
	}

	instance->fast_path_io = 0;
	dev_err(instance->dip, CE_WARN, "MR_ValidateMapInfo failed");
	con_log(CL_ANN, (CE_NOTE,
	    "instance->fast_path_io %d", instance->fast_path_io));

	return (DDI_FAILURE);
}
/*
 * Marks HBA as bad. This will be called either when an
 * IO packet times out even after 3 FW resets
 * or FW is found to be fault even after 3 continuous resets.
 */
void
mrsas_tbolt_kill_adapter(struct mrsas_instance *instance)
{
	dev_err(instance->dip, CE_NOTE, "TBOLT Kill adapter called");

	if (instance->deadadapter == 1)
		return;

	con_log(CL_ANN1, (CE_NOTE, "tbolt_kill_adapter: "
	    "Writing to doorbell with MFI_STOP_ADP "));
	mutex_enter(&instance->ocr_flags_mtx);
	instance->deadadapter = 1;
	mutex_exit(&instance->ocr_flags_mtx);
	instance->func_ptr->disable_intr(instance);
	WR_RESERVED0_REGISTER(MFI_STOP_ADP, instance);
	/* Flush */
	(void) RD_RESERVED0_REGISTER(instance);

	(void) mrsas_print_pending_cmds(instance);
	(void) mrsas_complete_pending_cmds(instance);
}
void
mrsas_reset_reply_desc(struct mrsas_instance *instance)
{
	int i;
	MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;

	instance->reply_read_index = 0;

	/* initializing reply address to 0xFFFFFFFF */
	reply_desc = instance->reply_frame_pool;

	for (i = 0; i < instance->reply_q_depth; i++) {
		reply_desc->Words = (uint64_t)~0;
		reply_desc++;
	}
}
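
/*
 * Online controller reset (OCR).  The sequence below unlocks the diag
 * register by writing the magic byte sequence to the inbound write
 * sequence register, waits for DRWE, sets DIAG_TBOLT_RESET_ADAPTER,
 * and then polls the firmware state.  A firmware fault after reset is
 * retried up to MAX_FW_RESET_COUNT times before the adapter is killed.
 */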
int
mrsas_tbolt_reset_ppc(struct mrsas_instance *instance)
{
	uint32_t status = 0x00;
	uint32_t retry = 0;
	uint32_t cur_abs_reg_val;
	uint32_t fw_state;
	uint32_t abs_state;
	uint32_t i;

	con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc entered"));

	if (instance->deadadapter == 1) {
		dev_err(instance->dip, CE_WARN, "mrsas_tbolt_reset_ppc: "
		    "no more resets as HBA has been marked dead");
		return (DDI_FAILURE);
	}

	mutex_enter(&instance->ocr_flags_mtx);
	instance->adapterresetinprogress = 1;
	mutex_exit(&instance->ocr_flags_mtx);

	instance->func_ptr->disable_intr(instance);

	/* Add delay in order to complete the ioctl & io cmds in-flight */
	for (i = 0; i < 3000; i++)
		drv_usecwait(MILLISEC); /* wait for 1000 usecs */

	instance->reply_read_index = 0;

retry_reset:
	con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: Resetting TBOLT"));

	WR_TBOLT_IB_WRITE_SEQ(0x0, instance);
	/* Write magic number */
	WR_TBOLT_IB_WRITE_SEQ(0xF, instance);
	WR_TBOLT_IB_WRITE_SEQ(0x4, instance);
	WR_TBOLT_IB_WRITE_SEQ(0xb, instance);
	WR_TBOLT_IB_WRITE_SEQ(0x2, instance);
	WR_TBOLT_IB_WRITE_SEQ(0x7, instance);
	WR_TBOLT_IB_WRITE_SEQ(0xd, instance);

	con_log(CL_ANN1, (CE_NOTE,
	    "mrsas_tbolt_reset_ppc: magic number written "
	    "to write sequence register"));

	/* Wait for the diag write enable (DRWE) bit to be set */
	retry = 0;
	status = RD_TBOLT_HOST_DIAG(instance);
	while (!(status & DIAG_WRITE_ENABLE)) {
		drv_usecwait(MILLISEC);
		status = RD_TBOLT_HOST_DIAG(instance);
		if (retry++ >= 100) {
			dev_err(instance->dip, CE_WARN,
			    "%s(): timeout waiting for DRWE.", __func__);
			return (DDI_FAILURE);
		}
	}

	/* Send reset command */
	WR_TBOLT_HOST_DIAG(status | DIAG_TBOLT_RESET_ADAPTER, instance);

	/* Wait for reset bit to clear */
	retry = 0;
	status = RD_TBOLT_HOST_DIAG(instance);
	while ((status & DIAG_TBOLT_RESET_ADAPTER)) {
		drv_usecwait(MILLISEC);
		status = RD_TBOLT_HOST_DIAG(instance);
		if (retry++ == 100) {
			/* Dont call kill adapter here */
			/* RESET BIT ADAPTER is cleared by firmware */
			/* mrsas_tbolt_kill_adapter(instance); */
			dev_err(instance->dip, CE_WARN,
			    "%s(): RESET FAILED; return failure!!!", __func__);
			return (DDI_FAILURE);
		}
	}

	con_log(CL_ANN,
	    (CE_NOTE, "mrsas_tbolt_reset_ppc: Adapter reset complete"));

	abs_state = instance->func_ptr->read_fw_status_reg(instance);
	retry = 0;
	while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
		drv_usecwait(100 * MILLISEC);
		abs_state = instance->func_ptr->read_fw_status_reg(instance);
	}
	if (abs_state <= MFI_STATE_FW_INIT) {
		dev_err(instance->dip, CE_WARN,
		    "mrsas_tbolt_reset_ppc: firmware state < "
		    "MFI_STATE_FW_INIT, state = 0x%x, RETRY RESET.",
		    abs_state);
		goto retry_reset;
	}

	/* Mark HBA as bad, if FW is fault after 3 continuous resets */
	if (mfi_state_transition_to_ready(instance) ||
	    mrsas_debug_tbolt_fw_faults_after_ocr == 1) {
		cur_abs_reg_val =
		    instance->func_ptr->read_fw_status_reg(instance);
		fw_state = cur_abs_reg_val & MFI_STATE_MASK;

		con_log(CL_ANN1, (CE_NOTE,
		    "mrsas_tbolt_reset_ppc :before fake: FW is not ready "
		    "FW state = 0x%x", fw_state));
		if (mrsas_debug_tbolt_fw_faults_after_ocr == 1)
			fw_state = MFI_STATE_FAULT;

		con_log(CL_ANN,
		    (CE_NOTE, "mrsas_tbolt_reset_ppc : FW is not ready "
		    "FW state = 0x%x", fw_state));

		if (fw_state == MFI_STATE_FAULT) {
			/* increment the count */
			instance->fw_fault_count_after_ocr++;
			if (instance->fw_fault_count_after_ocr
			    < MAX_FW_RESET_COUNT) {
				dev_err(instance->dip, CE_WARN,
				    "mrsas_tbolt_reset_ppc: "
				    "FW is in fault after OCR count %d "
				    "Retry Reset",
				    instance->fw_fault_count_after_ocr);
				goto retry_reset;
			} else {
				dev_err(instance->dip, CE_WARN, "%s: "
				    "Max Reset Count exceeded >%d "
				    "Mark HBA as bad, KILL adapter",
				    __func__, MAX_FW_RESET_COUNT);

				mrsas_tbolt_kill_adapter(instance);
				return (DDI_FAILURE);
			}
		}
	}

	/* reset the counter as FW is up after OCR */
	instance->fw_fault_count_after_ocr = 0;

	mrsas_reset_reply_desc(instance);

	abs_state = mrsas_issue_init_mpi2(instance);
	if (abs_state == (uint32_t)DDI_FAILURE) {
		dev_err(instance->dip, CE_WARN, "mrsas_tbolt_reset_ppc: "
		    "INIT failed Retrying Reset");
		goto retry_reset;
	}

	(void) mrsas_print_pending_cmds(instance);

	instance->func_ptr->enable_intr(instance);
	instance->fw_outstanding = 0;

	(void) mrsas_issue_pending_cmds(instance);

	instance->aen_cmd->retry_count_for_ocr = 0;
	instance->aen_cmd->drv_pkt_time = 0;

	instance->func_ptr->issue_cmd(instance->aen_cmd, instance);

	mutex_enter(&instance->ocr_flags_mtx);
	instance->adapterresetinprogress = 0;
	mutex_exit(&instance->ocr_flags_mtx);

	dev_err(instance->dip, CE_NOTE, "TBOLT adapter reset successfully");

	return (DDI_SUCCESS);
}
/*
 * mrsas_tbolt_sync_map_info -	Keep the driver in sync with the FW's map
 * @instance:			Adapter soft state
 *
 * Issues an internal, pended DCMD (MR_DCMD_LD_MAP_GET_INFO with
 * mbox.b[1] set) that hands the FW the per-LD target id and sequence
 * number list.  The FW completes the command only when the RAID map
 * changes; the completion path then switches to the new map and issues
 * a fresh sync command.
 */
static int
mrsas_tbolt_sync_map_info(struct mrsas_instance *instance)
{
	int ret = 0, i;
	struct mrsas_cmd *cmd = NULL;
	struct mrsas_dcmd_frame *dcmd;
	uint32_t size_sync_info, num_lds;
	LD_TARGET_SYNC *ci = NULL;
	MR_FW_RAID_MAP_ALL *map;
	MR_LD_RAID *raid;
	LD_TARGET_SYNC *ld_sync;
	uint32_t ci_h = 0;
	uint32_t size_map_info;

	cmd = get_raid_msg_pkt(instance);

	if (cmd == NULL) {
		dev_err(instance->dip, CE_WARN,
		    "Failed to get a cmd from free-pool in "
		    "mrsas_tbolt_sync_map_info().");
		return (DDI_FAILURE);
	}

	/* Clear the frame buffer and assign back the context id */
	bzero((char *)&cmd->frame[0], sizeof (union mrsas_frame));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
	    cmd->index);
	bzero(cmd->scsi_io_request, sizeof (Mpi2RaidSCSIIORequest_t));

	map = instance->ld_map[instance->map_id & 1];

	num_lds = map->raidMap.ldCount;

	dcmd = &cmd->frame->dcmd;

	size_sync_info = sizeof (LD_TARGET_SYNC) * num_lds;

	con_log(CL_ANN, (CE_NOTE, "size_sync_info =0x%x ; ld count = 0x%x",
	    size_sync_info, num_lds));

	ci = (LD_TARGET_SYNC *)instance->ld_map[(instance->map_id - 1) & 1];

	bzero(ci, sizeof (MR_FW_RAID_MAP_ALL));
	ci_h = instance->ld_map_phy[(instance->map_id - 1) & 1];

	bzero(dcmd->mbox.b, DCMD_MBOX_SZ);

	ld_sync = (LD_TARGET_SYNC *)ci;

	for (i = 0; i < num_lds; i++, ld_sync++) {
		raid = MR_LdRaidGet(i, map);

		con_log(CL_ANN1,
		    (CE_NOTE, "i : 0x%x, Seq Num : 0x%x, Sync Reqd : 0x%x",
		    i, raid->seqNum, raid->flags.ldSyncRequired));

		ld_sync->ldTargetId = MR_GetLDTgtId(i, map);

		con_log(CL_ANN1, (CE_NOTE, "i : 0x%x, tgt : 0x%x",
		    i, ld_sync->ldTargetId));

		ld_sync->seqNum = raid->seqNum;
	}

	size_map_info = sizeof (MR_FW_RAID_MAP) +
	    (sizeof (MR_LD_SPAN_MAP) * (MAX_LOGICAL_DRIVES - 1));

	dcmd->cmd = MFI_CMD_OP_DCMD;
	dcmd->cmd_status = 0xFF;
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_WRITE;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = size_map_info;
	ASSERT(num_lds <= 255);
	dcmd->mbox.b[0] = (U8)num_lds;
	dcmd->mbox.b[1] = 1; /* Pend */
	dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
	dcmd->sgl.sge32[0].phys_addr = ci_h;
	dcmd->sgl.sge32[0].length = size_map_info;

	instance->map_update_cmd = cmd;
	mr_sas_tbolt_build_mfi_cmd(instance, cmd);

	instance->func_ptr->issue_cmd(cmd, instance);

	instance->unroll.syncCmd = 1;
	con_log(CL_ANN1, (CE_NOTE, "sync cmd issued. [SMID]:%x", cmd->SMID));

	return (ret);
}
/*
 * abort_syncmap_cmd
 */
int
abort_syncmap_cmd(struct mrsas_instance *instance,
    struct mrsas_cmd *cmd_to_abort)
{
	int ret = 0;
	struct mrsas_cmd *cmd;
	struct mrsas_abort_frame *abort_fr;

	con_log(CL_ANN1, (CE_NOTE, "chkpnt: abort_ldsync:%d", __LINE__));

	cmd = get_raid_msg_mfi_pkt(instance);

	if (!cmd) {
		dev_err(instance->dip, CE_WARN,
		    "Failed to get a cmd from free-pool abort_syncmap_cmd().");
		return (DDI_FAILURE);
	}

	/* Clear the frame buffer and assign back the context id */
	bzero((char *)&cmd->frame[0], sizeof (union mrsas_frame));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
	    cmd->index);

	abort_fr = &cmd->frame->abort;

	/* prepare and issue the abort frame */
	ddi_put8(cmd->frame_dma_obj.acc_handle,
	    &abort_fr->cmd, MFI_CMD_OP_ABORT);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &abort_fr->cmd_status,
	    MFI_CMD_STATUS_SYNC_MODE);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &abort_fr->flags, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &abort_fr->abort_context,
	    cmd_to_abort->index);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &abort_fr->abort_mfi_phys_addr_lo, cmd_to_abort->frame_phys_addr);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &abort_fr->abort_mfi_phys_addr_hi, 0);

	cmd->frame_count = 1;

	mr_sas_tbolt_build_mfi_cmd(instance, cmd);

	if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
		con_log(CL_ANN1, (CE_WARN,
		    "abort_ldsync_cmd: issue_cmd_in_poll_mode failed"));
		ret = -1;
	} else {
		ret = 0;
	}

	return_raid_msg_mfi_pkt(instance, cmd);

	atomic_add_16(&instance->fw_outstanding, (-1));

	return (ret);
}
/*
 * Even though these functions were originally intended for 2208 only, it
 * turns out they're useful for "Skinny" support as well. In a perfect world,
 * these two functions would be either in mr_sas.c, or in their own new source
 * file. Since this driver needs some cleanup anyway, keep this portion in
 * this file for now.
 */
int
mrsas_tbolt_config_pd(struct mrsas_instance *instance, uint16_t tgt,
    uint8_t lun, dev_info_t **ldip)
{
	struct scsi_device *sd;
	dev_info_t *child;
	int rval, dtype;
	struct mrsas_tbolt_pd_info *pds = NULL;

	con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_config_pd: t = %d l = %d",
	    tgt, lun));

	if ((child = mrsas_find_child(instance, tgt, lun)) != NULL) {
		if (ldip) {
			*ldip = child;
		}
		if (instance->mr_tbolt_pd_list[tgt].flag != MRDRV_TGT_VALID) {
			rval = mrsas_service_evt(instance, tgt, 1,
			    MRSAS_EVT_UNCONFIG_TGT, (uintptr_t)NULL);
			con_log(CL_ANN1, (CE_WARN,
			    "mr_sas:DELETING STALE ENTRY rval = %d "
			    "tgt id = %d", rval, tgt));
			return (NDI_FAILURE);
		}
		return (NDI_SUCCESS);
	}

	pds = (struct mrsas_tbolt_pd_info *)
	    kmem_zalloc(sizeof (struct mrsas_tbolt_pd_info), KM_SLEEP);
	mrsas_tbolt_get_pd_info(instance, pds, tgt);
	dtype = pds->scsiDevType;

	/* Check for Disk */
	if (dtype == DTYPE_DIRECT) {
		if (LE_16(pds->fwState) != PD_SYSTEM) {
			kmem_free(pds, sizeof (struct mrsas_tbolt_pd_info));
			return (NDI_FAILURE);
		}
		sd = kmem_zalloc(sizeof (struct scsi_device), KM_SLEEP);
		sd->sd_address.a_hba_tran = instance->tran;
		sd->sd_address.a_target = (uint16_t)tgt;
		sd->sd_address.a_lun = (uint8_t)lun;

		if (scsi_hba_probe(sd, NULL) == SCSIPROBE_EXISTS) {
			rval = mrsas_config_scsi_device(instance, sd, ldip);
			dev_err(instance->dip, CE_CONT,
			    "?Phys. device found: tgt %d dtype %d: %s\n",
			    tgt, dtype, sd->sd_inq->inq_vid);
		} else {
			rval = NDI_FAILURE;
			con_log(CL_DLEVEL1, (CE_NOTE, "Phys. device Not found "
			    "scsi_hba_probe Failed: tgt %d dtype %d: %s",
			    tgt, dtype, sd->sd_inq->inq_vid));
		}

		/* sd_unprobe is blank now. Free buffer manually */
		if (sd->sd_inq) {
			kmem_free(sd->sd_inq, SUN_INQSIZE);
			sd->sd_inq = (struct scsi_inquiry *)NULL;
		}
		kmem_free(sd, sizeof (struct scsi_device));
	} else {
		rval = NDI_FAILURE;
		con_log(CL_ANN1, (CE_NOTE,
		    "?Device not supported: tgt %d lun %d dtype %d",
		    tgt, lun, dtype));
	}

	kmem_free(pds, sizeof (struct mrsas_tbolt_pd_info));
	con_log(CL_ANN1, (CE_NOTE, "mrsas_config_pd: return rval = %d",
	    rval));
	return (rval);
}
static void
mrsas_tbolt_get_pd_info(struct mrsas_instance *instance,
    struct mrsas_tbolt_pd_info *pds, int tgt)
{
	struct mrsas_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	dma_obj_t dcmd_dma_obj;

	ASSERT(instance->tbolt || instance->skinny);

	if (instance->tbolt)
		cmd = get_raid_msg_pkt(instance);
	else
		cmd = mrsas_get_mfi_pkt(instance);

	if (!cmd) {
		con_log(CL_ANN1,
		    (CE_WARN, "Failed to get a cmd for get pd info"));
		return;
	}

	/* Clear the frame buffer and assign back the context id */
	bzero((char *)&cmd->frame[0], sizeof (union mrsas_frame));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
	    cmd->index);

	dcmd = &cmd->frame->dcmd;
	dcmd_dma_obj.size = sizeof (struct mrsas_tbolt_pd_info);
	dcmd_dma_obj.dma_attr = mrsas_generic_dma_attr;
	dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xffffffff;
	dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xffffffff;
	dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1;
	dcmd_dma_obj.dma_attr.dma_attr_align = 1;

	(void) mrsas_alloc_dma_obj(instance, &dcmd_dma_obj,
	    DDI_STRUCTURE_LE_ACC);
	bzero(dcmd_dma_obj.buffer, sizeof (struct mrsas_tbolt_pd_info));
	bzero(dcmd->mbox.b, 12);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
	    MFI_FRAME_DIR_READ);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
	    sizeof (struct mrsas_tbolt_pd_info));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
	    MR_DCMD_PD_GET_INFO);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.w[0], tgt);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length,
	    sizeof (struct mrsas_tbolt_pd_info));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr,
	    dcmd_dma_obj.dma_cookie[0].dmac_address);

	cmd->sync_cmd = MRSAS_TRUE;
	cmd->frame_count = 1;

	if (instance->tbolt)
		mr_sas_tbolt_build_mfi_cmd(instance, cmd);

	instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd);

	ddi_rep_get8(cmd->frame_dma_obj.acc_handle, (uint8_t *)pds,
	    (uint8_t *)dcmd_dma_obj.buffer,
	    sizeof (struct mrsas_tbolt_pd_info), DDI_DEV_AUTOINCR);

	(void) mrsas_free_dma_obj(instance, dcmd_dma_obj);

	if (instance->tbolt)
		return_raid_msg_pkt(instance, cmd);
	else
		mrsas_return_mfi_pkt(instance, cmd);
}