2 * **********************************************************************
6 * Solaris MegaRAID device driver for SAS2.0 controllers
7 * Copyright (c) 2008-2012, LSI Logic Corporation.
19 * This module contains functions for device drivers
20 * to get pd-ld mapping information.
22 * **********************************************************************
25 * Copyright 2015 Garrett D'Amore <garrett@damore.org>
26 * Copyright 2017 Citrus IT Limited. All rights reserved.
29 #include <sys/scsi/scsi.h>
31 #include "ld_pd_map.h"
/*
 * This function will check if FAST IO is possible on this logical drive
 * by checking the EVENT information available in the driver
 */

/* LD state value reported by firmware for an Optimal logical drive. */
#define	MR_LD_STATE_OPTIMAL 3

/*
 * Absolute difference of two values.  NOTE: evaluates both arguments
 * twice — pass only side-effect-free expressions.
 */
#define	ABS_DIFF(a, b) (((a) > (b)) ? ((a) - (b)) : ((b) - (a)))

/* Rebuilds per-LD RAID-1 load-balance state from a freshly validated map. */
static void mr_update_load_balance_params(MR_FW_RAID_MAP_ALL *,
    PLD_LOAD_BALANCE_INFO);

/* Region-lock key/length types (row LBA and length in blocks). */
typedef U64	REGION_KEY;
typedef U32	REGION_LEN;

extern int	debug_level_g;	/* driver-wide debug verbosity level */
52 *MR_LdRaidGet(U32 ld
, MR_FW_RAID_MAP_ALL
*map
)
54 return (&map
->raidMap
.ldSpanMap
[ld
].ldRaid
);
58 MR_GetLDTgtId(U32 ld
, MR_FW_RAID_MAP_ALL
*map
)
60 return (map
->raidMap
.ldSpanMap
[ld
].ldRaid
.targetId
);
64 static MR_SPAN_BLOCK_INFO
*
65 MR_LdSpanInfoGet(U32 ld
, MR_FW_RAID_MAP_ALL
*map
)
67 return (&map
->raidMap
.ldSpanMap
[ld
].spanBlock
[0]);
71 MR_LdDataArmGet(U32 ld
, U32 armIdx
, MR_FW_RAID_MAP_ALL
*map
)
73 return (map
->raidMap
.ldSpanMap
[ld
].dataArmMap
[armIdx
]);
77 MR_ArPdGet(U32 ar
, U32 arm
, MR_FW_RAID_MAP_ALL
*map
)
79 return (map
->raidMap
.arMapInfo
[ar
].pd
[arm
]);
83 MR_LdSpanArrayGet(U32 ld
, U32 span
, MR_FW_RAID_MAP_ALL
*map
)
85 return (map
->raidMap
.ldSpanMap
[ld
].spanBlock
[span
].span
.arrayRef
);
89 MR_PdDevHandleGet(U32 pd
, MR_FW_RAID_MAP_ALL
*map
)
91 return (map
->raidMap
.devHndlInfo
[pd
].curDevHdl
);
95 MR_TargetIdToLdGet(U32 ldTgtId
, MR_FW_RAID_MAP_ALL
*map
)
97 return (map
->raidMap
.ldTgtIdToLd
[ldTgtId
]);
101 MR_CheckDIF(U32 ldTgtId
, MR_FW_RAID_MAP_ALL
*map
)
106 ld
= MR_TargetIdToLdGet(ldTgtId
, map
);
108 if (ld
>= MAX_LOGICAL_DRIVES
) {
112 raid
= MR_LdRaidGet(ld
, map
);
114 return (raid
->capability
.ldPiMode
== 0x8);
118 MR_LdSpanPtrGet(U32 ld
, U32 span
, MR_FW_RAID_MAP_ALL
*map
)
120 return (&map
->raidMap
.ldSpanMap
[ld
].spanBlock
[span
].span
);
124 * This function will validate Map info data provided by FW
127 MR_ValidateMapInfo(MR_FW_RAID_MAP_ALL
*map
, PLD_LOAD_BALANCE_INFO lbInfo
)
129 MR_FW_RAID_MAP
*pFwRaidMap
= &map
->raidMap
;
130 U32 fwsize
= sizeof (MR_FW_RAID_MAP
) - sizeof (MR_LD_SPAN_MAP
) +
131 (sizeof (MR_LD_SPAN_MAP
) * pFwRaidMap
->ldCount
);
133 if (pFwRaidMap
->totalSize
!= fwsize
) {
135 con_log(CL_ANN1
, (CE_NOTE
,
136 "map info structure size 0x%x is "
137 "not matching with ld count\n", fwsize
));
138 /* sizeof (foo) returns size_t, which is *LONG*. */
139 con_log(CL_ANN1
, (CE_NOTE
, "span map 0x%x total size 0x%x\n",\
140 (int)sizeof (MR_LD_SPAN_MAP
), pFwRaidMap
->totalSize
));
145 mr_update_load_balance_params(map
, lbInfo
);
151 MR_GetSpanBlock(U32 ld
, U64 row
, U64
*span_blk
, MR_FW_RAID_MAP_ALL
*map
,
154 MR_SPAN_BLOCK_INFO
*pSpanBlock
= MR_LdSpanInfoGet(ld
, map
);
156 MR_LD_RAID
*raid
= MR_LdRaidGet(ld
, map
);
159 for (span
= 0; span
< raid
->spanDepth
; span
++, pSpanBlock
++) {
160 for (j
= 0; j
< pSpanBlock
->block_span_info
.noElements
; j
++) {
161 qe
= &pSpanBlock
->block_span_info
.quads
[j
];
166 if (qe
->logStart
<= row
&& row
<= qe
->logEnd
&&
167 (((row
- qe
->logStart
) % qe
->diff
)) == 0) {
168 if (span_blk
!= NULL
) {
170 blk
= ((row
- qe
->logStart
) /
173 blk
= (blk
+ qe
->offsetInSpan
) <<
186 * *************************************************************
188 * This routine calculates the arm, span and block for
189 * the specified stripe and reference in stripe.
193 * ld - Logical drive number
194 * stripRow - Stripe number
195 * stripRef - Reference in stripe
200 * block - Absolute Block number in the physical disk
203 MR_GetPhyParams(struct mrsas_instance
*instance
, U32 ld
, U64 stripRow
,
204 U16 stripRef
, U64
*pdBlock
, U16
*pDevHandle
,
205 MPI2_SCSI_IO_VENDOR_UNIQUE
*pRAID_Context
, MR_FW_RAID_MAP_ALL
*map
)
207 MR_LD_RAID
*raid
= MR_LdRaidGet(ld
, map
);
217 ASSERT(raid
->rowDataSize
!= 0);
219 row
= (stripRow
/ raid
->rowDataSize
);
221 if (raid
->level
== 6) {
222 U32 logArm
= (stripRow
% (raid
->rowDataSize
));
224 if (raid
->rowSize
== 0) {
227 rowMod
= (row
% (raid
->rowSize
));
228 armQ
= raid
->rowSize
-1-rowMod
;
229 arm
= armQ
+ 1 + logArm
;
230 if (arm
>= raid
->rowSize
)
231 arm
-= raid
->rowSize
;
234 if (raid
->modFactor
== 0)
236 physArm
= MR_LdDataArmGet(ld
,
237 (stripRow
% (raid
->modFactor
)), map
);
239 if (raid
->spanDepth
== 1) {
241 *pdBlock
= row
<< raid
->stripeShift
;
243 span
= (U8
)MR_GetSpanBlock(ld
, row
, pdBlock
, map
, &error_code
);
248 /* Get the array on which this span is present. */
249 arRef
= MR_LdSpanArrayGet(ld
, span
, map
);
251 pd
= MR_ArPdGet(arRef
, physArm
, map
);
252 /* Get dev handle from Pd. */
253 if (pd
!= MR_PD_INVALID
) {
254 *pDevHandle
= MR_PdDevHandleGet(pd
, map
);
256 *pDevHandle
= MR_PD_INVALID
; /* set dev handle as invalid. */
257 if (raid
->level
>= 5 && (!instance
->gen3
||
258 raid
->regTypeReqOnRead
!= REGION_TYPE_UNUSED
)) {
259 pRAID_Context
->regLockFlags
= REGION_TYPE_EXCLUSIVE
;
260 } else if (raid
->level
== 1) {
261 /* Get Alternate Pd. */
262 pd
= MR_ArPdGet(arRef
, physArm
+ 1, map
);
263 /* Get dev handle from Pd. */
264 if (pd
!= MR_PD_INVALID
)
265 *pDevHandle
= MR_PdDevHandleGet(pd
, map
);
269 *pdBlock
+= stripRef
+ MR_LdSpanPtrGet(ld
, span
, map
)->startBlk
;
271 pRAID_Context
->spanArm
= (span
<< RAID_CTX_SPANARM_SPAN_SHIFT
) |
280 * ***********************************************************************
282 * MR_BuildRaidContext function
284 * This function will initiate command processing. The start/end row and strip
285 * information is calculated then the lock is acquired.
286 * This function will return 0 if region lock
287 * was acquired OR return num strips ???
291 MR_BuildRaidContext(struct mrsas_instance
*instance
,
292 struct IO_REQUEST_INFO
*io_info
, MPI2_SCSI_IO_VENDOR_UNIQUE
*pRAID_Context
,
293 MR_FW_RAID_MAP_ALL
*map
)
296 U32 ld
, stripSize
, stripe_mask
;
297 U64 endLba
, endStrip
, endRow
;
298 U64 start_row
, start_strip
;
301 U8 num_strips
, numRows
;
302 U16 ref_in_start_stripe
;
303 U16 ref_in_end_stripe
;
306 U32 numBlocks
, ldTgtId
;
310 ldStartBlock
= io_info
->ldStartBlock
;
311 numBlocks
= io_info
->numBlocks
;
312 ldTgtId
= io_info
->ldTgtId
;
313 isRead
= io_info
->isRead
;
316 io_info
->fpOkForIo
= FALSE
;
320 ld
= MR_TargetIdToLdGet(ldTgtId
, map
);
322 if (ld
>= MAX_LOGICAL_DRIVES
) {
323 io_info
->fpOkForIo
= FALSE
;
327 raid
= MR_LdRaidGet(ld
, map
);
329 stripSize
= 1 << raid
->stripeShift
;
330 stripe_mask
= stripSize
-1;
332 * calculate starting row and stripe, and number of strips and rows
334 start_strip
= ldStartBlock
>> raid
->stripeShift
;
335 ref_in_start_stripe
= (U16
)(ldStartBlock
& stripe_mask
);
336 endLba
= ldStartBlock
+ numBlocks
- 1;
337 ref_in_end_stripe
= (U16
)(endLba
& stripe_mask
);
338 endStrip
= endLba
>> raid
->stripeShift
;
339 num_strips
= (U8
)(endStrip
- start_strip
+ 1);
340 /* Check to make sure is not dividing by zero */
341 if (raid
->rowDataSize
== 0)
343 start_row
= (start_strip
/ raid
->rowDataSize
);
344 endRow
= (endStrip
/ raid
->rowDataSize
);
345 /* get the row count */
346 numRows
= (U8
)(endRow
- start_row
+ 1);
349 * calculate region info.
351 regStart
= start_row
<< raid
->stripeShift
;
354 /* Check if we can send this I/O via FastPath */
355 if (raid
->capability
.fpCapable
) {
357 io_info
->fpOkForIo
= (raid
->capability
.fpReadCapable
&&
358 ((num_strips
== 1) ||
359 raid
->capability
.fpReadAcrossStripe
));
362 (raid
->capability
.fpWriteCapable
&&
363 ((num_strips
== 1) ||
364 raid
->capability
.fpWriteAcrossStripe
));
367 io_info
->fpOkForIo
= FALSE
;
371 * Check for DIF support
373 if (!raid
->capability
.ldPiMode
) {
374 io_info
->ldPI
= FALSE
;
376 io_info
->ldPI
= TRUE
;
380 if (num_strips
== 1) {
381 regStart
+= ref_in_start_stripe
;
385 if (start_strip
== (start_row
+ 1) * raid
->rowDataSize
- 1) {
386 regStart
+= ref_in_start_stripe
;
387 regSize
= stripSize
- ref_in_start_stripe
;
391 regSize
+= (numRows
- 2) << raid
->stripeShift
;
394 if (endStrip
== endRow
* raid
->rowDataSize
) {
395 regSize
+= ref_in_end_stripe
+ 1;
397 regSize
+= stripSize
;
401 pRAID_Context
->timeoutValue
= map
->raidMap
.fpPdIoTimeoutSec
;
403 if (instance
->gen3
) {
404 pRAID_Context
->regLockFlags
= (isRead
) ?
405 raid
->regTypeReqOnRead
: raid
->regTypeReqOnWrite
;
407 pRAID_Context
->regLockFlags
= (isRead
) ?
408 REGION_TYPE_SHARED_READ
: raid
->regTypeReqOnWrite
;
411 pRAID_Context
->ldTargetId
= raid
->targetId
;
412 pRAID_Context
->regLockRowLBA
= regStart
;
413 pRAID_Context
->regLockLength
= regSize
;
414 pRAID_Context
->configSeqNum
= raid
->seqNum
;
417 * Get Phy Params only if FP capable,
418 * or else leave it to MR firmware to do the calculation.
420 if (io_info
->fpOkForIo
) {
421 /* if fast path possible then get the physical parameters */
422 retval
= MR_GetPhyParams(instance
, ld
, start_strip
,
423 ref_in_start_stripe
, &io_info
->pdBlock
,
424 &io_info
->devHandle
, pRAID_Context
, map
);
426 /* If IO on an invalid Pd, then FP is not possible. */
427 if (io_info
->devHandle
== MR_PD_INVALID
)
428 io_info
->fpOkForIo
= FALSE
;
435 for (stripIdx
= 0; stripIdx
< num_strips
; stripIdx
++) {
436 if (!MR_GetPhyParams(instance
, ld
,
437 start_strip
+ stripIdx
, ref_in_start_stripe
,
438 &io_info
->pdBlock
, &io_info
->devHandle
,
439 pRAID_Context
, map
)) {
449 mr_update_load_balance_params(MR_FW_RAID_MAP_ALL
*map
,
450 PLD_LOAD_BALANCE_INFO lbInfo
)
456 for (ldCount
= 0; ldCount
< MAX_LOGICAL_DRIVES
; ldCount
++) {
457 ld
= MR_TargetIdToLdGet(ldCount
, map
);
459 if (ld
>= MAX_LOGICAL_DRIVES
) {
461 (CE_NOTE
, "mrsas: ld=%d Invalid ld \n", ld
));
465 raid
= MR_LdRaidGet(ld
, map
);
467 /* Two drive Optimal RAID 1 */
468 if ((raid
->level
== 1) && (raid
->rowSize
== 2) &&
469 (raid
->spanDepth
== 1) &&
470 raid
->ldState
== MR_LD_STATE_OPTIMAL
) {
473 lbInfo
[ldCount
].loadBalanceFlag
= 1;
475 /* Get the array on which this span is present. */
476 arRef
= MR_LdSpanArrayGet(ld
, 0, map
);
478 pd
= MR_ArPdGet(arRef
, 0, map
); /* Get the Pd. */
479 /* Get dev handle from Pd. */
480 lbInfo
[ldCount
].raid1DevHandle
[0] =
481 MR_PdDevHandleGet(pd
, map
);
483 pd
= MR_ArPdGet(arRef
, 1, map
); /* Get the Pd. */
484 /* Get dev handle from Pd. */
485 lbInfo
[ldCount
].raid1DevHandle
[1] =
486 MR_PdDevHandleGet(pd
, map
);
487 con_log(CL_ANN1
, (CE_NOTE
,
488 "mrsas: ld=%d load balancing enabled \n", ldCount
));
490 lbInfo
[ldCount
].loadBalanceFlag
= 0;
497 megasas_get_best_arm(PLD_LOAD_BALANCE_INFO lbInfo
, U8 arm
, U64 block
,
504 /* get the pending cmds for the data and mirror arms */
505 pend0
= lbInfo
->scsi_pending_cmds
[0];
506 pend1
= lbInfo
->scsi_pending_cmds
[1];
508 /* Determine the disk whose head is nearer to the req. block */
509 diff0
= ABS_DIFF(block
, lbInfo
->last_accessed_block
[0]);
510 diff1
= ABS_DIFF(block
, lbInfo
->last_accessed_block
[1]);
511 bestArm
= (diff0
<= diff1
? 0 : 1);
513 if ((bestArm
== arm
&& pend0
> pend1
+ 16) ||
514 (bestArm
!= arm
&& pend1
> pend0
+ 16)) {
518 /* Update the last accessed block on the correct pd */
519 lbInfo
->last_accessed_block
[bestArm
] = block
+ count
- 1;
524 get_updated_dev_handle(PLD_LOAD_BALANCE_INFO lbInfo
,
525 struct IO_REQUEST_INFO
*io_info
)
530 old_arm
= lbInfo
->raid1DevHandle
[0] == io_info
->devHandle
? 0 : 1;
532 /* get best new arm */
533 arm
= megasas_get_best_arm(lbInfo
, old_arm
, io_info
->ldStartBlock
,
536 devHandle
= lbInfo
->raid1DevHandle
[arm
];
538 lbInfo
->scsi_pending_cmds
[arm
]++;