/*
 * **********************************************************************
 *
 * ld_pd_map.c
 *
 * Solaris MegaRAID device driver for SAS2.0 controllers
 * Copyright (c) 2008-2012, LSI Logic Corporation.
 * All rights reserved.
 *
 * Version:
 * Author:
 *		Swaminathan K S
 *		Arun Chandrashekhar
 *		Manju R
 *		Rasheed
 *		Shakeel Bukhari
 *
 * This module contains functions for device drivers
 * to get pd-ld mapping information.
 *
 * **********************************************************************
 */
/*
 * Copyright 2015 Garrett D'Amore <garrett@damore.org>
 * Copyright 2017 Citrus IT Limited. All rights reserved.
 */
#include <sys/scsi/scsi.h>
#include "mr_sas.h"
#include "ld_pd_map.h"
/*
 * This function will check if FAST IO is possible on this logical drive
 * by checking the EVENT information available in the driver
 */
#define	MR_LD_STATE_OPTIMAL	3
#define	ABS_DIFF(a, b)		(((a) > (b)) ? ((a) - (b)) : ((b) - (a)))

static void mr_update_load_balance_params(MR_FW_RAID_MAP_ALL *,
    PLD_LOAD_BALANCE_INFO);

#define	FALSE	0
#define	TRUE	1

typedef	U64	REGION_KEY;
typedef	U32	REGION_LEN;
extern int	debug_level_g;
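
/*
 * The helpers below are thin accessors into the firmware RAID map: they
 * translate a logical drive (LD) number, target id, array reference, or
 * arm index into the corresponding span, physical drive (PD), or device
 * handle entries that the firmware publishes in MR_FW_RAID_MAP_ALL.
 */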

MR_LD_RAID
*MR_LdRaidGet(U32 ld, MR_FW_RAID_MAP_ALL *map)
{
	return (&map->raidMap.ldSpanMap[ld].ldRaid);
}

U16
MR_GetLDTgtId(U32 ld, MR_FW_RAID_MAP_ALL *map)
{
	return (map->raidMap.ldSpanMap[ld].ldRaid.targetId);
}

static MR_SPAN_BLOCK_INFO *
MR_LdSpanInfoGet(U32 ld, MR_FW_RAID_MAP_ALL *map)
{
	return (&map->raidMap.ldSpanMap[ld].spanBlock[0]);
}

static U8
MR_LdDataArmGet(U32 ld, U32 armIdx, MR_FW_RAID_MAP_ALL *map)
{
	return (map->raidMap.ldSpanMap[ld].dataArmMap[armIdx]);
}

static U16
MR_ArPdGet(U32 ar, U32 arm, MR_FW_RAID_MAP_ALL *map)
{
	return (map->raidMap.arMapInfo[ar].pd[arm]);
}

static U16
MR_LdSpanArrayGet(U32 ld, U32 span, MR_FW_RAID_MAP_ALL *map)
{
	return (map->raidMap.ldSpanMap[ld].spanBlock[span].span.arrayRef);
}

static U16
MR_PdDevHandleGet(U32 pd, MR_FW_RAID_MAP_ALL *map)
{
	return (map->raidMap.devHndlInfo[pd].curDevHdl);
}

U16
MR_TargetIdToLdGet(U32 ldTgtId, MR_FW_RAID_MAP_ALL *map)
{
	return (map->raidMap.ldTgtIdToLd[ldTgtId]);
}

U8
MR_CheckDIF(U32 ldTgtId, MR_FW_RAID_MAP_ALL *map)
{
	MR_LD_RAID	*raid;
	U32		ld;

	ld = MR_TargetIdToLdGet(ldTgtId, map);

	if (ld >= MAX_LOGICAL_DRIVES) {
		return (FALSE);
	}

	raid = MR_LdRaidGet(ld, map);

	return (raid->capability.ldPiMode == 0x8);
}
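
/*
 * Note: the driver treats ldPiMode == 0x8 as the LD having protection
 * information (DIF) enabled; any other value is treated as non-PI.
 */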

static MR_LD_SPAN *
MR_LdSpanPtrGet(U32 ld, U32 span, MR_FW_RAID_MAP_ALL *map)
{
	return (&map->raidMap.ldSpanMap[ld].spanBlock[span].span);
}

/*
 * This function will validate the Map info data provided by FW.
 */
U8
MR_ValidateMapInfo(MR_FW_RAID_MAP_ALL *map, PLD_LOAD_BALANCE_INFO lbInfo)
{
	MR_FW_RAID_MAP *pFwRaidMap = &map->raidMap;
	U32 fwsize = sizeof (MR_FW_RAID_MAP) - sizeof (MR_LD_SPAN_MAP) +
	    (sizeof (MR_LD_SPAN_MAP) * pFwRaidMap->ldCount);

	if (pFwRaidMap->totalSize != fwsize) {
		con_log(CL_ANN1, (CE_NOTE,
		    "map info structure size 0x%x is "
		    "not matching with ld count\n", fwsize));
		/* sizeof (foo) returns size_t, which is *LONG*. */
		con_log(CL_ANN1, (CE_NOTE, "span map 0x%x total size 0x%x\n",
		    (int)sizeof (MR_LD_SPAN_MAP), pFwRaidMap->totalSize));

		return (0);
	}

	mr_update_load_balance_params(map, lbInfo);

	return (1);
}
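
/*
 * A worked example of the size check above (illustrative numbers only):
 * MR_FW_RAID_MAP already embeds one MR_LD_SPAN_MAP, so for ldCount == 4
 * the expected size is
 *	sizeof (MR_FW_RAID_MAP) + 3 * sizeof (MR_LD_SPAN_MAP).
 * A totalSize that disagrees means the map and the ld count are out of
 * sync, and the map is rejected rather than trusted.
 */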

U32
MR_GetSpanBlock(U32 ld, U64 row, U64 *span_blk, MR_FW_RAID_MAP_ALL *map,
    int *div_error)
{
	MR_SPAN_BLOCK_INFO *pSpanBlock = MR_LdSpanInfoGet(ld, map);
	MR_QUAD_ELEMENT	*qe;
	MR_LD_RAID	*raid = MR_LdRaidGet(ld, map);
	U32		span, j;

	for (span = 0; span < raid->spanDepth; span++, pSpanBlock++) {
		for (j = 0; j < pSpanBlock->block_span_info.noElements; j++) {
			qe = &pSpanBlock->block_span_info.quads[j];
			if (qe->diff == 0) {
				*div_error = 1;
				return (span);
			}
			if (qe->logStart <= row && row <= qe->logEnd &&
			    (((row - qe->logStart) % qe->diff)) == 0) {
				if (span_blk != NULL) {
					U64 blk;
					blk = ((row - qe->logStart) /
					    (qe->diff));

					blk = (blk + qe->offsetInSpan) <<
					    raid->stripeShift;
					*span_blk = blk;
				}
				return (span);
			}
		}
	}
	return (span);
}
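
/*
 * Quad lookup, worked example (illustrative values): a quad with
 * logStart = 0, logEnd = 9 and diff = 2 owns rows 0, 2, 4, 6, 8.  For
 * row = 6, (row - logStart) / diff == 3, so the span block becomes
 * (3 + offsetInSpan) << stripeShift.  A quad with diff == 0 can never
 * match and would divide by zero, hence the div_error bail-out above.
 */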

/*
 * *************************************************************
 *
 * This routine calculates the arm, span and block for
 * the specified stripe and reference in stripe.
 *
 * Inputs :
 *
 *	ld - Logical drive number
 *	stripRow - Stripe number
 *	stripRef - Reference in stripe
 *
 * Outputs :
 *
 *	span - Span number
 *	block - Absolute Block number in the physical disk
 */
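/*
 * RAID-6 arm rotation, worked example (illustrative values): with
 * rowSize = 4 and row = 1, rowMod = 1 and armQ = 4 - 1 - 1 = 2, i.e.
 * the Q parity sits on arm 2 for that row.  Data arm logArm = 0 then
 * maps to physArm = armQ + 1 + 0 = 3, and logArm = 1 wraps around to
 * physArm = 0.  This is the rotation the firmware map assumes; the
 * non-RAID-6 path instead reads the arm straight out of dataArmMap via
 * MR_LdDataArmGet().
 */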
static U8
MR_GetPhyParams(struct mrsas_instance *instance, U32 ld, U64 stripRow,
    U16 stripRef, U64 *pdBlock, U16 *pDevHandle,
    MPI2_SCSI_IO_VENDOR_UNIQUE *pRAID_Context, MR_FW_RAID_MAP_ALL *map)
{
	MR_LD_RAID	*raid = MR_LdRaidGet(ld, map);
	U32		pd, arRef;
	U8		physArm, span;
	U64		row;
	int		error_code = 0;
	U8		retval = TRUE;
	U32		rowMod;
	U32		armQ;
	U32		arm;

	ASSERT(raid->rowDataSize != 0);

	row = (stripRow / raid->rowDataSize);

	if (raid->level == 6) {
		/* logical arm within the row */
		U32 logArm = (stripRow % (raid->rowDataSize));

		if (raid->rowSize == 0) {
			return (FALSE);
		}
		rowMod = (row % (raid->rowSize));
		/* Q parity rotates backwards one arm per row. */
		armQ = raid->rowSize - 1 - rowMod;
		/* data arms logically follow Q */
		arm = armQ + 1 + logArm;
		if (arm >= raid->rowSize)	/* handle wrap */
			arm -= raid->rowSize;
		physArm = (U8)arm;
	} else {
		if (raid->modFactor == 0)
			return (FALSE);
		physArm = MR_LdDataArmGet(ld,
		    (stripRow % (raid->modFactor)), map);
	}

	if (raid->spanDepth == 1) {
		span = 0;
		*pdBlock = row << raid->stripeShift;
	} else
		span = (U8)MR_GetSpanBlock(ld, row, pdBlock, map, &error_code);

	if (error_code == 1)
		return (FALSE);

	/* Get the array on which this span is present. */
	arRef = MR_LdSpanArrayGet(ld, span, map);
	/* Get the Pd. */
	pd = MR_ArPdGet(arRef, physArm, map);
	/* Get dev handle from Pd. */
	if (pd != MR_PD_INVALID) {
		*pDevHandle = MR_PdDevHandleGet(pd, map);
	} else {
		*pDevHandle = MR_PD_INVALID; /* set dev handle as invalid. */
		if (raid->level >= 5 && (!instance->gen3 ||
		    raid->regTypeReqOnRead != REGION_TYPE_UNUSED)) {
			pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
		} else if (raid->level == 1) {
			/* Get Alternate Pd. */
			pd = MR_ArPdGet(arRef, physArm + 1, map);
			/* Get dev handle from Pd. */
			if (pd != MR_PD_INVALID)
				*pDevHandle = MR_PdDevHandleGet(pd, map);
		}
	}

	*pdBlock += stripRef + MR_LdSpanPtrGet(ld, span, map)->startBlk;

	pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) |
	    physArm;

	return (retval);
}

/*
 * ***********************************************************************
 *
 * MR_BuildRaidContext function
 *
 * This function will initiate command processing.  The start/end row and
 * strip information is calculated, fast-path eligibility is decided, and
 * the region lock parameters are filled into the RAID context.
 * Returns TRUE on success; FALSE if the map is absent or inconsistent,
 * or if fast-path physical parameters could not be computed.
 */
U8
MR_BuildRaidContext(struct mrsas_instance *instance,
    struct IO_REQUEST_INFO *io_info, MPI2_SCSI_IO_VENDOR_UNIQUE *pRAID_Context,
    MR_FW_RAID_MAP_ALL *map)
{
	MR_LD_RAID	*raid;
	U32		ld, stripSize, stripe_mask;
	U64		endLba, endStrip, endRow;
	U64		start_row, start_strip;
	REGION_KEY	regStart;
	REGION_LEN	regSize;
	U8		num_strips, numRows;
	U16		ref_in_start_stripe;
	U16		ref_in_end_stripe;

	U64		ldStartBlock;
	U32		numBlocks, ldTgtId;
	U8		isRead;
	U8		retval = 0;

	ldStartBlock = io_info->ldStartBlock;
	numBlocks = io_info->numBlocks;
	ldTgtId = io_info->ldTgtId;
	isRead = io_info->isRead;

	if (map == NULL) {
		io_info->fpOkForIo = FALSE;
		return (FALSE);
	}

	ld = MR_TargetIdToLdGet(ldTgtId, map);

	if (ld >= MAX_LOGICAL_DRIVES) {
		io_info->fpOkForIo = FALSE;
		return (FALSE);
	}

	raid = MR_LdRaidGet(ld, map);

	stripSize = 1 << raid->stripeShift;
	stripe_mask = stripSize - 1;

	/*
	 * calculate starting row and stripe, and number of strips and rows
	 */
	start_strip = ldStartBlock >> raid->stripeShift;
	ref_in_start_stripe = (U16)(ldStartBlock & stripe_mask);
	endLba = ldStartBlock + numBlocks - 1;
	ref_in_end_stripe = (U16)(endLba & stripe_mask);
	endStrip = endLba >> raid->stripeShift;
	num_strips = (U8)(endStrip - start_strip + 1);
	/* Check to make sure we are not dividing by zero */
	if (raid->rowDataSize == 0)
		return (FALSE);
	start_row = (start_strip / raid->rowDataSize);
	endRow = (endStrip / raid->rowDataSize);
	/* get the row count */
	numRows = (U8)(endRow - start_row + 1);

	/*
	 * calculate region info.
	 */
	regStart = start_row << raid->stripeShift;
	regSize = stripSize;

	/* Check if we can send this I/O via FastPath */
	if (raid->capability.fpCapable) {
		if (isRead) {
			io_info->fpOkForIo = (raid->capability.fpReadCapable &&
			    ((num_strips == 1) ||
			    raid->capability.fpReadAcrossStripe));
		} else {
			io_info->fpOkForIo =
			    (raid->capability.fpWriteCapable &&
			    ((num_strips == 1) ||
			    raid->capability.fpWriteAcrossStripe));
		}
	} else
		io_info->fpOkForIo = FALSE;

	/*
	 * Check for DIF support
	 */
	if (!raid->capability.ldPiMode) {
		io_info->ldPI = FALSE;
	} else {
		io_info->ldPI = TRUE;
	}

	if (numRows == 1) {
		if (num_strips == 1) {
			regStart += ref_in_start_stripe;
			regSize = numBlocks;
		}
	} else {
		if (start_strip == (start_row + 1) * raid->rowDataSize - 1) {
			regStart += ref_in_start_stripe;
			regSize = stripSize - ref_in_start_stripe;
		}

		if (numRows > 2) {
			regSize += (numRows - 2) << raid->stripeShift;
		}

		if (endStrip == endRow * raid->rowDataSize) {
			regSize += ref_in_end_stripe + 1;
		} else {
			regSize += stripSize;
		}
	}
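
	/*
	 * Region sizing, worked example (illustrative values): with
	 * stripSize = 64 blocks and rowDataSize = 2, an I/O that starts
	 * 16 blocks into the last strip of its row and touches
	 * numRows = 3 rows locks regSize = (64 - 16) + (3 - 2) * 64 + 64
	 * = 176 blocks starting at regStart + 16.  Only the single-row,
	 * single-strip case shrinks the lock to exactly numBlocks.
	 */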
	pRAID_Context->timeoutValue = map->raidMap.fpPdIoTimeoutSec;

	if (instance->gen3) {
		pRAID_Context->regLockFlags = (isRead) ?
		    raid->regTypeReqOnRead : raid->regTypeReqOnWrite;
	} else {
		pRAID_Context->regLockFlags = (isRead) ?
		    REGION_TYPE_SHARED_READ : raid->regTypeReqOnWrite;
	}

	pRAID_Context->ldTargetId = raid->targetId;
	pRAID_Context->regLockRowLBA = regStart;
	pRAID_Context->regLockLength = regSize;
	pRAID_Context->configSeqNum = raid->seqNum;

	/*
	 * Get Phy Params only if FP capable,
	 * or else leave it to MR firmware to do the calculation.
	 */
	if (io_info->fpOkForIo) {
		/* if fast path possible then get the physical parameters */
		retval = MR_GetPhyParams(instance, ld, start_strip,
		    ref_in_start_stripe, &io_info->pdBlock,
		    &io_info->devHandle, pRAID_Context, map);

		/* If IO on an invalid Pd, then FP is not possible. */
		if (io_info->devHandle == MR_PD_INVALID)
			io_info->fpOkForIo = FALSE;

		return (retval);

	} else if (isRead) {
		uint_t stripIdx;

		for (stripIdx = 0; stripIdx < num_strips; stripIdx++) {
			if (!MR_GetPhyParams(instance, ld,
			    start_strip + stripIdx, ref_in_start_stripe,
			    &io_info->pdBlock, &io_info->devHandle,
			    pRAID_Context, map)) {
				return (TRUE);
			}
		}
	}

	return (TRUE);
}

void
mr_update_load_balance_params(MR_FW_RAID_MAP_ALL *map,
    PLD_LOAD_BALANCE_INFO lbInfo)
{
	int		ldCount;
	U16		ld;
	MR_LD_RAID	*raid;

	for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++) {
		ld = MR_TargetIdToLdGet(ldCount, map);

		if (ld >= MAX_LOGICAL_DRIVES) {
			con_log(CL_ANN1,
			    (CE_NOTE, "mrsas: ld=%d Invalid ld \n", ld));
			continue;
		}

		raid = MR_LdRaidGet(ld, map);

		/* Two drive Optimal RAID 1 */
		if ((raid->level == 1) && (raid->rowSize == 2) &&
		    (raid->spanDepth == 1) &&
		    raid->ldState == MR_LD_STATE_OPTIMAL) {
			U32 pd, arRef;

			lbInfo[ldCount].loadBalanceFlag = 1;

			/* Get the array on which this span is present. */
			arRef = MR_LdSpanArrayGet(ld, 0, map);

			pd = MR_ArPdGet(arRef, 0, map);	/* Get the Pd. */
			/* Get dev handle from Pd. */
			lbInfo[ldCount].raid1DevHandle[0] =
			    MR_PdDevHandleGet(pd, map);

			pd = MR_ArPdGet(arRef, 1, map);	/* Get the Pd. */
			/* Get dev handle from Pd. */
			lbInfo[ldCount].raid1DevHandle[1] =
			    MR_PdDevHandleGet(pd, map);
			con_log(CL_ANN1, (CE_NOTE,
			    "mrsas: ld=%d load balancing enabled \n",
			    ldCount));
		} else {
			lbInfo[ldCount].loadBalanceFlag = 0;
		}
	}
}
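
/*
 * Note that lbInfo is indexed by target id (the loop variable above),
 * not by the internal ld number returned by MR_TargetIdToLdGet().
 */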

static U8
megasas_get_best_arm(PLD_LOAD_BALANCE_INFO lbInfo, U8 arm, U64 block,
    U32 count)
{
	U16	pend0, pend1;
	U64	diff0, diff1;
	U8	bestArm;

	/* get the pending cmds for the data and mirror arms */
	pend0 = lbInfo->scsi_pending_cmds[0];
	pend1 = lbInfo->scsi_pending_cmds[1];

	/* Determine the disk whose head is nearer to the req. block */
	diff0 = ABS_DIFF(block, lbInfo->last_accessed_block[0]);
	diff1 = ABS_DIFF(block, lbInfo->last_accessed_block[1]);
	bestArm = (diff0 <= diff1 ? 0 : 1);

	if ((bestArm == arm && pend0 > pend1 + 16) ||
	    (bestArm != arm && pend1 > pend0 + 16)) {
		bestArm ^= 1;
	}

	/* Update the last accessed block on the correct pd */
	lbInfo->last_accessed_block[bestArm] = block + count - 1;
	return (bestArm);
}
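
/*
 * Example of the hysteresis above (illustrative counts): if the head-
 * distance test keeps a request on its current arm 0 while arm 0 has
 * 20 pending commands and arm 1 has 3, then 20 > 3 + 16 flips the
 * choice to arm 1.  A smaller imbalance leaves the nearer-head arm in
 * place, so the +16 slack keeps the arms from ping-ponging on every
 * request.
 */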

U16
get_updated_dev_handle(PLD_LOAD_BALANCE_INFO lbInfo,
    struct IO_REQUEST_INFO *io_info)
{
	U8	arm, old_arm;
	U16	devHandle;

	old_arm = lbInfo->raid1DevHandle[0] == io_info->devHandle ? 0 : 1;

	/* get best new arm */
	arm = megasas_get_best_arm(lbInfo, old_arm, io_info->ldStartBlock,
	    io_info->numBlocks);

	devHandle = lbInfo->raid1DevHandle[arm];

	lbInfo->scsi_pending_cmds[arm]++;

	return (devHandle);
}
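
#ifdef MR_SAS_LDPD_EXAMPLE
/*
 * A minimal, hypothetical sketch (not compiled into the driver) of how
 * the I/O path is expected to use this module: build a RAID context per
 * request and, for a load-balanced two-drive RAID 1 read, swap in the
 * better mirror's device handle.  The function name example_route_read,
 * the lbInfo-per-target-id indexing, and the zeroed
 * MPI2_SCSI_IO_VENDOR_UNIQUE are assumptions made for illustration only.
 */
static U16
example_route_read(struct mrsas_instance *instance, MR_FW_RAID_MAP_ALL *map,
    PLD_LOAD_BALANCE_INFO lbInfo, U16 ldTgtId, U64 lba, U32 nblks)
{
	struct IO_REQUEST_INFO		io_info;
	MPI2_SCSI_IO_VENDOR_UNIQUE	raid_context;

	bzero(&io_info, sizeof (io_info));
	bzero(&raid_context, sizeof (raid_context));
	io_info.ldStartBlock = lba;
	io_info.numBlocks = nblks;
	io_info.ldTgtId = ldTgtId;
	io_info.isRead = 1;

	(void) MR_BuildRaidContext(instance, &io_info, &raid_context, map);

	if (io_info.fpOkForIo && lbInfo[ldTgtId].loadBalanceFlag) {
		/* RAID 1: route to the less busy / nearer mirror arm. */
		return (get_updated_dev_handle(&lbInfo[ldTgtId], &io_info));
	}

	/* Fall back to the handle MR_GetPhyParams filled in (if any). */
	return (io_info.devHandle);
}
#endif	/* MR_SAS_LDPD_EXAMPLE */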