/*
 * Engenio/LSI RDAC DM HW handler
 *
 * Copyright (C) 2005 Mike Christie. All rights reserved.
 * Copyright (C) Chandra Seetharaman, IBM Corp. 2007
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_eh.h>

#define DM_MSG_PREFIX "multipath rdac"

#include "dm.h"
#include "dm-hw-handler.h"

#define RDAC_DM_HWH_NAME "rdac"
#define RDAC_DM_HWH_VER "0.4"
/*
 * These struct definitions and the forming of the
 * mode page were taken from the LSI RDAC 2.4 GPL'd
 * driver, and then converted to Linux conventions.
 */
#define RDAC_QUIESCENCE_TIME			20

#define RDAC_PAGE_CODE_REDUNDANT_CONTROLLER	0x2c

/*
 * Controller mode definitions
 */
#define RDAC_MODE_TRANSFER_ALL_LUNS		0x01
#define RDAC_MODE_TRANSFER_SPECIFIED_LUNS	0x02

#define RDAC_FORCED_QUIESENCE			0x02

#define RDAC_FAILOVER_TIMEOUT			(60 * HZ)
struct rdac_mode_6_hdr {
	u8	data_len;
	u8	medium_type;
	u8	device_params;
	u8	block_desc_len;
};

struct rdac_mode_10_hdr {
	u16	data_len;
	u8	medium_type;
	u8	device_params;
	u16	reserved;
	u16	block_desc_len;
};

struct rdac_mode_common {
	u8	controller_serial[16];
	u8	alt_controller_serial[16];
	u8	rdac_mode[2];
	u8	alt_rdac_mode[2];
	u8	quiescence_timeout;
	u8	rdac_options;
};
struct rdac_pg_legacy {
	struct rdac_mode_6_hdr hdr;
	u8	page_code;
	u8	page_len;
	struct rdac_mode_common common;
#define MODE6_MAX_LUN	32
	u8	lun_table[MODE6_MAX_LUN];
};
struct rdac_pg_expanded {
	struct rdac_mode_10_hdr hdr;
	u8	page_code;
	u8	subpage_code;
	u8	page_len[2];
	struct rdac_mode_common common;
	u8	lun_table[256];
};
struct c9_inquiry {
	u8	page_code;	/* 0xC9 */
	u8	page_id[4];	/* "vace" */
	u8	avte_cvp;	/* bit 7: AVT mode, bit 0: controller owns LUN */
	/* remaining header and reserved bytes not shown */
};
#define SUBSYS_ID_LEN	16
#define SLOT_ID_LEN	2
struct c4_inquiry {
	u8	page_code;	/* 0xC4 */
	u8	page_id[4];	/* "subs" */
	u8	subsys_id[SUBSYS_ID_LEN];
	u8	slot_id[SLOT_ID_LEN];
	/* remaining header and reserved bytes not shown */
};
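
/*
 * One rdac_controller is shared by every path that leads to the same
 * physical controller (matched by subsystem id and slot id below) and is
 * reference counted via its kref.
 */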
struct rdac_controller {
	u8			subsys_id[SUBSYS_ID_LEN];
	u8			slot_id[SLOT_ID_LEN];
	int			use_10_ms;
	struct kref		kref;
	struct list_head	node; /* list of all controllers */
	spinlock_t		lock;
	int			submitted;
	struct list_head	cmd_list; /* list of commands to be submitted */
	union {
		struct rdac_pg_legacy	legacy;
		struct rdac_pg_expanded	expanded;
	} mode_select;
};
struct c8_inquiry {
	u8	page_code;	/* 0xC8 */
	u8	page_id[4];	/* "edid" */
	u8	vol_user_label_len;
	u8	vol_user_label[60];
	u8	array_uniq_id_len;
	u8	array_unique_id[16];
	u8	array_user_label_len;
	u8	array_user_label[60];
	u8	lun[8];
	/* remaining header and reserved bytes not shown */
};
struct c2_inquiry {
	u8	page_code;	/* 0xC2 */
	u8	page_id[4];	/* "swr4" */
	u8	max_lun_supported;
	u8	partitions[239]; /* Total allocation length should be 0xFF */
	/* remaining header and reserved bytes not shown */
};
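
/*
 * Per-path handler context. It records which command the worker should
 * send next and buffers the inquiry responses in the union at the end.
 */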
struct rdac_handler {
	struct list_head	entry; /* list waiting to submit MODE SELECT */
	unsigned		timeout;
	struct rdac_controller	*ctlr;
#define UNINITIALIZED_LUN	(1 << 8)
	unsigned		lun;
	unsigned char		sense[SCSI_SENSE_BUFFERSIZE];
	struct dm_path		*path;
	struct work_struct	work;
#define	SEND_C2_INQUIRY		1
#define	SEND_C4_INQUIRY		2
#define	SEND_C8_INQUIRY		3
#define	SEND_C9_INQUIRY		4
#define	SEND_MODE_SELECT	5
	int			cmd_to_send;
	union {
		struct c2_inquiry c2;
		struct c4_inquiry c4;
		struct c8_inquiry c8;
		struct c9_inquiry c9;
	} inq;
};
static LIST_HEAD(ctlr_list);
static DEFINE_SPINLOCK(list_lock);
static struct workqueue_struct *rdac_wkqd;
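
/*
 * A request is treated as failed if the block layer reported an error or
 * the host/message bytes show the command did not complete cleanly.
 */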
static inline int had_failures(struct request *req, int error)
{
	return (error || host_byte(req->errors) != DID_OK ||
			msg_byte(req->errors) != COMMAND_COMPLETE);
}
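
/*
 * Re-queue a C9 inquiry for every handler that queued up behind the
 * MODE SELECT which just completed on this controller.
 */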
static void rdac_resubmit_all(struct rdac_handler *h)
{
	struct rdac_controller *ctlr = h->ctlr;
	struct rdac_handler *tmp, *h1;

	spin_lock(&ctlr->lock);
	list_for_each_entry_safe(h1, tmp, &ctlr->cmd_list, entry) {
		h1->cmd_to_send = SEND_C9_INQUIRY;
		queue_work(rdac_wkqd, &h1->work);
		list_del(&h1->entry);
	}
	ctlr->submitted = 0;
	spin_unlock(&ctlr->lock);
}
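
/*
 * Completion handler for the failover MODE SELECT: retryable sense codes
 * trigger another C9 inquiry, anything else finishes pg_init with success
 * or MP_FAIL_PATH, and queued handlers are resubmitted either way.
 */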
static void mode_select_endio(struct request *req, int error)
{
	struct rdac_handler *h = req->end_io_data;
	struct scsi_sense_hdr sense_hdr;
	int sense = 0, fail = 0;

	if (had_failures(req, error)) {
		fail = 1;
		goto failed;
	}

	/* Check if MODE SELECT failed with a check condition */
	if (status_byte(req->errors) == CHECK_CONDITION) {
		scsi_normalize_sense(req->sense, SCSI_SENSE_BUFFERSIZE,
				&sense_hdr);
		sense = (sense_hdr.sense_key << 16) | (sense_hdr.asc << 8) |
				sense_hdr.ascq;
		/* If it is a retryable failure, submit the c9 inquiry again */
		if (sense == 0x59136 || sense == 0x68b02 || sense == 0xb8b02 ||
		    sense == 0x62900) {
			/* 0x59136    - Command lock contention
			 * 0x[6b]8b02 - Quiescence in progress or achieved
			 * 0x62900    - Power On, Reset, or Bus Device Reset
			 */
			h->cmd_to_send = SEND_C9_INQUIRY;
			queue_work(rdac_wkqd, &h->work);
			goto done;
		}
		if (sense)
			DMINFO("MODE_SELECT failed on %s with sense 0x%x",
						h->path->dev->name, sense);
	}
failed:
	if (fail || sense)
		dm_pg_init_complete(h->path, MP_FAIL_PATH);
	else
		dm_pg_init_complete(h->path, 0);

done:
	rdac_resubmit_all(h);
	__blk_put_request(req->q, req);
}
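
/*
 * Allocate and set up a BLOCK_PC request aimed at this path's device,
 * mapped onto the supplied buffer; the caller fills in the CDB.
 */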
static struct request *get_rdac_req(struct rdac_handler *h,
			void *buffer, unsigned buflen, int rw)
{
	struct request *rq;
	struct request_queue *q = bdev_get_queue(h->path->dev->bdev);

	rq = blk_get_request(q, rw, GFP_KERNEL);
	if (!rq) {
		DMINFO("get_rdac_req: blk_get_request failed");
		return NULL;
	}

	if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_KERNEL)) {
		blk_put_request(rq);
		DMINFO("get_rdac_req: blk_rq_map_kern failed");
		return NULL;
	}

	rq->sense = h->sense;
	memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
	rq->sense_len = 0;

	rq->end_io_data = h;
	rq->timeout = h->timeout;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE;
	return rq;
}
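
/*
 * Build the MODE SELECT payload (redundant controller page 0x2c, or its
 * expanded subpage when mode select 10 is in use) that asks the controller
 * on this path to take over the LUN.
 */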
static struct request *rdac_failover_get(struct rdac_handler *h)
{
	struct request *rq;
	struct rdac_mode_common *common;
	unsigned data_size;

	if (h->ctlr->use_10_ms) {
		struct rdac_pg_expanded *rdac_pg;

		data_size = sizeof(struct rdac_pg_expanded);
		rdac_pg = &h->ctlr->mode_select.expanded;
		memset(rdac_pg, 0, data_size);
		common = &rdac_pg->common;
		rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER + 0x40;
		rdac_pg->subpage_code = 0x1;
		rdac_pg->page_len[0] = 0x01;
		rdac_pg->page_len[1] = 0x28;
		rdac_pg->lun_table[h->lun] = 0x81;
	} else {
		struct rdac_pg_legacy *rdac_pg;

		data_size = sizeof(struct rdac_pg_legacy);
		rdac_pg = &h->ctlr->mode_select.legacy;
		memset(rdac_pg, 0, data_size);
		common = &rdac_pg->common;
		rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER;
		rdac_pg->page_len = 0x68;
		rdac_pg->lun_table[h->lun] = 0x81;
	}
	common->rdac_mode[1] = RDAC_MODE_TRANSFER_SPECIFIED_LUNS;
	common->quiescence_timeout = RDAC_QUIESCENCE_TIME;
	common->rdac_options = RDAC_FORCED_QUIESENCE;

	/* get request for block layer packet command */
	rq = get_rdac_req(h, &h->ctlr->mode_select, data_size, WRITE);
	if (!rq) {
		DMERR("rdac_failover_get: no rq");
		return NULL;
	}

	/* Prepare the command. */
	if (h->ctlr->use_10_ms) {
		rq->cmd[0] = MODE_SELECT_10;
		rq->cmd[7] = data_size >> 8;
		rq->cmd[8] = data_size & 0xff;
	} else {
		rq->cmd[0] = MODE_SELECT;
		rq->cmd[4] = data_size;
	}
	rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);

	return rq;
}
/* Acquires h->ctlr->lock */
static void submit_mode_select(struct rdac_handler *h)
{
	struct request *rq;
	struct request_queue *q = bdev_get_queue(h->path->dev->bdev);

	spin_lock(&h->ctlr->lock);
	if (h->ctlr->submitted) {
		list_add(&h->entry, &h->ctlr->cmd_list);
		goto drop_lock;
	}

	if (!q) {
		DMINFO("submit_mode_select: no queue");
		goto fail_path;
	}

	rq = rdac_failover_get(h);
	if (!rq) {
		DMERR("submit_mode_select: no rq");
		goto fail_path;
	}

	DMINFO("queueing MODE_SELECT command on %s", h->path->dev->name);

	blk_execute_rq_nowait(q, NULL, rq, 1, mode_select_endio);
	h->ctlr->submitted = 1;
	goto drop_lock;
fail_path:
	dm_pg_init_complete(h->path, MP_FAIL_PATH);
drop_lock:
	spin_unlock(&h->ctlr->lock);
}
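
/* kref release: take the controller off the global list and free it */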
static void release_ctlr(struct kref *kref)
{
	struct rdac_controller *ctlr;
	ctlr = container_of(kref, struct rdac_controller, kref);

	spin_lock(&list_lock);
	list_del(&ctlr->node);
	spin_unlock(&list_lock);
	kfree(ctlr);
}
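
/*
 * Look up an existing controller by subsystem and slot id, or allocate a
 * new one. GFP_ATOMIC is used because list_lock is held across the call.
 */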
static struct rdac_controller *get_controller(u8 *subsys_id, u8 *slot_id)
{
	struct rdac_controller *ctlr, *tmp;

	spin_lock(&list_lock);

	list_for_each_entry(tmp, &ctlr_list, node) {
		if ((memcmp(tmp->subsys_id, subsys_id, SUBSYS_ID_LEN) == 0) &&
		    (memcmp(tmp->slot_id, slot_id, SLOT_ID_LEN) == 0)) {
			kref_get(&tmp->kref);
			spin_unlock(&list_lock);
			return tmp;
		}
	}
	ctlr = kmalloc(sizeof(*ctlr), GFP_ATOMIC);
	if (!ctlr)
		goto done;

	/* initialize fields of controller */
	memcpy(ctlr->subsys_id, subsys_id, SUBSYS_ID_LEN);
	memcpy(ctlr->slot_id, slot_id, SLOT_ID_LEN);
	kref_init(&ctlr->kref);
	spin_lock_init(&ctlr->lock);
	ctlr->submitted = 0;
	ctlr->use_10_ms = -1;
	INIT_LIST_HEAD(&ctlr->cmd_list);
	list_add(&ctlr->node, &ctlr_list);
done:
	spin_unlock(&list_lock);

	return ctlr;
}
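
/*
 * Inquiry completion handlers. pg_init walks a small state machine:
 * C8 learns the LUN, C9 checks AVT mode and ownership, C4 identifies the
 * controller, C2 picks mode select 6 vs 10, and MODE SELECT finally
 * transfers ownership.
 */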
static void c4_endio(struct request *req, int error)
{
	struct rdac_handler *h = req->end_io_data;
	struct c4_inquiry *sp;

	if (had_failures(req, error)) {
		dm_pg_init_complete(h->path, MP_FAIL_PATH);
		goto done;
	}

	sp = &h->inq.c4;

	h->ctlr = get_controller(sp->subsys_id, sp->slot_id);

	if (h->ctlr) {
		h->cmd_to_send = SEND_C9_INQUIRY;
		queue_work(rdac_wkqd, &h->work);
	} else
		dm_pg_init_complete(h->path, MP_FAIL_PATH);
done:
	__blk_put_request(req->q, req);
}
static void c2_endio(struct request *req, int error)
{
	struct rdac_handler *h = req->end_io_data;
	struct c2_inquiry *sp;

	if (had_failures(req, error)) {
		dm_pg_init_complete(h->path, MP_FAIL_PATH);
		goto done;
	}

	sp = &h->inq.c2;

	/* If more than MODE6_MAX_LUN luns are supported, use mode select 10 */
	if (sp->max_lun_supported >= MODE6_MAX_LUN)
		h->ctlr->use_10_ms = 1;
	else
		h->ctlr->use_10_ms = 0;

	h->cmd_to_send = SEND_MODE_SELECT;
	queue_work(rdac_wkqd, &h->work);
done:
	__blk_put_request(req->q, req);
}
static void c9_endio(struct request *req, int error)
{
	struct rdac_handler *h = req->end_io_data;
	struct c9_inquiry *sp;

	if (had_failures(req, error)) {
		dm_pg_init_complete(h->path, MP_FAIL_PATH);
		goto done;
	}

	/* We need to look at the sense keys here to take clear action.
	 * For now simple logic: if the host is in AVT mode or if the
	 * controller owns the lun, return dm_pg_init_complete(); otherwise
	 * submit a MODE SELECT.
	 */
	sp = &h->inq.c9;

	/* If in AVT mode, return success */
	if ((sp->avte_cvp >> 7) == 0x1) {
		dm_pg_init_complete(h->path, 0);
		goto done;
	}

	/* If the controller on this path owns the LUN, return success */
	if (sp->avte_cvp & 0x1) {
		dm_pg_init_complete(h->path, 0);
		goto done;
	}

	if (h->ctlr) {
		if (h->ctlr->use_10_ms == -1)
			h->cmd_to_send = SEND_C2_INQUIRY;
		else
			h->cmd_to_send = SEND_MODE_SELECT;
	} else
		h->cmd_to_send = SEND_C4_INQUIRY;
	queue_work(rdac_wkqd, &h->work);
done:
	__blk_put_request(req->q, req);
}
static void c8_endio(struct request *req, int error)
{
	struct rdac_handler *h = req->end_io_data;
	struct c8_inquiry *sp;

	if (had_failures(req, error)) {
		dm_pg_init_complete(h->path, MP_FAIL_PATH);
		goto done;
	}

	/* We need to look at the sense keys here to take clear action.
	 * For now simple logic: get the lun from the inquiry page.
	 */
	sp = &h->inq.c8;
	h->lun = sp->lun[7]; /* currently it uses only one byte */
	h->cmd_to_send = SEND_C9_INQUIRY;

	queue_work(rdac_wkqd, &h->work);
done:
	__blk_put_request(req->q, req);
}
static void submit_inquiry(struct rdac_handler *h, int page_code,
		unsigned int len, rq_end_io_fn endio)
{
	struct request *rq;
	struct request_queue *q = bdev_get_queue(h->path->dev->bdev);

	if (!q)
		goto fail_path;

	rq = get_rdac_req(h, &h->inq, len, READ);
	if (!rq)
		goto fail_path;

	/* Prepare the command. */
	rq->cmd[0] = INQUIRY;
	rq->cmd[1] = 1;		/* EVPD: ask for the vital product data page */
	rq->cmd[2] = page_code;
	rq->cmd[4] = len;
	rq->cmd_len = COMMAND_SIZE(INQUIRY);
	blk_execute_rq_nowait(q, NULL, rq, 1, endio);
	return;

fail_path:
	dm_pg_init_complete(h->path, MP_FAIL_PATH);
}
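
/*
 * Worker: issue whichever command the completion handlers asked for next.
 */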
static void service_wkq(struct work_struct *work)
{
	struct rdac_handler *h = container_of(work, struct rdac_handler, work);

	switch (h->cmd_to_send) {
	case SEND_C2_INQUIRY:
		submit_inquiry(h, 0xC2, sizeof(struct c2_inquiry), c2_endio);
		break;
	case SEND_C4_INQUIRY:
		submit_inquiry(h, 0xC4, sizeof(struct c4_inquiry), c4_endio);
		break;
	case SEND_C8_INQUIRY:
		submit_inquiry(h, 0xC8, sizeof(struct c8_inquiry), c8_endio);
		break;
	case SEND_C9_INQUIRY:
		submit_inquiry(h, 0xC9, sizeof(struct c9_inquiry), c9_endio);
		break;
	case SEND_MODE_SELECT:
		submit_mode_select(h);
		break;
	default:
		BUG();
	}
}
/*
 * Only subpage 0x2c is supported until we confirm whether the rest is just
 * a matter of updating firmware. Basic AVT already works; the remaining
 * RDAC modes can be added when we get time and testers.
 */
static int rdac_create(struct hw_handler *hwh, unsigned argc, char **argv)
{
	struct rdac_handler *h;
	unsigned timeout;

	if (argc == 0) {
		/* No arguments: use defaults */
		timeout = RDAC_FAILOVER_TIMEOUT;
	} else if (argc != 1) {
		DMWARN("incorrect number of arguments");
		return -EINVAL;
	} else {
		/* The single optional argument is the failover timeout. */
		if (sscanf(argv[0], "%u", &timeout) != 1) {
			DMWARN("invalid timeout value");
			return -EINVAL;
		}
	}

	h = kzalloc(sizeof(*h), GFP_KERNEL);
	if (!h)
		return -ENOMEM;

	hwh->context = h;
	h->timeout = timeout;
	h->lun = UNINITIALIZED_LUN;
	INIT_WORK(&h->work, service_wkq);
	DMWARN("using RDAC command with timeout %u", h->timeout);

	return 0;
}
static void rdac_destroy(struct hw_handler *hwh)
{
	struct rdac_handler *h = hwh->context;

	if (h->ctlr)
		kref_put(&h->ctlr->kref, release_ctlr);
	kfree(h);
	hwh->context = NULL;
}
static unsigned rdac_error(struct hw_handler *hwh, struct bio *bio)
{
	/* Try default handler */
	return dm_scsi_err_handler(hwh, bio);
}
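
/*
 * pg_init entry point: start with a C8 inquiry to discover the LUN on the
 * first call, otherwise go straight to the C9 inquiry.
 */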
static void rdac_pg_init(struct hw_handler *hwh, unsigned bypassed,
			struct dm_path *path)
{
	struct rdac_handler *h = hwh->context;

	h->path = path;
	switch (h->lun) {
	case UNINITIALIZED_LUN:
		submit_inquiry(h, 0xC8, sizeof(struct c8_inquiry), c8_endio);
		break;
	default:
		submit_inquiry(h, 0xC9, sizeof(struct c9_inquiry), c9_endio);
	}
}
static struct hw_handler_type rdac_handler = {
	.name = RDAC_DM_HWH_NAME,
	.module = THIS_MODULE,
	.create = rdac_create,
	.destroy = rdac_destroy,
	.pg_init = rdac_pg_init,
	.error = rdac_error,
};
static int __init rdac_init(void)
{
	int r;

	rdac_wkqd = create_singlethread_workqueue("rdac_wkqd");
	if (!rdac_wkqd) {
		DMERR("Failed to create workqueue rdac_wkqd.");
		return -ENOMEM;
	}

	r = dm_register_hw_handler(&rdac_handler);
	if (r < 0) {
		DMERR("%s: register failed %d", RDAC_DM_HWH_NAME, r);
		destroy_workqueue(rdac_wkqd);
		return r;
	}

	DMINFO("%s: version %s loaded", RDAC_DM_HWH_NAME, RDAC_DM_HWH_VER);
	return 0;
}
static void __exit rdac_exit(void)
{
	int r = dm_unregister_hw_handler(&rdac_handler);

	destroy_workqueue(rdac_wkqd);
	if (r < 0)
		DMERR("%s: unregister failed %d", RDAC_DM_HWH_NAME, r);
}

module_init(rdac_init);
module_exit(rdac_exit);
MODULE_DESCRIPTION("DM Multipath LSI/Engenio RDAC support");
MODULE_AUTHOR("Mike Christie, Chandra Seetharaman");
MODULE_LICENSE("GPL");
MODULE_VERSION(RDAC_DM_HWH_VER);