/*
 * Engenio/LSI RDAC DM HW handler
 *
 * Copyright (C) 2005 Mike Christie. All rights reserved.
 * Copyright (C) Chandra Seetharaman, IBM Corp. 2007
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_eh.h>

#define DM_MSG_PREFIX "multipath rdac"

#include "dm-hw-handler.h"

#define RDAC_DM_HWH_NAME "rdac"
#define RDAC_DM_HWH_VER "0.4"

/*
 * These struct definitions and the forming of the
 * mode page were taken from the LSI RDAC 2.4 GPL'd
 * driver, and then converted to Linux conventions.
 */
#define RDAC_QUIESCENCE_TIME	20

#define RDAC_PAGE_CODE_REDUNDANT_CONTROLLER	0x2c

/*
 * Controller modes definitions
 */
#define RDAC_MODE_TRANSFER_ALL_LUNS		0x01
#define RDAC_MODE_TRANSFER_SPECIFIED_LUNS	0x02

#define RDAC_FORCED_QUIESENCE	0x02

#define RDAC_FAILOVER_TIMEOUT	(60 * HZ)

struct rdac_mode_6_hdr {
	u8	data_len;
	u8	medium_type;
	u8	device_params;
	u8	block_desc_len;
};

struct rdac_mode_10_hdr {
	u16	data_len;
	u8	medium_type;
	u8	device_params;
	u16	block_desc_len;
};

struct rdac_mode_common {
	u8	controller_serial[16];
	u8	alt_controller_serial[16];
	u8	rdac_mode[2];
	u8	quiescence_timeout;
	u8	rdac_options;
};

struct rdac_pg_legacy {
	struct rdac_mode_6_hdr hdr;
	u8	page_code;
	u8	page_len;
	struct rdac_mode_common common;
#define MODE6_MAX_LUN	32
	u8	lun_table[MODE6_MAX_LUN];
};

struct rdac_pg_expanded {
	struct rdac_mode_10_hdr hdr;
	u8	page_code;
	u8	subpage_code;
	u8	page_len[2];
	struct rdac_mode_common common;
	u8	lun_table[256];
};

struct c9_inquiry {
	u8	page_code;	/* 0xC9 */
	u8	page_id[4];	/* "vace" */
	u8	avte_cvp;
};

#define SUBSYS_ID_LEN	16
#define SLOT_ID_LEN	2

struct c4_inquiry {
	u8	page_code;	/* 0xC4 */
	u8	page_id[4];	/* "subs" */
	u8	subsys_id[SUBSYS_ID_LEN];
	u8	slot_id[SLOT_ID_LEN];
};

struct rdac_controller {
	u8			subsys_id[SUBSYS_ID_LEN];
	u8			slot_id[SLOT_ID_LEN];
	int			use_10_ms;
	struct kref		kref;
	struct list_head	node; /* list of all controllers */
	spinlock_t		lock;
	int			submitted;
	struct list_head	cmd_list; /* list of commands to be submitted */
	union {
		struct rdac_pg_legacy	legacy;
		struct rdac_pg_expanded	expanded;
	} mode_select;
};

struct c8_inquiry {
	u8	page_code;	/* 0xC8 */
	u8	page_id[4];	/* "edid" */
	u8	vol_user_label_len;
	u8	vol_user_label[60];
	u8	array_uniq_id_len;
	u8	array_unique_id[16];
	u8	array_user_label_len;
	u8	array_user_label[60];
	u8	lun[8];
};

struct c2_inquiry {
	u8	page_code;	/* 0xC2 */
	u8	page_id[4];	/* "swr4" */
	u8	max_lun_supported;
	u8	partitions[239]; /* Total allocation length should be 0xFF */
};

struct rdac_handler {
	struct list_head	entry; /* list waiting to submit MODE SELECT */
	unsigned		timeout;
	struct rdac_controller	*ctlr;
#define UNINITIALIZED_LUN	(1 << 8)
	unsigned		lun;
	unsigned char		sense[SCSI_SENSE_BUFFERSIZE];
	struct dm_path		*path;
	struct work_struct	work;
#define	SEND_C2_INQUIRY		1
#define	SEND_C4_INQUIRY		2
#define	SEND_C8_INQUIRY		3
#define	SEND_C9_INQUIRY		4
#define	SEND_MODE_SELECT	5
	int			cmd_to_send;
	union {
		struct c2_inquiry c2;
		struct c4_inquiry c4;
		struct c8_inquiry c8;
		struct c9_inquiry c9;
	} inq;
};

static LIST_HEAD(ctlr_list);
static DEFINE_SPINLOCK(list_lock);
static struct workqueue_struct *rdac_wkqd;

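/*
 * Returns non-zero if the request failed either at the block layer or with
 * a SCSI host/message error, i.e. the command did not complete cleanly.
 */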
static inline int had_failures(struct request *req, int error)
{
	return (error || host_byte(req->errors) != DID_OK ||
			msg_byte(req->errors) != COMMAND_COMPLETE);
}

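/*
 * Re-queue the C9 inquiry for every handler that was waiting on this
 * controller's cmd_list and clear the controller's "submitted" flag so the
 * next MODE SELECT can be issued.
 */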
static void rdac_resubmit_all(struct rdac_handler *h)
{
	struct rdac_controller *ctlr = h->ctlr;
	struct rdac_handler *tmp, *h1;

	spin_lock(&ctlr->lock);
	list_for_each_entry_safe(h1, tmp, &ctlr->cmd_list, entry) {
		h1->cmd_to_send = SEND_C9_INQUIRY;
		queue_work(rdac_wkqd, &h1->work);
		list_del(&h1->entry);
	}
	ctlr->submitted = 0;
	spin_unlock(&ctlr->lock);
}

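/*
 * Completion handler for the failover MODE SELECT.  Retryable sense codes
 * cause the C9 inquiry to be resubmitted; anything else completes the
 * path-group init with success or failure and kicks the queued handlers.
 */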
static void mode_select_endio(struct request *req, int error)
{
	struct rdac_handler *h = req->end_io_data;
	struct scsi_sense_hdr sense_hdr;
	int sense = 0, fail = 0;

	if (had_failures(req, error)) {
		fail = 1;
		goto failed;
	}

	if (status_byte(req->errors) == CHECK_CONDITION) {
		scsi_normalize_sense(req->sense, SCSI_SENSE_BUFFERSIZE,
				&sense_hdr);
		sense = (sense_hdr.sense_key << 16) | (sense_hdr.asc << 8) |
				sense_hdr.ascq;
		/* If it is retryable failure, submit the c9 inquiry again */
		if (sense == 0x59136 || sense == 0x68b02 || sense == 0xb8b02 ||
		    sense == 0x62900) {
			/* 0x59136    - Command lock contention
			 * 0x[6b]8b02 - Quiescence in progress or achieved
			 * 0x62900    - Power On, Reset, or Bus Device Reset
			 */
			h->cmd_to_send = SEND_C9_INQUIRY;
			queue_work(rdac_wkqd, &h->work);
			goto done;
		}
		if (sense)
			DMINFO("MODE_SELECT failed on %s with sense 0x%x",
						h->path->dev->name, sense);
	}
failed:
	if (fail || sense)
		dm_pg_init_complete(h->path, MP_FAIL_PATH);
	else
		dm_pg_init_complete(h->path, 0);

done:
	rdac_resubmit_all(h);
	__blk_put_request(req->q, req);
}

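/*
 * Allocate a BLOCK_PC request on the path's queue, map the supplied buffer
 * into it and set up the sense buffer, timeout and failfast flags.  The CDB
 * itself is filled in by the caller.
 */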
static struct request *get_rdac_req(struct rdac_handler *h,
			void *buffer, unsigned buflen, int rw)
{
	struct request *rq;
	struct request_queue *q = bdev_get_queue(h->path->dev->bdev);

	rq = blk_get_request(q, rw, GFP_KERNEL);

	if (!rq) {
		DMINFO("get_rdac_req: blk_get_request failed");
		return NULL;
	}

	if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_KERNEL)) {
		blk_put_request(rq);
		DMINFO("get_rdac_req: blk_rq_map_kern failed");
		return NULL;
	}

	memset(&rq->cmd, 0, BLK_MAX_CDB);
	rq->sense = h->sense;
	memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
	rq->sense_len = 0;

	rq->end_io_data = h;
	rq->timeout = h->timeout;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE;
	return rq;
}

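/*
 * Build the MODE SELECT request that transfers ownership of this LUN to the
 * controller on the current path.  The expanded (mode select 10) page is
 * used when the controller supports more than MODE6_MAX_LUN LUNs, otherwise
 * the legacy (mode select 6) page is used.
 */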
static struct request *rdac_failover_get(struct rdac_handler *h)
{
	struct request *rq;
	struct rdac_mode_common *common;
	unsigned data_size;

	if (h->ctlr->use_10_ms) {
		struct rdac_pg_expanded *rdac_pg;

		data_size = sizeof(struct rdac_pg_expanded);
		rdac_pg = &h->ctlr->mode_select.expanded;
		memset(rdac_pg, 0, data_size);
		common = &rdac_pg->common;
		rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER + 0x40;
		rdac_pg->subpage_code = 0x1;
		rdac_pg->page_len[0] = 0x01;
		rdac_pg->page_len[1] = 0x28;
		rdac_pg->lun_table[h->lun] = 0x81;
	} else {
		struct rdac_pg_legacy *rdac_pg;

		data_size = sizeof(struct rdac_pg_legacy);
		rdac_pg = &h->ctlr->mode_select.legacy;
		memset(rdac_pg, 0, data_size);
		common = &rdac_pg->common;
		rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER;
		rdac_pg->page_len = 0x68;
		rdac_pg->lun_table[h->lun] = 0x81;
	}
	common->rdac_mode[1] = RDAC_MODE_TRANSFER_SPECIFIED_LUNS;
	common->quiescence_timeout = RDAC_QUIESCENCE_TIME;
	common->rdac_options = RDAC_FORCED_QUIESENCE;

	/* get request for block layer packet command */
	rq = get_rdac_req(h, &h->ctlr->mode_select, data_size, WRITE);
	if (!rq) {
		DMERR("rdac_failover_get: no rq");
		return NULL;
	}

	/* Prepare the command. */
	if (h->ctlr->use_10_ms) {
		rq->cmd[0] = MODE_SELECT_10;
		rq->cmd[7] = data_size >> 8;
		rq->cmd[8] = data_size & 0xff;
	} else {
		rq->cmd[0] = MODE_SELECT;
		rq->cmd[4] = data_size;
	}
	rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);

	return rq;
}

/* Acquires h->ctlr->lock */
static void submit_mode_select(struct rdac_handler *h)
{
	struct request *rq;
	struct request_queue *q = bdev_get_queue(h->path->dev->bdev);

	spin_lock(&h->ctlr->lock);
	if (h->ctlr->submitted) {
		list_add(&h->entry, &h->ctlr->cmd_list);
		goto done;
	}

	if (!q) {
		DMINFO("submit_mode_select: no queue");
		goto fail_path;
	}

	rq = rdac_failover_get(h);
	if (!rq) {
		DMERR("submit_mode_select: no rq");
		goto fail_path;
	}

	DMINFO("queueing MODE_SELECT command on %s", h->path->dev->name);

	blk_execute_rq_nowait(q, NULL, rq, 1, mode_select_endio);
	h->ctlr->submitted = 1;
	goto done;
fail_path:
	dm_pg_init_complete(h->path, MP_FAIL_PATH);
done:
	spin_unlock(&h->ctlr->lock);
}

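/*
 * rdac_controller objects are shared by all paths that lead to the same
 * storage controller (matched on subsystem and slot id) and are reference
 * counted with a kref; release_ctlr() drops the controller from the global
 * list once the last path using it goes away.
 */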
static void release_ctlr(struct kref *kref)
{
	struct rdac_controller *ctlr;
	ctlr = container_of(kref, struct rdac_controller, kref);

	spin_lock(&list_lock);
	list_del(&ctlr->node);
	spin_unlock(&list_lock);
	kfree(ctlr);
}

static struct rdac_controller *get_controller(u8 *subsys_id, u8 *slot_id)
{
	struct rdac_controller *ctlr, *tmp;

	spin_lock(&list_lock);

	list_for_each_entry(tmp, &ctlr_list, node) {
		if ((memcmp(tmp->subsys_id, subsys_id, SUBSYS_ID_LEN) == 0) &&
			  (memcmp(tmp->slot_id, slot_id, SLOT_ID_LEN) == 0)) {
			kref_get(&tmp->kref);
			spin_unlock(&list_lock);
			return tmp;
		}
	}
	ctlr = kmalloc(sizeof(*ctlr), GFP_ATOMIC);
	if (!ctlr)
		goto done;

	/* initialize fields of controller */
	memcpy(ctlr->subsys_id, subsys_id, SUBSYS_ID_LEN);
	memcpy(ctlr->slot_id, slot_id, SLOT_ID_LEN);
	kref_init(&ctlr->kref);
	spin_lock_init(&ctlr->lock);
	ctlr->submitted = 0;
	ctlr->use_10_ms = -1;
	INIT_LIST_HEAD(&ctlr->cmd_list);
	list_add(&ctlr->node, &ctlr_list);
done:
	spin_unlock(&list_lock);
	return ctlr;
}

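/*
 * Inquiry completion handlers.  pg_init walks a small state machine over
 * the work queue: C8 (extended device id) finds the LUN, C9 (volume access
 * control) reports AVT mode and LUN ownership, C4 (subsystem id) binds the
 * path to its rdac_controller, C2 (software version) decides between mode
 * select 6 and 10, and finally MODE SELECT transfers ownership.
 */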
static void c4_endio(struct request *req, int error)
{
	struct rdac_handler *h = req->end_io_data;
	struct c4_inquiry *sp;

	if (had_failures(req, error)) {
		dm_pg_init_complete(h->path, MP_FAIL_PATH);
		goto done;
	}

	sp = &h->inq.c4;

	h->ctlr = get_controller(sp->subsys_id, sp->slot_id);

	if (h->ctlr) {
		h->cmd_to_send = SEND_C9_INQUIRY;
		queue_work(rdac_wkqd, &h->work);
	} else
		dm_pg_init_complete(h->path, MP_FAIL_PATH);
done:
	__blk_put_request(req->q, req);
}

static void c2_endio(struct request *req, int error)
{
	struct rdac_handler *h = req->end_io_data;
	struct c2_inquiry *sp;

	if (had_failures(req, error)) {
		dm_pg_init_complete(h->path, MP_FAIL_PATH);
		goto done;
	}

	sp = &h->inq.c2;

	/* If more than MODE6_MAX_LUN luns are supported, use mode select 10 */
	if (sp->max_lun_supported >= MODE6_MAX_LUN)
		h->ctlr->use_10_ms = 1;
	else
		h->ctlr->use_10_ms = 0;

	h->cmd_to_send = SEND_MODE_SELECT;
	queue_work(rdac_wkqd, &h->work);
done:
	__blk_put_request(req->q, req);
}

static void c9_endio(struct request *req, int error)
{
	struct rdac_handler *h = req->end_io_data;
	struct c9_inquiry *sp;

	if (had_failures(req, error)) {
		dm_pg_init_complete(h->path, MP_FAIL_PATH);
		goto done;
	}

	/* We need to look at the sense keys here to take clear action.
	 * For now simple logic: If the host is in AVT mode or if controller
	 * owns the lun, return dm_pg_init_complete(), otherwise submit
	 * MODE SELECT.
	 */
	sp = &h->inq.c9;

	/* If in AVT mode, return success */
	if ((sp->avte_cvp >> 7) == 0x1) {
		dm_pg_init_complete(h->path, 0);
		goto done;
	}

	/* If the controller on this path owns the LUN, return success */
	if (sp->avte_cvp & 0x1) {
		dm_pg_init_complete(h->path, 0);
		goto done;
	}

	if (h->ctlr) {
		if (h->ctlr->use_10_ms == -1)
			h->cmd_to_send = SEND_C2_INQUIRY;
		else
			h->cmd_to_send = SEND_MODE_SELECT;
	} else
		h->cmd_to_send = SEND_C4_INQUIRY;
	queue_work(rdac_wkqd, &h->work);
done:
	__blk_put_request(req->q, req);
}

static void c8_endio(struct request *req, int error)
{
	struct rdac_handler *h = req->end_io_data;
	struct c8_inquiry *sp;

	if (had_failures(req, error)) {
		dm_pg_init_complete(h->path, MP_FAIL_PATH);
		goto done;
	}

	/* We need to look at the sense keys here to take clear action.
	 * For now simple logic: Get the lun from the inquiry page.
	 */
	sp = &h->inq.c8;
	h->lun = sp->lun[7]; /* currently it uses only one byte */
	h->cmd_to_send = SEND_C9_INQUIRY;
	queue_work(rdac_wkqd, &h->work);
done:
	__blk_put_request(req->q, req);
}

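/*
 * Issue an asynchronous INQUIRY for the given vendor-specific VPD page into
 * h->inq; on any setup failure the path-group init is failed.
 */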
static void submit_inquiry(struct rdac_handler *h, int page_code,
		unsigned int len, rq_end_io_fn endio)
{
	struct request *rq;
	struct request_queue *q = bdev_get_queue(h->path->dev->bdev);

	if (!q)
		goto fail;

	rq = get_rdac_req(h, &h->inq, len, READ);
	if (!rq)
		goto fail;

	/* Prepare the command. */
	rq->cmd[0] = INQUIRY;
	rq->cmd[1] = 1;
	rq->cmd[2] = page_code;
	rq->cmd[4] = len;
	rq->cmd_len = COMMAND_SIZE(INQUIRY);
	blk_execute_rq_nowait(q, NULL, rq, 1, endio);
	return;

fail:
	dm_pg_init_complete(h->path, MP_FAIL_PATH);
}

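/*
 * Work queue handler: issues whatever command h->cmd_to_send asks for.
 * All SCSI submission happens here, in process context.
 */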
static void service_wkq(struct work_struct *work)
{
	struct rdac_handler *h = container_of(work, struct rdac_handler, work);

	switch (h->cmd_to_send) {
	case SEND_C2_INQUIRY:
		submit_inquiry(h, 0xC2, sizeof(struct c2_inquiry), c2_endio);
		break;
	case SEND_C4_INQUIRY:
		submit_inquiry(h, 0xC4, sizeof(struct c4_inquiry), c4_endio);
		break;
	case SEND_C8_INQUIRY:
		submit_inquiry(h, 0xC8, sizeof(struct c8_inquiry), c8_endio);
		break;
	case SEND_C9_INQUIRY:
		submit_inquiry(h, 0xC9, sizeof(struct c9_inquiry), c9_endio);
		break;
	case SEND_MODE_SELECT:
		submit_mode_select(h);
		break;
	default:
		BUG();
	}
}

/*
 * only support subpage2c until we confirm that this is just a matter
 * of updating firmware or not, and RDAC (basic AVT works already) for now
 * but we can add these in when we get time and testers
 */
static int rdac_create(struct hw_handler *hwh, unsigned argc, char **argv)
{
	struct rdac_handler *h;
	unsigned timeout;

	if (argc == 0) {
		/* No arguments: use defaults */
		timeout = RDAC_FAILOVER_TIMEOUT;
	} else if (argc != 1) {
		DMWARN("incorrect number of arguments");
		return -EINVAL;
	} else {
		if (sscanf(argv[1], "%u", &timeout) != 1) {
			DMWARN("invalid timeout value");
			return -EINVAL;
		}
	}

	h = kzalloc(sizeof(*h), GFP_KERNEL);
	if (!h)
		return -ENOMEM;

	hwh->context = h;
	h->timeout = timeout;
	h->lun = UNINITIALIZED_LUN;
	INIT_WORK(&h->work, service_wkq);
	DMWARN("using RDAC command with timeout %u", h->timeout);

	return 0;
}

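/*
 * Tear down a handler: drop the reference on the shared controller (if one
 * was ever bound) and free the per-path context.
 */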
static void rdac_destroy(struct hw_handler *hwh)
{
	struct rdac_handler *h = hwh->context;

	if (h->ctlr)
		kref_put(&h->ctlr->kref, release_ctlr);
	kfree(h);
	hwh->context = NULL;
}

static unsigned rdac_error(struct hw_handler *hwh, struct bio *bio)
{
	/* Try default handler */
	return dm_scsi_err_handler(hwh, bio);
}

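/*
 * Path-group init entry point.  On the first activation the LUN is still
 * unknown, so start with the C8 inquiry; later activations go straight to
 * the C9 ownership check.
 */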
static void rdac_pg_init(struct hw_handler *hwh, unsigned bypassed,
			struct dm_path *path)
{
	struct rdac_handler *h = hwh->context;

	h->path = path;
	switch (h->lun) {
	case UNINITIALIZED_LUN:
		submit_inquiry(h, 0xC8, sizeof(struct c8_inquiry), c8_endio);
		break;
	default:
		submit_inquiry(h, 0xC9, sizeof(struct c9_inquiry), c9_endio);
	}
}

static struct hw_handler_type rdac_handler = {
	.name = RDAC_DM_HWH_NAME,
	.module = THIS_MODULE,
	.create = rdac_create,
	.destroy = rdac_destroy,
	.pg_init = rdac_pg_init,
	.error = rdac_error,
};

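/*
 * Userspace selects this handler by name.  With multipath-tools this is
 * typically (illustrative only; exact syntax depends on the tooling in use):
 *
 *	hardware_handler "1 rdac"
 *
 * An optional extra argument supplies the failover timeout parsed by
 * rdac_create(); otherwise RDAC_FAILOVER_TIMEOUT is used.
 */
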
static int __init rdac_init(void)
{
	int r;

	rdac_wkqd = create_singlethread_workqueue("rdac_wkqd");
	if (!rdac_wkqd) {
		DMERR("Failed to create workqueue rdac_wkqd.");
		return -ENOMEM;
	}

	r = dm_register_hw_handler(&rdac_handler);
	if (r < 0) {
		DMERR("%s: register failed %d", RDAC_DM_HWH_NAME, r);
		destroy_workqueue(rdac_wkqd);
		return r;
	}

	DMINFO("%s: version %s loaded", RDAC_DM_HWH_NAME, RDAC_DM_HWH_VER);
	return 0;
}

static void __exit rdac_exit(void)
{
	int r = dm_unregister_hw_handler(&rdac_handler);

	destroy_workqueue(rdac_wkqd);
	if (r < 0)
		DMERR("%s: unregister failed %d", RDAC_DM_HWH_NAME, r);
}

module_init(rdac_init);
module_exit(rdac_exit);

MODULE_DESCRIPTION("DM Multipath LSI/Engenio RDAC support");
MODULE_AUTHOR("Mike Christie, Chandra Seetharaman");
MODULE_LICENSE("GPL");
MODULE_VERSION(RDAC_DM_HWH_VER);