/*
 * PAV alias management for the DASD ECKD discipline
 *
 * Copyright IBM Corporation, 2007
 * Author(s): Stefan Weinhuber <wein@de.ibm.com>
 */

#include <linux/list.h>
#include <asm/ebcdic.h>
#include "dasd_int.h"
#include "dasd_eckd.h"

#ifdef PRINTK_HEADER
#undef PRINTK_HEADER
#endif /* PRINTK_HEADER */
#define PRINTK_HEADER "dasd(eckd):"

/*
 * General concept of alias management:
 * - PAV and DASD alias management is specific to the eckd discipline.
 * - A device is connected to an lcu as long as the device exists.
 *   dasd_alias_make_device_known_to_lcu will be called when the
 *   device is checked by the eckd discipline and
 *   dasd_alias_disconnect_device_from_lcu will be called
 *   before the device is deleted.
 * - The dasd_alias_add_device / dasd_alias_remove_device
 *   functions mark the point when a device is 'ready for service'.
 * - A summary unit check is a rare occasion, but it is mandatory to
 *   support it. It requires some complex recovery actions before the
 *   devices can be used again (see dasd_alias_handle_summary_unit_check).
 * - dasd_alias_get_start_dev will find an alias device that can be used
 *   instead of the base device and does some (very simple) load balancing.
 *   This is the function that gets called for each I/O, so when improving
 *   something, this function should get faster or better, the rest has
 *   just to be correct.
 */
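
/*
 * Typical calling sequence as described above (illustrative only; the
 * actual call sites live in the eckd discipline code):
 *
 *   dasd_alias_make_device_known_to_lcu(device);    connect device to lcu
 *   dasd_alias_add_device(device);                  device ready for service
 *   ...
 *   start = dasd_alias_get_start_dev(base_device);  per I/O, may return NULL
 *   ...
 *   dasd_alias_remove_device(device);               device leaves service
 *   dasd_alias_disconnect_device_from_lcu(device);  before device deletion
 */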

static void summary_unit_check_handling_work(struct work_struct *);
static void lcu_update_work(struct work_struct *);
static int _schedule_lcu_update(struct alias_lcu *, struct dasd_device *);

static struct alias_root aliastree = {
        .serverlist = LIST_HEAD_INIT(aliastree.serverlist),
        .lock = __SPIN_LOCK_UNLOCKED(aliastree.lock),
};
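
/*
 * Lookup helpers: the alias tree is keyed by the device uid. Servers are
 * matched by vendor and serial, lcus by ssid, and pav groups by the base
 * unit address. The callers in this file hold aliastree.lock around
 * _find_server/_find_lcu and lcu->lock around _find_group.
 */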

static struct alias_server *_find_server(struct dasd_uid *uid)
{
        struct alias_server *pos;
        list_for_each_entry(pos, &aliastree.serverlist, server) {
                if (!strncmp(pos->uid.vendor, uid->vendor,
                             sizeof(uid->vendor))
                    && !strncmp(pos->uid.serial, uid->serial,
                                sizeof(uid->serial)))
                        return pos;
        }
        return NULL;
}

static struct alias_lcu *_find_lcu(struct alias_server *server,
                                   struct dasd_uid *uid)
{
        struct alias_lcu *pos;
        list_for_each_entry(pos, &server->lculist, lcu) {
                if (pos->uid.ssid == uid->ssid)
                        return pos;
        }
        return NULL;
}

static struct alias_pav_group *_find_group(struct alias_lcu *lcu,
                                           struct dasd_uid *uid)
{
        struct alias_pav_group *pos;
        __u8 search_unit_addr;

        /* for hyper pav there is only one group */
        if (lcu->pav == HYPER_PAV) {
                if (list_empty(&lcu->grouplist))
                        return NULL;
                else
                        return list_first_entry(&lcu->grouplist,
                                                struct alias_pav_group, group);
        }

        /* for base pav we have to find the group that matches the base */
        if (uid->type == UA_BASE_DEVICE)
                search_unit_addr = uid->real_unit_addr;
        else
                search_unit_addr = uid->base_unit_addr;
        list_for_each_entry(pos, &lcu->grouplist, group) {
                if (pos->uid.base_unit_addr == search_unit_addr)
                        return pos;
        }
        return NULL;
}
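
/*
 * Allocation helpers. The lcu also gets a preallocated 'reset summary unit
 * check' request (rsu_cqr) here, so that summary unit check handling does
 * not have to allocate memory later on.
 */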

static struct alias_server *_allocate_server(struct dasd_uid *uid)
{
        struct alias_server *server;

        server = kzalloc(sizeof(*server), GFP_KERNEL);
        if (!server)
                return ERR_PTR(-ENOMEM);
        memcpy(server->uid.vendor, uid->vendor, sizeof(uid->vendor));
        memcpy(server->uid.serial, uid->serial, sizeof(uid->serial));
        INIT_LIST_HEAD(&server->server);
        INIT_LIST_HEAD(&server->lculist);
        return server;
}

static void _free_server(struct alias_server *server)
{
        kfree(server);
}

static struct alias_lcu *_allocate_lcu(struct dasd_uid *uid)
{
        struct alias_lcu *lcu;

        lcu = kzalloc(sizeof(*lcu), GFP_KERNEL);
        if (!lcu)
                return ERR_PTR(-ENOMEM);
        lcu->uac = kzalloc(sizeof(*(lcu->uac)), GFP_KERNEL | GFP_DMA);
        if (!lcu->uac)
                goto out_err1;
        lcu->rsu_cqr = kzalloc(sizeof(*lcu->rsu_cqr), GFP_KERNEL | GFP_DMA);
        if (!lcu->rsu_cqr)
                goto out_err2;
        lcu->rsu_cqr->cpaddr = kzalloc(sizeof(struct ccw1),
                                       GFP_KERNEL | GFP_DMA);
        if (!lcu->rsu_cqr->cpaddr)
                goto out_err3;
        lcu->rsu_cqr->data = kzalloc(16, GFP_KERNEL | GFP_DMA);
        if (!lcu->rsu_cqr->data)
                goto out_err4;

        memcpy(lcu->uid.vendor, uid->vendor, sizeof(uid->vendor));
        memcpy(lcu->uid.serial, uid->serial, sizeof(uid->serial));
        lcu->uid.ssid = uid->ssid;
        lcu->pav = NO_PAV;
        lcu->flags = NEED_UAC_UPDATE | UPDATE_PENDING;
        INIT_LIST_HEAD(&lcu->lcu);
        INIT_LIST_HEAD(&lcu->inactive_devices);
        INIT_LIST_HEAD(&lcu->active_devices);
        INIT_LIST_HEAD(&lcu->grouplist);
        INIT_WORK(&lcu->suc_data.worker, summary_unit_check_handling_work);
        INIT_DELAYED_WORK(&lcu->ruac_data.dwork, lcu_update_work);
        spin_lock_init(&lcu->lock);
        return lcu;

out_err4:
        kfree(lcu->rsu_cqr->cpaddr);
out_err3:
        kfree(lcu->rsu_cqr);
out_err2:
        kfree(lcu->uac);
out_err1:
        kfree(lcu);
        return ERR_PTR(-ENOMEM);
}

static void _free_lcu(struct alias_lcu *lcu)
{
        kfree(lcu->rsu_cqr->data);
        kfree(lcu->rsu_cqr->cpaddr);
        kfree(lcu->rsu_cqr);
        kfree(lcu->uac);
        kfree(lcu);
}

/*
 * This is the function that will allocate all the server and lcu data,
 * so this function must be called first for a new device.
 * If the return value is 1, the lcu was already known before, if it
 * is 0, this is a new lcu.
 * Negative return code indicates that something went wrong (e.g. -ENOMEM)
 */
int dasd_alias_make_device_known_to_lcu(struct dasd_device *device)
{
        struct dasd_eckd_private *private;
        unsigned long flags;
        struct alias_server *server, *newserver;
        struct alias_lcu *lcu, *newlcu;
        int is_lcu_known;
        struct dasd_uid *uid;

        private = (struct dasd_eckd_private *) device->private;
        uid = &private->uid;
        spin_lock_irqsave(&aliastree.lock, flags);
        is_lcu_known = 1;
        server = _find_server(uid);
        if (!server) {
                spin_unlock_irqrestore(&aliastree.lock, flags);
                newserver = _allocate_server(uid);
                if (IS_ERR(newserver))
                        return PTR_ERR(newserver);
                spin_lock_irqsave(&aliastree.lock, flags);
                server = _find_server(uid);
                if (!server) {
                        list_add(&newserver->server, &aliastree.serverlist);
                        server = newserver;
                        is_lcu_known = 0;
                } else {
                        /* someone was faster */
                        _free_server(newserver);
                }
        }

        lcu = _find_lcu(server, uid);
        if (!lcu) {
                spin_unlock_irqrestore(&aliastree.lock, flags);
                newlcu = _allocate_lcu(uid);
                if (IS_ERR(newlcu))
                        return PTR_ERR(newlcu);
                spin_lock_irqsave(&aliastree.lock, flags);
                lcu = _find_lcu(server, uid);
                if (!lcu) {
                        list_add(&newlcu->lcu, &server->lculist);
                        lcu = newlcu;
                        is_lcu_known = 0;
                } else {
                        /* someone was faster */
                        _free_lcu(newlcu);
                }
        }
        spin_lock(&lcu->lock);
        list_add(&device->alias_list, &lcu->inactive_devices);
        private->lcu = lcu;
        spin_unlock(&lcu->lock);
        spin_unlock_irqrestore(&aliastree.lock, flags);

        return is_lcu_known;
}

/*
 * This function removes a device from the scope of alias management.
 * The complicated part is to make sure that it is not in use by
 * any of the workers. If necessary cancel the work.
 */
void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
{
        struct dasd_eckd_private *private;
        unsigned long flags;
        struct alias_lcu *lcu;
        struct alias_server *server;
        int was_pending;

        private = (struct dasd_eckd_private *) device->private;
        lcu = private->lcu;
        spin_lock_irqsave(&lcu->lock, flags);
        list_del_init(&device->alias_list);
        /* make sure that the workers don't use this device */
        if (device == lcu->suc_data.device) {
                spin_unlock_irqrestore(&lcu->lock, flags);
                cancel_work_sync(&lcu->suc_data.worker);
                spin_lock_irqsave(&lcu->lock, flags);
                if (device == lcu->suc_data.device)
                        lcu->suc_data.device = NULL;
        }
        was_pending = 0;
        if (device == lcu->ruac_data.device) {
                spin_unlock_irqrestore(&lcu->lock, flags);
                was_pending = 1;
                cancel_delayed_work_sync(&lcu->ruac_data.dwork);
                spin_lock_irqsave(&lcu->lock, flags);
                if (device == lcu->ruac_data.device)
                        lcu->ruac_data.device = NULL;
        }
        private->lcu = NULL;
        spin_unlock_irqrestore(&lcu->lock, flags);

        spin_lock_irqsave(&aliastree.lock, flags);
        spin_lock(&lcu->lock);
        if (list_empty(&lcu->grouplist) &&
            list_empty(&lcu->active_devices) &&
            list_empty(&lcu->inactive_devices)) {
                list_del(&lcu->lcu);
                spin_unlock(&lcu->lock);
                _free_lcu(lcu);
                lcu = NULL;
        } else {
                if (was_pending)
                        _schedule_lcu_update(lcu, NULL);
                spin_unlock(&lcu->lock);
        }
        server = _find_server(&private->uid);
        if (server && list_empty(&server->lculist)) {
                list_del(&server->server);
                _free_server(server);
        }
        spin_unlock_irqrestore(&aliastree.lock, flags);
}

/*
 * This function assumes that the unit address configuration stored
 * in the lcu is up to date and will update the device uid before
 * adding it to a pav group.
 */
static int _add_device_to_lcu(struct alias_lcu *lcu,
                              struct dasd_device *device)
{
        struct dasd_eckd_private *private;
        struct alias_pav_group *group;
        struct dasd_uid *uid;

        private = (struct dasd_eckd_private *) device->private;
        uid = &private->uid;
        uid->type = lcu->uac->unit[uid->real_unit_addr].ua_type;
        uid->base_unit_addr = lcu->uac->unit[uid->real_unit_addr].base_ua;
        dasd_set_uid(device->cdev, &private->uid);

        /* if we have no PAV anyway, we don't need to bother with PAV groups */
        if (lcu->pav == NO_PAV) {
                list_move(&device->alias_list, &lcu->active_devices);
                return 0;
        }

        group = _find_group(lcu, uid);
        if (!group) {
                group = kzalloc(sizeof(*group), GFP_ATOMIC);
                if (!group)
                        return -ENOMEM;
                memcpy(group->uid.vendor, uid->vendor, sizeof(uid->vendor));
                memcpy(group->uid.serial, uid->serial, sizeof(uid->serial));
                group->uid.ssid = uid->ssid;
                if (uid->type == UA_BASE_DEVICE)
                        group->uid.base_unit_addr = uid->real_unit_addr;
                else
                        group->uid.base_unit_addr = uid->base_unit_addr;
                INIT_LIST_HEAD(&group->group);
                INIT_LIST_HEAD(&group->baselist);
                INIT_LIST_HEAD(&group->aliaslist);
                list_add(&group->group, &lcu->grouplist);
        }
        if (uid->type == UA_BASE_DEVICE)
                list_move(&device->alias_list, &group->baselist);
        else
                list_move(&device->alias_list, &group->aliaslist);
        private->pavgroup = group;
        return 0;
}
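
/*
 * Counterpart to _add_device_to_lcu: move the device back to the lcu's
 * inactive list and drop it from its pav group; delete the group if it
 * has become empty. Called with lcu->lock held.
 */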

static void _remove_device_from_lcu(struct alias_lcu *lcu,
                                    struct dasd_device *device)
{
        struct dasd_eckd_private *private;
        struct alias_pav_group *group;

        private = (struct dasd_eckd_private *) device->private;
        list_move(&device->alias_list, &lcu->inactive_devices);
        group = private->pavgroup;
        if (!group)
                return;
        private->pavgroup = NULL;
        if (list_empty(&group->baselist) && list_empty(&group->aliaslist)) {
                list_del(&group->group);
                kfree(group);
                return;
        }
        if (group->next == device)
                group->next = NULL;
}
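
/*
 * Build and run a two-CCW channel program (PSF 'Prepare for Read Subsystem
 * Data' followed by RSSD) to read the unit address configuration of the lcu
 * into lcu->uac. NEED_UAC_UPDATE is cleared before the I/O is started so
 * that a summary unit check arriving in between can set it again and be
 * detected afterwards.
 */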

static int read_unit_address_configuration(struct dasd_device *device,
                                           struct alias_lcu *lcu)
{
        struct dasd_psf_prssd_data *prssdp;
        struct dasd_ccw_req *cqr;
        struct ccw1 *ccw;
        int rc;
        unsigned long flags;

        cqr = dasd_kmalloc_request("ECKD",
                                   1 /* PSF */ + 1 /* RSSD */,
                                   (sizeof(struct dasd_psf_prssd_data)),
                                   device);
        if (IS_ERR(cqr))
                return PTR_ERR(cqr);
        cqr->startdev = device;
        cqr->memdev = device;
        clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
        cqr->retries = 10;
        cqr->expires = 20 * HZ;

        /* Prepare for Read Subsystem Data */
        prssdp = (struct dasd_psf_prssd_data *) cqr->data;
        memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
        prssdp->order = PSF_ORDER_PRSSD;
        prssdp->suborder = 0x0e;        /* Read unit address configuration */
        /* all other bytes of prssdp must be zero */

        ccw = cqr->cpaddr;
        ccw->cmd_code = DASD_ECKD_CCW_PSF;
        ccw->count = sizeof(struct dasd_psf_prssd_data);
        ccw->flags |= CCW_FLAG_CC;
        ccw->cda = (__u32)(addr_t) prssdp;

        /* Read Subsystem Data - unit address configuration */
        memset(lcu->uac, 0, sizeof(*(lcu->uac)));

        ccw++;
        ccw->cmd_code = DASD_ECKD_CCW_RSSD;
        ccw->count = sizeof(*(lcu->uac));
        ccw->cda = (__u32)(addr_t) lcu->uac;

        cqr->buildclk = get_clock();
        cqr->status = DASD_CQR_FILLED;

        /* need to unset flag here to detect race with summary unit check */
        spin_lock_irqsave(&lcu->lock, flags);
        lcu->flags &= ~NEED_UAC_UPDATE;
        spin_unlock_irqrestore(&lcu->lock, flags);

        do {
                rc = dasd_sleep_on(cqr);
        } while (rc && (cqr->retries > 0));
        if (rc) {
                spin_lock_irqsave(&lcu->lock, flags);
                lcu->flags |= NEED_UAC_UPDATE;
                spin_unlock_irqrestore(&lcu->lock, flags);
        }
        dasd_kfree_request(cqr, cqr->memdev);
        return rc;
}
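
/*
 * Re-read the unit address configuration for the whole lcu and rebuild the
 * pav groups from scratch: all devices are first moved back to the active
 * list, then sorted into groups again according to the fresh uac data.
 */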

static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu)
{
        unsigned long flags;
        struct alias_pav_group *pavgroup, *tempgroup;
        struct dasd_device *device, *tempdev;
        int i, rc;
        struct dasd_eckd_private *private;

        spin_lock_irqsave(&lcu->lock, flags);
        list_for_each_entry_safe(pavgroup, tempgroup, &lcu->grouplist, group) {
                list_for_each_entry_safe(device, tempdev, &pavgroup->baselist,
                                         alias_list) {
                        list_move(&device->alias_list, &lcu->active_devices);
                        private = (struct dasd_eckd_private *) device->private;
                        private->pavgroup = NULL;
                }
                list_for_each_entry_safe(device, tempdev, &pavgroup->aliaslist,
                                         alias_list) {
                        list_move(&device->alias_list, &lcu->active_devices);
                        private = (struct dasd_eckd_private *) device->private;
                        private->pavgroup = NULL;
                }
                list_del(&pavgroup->group);
                kfree(pavgroup);
        }
        spin_unlock_irqrestore(&lcu->lock, flags);

        rc = read_unit_address_configuration(refdev, lcu);
        if (rc)
                return rc;

        spin_lock_irqsave(&lcu->lock, flags);
        lcu->pav = NO_PAV;
        for (i = 0; i < MAX_DEVICES_PER_LCU; ++i) {
                switch (lcu->uac->unit[i].ua_type) {
                case UA_BASE_PAV_ALIAS:
                        lcu->pav = BASE_PAV;
                        break;
                case UA_HYPER_PAV_ALIAS:
                        lcu->pav = HYPER_PAV;
                        break;
                }
                if (lcu->pav != NO_PAV)
                        break;
        }

        list_for_each_entry_safe(device, tempdev, &lcu->active_devices,
                                 alias_list) {
                _add_device_to_lcu(lcu, device);
        }
        spin_unlock_irqrestore(&lcu->lock, flags);
        return 0;
}
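
/*
 * Worker function behind ruac_data.dwork. If the update fails, or further
 * update requests came in while it was running, it reschedules itself
 * after 30 seconds.
 */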

static void lcu_update_work(struct work_struct *work)
{
        struct alias_lcu *lcu;
        struct read_uac_work_data *ruac_data;
        struct dasd_device *device;
        unsigned long flags;
        int rc;

        ruac_data = container_of(work, struct read_uac_work_data, dwork.work);
        lcu = container_of(ruac_data, struct alias_lcu, ruac_data);
        device = ruac_data->device;
        rc = _lcu_update(device, lcu);
        /*
         * Need to check flags again, as there could have been another
         * prepare_update or a new device while we were still
         * processing the data
         */
        spin_lock_irqsave(&lcu->lock, flags);
        if (rc || (lcu->flags & NEED_UAC_UPDATE)) {
                DEV_MESSAGE(KERN_WARNING, device, "could not update"
                            " alias data in lcu (rc = %d), retry later", rc);
                schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ);
        } else {
                lcu->ruac_data.device = NULL;
                lcu->flags &= ~UPDATE_PENDING;
        }
        spin_unlock_irqrestore(&lcu->lock, flags);
}
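
/*
 * Schedule an lcu update. The update needs a device to run the channel
 * program on; prefer the given device, otherwise pick any base or alias
 * device that is currently connected to the lcu. Called with lcu->lock held.
 */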

static int _schedule_lcu_update(struct alias_lcu *lcu,
                                struct dasd_device *device)
{
        struct dasd_device *usedev = NULL;
        struct alias_pav_group *group;

        lcu->flags |= NEED_UAC_UPDATE;
        if (lcu->ruac_data.device) {
                /* already scheduled or running */
                return 0;
        }
        if (device && !list_empty(&device->alias_list))
                usedev = device;

        if (!usedev && !list_empty(&lcu->grouplist)) {
                group = list_first_entry(&lcu->grouplist,
                                         struct alias_pav_group, group);
                if (!list_empty(&group->baselist))
                        usedev = list_first_entry(&group->baselist,
                                                  struct dasd_device,
                                                  alias_list);
                else if (!list_empty(&group->aliaslist))
                        usedev = list_first_entry(&group->aliaslist,
                                                  struct dasd_device,
                                                  alias_list);
        }
        if (!usedev && !list_empty(&lcu->active_devices)) {
                usedev = list_first_entry(&lcu->active_devices,
                                          struct dasd_device, alias_list);
        }
        /*
         * if we haven't found a proper device yet, give up for now, the next
         * device that will be set active will trigger an lcu update
         */
        if (!usedev)
                return -EINVAL;
        lcu->ruac_data.device = usedev;
        schedule_delayed_work(&lcu->ruac_data.dwork, 0);
        return 0;
}
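
/*
 * Called when a device becomes ready for service. If the lcu data is still
 * up to date, the device is sorted into its pav group right away; otherwise
 * it is parked on the active list and an lcu update is scheduled.
 */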

int dasd_alias_add_device(struct dasd_device *device)
{
        struct dasd_eckd_private *private;
        struct alias_lcu *lcu;
        unsigned long flags;
        int rc;

        private = (struct dasd_eckd_private *) device->private;
        lcu = private->lcu;
        rc = 0;
        spin_lock_irqsave(&lcu->lock, flags);
        if (!(lcu->flags & UPDATE_PENDING)) {
                rc = _add_device_to_lcu(lcu, device);
                if (rc)
                        lcu->flags |= UPDATE_PENDING;
        }
        if (lcu->flags & UPDATE_PENDING) {
                list_move(&device->alias_list, &lcu->active_devices);
                _schedule_lcu_update(lcu, device);
        }
        spin_unlock_irqrestore(&lcu->lock, flags);
        return rc;
}
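
/*
 * Called when a device is no longer ready for service; undoes
 * dasd_alias_add_device.
 */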

int dasd_alias_remove_device(struct dasd_device *device)
{
        struct dasd_eckd_private *private;
        struct alias_lcu *lcu;
        unsigned long flags;

        private = (struct dasd_eckd_private *) device->private;
        lcu = private->lcu;
        spin_lock_irqsave(&lcu->lock, flags);
        _remove_device_from_lcu(lcu, device);
        spin_unlock_irqrestore(&lcu->lock, flags);
        return 0;
}
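
/*
 * Called for each I/O on a base device: pick the next alias device of the
 * base device's pav group in a simple round-robin fashion, and only use it
 * if it currently has fewer requests queued than the base device and is not
 * stopped. Returns NULL if no suitable alias is available.
 */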

struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device)
{
        struct dasd_device *alias_device;
        struct alias_pav_group *group;
        struct alias_lcu *lcu;
        struct dasd_eckd_private *private, *alias_priv;
        unsigned long flags;

        private = (struct dasd_eckd_private *) base_device->private;
        group = private->pavgroup;
        lcu = private->lcu;
        if (!group || !lcu)
                return NULL;
        if (lcu->pav == NO_PAV ||
            lcu->flags & (NEED_UAC_UPDATE | UPDATE_PENDING))
                return NULL;

        spin_lock_irqsave(&lcu->lock, flags);
        alias_device = group->next;
        if (!alias_device) {
                if (list_empty(&group->aliaslist)) {
                        spin_unlock_irqrestore(&lcu->lock, flags);
                        return NULL;
                } else {
                        alias_device = list_first_entry(&group->aliaslist,
                                                        struct dasd_device,
                                                        alias_list);
                }
        }
        if (list_is_last(&alias_device->alias_list, &group->aliaslist))
                group->next = list_first_entry(&group->aliaslist,
                                               struct dasd_device, alias_list);
        else
                group->next = list_first_entry(&alias_device->alias_list,
                                               struct dasd_device, alias_list);
        spin_unlock_irqrestore(&lcu->lock, flags);
        alias_priv = (struct dasd_eckd_private *) alias_device->private;
        if ((alias_priv->count < private->count) && !alias_device->stopped)
                return alias_device;
        else
                return NULL;
}

/*
 * Summary unit check handling depends on the way alias devices
 * are handled so it is done here rather than in dasd_eckd.c
 */
static int reset_summary_unit_check(struct alias_lcu *lcu,
                                    struct dasd_device *device,
                                    char reason)
{
        struct dasd_ccw_req *cqr;
        int rc = 0;

        cqr = lcu->rsu_cqr;
        strncpy((char *) &cqr->magic, "ECKD", 4);
        ASCEBC((char *) &cqr->magic, 4);
        cqr->cpaddr->cmd_code = DASD_ECKD_CCW_RSCK;
        cqr->cpaddr->flags = 0;
        cqr->cpaddr->count = 16;
        cqr->cpaddr->cda = (__u32)(addr_t) cqr->data;
        ((char *)cqr->data)[0] = reason;

        clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
        cqr->retries = 255;     /* set retry counter to enable basic ERP */
        cqr->startdev = device;
        cqr->memdev = device;
        cqr->block = NULL;
        cqr->expires = 5 * HZ;
        cqr->buildclk = get_clock();
        cqr->status = DASD_CQR_FILLED;

        rc = dasd_sleep_on_immediatly(cqr);
        return rc;
}
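
/*
 * Kick the block layer and device tasklets of all base devices on the lcu
 * so that requests that were held off during summary unit check handling
 * get restarted.
 */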

static void _restart_all_base_devices_on_lcu(struct alias_lcu *lcu)
{
        struct alias_pav_group *pavgroup;
        struct dasd_device *device;
        struct dasd_eckd_private *private;

        /* active and inactive list can contain alias as well as base devices */
        list_for_each_entry(device, &lcu->active_devices, alias_list) {
                private = (struct dasd_eckd_private *) device->private;
                if (private->uid.type != UA_BASE_DEVICE)
                        continue;
                dasd_schedule_block_bh(device->block);
                dasd_schedule_device_bh(device);
        }
        list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
                private = (struct dasd_eckd_private *) device->private;
                if (private->uid.type != UA_BASE_DEVICE)
                        continue;
                dasd_schedule_block_bh(device->block);
                dasd_schedule_device_bh(device);
        }
        list_for_each_entry(pavgroup, &lcu->grouplist, group) {
                list_for_each_entry(device, &pavgroup->baselist, alias_list) {
                        dasd_schedule_block_bh(device->block);
                        dasd_schedule_device_bh(device);
                }
        }
}

static void flush_all_alias_devices_on_lcu(struct alias_lcu *lcu)
{
        struct alias_pav_group *pavgroup;
        struct dasd_device *device, *temp;
        struct dasd_eckd_private *private;
        int rc;
        unsigned long flags;
        LIST_HEAD(active);

        /*
         * Problem here is that dasd_flush_device_queue may wait
         * for termination of a request to complete. We can't keep
         * the lcu lock during that time, so we must assume that
         * the lists may have changed.
         * Idea: first gather all active alias devices in a separate list,
         * then flush the first element of this list unlocked, and afterwards
         * check if it is still on the list before moving it to the
         * active_devices list.
         */

        spin_lock_irqsave(&lcu->lock, flags);
        list_for_each_entry_safe(device, temp, &lcu->active_devices,
                                 alias_list) {
                private = (struct dasd_eckd_private *) device->private;
                if (private->uid.type == UA_BASE_DEVICE)
                        continue;
                list_move(&device->alias_list, &active);
        }

        list_for_each_entry(pavgroup, &lcu->grouplist, group) {
                list_splice_init(&pavgroup->aliaslist, &active);
        }
        while (!list_empty(&active)) {
                device = list_first_entry(&active, struct dasd_device,
                                          alias_list);
                spin_unlock_irqrestore(&lcu->lock, flags);
                rc = dasd_flush_device_queue(device);
                spin_lock_irqsave(&lcu->lock, flags);
                /*
                 * only move device around if it wasn't moved away while we
                 * were waiting for the flush
                 */
                if (device == list_first_entry(&active,
                                               struct dasd_device, alias_list))
                        list_move(&device->alias_list, &lcu->active_devices);
        }
        spin_unlock_irqrestore(&lcu->lock, flags);
}
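
/*
 * Helpers to set and clear DASD_STOPPED_SU on every device of an lcu while
 * a summary unit check is being handled.
 */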

static void __stop_device_on_lcu(struct dasd_device *device,
                                 struct dasd_device *pos)
{
        /* If pos == device then device is already locked! */
        if (pos == device) {
                pos->stopped |= DASD_STOPPED_SU;
                return;
        }
        spin_lock(get_ccwdev_lock(pos->cdev));
        pos->stopped |= DASD_STOPPED_SU;
        spin_unlock(get_ccwdev_lock(pos->cdev));
}

/*
 * This function is called in interrupt context, so the
 * cdev lock for device is already locked!
 */
static void _stop_all_devices_on_lcu(struct alias_lcu *lcu,
                                     struct dasd_device *device)
{
        struct alias_pav_group *pavgroup;
        struct dasd_device *pos;

        list_for_each_entry(pos, &lcu->active_devices, alias_list)
                __stop_device_on_lcu(device, pos);
        list_for_each_entry(pos, &lcu->inactive_devices, alias_list)
                __stop_device_on_lcu(device, pos);
        list_for_each_entry(pavgroup, &lcu->grouplist, group) {
                list_for_each_entry(pos, &pavgroup->baselist, alias_list)
                        __stop_device_on_lcu(device, pos);
                list_for_each_entry(pos, &pavgroup->aliaslist, alias_list)
                        __stop_device_on_lcu(device, pos);
        }
}

static void _unstop_all_devices_on_lcu(struct alias_lcu *lcu)
{
        struct alias_pav_group *pavgroup;
        struct dasd_device *device;
        unsigned long flags;

        list_for_each_entry(device, &lcu->active_devices, alias_list) {
                spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
                device->stopped &= ~DASD_STOPPED_SU;
                spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
        }

        list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
                spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
                device->stopped &= ~DASD_STOPPED_SU;
                spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
        }

        list_for_each_entry(pavgroup, &lcu->grouplist, group) {
                list_for_each_entry(device, &pavgroup->baselist, alias_list) {
                        spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
                        device->stopped &= ~DASD_STOPPED_SU;
                        spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
                                               flags);
                }
                list_for_each_entry(device, &pavgroup->aliaslist, alias_list) {
                        spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
                        device->stopped &= ~DASD_STOPPED_SU;
                        spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
                                               flags);
                }
        }
}
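
/*
 * Worker function behind suc_data.worker: performs the actual summary unit
 * check recovery outside of interrupt context (flush alias devices, reset
 * the summary unit check, unstop and restart the devices, re-read the
 * unit address configuration).
 */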

static void summary_unit_check_handling_work(struct work_struct *work)
{
        struct alias_lcu *lcu;
        struct summary_unit_check_work_data *suc_data;
        unsigned long flags;
        struct dasd_device *device;

        suc_data = container_of(work, struct summary_unit_check_work_data,
                                worker);
        lcu = container_of(suc_data, struct alias_lcu, suc_data);
        device = suc_data->device;

        /* 1. flush alias devices */
        flush_all_alias_devices_on_lcu(lcu);

        /* 2. reset summary unit check */
        spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
        device->stopped &= ~(DASD_STOPPED_SU | DASD_STOPPED_PENDING);
        spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
        reset_summary_unit_check(lcu, device, suc_data->reason);

        spin_lock_irqsave(&lcu->lock, flags);
        _unstop_all_devices_on_lcu(lcu);
        _restart_all_base_devices_on_lcu(lcu);
        /* 3. read new alias configuration */
        _schedule_lcu_update(lcu, device);
        lcu->suc_data.device = NULL;
        spin_unlock_irqrestore(&lcu->lock, flags);
}

/*
 * note: this will be called from int handler context (cdev locked)
 */
void dasd_alias_handle_summary_unit_check(struct dasd_device *device,
                                          struct irb *irb)
{
        struct alias_lcu *lcu;
        char reason;
        struct dasd_eckd_private *private;

        private = (struct dasd_eckd_private *) device->private;

        reason = irb->ecw[8];
        DEV_MESSAGE(KERN_WARNING, device, "%s %x",
                    "eckd handle summary unit check: reason", reason);

        lcu = private->lcu;
        if (!lcu) {
                DEV_MESSAGE(KERN_WARNING, device, "%s",
                            "device not ready to handle summary"
                            " unit check (no lcu structure)");
                return;
        }
        spin_lock(&lcu->lock);
        _stop_all_devices_on_lcu(lcu, device);
        /* prepare for lcu_update */
        private->lcu->flags |= NEED_UAC_UPDATE | UPDATE_PENDING;
        /* If this device is about to be removed just return and wait for
         * the next interrupt on a different device
         */
        if (list_empty(&device->alias_list)) {
                DEV_MESSAGE(KERN_WARNING, device, "%s",
                            "device is in offline processing,"
                            " don't do summary unit check handling");
                spin_unlock(&lcu->lock);
                return;
        }
        if (lcu->suc_data.device) {
                /* already scheduled or running */
                DEV_MESSAGE(KERN_WARNING, device, "%s",
                            "previous instance of summary unit check worker"
                            " still pending");
                spin_unlock(&lcu->lock);
                return;
        }
        lcu->suc_data.reason = reason;
        lcu->suc_data.device = device;
        spin_unlock(&lcu->lock);
        schedule_work(&lcu->suc_data.worker);
}