/*
 * driver for channel subsystem
 *
 * Copyright IBM Corp. 2002, 2009
 *
 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *	      Cornelia Huck (cornelia.huck@de.ibm.com)
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/reboot.h>
#include <linux/suspend.h>
#include <asm/isc.h>
#include <asm/crw.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chsc.h"
#include "device.h"
#include "idset.h"
#include "chp.h"
int css_init_done = 0;
int max_ssid;

struct channel_subsystem *channel_subsystems[__MAX_CSSID + 1];
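/*
 * Call fn for every possible subchannel id (ssid 0..max_ssid, subchannel
 * number 0..__MAX_SUBCHANNEL); iteration stops as soon as fn returns a
 * non-zero value.
 */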
int
for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
{
	struct subchannel_id schid;
	int ret;

	init_subchannel_id(&schid);
	ret = -ENODEV;
	do {
		do {
			ret = fn(schid, data);
			if (ret)
				break;
		} while (schid.sch_no++ < __MAX_SUBCHANNEL);
		schid.sch_no = 0;
	} while (schid.ssid++ < max_ssid);
	return ret;
}
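/*
 * Callback context for the staged traversal below: the idset tracks which
 * subchannel ids have not yet been visited as registered devices and thus
 * still need the "unknown" callback.
 */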
struct cb_data {
	void *data;
	struct idset *set;
	int (*fn_known_sch)(struct subchannel *, void *);
	int (*fn_unknown_sch)(struct subchannel_id, void *);
};
static int call_fn_known_sch(struct device *dev, void *data)
{
	struct subchannel *sch = to_subchannel(dev);
	struct cb_data *cb = data;
	int rc = 0;

	idset_sch_del(cb->set, sch->schid);
	if (cb->fn_known_sch)
		rc = cb->fn_known_sch(sch, cb->data);
	return rc;
}
static int call_fn_unknown_sch(struct subchannel_id schid, void *data)
{
	struct cb_data *cb = data;
	int rc = 0;

	if (idset_sch_contains(cb->set, schid))
		rc = cb->fn_unknown_sch(schid, cb->data);
	return rc;
}
static int call_fn_all_sch(struct subchannel_id schid, void *data)
{
	struct cb_data *cb = data;
	struct subchannel *sch;
	int rc = 0;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		if (cb->fn_known_sch)
			rc = cb->fn_known_sch(sch, cb->data);
		put_device(&sch->dev);
	} else {
		if (cb->fn_unknown_sch)
			rc = cb->fn_unknown_sch(schid, cb->data);
	}

	return rc;
}
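/**
 * for_each_subchannel_staged - call a function for all subchannels
 * @fn_known: function called for each registered subchannel
 * @fn_unknown: function called for each subchannel id without a device
 * @data: opaque pointer passed through to both callbacks
 *
 * Registered subchannels are visited first via the css bus; the remaining
 * subchannel ids are swept afterwards. If the tracking idset cannot be
 * allocated, every possible id is scanned by brute force instead.
 */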
int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
			       int (*fn_unknown)(struct subchannel_id,
			       void *), void *data)
{
	struct cb_data cb;
	int rc;

	cb.data = data;
	cb.fn_known_sch = fn_known;
	cb.fn_unknown_sch = fn_unknown;

	cb.set = idset_sch_new();
	if (!cb.set)
		/* fall back to brute force scanning in case of oom */
		return for_each_subchannel(call_fn_all_sch, &cb);

	idset_fill(cb.set);

	/* Process registered subchannels. */
	rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch);
	if (rc)
		goto out;
	/* Process unregistered subchannels. */
	if (fn_unknown)
		rc = for_each_subchannel(call_fn_unknown_sch, &cb);
out:
	idset_free(cb.set);

	return rc;
}
static struct subchannel *
css_alloc_subchannel(struct subchannel_id schid)
{
	struct subchannel *sch;
	int ret;

	sch = kmalloc(sizeof(*sch), GFP_KERNEL | GFP_DMA);
	if (sch == NULL)
		return ERR_PTR(-ENOMEM);
	ret = cio_validate_subchannel(sch, schid);
	if (ret < 0) {
		kfree(sch);
		return ERR_PTR(ret);
	}
	return sch;
}
static void
css_subchannel_release(struct device *dev)
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
	if (!cio_is_console(sch->schid)) {
		/* Reset intparm to zeroes. */
		sch->config.intparm = 0;
		cio_commit_config(sch);
		kfree(sch->lock);
		kfree(sch);
	}
}
static int css_sch_device_register(struct subchannel *sch)
{
	int ret;

	mutex_lock(&sch->reg_mutex);
	dev_set_name(&sch->dev, "0.%x.%04x", sch->schid.ssid,
		     sch->schid.sch_no);
	ret = device_register(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
	return ret;
}
/**
 * css_sch_device_unregister - unregister a subchannel
 * @sch: subchannel to be unregistered
 */
void css_sch_device_unregister(struct subchannel *sch)
{
	mutex_lock(&sch->reg_mutex);
	if (device_is_registered(&sch->dev))
		device_unregister(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
}
EXPORT_SYMBOL_GPL(css_sch_device_unregister);
static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
{
	int i;
	int mask;

	memset(ssd, 0, sizeof(struct chsc_ssd_info));
	ssd->path_mask = pmcw->pim;
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (pmcw->pim & mask) {
			chp_id_init(&ssd->chpid[i]);
			ssd->chpid[i].id = pmcw->chpid[i];
		}
	}
}
static void ssd_register_chpids(struct chsc_ssd_info *ssd)
{
	int i;
	int mask;

	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (ssd->path_mask & mask)
			if (!chp_is_registered(ssd->chpid[i]))
				chp_new(ssd->chpid[i]);
	}
}
void css_update_ssd_info(struct subchannel *sch)
{
	int ret;

	if (cio_is_console(sch->schid)) {
		/* Console is initialized too early for functions requiring
		 * memory allocation. */
		ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
	} else {
		ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
		if (ret)
			ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
		ssd_register_chpids(&sch->ssd_info);
	}
}
static ssize_t
type_show(struct device *dev, struct device_attribute *attr,
	  char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "%01x\n", sch->st);
}

static DEVICE_ATTR(type, 0444, type_show, NULL);

static ssize_t
modalias_show(struct device *dev, struct device_attribute *attr,
	      char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "css:t%01X\n", sch->st);
}

static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);

static struct attribute *subch_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_modalias.attr,
	NULL,
};

static struct attribute_group subch_attr_group = {
	.attrs = subch_attrs,
};

static const struct attribute_group *default_subch_attr_groups[] = {
	&subch_attr_group,
	NULL,
};
static int css_register_subchannel(struct subchannel *sch)
{
	int ret;

	/* Initialize the subchannel structure */
	sch->dev.parent = &channel_subsystems[0]->device;
	sch->dev.bus = &css_bus_type;
	sch->dev.release = &css_subchannel_release;
	sch->dev.groups = default_subch_attr_groups;
	/*
	 * We don't want to generate uevents for I/O subchannels that don't
	 * have a working ccw device behind them since they will be
	 * unregistered before they can be used anyway, so we delay the add
	 * uevent until after device recognition was successful.
	 * Note that we suppress the uevent for all subchannel types;
	 * the subchannel driver can decide itself when it wants to inform
	 * userspace of its existence.
	 */
	dev_set_uevent_suppress(&sch->dev, 1);
	css_update_ssd_info(sch);
	/* make it known to the system */
	ret = css_sch_device_register(sch);
	if (ret) {
		CIO_MSG_EVENT(0, "Could not register sch 0.%x.%04x: %d\n",
			      sch->schid.ssid, sch->schid.sch_no, ret);
		return ret;
	}
	if (!sch->driver) {
		/*
		 * No driver matched. Generate the uevent now so that
		 * a fitting driver module may be loaded based on the
		 * modalias.
		 */
		dev_set_uevent_suppress(&sch->dev, 0);
		kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
	}
	return ret;
}
int css_probe_device(struct subchannel_id schid)
{
	int ret;
	struct subchannel *sch;

	if (cio_is_console(schid))
		sch = cio_get_console_subchannel();
	else {
		sch = css_alloc_subchannel(schid);
		if (IS_ERR(sch))
			return PTR_ERR(sch);
	}
	ret = css_register_subchannel(sch);
	if (ret) {
		if (!cio_is_console(schid))
			put_device(&sch->dev);
	}
	return ret;
}
static int
check_subchannel(struct device *dev, void *data)
{
	struct subchannel *sch;
	struct subchannel_id *schid = data;

	sch = to_subchannel(dev);
	return schid_equal(&sch->schid, schid);
}
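/*
 * Look up a registered subchannel by its id. On success the embedded
 * struct device is returned with an elevated reference count; the caller
 * must drop it with put_device().
 */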
struct subchannel *
get_subchannel_by_schid(struct subchannel_id schid)
{
	struct device *dev;

	dev = bus_find_device(&css_bus_type, NULL,
			      &schid, check_subchannel);

	return dev ? to_subchannel(dev) : NULL;
}
/**
 * css_sch_is_valid() - check if a subchannel is valid
 * @schib: subchannel information block for the subchannel
 */
int css_sch_is_valid(struct schib *schib)
{
	if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv)
		return 0;
	if ((schib->pmcw.st == SUBCHANNEL_TYPE_MSG) && !schib->pmcw.w)
		return 0;
	return 1;
}
EXPORT_SYMBOL_GPL(css_sch_is_valid);
static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
{
	struct schib schib;

	if (!slow) {
		/* Will be done on the slow path. */
		return -EAGAIN;
	}
	if (stsch_err(schid, &schib) || !css_sch_is_valid(&schib)) {
		/* Unusable - ignore. */
		return 0;
	}
	CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, unknown, "
		      "slow path.\n", schid.ssid, schid.sch_no, CIO_OPER);

	return css_probe_device(schid);
}
static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
{
	int ret = 0;

	if (sch->driver) {
		if (sch->driver->sch_event)
			ret = sch->driver->sch_event(sch, slow);
		else
			dev_dbg(&sch->dev,
				"Got subchannel machine check but "
				"no sch_event handler provided.\n");
	}
	return ret;
}
static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
{
	struct subchannel *sch;
	int ret;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		ret = css_evaluate_known_subchannel(sch, slow);
		put_device(&sch->dev);
	} else
		ret = css_evaluate_new_subchannel(schid, slow);
	if (ret == -EAGAIN)
		css_schedule_eval(schid);
}
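/*
 * Slow-path evaluation: subchannel ids that cannot be handled immediately
 * (e.g. from machine check context) are collected in slow_subchannel_set
 * and re-evaluated from a workqueue; css_eval_wq is woken up once the set
 * has been drained.
 */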
static struct idset *slow_subchannel_set;
static spinlock_t slow_subchannel_lock;
static wait_queue_head_t css_eval_wq;
static atomic_t css_eval_scheduled;
static int __init slow_subchannel_init(void)
{
	spin_lock_init(&slow_subchannel_lock);
	atomic_set(&css_eval_scheduled, 0);
	init_waitqueue_head(&css_eval_wq);
	slow_subchannel_set = idset_sch_new();
	if (!slow_subchannel_set) {
		CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n");
		return -ENOMEM;
	}
	return 0;
}
static int slow_eval_known_fn(struct subchannel *sch, void *data)
{
	int eval;
	int rc;

	spin_lock_irq(&slow_subchannel_lock);
	eval = idset_sch_contains(slow_subchannel_set, sch->schid);
	idset_sch_del(slow_subchannel_set, sch->schid);
	spin_unlock_irq(&slow_subchannel_lock);
	if (eval) {
		rc = css_evaluate_known_subchannel(sch, 1);
		if (rc == -EAGAIN)
			css_schedule_eval(sch->schid);
	}
	return 0;
}
static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
{
	int eval;
	int rc = 0;

	spin_lock_irq(&slow_subchannel_lock);
	eval = idset_sch_contains(slow_subchannel_set, schid);
	idset_sch_del(slow_subchannel_set, schid);
	spin_unlock_irq(&slow_subchannel_lock);
	if (eval) {
		rc = css_evaluate_new_subchannel(schid, 1);
		switch (rc) {
		case -EAGAIN:
			css_schedule_eval(schid);
			rc = 0;
			break;
		case -ENXIO:
		case -ENOMEM:
		case -EIO:
			/* These should abort looping */
			break;
		default:
			rc = 0;
		}
	}
	return rc;
}
static void css_slow_path_func(struct work_struct *unused)
{
	unsigned long flags;

	CIO_TRACE_EVENT(4, "slowpath");
	for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn,
				   NULL);
	spin_lock_irqsave(&slow_subchannel_lock, flags);
	if (idset_is_empty(slow_subchannel_set)) {
		atomic_set(&css_eval_scheduled, 0);
		wake_up(&css_eval_wq);
	}
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}
static DECLARE_WORK(slow_path_work, css_slow_path_func);
struct workqueue_struct *slow_path_wq;
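/* Queue one subchannel id for slow-path evaluation and kick the worker. */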
void css_schedule_eval(struct subchannel_id schid)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_sch_add(slow_subchannel_set, schid);
	atomic_set(&css_eval_scheduled, 1);
	queue_work(slow_path_wq, &slow_path_work);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}
void css_schedule_eval_all(void)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_fill(slow_subchannel_set);
	atomic_set(&css_eval_scheduled, 1);
	queue_work(slow_path_wq, &slow_path_work);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}
static int __unset_registered(struct device *dev, void *data)
{
	struct idset *set = data;
	struct subchannel *sch = to_subchannel(dev);

	idset_sch_del(set, sch->schid);
	return 0;
}
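/* Schedule evaluation of all subchannel ids without a registered device. */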
void css_schedule_eval_all_unreg(void)
{
	unsigned long flags;
	struct idset *unreg_set;

	/* Find unregistered subchannels. */
	unreg_set = idset_sch_new();
	if (!unreg_set) {
		/* Fallback. */
		css_schedule_eval_all();
		return;
	}
	idset_fill(unreg_set);
	bus_for_each_dev(&css_bus_type, NULL, unreg_set, __unset_registered);
	/* Apply to slow_subchannel_set. */
	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_add_set(slow_subchannel_set, unreg_set);
	atomic_set(&css_eval_scheduled, 1);
	queue_work(slow_path_wq, &slow_path_work);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
	idset_free(unreg_set);
}
void css_wait_for_slow_path(void)
{
	flush_workqueue(slow_path_wq);
}

/* Schedule reprobing of all unregistered subchannels. */
void css_schedule_reprobe(void)
{
	css_schedule_eval_all_unreg();
}
EXPORT_SYMBOL_GPL(css_schedule_reprobe);
/*
 * Called from the machine check handler for subchannel report words.
 */
static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
	struct subchannel_id mchk_schid;

	if (overflow) {
		css_schedule_eval_all();
		return;
	}
	CIO_CRW_EVENT(2, "CRW0 reports slct=%d, oflw=%d, "
		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
		      crw0->erc, crw0->rsid);
	if (crw1)
		CIO_CRW_EVENT(2, "CRW1 reports slct=%d, oflw=%d, "
			      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
			      crw1->slct, crw1->oflw, crw1->chn, crw1->rsc,
			      crw1->anc, crw1->erc, crw1->rsid);
	init_subchannel_id(&mchk_schid);
	mchk_schid.sch_no = crw0->rsid;
	if (crw1)
		mchk_schid.ssid = (crw1->rsid >> 8) & 3;

	/*
	 * Since we are always presented with IPI in the CRW, we have to
	 * use stsch() to find out if the subchannel in question has come
	 * or gone.
	 */
	css_evaluate_subchannel(mchk_schid, 0);
}
static void __init
css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
{
	if (css_general_characteristics.mcss) {
		css->global_pgid.pgid_high.ext_cssid.version = 0x80;
		css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid;
	} else {
#ifdef CONFIG_SMP
		css->global_pgid.pgid_high.cpu_addr = stap();
#else
		css->global_pgid.pgid_high.cpu_addr = 0;
#endif
	}
	css->global_pgid.cpu_id = S390_lowcore.cpu_id.ident;
	css->global_pgid.cpu_model = S390_lowcore.cpu_id.machine;
	css->global_pgid.tod_high = tod_high;
}
static void
channel_subsystem_release(struct device *dev)
{
	struct channel_subsystem *css;

	css = to_css(dev);
	mutex_destroy(&css->mutex);
	if (css->pseudo_subchannel) {
		/* Implies that it has been generated but never registered. */
		css_subchannel_release(&css->pseudo_subchannel->dev);
		css->pseudo_subchannel = NULL;
	}
	kfree(css);
}
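/*
 * The cm_enable sysfs attribute switches channel-path measurement on or
 * off for a channel subsystem; reading it returns the current state.
 */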
static ssize_t
css_cm_enable_show(struct device *dev, struct device_attribute *attr,
		   char *buf)
{
	struct channel_subsystem *css = to_css(dev);
	int ret;

	if (!css)
		return 0;
	mutex_lock(&css->mutex);
	ret = sprintf(buf, "%x\n", css->cm_enabled);
	mutex_unlock(&css->mutex);
	return ret;
}
static ssize_t
css_cm_enable_store(struct device *dev, struct device_attribute *attr,
		    const char *buf, size_t count)
{
	struct channel_subsystem *css = to_css(dev);
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 16, &val);
	if (ret)
		return ret;
	mutex_lock(&css->mutex);
	switch (val) {
	case 0:
		ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
		break;
	case 1:
		ret = css->cm_enabled ? 0 : chsc_secm(css, 1);
		break;
	default:
		ret = -EINVAL;
	}
	mutex_unlock(&css->mutex);
	return ret < 0 ? ret : count;
}

static DEVICE_ATTR(cm_enable, 0644, css_cm_enable_show, css_cm_enable_store);
static int __init setup_css(int nr)
{
	u32 tod_high;
	int ret;
	struct channel_subsystem *css;

	css = channel_subsystems[nr];
	memset(css, 0, sizeof(struct channel_subsystem));
	css->pseudo_subchannel =
		kzalloc(sizeof(*css->pseudo_subchannel), GFP_KERNEL);
	if (!css->pseudo_subchannel)
		return -ENOMEM;
	css->pseudo_subchannel->dev.parent = &css->device;
	css->pseudo_subchannel->dev.release = css_subchannel_release;
	dev_set_name(&css->pseudo_subchannel->dev, "defunct");
	ret = cio_create_sch_lock(css->pseudo_subchannel);
	if (ret) {
		kfree(css->pseudo_subchannel);
		return ret;
	}
	mutex_init(&css->mutex);
	css->valid = 1;
	css->cssid = nr;
	dev_set_name(&css->device, "css%x", nr);
	css->device.release = channel_subsystem_release;
	tod_high = (u32) (get_clock() >> 32);
	css_generate_pgid(css, tod_high);
	return 0;
}
static int css_reboot_event(struct notifier_block *this,
			    unsigned long event,
			    void *ptr)
{
	int ret, i;

	ret = NOTIFY_DONE;
	for (i = 0; i <= __MAX_CSSID; i++) {
		struct channel_subsystem *css;

		css = channel_subsystems[i];
		mutex_lock(&css->mutex);
		if (css->cm_enabled)
			if (chsc_secm(css, 0))
				ret = NOTIFY_BAD;
		mutex_unlock(&css->mutex);
	}

	return ret;
}

static struct notifier_block css_reboot_notifier = {
	.notifier_call = css_reboot_event,
};
/*
 * Since the css devices are neither on a bus nor have a class
 * nor have a special device type, we cannot stop/restart channel
 * path measurements via the normal suspend/resume callbacks, but have
 * to use notifiers.
 */
static int css_power_event(struct notifier_block *this, unsigned long event,
			   void *ptr)
{
	void *secm_area;
	int ret, i;

	switch (event) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		ret = NOTIFY_DONE;
		for (i = 0; i <= __MAX_CSSID; i++) {
			struct channel_subsystem *css;

			css = channel_subsystems[i];
			mutex_lock(&css->mutex);
			if (!css->cm_enabled) {
				mutex_unlock(&css->mutex);
				continue;
			}
			secm_area = (void *)get_zeroed_page(GFP_KERNEL |
							    GFP_DMA);
			if (secm_area) {
				if (__chsc_do_secm(css, 0, secm_area))
					ret = NOTIFY_BAD;
				free_page((unsigned long)secm_area);
			} else
				ret = NOTIFY_BAD;

			mutex_unlock(&css->mutex);
		}
		break;
	case PM_POST_HIBERNATION:
	case PM_POST_SUSPEND:
		ret = NOTIFY_DONE;
		for (i = 0; i <= __MAX_CSSID; i++) {
			struct channel_subsystem *css;

			css = channel_subsystems[i];
			mutex_lock(&css->mutex);
			if (!css->cm_enabled) {
				mutex_unlock(&css->mutex);
				continue;
			}
			secm_area = (void *)get_zeroed_page(GFP_KERNEL |
							    GFP_DMA);
			if (secm_area) {
				if (__chsc_do_secm(css, 1, secm_area))
					ret = NOTIFY_BAD;
				free_page((unsigned long)secm_area);
			} else
				ret = NOTIFY_BAD;

			mutex_unlock(&css->mutex);
		}
		/* search for subchannels, which appeared during hibernation */
		css_schedule_reprobe();
		break;
	default:
		ret = NOTIFY_DONE;
	}
	return ret;
}

static struct notifier_block css_power_notifier = {
	.notifier_call = css_power_event,
};
/*
 * Now that the driver core is running, we can setup our channel subsystem.
 * The struct subchannel's are created during probing (except for the
 * static console subchannel).
 */
static int __init css_bus_init(void)
{
	int ret, i;

	ret = chsc_determine_css_characteristics();
	if (ret == -ENOMEM)
		goto out;

	ret = chsc_alloc_sei_area();
	if (ret)
		goto out;

	/* Try to enable MSS. */
	ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
	switch (ret) {
	case 0: /* Success. */
		max_ssid = __MAX_SSID;
		break;
	case -ENOMEM:
		goto out;
	default:
		max_ssid = 0;
	}

	ret = slow_subchannel_init();
	if (ret)
		goto out;

	ret = crw_register_handler(CRW_RSC_SCH, css_process_crw);
	if (ret)
		goto out;

	if ((ret = bus_register(&css_bus_type)))
		goto out;

	/* Setup css structure. */
	for (i = 0; i <= __MAX_CSSID; i++) {
		struct channel_subsystem *css;

		css = kmalloc(sizeof(struct channel_subsystem), GFP_KERNEL);
		if (!css) {
			ret = -ENOMEM;
			goto out_unregister;
		}
		channel_subsystems[i] = css;
		ret = setup_css(i);
		if (ret) {
			kfree(channel_subsystems[i]);
			goto out_unregister;
		}
		ret = device_register(&css->device);
		if (ret) {
			put_device(&css->device);
			goto out_unregister;
		}
		if (css_chsc_characteristics.secm) {
			ret = device_create_file(&css->device,
						 &dev_attr_cm_enable);
			if (ret)
				goto out_device;
		}
		ret = device_register(&css->pseudo_subchannel->dev);
		if (ret) {
			put_device(&css->pseudo_subchannel->dev);
			goto out_file;
		}
	}
	ret = register_reboot_notifier(&css_reboot_notifier);
	if (ret)
		goto out_unregister;
	ret = register_pm_notifier(&css_power_notifier);
	if (ret) {
		unregister_reboot_notifier(&css_reboot_notifier);
		goto out_unregister;
	}
	css_init_done = 1;

	/* Enable default isc for I/O subchannels. */
	isc_register(IO_SCH_ISC);

	return 0;
out_file:
	if (css_chsc_characteristics.secm)
		device_remove_file(&channel_subsystems[i]->device,
				   &dev_attr_cm_enable);
out_device:
	device_unregister(&channel_subsystems[i]->device);
out_unregister:
	while (i > 0) {
		struct channel_subsystem *css;

		i--;
		css = channel_subsystems[i];
		device_unregister(&css->pseudo_subchannel->dev);
		css->pseudo_subchannel = NULL;
		if (css_chsc_characteristics.secm)
			device_remove_file(&css->device,
					   &dev_attr_cm_enable);
		device_unregister(&css->device);
	}
	bus_unregister(&css_bus_type);
out:
	crw_unregister_handler(CRW_RSC_SCH);
	chsc_free_sei_area();
	idset_free(slow_subchannel_set);
	pr_alert("The CSS device driver initialization failed with "
		 "errno=%d\n", ret);
	return ret;
}
static void __init css_bus_cleanup(void)
{
	struct channel_subsystem *css;
	int i;

	for (i = 0; i <= __MAX_CSSID; i++) {
		css = channel_subsystems[i];
		device_unregister(&css->pseudo_subchannel->dev);
		css->pseudo_subchannel = NULL;
		if (css_chsc_characteristics.secm)
			device_remove_file(&css->device, &dev_attr_cm_enable);
		device_unregister(&css->device);
	}
	bus_unregister(&css_bus_type);
	crw_unregister_handler(CRW_RSC_SCH);
	chsc_free_sei_area();
	idset_free(slow_subchannel_set);
	isc_unregister(IO_SCH_ISC);
}
static int __init channel_subsystem_init(void)
{
	int ret;

	ret = css_bus_init();
	if (ret)
		return ret;

	ret = io_subchannel_init();
	if (ret)
		css_bus_cleanup();

	return ret;
}
subsys_initcall(channel_subsystem_init);
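/*
 * Let each registered css driver finish its type specific device
 * recognition by calling its settle callback, if one is provided.
 */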
static int css_settle(struct device_driver *drv, void *unused)
{
	struct css_driver *cssdrv = to_cssdriver(drv);

	if (cssdrv->settle)
		cssdrv->settle();
	return 0;
}
/*
 * Wait for the initialization of devices to finish, to make sure we are
 * done with our setup if the search for the root device starts.
 */
static int __init channel_subsystem_init_sync(void)
{
	/* Start initial subchannel evaluation. */
	css_schedule_eval_all();
	/* Wait for the evaluation of subchannels to finish. */
	wait_event(css_eval_wq, atomic_read(&css_eval_scheduled) == 0);
	/* Wait for the subchannel type specific initialization to finish */
	return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
}
subsys_initcall_sync(channel_subsystem_init_sync);
int sch_is_pseudo_sch(struct subchannel *sch)
{
	return sch == to_css(sch->dev.parent)->pseudo_subchannel;
}
static int css_bus_match(struct device *dev, struct device_driver *drv)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *driver = to_cssdriver(drv);
	struct css_device_id *id;

	for (id = driver->subchannel_type; id->match_flags; id++) {
		if (sch->st == id->type)
			return 1;
	}

	return 0;
}
static int css_probe(struct device *dev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(dev);
	sch->driver = to_cssdriver(dev->driver);
	ret = sch->driver->probe ? sch->driver->probe(sch) : 0;
	if (ret)
		sch->driver = NULL;
	return ret;
}
static int css_remove(struct device *dev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(dev);
	ret = sch->driver->remove ? sch->driver->remove(sch) : 0;
	sch->driver = NULL;
	return ret;
}
static void css_shutdown(struct device *dev)
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
	if (sch->driver && sch->driver->shutdown)
		sch->driver->shutdown(sch);
}
static int css_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct subchannel *sch = to_subchannel(dev);
	int ret;

	ret = add_uevent_var(env, "ST=%01X", sch->st);
	if (ret)
		return ret;
	ret = add_uevent_var(env, "MODALIAS=css:t%01X", sch->st);
	return ret;
}
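/*
 * Power management callbacks: each one forwards to the corresponding
 * css driver callback, if the subchannel is bound to a driver that
 * provides it.
 */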
static int css_pm_prepare(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (mutex_is_locked(&sch->reg_mutex))
		return -EAGAIN;
	if (!sch->dev.driver)
		return 0;
	drv = to_cssdriver(sch->dev.driver);
	/* Notify drivers that they may not register children. */
	return drv->prepare ? drv->prepare(sch) : 0;
}
static void css_pm_complete(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (!sch->dev.driver)
		return;
	drv = to_cssdriver(sch->dev.driver);
	if (drv->complete)
		drv->complete(sch);
}
static int css_pm_freeze(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (!sch->dev.driver)
		return 0;
	drv = to_cssdriver(sch->dev.driver);
	return drv->freeze ? drv->freeze(sch) : 0;
}
static int css_pm_thaw(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (!sch->dev.driver)
		return 0;
	drv = to_cssdriver(sch->dev.driver);
	return drv->thaw ? drv->thaw(sch) : 0;
}
static int css_pm_restore(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (!sch->dev.driver)
		return 0;
	drv = to_cssdriver(sch->dev.driver);
	return drv->restore ? drv->restore(sch) : 0;
}
static struct dev_pm_ops css_pm_ops = {
	.prepare = css_pm_prepare,
	.complete = css_pm_complete,
	.freeze = css_pm_freeze,
	.thaw = css_pm_thaw,
	.restore = css_pm_restore,
};
struct bus_type css_bus_type = {
	.name     = "css",
	.match    = css_bus_match,
	.probe    = css_probe,
	.remove   = css_remove,
	.shutdown = css_shutdown,
	.uevent   = css_uevent,
	.pm = &css_pm_ops,
};
/**
 * css_driver_register - register a css driver
 * @cdrv: css driver to register
 *
 * This is mainly a wrapper around driver_register that sets name
 * and bus_type in the embedded struct device_driver correctly.
 */
int css_driver_register(struct css_driver *cdrv)
{
	cdrv->drv.name = cdrv->name;
	cdrv->drv.bus = &css_bus_type;
	cdrv->drv.owner = cdrv->owner;
	return driver_register(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_register);
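/*
 * Typical usage from a subchannel driver; this is only an illustrative
 * sketch (the identifiers below are made up, not part of this file):
 *
 *	static struct css_device_id my_subchannel_ids[] = {
 *		{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
 *		{ },
 *	};
 *
 *	static struct css_driver my_driver = {
 *		.owner = THIS_MODULE,
 *		.name = "my_driver",
 *		.subchannel_type = my_subchannel_ids,
 *		.probe = my_probe,
 *		.remove = my_remove,
 *	};
 *
 *	ret = css_driver_register(&my_driver);
 */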
/**
 * css_driver_unregister - unregister a css driver
 * @cdrv: css driver to unregister
 *
 * This is a wrapper around driver_unregister.
 */
void css_driver_unregister(struct css_driver *cdrv)
{
	driver_unregister(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_unregister);
MODULE_LICENSE("GPL");
EXPORT_SYMBOL(css_bus_type);