/*
 * arch/s390/appldata/appldata_base.c
 *
 * Base infrastructure for Linux-z/VM Monitor Stream, Stage 1.
 * Exports appldata_register_ops() and appldata_unregister_ops() for the
 * data gathering modules.
 *
 * Copyright IBM Corp. 2003, 2009
 *
 * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com>
 */
13 #define KMSG_COMPONENT "appldata"
14 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
16 #include <linux/module.h>
17 #include <linux/init.h>
18 #include <linux/slab.h>
19 #include <linux/errno.h>
20 #include <linux/interrupt.h>
21 #include <linux/proc_fs.h>
23 #include <linux/swap.h>
24 #include <linux/pagemap.h>
25 #include <linux/sysctl.h>
26 #include <linux/notifier.h>
27 #include <linux/cpu.h>
28 #include <linux/workqueue.h>
29 #include <linux/suspend.h>
30 #include <linux/platform_device.h>
31 #include <asm/appldata.h>
32 #include <asm/timer.h>
33 #include <asm/uaccess.h>
40 #define APPLDATA_CPU_INTERVAL 10000 /* default (CPU) time for
44 #define TOD_MICRO 0x01000 /* nr. of TOD clock units
47 static struct platform_device
*appldata_pdev
;
50 * /proc entries (sysctl)
52 static const char appldata_proc_name
[APPLDATA_PROC_NAME_LENGTH
] = "appldata";
53 static int appldata_timer_handler(ctl_table
*ctl
, int write
,
54 void __user
*buffer
, size_t *lenp
, loff_t
*ppos
);
55 static int appldata_interval_handler(ctl_table
*ctl
, int write
,
57 size_t *lenp
, loff_t
*ppos
);
59 static struct ctl_table_header
*appldata_sysctl_header
;
60 static struct ctl_table appldata_table
[] = {
63 .mode
= S_IRUGO
| S_IWUSR
,
64 .proc_handler
= appldata_timer_handler
,
67 .procname
= "interval",
68 .mode
= S_IRUGO
| S_IWUSR
,
69 .proc_handler
= appldata_interval_handler
,
74 static struct ctl_table appldata_dir_table
[] = {
76 .procname
= appldata_proc_name
,
78 .mode
= S_IRUGO
| S_IXUGO
,
79 .child
= appldata_table
,
87 static DEFINE_PER_CPU(struct vtimer_list
, appldata_timer
);
88 static atomic_t appldata_expire_count
= ATOMIC_INIT(0);
90 static DEFINE_SPINLOCK(appldata_timer_lock
);
91 static int appldata_interval
= APPLDATA_CPU_INTERVAL
;
92 static int appldata_timer_active
;
93 static int appldata_timer_suspended
= 0;
98 static struct workqueue_struct
*appldata_wq
;
99 static void appldata_work_fn(struct work_struct
*work
);
100 static DECLARE_WORK(appldata_work
, appldata_work_fn
);
106 static DEFINE_MUTEX(appldata_ops_mutex
);
107 static LIST_HEAD(appldata_ops_list
);
110 /*************************** timer, work, DIAG *******************************/
112 * appldata_timer_function()
114 * schedule work and reschedule timer
116 static void appldata_timer_function(unsigned long data
)
118 if (atomic_dec_and_test(&appldata_expire_count
)) {
119 atomic_set(&appldata_expire_count
, num_online_cpus());
120 queue_work(appldata_wq
, (struct work_struct
*) data
);
127 * call data gathering function for each (active) module
129 static void appldata_work_fn(struct work_struct
*work
)
131 struct list_head
*lh
;
132 struct appldata_ops
*ops
;
137 mutex_lock(&appldata_ops_mutex
);
138 list_for_each(lh
, &appldata_ops_list
) {
139 ops
= list_entry(lh
, struct appldata_ops
, list
);
140 if (ops
->active
== 1) {
141 ops
->callback(ops
->data
);
144 mutex_unlock(&appldata_ops_mutex
);
151 * prepare parameter list, issue DIAG 0xDC
153 int appldata_diag(char record_nr
, u16 function
, unsigned long buffer
,
154 u16 length
, char *mod_lvl
)
156 struct appldata_product_id id
= {
157 .prod_nr
= {0xD3, 0xC9, 0xD5, 0xE4,
158 0xE7, 0xD2, 0xD9}, /* "LINUXKR" */
159 .prod_fn
= 0xD5D3, /* "NL" */
160 .version_nr
= 0xF2F6, /* "26" */
161 .release_nr
= 0xF0F1, /* "01" */
164 id
.record_nr
= record_nr
;
165 id
.mod_lvl
= (mod_lvl
[0]) << 8 | mod_lvl
[1];
166 return appldata_asm(&id
, function
, (void *) buffer
, length
);
168 /************************ timer, work, DIAG <END> ****************************/
171 /****************************** /proc stuff **********************************/
174 * appldata_mod_vtimer_wrap()
176 * wrapper function for mod_virt_timer(), because smp_call_function_single()
177 * accepts only one parameter.
179 static void __appldata_mod_vtimer_wrap(void *p
) {
181 struct vtimer_list
*timer
;
184 mod_virt_timer_periodic(args
->timer
, args
->expires
);
187 #define APPLDATA_ADD_TIMER 0
188 #define APPLDATA_DEL_TIMER 1
189 #define APPLDATA_MOD_TIMER 2
192 * __appldata_vtimer_setup()
194 * Add, delete or modify virtual timers on all online cpus.
195 * The caller needs to get the appldata_timer_lock spinlock.
198 __appldata_vtimer_setup(int cmd
)
200 u64 per_cpu_interval
;
204 case APPLDATA_ADD_TIMER
:
205 if (appldata_timer_active
)
207 per_cpu_interval
= (u64
) (appldata_interval
*1000 /
208 num_online_cpus()) * TOD_MICRO
;
209 for_each_online_cpu(i
) {
210 per_cpu(appldata_timer
, i
).expires
= per_cpu_interval
;
211 smp_call_function_single(i
, add_virt_timer_periodic
,
212 &per_cpu(appldata_timer
, i
),
215 appldata_timer_active
= 1;
217 case APPLDATA_DEL_TIMER
:
218 for_each_online_cpu(i
)
219 del_virt_timer(&per_cpu(appldata_timer
, i
));
220 if (!appldata_timer_active
)
222 appldata_timer_active
= 0;
223 atomic_set(&appldata_expire_count
, num_online_cpus());
225 case APPLDATA_MOD_TIMER
:
226 per_cpu_interval
= (u64
) (appldata_interval
*1000 /
227 num_online_cpus()) * TOD_MICRO
;
228 if (!appldata_timer_active
)
230 for_each_online_cpu(i
) {
232 struct vtimer_list
*timer
;
235 args
.timer
= &per_cpu(appldata_timer
, i
);
236 args
.expires
= per_cpu_interval
;
237 smp_call_function_single(i
, __appldata_mod_vtimer_wrap
,
244 * appldata_timer_handler()
246 * Start/Stop timer, show status of timer (0 = not active, 1 = active)
249 appldata_timer_handler(ctl_table
*ctl
, int write
,
250 void __user
*buffer
, size_t *lenp
, loff_t
*ppos
)
255 if (!*lenp
|| *ppos
) {
260 len
= sprintf(buf
, appldata_timer_active
? "1\n" : "0\n");
263 if (copy_to_user(buffer
, buf
, len
))
268 if (copy_from_user(buf
, buffer
, len
> sizeof(buf
) ? sizeof(buf
) : len
))
271 spin_lock(&appldata_timer_lock
);
273 __appldata_vtimer_setup(APPLDATA_ADD_TIMER
);
274 else if (buf
[0] == '0')
275 __appldata_vtimer_setup(APPLDATA_DEL_TIMER
);
276 spin_unlock(&appldata_timer_lock
);
285 * appldata_interval_handler()
287 * Set (CPU) timer interval for collection of data (in milliseconds), show
288 * current timer interval.
291 appldata_interval_handler(ctl_table
*ctl
, int write
,
292 void __user
*buffer
, size_t *lenp
, loff_t
*ppos
)
297 if (!*lenp
|| *ppos
) {
302 len
= sprintf(buf
, "%i\n", appldata_interval
);
305 if (copy_to_user(buffer
, buf
, len
))
310 if (copy_from_user(buf
, buffer
, len
> sizeof(buf
) ? sizeof(buf
) : len
)) {
314 sscanf(buf
, "%i", &interval
);
319 spin_lock(&appldata_timer_lock
);
320 appldata_interval
= interval
;
321 __appldata_vtimer_setup(APPLDATA_MOD_TIMER
);
322 spin_unlock(&appldata_timer_lock
);
331 * appldata_generic_handler()
333 * Generic start/stop monitoring and DIAG, show status of
334 * monitoring (0 = not in process, 1 = in process)
337 appldata_generic_handler(ctl_table
*ctl
, int write
,
338 void __user
*buffer
, size_t *lenp
, loff_t
*ppos
)
340 struct appldata_ops
*ops
= NULL
, *tmp_ops
;
343 struct list_head
*lh
;
346 mutex_lock(&appldata_ops_mutex
);
347 list_for_each(lh
, &appldata_ops_list
) {
348 tmp_ops
= list_entry(lh
, struct appldata_ops
, list
);
349 if (&tmp_ops
->ctl_table
[2] == ctl
) {
354 mutex_unlock(&appldata_ops_mutex
);
358 if (!try_module_get(ops
->owner
)) { // protect this function
359 mutex_unlock(&appldata_ops_mutex
);
362 mutex_unlock(&appldata_ops_mutex
);
364 if (!*lenp
|| *ppos
) {
366 module_put(ops
->owner
);
370 len
= sprintf(buf
, ops
->active
? "1\n" : "0\n");
373 if (copy_to_user(buffer
, buf
, len
)) {
374 module_put(ops
->owner
);
380 if (copy_from_user(buf
, buffer
,
381 len
> sizeof(buf
) ? sizeof(buf
) : len
)) {
382 module_put(ops
->owner
);
386 mutex_lock(&appldata_ops_mutex
);
387 if ((buf
[0] == '1') && (ops
->active
== 0)) {
388 // protect work queue callback
389 if (!try_module_get(ops
->owner
)) {
390 mutex_unlock(&appldata_ops_mutex
);
391 module_put(ops
->owner
);
394 ops
->callback(ops
->data
); // init record
395 rc
= appldata_diag(ops
->record_nr
,
396 APPLDATA_START_INTERVAL_REC
,
397 (unsigned long) ops
->data
, ops
->size
,
400 pr_err("Starting the data collection for %s "
401 "failed with rc=%d\n", ops
->name
, rc
);
402 module_put(ops
->owner
);
405 } else if ((buf
[0] == '0') && (ops
->active
== 1)) {
407 rc
= appldata_diag(ops
->record_nr
, APPLDATA_STOP_REC
,
408 (unsigned long) ops
->data
, ops
->size
,
411 pr_err("Stopping the data collection for %s "
412 "failed with rc=%d\n", ops
->name
, rc
);
413 module_put(ops
->owner
);
415 mutex_unlock(&appldata_ops_mutex
);
419 module_put(ops
->owner
);
423 /*************************** /proc stuff <END> *******************************/
426 /************************* module-ops management *****************************/
428 * appldata_register_ops()
430 * update ops list, register /proc/sys entries
432 int appldata_register_ops(struct appldata_ops
*ops
)
434 if (ops
->size
> APPLDATA_MAX_REC_SIZE
)
437 ops
->ctl_table
= kzalloc(4 * sizeof(struct ctl_table
), GFP_KERNEL
);
441 mutex_lock(&appldata_ops_mutex
);
442 list_add(&ops
->list
, &appldata_ops_list
);
443 mutex_unlock(&appldata_ops_mutex
);
445 ops
->ctl_table
[0].procname
= appldata_proc_name
;
446 ops
->ctl_table
[0].maxlen
= 0;
447 ops
->ctl_table
[0].mode
= S_IRUGO
| S_IXUGO
;
448 ops
->ctl_table
[0].child
= &ops
->ctl_table
[2];
450 ops
->ctl_table
[2].procname
= ops
->name
;
451 ops
->ctl_table
[2].mode
= S_IRUGO
| S_IWUSR
;
452 ops
->ctl_table
[2].proc_handler
= appldata_generic_handler
;
453 ops
->ctl_table
[2].data
= ops
;
455 ops
->sysctl_header
= register_sysctl_table(ops
->ctl_table
);
456 if (!ops
->sysctl_header
)
460 mutex_lock(&appldata_ops_mutex
);
461 list_del(&ops
->list
);
462 mutex_unlock(&appldata_ops_mutex
);
463 kfree(ops
->ctl_table
);
468 * appldata_unregister_ops()
470 * update ops list, unregister /proc entries, stop DIAG if necessary
472 void appldata_unregister_ops(struct appldata_ops
*ops
)
474 mutex_lock(&appldata_ops_mutex
);
475 list_del(&ops
->list
);
476 mutex_unlock(&appldata_ops_mutex
);
477 unregister_sysctl_table(ops
->sysctl_header
);
478 kfree(ops
->ctl_table
);
480 /********************** module-ops management <END> **************************/
483 /**************************** suspend / resume *******************************/
484 static int appldata_freeze(struct device
*dev
)
486 struct appldata_ops
*ops
;
488 struct list_head
*lh
;
491 spin_lock(&appldata_timer_lock
);
492 if (appldata_timer_active
) {
493 __appldata_vtimer_setup(APPLDATA_DEL_TIMER
);
494 appldata_timer_suspended
= 1;
496 spin_unlock(&appldata_timer_lock
);
499 mutex_lock(&appldata_ops_mutex
);
500 list_for_each(lh
, &appldata_ops_list
) {
501 ops
= list_entry(lh
, struct appldata_ops
, list
);
502 if (ops
->active
== 1) {
503 rc
= appldata_diag(ops
->record_nr
, APPLDATA_STOP_REC
,
504 (unsigned long) ops
->data
, ops
->size
,
507 pr_err("Stopping the data collection for %s "
508 "failed with rc=%d\n", ops
->name
, rc
);
511 mutex_unlock(&appldata_ops_mutex
);
515 static int appldata_restore(struct device
*dev
)
517 struct appldata_ops
*ops
;
519 struct list_head
*lh
;
522 spin_lock(&appldata_timer_lock
);
523 if (appldata_timer_suspended
) {
524 __appldata_vtimer_setup(APPLDATA_ADD_TIMER
);
525 appldata_timer_suspended
= 0;
527 spin_unlock(&appldata_timer_lock
);
530 mutex_lock(&appldata_ops_mutex
);
531 list_for_each(lh
, &appldata_ops_list
) {
532 ops
= list_entry(lh
, struct appldata_ops
, list
);
533 if (ops
->active
== 1) {
534 ops
->callback(ops
->data
); // init record
535 rc
= appldata_diag(ops
->record_nr
,
536 APPLDATA_START_INTERVAL_REC
,
537 (unsigned long) ops
->data
, ops
->size
,
540 pr_err("Starting the data collection for %s "
541 "failed with rc=%d\n", ops
->name
, rc
);
545 mutex_unlock(&appldata_ops_mutex
);
/* thaw is identical to restore: restart timers and data collection */
static int appldata_thaw(struct device *dev)
{
	return appldata_restore(dev);
}
554 static const struct dev_pm_ops appldata_pm_ops
= {
555 .freeze
= appldata_freeze
,
556 .thaw
= appldata_thaw
,
557 .restore
= appldata_restore
,
560 static struct platform_driver appldata_pdrv
= {
563 .owner
= THIS_MODULE
,
564 .pm
= &appldata_pm_ops
,
567 /************************* suspend / resume <END> ****************************/
570 /******************************* init / exit *********************************/
572 static void __cpuinit
appldata_online_cpu(int cpu
)
574 init_virt_timer(&per_cpu(appldata_timer
, cpu
));
575 per_cpu(appldata_timer
, cpu
).function
= appldata_timer_function
;
576 per_cpu(appldata_timer
, cpu
).data
= (unsigned long)
578 atomic_inc(&appldata_expire_count
);
579 spin_lock(&appldata_timer_lock
);
580 __appldata_vtimer_setup(APPLDATA_MOD_TIMER
);
581 spin_unlock(&appldata_timer_lock
);
584 static void __cpuinit
appldata_offline_cpu(int cpu
)
586 del_virt_timer(&per_cpu(appldata_timer
, cpu
));
587 if (atomic_dec_and_test(&appldata_expire_count
)) {
588 atomic_set(&appldata_expire_count
, num_online_cpus());
589 queue_work(appldata_wq
, &appldata_work
);
591 spin_lock(&appldata_timer_lock
);
592 __appldata_vtimer_setup(APPLDATA_MOD_TIMER
);
593 spin_unlock(&appldata_timer_lock
);
596 static int __cpuinit
appldata_cpu_notify(struct notifier_block
*self
,
597 unsigned long action
,
602 case CPU_ONLINE_FROZEN
:
603 appldata_online_cpu((long) hcpu
);
606 case CPU_DEAD_FROZEN
:
607 appldata_offline_cpu((long) hcpu
);
615 static struct notifier_block __cpuinitdata appldata_nb
= {
616 .notifier_call
= appldata_cpu_notify
,
622 * init timer, register /proc entries
624 static int __init
appldata_init(void)
628 rc
= platform_driver_register(&appldata_pdrv
);
632 appldata_pdev
= platform_device_register_simple("appldata", -1, NULL
,
634 if (IS_ERR(appldata_pdev
)) {
635 rc
= PTR_ERR(appldata_pdev
);
638 appldata_wq
= create_singlethread_workqueue("appldata");
645 for_each_online_cpu(i
)
646 appldata_online_cpu(i
);
649 /* Register cpu hotplug notifier */
650 register_hotcpu_notifier(&appldata_nb
);
652 appldata_sysctl_header
= register_sysctl_table(appldata_dir_table
);
656 platform_device_unregister(appldata_pdev
);
658 platform_driver_unregister(&appldata_pdrv
);
662 __initcall(appldata_init
);
664 /**************************** init / exit <END> ******************************/
EXPORT_SYMBOL_GPL(appldata_register_ops);
EXPORT_SYMBOL_GPL(appldata_unregister_ops);
EXPORT_SYMBOL_GPL(appldata_diag);

/* re-exported for the appldata gathering modules */
#ifdef CONFIG_SWAP
EXPORT_SYMBOL_GPL(si_swapinfo);
#endif
EXPORT_SYMBOL_GPL(nr_threads);
EXPORT_SYMBOL_GPL(nr_running);
EXPORT_SYMBOL_GPL(nr_iowait);