/*
 * drivers/s390/char/vmlogrdr.c
 *	character device driver for reading z/VM system service records
 *
 *	Copyright IBM Corp. 2004, 2009
 *	character device driver for reading z/VM system service records,
 *	Version 1.0
 *	Author(s): Xenia Tkatschow <xenia@us.ibm.com>
 *		   Stefan Weinhuber <wein@de.ibm.com>
 */

#define KMSG_COMPONENT "vmlogrdr"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <asm/atomic.h>
#include <asm/uaccess.h>
#include <asm/cpcmd.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <net/iucv/iucv.h>
#include <linux/kmod.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/smp_lock.h>
#include <linux/string.h>

MODULE_AUTHOR
	("(C) 2004 IBM Corporation by Xenia Tkatschow (xenia@us.ibm.com)\n"
	 "            Stefan Weinhuber (wein@de.ibm.com)");
MODULE_DESCRIPTION ("Character device driver for reading z/VM "
		    "system service records.");
MODULE_LICENSE("GPL");

/*
 * The size of the buffer for iucv data transfer is one page,
 * but in addition to the data we read from iucv we also
 * place an integer and some characters into that buffer,
 * so the maximum size for record data is a little less than
 * one page.
 */
#define NET_BUFFER_SIZE	(PAGE_SIZE - sizeof(int) - sizeof(FENCE))
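
/*
 * Resulting buffer layout, as filled in by vmlogrdr_receive_data():
 *   [ int: record length + sizeof(FENCE) | record data | FENCE ("EOR") ]
 */
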
/*
 * The elements that are concurrently accessed by bottom halves are
 * connection_established, iucv_path_severed, local_interrupt_buffer
 * and receive_ready. The first three can be protected by
 * priv_lock.  receive_ready is atomic, so it can be incremented and
 * decremented without holding a lock.
 * The variable dev_in_use needs to be protected by the lock, since
 * it's a flag used by open to make sure that the device is opened only
 * by one user at the same time.
 */
struct vmlogrdr_priv_t {
	char system_service[8];
	char internal_name[8];
	char recording_name[8];
	struct iucv_path *path;
	int connection_established;
	int iucv_path_severed;
	struct iucv_message local_interrupt_buffer;
	atomic_t receive_ready;
	int minor_num;
	char *buffer;
	char *current_position;
	int remaining;
	ulong residual_length;
	int buffer_free;
	int dev_in_use; /* 1: already opened, 0: not opened*/
	spinlock_t priv_lock;
	struct device *device;
	struct device *class_device;
	int autorecording;
	int autopurge;
};

/*
 * File operation structure for vmlogrdr devices
 */
static int vmlogrdr_open(struct inode *, struct file *);
static int vmlogrdr_release(struct inode *, struct file *);
static ssize_t vmlogrdr_read (struct file *filp, char __user *data,
			      size_t count, loff_t *ppos);

static const struct file_operations vmlogrdr_fops = {
	.owner   = THIS_MODULE,
	.open    = vmlogrdr_open,
	.release = vmlogrdr_release,
	.read    = vmlogrdr_read,
};

static void vmlogrdr_iucv_path_complete(struct iucv_path *, u8 ipuser[16]);
static void vmlogrdr_iucv_path_severed(struct iucv_path *, u8 ipuser[16]);
static void vmlogrdr_iucv_message_pending(struct iucv_path *,
					  struct iucv_message *);

static struct iucv_handler vmlogrdr_iucv_handler = {
	.path_complete	 = vmlogrdr_iucv_path_complete,
	.path_severed	 = vmlogrdr_iucv_path_severed,
	.message_pending = vmlogrdr_iucv_message_pending,
};

static DECLARE_WAIT_QUEUE_HEAD(conn_wait_queue);
static DECLARE_WAIT_QUEUE_HEAD(read_wait_queue);

/*
 * pointer to system service private structure
 * minor number 0 --> logrec
 * minor number 1 --> account
 * minor number 2 --> symptom
 */
static struct vmlogrdr_priv_t sys_ser[] = {
	{ .system_service = "*LOGREC ",
	  .internal_name  = "logrec",
	  .recording_name = "EREP",
	  .minor_num      = 0,
	  .buffer_free    = 1,
	  .priv_lock      = __SPIN_LOCK_UNLOCKED(sys_ser[0].priv_lock),
	  .autorecording  = 1,
	  .autopurge      = 1,
	},
	{ .system_service = "*ACCOUNT",
	  .internal_name  = "account",
	  .recording_name = "ACCOUNT",
	  .minor_num      = 1,
	  .buffer_free    = 1,
	  .priv_lock      = __SPIN_LOCK_UNLOCKED(sys_ser[1].priv_lock),
	  .autorecording  = 1,
	  .autopurge      = 1,
	},
	{ .system_service = "*SYMPTOM",
	  .internal_name  = "symptom",
	  .recording_name = "SYMPTOM",
	  .minor_num      = 2,
	  .buffer_free    = 1,
	  .priv_lock      = __SPIN_LOCK_UNLOCKED(sys_ser[2].priv_lock),
	  .autorecording  = 1,
	  .autopurge      = 1,
	}
};

#define MAXMINOR  (sizeof(sys_ser)/sizeof(struct vmlogrdr_priv_t))

static char FENCE[] = {"EOR"};
static int vmlogrdr_major = 0;
static struct cdev *vmlogrdr_cdev = NULL;
static int recording_class_AB;

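/*
 * IUCV callbacks registered through vmlogrdr_iucv_handler. They run in
 * bottom-half context, so they only update the connection state under
 * priv_lock and wake up waiters on conn_wait_queue / read_wait_queue.
 */
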
static void vmlogrdr_iucv_path_complete(struct iucv_path *path, u8 ipuser[16])
{
	struct vmlogrdr_priv_t *logptr = path->private;

	spin_lock(&logptr->priv_lock);
	logptr->connection_established = 1;
	spin_unlock(&logptr->priv_lock);
	wake_up(&conn_wait_queue);
}

static void vmlogrdr_iucv_path_severed(struct iucv_path *path, u8 ipuser[16])
{
	struct vmlogrdr_priv_t *logptr = path->private;
	u8 reason = (u8) ipuser[8];

	pr_err("vmlogrdr: connection severed with reason %i\n", reason);

	iucv_path_sever(path, NULL);
	kfree(path);
	logptr->path = NULL;

	spin_lock(&logptr->priv_lock);
	logptr->connection_established = 0;
	logptr->iucv_path_severed = 1;
	spin_unlock(&logptr->priv_lock);

	wake_up(&conn_wait_queue);
	/* just in case we're sleeping waiting for a record */
	wake_up_interruptible(&read_wait_queue);
}

static void vmlogrdr_iucv_message_pending(struct iucv_path *path,
					  struct iucv_message *msg)
{
	struct vmlogrdr_priv_t *logptr = path->private;

	/*
	 * This function is the bottom half so it should be quick.
	 * Copy the external interrupt data into our local eib and increment
	 * the usage count
	 */
	spin_lock(&logptr->priv_lock);
	memcpy(&logptr->local_interrupt_buffer, msg, sizeof(*msg));
	atomic_inc(&logptr->receive_ready);
	spin_unlock(&logptr->priv_lock);
	wake_up_interruptible(&read_wait_queue);
}

static int vmlogrdr_get_recording_class_AB(void)
{
	char cp_command[] = "QUERY COMMAND RECORDING ";
	char cp_response[80];
	char *tail;
	int len, i;

	cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
	len = strnlen(cp_response, sizeof(cp_response));
	/* now the parsing */
	tail = strnchr(cp_response, len, '=');
	if (!tail)
		return 0;
	tail++;
	if (!strncmp("ANY", tail, 3))
		return 1;
	if (!strncmp("NONE", tail, 4))
		return 0;
	/*
	 * expect comma separated list of classes here, if one of them
	 * is A or B return 1 otherwise 0
	 */
	for (i = tail - cp_response; i < len; i++)
		if (cp_response[i] == 'A' || cp_response[i] == 'B')
			return 1;
	return 0;
}

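/*
 * Switch CP RECORDING on or off for the system service behind logptr.
 * action != 0 turns recording on, otherwise off; if purge is set, any
 * records already queued for the service are purged first.
 */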
static int vmlogrdr_recording(struct vmlogrdr_priv_t *logptr,
			      int action, int purge)
{
	char cp_command[80];
	char cp_response[160];
	char *onoff, *qid_string;

	memset(cp_command, 0x00, sizeof(cp_command));
	memset(cp_response, 0x00, sizeof(cp_response));

	onoff = ((action == 1) ? "ON" : "OFF");
	qid_string = ((recording_class_AB == 1) ? " QID * " : "");

	/*
	 * The recording command needs to be called with option QID
	 * for guests that have privilege classes A or B.
	 * Purging has to be done as a separate step, because recording
	 * can't be switched on as long as records are on the queue.
	 * Doing both at the same time doesn't work.
	 */
	if (purge) {
		snprintf(cp_command, sizeof(cp_command),
			 "RECORDING %s PURGE %s",
			 logptr->recording_name,
			 qid_string);
		cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
	}

	memset(cp_command, 0x00, sizeof(cp_command));
	memset(cp_response, 0x00, sizeof(cp_response));
	snprintf(cp_command, sizeof(cp_command), "RECORDING %s %s %s",
		 logptr->recording_name,
		 onoff,
		 qid_string);

	cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
	/* The recording command will usually answer with 'Command complete'
	 * on success, but when the specific service was never connected
	 * before then there might be an additional informational message
	 * 'HCPCRC8072I Recording entry not found' before the
	 * 'Command complete'. So I use strstr rather than strncmp.
	 */
	if (strstr(cp_response, "Command complete"))
		return 0;
	else
		return -EIO;
}

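/*
 * open() for one of the vmlogrdr minors: mark the device as in use,
 * optionally start recording (autorecording), then connect an IUCV path
 * to the corresponding system service and wait until the connection is
 * either established or severed.
 */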
static int vmlogrdr_open (struct inode *inode, struct file *filp)
{
	int dev_num = 0;
	struct vmlogrdr_priv_t *logptr = NULL;
	int connect_rc = 0;
	int ret;

	dev_num = iminor(inode);
	if (dev_num >= MAXMINOR)	/* valid minors are 0 .. MAXMINOR-1 */
		return -ENODEV;
	logptr = &sys_ser[dev_num];

	/*
	 * only allow the device to be opened for blocking reads
	 */
	if (filp->f_flags & O_NONBLOCK)
		return -ENOSYS;

	/* Be sure this device hasn't already been opened */
	spin_lock_bh(&logptr->priv_lock);
	if (logptr->dev_in_use) {
		spin_unlock_bh(&logptr->priv_lock);
		return -EBUSY;
	}
	logptr->dev_in_use = 1;
	logptr->connection_established = 0;
	logptr->iucv_path_severed = 0;
	atomic_set(&logptr->receive_ready, 0);
	logptr->buffer_free = 1;
	spin_unlock_bh(&logptr->priv_lock);

	/* set the file options */
	filp->private_data = logptr;
	filp->f_op = &vmlogrdr_fops;

	/* start recording for this service */
	if (logptr->autorecording) {
		ret = vmlogrdr_recording(logptr, 1, logptr->autopurge);
		if (ret)
			pr_warning("vmlogrdr: failed to start "
				   "recording automatically\n");
	}

	/* create connection to the system service */
	logptr->path = iucv_path_alloc(10, 0, GFP_KERNEL);
	if (!logptr->path)
		goto out_dev;
	connect_rc = iucv_path_connect(logptr->path, &vmlogrdr_iucv_handler,
				       logptr->system_service, NULL, NULL,
				       logptr);
	if (connect_rc) {
		pr_err("vmlogrdr: iucv connection to %s "
		       "failed with rc %i\n",
		       logptr->system_service, connect_rc);
		goto out_path;
	}

	/* We've issued the connect and now we must wait for a
	 * ConnectionComplete or ConnectionSevered interrupt
	 * before we can continue to process.
	 */
	wait_event(conn_wait_queue, (logptr->connection_established)
		   || (logptr->iucv_path_severed));
	if (logptr->iucv_path_severed)
		goto out_record;
	nonseekable_open(inode, filp);
	return 0;

out_record:
	if (logptr->autorecording)
		vmlogrdr_recording(logptr, 0, logptr->autopurge);
out_path:
	kfree(logptr->path);	/* kfree(NULL) is ok. */
	logptr->path = NULL;
out_dev:
	logptr->dev_in_use = 0;
	return -EIO;
}

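/*
 * release(): sever and free the IUCV path, stop recording again if it was
 * started automatically in open(), and mark the device as no longer in use.
 */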
static int vmlogrdr_release (struct inode *inode, struct file *filp)
{
	int ret;

	struct vmlogrdr_priv_t *logptr = filp->private_data;

	iucv_path_sever(logptr->path, NULL);
	kfree(logptr->path);
	logptr->path = NULL;
	if (logptr->autorecording) {
		ret = vmlogrdr_recording(logptr, 0, logptr->autopurge);
		if (ret)
			pr_warning("vmlogrdr: failed to stop "
				   "recording automatically\n");
	}
	logptr->dev_in_use = 0;

	return 0;
}

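/*
 * Pull the pending IUCV message (or the remainder of a partially received
 * record) into priv->buffer. For a new record the total length plus the
 * size of FENCE is stored in the first sizeof(int) bytes; once the whole
 * record has been received, the FENCE string is appended.
 */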
static int vmlogrdr_receive_data(struct vmlogrdr_priv_t *priv)
{
	int rc, *temp;
	/* we need to keep track of two data sizes here:
	 * The number of bytes we need to receive from iucv and
	 * the total number of bytes we actually write into the buffer.
	 */
	int user_data_count, iucv_data_count;
	char *buffer;

	if (atomic_read(&priv->receive_ready)) {
		spin_lock_bh(&priv->priv_lock);
		if (priv->residual_length) {
			/* receive second half of a record */
			iucv_data_count = priv->residual_length;
			user_data_count = 0;
			buffer = priv->buffer;
		} else {
			/* receive a new record:
			 * We need to return the total length of the record
			 * + size of FENCE in the first 4 bytes of the buffer.
			 */
			iucv_data_count = priv->local_interrupt_buffer.length;
			user_data_count = sizeof(int);
			temp = (int *)priv->buffer;
			*temp = iucv_data_count + sizeof(FENCE);
			buffer = priv->buffer + sizeof(int);
		}
		/*
		 * If the record is bigger than our buffer, we receive only
		 * a part of it. We can get the rest later.
		 */
		if (iucv_data_count > NET_BUFFER_SIZE)
			iucv_data_count = NET_BUFFER_SIZE;
		rc = iucv_message_receive(priv->path,
					  &priv->local_interrupt_buffer,
					  0, buffer, iucv_data_count,
					  &priv->residual_length);
		spin_unlock_bh(&priv->priv_lock);
		/* An rc of 5 indicates that the record was bigger than
		 * the buffer, which is OK for us. A 9 indicates that the
		 * record was purged before we could receive it.
		 */
		if (rc == 5)
			rc = 0;
		if (rc == 9)
			atomic_set(&priv->receive_ready, 0);
	} else {
		rc = 1;
	}
	if (!rc) {
		priv->buffer_free = 0;
		user_data_count += iucv_data_count;
		priv->current_position = priv->buffer;
		if (priv->residual_length == 0) {
			/* the whole record has been captured,
			 * now add the fence */
			atomic_dec(&priv->receive_ready);
			buffer = priv->buffer + user_data_count;
			memcpy(buffer, FENCE, sizeof(FENCE));
			user_data_count += sizeof(FENCE);
		}
		priv->remaining = user_data_count;
	}

	return rc;
}

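/*
 * read(): refill the buffer via vmlogrdr_receive_data() whenever it is
 * free, sleeping until a record arrives, then copy at most the rest of
 * the current record to user space and advance the read position.
 */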
static ssize_t vmlogrdr_read(struct file *filp, char __user *data,
			     size_t count, loff_t *ppos)
{
	int rc;
	struct vmlogrdr_priv_t *priv = filp->private_data;

	while (priv->buffer_free) {
		rc = vmlogrdr_receive_data(priv);
		if (rc) {
			rc = wait_event_interruptible(read_wait_queue,
					atomic_read(&priv->receive_ready));
			if (rc)
				return rc;
		}
	}
	/* copy only up to end of record */
	if (count > priv->remaining)
		count = priv->remaining;

	if (copy_to_user(data, priv->current_position, count))
		return -EFAULT;

	*ppos += count;
	priv->current_position += count;
	priv->remaining -= count;

	/* if all data has been transferred, set buffer free */
	if (priv->remaining == 0)
		priv->buffer_free = 1;

	return count;
}

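/*
 * sysfs interface: per-device attributes autopurge, purge, autorecording
 * and recording, plus a per-driver recording_status attribute.
 */
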
static ssize_t vmlogrdr_autopurge_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
	ssize_t ret = count;

	switch (buf[0]) {
	case '0':
		priv->autopurge = 0;
		break;
	case '1':
		priv->autopurge = 1;
		break;
	default:
		ret = -EINVAL;
	}
	return ret;
}


static ssize_t vmlogrdr_autopurge_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
	return sprintf(buf, "%u\n", priv->autopurge);
}


static DEVICE_ATTR(autopurge, 0644, vmlogrdr_autopurge_show,
		   vmlogrdr_autopurge_store);

static ssize_t vmlogrdr_purge_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{
	char cp_command[80];
	char cp_response[80];
	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);

	if (buf[0] != '1')
		return -EINVAL;

	memset(cp_command, 0x00, sizeof(cp_command));
	memset(cp_response, 0x00, sizeof(cp_response));

	/*
	 * The recording command needs to be called with option QID
	 * for guests that have privilege classes A or B.
	 * Other guests will not recognize the command and we have to
	 * issue the same command without the QID parameter.
	 */
	if (recording_class_AB)
		snprintf(cp_command, sizeof(cp_command),
			 "RECORDING %s PURGE QID * ",
			 priv->recording_name);
	else
		snprintf(cp_command, sizeof(cp_command),
			 "RECORDING %s PURGE ",
			 priv->recording_name);

	cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);

	return count;
}


static DEVICE_ATTR(purge, 0200, NULL, vmlogrdr_purge_store);

static ssize_t vmlogrdr_autorecording_store(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf, size_t count)
{
	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
	ssize_t ret = count;

	switch (buf[0]) {
	case '0':
		priv->autorecording = 0;
		break;
	case '1':
		priv->autorecording = 1;
		break;
	default:
		ret = -EINVAL;
	}
	return ret;
}


static ssize_t vmlogrdr_autorecording_show(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
	return sprintf(buf, "%u\n", priv->autorecording);
}


static DEVICE_ATTR(autorecording, 0644, vmlogrdr_autorecording_show,
		   vmlogrdr_autorecording_store);

static ssize_t vmlogrdr_recording_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
	ssize_t ret;

	switch (buf[0]) {
	case '0':
		ret = vmlogrdr_recording(priv, 0, 0);
		break;
	case '1':
		ret = vmlogrdr_recording(priv, 1, 0);
		break;
	default:
		ret = -EINVAL;
	}
	if (ret)
		return ret;
	else
		return count;
}


static DEVICE_ATTR(recording, 0200, NULL, vmlogrdr_recording_store);

static ssize_t vmlogrdr_recording_status_show(struct device_driver *driver,
					      char *buf)
{
	char cp_command[] = "QUERY RECORDING ";
	int len;

	cpcmd(cp_command, buf, 4096, NULL);
	len = strlen(buf);
	return len;
}


static DRIVER_ATTR(recording_status, 0444, vmlogrdr_recording_status_show,
		   NULL);

static struct attribute *vmlogrdr_attrs[] = {
	&dev_attr_autopurge.attr,
	&dev_attr_purge.attr,
	&dev_attr_autorecording.attr,
	&dev_attr_recording.attr,
	NULL,
};

static int vmlogrdr_pm_prepare(struct device *dev)
{
	int rc;
	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);

	rc = 0;
	if (priv) {
		spin_lock_bh(&priv->priv_lock);
		if (priv->dev_in_use)
			rc = -EBUSY;
		spin_unlock_bh(&priv->priv_lock);
	}
	if (rc)
		pr_err("vmlogrdr: device %s is busy. Refuse to suspend.\n",
		       dev_name(dev));
	return rc;
}


static const struct dev_pm_ops vmlogrdr_pm_ops = {
	.prepare = vmlogrdr_pm_prepare,
};

static struct attribute_group vmlogrdr_attr_group = {
	.attrs = vmlogrdr_attrs,
};

static struct class *vmlogrdr_class;
static struct device_driver vmlogrdr_driver = {
	.name = "vmlogrdr",
	.bus  = &iucv_bus,
	.pm   = &vmlogrdr_pm_ops,
};

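/*
 * Driver setup: register the IUCV handler and the generic driver, create
 * the recording_status driver attribute and the "vmlogrdr" class; the
 * error paths below unwind these steps in reverse order.
 */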
static int vmlogrdr_register_driver(void)
{
	int ret;

	/* Register with iucv driver */
	ret = iucv_register(&vmlogrdr_iucv_handler, 1);
	if (ret)
		goto out;

	ret = driver_register(&vmlogrdr_driver);
	if (ret)
		goto out_iucv;

	ret = driver_create_file(&vmlogrdr_driver,
				 &driver_attr_recording_status);
	if (ret)
		goto out_driver;

	vmlogrdr_class = class_create(THIS_MODULE, "vmlogrdr");
	if (IS_ERR(vmlogrdr_class)) {
		ret = PTR_ERR(vmlogrdr_class);
		vmlogrdr_class = NULL;
		goto out_attr;
	}
	return 0;

out_attr:
	driver_remove_file(&vmlogrdr_driver, &driver_attr_recording_status);
out_driver:
	driver_unregister(&vmlogrdr_driver);
out_iucv:
	iucv_unregister(&vmlogrdr_iucv_handler, 1);
out:
	return ret;
}

static void vmlogrdr_unregister_driver(void)
{
	class_destroy(vmlogrdr_class);
	vmlogrdr_class = NULL;
	driver_remove_file(&vmlogrdr_driver, &driver_attr_recording_status);
	driver_unregister(&vmlogrdr_driver);
	iucv_unregister(&vmlogrdr_iucv_handler, 1);
}

static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv)
{
	struct device *dev;
	int ret;

	dev = kzalloc(sizeof(struct device), GFP_KERNEL);
	if (dev) {
		dev_set_name(dev, priv->internal_name);
		dev->bus = &iucv_bus;
		dev->parent = iucv_root;
		dev->driver = &vmlogrdr_driver;
		dev_set_drvdata(dev, priv);
		/*
		 * The release function could be called after the
		 * module has been unloaded. Its _only_ task is to
		 * free the struct. Therefore, we specify kfree()
		 * directly here. (Probably a little bit obfuscating
		 * but legitimate ...).
		 */
		dev->release = (void (*)(struct device *))kfree;
	} else
		return -ENOMEM;
	ret = device_register(dev);
	if (ret) {
		put_device(dev);
		return ret;
	}

	ret = sysfs_create_group(&dev->kobj, &vmlogrdr_attr_group);
	if (ret) {
		device_unregister(dev);
		return ret;
	}
	priv->class_device = device_create(vmlogrdr_class, dev,
					   MKDEV(vmlogrdr_major,
						 priv->minor_num),
					   priv, "%s", dev_name(dev));
	if (IS_ERR(priv->class_device)) {
		ret = PTR_ERR(priv->class_device);
		priv->class_device = NULL;
		sysfs_remove_group(&dev->kobj, &vmlogrdr_attr_group);
		device_unregister(dev);
		return ret;
	}
	priv->device = dev;
	return 0;
}

static int vmlogrdr_unregister_device(struct vmlogrdr_priv_t *priv)
{
	device_destroy(vmlogrdr_class, MKDEV(vmlogrdr_major, priv->minor_num));
	if (priv->device != NULL) {
		sysfs_remove_group(&priv->device->kobj, &vmlogrdr_attr_group);
		device_unregister(priv->device);
		priv->device = NULL;
	}
	return 0;
}

static int vmlogrdr_register_cdev(dev_t dev)
{
	int rc = 0;

	vmlogrdr_cdev = cdev_alloc();
	if (!vmlogrdr_cdev)
		return -ENOMEM;
	vmlogrdr_cdev->owner = THIS_MODULE;
	vmlogrdr_cdev->ops = &vmlogrdr_fops;
	vmlogrdr_cdev->dev = dev;
	rc = cdev_add(vmlogrdr_cdev, vmlogrdr_cdev->dev, MAXMINOR);
	if (!rc)
		return 0;

	/* cleanup: cdev is not fully registered, no cdev_del here! */
	kobject_put(&vmlogrdr_cdev->kobj);
	vmlogrdr_cdev = NULL;
	return rc;
}

static void vmlogrdr_cleanup(void)
{
	int i;

	if (vmlogrdr_cdev) {
		cdev_del(vmlogrdr_cdev);
		vmlogrdr_cdev = NULL;
	}
	for (i = 0; i < MAXMINOR; ++i) {
		vmlogrdr_unregister_device(&sys_ser[i]);
		free_page((unsigned long)sys_ser[i].buffer);
	}
	vmlogrdr_unregister_driver();
	if (vmlogrdr_major) {
		unregister_chrdev_region(MKDEV(vmlogrdr_major, 0), MAXMINOR);
		vmlogrdr_major = 0;
	}
}

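/*
 * Module init: only loads under z/VM. Allocates the char device region,
 * registers driver and per-service devices (each with a one-page buffer),
 * and finally adds the cdev. On any failure, vmlogrdr_cleanup() tears down
 * whatever has already been set up.
 */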
static int __init vmlogrdr_init(void)
{
	int rc;
	int i;
	dev_t dev;

	if (!MACHINE_IS_VM) {
		pr_err("not running under VM, driver not loaded.\n");
		return -ENODEV;
	}

	recording_class_AB = vmlogrdr_get_recording_class_AB();

	rc = alloc_chrdev_region(&dev, 0, MAXMINOR, "vmlogrdr");
	if (rc)
		return rc;
	vmlogrdr_major = MAJOR(dev);

	rc = vmlogrdr_register_driver();
	if (rc)
		goto cleanup;

	for (i = 0; i < MAXMINOR; ++i) {
		sys_ser[i].buffer = (char *) get_zeroed_page(GFP_KERNEL);
		if (!sys_ser[i].buffer) {
			rc = -ENOMEM;
			break;
		}
		sys_ser[i].current_position = sys_ser[i].buffer;
		rc = vmlogrdr_register_device(&sys_ser[i]);
		if (rc)
			break;
	}
	if (rc)
		goto cleanup;

	rc = vmlogrdr_register_cdev(dev);
	if (rc)
		goto cleanup;
	return 0;

cleanup:
	vmlogrdr_cleanup();
	return rc;
}

static void __exit vmlogrdr_exit(void)
{
	vmlogrdr_cleanup();
	return;
}


module_init(vmlogrdr_init);
module_exit(vmlogrdr_exit);