/*
 * Linux device interface for the IPMI message handler.
 *
 * Author: MontaVista Software, Inc.
 *         Corey Minyard <minyard@mvista.com>
 *
 * Copyright 2002 MontaVista Software Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 675 Mass Ave, Cambridge, MA 02139, USA.
 */
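
/*
 * Usage sketch (illustrative only, not part of the driver): a userspace
 * program would typically talk to this interface roughly as follows,
 * using the request/response structures from <linux/ipmi.h>.  The device
 * path and the order of steps below are assumptions for the example:
 *
 *	int fd = open("/dev/ipmi0", O_RDWR);
 *	struct ipmi_req req;
 *	// fill in req.addr, req.addr_len, req.msgid and req.msg here
 *	ioctl(fd, IPMICTL_SEND_COMMAND, &req);
 *	// wait for the response (e.g. with poll/select), then fetch it
 *	struct ipmi_recv recv;
 *	// point recv.addr and recv.msg.data at caller-owned buffers and
 *	// set recv.addr_len / recv.msg.data_len to their sizes
 *	ioctl(fd, IPMICTL_RECEIVE_MSG_TRUNC, &recv);
 */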
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/errno.h>
#include <asm/system.h>
#include <linux/sched.h>
#include <linux/poll.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/ipmi.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/compat.h>
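
/*
 * Every open of the device gets one of these.  The receive handler queues
 * incoming messages on recv_msgs (under recv_msg_lock) and the receive
 * ioctls below pull them back off, so all state is per open file and is
 * never shared between users.
 */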
struct ipmi_file_private
{
        ipmi_user_t          user;
        spinlock_t           recv_msg_lock;
        struct list_head     recv_msgs;
        struct fasync_struct *fasync_queue;
        wait_queue_head_t    wait;
        struct mutex         recv_mutex;
        int                  default_retries;
        unsigned int         default_retry_time_ms;
};

static void file_receive_handler(struct ipmi_recv_msg *msg,
                                 void                 *handler_data)
{
        struct ipmi_file_private *priv = handler_data;
        int                      was_empty;
        unsigned long            flags;

        spin_lock_irqsave(&(priv->recv_msg_lock), flags);

        was_empty = list_empty(&(priv->recv_msgs));
        list_add_tail(&(msg->link), &(priv->recv_msgs));

        if (was_empty) {
                wake_up_interruptible(&priv->wait);
                kill_fasync(&priv->fasync_queue, SIGIO, POLL_IN);
        }

        spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);
}

static unsigned int ipmi_poll(struct file *file, poll_table *wait)
{
        struct ipmi_file_private *priv = file->private_data;
        unsigned int             mask = 0;
        unsigned long            flags;

        poll_wait(file, &priv->wait, wait);

        spin_lock_irqsave(&priv->recv_msg_lock, flags);

        if (!list_empty(&(priv->recv_msgs)))
                mask |= (POLLIN | POLLRDNORM);

        spin_unlock_irqrestore(&priv->recv_msg_lock, flags);

        return mask;
}

static int ipmi_fasync(int fd, struct file *file, int on)
{
        struct ipmi_file_private *priv = file->private_data;
        int                      result;

        result = fasync_helper(fd, file, on, &priv->fasync_queue);

        return result;
}

static struct ipmi_user_hndl ipmi_hndlrs =
{
        .ipmi_recv_hndl = file_receive_handler,
};

static int ipmi_open(struct inode *inode, struct file *file)
{
        int                      if_num = iminor(inode);
        int                      rv;
        struct ipmi_file_private *priv;

        priv = kmalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        rv = ipmi_create_user(if_num,
                              &ipmi_hndlrs,
                              priv,
                              &(priv->user));
        if (rv) {
                kfree(priv);
                return rv;
        }

        file->private_data = priv;

        spin_lock_init(&(priv->recv_msg_lock));
        INIT_LIST_HEAD(&(priv->recv_msgs));
        init_waitqueue_head(&priv->wait);
        priv->fasync_queue = NULL;
        mutex_init(&priv->recv_mutex);

        /* Use the low-level defaults. */
        priv->default_retries = -1;
        priv->default_retry_time_ms = 0;

        return 0;
}

static int ipmi_release(struct inode *inode, struct file *file)
{
        struct ipmi_file_private *priv = file->private_data;
        int                      rv;

        rv = ipmi_destroy_user(priv->user);
        if (rv)
                return rv;

        ipmi_fasync(-1, file, 0);

        /* FIXME - free the messages in the list. */
        kfree(priv);

        return 0;
}

static int handle_send_req(ipmi_user_t     user,
                           struct ipmi_req *req,
                           int             retries,
                           unsigned int    retry_time_ms)
{
        int                    rv;
        struct ipmi_addr       addr;
        struct kernel_ipmi_msg msg;

        if (req->addr_len > sizeof(struct ipmi_addr))
                return -EINVAL;

        if (copy_from_user(&addr, req->addr, req->addr_len))
                return -EFAULT;

        msg.netfn = req->msg.netfn;
        msg.cmd = req->msg.cmd;
        msg.data_len = req->msg.data_len;
        msg.data = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
        if (!msg.data)
                return -ENOMEM;

        /* From here out we cannot return, we must jump to "out" for
           error exits to free msgdata. */

        rv = ipmi_validate_addr(&addr, req->addr_len);
        if (rv)
                goto out;

        if (req->msg.data != NULL) {
                if (req->msg.data_len > IPMI_MAX_MSG_LENGTH) {
                        rv = -EMSGSIZE;
                        goto out;
                }

                if (copy_from_user(msg.data,
                                   req->msg.data,
                                   req->msg.data_len)) {
                        rv = -EFAULT;
                        goto out;
                }
        } else {
                msg.data_len = 0;
        }

        rv = ipmi_request_settime(user,
                                  &addr,
                                  req->msgid,
                                  &msg,
                                  NULL,
                                  0,
                                  retries,
                                  retry_time_ms);
 out:
        kfree(msg.data);
        return rv;
}

static int ipmi_ioctl(struct inode  *inode,
                      struct file   *file,
                      unsigned int  cmd,
                      unsigned long data)
{
        int                      rv = -EINVAL;
        struct ipmi_file_private *priv = file->private_data;
        void __user *arg = (void __user *)data;

        switch (cmd) {
        case IPMICTL_SEND_COMMAND:
        {
                struct ipmi_req req;

                if (copy_from_user(&req, arg, sizeof(req))) {
                        rv = -EFAULT;
                        break;
                }

                rv = handle_send_req(priv->user,
                                     &req,
                                     priv->default_retries,
                                     priv->default_retry_time_ms);
                break;
        }

        case IPMICTL_SEND_COMMAND_SETTIME:
        {
                struct ipmi_req_settime req;

                if (copy_from_user(&req, arg, sizeof(req))) {
                        rv = -EFAULT;
                        break;
                }

                rv = handle_send_req(priv->user,
                                     &req.req,
                                     req.retries,
                                     req.retry_time_ms);
                break;
        }

        case IPMICTL_RECEIVE_MSG:
        case IPMICTL_RECEIVE_MSG_TRUNC:
        {
                struct ipmi_recv     rsp;
                int                  addr_len;
                struct list_head     *entry;
                struct ipmi_recv_msg *msg;
                unsigned long        flags;

                rv = 0;
                if (copy_from_user(&rsp, arg, sizeof(rsp))) {
                        rv = -EFAULT;
                        break;
                }

                /* We claim a mutex because we don't want two
                   users getting something from the queue at a time.
                   Since we have to release the spinlock before we can
                   copy the data to the user, it's possible another
                   user will grab something from the queue, too.  Then
                   the messages might get out of order if something
                   fails and the message gets put back onto the
                   queue.  This mutex prevents that problem. */
                mutex_lock(&priv->recv_mutex);

                /* Grab the message off the list. */
                spin_lock_irqsave(&(priv->recv_msg_lock), flags);
                if (list_empty(&(priv->recv_msgs))) {
                        spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);
                        rv = -EAGAIN;
                        goto recv_err;
                }
                entry = priv->recv_msgs.next;
                msg = list_entry(entry, struct ipmi_recv_msg, link);
                list_del(entry);
                spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);

                addr_len = ipmi_addr_length(msg->addr.addr_type);
                if (rsp.addr_len < addr_len) {
                        rv = -EINVAL;
                        goto recv_putback_on_err;
                }

                if (copy_to_user(rsp.addr, &(msg->addr), addr_len)) {
                        rv = -EFAULT;
                        goto recv_putback_on_err;
                }
                rsp.addr_len = addr_len;

                rsp.recv_type = msg->recv_type;
                rsp.msgid = msg->msgid;
                rsp.msg.netfn = msg->msg.netfn;
                rsp.msg.cmd = msg->msg.cmd;

                if (msg->msg.data_len > 0) {
                        if (rsp.msg.data_len < msg->msg.data_len) {
                                rv = -EMSGSIZE;
                                if (cmd == IPMICTL_RECEIVE_MSG_TRUNC) {
                                        msg->msg.data_len = rsp.msg.data_len;
                                } else {
                                        goto recv_putback_on_err;
                                }
                        }

                        if (copy_to_user(rsp.msg.data,
                                         msg->msg.data,
                                         msg->msg.data_len)) {
                                rv = -EFAULT;
                                goto recv_putback_on_err;
                        }
                        rsp.msg.data_len = msg->msg.data_len;
                } else {
                        rsp.msg.data_len = 0;
                }

                if (copy_to_user(arg, &rsp, sizeof(rsp))) {
                        rv = -EFAULT;
                        goto recv_putback_on_err;
                }

                mutex_unlock(&priv->recv_mutex);
                ipmi_free_recv_msg(msg);
                break;

        recv_putback_on_err:
                /* If we got an error, put the message back onto
                   the head of the queue. */
                spin_lock_irqsave(&(priv->recv_msg_lock), flags);
                list_add(entry, &(priv->recv_msgs));
                spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);
                mutex_unlock(&priv->recv_mutex);
                break;

        recv_err:
                mutex_unlock(&priv->recv_mutex);
                break;
        }

        case IPMICTL_REGISTER_FOR_CMD:
        {
                struct ipmi_cmdspec val;

                if (copy_from_user(&val, arg, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }

                rv = ipmi_register_for_cmd(priv->user, val.netfn, val.cmd);
                break;
        }

        case IPMICTL_UNREGISTER_FOR_CMD:
        {
                struct ipmi_cmdspec val;

                if (copy_from_user(&val, arg, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }

                rv = ipmi_unregister_for_cmd(priv->user, val.netfn, val.cmd);
                break;
        }

        case IPMICTL_SET_GETS_EVENTS_CMD:
        {
                int val;

                if (copy_from_user(&val, arg, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }

                rv = ipmi_set_gets_events(priv->user, val);
                break;
        }

        /* The next four are legacy, not per-channel. */
        case IPMICTL_SET_MY_ADDRESS_CMD:
        {
                unsigned int val;

                if (copy_from_user(&val, arg, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }

                rv = ipmi_set_my_address(priv->user, 0, val);
                break;
        }

        case IPMICTL_GET_MY_ADDRESS_CMD:
        {
                unsigned int  val;
                unsigned char rval;

                rv = ipmi_get_my_address(priv->user, 0, &rval);
                if (rv)
                        break;

                val = rval;

                if (copy_to_user(arg, &val, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }
                break;
        }

        case IPMICTL_SET_MY_LUN_CMD:
        {
                unsigned int val;

                if (copy_from_user(&val, arg, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }

                rv = ipmi_set_my_LUN(priv->user, 0, val);
                break;
        }

        case IPMICTL_GET_MY_LUN_CMD:
        {
                unsigned int  val;
                unsigned char rval;

                rv = ipmi_get_my_LUN(priv->user, 0, &rval);
                if (rv)
                        break;

                val = rval;

                if (copy_to_user(arg, &val, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }
                break;
        }

        case IPMICTL_SET_MY_CHANNEL_ADDRESS_CMD:
        {
                struct ipmi_channel_lun_address_set val;

                if (copy_from_user(&val, arg, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }

                return ipmi_set_my_address(priv->user, val.channel, val.value);
        }

        case IPMICTL_GET_MY_CHANNEL_ADDRESS_CMD:
        {
                struct ipmi_channel_lun_address_set val;

                if (copy_from_user(&val, arg, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }

                rv = ipmi_get_my_address(priv->user, val.channel, &val.value);
                if (rv)
                        break;

                if (copy_to_user(arg, &val, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }
                break;
        }

        case IPMICTL_SET_MY_CHANNEL_LUN_CMD:
        {
                struct ipmi_channel_lun_address_set val;

                if (copy_from_user(&val, arg, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }

                rv = ipmi_set_my_LUN(priv->user, val.channel, val.value);
                break;
        }

        case IPMICTL_GET_MY_CHANNEL_LUN_CMD:
        {
                struct ipmi_channel_lun_address_set val;

                if (copy_from_user(&val, arg, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }

                rv = ipmi_get_my_LUN(priv->user, val.channel, &val.value);
                if (rv)
                        break;

                if (copy_to_user(arg, &val, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }
                break;
        }

        case IPMICTL_SET_TIMING_PARMS_CMD:
        {
                struct ipmi_timing_parms parms;

                if (copy_from_user(&parms, arg, sizeof(parms))) {
                        rv = -EFAULT;
                        break;
                }

                priv->default_retries = parms.retries;
                priv->default_retry_time_ms = parms.retry_time_ms;
                rv = 0;
                break;
        }

        case IPMICTL_GET_TIMING_PARMS_CMD:
        {
                struct ipmi_timing_parms parms;

                parms.retries = priv->default_retries;
                parms.retry_time_ms = priv->default_retry_time_ms;

                if (copy_to_user(arg, &parms, sizeof(parms))) {
                        rv = -EFAULT;
                        break;
                }

                rv = 0;
                break;
        }
        }

        return rv;
}

#ifdef CONFIG_COMPAT

/*
 * The following code contains code for supporting 32-bit compatible
 * ioctls on 64-bit kernels.  This allows running 32-bit apps on the
 * 64-bit kernel.
 */
#define COMPAT_IPMICTL_SEND_COMMAND \
        _IOR(IPMI_IOC_MAGIC, 13, struct compat_ipmi_req)
#define COMPAT_IPMICTL_SEND_COMMAND_SETTIME \
        _IOR(IPMI_IOC_MAGIC, 21, struct compat_ipmi_req_settime)
#define COMPAT_IPMICTL_RECEIVE_MSG \
        _IOWR(IPMI_IOC_MAGIC, 12, struct compat_ipmi_recv)
#define COMPAT_IPMICTL_RECEIVE_MSG_TRUNC \
        _IOWR(IPMI_IOC_MAGIC, 11, struct compat_ipmi_recv)
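
/*
 * The structures below mirror struct ipmi_msg, ipmi_req, ipmi_recv and
 * ipmi_req_settime from <linux/ipmi.h>, but hold every userspace pointer
 * and long in a fixed-size compat type (compat_uptr_t, compat_long_t) so
 * the layout matches what a 32-bit process passes in; compat_ptr() then
 * widens the pointers before the native handlers are reused.
 */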
struct compat_ipmi_msg {
        u8              netfn;
        u8              cmd;
        u16             data_len;
        compat_uptr_t   data;
};

struct compat_ipmi_req {
        compat_uptr_t           addr;
        compat_uint_t           addr_len;
        compat_long_t           msgid;
        struct compat_ipmi_msg  msg;
};

struct compat_ipmi_recv {
        compat_int_t            recv_type;
        compat_uptr_t           addr;
        compat_uint_t           addr_len;
        compat_long_t           msgid;
        struct compat_ipmi_msg  msg;
};

struct compat_ipmi_req_settime {
        struct compat_ipmi_req  req;
        compat_int_t            retries;
        compat_uint_t           retry_time_ms;
};

/*
 * Define some helper functions for copying IPMI data
 */
static long get_compat_ipmi_msg(struct ipmi_msg *p64,
                                struct compat_ipmi_msg __user *p32)
{
        compat_uptr_t tmp;

        if (!access_ok(VERIFY_READ, p32, sizeof(*p32)) ||
                        __get_user(p64->netfn, &p32->netfn) ||
                        __get_user(p64->cmd, &p32->cmd) ||
                        __get_user(p64->data_len, &p32->data_len) ||
                        __get_user(tmp, &p32->data))
                return -EFAULT;
        p64->data = compat_ptr(tmp);
        return 0;
}

static long put_compat_ipmi_msg(struct ipmi_msg *p64,
                                struct compat_ipmi_msg __user *p32)
{
        if (!access_ok(VERIFY_WRITE, p32, sizeof(*p32)) ||
                        __put_user(p64->netfn, &p32->netfn) ||
                        __put_user(p64->cmd, &p32->cmd) ||
                        __put_user(p64->data_len, &p32->data_len))
                return -EFAULT;
        return 0;
}

static long get_compat_ipmi_req(struct ipmi_req *p64,
                                struct compat_ipmi_req __user *p32)
{
        compat_uptr_t tmp;

        if (!access_ok(VERIFY_READ, p32, sizeof(*p32)) ||
                        __get_user(tmp, &p32->addr) ||
                        __get_user(p64->addr_len, &p32->addr_len) ||
                        __get_user(p64->msgid, &p32->msgid) ||
                        get_compat_ipmi_msg(&p64->msg, &p32->msg))
                return -EFAULT;
        p64->addr = compat_ptr(tmp);
        return 0;
}

static long get_compat_ipmi_req_settime(struct ipmi_req_settime *p64,
                        struct compat_ipmi_req_settime __user *p32)
{
        if (!access_ok(VERIFY_READ, p32, sizeof(*p32)) ||
                        get_compat_ipmi_req(&p64->req, &p32->req) ||
                        __get_user(p64->retries, &p32->retries) ||
                        __get_user(p64->retry_time_ms, &p32->retry_time_ms))
                return -EFAULT;
        return 0;
}

static long get_compat_ipmi_recv(struct ipmi_recv *p64,
                                 struct compat_ipmi_recv __user *p32)
{
        compat_uptr_t tmp;

        if (!access_ok(VERIFY_READ, p32, sizeof(*p32)) ||
                        __get_user(p64->recv_type, &p32->recv_type) ||
                        __get_user(tmp, &p32->addr) ||
                        __get_user(p64->addr_len, &p32->addr_len) ||
                        __get_user(p64->msgid, &p32->msgid) ||
                        get_compat_ipmi_msg(&p64->msg, &p32->msg))
                return -EFAULT;
        p64->addr = compat_ptr(tmp);
        return 0;
}

static long put_compat_ipmi_recv(struct ipmi_recv *p64,
                                 struct compat_ipmi_recv __user *p32)
{
        if (!access_ok(VERIFY_WRITE, p32, sizeof(*p32)) ||
                        __put_user(p64->recv_type, &p32->recv_type) ||
                        __put_user(p64->addr_len, &p32->addr_len) ||
                        __put_user(p64->msgid, &p32->msgid) ||
                        put_compat_ipmi_msg(&p64->msg, &p32->msg))
                return -EFAULT;
        return 0;
}

/*
 * Handle compatibility ioctls
 */
static long compat_ipmi_ioctl(struct file *filep, unsigned int cmd,
                              unsigned long arg)
{
        int rc;
        struct ipmi_file_private *priv = filep->private_data;

        switch (cmd) {
        case COMPAT_IPMICTL_SEND_COMMAND:
        {
                struct ipmi_req rp;

                if (get_compat_ipmi_req(&rp, compat_ptr(arg)))
                        return -EFAULT;

                return handle_send_req(priv->user, &rp,
                                priv->default_retries,
                                priv->default_retry_time_ms);
        }
        case COMPAT_IPMICTL_SEND_COMMAND_SETTIME:
        {
                struct ipmi_req_settime sp;

                if (get_compat_ipmi_req_settime(&sp, compat_ptr(arg)))
                        return -EFAULT;

                return handle_send_req(priv->user, &sp.req,
                                sp.retries, sp.retry_time_ms);
        }
        case COMPAT_IPMICTL_RECEIVE_MSG:
        case COMPAT_IPMICTL_RECEIVE_MSG_TRUNC:
        {
                struct ipmi_recv __user *precv64;
                struct ipmi_recv recv64;

                if (get_compat_ipmi_recv(&recv64, compat_ptr(arg)))
                        return -EFAULT;

                precv64 = compat_alloc_user_space(sizeof(recv64));
                if (copy_to_user(precv64, &recv64, sizeof(recv64)))
                        return -EFAULT;

                rc = ipmi_ioctl(filep->f_dentry->d_inode, filep,
                                ((cmd == COMPAT_IPMICTL_RECEIVE_MSG)
                                 ? IPMICTL_RECEIVE_MSG
                                 : IPMICTL_RECEIVE_MSG_TRUNC),
                                (unsigned long) precv64);
                if (rc != 0)
                        return rc;

                if (copy_from_user(&recv64, precv64, sizeof(recv64)))
                        return -EFAULT;

                if (put_compat_ipmi_recv(&recv64, compat_ptr(arg)))
                        return -EFAULT;

                return rc;
        }
        default:
                return ipmi_ioctl(filep->f_dentry->d_inode, filep, cmd, arg);
        }
}
#endif

static const struct file_operations ipmi_fops = {
        .owner          = THIS_MODULE,
        .ioctl          = ipmi_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = compat_ipmi_ioctl,
#endif
        .open           = ipmi_open,
        .release        = ipmi_release,
        .fasync         = ipmi_fasync,
        .poll           = ipmi_poll,
};

#define DEVICE_NAME     "ipmidev"

static int ipmi_major = 0;
module_param(ipmi_major, int, 0);
MODULE_PARM_DESC(ipmi_major, "Sets the major number of the IPMI device.  By"
                 " default, or if you set it to zero, it will choose the next"
                 " available device.  Setting it to -1 will disable the"
                 " interface.  Other values will set the major device number"
                 " to that value.");

/* Keep track of the devices that are registered. */
struct ipmi_reg_list {
        dev_t            dev;
        struct list_head link;
};
static LIST_HEAD(reg_list);
static DEFINE_MUTEX(reg_list_mutex);

static struct class *ipmi_class;

static void ipmi_new_smi(int if_num, struct device *device)
{
        dev_t dev = MKDEV(ipmi_major, if_num);
        struct ipmi_reg_list *entry;

        entry = kmalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry) {
                printk(KERN_ERR "ipmi_devintf: Unable to create the"
                       " ipmi class device link\n");
                return;
        }
        entry->dev = dev;

        mutex_lock(&reg_list_mutex);
        class_device_create(ipmi_class, NULL, dev, device, "ipmi%d", if_num);
        list_add(&entry->link, &reg_list);
        mutex_unlock(&reg_list_mutex);
}

static void ipmi_smi_gone(int if_num)
{
        dev_t dev = MKDEV(ipmi_major, if_num);
        struct ipmi_reg_list *entry;

        mutex_lock(&reg_list_mutex);
        list_for_each_entry(entry, &reg_list, link) {
                if (entry->dev == dev) {
                        list_del(&entry->link);
                        kfree(entry);
                        break;
                }
        }
        class_device_destroy(ipmi_class, dev);
        mutex_unlock(&reg_list_mutex);
}

static struct ipmi_smi_watcher smi_watcher =
{
        .owner    = THIS_MODULE,
        .new_smi  = ipmi_new_smi,
        .smi_gone = ipmi_smi_gone,
};

static __init int init_ipmi_devintf(void)
{
        int rv;

        if (ipmi_major < 0)
                return -EINVAL;

        printk(KERN_INFO "ipmi device interface\n");

        ipmi_class = class_create(THIS_MODULE, "ipmi");
        if (IS_ERR(ipmi_class)) {
                printk(KERN_ERR "ipmi: can't register device class\n");
                return PTR_ERR(ipmi_class);
        }

        rv = register_chrdev(ipmi_major, DEVICE_NAME, &ipmi_fops);
        if (rv < 0) {
                class_destroy(ipmi_class);
                printk(KERN_ERR "ipmi: can't get major %d\n", ipmi_major);
                return rv;
        }

        if (ipmi_major == 0) {
                ipmi_major = rv;
        }

        rv = ipmi_smi_watcher_register(&smi_watcher);
        if (rv) {
                unregister_chrdev(ipmi_major, DEVICE_NAME);
                class_destroy(ipmi_class);
                printk(KERN_WARNING "ipmi: can't register smi watcher\n");
                return rv;
        }

        return 0;
}
module_init(init_ipmi_devintf);

static __exit void cleanup_ipmi(void)
{
        struct ipmi_reg_list *entry, *entry2;

        mutex_lock(&reg_list_mutex);
        list_for_each_entry_safe(entry, entry2, &reg_list, link) {
                list_del(&entry->link);
                class_device_destroy(ipmi_class, entry->dev);
                kfree(entry);
        }
        mutex_unlock(&reg_list_mutex);
        class_destroy(ipmi_class);
        ipmi_smi_watcher_unregister(&smi_watcher);
        unregister_chrdev(ipmi_major, DEVICE_NAME);
}
module_exit(cleanup_ipmi);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
MODULE_DESCRIPTION("Linux device interface for the IPMI message handler.");