4 * Incoming and outgoing message routing for an IPMI interface.
6 * Author: MontaVista Software, Inc.
7 * Corey Minyard <minyard@mvista.com>
10 * Copyright 2002 MontaVista Software Inc.
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
18 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
19 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
23 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
24 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
25 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
26 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
27 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 * You should have received a copy of the GNU General Public License along
30 * with this program; if not, write to the Free Software Foundation, Inc.,
31 * 675 Mass Ave, Cambridge, MA 02139, USA.
34 #include <linux/module.h>
35 #include <linux/errno.h>
36 #include <asm/system.h>
37 #include <linux/sched.h>
38 #include <linux/poll.h>
39 #include <linux/spinlock.h>
40 #include <linux/mutex.h>
41 #include <linux/slab.h>
42 #include <linux/ipmi.h>
43 #include <linux/ipmi_smi.h>
44 #include <linux/notifier.h>
45 #include <linux/init.h>
46 #include <linux/proc_fs.h>
47 #include <linux/rcupdate.h>
49 #define PFX "IPMI message handler: "
51 #define IPMI_DRIVER_VERSION "39.0"
53 static struct ipmi_recv_msg
*ipmi_alloc_recv_msg(void);
54 static int ipmi_init_msghandler(void);
56 static int initialized
= 0;
59 static struct proc_dir_entry
*proc_ipmi_root
= NULL
;
60 #endif /* CONFIG_PROC_FS */
62 #define MAX_EVENTS_IN_QUEUE 25
64 /* Don't let a message sit in a queue forever, always time it with at lest
65 the max message timer. This is in milliseconds. */
66 #define MAX_MSG_TIMEOUT 60000
70 * The main "user" data structure.
74 struct list_head link
;
76 /* Set to "0" when the user is destroyed. */
81 /* The upper layer that handles receive messages. */
82 struct ipmi_user_hndl
*handler
;
85 /* The interface this user is bound to. */
88 /* Does this interface receive IPMI events? */
94 struct list_head link
;
102 * This is used to form a linked lised during mass deletion.
103 * Since this is in an RCU list, we cannot use the link above
104 * or change any data until the RCU period completes. So we
105 * use this next variable during mass deletion so we can have
106 * a list and don't have to wait and restart the search on
107 * every individual deletion of a command. */
108 struct cmd_rcvr
*next
;
113 unsigned int inuse
: 1;
114 unsigned int broadcast
: 1;
116 unsigned long timeout
;
117 unsigned long orig_timeout
;
118 unsigned int retries_left
;
120 /* To verify on an incoming send message response that this is
121 the message that the response is for, we keep a sequence id
122 and increment it every time we send a message. */
125 /* This is held so we can properly respond to the message on a
126 timeout, and it is used to hold the temporary data for
127 retransmission, too. */
128 struct ipmi_recv_msg
*recv_msg
;
131 /* Store the information in a msgid (long) to allow us to find a
132 sequence table entry from the msgid. */
/* Store the information in a msgid (long) to allow us to find a
   sequence table entry from the msgid.  The sequence number is 6
   bits (IPMI_IPMB_NUM_SEQ == 64) and the seqid is the low 26 bits.
   Note: the original code masked seq with 0xff on store (letting
   values > 63 spill past the 6-bit field) and masked seqid with
   0x3fffff (22 bits) on retrieval but 0x3ffffff (26 bits) on store;
   the masks are now consistent at 6/26 bits. */
#define STORE_SEQ_IN_MSGID(seq, seqid) \
	((((seq) & 0x3f) << 26) | ((seqid) & 0x3ffffff))

#define GET_SEQ_FROM_MSGID(msgid, seq, seqid) \
	do {								\
		seq = (((msgid) >> 26) & 0x3f);				\
		seqid = ((msgid) & 0x3ffffff);				\
	} while (0)

#define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3ffffff)
145 unsigned char medium
;
146 unsigned char protocol
;
148 /* My slave address. This is initialized to IPMI_BMC_SLAVE_ADDR,
149 but may be changed by the user. */
150 unsigned char address
;
152 /* My LUN. This should generally stay the SMS LUN, but just in
157 #ifdef CONFIG_PROC_FS
158 struct ipmi_proc_entry
161 struct ipmi_proc_entry
*next
;
167 struct platform_device
*dev
;
168 struct ipmi_device_id id
;
169 unsigned char guid
[16];
172 struct kref refcount
;
174 /* bmc device attributes */
175 struct device_attribute device_id_attr
;
176 struct device_attribute provides_dev_sdrs_attr
;
177 struct device_attribute revision_attr
;
178 struct device_attribute firmware_rev_attr
;
179 struct device_attribute version_attr
;
180 struct device_attribute add_dev_support_attr
;
181 struct device_attribute manufacturer_id_attr
;
182 struct device_attribute product_id_attr
;
183 struct device_attribute guid_attr
;
184 struct device_attribute aux_firmware_rev_attr
;
187 #define IPMI_IPMB_NUM_SEQ 64
188 #define IPMI_MAX_CHANNELS 16
191 /* What interface number are we? */
194 struct kref refcount
;
196 /* Used for a list of interfaces. */
197 struct list_head link
;
199 /* The list of upper layers that are using me. seq_lock
201 struct list_head users
;
203 /* Used for wake ups at startup. */
204 wait_queue_head_t waitq
;
206 struct bmc_device
*bmc
;
210 /* This is the lower-layer's sender routine. */
211 struct ipmi_smi_handlers
*handlers
;
214 #ifdef CONFIG_PROC_FS
215 /* A list of proc entries for this interface. This does not
216 need a lock, only one thread creates it and only one thread
218 spinlock_t proc_entry_lock
;
219 struct ipmi_proc_entry
*proc_entries
;
222 /* Driver-model device for the system interface. */
223 struct device
*si_dev
;
225 /* A table of sequence numbers for this interface. We use the
226 sequence numbers for IPMB messages that go out of the
227 interface to match them up with their responses. A routine
228 is called periodically to time the items in this list. */
230 struct seq_table seq_table
[IPMI_IPMB_NUM_SEQ
];
233 /* Messages that were delayed for some reason (out of memory,
234 for instance), will go in here to be processed later in a
235 periodic timer interrupt. */
236 spinlock_t waiting_msgs_lock
;
237 struct list_head waiting_msgs
;
239 /* The list of command receivers that are registered for commands
240 on this interface. */
241 struct mutex cmd_rcvrs_mutex
;
242 struct list_head cmd_rcvrs
;
244 /* Events that were queues because no one was there to receive
246 spinlock_t events_lock
; /* For dealing with event stuff. */
247 struct list_head waiting_events
;
248 unsigned int waiting_events_count
; /* How many events in queue? */
250 /* The event receiver for my BMC, only really used at panic
251 shutdown as a place to store this. */
252 unsigned char event_receiver
;
253 unsigned char event_receiver_lun
;
254 unsigned char local_sel_device
;
255 unsigned char local_event_generator
;
257 /* A cheap hack, if this is non-null and a message to an
258 interface comes in with a NULL user, call this routine with
259 it. Note that the message will still be freed by the
260 caller. This only works on the system interface. */
261 void (*null_user_handler
)(ipmi_smi_t intf
, struct ipmi_recv_msg
*msg
);
263 /* When we are scanning the channels for an SMI, this will
264 tell which channel we are scanning. */
267 /* Channel information */
268 struct ipmi_channel channels
[IPMI_MAX_CHANNELS
];
271 struct proc_dir_entry
*proc_dir
;
272 char proc_dir_name
[10];
274 spinlock_t counter_lock
; /* For making counters atomic. */
276 /* Commands we got that were invalid. */
277 unsigned int sent_invalid_commands
;
279 /* Commands we sent to the MC. */
280 unsigned int sent_local_commands
;
281 /* Responses from the MC that were delivered to a user. */
282 unsigned int handled_local_responses
;
283 /* Responses from the MC that were not delivered to a user. */
284 unsigned int unhandled_local_responses
;
286 /* Commands we sent out to the IPMB bus. */
287 unsigned int sent_ipmb_commands
;
288 /* Commands sent on the IPMB that had errors on the SEND CMD */
289 unsigned int sent_ipmb_command_errs
;
290 /* Each retransmit increments this count. */
291 unsigned int retransmitted_ipmb_commands
;
292 /* When a message times out (runs out of retransmits) this is
294 unsigned int timed_out_ipmb_commands
;
296 /* This is like above, but for broadcasts. Broadcasts are
297 *not* included in the above count (they are expected to
299 unsigned int timed_out_ipmb_broadcasts
;
301 /* Responses I have sent to the IPMB bus. */
302 unsigned int sent_ipmb_responses
;
304 /* The response was delivered to the user. */
305 unsigned int handled_ipmb_responses
;
306 /* The response had invalid data in it. */
307 unsigned int invalid_ipmb_responses
;
308 /* The response didn't have anyone waiting for it. */
309 unsigned int unhandled_ipmb_responses
;
311 /* Commands we sent out to the IPMB bus. */
312 unsigned int sent_lan_commands
;
313 /* Commands sent on the IPMB that had errors on the SEND CMD */
314 unsigned int sent_lan_command_errs
;
315 /* Each retransmit increments this count. */
316 unsigned int retransmitted_lan_commands
;
317 /* When a message times out (runs out of retransmits) this is
319 unsigned int timed_out_lan_commands
;
321 /* Responses I have sent to the IPMB bus. */
322 unsigned int sent_lan_responses
;
324 /* The response was delivered to the user. */
325 unsigned int handled_lan_responses
;
326 /* The response had invalid data in it. */
327 unsigned int invalid_lan_responses
;
328 /* The response didn't have anyone waiting for it. */
329 unsigned int unhandled_lan_responses
;
331 /* The command was delivered to the user. */
332 unsigned int handled_commands
;
333 /* The command had invalid data in it. */
334 unsigned int invalid_commands
;
335 /* The command didn't have anyone waiting for it. */
336 unsigned int unhandled_commands
;
338 /* Invalid data in an event. */
339 unsigned int invalid_events
;
340 /* Events that were received with the proper format. */
343 #define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev)
346 * The driver model view of the IPMI messaging driver.
348 static struct device_driver ipmidriver
= {
350 .bus
= &platform_bus_type
352 static DEFINE_MUTEX(ipmidriver_mutex
);
354 static struct list_head ipmi_interfaces
= LIST_HEAD_INIT(ipmi_interfaces
);
355 static DEFINE_MUTEX(ipmi_interfaces_mutex
);
357 /* List of watchers that want to know when smi's are added and
359 static struct list_head smi_watchers
= LIST_HEAD_INIT(smi_watchers
);
360 static DECLARE_RWSEM(smi_watchers_sem
);
363 static void free_recv_msg_list(struct list_head
*q
)
365 struct ipmi_recv_msg
*msg
, *msg2
;
367 list_for_each_entry_safe(msg
, msg2
, q
, link
) {
368 list_del(&msg
->link
);
369 ipmi_free_recv_msg(msg
);
373 static void free_smi_msg_list(struct list_head
*q
)
375 struct ipmi_smi_msg
*msg
, *msg2
;
377 list_for_each_entry_safe(msg
, msg2
, q
, link
) {
378 list_del(&msg
->link
);
379 ipmi_free_smi_msg(msg
);
383 static void clean_up_interface_data(ipmi_smi_t intf
)
386 struct cmd_rcvr
*rcvr
, *rcvr2
;
387 struct list_head list
;
389 free_smi_msg_list(&intf
->waiting_msgs
);
390 free_recv_msg_list(&intf
->waiting_events
);
392 /* Wholesale remove all the entries from the list in the
393 * interface and wait for RCU to know that none are in use. */
394 mutex_lock(&intf
->cmd_rcvrs_mutex
);
395 list_add_rcu(&list
, &intf
->cmd_rcvrs
);
396 list_del_rcu(&intf
->cmd_rcvrs
);
397 mutex_unlock(&intf
->cmd_rcvrs_mutex
);
400 list_for_each_entry_safe(rcvr
, rcvr2
, &list
, link
)
403 for (i
= 0; i
< IPMI_IPMB_NUM_SEQ
; i
++) {
404 if ((intf
->seq_table
[i
].inuse
)
405 && (intf
->seq_table
[i
].recv_msg
))
407 ipmi_free_recv_msg(intf
->seq_table
[i
].recv_msg
);
412 static void intf_free(struct kref
*ref
)
414 ipmi_smi_t intf
= container_of(ref
, struct ipmi_smi
, refcount
);
416 clean_up_interface_data(intf
);
420 struct watcher_entry
{
421 struct list_head link
;
425 int ipmi_smi_watcher_register(struct ipmi_smi_watcher
*watcher
)
428 struct list_head to_deliver
= LIST_HEAD_INIT(to_deliver
);
429 struct watcher_entry
*e
, *e2
;
431 mutex_lock(&ipmi_interfaces_mutex
);
433 list_for_each_entry_rcu(intf
, &ipmi_interfaces
, link
) {
434 if (intf
->intf_num
== -1)
436 e
= kmalloc(sizeof(*e
), GFP_KERNEL
);
439 e
->intf_num
= intf
->intf_num
;
440 list_add_tail(&e
->link
, &to_deliver
);
443 down_write(&smi_watchers_sem
);
444 list_add(&(watcher
->link
), &smi_watchers
);
445 up_write(&smi_watchers_sem
);
447 mutex_unlock(&ipmi_interfaces_mutex
);
449 list_for_each_entry_safe(e
, e2
, &to_deliver
, link
) {
451 watcher
->new_smi(e
->intf_num
, intf
->si_dev
);
459 list_for_each_entry_safe(e
, e2
, &to_deliver
, link
) {
466 int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher
*watcher
)
468 down_write(&smi_watchers_sem
);
469 list_del(&(watcher
->link
));
470 up_write(&smi_watchers_sem
);
475 call_smi_watchers(int i
, struct device
*dev
)
477 struct ipmi_smi_watcher
*w
;
479 down_read(&smi_watchers_sem
);
480 list_for_each_entry(w
, &smi_watchers
, link
) {
481 if (try_module_get(w
->owner
)) {
483 module_put(w
->owner
);
486 up_read(&smi_watchers_sem
);
490 ipmi_addr_equal(struct ipmi_addr
*addr1
, struct ipmi_addr
*addr2
)
492 if (addr1
->addr_type
!= addr2
->addr_type
)
495 if (addr1
->channel
!= addr2
->channel
)
498 if (addr1
->addr_type
== IPMI_SYSTEM_INTERFACE_ADDR_TYPE
) {
499 struct ipmi_system_interface_addr
*smi_addr1
500 = (struct ipmi_system_interface_addr
*) addr1
;
501 struct ipmi_system_interface_addr
*smi_addr2
502 = (struct ipmi_system_interface_addr
*) addr2
;
503 return (smi_addr1
->lun
== smi_addr2
->lun
);
506 if ((addr1
->addr_type
== IPMI_IPMB_ADDR_TYPE
)
507 || (addr1
->addr_type
== IPMI_IPMB_BROADCAST_ADDR_TYPE
))
509 struct ipmi_ipmb_addr
*ipmb_addr1
510 = (struct ipmi_ipmb_addr
*) addr1
;
511 struct ipmi_ipmb_addr
*ipmb_addr2
512 = (struct ipmi_ipmb_addr
*) addr2
;
514 return ((ipmb_addr1
->slave_addr
== ipmb_addr2
->slave_addr
)
515 && (ipmb_addr1
->lun
== ipmb_addr2
->lun
));
518 if (addr1
->addr_type
== IPMI_LAN_ADDR_TYPE
) {
519 struct ipmi_lan_addr
*lan_addr1
520 = (struct ipmi_lan_addr
*) addr1
;
521 struct ipmi_lan_addr
*lan_addr2
522 = (struct ipmi_lan_addr
*) addr2
;
524 return ((lan_addr1
->remote_SWID
== lan_addr2
->remote_SWID
)
525 && (lan_addr1
->local_SWID
== lan_addr2
->local_SWID
)
526 && (lan_addr1
->session_handle
527 == lan_addr2
->session_handle
)
528 && (lan_addr1
->lun
== lan_addr2
->lun
));
534 int ipmi_validate_addr(struct ipmi_addr
*addr
, int len
)
536 if (len
< sizeof(struct ipmi_system_interface_addr
)) {
540 if (addr
->addr_type
== IPMI_SYSTEM_INTERFACE_ADDR_TYPE
) {
541 if (addr
->channel
!= IPMI_BMC_CHANNEL
)
546 if ((addr
->channel
== IPMI_BMC_CHANNEL
)
547 || (addr
->channel
>= IPMI_MAX_CHANNELS
)
548 || (addr
->channel
< 0))
551 if ((addr
->addr_type
== IPMI_IPMB_ADDR_TYPE
)
552 || (addr
->addr_type
== IPMI_IPMB_BROADCAST_ADDR_TYPE
))
554 if (len
< sizeof(struct ipmi_ipmb_addr
)) {
560 if (addr
->addr_type
== IPMI_LAN_ADDR_TYPE
) {
561 if (len
< sizeof(struct ipmi_lan_addr
)) {
570 unsigned int ipmi_addr_length(int addr_type
)
572 if (addr_type
== IPMI_SYSTEM_INTERFACE_ADDR_TYPE
)
573 return sizeof(struct ipmi_system_interface_addr
);
575 if ((addr_type
== IPMI_IPMB_ADDR_TYPE
)
576 || (addr_type
== IPMI_IPMB_BROADCAST_ADDR_TYPE
))
578 return sizeof(struct ipmi_ipmb_addr
);
581 if (addr_type
== IPMI_LAN_ADDR_TYPE
)
582 return sizeof(struct ipmi_lan_addr
);
587 static void deliver_response(struct ipmi_recv_msg
*msg
)
590 ipmi_smi_t intf
= msg
->user_msg_data
;
593 /* Special handling for NULL users. */
594 if (intf
->null_user_handler
) {
595 intf
->null_user_handler(intf
, msg
);
596 spin_lock_irqsave(&intf
->counter_lock
, flags
);
597 intf
->handled_local_responses
++;
598 spin_unlock_irqrestore(&intf
->counter_lock
, flags
);
600 /* No handler, so give up. */
601 spin_lock_irqsave(&intf
->counter_lock
, flags
);
602 intf
->unhandled_local_responses
++;
603 spin_unlock_irqrestore(&intf
->counter_lock
, flags
);
605 ipmi_free_recv_msg(msg
);
607 ipmi_user_t user
= msg
->user
;
608 user
->handler
->ipmi_recv_hndl(msg
, user
->handler_data
);
612 /* Find the next sequence number not being used and add the given
613 message with the given timeout to the sequence table. This must be
614 called with the interface's seq_lock held. */
615 static int intf_next_seq(ipmi_smi_t intf
,
616 struct ipmi_recv_msg
*recv_msg
,
617 unsigned long timeout
,
626 for (i
= intf
->curr_seq
;
627 (i
+1)%IPMI_IPMB_NUM_SEQ
!= intf
->curr_seq
;
628 i
= (i
+1)%IPMI_IPMB_NUM_SEQ
)
630 if (!intf
->seq_table
[i
].inuse
)
634 if (!intf
->seq_table
[i
].inuse
) {
635 intf
->seq_table
[i
].recv_msg
= recv_msg
;
637 /* Start with the maximum timeout, when the send response
638 comes in we will start the real timer. */
639 intf
->seq_table
[i
].timeout
= MAX_MSG_TIMEOUT
;
640 intf
->seq_table
[i
].orig_timeout
= timeout
;
641 intf
->seq_table
[i
].retries_left
= retries
;
642 intf
->seq_table
[i
].broadcast
= broadcast
;
643 intf
->seq_table
[i
].inuse
= 1;
644 intf
->seq_table
[i
].seqid
= NEXT_SEQID(intf
->seq_table
[i
].seqid
);
646 *seqid
= intf
->seq_table
[i
].seqid
;
647 intf
->curr_seq
= (i
+1)%IPMI_IPMB_NUM_SEQ
;
655 /* Return the receive message for the given sequence number and
656 release the sequence number so it can be reused. Some other data
657 is passed in to be sure the message matches up correctly (to help
658 guard against message coming in after their timeout and the
659 sequence number being reused). */
660 static int intf_find_seq(ipmi_smi_t intf
,
665 struct ipmi_addr
*addr
,
666 struct ipmi_recv_msg
**recv_msg
)
671 if (seq
>= IPMI_IPMB_NUM_SEQ
)
674 spin_lock_irqsave(&(intf
->seq_lock
), flags
);
675 if (intf
->seq_table
[seq
].inuse
) {
676 struct ipmi_recv_msg
*msg
= intf
->seq_table
[seq
].recv_msg
;
678 if ((msg
->addr
.channel
== channel
)
679 && (msg
->msg
.cmd
== cmd
)
680 && (msg
->msg
.netfn
== netfn
)
681 && (ipmi_addr_equal(addr
, &(msg
->addr
))))
684 intf
->seq_table
[seq
].inuse
= 0;
688 spin_unlock_irqrestore(&(intf
->seq_lock
), flags
);
694 /* Start the timer for a specific sequence table entry. */
695 static int intf_start_seq_timer(ipmi_smi_t intf
,
704 GET_SEQ_FROM_MSGID(msgid
, seq
, seqid
);
706 spin_lock_irqsave(&(intf
->seq_lock
), flags
);
707 /* We do this verification because the user can be deleted
708 while a message is outstanding. */
709 if ((intf
->seq_table
[seq
].inuse
)
710 && (intf
->seq_table
[seq
].seqid
== seqid
))
712 struct seq_table
*ent
= &(intf
->seq_table
[seq
]);
713 ent
->timeout
= ent
->orig_timeout
;
716 spin_unlock_irqrestore(&(intf
->seq_lock
), flags
);
721 /* Got an error for the send message for a specific sequence number. */
722 static int intf_err_seq(ipmi_smi_t intf
,
730 struct ipmi_recv_msg
*msg
= NULL
;
733 GET_SEQ_FROM_MSGID(msgid
, seq
, seqid
);
735 spin_lock_irqsave(&(intf
->seq_lock
), flags
);
736 /* We do this verification because the user can be deleted
737 while a message is outstanding. */
738 if ((intf
->seq_table
[seq
].inuse
)
739 && (intf
->seq_table
[seq
].seqid
== seqid
))
741 struct seq_table
*ent
= &(intf
->seq_table
[seq
]);
747 spin_unlock_irqrestore(&(intf
->seq_lock
), flags
);
750 msg
->recv_type
= IPMI_RESPONSE_RECV_TYPE
;
751 msg
->msg_data
[0] = err
;
752 msg
->msg
.netfn
|= 1; /* Convert to a response. */
753 msg
->msg
.data_len
= 1;
754 msg
->msg
.data
= msg
->msg_data
;
755 deliver_response(msg
);
762 int ipmi_create_user(unsigned int if_num
,
763 struct ipmi_user_hndl
*handler
,
768 ipmi_user_t new_user
;
772 /* There is no module usecount here, because it's not
773 required. Since this can only be used by and called from
774 other modules, they will implicitly use this module, and
775 thus this can't be removed unless the other modules are
781 /* Make sure the driver is actually initialized, this handles
782 problems with initialization order. */
784 rv
= ipmi_init_msghandler();
788 /* The init code doesn't return an error if it was turned
789 off, but it won't initialize. Check that. */
794 new_user
= kmalloc(sizeof(*new_user
), GFP_KERNEL
);
799 list_for_each_entry_rcu(intf
, &ipmi_interfaces
, link
) {
800 if (intf
->intf_num
== if_num
)
808 /* Note that each existing user holds a refcount to the interface. */
809 kref_get(&intf
->refcount
);
812 kref_init(&new_user
->refcount
);
813 new_user
->handler
= handler
;
814 new_user
->handler_data
= handler_data
;
815 new_user
->intf
= intf
;
816 new_user
->gets_events
= 0;
818 if (!try_module_get(intf
->handlers
->owner
)) {
823 if (intf
->handlers
->inc_usecount
) {
824 rv
= intf
->handlers
->inc_usecount(intf
->send_info
);
826 module_put(intf
->handlers
->owner
);
832 spin_lock_irqsave(&intf
->seq_lock
, flags
);
833 list_add_rcu(&new_user
->link
, &intf
->users
);
834 spin_unlock_irqrestore(&intf
->seq_lock
, flags
);
839 kref_put(&intf
->refcount
, intf_free
);
845 static void free_user(struct kref
*ref
)
847 ipmi_user_t user
= container_of(ref
, struct ipmi_user
, refcount
);
851 int ipmi_destroy_user(ipmi_user_t user
)
853 ipmi_smi_t intf
= user
->intf
;
856 struct cmd_rcvr
*rcvr
;
857 struct cmd_rcvr
*rcvrs
= NULL
;
861 /* Remove the user from the interface's sequence table. */
862 spin_lock_irqsave(&intf
->seq_lock
, flags
);
863 list_del_rcu(&user
->link
);
865 for (i
= 0; i
< IPMI_IPMB_NUM_SEQ
; i
++) {
866 if (intf
->seq_table
[i
].inuse
867 && (intf
->seq_table
[i
].recv_msg
->user
== user
))
869 intf
->seq_table
[i
].inuse
= 0;
872 spin_unlock_irqrestore(&intf
->seq_lock
, flags
);
875 * Remove the user from the command receiver's table. First
876 * we build a list of everything (not using the standard link,
877 * since other things may be using it till we do
878 * synchronize_rcu()) then free everything in that list.
880 mutex_lock(&intf
->cmd_rcvrs_mutex
);
881 list_for_each_entry_rcu(rcvr
, &intf
->cmd_rcvrs
, link
) {
882 if (rcvr
->user
== user
) {
883 list_del_rcu(&rcvr
->link
);
888 mutex_unlock(&intf
->cmd_rcvrs_mutex
);
896 module_put(intf
->handlers
->owner
);
897 if (intf
->handlers
->dec_usecount
)
898 intf
->handlers
->dec_usecount(intf
->send_info
);
900 kref_put(&intf
->refcount
, intf_free
);
902 kref_put(&user
->refcount
, free_user
);
907 void ipmi_get_version(ipmi_user_t user
,
908 unsigned char *major
,
909 unsigned char *minor
)
911 *major
= ipmi_version_major(&user
->intf
->bmc
->id
);
912 *minor
= ipmi_version_minor(&user
->intf
->bmc
->id
);
915 int ipmi_set_my_address(ipmi_user_t user
,
916 unsigned int channel
,
917 unsigned char address
)
919 if (channel
>= IPMI_MAX_CHANNELS
)
921 user
->intf
->channels
[channel
].address
= address
;
925 int ipmi_get_my_address(ipmi_user_t user
,
926 unsigned int channel
,
927 unsigned char *address
)
929 if (channel
>= IPMI_MAX_CHANNELS
)
931 *address
= user
->intf
->channels
[channel
].address
;
935 int ipmi_set_my_LUN(ipmi_user_t user
,
936 unsigned int channel
,
939 if (channel
>= IPMI_MAX_CHANNELS
)
941 user
->intf
->channels
[channel
].lun
= LUN
& 0x3;
945 int ipmi_get_my_LUN(ipmi_user_t user
,
946 unsigned int channel
,
947 unsigned char *address
)
949 if (channel
>= IPMI_MAX_CHANNELS
)
951 *address
= user
->intf
->channels
[channel
].lun
;
955 int ipmi_set_gets_events(ipmi_user_t user
, int val
)
958 ipmi_smi_t intf
= user
->intf
;
959 struct ipmi_recv_msg
*msg
, *msg2
;
960 struct list_head msgs
;
962 INIT_LIST_HEAD(&msgs
);
964 spin_lock_irqsave(&intf
->events_lock
, flags
);
965 user
->gets_events
= val
;
968 /* Deliver any queued events. */
969 list_for_each_entry_safe(msg
, msg2
, &intf
->waiting_events
, link
)
970 list_move_tail(&msg
->link
, &msgs
);
971 intf
->waiting_events_count
= 0;
974 /* Hold the events lock while doing this to preserve order. */
975 list_for_each_entry_safe(msg
, msg2
, &msgs
, link
) {
977 kref_get(&user
->refcount
);
978 deliver_response(msg
);
981 spin_unlock_irqrestore(&intf
->events_lock
, flags
);
986 static struct cmd_rcvr
*find_cmd_rcvr(ipmi_smi_t intf
,
991 struct cmd_rcvr
*rcvr
;
993 list_for_each_entry_rcu(rcvr
, &intf
->cmd_rcvrs
, link
) {
994 if ((rcvr
->netfn
== netfn
) && (rcvr
->cmd
== cmd
)
995 && (rcvr
->chans
& (1 << chan
)))
1001 static int is_cmd_rcvr_exclusive(ipmi_smi_t intf
,
1002 unsigned char netfn
,
1006 struct cmd_rcvr
*rcvr
;
1008 list_for_each_entry_rcu(rcvr
, &intf
->cmd_rcvrs
, link
) {
1009 if ((rcvr
->netfn
== netfn
) && (rcvr
->cmd
== cmd
)
1010 && (rcvr
->chans
& chans
))
1016 int ipmi_register_for_cmd(ipmi_user_t user
,
1017 unsigned char netfn
,
1021 ipmi_smi_t intf
= user
->intf
;
1022 struct cmd_rcvr
*rcvr
;
1026 rcvr
= kmalloc(sizeof(*rcvr
), GFP_KERNEL
);
1030 rcvr
->netfn
= netfn
;
1031 rcvr
->chans
= chans
;
1034 mutex_lock(&intf
->cmd_rcvrs_mutex
);
1035 /* Make sure the command/netfn is not already registered. */
1036 if (!is_cmd_rcvr_exclusive(intf
, netfn
, cmd
, chans
)) {
1041 list_add_rcu(&rcvr
->link
, &intf
->cmd_rcvrs
);
1044 mutex_unlock(&intf
->cmd_rcvrs_mutex
);
1051 int ipmi_unregister_for_cmd(ipmi_user_t user
,
1052 unsigned char netfn
,
1056 ipmi_smi_t intf
= user
->intf
;
1057 struct cmd_rcvr
*rcvr
;
1058 struct cmd_rcvr
*rcvrs
= NULL
;
1059 int i
, rv
= -ENOENT
;
1061 mutex_lock(&intf
->cmd_rcvrs_mutex
);
1062 for (i
= 0; i
< IPMI_NUM_CHANNELS
; i
++) {
1063 if (((1 << i
) & chans
) == 0)
1065 rcvr
= find_cmd_rcvr(intf
, netfn
, cmd
, i
);
1068 if (rcvr
->user
== user
) {
1070 rcvr
->chans
&= ~chans
;
1071 if (rcvr
->chans
== 0) {
1072 list_del_rcu(&rcvr
->link
);
1078 mutex_unlock(&intf
->cmd_rcvrs_mutex
);
1088 void ipmi_user_set_run_to_completion(ipmi_user_t user
, int val
)
1090 ipmi_smi_t intf
= user
->intf
;
1091 intf
->handlers
->set_run_to_completion(intf
->send_info
, val
);
/* IPMB 2's-complement checksum: the byte that makes the sum of
   data[0..size-1] plus the checksum equal zero (mod 256). */
static unsigned char
ipmb_checksum(unsigned char *data, int size)
{
	unsigned char csum = 0;

	for (; size > 0; size--, data++)
		csum += *data;

	return -csum;
}
1105 static inline void format_ipmb_msg(struct ipmi_smi_msg
*smi_msg
,
1106 struct kernel_ipmi_msg
*msg
,
1107 struct ipmi_ipmb_addr
*ipmb_addr
,
1109 unsigned char ipmb_seq
,
1111 unsigned char source_address
,
1112 unsigned char source_lun
)
1116 /* Format the IPMB header data. */
1117 smi_msg
->data
[0] = (IPMI_NETFN_APP_REQUEST
<< 2);
1118 smi_msg
->data
[1] = IPMI_SEND_MSG_CMD
;
1119 smi_msg
->data
[2] = ipmb_addr
->channel
;
1121 smi_msg
->data
[3] = 0;
1122 smi_msg
->data
[i
+3] = ipmb_addr
->slave_addr
;
1123 smi_msg
->data
[i
+4] = (msg
->netfn
<< 2) | (ipmb_addr
->lun
& 0x3);
1124 smi_msg
->data
[i
+5] = ipmb_checksum(&(smi_msg
->data
[i
+3]), 2);
1125 smi_msg
->data
[i
+6] = source_address
;
1126 smi_msg
->data
[i
+7] = (ipmb_seq
<< 2) | source_lun
;
1127 smi_msg
->data
[i
+8] = msg
->cmd
;
1129 /* Now tack on the data to the message. */
1130 if (msg
->data_len
> 0)
1131 memcpy(&(smi_msg
->data
[i
+9]), msg
->data
,
1133 smi_msg
->data_size
= msg
->data_len
+ 9;
1135 /* Now calculate the checksum and tack it on. */
1136 smi_msg
->data
[i
+smi_msg
->data_size
]
1137 = ipmb_checksum(&(smi_msg
->data
[i
+6]),
1138 smi_msg
->data_size
-6);
1140 /* Add on the checksum size and the offset from the
1142 smi_msg
->data_size
+= 1 + i
;
1144 smi_msg
->msgid
= msgid
;
1147 static inline void format_lan_msg(struct ipmi_smi_msg
*smi_msg
,
1148 struct kernel_ipmi_msg
*msg
,
1149 struct ipmi_lan_addr
*lan_addr
,
1151 unsigned char ipmb_seq
,
1152 unsigned char source_lun
)
1154 /* Format the IPMB header data. */
1155 smi_msg
->data
[0] = (IPMI_NETFN_APP_REQUEST
<< 2);
1156 smi_msg
->data
[1] = IPMI_SEND_MSG_CMD
;
1157 smi_msg
->data
[2] = lan_addr
->channel
;
1158 smi_msg
->data
[3] = lan_addr
->session_handle
;
1159 smi_msg
->data
[4] = lan_addr
->remote_SWID
;
1160 smi_msg
->data
[5] = (msg
->netfn
<< 2) | (lan_addr
->lun
& 0x3);
1161 smi_msg
->data
[6] = ipmb_checksum(&(smi_msg
->data
[4]), 2);
1162 smi_msg
->data
[7] = lan_addr
->local_SWID
;
1163 smi_msg
->data
[8] = (ipmb_seq
<< 2) | source_lun
;
1164 smi_msg
->data
[9] = msg
->cmd
;
1166 /* Now tack on the data to the message. */
1167 if (msg
->data_len
> 0)
1168 memcpy(&(smi_msg
->data
[10]), msg
->data
,
1170 smi_msg
->data_size
= msg
->data_len
+ 10;
1172 /* Now calculate the checksum and tack it on. */
1173 smi_msg
->data
[smi_msg
->data_size
]
1174 = ipmb_checksum(&(smi_msg
->data
[7]),
1175 smi_msg
->data_size
-7);
1177 /* Add on the checksum size and the offset from the
1179 smi_msg
->data_size
+= 1;
1181 smi_msg
->msgid
= msgid
;
1184 /* Separate from ipmi_request so that the user does not have to be
1185 supplied in certain circumstances (mainly at panic time). If
1186 messages are supplied, they will be freed, even if an error
1188 static int i_ipmi_request(ipmi_user_t user
,
1190 struct ipmi_addr
*addr
,
1192 struct kernel_ipmi_msg
*msg
,
1193 void *user_msg_data
,
1195 struct ipmi_recv_msg
*supplied_recv
,
1197 unsigned char source_address
,
1198 unsigned char source_lun
,
1200 unsigned int retry_time_ms
)
1203 struct ipmi_smi_msg
*smi_msg
;
1204 struct ipmi_recv_msg
*recv_msg
;
1205 unsigned long flags
;
1208 if (supplied_recv
) {
1209 recv_msg
= supplied_recv
;
1211 recv_msg
= ipmi_alloc_recv_msg();
1212 if (recv_msg
== NULL
) {
1216 recv_msg
->user_msg_data
= user_msg_data
;
1219 smi_msg
= (struct ipmi_smi_msg
*) supplied_smi
;
1221 smi_msg
= ipmi_alloc_smi_msg();
1222 if (smi_msg
== NULL
) {
1223 ipmi_free_recv_msg(recv_msg
);
1228 recv_msg
->user
= user
;
1230 kref_get(&user
->refcount
);
1231 recv_msg
->msgid
= msgid
;
1232 /* Store the message to send in the receive message so timeout
1233 responses can get the proper response data. */
1234 recv_msg
->msg
= *msg
;
1236 if (addr
->addr_type
== IPMI_SYSTEM_INTERFACE_ADDR_TYPE
) {
1237 struct ipmi_system_interface_addr
*smi_addr
;
1239 if (msg
->netfn
& 1) {
1240 /* Responses are not allowed to the SMI. */
1245 smi_addr
= (struct ipmi_system_interface_addr
*) addr
;
1246 if (smi_addr
->lun
> 3) {
1247 spin_lock_irqsave(&intf
->counter_lock
, flags
);
1248 intf
->sent_invalid_commands
++;
1249 spin_unlock_irqrestore(&intf
->counter_lock
, flags
);
1254 memcpy(&recv_msg
->addr
, smi_addr
, sizeof(*smi_addr
));
1256 if ((msg
->netfn
== IPMI_NETFN_APP_REQUEST
)
1257 && ((msg
->cmd
== IPMI_SEND_MSG_CMD
)
1258 || (msg
->cmd
== IPMI_GET_MSG_CMD
)
1259 || (msg
->cmd
== IPMI_READ_EVENT_MSG_BUFFER_CMD
)))
1261 /* We don't let the user do these, since we manage
1262 the sequence numbers. */
1263 spin_lock_irqsave(&intf
->counter_lock
, flags
);
1264 intf
->sent_invalid_commands
++;
1265 spin_unlock_irqrestore(&intf
->counter_lock
, flags
);
1270 if ((msg
->data_len
+ 2) > IPMI_MAX_MSG_LENGTH
) {
1271 spin_lock_irqsave(&intf
->counter_lock
, flags
);
1272 intf
->sent_invalid_commands
++;
1273 spin_unlock_irqrestore(&intf
->counter_lock
, flags
);
1278 smi_msg
->data
[0] = (msg
->netfn
<< 2) | (smi_addr
->lun
& 0x3);
1279 smi_msg
->data
[1] = msg
->cmd
;
1280 smi_msg
->msgid
= msgid
;
1281 smi_msg
->user_data
= recv_msg
;
1282 if (msg
->data_len
> 0)
1283 memcpy(&(smi_msg
->data
[2]), msg
->data
, msg
->data_len
);
1284 smi_msg
->data_size
= msg
->data_len
+ 2;
1285 spin_lock_irqsave(&intf
->counter_lock
, flags
);
1286 intf
->sent_local_commands
++;
1287 spin_unlock_irqrestore(&intf
->counter_lock
, flags
);
1288 } else if ((addr
->addr_type
== IPMI_IPMB_ADDR_TYPE
)
1289 || (addr
->addr_type
== IPMI_IPMB_BROADCAST_ADDR_TYPE
))
1291 struct ipmi_ipmb_addr
*ipmb_addr
;
1292 unsigned char ipmb_seq
;
1296 if (addr
->channel
>= IPMI_MAX_CHANNELS
) {
1297 spin_lock_irqsave(&intf
->counter_lock
, flags
);
1298 intf
->sent_invalid_commands
++;
1299 spin_unlock_irqrestore(&intf
->counter_lock
, flags
);
1304 if (intf
->channels
[addr
->channel
].medium
1305 != IPMI_CHANNEL_MEDIUM_IPMB
)
1307 spin_lock_irqsave(&intf
->counter_lock
, flags
);
1308 intf
->sent_invalid_commands
++;
1309 spin_unlock_irqrestore(&intf
->counter_lock
, flags
);
1315 if (addr
->addr_type
== IPMI_IPMB_BROADCAST_ADDR_TYPE
)
1316 retries
= 0; /* Don't retry broadcasts. */
1320 if (addr
->addr_type
== IPMI_IPMB_BROADCAST_ADDR_TYPE
) {
1321 /* Broadcasts add a zero at the beginning of the
1322 message, but otherwise is the same as an IPMB
1324 addr
->addr_type
= IPMI_IPMB_ADDR_TYPE
;
1329 /* Default to 1 second retries. */
1330 if (retry_time_ms
== 0)
1331 retry_time_ms
= 1000;
1333 /* 9 for the header and 1 for the checksum, plus
1334 possibly one for the broadcast. */
1335 if ((msg
->data_len
+ 10 + broadcast
) > IPMI_MAX_MSG_LENGTH
) {
1336 spin_lock_irqsave(&intf
->counter_lock
, flags
);
1337 intf
->sent_invalid_commands
++;
1338 spin_unlock_irqrestore(&intf
->counter_lock
, flags
);
1343 ipmb_addr
= (struct ipmi_ipmb_addr
*) addr
;
1344 if (ipmb_addr
->lun
> 3) {
1345 spin_lock_irqsave(&intf
->counter_lock
, flags
);
1346 intf
->sent_invalid_commands
++;
1347 spin_unlock_irqrestore(&intf
->counter_lock
, flags
);
1352 memcpy(&recv_msg
->addr
, ipmb_addr
, sizeof(*ipmb_addr
));
1354 if (recv_msg
->msg
.netfn
& 0x1) {
1355 /* It's a response, so use the user's sequence
1357 spin_lock_irqsave(&intf
->counter_lock
, flags
);
1358 intf
->sent_ipmb_responses
++;
1359 spin_unlock_irqrestore(&intf
->counter_lock
, flags
);
1360 format_ipmb_msg(smi_msg
, msg
, ipmb_addr
, msgid
,
1362 source_address
, source_lun
);
1364 /* Save the receive message so we can use it
1365 to deliver the response. */
1366 smi_msg
->user_data
= recv_msg
;
1368 /* It's a command, so get a sequence for it. */
1370 spin_lock_irqsave(&(intf
->seq_lock
), flags
);
1372 spin_lock(&intf
->counter_lock
);
1373 intf
->sent_ipmb_commands
++;
1374 spin_unlock(&intf
->counter_lock
);
1376 /* Create a sequence number with a 1 second
1377 timeout and 4 retries. */
1378 rv
= intf_next_seq(intf
,
1386 /* We have used up all the sequence numbers,
1387 probably, so abort. */
1388 spin_unlock_irqrestore(&(intf
->seq_lock
),
1393 /* Store the sequence number in the message,
1394 so that when the send message response
1395 comes back we can start the timer. */
1396 format_ipmb_msg(smi_msg
, msg
, ipmb_addr
,
1397 STORE_SEQ_IN_MSGID(ipmb_seq
, seqid
),
1398 ipmb_seq
, broadcast
,
1399 source_address
, source_lun
);
1401 /* Copy the message into the recv message data, so we
1402 can retransmit it later if necessary. */
1403 memcpy(recv_msg
->msg_data
, smi_msg
->data
,
1404 smi_msg
->data_size
);
1405 recv_msg
->msg
.data
= recv_msg
->msg_data
;
1406 recv_msg
->msg
.data_len
= smi_msg
->data_size
;
1408 /* We don't unlock until here, because we need
1409 to copy the completed message into the
1410 recv_msg before we release the lock.
1411 Otherwise, race conditions may bite us. I
1412 know that's pretty paranoid, but I prefer
1414 spin_unlock_irqrestore(&(intf
->seq_lock
), flags
);
1416 } else if (addr
->addr_type
== IPMI_LAN_ADDR_TYPE
) {
1417 struct ipmi_lan_addr
*lan_addr
;
1418 unsigned char ipmb_seq
;
1421 if (addr
->channel
>= IPMI_MAX_CHANNELS
) {
1422 spin_lock_irqsave(&intf
->counter_lock
, flags
);
1423 intf
->sent_invalid_commands
++;
1424 spin_unlock_irqrestore(&intf
->counter_lock
, flags
);
1429 if ((intf
->channels
[addr
->channel
].medium
1430 != IPMI_CHANNEL_MEDIUM_8023LAN
)
1431 && (intf
->channels
[addr
->channel
].medium
1432 != IPMI_CHANNEL_MEDIUM_ASYNC
))
1434 spin_lock_irqsave(&intf
->counter_lock
, flags
);
1435 intf
->sent_invalid_commands
++;
1436 spin_unlock_irqrestore(&intf
->counter_lock
, flags
);
1443 /* Default to 1 second retries. */
1444 if (retry_time_ms
== 0)
1445 retry_time_ms
= 1000;
1447 /* 11 for the header and 1 for the checksum. */
1448 if ((msg
->data_len
+ 12) > IPMI_MAX_MSG_LENGTH
) {
1449 spin_lock_irqsave(&intf
->counter_lock
, flags
);
1450 intf
->sent_invalid_commands
++;
1451 spin_unlock_irqrestore(&intf
->counter_lock
, flags
);
1456 lan_addr
= (struct ipmi_lan_addr
*) addr
;
1457 if (lan_addr
->lun
> 3) {
1458 spin_lock_irqsave(&intf
->counter_lock
, flags
);
1459 intf
->sent_invalid_commands
++;
1460 spin_unlock_irqrestore(&intf
->counter_lock
, flags
);
1465 memcpy(&recv_msg
->addr
, lan_addr
, sizeof(*lan_addr
));
1467 if (recv_msg
->msg
.netfn
& 0x1) {
1468 /* It's a response, so use the user's sequence
1470 spin_lock_irqsave(&intf
->counter_lock
, flags
);
1471 intf
->sent_lan_responses
++;
1472 spin_unlock_irqrestore(&intf
->counter_lock
, flags
);
1473 format_lan_msg(smi_msg
, msg
, lan_addr
, msgid
,
1476 /* Save the receive message so we can use it
1477 to deliver the response. */
1478 smi_msg
->user_data
= recv_msg
;
1480 /* It's a command, so get a sequence for it. */
1482 spin_lock_irqsave(&(intf
->seq_lock
), flags
);
1484 spin_lock(&intf
->counter_lock
);
1485 intf
->sent_lan_commands
++;
1486 spin_unlock(&intf
->counter_lock
);
1488 /* Create a sequence number with a 1 second
1489 timeout and 4 retries. */
1490 rv
= intf_next_seq(intf
,
1498 /* We have used up all the sequence numbers,
1499 probably, so abort. */
1500 spin_unlock_irqrestore(&(intf
->seq_lock
),
1505 /* Store the sequence number in the message,
1506 so that when the send message response
1507 comes back we can start the timer. */
1508 format_lan_msg(smi_msg
, msg
, lan_addr
,
1509 STORE_SEQ_IN_MSGID(ipmb_seq
, seqid
),
1510 ipmb_seq
, source_lun
);
1512 /* Copy the message into the recv message data, so we
1513 can retransmit it later if necessary. */
1514 memcpy(recv_msg
->msg_data
, smi_msg
->data
,
1515 smi_msg
->data_size
);
1516 recv_msg
->msg
.data
= recv_msg
->msg_data
;
1517 recv_msg
->msg
.data_len
= smi_msg
->data_size
;
1519 /* We don't unlock until here, because we need
1520 to copy the completed message into the
1521 recv_msg before we release the lock.
1522 Otherwise, race conditions may bite us. I
1523 know that's pretty paranoid, but I prefer
1525 spin_unlock_irqrestore(&(intf
->seq_lock
), flags
);
1528 /* Unknown address type. */
1529 spin_lock_irqsave(&intf
->counter_lock
, flags
);
1530 intf
->sent_invalid_commands
++;
1531 spin_unlock_irqrestore(&intf
->counter_lock
, flags
);
1539 for (m
= 0; m
< smi_msg
->data_size
; m
++)
1540 printk(" %2.2x", smi_msg
->data
[m
]);
1544 intf
->handlers
->sender(intf
->send_info
, smi_msg
, priority
);
1549 ipmi_free_smi_msg(smi_msg
);
1550 ipmi_free_recv_msg(recv_msg
);
1554 static int check_addr(ipmi_smi_t intf
,
1555 struct ipmi_addr
*addr
,
1556 unsigned char *saddr
,
1559 if (addr
->channel
>= IPMI_MAX_CHANNELS
)
1561 *lun
= intf
->channels
[addr
->channel
].lun
;
1562 *saddr
= intf
->channels
[addr
->channel
].address
;
1566 int ipmi_request_settime(ipmi_user_t user
,
1567 struct ipmi_addr
*addr
,
1569 struct kernel_ipmi_msg
*msg
,
1570 void *user_msg_data
,
1573 unsigned int retry_time_ms
)
1575 unsigned char saddr
, lun
;
1580 rv
= check_addr(user
->intf
, addr
, &saddr
, &lun
);
1583 return i_ipmi_request(user
,
1597 int ipmi_request_supply_msgs(ipmi_user_t user
,
1598 struct ipmi_addr
*addr
,
1600 struct kernel_ipmi_msg
*msg
,
1601 void *user_msg_data
,
1603 struct ipmi_recv_msg
*supplied_recv
,
1606 unsigned char saddr
, lun
;
1611 rv
= check_addr(user
->intf
, addr
, &saddr
, &lun
);
1614 return i_ipmi_request(user
,
1628 static int ipmb_file_read_proc(char *page
, char **start
, off_t off
,
1629 int count
, int *eof
, void *data
)
1631 char *out
= (char *) page
;
1632 ipmi_smi_t intf
= data
;
1636 for (i
= 0; i
< IPMI_MAX_CHANNELS
; i
++)
1637 rv
+= sprintf(out
+rv
, "%x ", intf
->channels
[i
].address
);
1638 out
[rv
-1] = '\n'; /* Replace the final space with a newline */
1644 static int version_file_read_proc(char *page
, char **start
, off_t off
,
1645 int count
, int *eof
, void *data
)
1647 char *out
= (char *) page
;
1648 ipmi_smi_t intf
= data
;
1650 return sprintf(out
, "%d.%d\n",
1651 ipmi_version_major(&intf
->bmc
->id
),
1652 ipmi_version_minor(&intf
->bmc
->id
));
1655 static int stat_file_read_proc(char *page
, char **start
, off_t off
,
1656 int count
, int *eof
, void *data
)
1658 char *out
= (char *) page
;
1659 ipmi_smi_t intf
= data
;
1661 out
+= sprintf(out
, "sent_invalid_commands: %d\n",
1662 intf
->sent_invalid_commands
);
1663 out
+= sprintf(out
, "sent_local_commands: %d\n",
1664 intf
->sent_local_commands
);
1665 out
+= sprintf(out
, "handled_local_responses: %d\n",
1666 intf
->handled_local_responses
);
1667 out
+= sprintf(out
, "unhandled_local_responses: %d\n",
1668 intf
->unhandled_local_responses
);
1669 out
+= sprintf(out
, "sent_ipmb_commands: %d\n",
1670 intf
->sent_ipmb_commands
);
1671 out
+= sprintf(out
, "sent_ipmb_command_errs: %d\n",
1672 intf
->sent_ipmb_command_errs
);
1673 out
+= sprintf(out
, "retransmitted_ipmb_commands: %d\n",
1674 intf
->retransmitted_ipmb_commands
);
1675 out
+= sprintf(out
, "timed_out_ipmb_commands: %d\n",
1676 intf
->timed_out_ipmb_commands
);
1677 out
+= sprintf(out
, "timed_out_ipmb_broadcasts: %d\n",
1678 intf
->timed_out_ipmb_broadcasts
);
1679 out
+= sprintf(out
, "sent_ipmb_responses: %d\n",
1680 intf
->sent_ipmb_responses
);
1681 out
+= sprintf(out
, "handled_ipmb_responses: %d\n",
1682 intf
->handled_ipmb_responses
);
1683 out
+= sprintf(out
, "invalid_ipmb_responses: %d\n",
1684 intf
->invalid_ipmb_responses
);
1685 out
+= sprintf(out
, "unhandled_ipmb_responses: %d\n",
1686 intf
->unhandled_ipmb_responses
);
1687 out
+= sprintf(out
, "sent_lan_commands: %d\n",
1688 intf
->sent_lan_commands
);
1689 out
+= sprintf(out
, "sent_lan_command_errs: %d\n",
1690 intf
->sent_lan_command_errs
);
1691 out
+= sprintf(out
, "retransmitted_lan_commands: %d\n",
1692 intf
->retransmitted_lan_commands
);
1693 out
+= sprintf(out
, "timed_out_lan_commands: %d\n",
1694 intf
->timed_out_lan_commands
);
1695 out
+= sprintf(out
, "sent_lan_responses: %d\n",
1696 intf
->sent_lan_responses
);
1697 out
+= sprintf(out
, "handled_lan_responses: %d\n",
1698 intf
->handled_lan_responses
);
1699 out
+= sprintf(out
, "invalid_lan_responses: %d\n",
1700 intf
->invalid_lan_responses
);
1701 out
+= sprintf(out
, "unhandled_lan_responses: %d\n",
1702 intf
->unhandled_lan_responses
);
1703 out
+= sprintf(out
, "handled_commands: %d\n",
1704 intf
->handled_commands
);
1705 out
+= sprintf(out
, "invalid_commands: %d\n",
1706 intf
->invalid_commands
);
1707 out
+= sprintf(out
, "unhandled_commands: %d\n",
1708 intf
->unhandled_commands
);
1709 out
+= sprintf(out
, "invalid_events: %d\n",
1710 intf
->invalid_events
);
1711 out
+= sprintf(out
, "events: %d\n",
1714 return (out
- ((char *) page
));
1717 int ipmi_smi_add_proc_entry(ipmi_smi_t smi
, char *name
,
1718 read_proc_t
*read_proc
, write_proc_t
*write_proc
,
1719 void *data
, struct module
*owner
)
1722 #ifdef CONFIG_PROC_FS
1723 struct proc_dir_entry
*file
;
1724 struct ipmi_proc_entry
*entry
;
1726 /* Create a list element. */
1727 entry
= kmalloc(sizeof(*entry
), GFP_KERNEL
);
1730 entry
->name
= kmalloc(strlen(name
)+1, GFP_KERNEL
);
1735 strcpy(entry
->name
, name
);
1737 file
= create_proc_entry(name
, 0, smi
->proc_dir
);
1745 file
->read_proc
= read_proc
;
1746 file
->write_proc
= write_proc
;
1747 file
->owner
= owner
;
1749 spin_lock(&smi
->proc_entry_lock
);
1750 /* Stick it on the list. */
1751 entry
->next
= smi
->proc_entries
;
1752 smi
->proc_entries
= entry
;
1753 spin_unlock(&smi
->proc_entry_lock
);
1755 #endif /* CONFIG_PROC_FS */
1760 static int add_proc_entries(ipmi_smi_t smi
, int num
)
1764 #ifdef CONFIG_PROC_FS
1765 sprintf(smi
->proc_dir_name
, "%d", num
);
1766 smi
->proc_dir
= proc_mkdir(smi
->proc_dir_name
, proc_ipmi_root
);
1770 smi
->proc_dir
->owner
= THIS_MODULE
;
1774 rv
= ipmi_smi_add_proc_entry(smi
, "stats",
1775 stat_file_read_proc
, NULL
,
1779 rv
= ipmi_smi_add_proc_entry(smi
, "ipmb",
1780 ipmb_file_read_proc
, NULL
,
1784 rv
= ipmi_smi_add_proc_entry(smi
, "version",
1785 version_file_read_proc
, NULL
,
1787 #endif /* CONFIG_PROC_FS */
1792 static void remove_proc_entries(ipmi_smi_t smi
)
1794 #ifdef CONFIG_PROC_FS
1795 struct ipmi_proc_entry
*entry
;
1797 spin_lock(&smi
->proc_entry_lock
);
1798 while (smi
->proc_entries
) {
1799 entry
= smi
->proc_entries
;
1800 smi
->proc_entries
= entry
->next
;
1802 remove_proc_entry(entry
->name
, smi
->proc_dir
);
1806 spin_unlock(&smi
->proc_entry_lock
);
1807 remove_proc_entry(smi
->proc_dir_name
, proc_ipmi_root
);
1808 #endif /* CONFIG_PROC_FS */
1811 static int __find_bmc_guid(struct device
*dev
, void *data
)
1813 unsigned char *id
= data
;
1814 struct bmc_device
*bmc
= dev_get_drvdata(dev
);
1815 return memcmp(bmc
->guid
, id
, 16) == 0;
1818 static struct bmc_device
*ipmi_find_bmc_guid(struct device_driver
*drv
,
1819 unsigned char *guid
)
1823 dev
= driver_find_device(drv
, NULL
, guid
, __find_bmc_guid
);
1825 return dev_get_drvdata(dev
);
/* Key used to match a bmc_device by its (product id, device id) pair
   when no GUID is available. */
struct prod_dev_id {
	unsigned int  product_id;	/* BMC product id */
	unsigned char device_id;	/* BMC device id */
};
1835 static int __find_bmc_prod_dev_id(struct device
*dev
, void *data
)
1837 struct prod_dev_id
*id
= data
;
1838 struct bmc_device
*bmc
= dev_get_drvdata(dev
);
1840 return (bmc
->id
.product_id
== id
->product_id
1841 && bmc
->id
.device_id
== id
->device_id
);
1844 static struct bmc_device
*ipmi_find_bmc_prod_dev_id(
1845 struct device_driver
*drv
,
1846 unsigned int product_id
, unsigned char device_id
)
1848 struct prod_dev_id id
= {
1849 .product_id
= product_id
,
1850 .device_id
= device_id
,
1854 dev
= driver_find_device(drv
, NULL
, &id
, __find_bmc_prod_dev_id
);
1856 return dev_get_drvdata(dev
);
1861 static ssize_t
device_id_show(struct device
*dev
,
1862 struct device_attribute
*attr
,
1865 struct bmc_device
*bmc
= dev_get_drvdata(dev
);
1867 return snprintf(buf
, 10, "%u\n", bmc
->id
.device_id
);
1870 static ssize_t
provides_dev_sdrs_show(struct device
*dev
,
1871 struct device_attribute
*attr
,
1874 struct bmc_device
*bmc
= dev_get_drvdata(dev
);
1876 return snprintf(buf
, 10, "%u\n",
1877 (bmc
->id
.device_revision
& 0x80) >> 7);
1880 static ssize_t
revision_show(struct device
*dev
, struct device_attribute
*attr
,
1883 struct bmc_device
*bmc
= dev_get_drvdata(dev
);
1885 return snprintf(buf
, 20, "%u\n",
1886 bmc
->id
.device_revision
& 0x0F);
1889 static ssize_t
firmware_rev_show(struct device
*dev
,
1890 struct device_attribute
*attr
,
1893 struct bmc_device
*bmc
= dev_get_drvdata(dev
);
1895 return snprintf(buf
, 20, "%u.%x\n", bmc
->id
.firmware_revision_1
,
1896 bmc
->id
.firmware_revision_2
);
1899 static ssize_t
ipmi_version_show(struct device
*dev
,
1900 struct device_attribute
*attr
,
1903 struct bmc_device
*bmc
= dev_get_drvdata(dev
);
1905 return snprintf(buf
, 20, "%u.%u\n",
1906 ipmi_version_major(&bmc
->id
),
1907 ipmi_version_minor(&bmc
->id
));
1910 static ssize_t
add_dev_support_show(struct device
*dev
,
1911 struct device_attribute
*attr
,
1914 struct bmc_device
*bmc
= dev_get_drvdata(dev
);
1916 return snprintf(buf
, 10, "0x%02x\n",
1917 bmc
->id
.additional_device_support
);
1920 static ssize_t
manufacturer_id_show(struct device
*dev
,
1921 struct device_attribute
*attr
,
1924 struct bmc_device
*bmc
= dev_get_drvdata(dev
);
1926 return snprintf(buf
, 20, "0x%6.6x\n", bmc
->id
.manufacturer_id
);
1929 static ssize_t
product_id_show(struct device
*dev
,
1930 struct device_attribute
*attr
,
1933 struct bmc_device
*bmc
= dev_get_drvdata(dev
);
1935 return snprintf(buf
, 10, "0x%4.4x\n", bmc
->id
.product_id
);
1938 static ssize_t
aux_firmware_rev_show(struct device
*dev
,
1939 struct device_attribute
*attr
,
1942 struct bmc_device
*bmc
= dev_get_drvdata(dev
);
1944 return snprintf(buf
, 21, "0x%02x 0x%02x 0x%02x 0x%02x\n",
1945 bmc
->id
.aux_firmware_revision
[3],
1946 bmc
->id
.aux_firmware_revision
[2],
1947 bmc
->id
.aux_firmware_revision
[1],
1948 bmc
->id
.aux_firmware_revision
[0]);
1951 static ssize_t
guid_show(struct device
*dev
, struct device_attribute
*attr
,
1954 struct bmc_device
*bmc
= dev_get_drvdata(dev
);
1956 return snprintf(buf
, 100, "%Lx%Lx\n",
1957 (long long) bmc
->guid
[0],
1958 (long long) bmc
->guid
[8]);
1961 static void remove_files(struct bmc_device
*bmc
)
1966 device_remove_file(&bmc
->dev
->dev
,
1967 &bmc
->device_id_attr
);
1968 device_remove_file(&bmc
->dev
->dev
,
1969 &bmc
->provides_dev_sdrs_attr
);
1970 device_remove_file(&bmc
->dev
->dev
,
1971 &bmc
->revision_attr
);
1972 device_remove_file(&bmc
->dev
->dev
,
1973 &bmc
->firmware_rev_attr
);
1974 device_remove_file(&bmc
->dev
->dev
,
1975 &bmc
->version_attr
);
1976 device_remove_file(&bmc
->dev
->dev
,
1977 &bmc
->add_dev_support_attr
);
1978 device_remove_file(&bmc
->dev
->dev
,
1979 &bmc
->manufacturer_id_attr
);
1980 device_remove_file(&bmc
->dev
->dev
,
1981 &bmc
->product_id_attr
);
1983 if (bmc
->id
.aux_firmware_revision_set
)
1984 device_remove_file(&bmc
->dev
->dev
,
1985 &bmc
->aux_firmware_rev_attr
);
1987 device_remove_file(&bmc
->dev
->dev
,
1992 cleanup_bmc_device(struct kref
*ref
)
1994 struct bmc_device
*bmc
;
1996 bmc
= container_of(ref
, struct bmc_device
, refcount
);
2000 platform_device_unregister(bmc
->dev
);
2004 static void ipmi_bmc_unregister(ipmi_smi_t intf
)
2006 struct bmc_device
*bmc
= intf
->bmc
;
2008 if (intf
->sysfs_name
) {
2009 sysfs_remove_link(&intf
->si_dev
->kobj
, intf
->sysfs_name
);
2010 kfree(intf
->sysfs_name
);
2011 intf
->sysfs_name
= NULL
;
2013 if (intf
->my_dev_name
) {
2014 sysfs_remove_link(&bmc
->dev
->dev
.kobj
, intf
->my_dev_name
);
2015 kfree(intf
->my_dev_name
);
2016 intf
->my_dev_name
= NULL
;
2019 mutex_lock(&ipmidriver_mutex
);
2020 kref_put(&bmc
->refcount
, cleanup_bmc_device
);
2022 mutex_unlock(&ipmidriver_mutex
);
2025 static int create_files(struct bmc_device
*bmc
)
2029 bmc
->device_id_attr
.attr
.name
= "device_id";
2030 bmc
->device_id_attr
.attr
.owner
= THIS_MODULE
;
2031 bmc
->device_id_attr
.attr
.mode
= S_IRUGO
;
2032 bmc
->device_id_attr
.show
= device_id_show
;
2034 bmc
->provides_dev_sdrs_attr
.attr
.name
= "provides_device_sdrs";
2035 bmc
->provides_dev_sdrs_attr
.attr
.owner
= THIS_MODULE
;
2036 bmc
->provides_dev_sdrs_attr
.attr
.mode
= S_IRUGO
;
2037 bmc
->provides_dev_sdrs_attr
.show
= provides_dev_sdrs_show
;
2039 bmc
->revision_attr
.attr
.name
= "revision";
2040 bmc
->revision_attr
.attr
.owner
= THIS_MODULE
;
2041 bmc
->revision_attr
.attr
.mode
= S_IRUGO
;
2042 bmc
->revision_attr
.show
= revision_show
;
2044 bmc
->firmware_rev_attr
.attr
.name
= "firmware_revision";
2045 bmc
->firmware_rev_attr
.attr
.owner
= THIS_MODULE
;
2046 bmc
->firmware_rev_attr
.attr
.mode
= S_IRUGO
;
2047 bmc
->firmware_rev_attr
.show
= firmware_rev_show
;
2049 bmc
->version_attr
.attr
.name
= "ipmi_version";
2050 bmc
->version_attr
.attr
.owner
= THIS_MODULE
;
2051 bmc
->version_attr
.attr
.mode
= S_IRUGO
;
2052 bmc
->version_attr
.show
= ipmi_version_show
;
2054 bmc
->add_dev_support_attr
.attr
.name
= "additional_device_support";
2055 bmc
->add_dev_support_attr
.attr
.owner
= THIS_MODULE
;
2056 bmc
->add_dev_support_attr
.attr
.mode
= S_IRUGO
;
2057 bmc
->add_dev_support_attr
.show
= add_dev_support_show
;
2059 bmc
->manufacturer_id_attr
.attr
.name
= "manufacturer_id";
2060 bmc
->manufacturer_id_attr
.attr
.owner
= THIS_MODULE
;
2061 bmc
->manufacturer_id_attr
.attr
.mode
= S_IRUGO
;
2062 bmc
->manufacturer_id_attr
.show
= manufacturer_id_show
;
2064 bmc
->product_id_attr
.attr
.name
= "product_id";
2065 bmc
->product_id_attr
.attr
.owner
= THIS_MODULE
;
2066 bmc
->product_id_attr
.attr
.mode
= S_IRUGO
;
2067 bmc
->product_id_attr
.show
= product_id_show
;
2069 bmc
->guid_attr
.attr
.name
= "guid";
2070 bmc
->guid_attr
.attr
.owner
= THIS_MODULE
;
2071 bmc
->guid_attr
.attr
.mode
= S_IRUGO
;
2072 bmc
->guid_attr
.show
= guid_show
;
2074 bmc
->aux_firmware_rev_attr
.attr
.name
= "aux_firmware_revision";
2075 bmc
->aux_firmware_rev_attr
.attr
.owner
= THIS_MODULE
;
2076 bmc
->aux_firmware_rev_attr
.attr
.mode
= S_IRUGO
;
2077 bmc
->aux_firmware_rev_attr
.show
= aux_firmware_rev_show
;
2079 err
= device_create_file(&bmc
->dev
->dev
,
2080 &bmc
->device_id_attr
);
2082 err
= device_create_file(&bmc
->dev
->dev
,
2083 &bmc
->provides_dev_sdrs_attr
);
2084 if (err
) goto out_devid
;
2085 err
= device_create_file(&bmc
->dev
->dev
,
2086 &bmc
->revision_attr
);
2087 if (err
) goto out_sdrs
;
2088 err
= device_create_file(&bmc
->dev
->dev
,
2089 &bmc
->firmware_rev_attr
);
2090 if (err
) goto out_rev
;
2091 err
= device_create_file(&bmc
->dev
->dev
,
2092 &bmc
->version_attr
);
2093 if (err
) goto out_firm
;
2094 err
= device_create_file(&bmc
->dev
->dev
,
2095 &bmc
->add_dev_support_attr
);
2096 if (err
) goto out_version
;
2097 err
= device_create_file(&bmc
->dev
->dev
,
2098 &bmc
->manufacturer_id_attr
);
2099 if (err
) goto out_add_dev
;
2100 err
= device_create_file(&bmc
->dev
->dev
,
2101 &bmc
->product_id_attr
);
2102 if (err
) goto out_manu
;
2103 if (bmc
->id
.aux_firmware_revision_set
) {
2104 err
= device_create_file(&bmc
->dev
->dev
,
2105 &bmc
->aux_firmware_rev_attr
);
2106 if (err
) goto out_prod_id
;
2108 if (bmc
->guid_set
) {
2109 err
= device_create_file(&bmc
->dev
->dev
,
2111 if (err
) goto out_aux_firm
;
2117 if (bmc
->id
.aux_firmware_revision_set
)
2118 device_remove_file(&bmc
->dev
->dev
,
2119 &bmc
->aux_firmware_rev_attr
);
2121 device_remove_file(&bmc
->dev
->dev
,
2122 &bmc
->product_id_attr
);
2124 device_remove_file(&bmc
->dev
->dev
,
2125 &bmc
->manufacturer_id_attr
);
2127 device_remove_file(&bmc
->dev
->dev
,
2128 &bmc
->add_dev_support_attr
);
2130 device_remove_file(&bmc
->dev
->dev
,
2131 &bmc
->version_attr
);
2133 device_remove_file(&bmc
->dev
->dev
,
2134 &bmc
->firmware_rev_attr
);
2136 device_remove_file(&bmc
->dev
->dev
,
2137 &bmc
->revision_attr
);
2139 device_remove_file(&bmc
->dev
->dev
,
2140 &bmc
->provides_dev_sdrs_attr
);
2142 device_remove_file(&bmc
->dev
->dev
,
2143 &bmc
->device_id_attr
);
2148 static int ipmi_bmc_register(ipmi_smi_t intf
, int ifnum
,
2149 const char *sysfs_name
)
2152 struct bmc_device
*bmc
= intf
->bmc
;
2153 struct bmc_device
*old_bmc
;
2157 mutex_lock(&ipmidriver_mutex
);
2160 * Try to find if there is an bmc_device struct
2161 * representing the interfaced BMC already
2164 old_bmc
= ipmi_find_bmc_guid(&ipmidriver
, bmc
->guid
);
2166 old_bmc
= ipmi_find_bmc_prod_dev_id(&ipmidriver
,
2171 * If there is already an bmc_device, free the new one,
2172 * otherwise register the new BMC device
2176 intf
->bmc
= old_bmc
;
2179 kref_get(&bmc
->refcount
);
2180 mutex_unlock(&ipmidriver_mutex
);
2183 "ipmi: interfacing existing BMC (man_id: 0x%6.6x,"
2184 " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
2185 bmc
->id
.manufacturer_id
,
2190 unsigned char orig_dev_id
= bmc
->id
.device_id
;
2191 int warn_printed
= 0;
2193 snprintf(name
, sizeof(name
),
2194 "ipmi_bmc.%4.4x", bmc
->id
.product_id
);
2196 while (ipmi_find_bmc_prod_dev_id(&ipmidriver
,
2200 if (!warn_printed
) {
2201 printk(KERN_WARNING PFX
2202 "This machine has two different BMCs"
2203 " with the same product id and device"
2204 " id. This is an error in the"
2205 " firmware, but incrementing the"
2206 " device id to work around the problem."
2207 " Prod ID = 0x%x, Dev ID = 0x%x\n",
2208 bmc
->id
.product_id
, bmc
->id
.device_id
);
2211 bmc
->id
.device_id
++; /* Wraps at 255 */
2212 if (bmc
->id
.device_id
== orig_dev_id
) {
2214 "Out of device ids!\n");
2219 bmc
->dev
= platform_device_alloc(name
, bmc
->id
.device_id
);
2221 mutex_unlock(&ipmidriver_mutex
);
2224 " Unable to allocate platform device\n");
2227 bmc
->dev
->dev
.driver
= &ipmidriver
;
2228 dev_set_drvdata(&bmc
->dev
->dev
, bmc
);
2229 kref_init(&bmc
->refcount
);
2231 rv
= platform_device_add(bmc
->dev
);
2232 mutex_unlock(&ipmidriver_mutex
);
2234 platform_device_put(bmc
->dev
);
2238 " Unable to register bmc device: %d\n",
2240 /* Don't go to out_err, you can only do that if
2241 the device is registered already. */
2245 rv
= create_files(bmc
);
2247 mutex_lock(&ipmidriver_mutex
);
2248 platform_device_unregister(bmc
->dev
);
2249 mutex_unlock(&ipmidriver_mutex
);
2255 "ipmi: Found new BMC (man_id: 0x%6.6x, "
2256 " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
2257 bmc
->id
.manufacturer_id
,
2263 * create symlink from system interface device to bmc device
2266 intf
->sysfs_name
= kstrdup(sysfs_name
, GFP_KERNEL
);
2267 if (!intf
->sysfs_name
) {
2270 "ipmi_msghandler: allocate link to BMC: %d\n",
2275 rv
= sysfs_create_link(&intf
->si_dev
->kobj
,
2276 &bmc
->dev
->dev
.kobj
, intf
->sysfs_name
);
2278 kfree(intf
->sysfs_name
);
2279 intf
->sysfs_name
= NULL
;
2281 "ipmi_msghandler: Unable to create bmc symlink: %d\n",
2286 size
= snprintf(dummy
, 0, "ipmi%d", ifnum
);
2287 intf
->my_dev_name
= kmalloc(size
+1, GFP_KERNEL
);
2288 if (!intf
->my_dev_name
) {
2289 kfree(intf
->sysfs_name
);
2290 intf
->sysfs_name
= NULL
;
2293 "ipmi_msghandler: allocate link from BMC: %d\n",
2297 snprintf(intf
->my_dev_name
, size
+1, "ipmi%d", ifnum
);
2299 rv
= sysfs_create_link(&bmc
->dev
->dev
.kobj
, &intf
->si_dev
->kobj
,
2302 kfree(intf
->sysfs_name
);
2303 intf
->sysfs_name
= NULL
;
2304 kfree(intf
->my_dev_name
);
2305 intf
->my_dev_name
= NULL
;
2308 " Unable to create symlink to bmc: %d\n",
2316 ipmi_bmc_unregister(intf
);
2321 send_guid_cmd(ipmi_smi_t intf
, int chan
)
2323 struct kernel_ipmi_msg msg
;
2324 struct ipmi_system_interface_addr si
;
2326 si
.addr_type
= IPMI_SYSTEM_INTERFACE_ADDR_TYPE
;
2327 si
.channel
= IPMI_BMC_CHANNEL
;
2330 msg
.netfn
= IPMI_NETFN_APP_REQUEST
;
2331 msg
.cmd
= IPMI_GET_DEVICE_GUID_CMD
;
2334 return i_ipmi_request(NULL
,
2336 (struct ipmi_addr
*) &si
,
2343 intf
->channels
[0].address
,
2344 intf
->channels
[0].lun
,
2349 guid_handler(ipmi_smi_t intf
, struct ipmi_recv_msg
*msg
)
2351 if ((msg
->addr
.addr_type
!= IPMI_SYSTEM_INTERFACE_ADDR_TYPE
)
2352 || (msg
->msg
.netfn
!= IPMI_NETFN_APP_RESPONSE
)
2353 || (msg
->msg
.cmd
!= IPMI_GET_DEVICE_GUID_CMD
))
2357 if (msg
->msg
.data
[0] != 0) {
2358 /* Error from getting the GUID, the BMC doesn't have one. */
2359 intf
->bmc
->guid_set
= 0;
2363 if (msg
->msg
.data_len
< 17) {
2364 intf
->bmc
->guid_set
= 0;
2365 printk(KERN_WARNING PFX
2366 "guid_handler: The GUID response from the BMC was too"
2367 " short, it was %d but should have been 17. Assuming"
2368 " GUID is not available.\n",
2373 memcpy(intf
->bmc
->guid
, msg
->msg
.data
, 16);
2374 intf
->bmc
->guid_set
= 1;
2376 wake_up(&intf
->waitq
);
2380 get_guid(ipmi_smi_t intf
)
2384 intf
->bmc
->guid_set
= 0x2;
2385 intf
->null_user_handler
= guid_handler
;
2386 rv
= send_guid_cmd(intf
, 0);
2388 /* Send failed, no GUID available. */
2389 intf
->bmc
->guid_set
= 0;
2390 wait_event(intf
->waitq
, intf
->bmc
->guid_set
!= 2);
2391 intf
->null_user_handler
= NULL
;
2395 send_channel_info_cmd(ipmi_smi_t intf
, int chan
)
2397 struct kernel_ipmi_msg msg
;
2398 unsigned char data
[1];
2399 struct ipmi_system_interface_addr si
;
2401 si
.addr_type
= IPMI_SYSTEM_INTERFACE_ADDR_TYPE
;
2402 si
.channel
= IPMI_BMC_CHANNEL
;
2405 msg
.netfn
= IPMI_NETFN_APP_REQUEST
;
2406 msg
.cmd
= IPMI_GET_CHANNEL_INFO_CMD
;
2410 return i_ipmi_request(NULL
,
2412 (struct ipmi_addr
*) &si
,
2419 intf
->channels
[0].address
,
2420 intf
->channels
[0].lun
,
2425 channel_handler(ipmi_smi_t intf
, struct ipmi_recv_msg
*msg
)
2430 if ((msg
->addr
.addr_type
== IPMI_SYSTEM_INTERFACE_ADDR_TYPE
)
2431 && (msg
->msg
.netfn
== IPMI_NETFN_APP_RESPONSE
)
2432 && (msg
->msg
.cmd
== IPMI_GET_CHANNEL_INFO_CMD
))
2434 /* It's the one we want */
2435 if (msg
->msg
.data
[0] != 0) {
2436 /* Got an error from the channel, just go on. */
2438 if (msg
->msg
.data
[0] == IPMI_INVALID_COMMAND_ERR
) {
2439 /* If the MC does not support this
2440 command, that is legal. We just
2441 assume it has one IPMB at channel
2443 intf
->channels
[0].medium
2444 = IPMI_CHANNEL_MEDIUM_IPMB
;
2445 intf
->channels
[0].protocol
2446 = IPMI_CHANNEL_PROTOCOL_IPMB
;
2449 intf
->curr_channel
= IPMI_MAX_CHANNELS
;
2450 wake_up(&intf
->waitq
);
2455 if (msg
->msg
.data_len
< 4) {
2456 /* Message not big enough, just go on. */
2459 chan
= intf
->curr_channel
;
2460 intf
->channels
[chan
].medium
= msg
->msg
.data
[2] & 0x7f;
2461 intf
->channels
[chan
].protocol
= msg
->msg
.data
[3] & 0x1f;
2464 intf
->curr_channel
++;
2465 if (intf
->curr_channel
>= IPMI_MAX_CHANNELS
)
2466 wake_up(&intf
->waitq
);
2468 rv
= send_channel_info_cmd(intf
, intf
->curr_channel
);
2471 /* Got an error somehow, just give up. */
2472 intf
->curr_channel
= IPMI_MAX_CHANNELS
;
2473 wake_up(&intf
->waitq
);
2475 printk(KERN_WARNING PFX
2476 "Error sending channel information: %d\n",
2484 int ipmi_register_smi(struct ipmi_smi_handlers
*handlers
,
2486 struct ipmi_device_id
*device_id
,
2487 struct device
*si_dev
,
2488 const char *sysfs_name
,
2489 unsigned char slave_addr
)
2497 struct list_head
*link
;
2499 version_major
= ipmi_version_major(device_id
);
2500 version_minor
= ipmi_version_minor(device_id
);
2502 /* Make sure the driver is actually initialized, this handles
2503 problems with initialization order. */
2505 rv
= ipmi_init_msghandler();
2508 /* The init code doesn't return an error if it was turned
2509 off, but it won't initialize. Check that. */
2514 intf
= kmalloc(sizeof(*intf
), GFP_KERNEL
);
2517 memset(intf
, 0, sizeof(*intf
));
2518 intf
->bmc
= kzalloc(sizeof(*intf
->bmc
), GFP_KERNEL
);
2523 intf
->intf_num
= -1; /* Mark it invalid for now. */
2524 kref_init(&intf
->refcount
);
2525 intf
->bmc
->id
= *device_id
;
2526 intf
->si_dev
= si_dev
;
2527 for (j
= 0; j
< IPMI_MAX_CHANNELS
; j
++) {
2528 intf
->channels
[j
].address
= IPMI_BMC_SLAVE_ADDR
;
2529 intf
->channels
[j
].lun
= 2;
2531 if (slave_addr
!= 0)
2532 intf
->channels
[0].address
= slave_addr
;
2533 INIT_LIST_HEAD(&intf
->users
);
2534 intf
->handlers
= handlers
;
2535 intf
->send_info
= send_info
;
2536 spin_lock_init(&intf
->seq_lock
);
2537 for (j
= 0; j
< IPMI_IPMB_NUM_SEQ
; j
++) {
2538 intf
->seq_table
[j
].inuse
= 0;
2539 intf
->seq_table
[j
].seqid
= 0;
2542 #ifdef CONFIG_PROC_FS
2543 spin_lock_init(&intf
->proc_entry_lock
);
2545 spin_lock_init(&intf
->waiting_msgs_lock
);
2546 INIT_LIST_HEAD(&intf
->waiting_msgs
);
2547 spin_lock_init(&intf
->events_lock
);
2548 INIT_LIST_HEAD(&intf
->waiting_events
);
2549 intf
->waiting_events_count
= 0;
2550 mutex_init(&intf
->cmd_rcvrs_mutex
);
2551 INIT_LIST_HEAD(&intf
->cmd_rcvrs
);
2552 init_waitqueue_head(&intf
->waitq
);
2554 spin_lock_init(&intf
->counter_lock
);
2555 intf
->proc_dir
= NULL
;
2557 mutex_lock(&ipmi_interfaces_mutex
);
2558 /* Look for a hole in the numbers. */
2560 link
= &ipmi_interfaces
;
2561 list_for_each_entry_rcu(tintf
, &ipmi_interfaces
, link
) {
2562 if (tintf
->intf_num
!= i
) {
2563 link
= &tintf
->link
;
2568 /* Add the new interface in numeric order. */
2570 list_add_rcu(&intf
->link
, &ipmi_interfaces
);
2572 list_add_tail_rcu(&intf
->link
, link
);
2574 rv
= handlers
->start_processing(send_info
, intf
);
2580 if ((version_major
> 1)
2581 || ((version_major
== 1) && (version_minor
>= 5)))
2583 /* Start scanning the channels to see what is
2585 intf
->null_user_handler
= channel_handler
;
2586 intf
->curr_channel
= 0;
2587 rv
= send_channel_info_cmd(intf
, 0);
2591 /* Wait for the channel info to be read. */
2592 wait_event(intf
->waitq
,
2593 intf
->curr_channel
>= IPMI_MAX_CHANNELS
);
2594 intf
->null_user_handler
= NULL
;
2596 /* Assume a single IPMB channel at zero. */
2597 intf
->channels
[0].medium
= IPMI_CHANNEL_MEDIUM_IPMB
;
2598 intf
->channels
[0].protocol
= IPMI_CHANNEL_PROTOCOL_IPMB
;
2602 rv
= add_proc_entries(intf
, i
);
2604 rv
= ipmi_bmc_register(intf
, i
, sysfs_name
);
2609 remove_proc_entries(intf
);
2610 list_del_rcu(&intf
->link
);
2611 mutex_unlock(&ipmi_interfaces_mutex
);
2613 kref_put(&intf
->refcount
, intf_free
);
2615 /* After this point the interface is legal to use. */
2617 mutex_unlock(&ipmi_interfaces_mutex
);
2618 call_smi_watchers(i
, intf
->si_dev
);
2624 int ipmi_unregister_smi(ipmi_smi_t intf
)
2626 struct ipmi_smi_watcher
*w
;
2628 ipmi_bmc_unregister(intf
);
2630 mutex_lock(&ipmi_interfaces_mutex
);
2631 list_del_rcu(&intf
->link
);
2632 mutex_unlock(&ipmi_interfaces_mutex
);
2635 remove_proc_entries(intf
);
2637 /* Call all the watcher interfaces to tell them that
2638 an interface is gone. */
2639 down_read(&smi_watchers_sem
);
2640 list_for_each_entry(w
, &smi_watchers
, link
)
2641 w
->smi_gone(intf
->intf_num
);
2642 up_read(&smi_watchers_sem
);
2644 kref_put(&intf
->refcount
, intf_free
);
2648 static int handle_ipmb_get_msg_rsp(ipmi_smi_t intf
,
2649 struct ipmi_smi_msg
*msg
)
2651 struct ipmi_ipmb_addr ipmb_addr
;
2652 struct ipmi_recv_msg
*recv_msg
;
2653 unsigned long flags
;
2656 /* This is 11, not 10, because the response must contain a
2657 * completion code. */
2658 if (msg
->rsp_size
< 11) {
2659 /* Message not big enough, just ignore it. */
2660 spin_lock_irqsave(&intf
->counter_lock
, flags
);
2661 intf
->invalid_ipmb_responses
++;
2662 spin_unlock_irqrestore(&intf
->counter_lock
, flags
);
2666 if (msg
->rsp
[2] != 0) {
2667 /* An error getting the response, just ignore it. */
2671 ipmb_addr
.addr_type
= IPMI_IPMB_ADDR_TYPE
;
2672 ipmb_addr
.slave_addr
= msg
->rsp
[6];
2673 ipmb_addr
.channel
= msg
->rsp
[3] & 0x0f;
2674 ipmb_addr
.lun
= msg
->rsp
[7] & 3;
2676 /* It's a response from a remote entity. Look up the sequence
2677 number and handle the response. */
2678 if (intf_find_seq(intf
,
2682 (msg
->rsp
[4] >> 2) & (~1),
2683 (struct ipmi_addr
*) &(ipmb_addr
),
2686 /* We were unable to find the sequence number,
2687 so just nuke the message. */
2688 spin_lock_irqsave(&intf
->counter_lock
, flags
);
2689 intf
->unhandled_ipmb_responses
++;
2690 spin_unlock_irqrestore(&intf
->counter_lock
, flags
);
2694 memcpy(recv_msg
->msg_data
,
2697 /* THe other fields matched, so no need to set them, except
2698 for netfn, which needs to be the response that was
2699 returned, not the request value. */
2700 recv_msg
->msg
.netfn
= msg
->rsp
[4] >> 2;
2701 recv_msg
->msg
.data
= recv_msg
->msg_data
;
2702 recv_msg
->msg
.data_len
= msg
->rsp_size
- 10;
2703 recv_msg
->recv_type
= IPMI_RESPONSE_RECV_TYPE
;
2704 spin_lock_irqsave(&intf
->counter_lock
, flags
);
2705 intf
->handled_ipmb_responses
++;
2706 spin_unlock_irqrestore(&intf
->counter_lock
, flags
);
2707 deliver_response(recv_msg
);
2712 static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf
,
2713 struct ipmi_smi_msg
*msg
)
2715 struct cmd_rcvr
*rcvr
;
2717 unsigned char netfn
;
2720 ipmi_user_t user
= NULL
;
2721 struct ipmi_ipmb_addr
*ipmb_addr
;
2722 struct ipmi_recv_msg
*recv_msg
;
2723 unsigned long flags
;
2725 if (msg
->rsp_size
< 10) {
2726 /* Message not big enough, just ignore it. */
2727 spin_lock_irqsave(&intf
->counter_lock
, flags
);
2728 intf
->invalid_commands
++;
2729 spin_unlock_irqrestore(&intf
->counter_lock
, flags
);
2733 if (msg
->rsp
[2] != 0) {
2734 /* An error getting the response, just ignore it. */
2738 netfn
= msg
->rsp
[4] >> 2;
2740 chan
= msg
->rsp
[3] & 0xf;
2743 rcvr
= find_cmd_rcvr(intf
, netfn
, cmd
, chan
);
2746 kref_get(&user
->refcount
);
2752 /* We didn't find a user, deliver an error response. */
2753 spin_lock_irqsave(&intf
->counter_lock
, flags
);
2754 intf
->unhandled_commands
++;
2755 spin_unlock_irqrestore(&intf
->counter_lock
, flags
);
2757 msg
->data
[0] = (IPMI_NETFN_APP_REQUEST
<< 2);
2758 msg
->data
[1] = IPMI_SEND_MSG_CMD
;
2759 msg
->data
[2] = msg
->rsp
[3];
2760 msg
->data
[3] = msg
->rsp
[6];
2761 msg
->data
[4] = ((netfn
+ 1) << 2) | (msg
->rsp
[7] & 0x3);
2762 msg
->data
[5] = ipmb_checksum(&(msg
->data
[3]), 2);
2763 msg
->data
[6] = intf
->channels
[msg
->rsp
[3] & 0xf].address
;
2765 msg
->data
[7] = (msg
->rsp
[7] & 0xfc) | (msg
->rsp
[4] & 0x3);
2766 msg
->data
[8] = msg
->rsp
[8]; /* cmd */
2767 msg
->data
[9] = IPMI_INVALID_CMD_COMPLETION_CODE
;
2768 msg
->data
[10] = ipmb_checksum(&(msg
->data
[6]), 4);
2769 msg
->data_size
= 11;
2774 printk("Invalid command:");
2775 for (m
= 0; m
< msg
->data_size
; m
++)
2776 printk(" %2.2x", msg
->data
[m
]);
2780 intf
->handlers
->sender(intf
->send_info
, msg
, 0);
2782 rv
= -1; /* We used the message, so return the value that
2783 causes it to not be freed or queued. */
2785 /* Deliver the message to the user. */
2786 spin_lock_irqsave(&intf
->counter_lock
, flags
);
2787 intf
->handled_commands
++;
2788 spin_unlock_irqrestore(&intf
->counter_lock
, flags
);
2790 recv_msg
= ipmi_alloc_recv_msg();
2792 /* We couldn't allocate memory for the
2793 message, so requeue it for handling
2796 kref_put(&user
->refcount
, free_user
);
2798 /* Extract the source address from the data. */
2799 ipmb_addr
= (struct ipmi_ipmb_addr
*) &recv_msg
->addr
;
2800 ipmb_addr
->addr_type
= IPMI_IPMB_ADDR_TYPE
;
2801 ipmb_addr
->slave_addr
= msg
->rsp
[6];
2802 ipmb_addr
->lun
= msg
->rsp
[7] & 3;
2803 ipmb_addr
->channel
= msg
->rsp
[3] & 0xf;
2805 /* Extract the rest of the message information
2806 from the IPMB header.*/
2807 recv_msg
->user
= user
;
2808 recv_msg
->recv_type
= IPMI_CMD_RECV_TYPE
;
2809 recv_msg
->msgid
= msg
->rsp
[7] >> 2;
2810 recv_msg
->msg
.netfn
= msg
->rsp
[4] >> 2;
2811 recv_msg
->msg
.cmd
= msg
->rsp
[8];
2812 recv_msg
->msg
.data
= recv_msg
->msg_data
;
2814 /* We chop off 10, not 9 bytes because the checksum
2815 at the end also needs to be removed. */
2816 recv_msg
->msg
.data_len
= msg
->rsp_size
- 10;
2817 memcpy(recv_msg
->msg_data
,
2819 msg
->rsp_size
- 10);
2820 deliver_response(recv_msg
);
2827 static int handle_lan_get_msg_rsp(ipmi_smi_t intf
,
2828 struct ipmi_smi_msg
*msg
)
2830 struct ipmi_lan_addr lan_addr
;
2831 struct ipmi_recv_msg
*recv_msg
;
2832 unsigned long flags
;
2835 /* This is 13, not 12, because the response must contain a
2836 * completion code. */
2837 if (msg
->rsp_size
< 13) {
2838 /* Message not big enough, just ignore it. */
2839 spin_lock_irqsave(&intf
->counter_lock
, flags
);
2840 intf
->invalid_lan_responses
++;
2841 spin_unlock_irqrestore(&intf
->counter_lock
, flags
);
2845 if (msg
->rsp
[2] != 0) {
2846 /* An error getting the response, just ignore it. */
2850 lan_addr
.addr_type
= IPMI_LAN_ADDR_TYPE
;
2851 lan_addr
.session_handle
= msg
->rsp
[4];
2852 lan_addr
.remote_SWID
= msg
->rsp
[8];
2853 lan_addr
.local_SWID
= msg
->rsp
[5];
2854 lan_addr
.channel
= msg
->rsp
[3] & 0x0f;
2855 lan_addr
.privilege
= msg
->rsp
[3] >> 4;
2856 lan_addr
.lun
= msg
->rsp
[9] & 3;
2858 /* It's a response from a remote entity. Look up the sequence
2859 number and handle the response. */
2860 if (intf_find_seq(intf
,
2864 (msg
->rsp
[6] >> 2) & (~1),
2865 (struct ipmi_addr
*) &(lan_addr
),
2868 /* We were unable to find the sequence number,
2869 so just nuke the message. */
2870 spin_lock_irqsave(&intf
->counter_lock
, flags
);
2871 intf
->unhandled_lan_responses
++;
2872 spin_unlock_irqrestore(&intf
->counter_lock
, flags
);
2876 memcpy(recv_msg
->msg_data
,
2878 msg
->rsp_size
- 11);
2879 /* The other fields matched, so no need to set them, except
2880 for netfn, which needs to be the response that was
2881 returned, not the request value. */
2882 recv_msg
->msg
.netfn
= msg
->rsp
[6] >> 2;
2883 recv_msg
->msg
.data
= recv_msg
->msg_data
;
2884 recv_msg
->msg
.data_len
= msg
->rsp_size
- 12;
2885 recv_msg
->recv_type
= IPMI_RESPONSE_RECV_TYPE
;
2886 spin_lock_irqsave(&intf
->counter_lock
, flags
);
2887 intf
->handled_lan_responses
++;
2888 spin_unlock_irqrestore(&intf
->counter_lock
, flags
);
2889 deliver_response(recv_msg
);
2894 static int handle_lan_get_msg_cmd(ipmi_smi_t intf
,
2895 struct ipmi_smi_msg
*msg
)
2897 struct cmd_rcvr
*rcvr
;
2899 unsigned char netfn
;
2902 ipmi_user_t user
= NULL
;
2903 struct ipmi_lan_addr
*lan_addr
;
2904 struct ipmi_recv_msg
*recv_msg
;
2905 unsigned long flags
;
2907 if (msg
->rsp_size
< 12) {
2908 /* Message not big enough, just ignore it. */
2909 spin_lock_irqsave(&intf
->counter_lock
, flags
);
2910 intf
->invalid_commands
++;
2911 spin_unlock_irqrestore(&intf
->counter_lock
, flags
);
2915 if (msg
->rsp
[2] != 0) {
2916 /* An error getting the response, just ignore it. */
2920 netfn
= msg
->rsp
[6] >> 2;
2922 chan
= msg
->rsp
[3] & 0xf;
2925 rcvr
= find_cmd_rcvr(intf
, netfn
, cmd
, chan
);
2928 kref_get(&user
->refcount
);
2934 /* We didn't find a user, just give up. */
2935 spin_lock_irqsave(&intf
->counter_lock
, flags
);
2936 intf
->unhandled_commands
++;
2937 spin_unlock_irqrestore(&intf
->counter_lock
, flags
);
2939 rv
= 0; /* Don't do anything with these messages, just
2940 allow them to be freed. */
2942 /* Deliver the message to the user. */
2943 spin_lock_irqsave(&intf
->counter_lock
, flags
);
2944 intf
->handled_commands
++;
2945 spin_unlock_irqrestore(&intf
->counter_lock
, flags
);
2947 recv_msg
= ipmi_alloc_recv_msg();
2949 /* We couldn't allocate memory for the
2950 message, so requeue it for handling
2953 kref_put(&user
->refcount
, free_user
);
2955 /* Extract the source address from the data. */
2956 lan_addr
= (struct ipmi_lan_addr
*) &recv_msg
->addr
;
2957 lan_addr
->addr_type
= IPMI_LAN_ADDR_TYPE
;
2958 lan_addr
->session_handle
= msg
->rsp
[4];
2959 lan_addr
->remote_SWID
= msg
->rsp
[8];
2960 lan_addr
->local_SWID
= msg
->rsp
[5];
2961 lan_addr
->lun
= msg
->rsp
[9] & 3;
2962 lan_addr
->channel
= msg
->rsp
[3] & 0xf;
2963 lan_addr
->privilege
= msg
->rsp
[3] >> 4;
2965 /* Extract the rest of the message information
2966 from the IPMB header.*/
2967 recv_msg
->user
= user
;
2968 recv_msg
->recv_type
= IPMI_CMD_RECV_TYPE
;
2969 recv_msg
->msgid
= msg
->rsp
[9] >> 2;
2970 recv_msg
->msg
.netfn
= msg
->rsp
[6] >> 2;
2971 recv_msg
->msg
.cmd
= msg
->rsp
[10];
2972 recv_msg
->msg
.data
= recv_msg
->msg_data
;
2974 /* We chop off 12, not 11 bytes because the checksum
2975 at the end also needs to be removed. */
2976 recv_msg
->msg
.data_len
= msg
->rsp_size
- 12;
2977 memcpy(recv_msg
->msg_data
,
2979 msg
->rsp_size
- 12);
2980 deliver_response(recv_msg
);
2987 static void copy_event_into_recv_msg(struct ipmi_recv_msg
*recv_msg
,
2988 struct ipmi_smi_msg
*msg
)
2990 struct ipmi_system_interface_addr
*smi_addr
;
2992 recv_msg
->msgid
= 0;
2993 smi_addr
= (struct ipmi_system_interface_addr
*) &(recv_msg
->addr
);
2994 smi_addr
->addr_type
= IPMI_SYSTEM_INTERFACE_ADDR_TYPE
;
2995 smi_addr
->channel
= IPMI_BMC_CHANNEL
;
2996 smi_addr
->lun
= msg
->rsp
[0] & 3;
2997 recv_msg
->recv_type
= IPMI_ASYNC_EVENT_RECV_TYPE
;
2998 recv_msg
->msg
.netfn
= msg
->rsp
[0] >> 2;
2999 recv_msg
->msg
.cmd
= msg
->rsp
[1];
3000 memcpy(recv_msg
->msg_data
, &(msg
->rsp
[3]), msg
->rsp_size
- 3);
3001 recv_msg
->msg
.data
= recv_msg
->msg_data
;
3002 recv_msg
->msg
.data_len
= msg
->rsp_size
- 3;
3005 static int handle_read_event_rsp(ipmi_smi_t intf
,
3006 struct ipmi_smi_msg
*msg
)
3008 struct ipmi_recv_msg
*recv_msg
, *recv_msg2
;
3009 struct list_head msgs
;
3012 int deliver_count
= 0;
3013 unsigned long flags
;
3015 if (msg
->rsp_size
< 19) {
3016 /* Message is too small to be an IPMB event. */
3017 spin_lock_irqsave(&intf
->counter_lock
, flags
);
3018 intf
->invalid_events
++;
3019 spin_unlock_irqrestore(&intf
->counter_lock
, flags
);
3023 if (msg
->rsp
[2] != 0) {
3024 /* An error getting the event, just ignore it. */
3028 INIT_LIST_HEAD(&msgs
);
3030 spin_lock_irqsave(&intf
->events_lock
, flags
);
3032 spin_lock(&intf
->counter_lock
);
3034 spin_unlock(&intf
->counter_lock
);
3036 /* Allocate and fill in one message for every user that is getting
3039 list_for_each_entry_rcu(user
, &intf
->users
, link
) {
3040 if (!user
->gets_events
)
3043 recv_msg
= ipmi_alloc_recv_msg();
3046 list_for_each_entry_safe(recv_msg
, recv_msg2
, &msgs
,
3048 list_del(&recv_msg
->link
);
3049 ipmi_free_recv_msg(recv_msg
);
3051 /* We couldn't allocate memory for the
3052 message, so requeue it for handling
3060 copy_event_into_recv_msg(recv_msg
, msg
);
3061 recv_msg
->user
= user
;
3062 kref_get(&user
->refcount
);
3063 list_add_tail(&(recv_msg
->link
), &msgs
);
3067 if (deliver_count
) {
3068 /* Now deliver all the messages. */
3069 list_for_each_entry_safe(recv_msg
, recv_msg2
, &msgs
, link
) {
3070 list_del(&recv_msg
->link
);
3071 deliver_response(recv_msg
);
3073 } else if (intf
->waiting_events_count
< MAX_EVENTS_IN_QUEUE
) {
3074 /* No one to receive the message, put it in queue if there's
3075 not already too many things in the queue. */
3076 recv_msg
= ipmi_alloc_recv_msg();
3078 /* We couldn't allocate memory for the
3079 message, so requeue it for handling
3085 copy_event_into_recv_msg(recv_msg
, msg
);
3086 list_add_tail(&(recv_msg
->link
), &(intf
->waiting_events
));
3087 intf
->waiting_events_count
++;
3089 /* There's too many things in the queue, discard this
3091 printk(KERN_WARNING PFX
"Event queue full, discarding an"
3092 " incoming event\n");
3096 spin_unlock_irqrestore(&(intf
->events_lock
), flags
);
3101 static int handle_bmc_rsp(ipmi_smi_t intf
,
3102 struct ipmi_smi_msg
*msg
)
3104 struct ipmi_recv_msg
*recv_msg
;
3105 unsigned long flags
;
3106 struct ipmi_user
*user
;
3108 recv_msg
= (struct ipmi_recv_msg
*) msg
->user_data
;
3109 if (recv_msg
== NULL
)
3111 printk(KERN_WARNING
"IPMI message received with no owner. This\n"
3112 "could be because of a malformed message, or\n"
3113 "because of a hardware error. Contact your\n"
3114 "hardware vender for assistance\n");
3118 user
= recv_msg
->user
;
3119 /* Make sure the user still exists. */
3120 if (user
&& !user
->valid
) {
3121 /* The user for the message went away, so give up. */
3122 spin_lock_irqsave(&intf
->counter_lock
, flags
);
3123 intf
->unhandled_local_responses
++;
3124 spin_unlock_irqrestore(&intf
->counter_lock
, flags
);
3125 ipmi_free_recv_msg(recv_msg
);
3127 struct ipmi_system_interface_addr
*smi_addr
;
3129 spin_lock_irqsave(&intf
->counter_lock
, flags
);
3130 intf
->handled_local_responses
++;
3131 spin_unlock_irqrestore(&intf
->counter_lock
, flags
);
3132 recv_msg
->recv_type
= IPMI_RESPONSE_RECV_TYPE
;
3133 recv_msg
->msgid
= msg
->msgid
;
3134 smi_addr
= ((struct ipmi_system_interface_addr
*)
3136 smi_addr
->addr_type
= IPMI_SYSTEM_INTERFACE_ADDR_TYPE
;
3137 smi_addr
->channel
= IPMI_BMC_CHANNEL
;
3138 smi_addr
->lun
= msg
->rsp
[0] & 3;
3139 recv_msg
->msg
.netfn
= msg
->rsp
[0] >> 2;
3140 recv_msg
->msg
.cmd
= msg
->rsp
[1];
3141 memcpy(recv_msg
->msg_data
,
3144 recv_msg
->msg
.data
= recv_msg
->msg_data
;
3145 recv_msg
->msg
.data_len
= msg
->rsp_size
- 2;
3146 deliver_response(recv_msg
);
3152 /* Handle a new message. Return 1 if the message should be requeued,
3153 0 if the message should be freed, or -1 if the message should not
3154 be freed or requeued. */
3155 static int handle_new_recv_msg(ipmi_smi_t intf
,
3156 struct ipmi_smi_msg
*msg
)
3164 for (m
= 0; m
< msg
->rsp_size
; m
++)
3165 printk(" %2.2x", msg
->rsp
[m
]);
3168 if (msg
->rsp_size
< 2) {
3169 /* Message is too small to be correct. */
3170 printk(KERN_WARNING PFX
"BMC returned to small a message"
3171 " for netfn %x cmd %x, got %d bytes\n",
3172 (msg
->data
[0] >> 2) | 1, msg
->data
[1], msg
->rsp_size
);
3174 /* Generate an error response for the message. */
3175 msg
->rsp
[0] = msg
->data
[0] | (1 << 2);
3176 msg
->rsp
[1] = msg
->data
[1];
3177 msg
->rsp
[2] = IPMI_ERR_UNSPECIFIED
;
3179 } else if (((msg
->rsp
[0] >> 2) != ((msg
->data
[0] >> 2) | 1))/* Netfn */
3180 || (msg
->rsp
[1] != msg
->data
[1])) /* Command */
3182 /* The response is not even marginally correct. */
3183 printk(KERN_WARNING PFX
"BMC returned incorrect response,"
3184 " expected netfn %x cmd %x, got netfn %x cmd %x\n",
3185 (msg
->data
[0] >> 2) | 1, msg
->data
[1],
3186 msg
->rsp
[0] >> 2, msg
->rsp
[1]);
3188 /* Generate an error response for the message. */
3189 msg
->rsp
[0] = msg
->data
[0] | (1 << 2);
3190 msg
->rsp
[1] = msg
->data
[1];
3191 msg
->rsp
[2] = IPMI_ERR_UNSPECIFIED
;
3195 if ((msg
->rsp
[0] == ((IPMI_NETFN_APP_REQUEST
|1) << 2))
3196 && (msg
->rsp
[1] == IPMI_SEND_MSG_CMD
)
3197 && (msg
->user_data
!= NULL
))
3199 /* It's a response to a response we sent. For this we
3200 deliver a send message response to the user. */
3201 struct ipmi_recv_msg
*recv_msg
= msg
->user_data
;
3204 if (msg
->rsp_size
< 2)
3205 /* Message is too small to be correct. */
3208 chan
= msg
->data
[2] & 0x0f;
3209 if (chan
>= IPMI_MAX_CHANNELS
)
3210 /* Invalid channel number */
3216 /* Make sure the user still exists. */
3217 if (!recv_msg
->user
|| !recv_msg
->user
->valid
)
3220 recv_msg
->recv_type
= IPMI_RESPONSE_RESPONSE_TYPE
;
3221 recv_msg
->msg
.data
= recv_msg
->msg_data
;
3222 recv_msg
->msg
.data_len
= 1;
3223 recv_msg
->msg_data
[0] = msg
->rsp
[2];
3224 deliver_response(recv_msg
);
3225 } else if ((msg
->rsp
[0] == ((IPMI_NETFN_APP_REQUEST
|1) << 2))
3226 && (msg
->rsp
[1] == IPMI_GET_MSG_CMD
))
3228 /* It's from the receive queue. */
3229 chan
= msg
->rsp
[3] & 0xf;
3230 if (chan
>= IPMI_MAX_CHANNELS
) {
3231 /* Invalid channel number */
3236 switch (intf
->channels
[chan
].medium
) {
3237 case IPMI_CHANNEL_MEDIUM_IPMB
:
3238 if (msg
->rsp
[4] & 0x04) {
3239 /* It's a response, so find the
3240 requesting message and send it up. */
3241 requeue
= handle_ipmb_get_msg_rsp(intf
, msg
);
3243 /* It's a command to the SMS from some other
3244 entity. Handle that. */
3245 requeue
= handle_ipmb_get_msg_cmd(intf
, msg
);
3249 case IPMI_CHANNEL_MEDIUM_8023LAN
:
3250 case IPMI_CHANNEL_MEDIUM_ASYNC
:
3251 if (msg
->rsp
[6] & 0x04) {
3252 /* It's a response, so find the
3253 requesting message and send it up. */
3254 requeue
= handle_lan_get_msg_rsp(intf
, msg
);
3256 /* It's a command to the SMS from some other
3257 entity. Handle that. */
3258 requeue
= handle_lan_get_msg_cmd(intf
, msg
);
3263 /* We don't handle the channel type, so just
3264 * free the message. */
3268 } else if ((msg
->rsp
[0] == ((IPMI_NETFN_APP_REQUEST
|1) << 2))
3269 && (msg
->rsp
[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD
))
3271 /* It's an asyncronous event. */
3272 requeue
= handle_read_event_rsp(intf
, msg
);
3274 /* It's a response from the local BMC. */
3275 requeue
= handle_bmc_rsp(intf
, msg
);
3282 /* Handle a new message from the lower layer. */
3283 void ipmi_smi_msg_received(ipmi_smi_t intf
,
3284 struct ipmi_smi_msg
*msg
)
3286 unsigned long flags
;
3290 if ((msg
->data_size
>= 2)
3291 && (msg
->data
[0] == (IPMI_NETFN_APP_REQUEST
<< 2))
3292 && (msg
->data
[1] == IPMI_SEND_MSG_CMD
)
3293 && (msg
->user_data
== NULL
))
3295 /* This is the local response to a command send, start
3296 the timer for these. The user_data will not be
3297 NULL if this is a response send, and we will let
3298 response sends just go through. */
3300 /* Check for errors, if we get certain errors (ones
3301 that mean basically we can try again later), we
3302 ignore them and start the timer. Otherwise we
3303 report the error immediately. */
3304 if ((msg
->rsp_size
>= 3) && (msg
->rsp
[2] != 0)
3305 && (msg
->rsp
[2] != IPMI_NODE_BUSY_ERR
)
3306 && (msg
->rsp
[2] != IPMI_LOST_ARBITRATION_ERR
)
3307 && (msg
->rsp
[2] != IPMI_BUS_ERR
)
3308 && (msg
->rsp
[2] != IPMI_NAK_ON_WRITE_ERR
))
3310 int chan
= msg
->rsp
[3] & 0xf;
3312 /* Got an error sending the message, handle it. */
3313 spin_lock_irqsave(&intf
->counter_lock
, flags
);
3314 if (chan
>= IPMI_MAX_CHANNELS
)
3315 ; /* This shouldn't happen */
3316 else if ((intf
->channels
[chan
].medium
3317 == IPMI_CHANNEL_MEDIUM_8023LAN
)
3318 || (intf
->channels
[chan
].medium
3319 == IPMI_CHANNEL_MEDIUM_ASYNC
))
3320 intf
->sent_lan_command_errs
++;
3322 intf
->sent_ipmb_command_errs
++;
3323 spin_unlock_irqrestore(&intf
->counter_lock
, flags
);
3324 intf_err_seq(intf
, msg
->msgid
, msg
->rsp
[2]);
3326 /* The message was sent, start the timer. */
3327 intf_start_seq_timer(intf
, msg
->msgid
);
3330 ipmi_free_smi_msg(msg
);
3334 /* To preserve message order, if the list is not empty, we
3335 tack this message onto the end of the list. */
3336 spin_lock_irqsave(&intf
->waiting_msgs_lock
, flags
);
3337 if (!list_empty(&intf
->waiting_msgs
)) {
3338 list_add_tail(&msg
->link
, &intf
->waiting_msgs
);
3339 spin_unlock_irqrestore(&intf
->waiting_msgs_lock
, flags
);
3342 spin_unlock_irqrestore(&intf
->waiting_msgs_lock
, flags
);
3344 rv
= handle_new_recv_msg(intf
, msg
);
3346 /* Could not handle the message now, just add it to a
3347 list to handle later. */
3348 spin_lock_irqsave(&intf
->waiting_msgs_lock
, flags
);
3349 list_add_tail(&msg
->link
, &intf
->waiting_msgs
);
3350 spin_unlock_irqrestore(&intf
->waiting_msgs_lock
, flags
);
3351 } else if (rv
== 0) {
3352 ipmi_free_smi_msg(msg
);
3359 void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf
)
3364 list_for_each_entry_rcu(user
, &intf
->users
, link
) {
3365 if (!user
->handler
->ipmi_watchdog_pretimeout
)
3368 user
->handler
->ipmi_watchdog_pretimeout(user
->handler_data
);
3374 handle_msg_timeout(struct ipmi_recv_msg
*msg
)
3376 msg
->recv_type
= IPMI_RESPONSE_RECV_TYPE
;
3377 msg
->msg_data
[0] = IPMI_TIMEOUT_COMPLETION_CODE
;
3378 msg
->msg
.netfn
|= 1; /* Convert to a response. */
3379 msg
->msg
.data_len
= 1;
3380 msg
->msg
.data
= msg
->msg_data
;
3381 deliver_response(msg
);
3384 static struct ipmi_smi_msg
*
3385 smi_from_recv_msg(ipmi_smi_t intf
, struct ipmi_recv_msg
*recv_msg
,
3386 unsigned char seq
, long seqid
)
3388 struct ipmi_smi_msg
*smi_msg
= ipmi_alloc_smi_msg();
3390 /* If we can't allocate the message, then just return, we
3391 get 4 retries, so this should be ok. */
3394 memcpy(smi_msg
->data
, recv_msg
->msg
.data
, recv_msg
->msg
.data_len
);
3395 smi_msg
->data_size
= recv_msg
->msg
.data_len
;
3396 smi_msg
->msgid
= STORE_SEQ_IN_MSGID(seq
, seqid
);
3402 for (m
= 0; m
< smi_msg
->data_size
; m
++)
3403 printk(" %2.2x", smi_msg
->data
[m
]);
3410 static void check_msg_timeout(ipmi_smi_t intf
, struct seq_table
*ent
,
3411 struct list_head
*timeouts
, long timeout_period
,
3412 int slot
, unsigned long *flags
)
3414 struct ipmi_recv_msg
*msg
;
3419 ent
->timeout
-= timeout_period
;
3420 if (ent
->timeout
> 0)
3423 if (ent
->retries_left
== 0) {
3424 /* The message has used all its retries. */
3426 msg
= ent
->recv_msg
;
3427 list_add_tail(&msg
->link
, timeouts
);
3428 spin_lock(&intf
->counter_lock
);
3430 intf
->timed_out_ipmb_broadcasts
++;
3431 else if (ent
->recv_msg
->addr
.addr_type
== IPMI_LAN_ADDR_TYPE
)
3432 intf
->timed_out_lan_commands
++;
3434 intf
->timed_out_ipmb_commands
++;
3435 spin_unlock(&intf
->counter_lock
);
3437 struct ipmi_smi_msg
*smi_msg
;
3438 /* More retries, send again. */
3440 /* Start with the max timer, set to normal
3441 timer after the message is sent. */
3442 ent
->timeout
= MAX_MSG_TIMEOUT
;
3443 ent
->retries_left
--;
3444 spin_lock(&intf
->counter_lock
);
3445 if (ent
->recv_msg
->addr
.addr_type
== IPMI_LAN_ADDR_TYPE
)
3446 intf
->retransmitted_lan_commands
++;
3448 intf
->retransmitted_ipmb_commands
++;
3449 spin_unlock(&intf
->counter_lock
);
3451 smi_msg
= smi_from_recv_msg(intf
, ent
->recv_msg
, slot
,
3456 spin_unlock_irqrestore(&intf
->seq_lock
, *flags
);
3457 /* Send the new message. We send with a zero
3458 * priority. It timed out, I doubt time is
3459 * that critical now, and high priority
3460 * messages are really only for messages to the
3461 * local MC, which don't get resent. */
3462 intf
->handlers
->sender(intf
->send_info
,
3464 spin_lock_irqsave(&intf
->seq_lock
, *flags
);
3468 static void ipmi_timeout_handler(long timeout_period
)
3471 struct list_head timeouts
;
3472 struct ipmi_recv_msg
*msg
, *msg2
;
3473 struct ipmi_smi_msg
*smi_msg
, *smi_msg2
;
3474 unsigned long flags
;
3477 INIT_LIST_HEAD(&timeouts
);
3480 list_for_each_entry_rcu(intf
, &ipmi_interfaces
, link
) {
3481 /* See if any waiting messages need to be processed. */
3482 spin_lock_irqsave(&intf
->waiting_msgs_lock
, flags
);
3483 list_for_each_entry_safe(smi_msg
, smi_msg2
,
3484 &intf
->waiting_msgs
, link
) {
3485 if (!handle_new_recv_msg(intf
, smi_msg
)) {
3486 list_del(&smi_msg
->link
);
3487 ipmi_free_smi_msg(smi_msg
);
3489 /* To preserve message order, quit if we
3490 can't handle a message. */
3494 spin_unlock_irqrestore(&intf
->waiting_msgs_lock
, flags
);
3496 /* Go through the seq table and find any messages that
3497 have timed out, putting them in the timeouts
3499 spin_lock_irqsave(&intf
->seq_lock
, flags
);
3500 for (i
= 0; i
< IPMI_IPMB_NUM_SEQ
; i
++)
3501 check_msg_timeout(intf
, &(intf
->seq_table
[i
]),
3502 &timeouts
, timeout_period
, i
,
3504 spin_unlock_irqrestore(&intf
->seq_lock
, flags
);
3506 list_for_each_entry_safe(msg
, msg2
, &timeouts
, link
)
3507 handle_msg_timeout(msg
);
3512 static void ipmi_request_event(void)
3517 list_for_each_entry_rcu(intf
, &ipmi_interfaces
, link
)
3518 intf
->handlers
->request_events(intf
->send_info
);
3522 static struct timer_list ipmi_timer
;
3524 /* Call every ~100 ms. */
3525 #define IPMI_TIMEOUT_TIME 100
3527 /* How many jiffies does it take to get to the timeout time. */
3528 #define IPMI_TIMEOUT_JIFFIES ((IPMI_TIMEOUT_TIME * HZ) / 1000)
3530 /* Request events from the queue every second (this is the number of
3531 IPMI_TIMEOUT_TIMES between event requests). Hopefully, in the
3532 future, IPMI will add a way to know immediately if an event is in
3533 the queue and this silliness can go away. */
3534 #define IPMI_REQUEST_EV_TIME (1000 / (IPMI_TIMEOUT_TIME))
3536 static atomic_t stop_operation
;
3537 static unsigned int ticks_to_req_ev
= IPMI_REQUEST_EV_TIME
;
3539 static void ipmi_timeout(unsigned long data
)
3541 if (atomic_read(&stop_operation
))
3545 if (ticks_to_req_ev
== 0) {
3546 ipmi_request_event();
3547 ticks_to_req_ev
= IPMI_REQUEST_EV_TIME
;
3550 ipmi_timeout_handler(IPMI_TIMEOUT_TIME
);
3552 mod_timer(&ipmi_timer
, jiffies
+ IPMI_TIMEOUT_JIFFIES
);
3556 static atomic_t smi_msg_inuse_count
= ATOMIC_INIT(0);
3557 static atomic_t recv_msg_inuse_count
= ATOMIC_INIT(0);
3559 /* FIXME - convert these to slabs. */
3560 static void free_smi_msg(struct ipmi_smi_msg
*msg
)
3562 atomic_dec(&smi_msg_inuse_count
);
3566 struct ipmi_smi_msg
*ipmi_alloc_smi_msg(void)
3568 struct ipmi_smi_msg
*rv
;
3569 rv
= kmalloc(sizeof(struct ipmi_smi_msg
), GFP_ATOMIC
);
3571 rv
->done
= free_smi_msg
;
3572 rv
->user_data
= NULL
;
3573 atomic_inc(&smi_msg_inuse_count
);
3578 static void free_recv_msg(struct ipmi_recv_msg
*msg
)
3580 atomic_dec(&recv_msg_inuse_count
);
3584 struct ipmi_recv_msg
*ipmi_alloc_recv_msg(void)
3586 struct ipmi_recv_msg
*rv
;
3588 rv
= kmalloc(sizeof(struct ipmi_recv_msg
), GFP_ATOMIC
);
3591 rv
->done
= free_recv_msg
;
3592 atomic_inc(&recv_msg_inuse_count
);
3597 void ipmi_free_recv_msg(struct ipmi_recv_msg
*msg
)
3600 kref_put(&msg
->user
->refcount
, free_user
);
3604 #ifdef CONFIG_IPMI_PANIC_EVENT
3606 static void dummy_smi_done_handler(struct ipmi_smi_msg
*msg
)
3610 static void dummy_recv_done_handler(struct ipmi_recv_msg
*msg
)
3614 #ifdef CONFIG_IPMI_PANIC_STRING
3615 static void event_receiver_fetcher(ipmi_smi_t intf
, struct ipmi_recv_msg
*msg
)
3617 if ((msg
->addr
.addr_type
== IPMI_SYSTEM_INTERFACE_ADDR_TYPE
)
3618 && (msg
->msg
.netfn
== IPMI_NETFN_SENSOR_EVENT_RESPONSE
)
3619 && (msg
->msg
.cmd
== IPMI_GET_EVENT_RECEIVER_CMD
)
3620 && (msg
->msg
.data
[0] == IPMI_CC_NO_ERROR
))
3622 /* A get event receiver command, save it. */
3623 intf
->event_receiver
= msg
->msg
.data
[1];
3624 intf
->event_receiver_lun
= msg
->msg
.data
[2] & 0x3;
3628 static void device_id_fetcher(ipmi_smi_t intf
, struct ipmi_recv_msg
*msg
)
3630 if ((msg
->addr
.addr_type
== IPMI_SYSTEM_INTERFACE_ADDR_TYPE
)
3631 && (msg
->msg
.netfn
== IPMI_NETFN_APP_RESPONSE
)
3632 && (msg
->msg
.cmd
== IPMI_GET_DEVICE_ID_CMD
)
3633 && (msg
->msg
.data
[0] == IPMI_CC_NO_ERROR
))
3635 /* A get device id command, save if we are an event
3636 receiver or generator. */
3637 intf
->local_sel_device
= (msg
->msg
.data
[6] >> 2) & 1;
3638 intf
->local_event_generator
= (msg
->msg
.data
[6] >> 5) & 1;
3643 static void send_panic_events(char *str
)
3645 struct kernel_ipmi_msg msg
;
3647 unsigned char data
[16];
3648 struct ipmi_system_interface_addr
*si
;
3649 struct ipmi_addr addr
;
3650 struct ipmi_smi_msg smi_msg
;
3651 struct ipmi_recv_msg recv_msg
;
3653 si
= (struct ipmi_system_interface_addr
*) &addr
;
3654 si
->addr_type
= IPMI_SYSTEM_INTERFACE_ADDR_TYPE
;
3655 si
->channel
= IPMI_BMC_CHANNEL
;
3658 /* Fill in an event telling that we have failed. */
3659 msg
.netfn
= 0x04; /* Sensor or Event. */
3660 msg
.cmd
= 2; /* Platform event command. */
3663 data
[0] = 0x41; /* Kernel generator ID, IPMI table 5-4 */
3664 data
[1] = 0x03; /* This is for IPMI 1.0. */
3665 data
[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */
3666 data
[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */
3667 data
[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */
3669 /* Put a few breadcrumbs in. Hopefully later we can add more things
3670 to make the panic events more useful. */
3677 smi_msg
.done
= dummy_smi_done_handler
;
3678 recv_msg
.done
= dummy_recv_done_handler
;
3680 /* For every registered interface, send the event. */
3681 list_for_each_entry_rcu(intf
, &ipmi_interfaces
, link
) {
3682 if (intf
->intf_num
== -1)
3683 /* Interface was not ready yet. */
3686 /* Send the event announcing the panic. */
3687 intf
->handlers
->set_run_to_completion(intf
->send_info
, 1);
3688 i_ipmi_request(NULL
,
3697 intf
->channels
[0].address
,
3698 intf
->channels
[0].lun
,
3699 0, 1); /* Don't retry, and don't wait. */
3702 #ifdef CONFIG_IPMI_PANIC_STRING
3703 /* On every interface, dump a bunch of OEM event holding the
3708 /* For every registered interface, send the event. */
3709 list_for_each_entry_rcu(intf
, &ipmi_interfaces
, link
) {
3711 struct ipmi_ipmb_addr
*ipmb
;
3714 if (intf
->intf_num
== -1)
3715 /* Interface was not ready yet. */
3718 /* First job here is to figure out where to send the
3719 OEM events. There's no way in IPMI to send OEM
3720 events using an event send command, so we have to
3721 find the SEL to put them in and stick them in
3724 /* Get capabilities from the get device id. */
3725 intf
->local_sel_device
= 0;
3726 intf
->local_event_generator
= 0;
3727 intf
->event_receiver
= 0;
3729 /* Request the device info from the local MC. */
3730 msg
.netfn
= IPMI_NETFN_APP_REQUEST
;
3731 msg
.cmd
= IPMI_GET_DEVICE_ID_CMD
;
3734 intf
->null_user_handler
= device_id_fetcher
;
3735 i_ipmi_request(NULL
,
3744 intf
->channels
[0].address
,
3745 intf
->channels
[0].lun
,
3746 0, 1); /* Don't retry, and don't wait. */
3748 if (intf
->local_event_generator
) {
3749 /* Request the event receiver from the local MC. */
3750 msg
.netfn
= IPMI_NETFN_SENSOR_EVENT_REQUEST
;
3751 msg
.cmd
= IPMI_GET_EVENT_RECEIVER_CMD
;
3754 intf
->null_user_handler
= event_receiver_fetcher
;
3755 i_ipmi_request(NULL
,
3764 intf
->channels
[0].address
,
3765 intf
->channels
[0].lun
,
3766 0, 1); /* no retry, and no wait. */
3768 intf
->null_user_handler
= NULL
;
3770 /* Validate the event receiver. The low bit must not
3771 be 1 (it must be a valid IPMB address), it cannot
3772 be zero, and it must not be my address. */
3773 if (((intf
->event_receiver
& 1) == 0)
3774 && (intf
->event_receiver
!= 0)
3775 && (intf
->event_receiver
!= intf
->channels
[0].address
))
3777 /* The event receiver is valid, send an IPMB
3779 ipmb
= (struct ipmi_ipmb_addr
*) &addr
;
3780 ipmb
->addr_type
= IPMI_IPMB_ADDR_TYPE
;
3781 ipmb
->channel
= 0; /* FIXME - is this right? */
3782 ipmb
->lun
= intf
->event_receiver_lun
;
3783 ipmb
->slave_addr
= intf
->event_receiver
;
3784 } else if (intf
->local_sel_device
) {
3785 /* The event receiver was not valid (or was
3786 me), but I am an SEL device, just dump it
3788 si
= (struct ipmi_system_interface_addr
*) &addr
;
3789 si
->addr_type
= IPMI_SYSTEM_INTERFACE_ADDR_TYPE
;
3790 si
->channel
= IPMI_BMC_CHANNEL
;
3793 continue; /* No where to send the event. */
3796 msg
.netfn
= IPMI_NETFN_STORAGE_REQUEST
; /* Storage. */
3797 msg
.cmd
= IPMI_ADD_SEL_ENTRY_CMD
;
3803 int size
= strlen(p
);
3809 data
[2] = 0xf0; /* OEM event without timestamp. */
3810 data
[3] = intf
->channels
[0].address
;
3811 data
[4] = j
++; /* sequence # */
3812 /* Always give 11 bytes, so strncpy will fill
3813 it with zeroes for me. */
3814 strncpy(data
+5, p
, 11);
3817 i_ipmi_request(NULL
,
3826 intf
->channels
[0].address
,
3827 intf
->channels
[0].lun
,
3828 0, 1); /* no retry, and no wait. */
3831 #endif /* CONFIG_IPMI_PANIC_STRING */
3833 #endif /* CONFIG_IPMI_PANIC_EVENT */
3835 static int has_panicked
= 0;
3837 static int panic_event(struct notifier_block
*this,
3838 unsigned long event
,
3847 /* For every registered interface, set it to run to completion. */
3848 list_for_each_entry_rcu(intf
, &ipmi_interfaces
, link
) {
3849 if (intf
->intf_num
== -1)
3850 /* Interface was not ready yet. */
3853 intf
->handlers
->set_run_to_completion(intf
->send_info
, 1);
3856 #ifdef CONFIG_IPMI_PANIC_EVENT
3857 send_panic_events(ptr
);
3863 static struct notifier_block panic_block
= {
3864 .notifier_call
= panic_event
,
3866 .priority
= 200 /* priority: INT_MAX >= x >= 0 */
3869 static int ipmi_init_msghandler(void)
3876 rv
= driver_register(&ipmidriver
);
3878 printk(KERN_ERR PFX
"Could not register IPMI driver\n");
3882 printk(KERN_INFO
"ipmi message handler version "
3883 IPMI_DRIVER_VERSION
"\n");
3885 #ifdef CONFIG_PROC_FS
3886 proc_ipmi_root
= proc_mkdir("ipmi", NULL
);
3887 if (!proc_ipmi_root
) {
3888 printk(KERN_ERR PFX
"Unable to create IPMI proc dir");
3892 proc_ipmi_root
->owner
= THIS_MODULE
;
3893 #endif /* CONFIG_PROC_FS */
3895 setup_timer(&ipmi_timer
, ipmi_timeout
, 0);
3896 mod_timer(&ipmi_timer
, jiffies
+ IPMI_TIMEOUT_JIFFIES
);
3898 atomic_notifier_chain_register(&panic_notifier_list
, &panic_block
);
/* Module entry point: delegates to the one-time handler init.
 * NOTE(review): braces and the return statement fall on missing original
 * lines (3906, 3908-3909); presumably the init result is returned --
 * confirm against the full source.  Code tokens left byte-identical. */
3905 static __init
int ipmi_init_msghandler_mod(void)
3907 	ipmi_init_msghandler();
/*
 * NOTE(review): garbled extraction of cleanup_ipmi(), the module exit
 * routine.  Originals 3912-3917, 3919, 3922, 3927, 3931, 3933-3935, 3938,
 * 3940, 3942 and 3944-3945 are missing (braces, the `count` declaration,
 * the `if (count != 0)` checks and printk value arguments).  Visible
 * work, in order: unhook the panic notifier, stop the timeout timer,
 * remove /proc/ipmi, unregister the bus driver, and warn about any
 * message buffers still outstanding.  Code tokens left byte-identical.
 */
3911 static __exit
void cleanup_ipmi(void)
/* Undo the atomic_notifier_chain_register() from init. */
3918 	atomic_notifier_chain_unregister(&panic_notifier_list
, &panic_block
);
3920 	/* This can't be called if any interfaces exist, so no worry about
3921 	shutting down the interfaces. */
3923 	/* Tell the timer to stop, then wait for it to stop. This avoids
3924 	problems with race conditions removing the timer here. */
3925 	atomic_inc(&stop_operation
);
3926 	del_timer_sync(&ipmi_timer
);
3928 #ifdef CONFIG_PROC_FS
3929 	remove_proc_entry(proc_ipmi_root
->name
, &proc_root
);
3930 #endif /* CONFIG_PROC_FS */
3932 	driver_unregister(&ipmidriver
);
/* Leak detection: the inuse counters should be zero at unload; a nonzero
   count means an SMI or recv message buffer was never freed. */
3936 	/* Check for buffer leaks. */
3937 	count
= atomic_read(&smi_msg_inuse_count
);
3939 		printk(KERN_WARNING PFX
"SMI message count %d at exit\n",
3941 	count
= atomic_read(&recv_msg_inuse_count
);
3943 		printk(KERN_WARNING PFX
"recv message count %d at exit\n",
3946 module_exit(cleanup_ipmi
);
3948 module_init(ipmi_init_msghandler_mod
);
3949 MODULE_LICENSE("GPL");
3950 MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
3951 MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI interface.");
3952 MODULE_VERSION(IPMI_DRIVER_VERSION
);
3954 EXPORT_SYMBOL(ipmi_create_user
);
3955 EXPORT_SYMBOL(ipmi_destroy_user
);
3956 EXPORT_SYMBOL(ipmi_get_version
);
3957 EXPORT_SYMBOL(ipmi_request_settime
);
3958 EXPORT_SYMBOL(ipmi_request_supply_msgs
);
3959 EXPORT_SYMBOL(ipmi_register_smi
);
3960 EXPORT_SYMBOL(ipmi_unregister_smi
);
3961 EXPORT_SYMBOL(ipmi_register_for_cmd
);
3962 EXPORT_SYMBOL(ipmi_unregister_for_cmd
);
3963 EXPORT_SYMBOL(ipmi_smi_msg_received
);
3964 EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout
);
3965 EXPORT_SYMBOL(ipmi_alloc_smi_msg
);
3966 EXPORT_SYMBOL(ipmi_addr_length
);
3967 EXPORT_SYMBOL(ipmi_validate_addr
);
3968 EXPORT_SYMBOL(ipmi_set_gets_events
);
3969 EXPORT_SYMBOL(ipmi_smi_watcher_register
);
3970 EXPORT_SYMBOL(ipmi_smi_watcher_unregister
);
3971 EXPORT_SYMBOL(ipmi_set_my_address
);
3972 EXPORT_SYMBOL(ipmi_get_my_address
);
3973 EXPORT_SYMBOL(ipmi_set_my_LUN
);
3974 EXPORT_SYMBOL(ipmi_get_my_LUN
);
3975 EXPORT_SYMBOL(ipmi_smi_add_proc_entry
);
3976 EXPORT_SYMBOL(ipmi_user_set_run_to_completion
);
3977 EXPORT_SYMBOL(ipmi_free_recv_msg
);