[PATCH] IPMI: remove zero inits
[linux-2.6/kvm.git] drivers/char/ipmi/ipmi_msghandler.c
1 /*
2 * ipmi_msghandler.c
4 * Incoming and outgoing message routing for an IPMI interface.
6 * Author: MontaVista Software, Inc.
7 * Corey Minyard <minyard@mvista.com>
8 * source@mvista.com
10 * Copyright 2002 MontaVista Software Inc.
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
18 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
19 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
23 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
24 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
25 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
26 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
27 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 * You should have received a copy of the GNU General Public License along
30 * with this program; if not, write to the Free Software Foundation, Inc.,
31 * 675 Mass Ave, Cambridge, MA 02139, USA.
34 #include <linux/module.h>
35 #include <linux/errno.h>
36 #include <asm/system.h>
37 #include <linux/sched.h>
38 #include <linux/poll.h>
39 #include <linux/spinlock.h>
40 #include <linux/mutex.h>
41 #include <linux/slab.h>
42 #include <linux/ipmi.h>
43 #include <linux/ipmi_smi.h>
44 #include <linux/notifier.h>
45 #include <linux/init.h>
46 #include <linux/proc_fs.h>
47 #include <linux/rcupdate.h>
49 #define PFX "IPMI message handler: "
51 #define IPMI_DRIVER_VERSION "39.1"
53 static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
54 static int ipmi_init_msghandler(void);
56 static int initialized;
58 #ifdef CONFIG_PROC_FS
59 static struct proc_dir_entry *proc_ipmi_root;
60 #endif /* CONFIG_PROC_FS */
62 /* Remain in auto-maintenance mode for this amount of time (in ms). */
63 #define IPMI_MAINTENANCE_MODE_TIMEOUT 30000
65 #define MAX_EVENTS_IN_QUEUE 25
67 /* Don't let a message sit in a queue forever, always time it with at least
68 the max message timer. This is in milliseconds. */
69 #define MAX_MSG_TIMEOUT 60000
73 * The main "user" data structure.
75 struct ipmi_user
77 struct list_head link;
79 /* Set to "0" when the user is destroyed. */
80 int valid;
82 struct kref refcount;
84 /* The upper layer that handles receive messages. */
85 struct ipmi_user_hndl *handler;
86 void *handler_data;
88 /* The interface this user is bound to. */
89 ipmi_smi_t intf;
91 /* Does this user receive IPMI events? */
92 int gets_events;
95 struct cmd_rcvr
97 struct list_head link;
99 ipmi_user_t user;
100 unsigned char netfn;
101 unsigned char cmd;
102 unsigned int chans;
105 * This is used to form a linked list during mass deletion.
106 * Since this is in an RCU list, we cannot use the link above
107 * or change any data until the RCU period completes. So we
108 * use this next variable during mass deletion so we can have
109 * a list and don't have to wait and restart the search on
110 * every individual deletion of a command. */
111 struct cmd_rcvr *next;
114 struct seq_table
116 unsigned int inuse : 1;
117 unsigned int broadcast : 1;
119 unsigned long timeout;
120 unsigned long orig_timeout;
121 unsigned int retries_left;
123 /* To verify on an incoming send message response that this is
124 the message that the response is for, we keep a sequence id
125 and increment it every time we send a message. */
126 long seqid;
128 /* This is held so we can properly respond to the message on a
129 timeout, and it is used to hold the temporary data for
130 retransmission, too. */
131 struct ipmi_recv_msg *recv_msg;
134 /* Store the information in a msgid (long) to allow us to find a
135 sequence table entry from the msgid. */
136 #define STORE_SEQ_IN_MSGID(seq, seqid) (((seq&0xff)<<26) | (seqid&0x3ffffff))
138 #define GET_SEQ_FROM_MSGID(msgid, seq, seqid) \
139 do { \
140 seq = ((msgid >> 26) & 0x3f); \
141 seqid = (msgid & 0x3fffff); \
142 } while (0)
144 #define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3fffff)
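/*
 * Worked example (illustrative values, not from the original source):
 * for seq = 5 and seqid = 10, STORE_SEQ_IN_MSGID yields
 * (5 << 26) | 10 = 0x1400000a; GET_SEQ_FROM_MSGID then recovers
 * seq = (0x1400000a >> 26) & 0x3f = 5 and
 * seqid = 0x1400000a & 0x3fffff = 10.  NEXT_SEQID wraps the sequence
 * id back to 0 after 0x3fffff.
 */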
146 struct ipmi_channel
148 unsigned char medium;
149 unsigned char protocol;
151 /* My slave address. This is initialized to IPMI_BMC_SLAVE_ADDR,
152 but may be changed by the user. */
153 unsigned char address;
155 /* My LUN. This should generally stay the SMS LUN, but just in
156 case... */
157 unsigned char lun;
160 #ifdef CONFIG_PROC_FS
161 struct ipmi_proc_entry
163 char *name;
164 struct ipmi_proc_entry *next;
166 #endif
168 struct bmc_device
170 struct platform_device *dev;
171 struct ipmi_device_id id;
172 unsigned char guid[16];
173 int guid_set;
175 struct kref refcount;
177 /* bmc device attributes */
178 struct device_attribute device_id_attr;
179 struct device_attribute provides_dev_sdrs_attr;
180 struct device_attribute revision_attr;
181 struct device_attribute firmware_rev_attr;
182 struct device_attribute version_attr;
183 struct device_attribute add_dev_support_attr;
184 struct device_attribute manufacturer_id_attr;
185 struct device_attribute product_id_attr;
186 struct device_attribute guid_attr;
187 struct device_attribute aux_firmware_rev_attr;
190 #define IPMI_IPMB_NUM_SEQ 64
191 #define IPMI_MAX_CHANNELS 16
192 struct ipmi_smi
194 /* What interface number are we? */
195 int intf_num;
197 struct kref refcount;
199 /* Used for a list of interfaces. */
200 struct list_head link;
202 /* The list of upper layers that are using me. seq_lock
203 * protects this. */
204 struct list_head users;
206 /* Information to supply to users. */
207 unsigned char ipmi_version_major;
208 unsigned char ipmi_version_minor;
210 /* Used for wake ups at startup. */
211 wait_queue_head_t waitq;
213 struct bmc_device *bmc;
214 char *my_dev_name;
215 char *sysfs_name;
217 /* This is the lower-layer's sender routine. Note that you
218 * must either be holding the ipmi_interfaces_mutex or be in
219 * a non-preemptible region to use this. You must fetch the
220 * value into a local variable and make sure it is not NULL. */
221 struct ipmi_smi_handlers *handlers;
222 void *send_info;
224 #ifdef CONFIG_PROC_FS
225 /* A list of proc entries for this interface.  Protected by
226 proc_entry_lock. */
228 spinlock_t proc_entry_lock;
229 struct ipmi_proc_entry *proc_entries;
230 #endif
232 /* Driver-model device for the system interface. */
233 struct device *si_dev;
235 /* A table of sequence numbers for this interface. We use the
236 sequence numbers for IPMB messages that go out of the
237 interface to match them up with their responses. A routine
238 is called periodically to time the items in this list. */
239 spinlock_t seq_lock;
240 struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
241 int curr_seq;
243 /* Messages that were delayed for some reason (out of memory,
244 for instance), will go in here to be processed later in a
245 periodic timer interrupt. */
246 spinlock_t waiting_msgs_lock;
247 struct list_head waiting_msgs;
249 /* The list of command receivers that are registered for commands
250 on this interface. */
251 struct mutex cmd_rcvrs_mutex;
252 struct list_head cmd_rcvrs;
254 /* Events that were queued because no one was there to receive
255 them. */
256 spinlock_t events_lock; /* For dealing with event stuff. */
257 struct list_head waiting_events;
258 unsigned int waiting_events_count; /* How many events in queue? */
259 int delivering_events;
261 /* The event receiver for my BMC, only really used at panic
262 shutdown as a place to store this. */
263 unsigned char event_receiver;
264 unsigned char event_receiver_lun;
265 unsigned char local_sel_device;
266 unsigned char local_event_generator;
268 /* For handling of maintenance mode. */
269 int maintenance_mode;
270 int maintenance_mode_enable;
271 int auto_maintenance_timeout;
272 spinlock_t maintenance_mode_lock; /* Used in a timer... */
274 /* A cheap hack: if this is non-NULL and a message to an
275 interface comes in with a NULL user, call this routine with
276 it. Note that the message will still be freed by the
277 caller. This only works on the system interface. */
278 void (*null_user_handler)(ipmi_smi_t intf, struct ipmi_recv_msg *msg);
280 /* When we are scanning the channels for an SMI, this will
281 tell which channel we are scanning. */
282 int curr_channel;
284 /* Channel information */
285 struct ipmi_channel channels[IPMI_MAX_CHANNELS];
287 /* Proc FS stuff. */
288 struct proc_dir_entry *proc_dir;
289 char proc_dir_name[10];
291 spinlock_t counter_lock; /* For making counters atomic. */
293 /* Commands we were asked to send that were invalid. */
294 unsigned int sent_invalid_commands;
296 /* Commands we sent to the MC. */
297 unsigned int sent_local_commands;
298 /* Responses from the MC that were delivered to a user. */
299 unsigned int handled_local_responses;
300 /* Responses from the MC that were not delivered to a user. */
301 unsigned int unhandled_local_responses;
303 /* Commands we sent out to the IPMB bus. */
304 unsigned int sent_ipmb_commands;
305 /* Commands sent on the IPMB that had errors on the SEND CMD */
306 unsigned int sent_ipmb_command_errs;
307 /* Each retransmit increments this count. */
308 unsigned int retransmitted_ipmb_commands;
309 /* When a message times out (runs out of retransmits) this is
310 incremented. */
311 unsigned int timed_out_ipmb_commands;
313 /* This is like above, but for broadcasts. Broadcasts are
314 *not* included in the above count (they are expected to
315 time out). */
316 unsigned int timed_out_ipmb_broadcasts;
318 /* Responses I have sent to the IPMB bus. */
319 unsigned int sent_ipmb_responses;
321 /* The response was delivered to the user. */
322 unsigned int handled_ipmb_responses;
323 /* The response had invalid data in it. */
324 unsigned int invalid_ipmb_responses;
325 /* The response didn't have anyone waiting for it. */
326 unsigned int unhandled_ipmb_responses;
328 /* Commands we sent out on the LAN interface. */
329 unsigned int sent_lan_commands;
330 /* Commands sent on the LAN that had errors on the SEND CMD */
331 unsigned int sent_lan_command_errs;
332 /* Each retransmit increments this count. */
333 unsigned int retransmitted_lan_commands;
334 /* When a message times out (runs out of retransmits) this is
335 incremented. */
336 unsigned int timed_out_lan_commands;
338 /* Responses I have sent on the LAN interface. */
339 unsigned int sent_lan_responses;
341 /* The response was delivered to the user. */
342 unsigned int handled_lan_responses;
343 /* The response had invalid data in it. */
344 unsigned int invalid_lan_responses;
345 /* The response didn't have anyone waiting for it. */
346 unsigned int unhandled_lan_responses;
348 /* The command was delivered to the user. */
349 unsigned int handled_commands;
350 /* The command had invalid data in it. */
351 unsigned int invalid_commands;
352 /* The command didn't have anyone waiting for it. */
353 unsigned int unhandled_commands;
355 /* Invalid data in an event. */
356 unsigned int invalid_events;
357 /* Events that were received with the proper format. */
358 unsigned int events;
360 #define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev)
363 * The driver model view of the IPMI messaging driver.
365 static struct device_driver ipmidriver = {
366 .name = "ipmi",
367 .bus = &platform_bus_type
369 static DEFINE_MUTEX(ipmidriver_mutex);
371 static struct list_head ipmi_interfaces = LIST_HEAD_INIT(ipmi_interfaces);
372 static DEFINE_MUTEX(ipmi_interfaces_mutex);
374 /* List of watchers that want to know when smi's are added and
375 deleted. */
376 static struct list_head smi_watchers = LIST_HEAD_INIT(smi_watchers);
377 static DEFINE_MUTEX(smi_watchers_mutex);
380 static void free_recv_msg_list(struct list_head *q)
382 struct ipmi_recv_msg *msg, *msg2;
384 list_for_each_entry_safe(msg, msg2, q, link) {
385 list_del(&msg->link);
386 ipmi_free_recv_msg(msg);
390 static void free_smi_msg_list(struct list_head *q)
392 struct ipmi_smi_msg *msg, *msg2;
394 list_for_each_entry_safe(msg, msg2, q, link) {
395 list_del(&msg->link);
396 ipmi_free_smi_msg(msg);
400 static void clean_up_interface_data(ipmi_smi_t intf)
402 int i;
403 struct cmd_rcvr *rcvr, *rcvr2;
404 struct list_head list;
406 free_smi_msg_list(&intf->waiting_msgs);
407 free_recv_msg_list(&intf->waiting_events);
409 /* Wholesale remove all the entries from the list in the
410 * interface and wait for RCU to know that none are in use. */
411 mutex_lock(&intf->cmd_rcvrs_mutex);
412 list_add_rcu(&list, &intf->cmd_rcvrs);
413 list_del_rcu(&intf->cmd_rcvrs);
414 mutex_unlock(&intf->cmd_rcvrs_mutex);
415 synchronize_rcu();
417 list_for_each_entry_safe(rcvr, rcvr2, &list, link)
418 kfree(rcvr);
420 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
421 if ((intf->seq_table[i].inuse)
422 && (intf->seq_table[i].recv_msg))
424 ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
429 static void intf_free(struct kref *ref)
431 ipmi_smi_t intf = container_of(ref, struct ipmi_smi, refcount);
433 clean_up_interface_data(intf);
434 kfree(intf);
437 struct watcher_entry {
438 int intf_num;
439 ipmi_smi_t intf;
440 struct list_head link;
443 int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
445 ipmi_smi_t intf;
446 struct list_head to_deliver = LIST_HEAD_INIT(to_deliver);
447 struct watcher_entry *e, *e2;
449 mutex_lock(&smi_watchers_mutex);
451 mutex_lock(&ipmi_interfaces_mutex);
453 /* Build a list of things to deliver. */
454 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
455 if (intf->intf_num == -1)
456 continue;
457 e = kmalloc(sizeof(*e), GFP_KERNEL);
458 if (!e)
459 goto out_err;
460 kref_get(&intf->refcount);
461 e->intf = intf;
462 e->intf_num = intf->intf_num;
463 list_add_tail(&e->link, &to_deliver);
466 /* We will succeed, so add it to the list. */
467 list_add(&watcher->link, &smi_watchers);
469 mutex_unlock(&ipmi_interfaces_mutex);
471 list_for_each_entry_safe(e, e2, &to_deliver, link) {
472 list_del(&e->link);
473 watcher->new_smi(e->intf_num, e->intf->si_dev);
474 kref_put(&e->intf->refcount, intf_free);
475 kfree(e);
478 mutex_unlock(&smi_watchers_mutex);
480 return 0;
482 out_err:
483 mutex_unlock(&ipmi_interfaces_mutex);
484 mutex_unlock(&smi_watchers_mutex);
485 list_for_each_entry_safe(e, e2, &to_deliver, link) {
486 list_del(&e->link);
487 kref_put(&e->intf->refcount, intf_free);
488 kfree(e);
490 return -ENOMEM;
493 int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
495 mutex_lock(&smi_watchers_mutex);
496 list_del(&(watcher->link));
497 mutex_unlock(&smi_watchers_mutex);
498 return 0;
502 * Must be called with smi_watchers_mutex held.
504 static void
505 call_smi_watchers(int i, struct device *dev)
507 struct ipmi_smi_watcher *w;
509 list_for_each_entry(w, &smi_watchers, link) {
510 if (try_module_get(w->owner)) {
511 w->new_smi(i, dev);
512 module_put(w->owner);
517 static int
518 ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2)
520 if (addr1->addr_type != addr2->addr_type)
521 return 0;
523 if (addr1->channel != addr2->channel)
524 return 0;
526 if (addr1->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
527 struct ipmi_system_interface_addr *smi_addr1
528 = (struct ipmi_system_interface_addr *) addr1;
529 struct ipmi_system_interface_addr *smi_addr2
530 = (struct ipmi_system_interface_addr *) addr2;
531 return (smi_addr1->lun == smi_addr2->lun);
534 if ((addr1->addr_type == IPMI_IPMB_ADDR_TYPE)
535 || (addr1->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
537 struct ipmi_ipmb_addr *ipmb_addr1
538 = (struct ipmi_ipmb_addr *) addr1;
539 struct ipmi_ipmb_addr *ipmb_addr2
540 = (struct ipmi_ipmb_addr *) addr2;
542 return ((ipmb_addr1->slave_addr == ipmb_addr2->slave_addr)
543 && (ipmb_addr1->lun == ipmb_addr2->lun));
546 if (addr1->addr_type == IPMI_LAN_ADDR_TYPE) {
547 struct ipmi_lan_addr *lan_addr1
548 = (struct ipmi_lan_addr *) addr1;
549 struct ipmi_lan_addr *lan_addr2
550 = (struct ipmi_lan_addr *) addr2;
552 return ((lan_addr1->remote_SWID == lan_addr2->remote_SWID)
553 && (lan_addr1->local_SWID == lan_addr2->local_SWID)
554 && (lan_addr1->session_handle
555 == lan_addr2->session_handle)
556 && (lan_addr1->lun == lan_addr2->lun));
559 return 1;
562 int ipmi_validate_addr(struct ipmi_addr *addr, int len)
564 if (len < sizeof(struct ipmi_system_interface_addr)) {
565 return -EINVAL;
568 if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
569 if (addr->channel != IPMI_BMC_CHANNEL)
570 return -EINVAL;
571 return 0;
574 if ((addr->channel == IPMI_BMC_CHANNEL)
575 || (addr->channel >= IPMI_MAX_CHANNELS)
576 || (addr->channel < 0))
577 return -EINVAL;
579 if ((addr->addr_type == IPMI_IPMB_ADDR_TYPE)
580 || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
582 if (len < sizeof(struct ipmi_ipmb_addr)) {
583 return -EINVAL;
585 return 0;
588 if (addr->addr_type == IPMI_LAN_ADDR_TYPE) {
589 if (len < sizeof(struct ipmi_lan_addr)) {
590 return -EINVAL;
592 return 0;
595 return -EINVAL;
598 unsigned int ipmi_addr_length(int addr_type)
600 if (addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
601 return sizeof(struct ipmi_system_interface_addr);
603 if ((addr_type == IPMI_IPMB_ADDR_TYPE)
604 || (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
606 return sizeof(struct ipmi_ipmb_addr);
609 if (addr_type == IPMI_LAN_ADDR_TYPE)
610 return sizeof(struct ipmi_lan_addr);
612 return 0;
615 static void deliver_response(struct ipmi_recv_msg *msg)
617 if (!msg->user) {
618 ipmi_smi_t intf = msg->user_msg_data;
619 unsigned long flags;
621 /* Special handling for NULL users. */
622 if (intf->null_user_handler) {
623 intf->null_user_handler(intf, msg);
624 spin_lock_irqsave(&intf->counter_lock, flags);
625 intf->handled_local_responses++;
626 spin_unlock_irqrestore(&intf->counter_lock, flags);
627 } else {
628 /* No handler, so give up. */
629 spin_lock_irqsave(&intf->counter_lock, flags);
630 intf->unhandled_local_responses++;
631 spin_unlock_irqrestore(&intf->counter_lock, flags);
633 ipmi_free_recv_msg(msg);
634 } else {
635 ipmi_user_t user = msg->user;
636 user->handler->ipmi_recv_hndl(msg, user->handler_data);
640 static void
641 deliver_err_response(struct ipmi_recv_msg *msg, int err)
643 msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
644 msg->msg_data[0] = err;
645 msg->msg.netfn |= 1; /* Convert to a response. */
646 msg->msg.data_len = 1;
647 msg->msg.data = msg->msg_data;
648 deliver_response(msg);
651 /* Find the next sequence number not being used and add the given
652 message with the given timeout to the sequence table. This must be
653 called with the interface's seq_lock held. */
654 static int intf_next_seq(ipmi_smi_t intf,
655 struct ipmi_recv_msg *recv_msg,
656 unsigned long timeout,
657 int retries,
658 int broadcast,
659 unsigned char *seq,
660 long *seqid)
662 int rv = 0;
663 unsigned int i;
665 for (i = intf->curr_seq;
666 (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
667 i = (i+1)%IPMI_IPMB_NUM_SEQ)
669 if (!intf->seq_table[i].inuse)
670 break;
673 if (!intf->seq_table[i].inuse) {
674 intf->seq_table[i].recv_msg = recv_msg;
676 /* Start with the maximum timeout; when the send response
677 comes in we will start the real timer. */
678 intf->seq_table[i].timeout = MAX_MSG_TIMEOUT;
679 intf->seq_table[i].orig_timeout = timeout;
680 intf->seq_table[i].retries_left = retries;
681 intf->seq_table[i].broadcast = broadcast;
682 intf->seq_table[i].inuse = 1;
683 intf->seq_table[i].seqid = NEXT_SEQID(intf->seq_table[i].seqid);
684 *seq = i;
685 *seqid = intf->seq_table[i].seqid;
686 intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
687 } else {
688 rv = -EAGAIN;
691 return rv;
694 /* Return the receive message for the given sequence number and
695 release the sequence number so it can be reused. Some other data
696 is passed in to be sure the message matches up correctly (to help
697 guard against messages coming in after their timeout and the
698 sequence number being reused). */
699 static int intf_find_seq(ipmi_smi_t intf,
700 unsigned char seq,
701 short channel,
702 unsigned char cmd,
703 unsigned char netfn,
704 struct ipmi_addr *addr,
705 struct ipmi_recv_msg **recv_msg)
707 int rv = -ENODEV;
708 unsigned long flags;
710 if (seq >= IPMI_IPMB_NUM_SEQ)
711 return -EINVAL;
713 spin_lock_irqsave(&(intf->seq_lock), flags);
714 if (intf->seq_table[seq].inuse) {
715 struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;
717 if ((msg->addr.channel == channel)
718 && (msg->msg.cmd == cmd)
719 && (msg->msg.netfn == netfn)
720 && (ipmi_addr_equal(addr, &(msg->addr))))
722 *recv_msg = msg;
723 intf->seq_table[seq].inuse = 0;
724 rv = 0;
727 spin_unlock_irqrestore(&(intf->seq_lock), flags);
729 return rv;
733 /* Start the timer for a specific sequence table entry. */
734 static int intf_start_seq_timer(ipmi_smi_t intf,
735 long msgid)
737 int rv = -ENODEV;
738 unsigned long flags;
739 unsigned char seq;
740 unsigned long seqid;
743 GET_SEQ_FROM_MSGID(msgid, seq, seqid);
745 spin_lock_irqsave(&(intf->seq_lock), flags);
746 /* We do this verification because the user can be deleted
747 while a message is outstanding. */
748 if ((intf->seq_table[seq].inuse)
749 && (intf->seq_table[seq].seqid == seqid))
751 struct seq_table *ent = &(intf->seq_table[seq]);
752 ent->timeout = ent->orig_timeout;
753 rv = 0;
755 spin_unlock_irqrestore(&(intf->seq_lock), flags);
757 return rv;
760 /* Got an error for the send message for a specific sequence number. */
761 static int intf_err_seq(ipmi_smi_t intf,
762 long msgid,
763 unsigned int err)
765 int rv = -ENODEV;
766 unsigned long flags;
767 unsigned char seq;
768 unsigned long seqid;
769 struct ipmi_recv_msg *msg = NULL;
772 GET_SEQ_FROM_MSGID(msgid, seq, seqid);
774 spin_lock_irqsave(&(intf->seq_lock), flags);
775 /* We do this verification because the user can be deleted
776 while a message is outstanding. */
777 if ((intf->seq_table[seq].inuse)
778 && (intf->seq_table[seq].seqid == seqid))
780 struct seq_table *ent = &(intf->seq_table[seq]);
782 ent->inuse = 0;
783 msg = ent->recv_msg;
784 rv = 0;
786 spin_unlock_irqrestore(&(intf->seq_lock), flags);
788 if (msg)
789 deliver_err_response(msg, err);
791 return rv;
795 int ipmi_create_user(unsigned int if_num,
796 struct ipmi_user_hndl *handler,
797 void *handler_data,
798 ipmi_user_t *user)
800 unsigned long flags;
801 ipmi_user_t new_user;
802 int rv = 0;
803 ipmi_smi_t intf;
805 /* There is no module usecount here, because it's not
806 required. Since this can only be used by and called from
807 other modules, they will implicitly use this module, and
808 thus this can't be removed unless the other modules are
809 removed. */
811 if (handler == NULL)
812 return -EINVAL;
814 /* Make sure the driver is actually initialized, this handles
815 problems with initialization order. */
816 if (!initialized) {
817 rv = ipmi_init_msghandler();
818 if (rv)
819 return rv;
821 /* The init code doesn't return an error if it was turned
822 off, but it won't initialize. Check that. */
823 if (!initialized)
824 return -ENODEV;
827 new_user = kmalloc(sizeof(*new_user), GFP_KERNEL);
828 if (!new_user)
829 return -ENOMEM;
831 mutex_lock(&ipmi_interfaces_mutex);
832 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
833 if (intf->intf_num == if_num)
834 goto found;
836 /* Not found, return an error */
837 rv = -EINVAL;
838 goto out_kfree;
840 found:
841 /* Note that each existing user holds a refcount to the interface. */
842 kref_get(&intf->refcount);
844 kref_init(&new_user->refcount);
845 new_user->handler = handler;
846 new_user->handler_data = handler_data;
847 new_user->intf = intf;
848 new_user->gets_events = 0;
850 if (!try_module_get(intf->handlers->owner)) {
851 rv = -ENODEV;
852 goto out_kref;
855 if (intf->handlers->inc_usecount) {
856 rv = intf->handlers->inc_usecount(intf->send_info);
857 if (rv) {
858 module_put(intf->handlers->owner);
859 goto out_kref;
863 /* Hold the lock so intf->handlers is guaranteed to be good
864 * until now */
865 mutex_unlock(&ipmi_interfaces_mutex);
867 new_user->valid = 1;
868 spin_lock_irqsave(&intf->seq_lock, flags);
869 list_add_rcu(&new_user->link, &intf->users);
870 spin_unlock_irqrestore(&intf->seq_lock, flags);
871 *user = new_user;
872 return 0;
874 out_kref:
875 kref_put(&intf->refcount, intf_free);
876 out_kfree:
877 mutex_unlock(&ipmi_interfaces_mutex);
878 kfree(new_user);
879 return rv;
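/*
 * Usage sketch (illustrative, not part of this file): a kernel client
 * supplies an ipmi_user_hndl whose ipmi_recv_hndl() callback handles
 * each incoming message and frees it with ipmi_free_recv_msg(), then
 * binds to an interface roughly like this ("my_recv", "my_hndl" and
 * interface number 0 are hypothetical names/values):
 *
 *	static void my_recv(struct ipmi_recv_msg *msg, void *data)
 *	{
 *		... inspect msg->addr and msg->msg here ...
 *		ipmi_free_recv_msg(msg);
 *	}
 *
 *	static struct ipmi_user_hndl my_hndl = { .ipmi_recv_hndl = my_recv };
 *	ipmi_user_t user;
 *	int rv = ipmi_create_user(0, &my_hndl, NULL, &user);
 *
 * On success the user is later torn down with ipmi_destroy_user(user).
 */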
882 static void free_user(struct kref *ref)
884 ipmi_user_t user = container_of(ref, struct ipmi_user, refcount);
885 kfree(user);
888 int ipmi_destroy_user(ipmi_user_t user)
890 ipmi_smi_t intf = user->intf;
891 int i;
892 unsigned long flags;
893 struct cmd_rcvr *rcvr;
894 struct cmd_rcvr *rcvrs = NULL;
896 user->valid = 0;
898 /* Remove the user from the interface's sequence table. */
899 spin_lock_irqsave(&intf->seq_lock, flags);
900 list_del_rcu(&user->link);
902 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
903 if (intf->seq_table[i].inuse
904 && (intf->seq_table[i].recv_msg->user == user))
906 intf->seq_table[i].inuse = 0;
907 ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
910 spin_unlock_irqrestore(&intf->seq_lock, flags);
913 * Remove the user from the command receiver's table. First
914 * we build a list of everything (not using the standard link,
915 * since other things may be using it till we do
916 * synchronize_rcu()) then free everything in that list.
918 mutex_lock(&intf->cmd_rcvrs_mutex);
919 list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
920 if (rcvr->user == user) {
921 list_del_rcu(&rcvr->link);
922 rcvr->next = rcvrs;
923 rcvrs = rcvr;
926 mutex_unlock(&intf->cmd_rcvrs_mutex);
927 synchronize_rcu();
928 while (rcvrs) {
929 rcvr = rcvrs;
930 rcvrs = rcvr->next;
931 kfree(rcvr);
934 mutex_lock(&ipmi_interfaces_mutex);
935 if (intf->handlers) {
936 module_put(intf->handlers->owner);
937 if (intf->handlers->dec_usecount)
938 intf->handlers->dec_usecount(intf->send_info);
940 mutex_unlock(&ipmi_interfaces_mutex);
942 kref_put(&intf->refcount, intf_free);
944 kref_put(&user->refcount, free_user);
946 return 0;
949 void ipmi_get_version(ipmi_user_t user,
950 unsigned char *major,
951 unsigned char *minor)
953 *major = user->intf->ipmi_version_major;
954 *minor = user->intf->ipmi_version_minor;
957 int ipmi_set_my_address(ipmi_user_t user,
958 unsigned int channel,
959 unsigned char address)
961 if (channel >= IPMI_MAX_CHANNELS)
962 return -EINVAL;
963 user->intf->channels[channel].address = address;
964 return 0;
967 int ipmi_get_my_address(ipmi_user_t user,
968 unsigned int channel,
969 unsigned char *address)
971 if (channel >= IPMI_MAX_CHANNELS)
972 return -EINVAL;
973 *address = user->intf->channels[channel].address;
974 return 0;
977 int ipmi_set_my_LUN(ipmi_user_t user,
978 unsigned int channel,
979 unsigned char LUN)
981 if (channel >= IPMI_MAX_CHANNELS)
982 return -EINVAL;
983 user->intf->channels[channel].lun = LUN & 0x3;
984 return 0;
987 int ipmi_get_my_LUN(ipmi_user_t user,
988 unsigned int channel,
989 unsigned char *address)
991 if (channel >= IPMI_MAX_CHANNELS)
992 return -EINVAL;
993 *address = user->intf->channels[channel].lun;
994 return 0;
997 int ipmi_get_maintenance_mode(ipmi_user_t user)
999 int mode;
1000 unsigned long flags;
1002 spin_lock_irqsave(&user->intf->maintenance_mode_lock, flags);
1003 mode = user->intf->maintenance_mode;
1004 spin_unlock_irqrestore(&user->intf->maintenance_mode_lock, flags);
1006 return mode;
1008 EXPORT_SYMBOL(ipmi_get_maintenance_mode);
1010 static void maintenance_mode_update(ipmi_smi_t intf)
1012 if (intf->handlers->set_maintenance_mode)
1013 intf->handlers->set_maintenance_mode(
1014 intf->send_info, intf->maintenance_mode_enable);
1017 int ipmi_set_maintenance_mode(ipmi_user_t user, int mode)
1019 int rv = 0;
1020 unsigned long flags;
1021 ipmi_smi_t intf = user->intf;
1023 spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
1024 if (intf->maintenance_mode != mode) {
1025 switch (mode) {
1026 case IPMI_MAINTENANCE_MODE_AUTO:
1027 intf->maintenance_mode = mode;
1028 intf->maintenance_mode_enable
1029 = (intf->auto_maintenance_timeout > 0);
1030 break;
1032 case IPMI_MAINTENANCE_MODE_OFF:
1033 intf->maintenance_mode = mode;
1034 intf->maintenance_mode_enable = 0;
1035 break;
1037 case IPMI_MAINTENANCE_MODE_ON:
1038 intf->maintenance_mode = mode;
1039 intf->maintenance_mode_enable = 1;
1040 break;
1042 default:
1043 rv = -EINVAL;
1044 goto out_unlock;
1047 maintenance_mode_update(intf);
1049 out_unlock:
1050 spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags);
1052 return rv;
1054 EXPORT_SYMBOL(ipmi_set_maintenance_mode);
1056 int ipmi_set_gets_events(ipmi_user_t user, int val)
1058 unsigned long flags;
1059 ipmi_smi_t intf = user->intf;
1060 struct ipmi_recv_msg *msg, *msg2;
1061 struct list_head msgs;
1063 INIT_LIST_HEAD(&msgs);
1065 spin_lock_irqsave(&intf->events_lock, flags);
1066 user->gets_events = val;
1068 if (intf->delivering_events)
1070 * Another thread is delivering events for this, so
1071 * let it handle any new events.
1073 goto out;
1075 /* Deliver any queued events. */
1076 while (user->gets_events && !list_empty(&intf->waiting_events)) {
1077 list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link)
1078 list_move_tail(&msg->link, &msgs);
1079 intf->waiting_events_count = 0;
1081 intf->delivering_events = 1;
1082 spin_unlock_irqrestore(&intf->events_lock, flags);
1084 list_for_each_entry_safe(msg, msg2, &msgs, link) {
1085 msg->user = user;
1086 kref_get(&user->refcount);
1087 deliver_response(msg);
1090 spin_lock_irqsave(&intf->events_lock, flags);
1091 intf->delivering_events = 0;
1094 out:
1095 spin_unlock_irqrestore(&intf->events_lock, flags);
1097 return 0;
1100 static struct cmd_rcvr *find_cmd_rcvr(ipmi_smi_t intf,
1101 unsigned char netfn,
1102 unsigned char cmd,
1103 unsigned char chan)
1105 struct cmd_rcvr *rcvr;
1107 list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
1108 if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
1109 && (rcvr->chans & (1 << chan)))
1110 return rcvr;
1112 return NULL;
1115 static int is_cmd_rcvr_exclusive(ipmi_smi_t intf,
1116 unsigned char netfn,
1117 unsigned char cmd,
1118 unsigned int chans)
1120 struct cmd_rcvr *rcvr;
1122 list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
1123 if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
1124 && (rcvr->chans & chans))
1125 return 0;
1127 return 1;
1130 int ipmi_register_for_cmd(ipmi_user_t user,
1131 unsigned char netfn,
1132 unsigned char cmd,
1133 unsigned int chans)
1135 ipmi_smi_t intf = user->intf;
1136 struct cmd_rcvr *rcvr;
1137 int rv = 0;
1140 rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL);
1141 if (!rcvr)
1142 return -ENOMEM;
1143 rcvr->cmd = cmd;
1144 rcvr->netfn = netfn;
1145 rcvr->chans = chans;
1146 rcvr->user = user;
1148 mutex_lock(&intf->cmd_rcvrs_mutex);
1149 /* Make sure the command/netfn is not already registered. */
1150 if (!is_cmd_rcvr_exclusive(intf, netfn, cmd, chans)) {
1151 rv = -EBUSY;
1152 goto out_unlock;
1155 list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);
1157 out_unlock:
1158 mutex_unlock(&intf->cmd_rcvrs_mutex);
1159 if (rv)
1160 kfree(rcvr);
1162 return rv;
1165 int ipmi_unregister_for_cmd(ipmi_user_t user,
1166 unsigned char netfn,
1167 unsigned char cmd,
1168 unsigned int chans)
1170 ipmi_smi_t intf = user->intf;
1171 struct cmd_rcvr *rcvr;
1172 struct cmd_rcvr *rcvrs = NULL;
1173 int i, rv = -ENOENT;
1175 mutex_lock(&intf->cmd_rcvrs_mutex);
1176 for (i = 0; i < IPMI_NUM_CHANNELS; i++) {
1177 if (((1 << i) & chans) == 0)
1178 continue;
1179 rcvr = find_cmd_rcvr(intf, netfn, cmd, i);
1180 if (rcvr == NULL)
1181 continue;
1182 if (rcvr->user == user) {
1183 rv = 0;
1184 rcvr->chans &= ~chans;
1185 if (rcvr->chans == 0) {
1186 list_del_rcu(&rcvr->link);
1187 rcvr->next = rcvrs;
1188 rcvrs = rcvr;
1192 mutex_unlock(&intf->cmd_rcvrs_mutex);
1193 synchronize_rcu();
1194 while (rcvrs) {
1195 rcvr = rcvrs;
1196 rcvrs = rcvr->next;
1197 kfree(rcvr);
1199 return rv;
1202 void ipmi_user_set_run_to_completion(ipmi_user_t user, int val)
1204 ipmi_smi_t intf = user->intf;
1205 if (intf->handlers)
1206 intf->handlers->set_run_to_completion(intf->send_info, val);
1209 static unsigned char
1210 ipmb_checksum(unsigned char *data, int size)
1212 unsigned char csum = 0;
1214 for (; size > 0; size--, data++)
1215 csum += *data;
1217 return -csum;
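/*
 * Example (illustrative): for the two header bytes 0x20 (slave address)
 * and 0x18 (netfn/LUN), the byte sum is 0x38, so ipmb_checksum() returns
 * 0xc8; adding the checksum back in gives 0x100, i.e. zero modulo 256,
 * which is how the receiver validates the frame.
 */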
1220 static inline void format_ipmb_msg(struct ipmi_smi_msg *smi_msg,
1221 struct kernel_ipmi_msg *msg,
1222 struct ipmi_ipmb_addr *ipmb_addr,
1223 long msgid,
1224 unsigned char ipmb_seq,
1225 int broadcast,
1226 unsigned char source_address,
1227 unsigned char source_lun)
1229 int i = broadcast;
1231 /* Format the IPMB header data. */
1232 smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
1233 smi_msg->data[1] = IPMI_SEND_MSG_CMD;
1234 smi_msg->data[2] = ipmb_addr->channel;
1235 if (broadcast)
1236 smi_msg->data[3] = 0;
1237 smi_msg->data[i+3] = ipmb_addr->slave_addr;
1238 smi_msg->data[i+4] = (msg->netfn << 2) | (ipmb_addr->lun & 0x3);
1239 smi_msg->data[i+5] = ipmb_checksum(&(smi_msg->data[i+3]), 2);
1240 smi_msg->data[i+6] = source_address;
1241 smi_msg->data[i+7] = (ipmb_seq << 2) | source_lun;
1242 smi_msg->data[i+8] = msg->cmd;
1244 /* Now tack on the data to the message. */
1245 if (msg->data_len > 0)
1246 memcpy(&(smi_msg->data[i+9]), msg->data,
1247 msg->data_len);
1248 smi_msg->data_size = msg->data_len + 9;
1250 /* Now calculate the checksum and tack it on. */
1251 smi_msg->data[i+smi_msg->data_size]
1252 = ipmb_checksum(&(smi_msg->data[i+6]),
1253 smi_msg->data_size-6);
1255 /* Add on the checksum size and the offset from the
1256 broadcast. */
1257 smi_msg->data_size += 1 + i;
1259 smi_msg->msgid = msgid;
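/*
 * Sketch of the SEND_MSG payload built above for a non-broadcast
 * request (byte offsets assume broadcast == 0):
 *	data[0]   netfn/LUN of the Send Message command
 *	data[1]   IPMI_SEND_MSG_CMD
 *	data[2]   channel
 *	data[3]   responder slave address
 *	data[4]   request netfn / responder LUN
 *	data[5]   checksum over data[3..4]
 *	data[6]   requester (source) address
 *	data[7]   sequence number / source LUN
 *	data[8]   command
 *	data[9..] message data, followed by a trailing checksum
 */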
1262 static inline void format_lan_msg(struct ipmi_smi_msg *smi_msg,
1263 struct kernel_ipmi_msg *msg,
1264 struct ipmi_lan_addr *lan_addr,
1265 long msgid,
1266 unsigned char ipmb_seq,
1267 unsigned char source_lun)
1269 /* Format the IPMB header data. */
1270 smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
1271 smi_msg->data[1] = IPMI_SEND_MSG_CMD;
1272 smi_msg->data[2] = lan_addr->channel;
1273 smi_msg->data[3] = lan_addr->session_handle;
1274 smi_msg->data[4] = lan_addr->remote_SWID;
1275 smi_msg->data[5] = (msg->netfn << 2) | (lan_addr->lun & 0x3);
1276 smi_msg->data[6] = ipmb_checksum(&(smi_msg->data[4]), 2);
1277 smi_msg->data[7] = lan_addr->local_SWID;
1278 smi_msg->data[8] = (ipmb_seq << 2) | source_lun;
1279 smi_msg->data[9] = msg->cmd;
1281 /* Now tack on the data to the message. */
1282 if (msg->data_len > 0)
1283 memcpy(&(smi_msg->data[10]), msg->data,
1284 msg->data_len);
1285 smi_msg->data_size = msg->data_len + 10;
1287 /* Now calculate the checksum and tack it on. */
1288 smi_msg->data[smi_msg->data_size]
1289 = ipmb_checksum(&(smi_msg->data[7]),
1290 smi_msg->data_size-7);
1292 /* Add on the checksum size. */
1294 smi_msg->data_size += 1;
1296 smi_msg->msgid = msgid;
1299 /* Separate from ipmi_request so that the user does not have to be
1300 supplied in certain circumstances (mainly at panic time). If
1301 messages are supplied, they will be freed, even if an error
1302 occurs. */
1303 static int i_ipmi_request(ipmi_user_t user,
1304 ipmi_smi_t intf,
1305 struct ipmi_addr *addr,
1306 long msgid,
1307 struct kernel_ipmi_msg *msg,
1308 void *user_msg_data,
1309 void *supplied_smi,
1310 struct ipmi_recv_msg *supplied_recv,
1311 int priority,
1312 unsigned char source_address,
1313 unsigned char source_lun,
1314 int retries,
1315 unsigned int retry_time_ms)
1317 int rv = 0;
1318 struct ipmi_smi_msg *smi_msg;
1319 struct ipmi_recv_msg *recv_msg;
1320 unsigned long flags;
1321 struct ipmi_smi_handlers *handlers;
1324 if (supplied_recv) {
1325 recv_msg = supplied_recv;
1326 } else {
1327 recv_msg = ipmi_alloc_recv_msg();
1328 if (recv_msg == NULL) {
1329 return -ENOMEM;
1332 recv_msg->user_msg_data = user_msg_data;
1334 if (supplied_smi) {
1335 smi_msg = (struct ipmi_smi_msg *) supplied_smi;
1336 } else {
1337 smi_msg = ipmi_alloc_smi_msg();
1338 if (smi_msg == NULL) {
1339 ipmi_free_recv_msg(recv_msg);
1340 return -ENOMEM;
1344 rcu_read_lock();
1345 handlers = intf->handlers;
1346 if (!handlers) {
1347 rv = -ENODEV;
1348 goto out_err;
1351 recv_msg->user = user;
1352 if (user)
1353 kref_get(&user->refcount);
1354 recv_msg->msgid = msgid;
1355 /* Store the message to send in the receive message so timeout
1356 responses can get the proper response data. */
1357 recv_msg->msg = *msg;
1359 if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
1360 struct ipmi_system_interface_addr *smi_addr;
1362 if (msg->netfn & 1) {
1363 /* Responses are not allowed to the SMI. */
1364 rv = -EINVAL;
1365 goto out_err;
1368 smi_addr = (struct ipmi_system_interface_addr *) addr;
1369 if (smi_addr->lun > 3) {
1370 spin_lock_irqsave(&intf->counter_lock, flags);
1371 intf->sent_invalid_commands++;
1372 spin_unlock_irqrestore(&intf->counter_lock, flags);
1373 rv = -EINVAL;
1374 goto out_err;
1377 memcpy(&recv_msg->addr, smi_addr, sizeof(*smi_addr));
1379 if ((msg->netfn == IPMI_NETFN_APP_REQUEST)
1380 && ((msg->cmd == IPMI_SEND_MSG_CMD)
1381 || (msg->cmd == IPMI_GET_MSG_CMD)
1382 || (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD)))
1384 /* We don't let the user do these, since we manage
1385 the sequence numbers. */
1386 spin_lock_irqsave(&intf->counter_lock, flags);
1387 intf->sent_invalid_commands++;
1388 spin_unlock_irqrestore(&intf->counter_lock, flags);
1389 rv = -EINVAL;
1390 goto out_err;
1393 if (((msg->netfn == IPMI_NETFN_APP_REQUEST)
1394 && ((msg->cmd == IPMI_COLD_RESET_CMD)
1395 || (msg->cmd == IPMI_WARM_RESET_CMD)))
1396 || (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST))
1398 spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
1399 intf->auto_maintenance_timeout
1400 = IPMI_MAINTENANCE_MODE_TIMEOUT;
1401 if (!intf->maintenance_mode
1402 && !intf->maintenance_mode_enable)
1404 intf->maintenance_mode_enable = 1;
1405 maintenance_mode_update(intf);
1407 spin_unlock_irqrestore(&intf->maintenance_mode_lock,
1408 flags);
1411 if ((msg->data_len + 2) > IPMI_MAX_MSG_LENGTH) {
1412 spin_lock_irqsave(&intf->counter_lock, flags);
1413 intf->sent_invalid_commands++;
1414 spin_unlock_irqrestore(&intf->counter_lock, flags);
1415 rv = -EMSGSIZE;
1416 goto out_err;
1419 smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3);
1420 smi_msg->data[1] = msg->cmd;
1421 smi_msg->msgid = msgid;
1422 smi_msg->user_data = recv_msg;
1423 if (msg->data_len > 0)
1424 memcpy(&(smi_msg->data[2]), msg->data, msg->data_len);
1425 smi_msg->data_size = msg->data_len + 2;
1426 spin_lock_irqsave(&intf->counter_lock, flags);
1427 intf->sent_local_commands++;
1428 spin_unlock_irqrestore(&intf->counter_lock, flags);
1429 } else if ((addr->addr_type == IPMI_IPMB_ADDR_TYPE)
1430 || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
1432 struct ipmi_ipmb_addr *ipmb_addr;
1433 unsigned char ipmb_seq;
1434 long seqid;
1435 int broadcast = 0;
1437 if (addr->channel >= IPMI_MAX_CHANNELS) {
1438 spin_lock_irqsave(&intf->counter_lock, flags);
1439 intf->sent_invalid_commands++;
1440 spin_unlock_irqrestore(&intf->counter_lock, flags);
1441 rv = -EINVAL;
1442 goto out_err;
1445 if (intf->channels[addr->channel].medium
1446 != IPMI_CHANNEL_MEDIUM_IPMB)
1448 spin_lock_irqsave(&intf->counter_lock, flags);
1449 intf->sent_invalid_commands++;
1450 spin_unlock_irqrestore(&intf->counter_lock, flags);
1451 rv = -EINVAL;
1452 goto out_err;
1455 if (retries < 0) {
1456 if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)
1457 retries = 0; /* Don't retry broadcasts. */
1458 else
1459 retries = 4;
1461 if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) {
1462 /* Broadcasts add a zero at the beginning of the
1463 message, but are otherwise the same as an IPMB
1464 address. */
1465 addr->addr_type = IPMI_IPMB_ADDR_TYPE;
1466 broadcast = 1;
1470 /* Default to 1 second retries. */
1471 if (retry_time_ms == 0)
1472 retry_time_ms = 1000;
1474 /* 9 for the header and 1 for the checksum, plus
1475 possibly one for the broadcast. */
1476 if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) {
1477 spin_lock_irqsave(&intf->counter_lock, flags);
1478 intf->sent_invalid_commands++;
1479 spin_unlock_irqrestore(&intf->counter_lock, flags);
1480 rv = -EMSGSIZE;
1481 goto out_err;
1484 ipmb_addr = (struct ipmi_ipmb_addr *) addr;
1485 if (ipmb_addr->lun > 3) {
1486 spin_lock_irqsave(&intf->counter_lock, flags);
1487 intf->sent_invalid_commands++;
1488 spin_unlock_irqrestore(&intf->counter_lock, flags);
1489 rv = -EINVAL;
1490 goto out_err;
1493 memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr));
1495 if (recv_msg->msg.netfn & 0x1) {
1496 /* It's a response, so use the user's sequence
1497 from msgid. */
1498 spin_lock_irqsave(&intf->counter_lock, flags);
1499 intf->sent_ipmb_responses++;
1500 spin_unlock_irqrestore(&intf->counter_lock, flags);
1501 format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid,
1502 msgid, broadcast,
1503 source_address, source_lun);
1505 /* Save the receive message so we can use it
1506 to deliver the response. */
1507 smi_msg->user_data = recv_msg;
1508 } else {
1509 /* It's a command, so get a sequence for it. */
1511 spin_lock_irqsave(&(intf->seq_lock), flags);
1513 spin_lock(&intf->counter_lock);
1514 intf->sent_ipmb_commands++;
1515 spin_unlock(&intf->counter_lock);
1517 /* Create a sequence number with the given timeout
1518 and retry count. */
1519 rv = intf_next_seq(intf,
1520 recv_msg,
1521 retry_time_ms,
1522 retries,
1523 broadcast,
1524 &ipmb_seq,
1525 &seqid);
1526 if (rv) {
1527 /* We have used up all the sequence numbers,
1528 probably, so abort. */
1529 spin_unlock_irqrestore(&(intf->seq_lock),
1530 flags);
1531 goto out_err;
1534 /* Store the sequence number in the message,
1535 so that when the send message response
1536 comes back we can start the timer. */
1537 format_ipmb_msg(smi_msg, msg, ipmb_addr,
1538 STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
1539 ipmb_seq, broadcast,
1540 source_address, source_lun);
1542 /* Copy the message into the recv message data, so we
1543 can retransmit it later if necessary. */
1544 memcpy(recv_msg->msg_data, smi_msg->data,
1545 smi_msg->data_size);
1546 recv_msg->msg.data = recv_msg->msg_data;
1547 recv_msg->msg.data_len = smi_msg->data_size;
1549 /* We don't unlock until here, because we need
1550 to copy the completed message into the
1551 recv_msg before we release the lock.
1552 Otherwise, race conditions may bite us. I
1553 know that's pretty paranoid, but I prefer
1554 to be correct. */
1555 spin_unlock_irqrestore(&(intf->seq_lock), flags);
1557 } else if (addr->addr_type == IPMI_LAN_ADDR_TYPE) {
1558 struct ipmi_lan_addr *lan_addr;
1559 unsigned char ipmb_seq;
1560 long seqid;
1562 if (addr->channel >= IPMI_MAX_CHANNELS) {
1563 spin_lock_irqsave(&intf->counter_lock, flags);
1564 intf->sent_invalid_commands++;
1565 spin_unlock_irqrestore(&intf->counter_lock, flags);
1566 rv = -EINVAL;
1567 goto out_err;
1570 if ((intf->channels[addr->channel].medium
1571 != IPMI_CHANNEL_MEDIUM_8023LAN)
1572 && (intf->channels[addr->channel].medium
1573 != IPMI_CHANNEL_MEDIUM_ASYNC))
1575 spin_lock_irqsave(&intf->counter_lock, flags);
1576 intf->sent_invalid_commands++;
1577 spin_unlock_irqrestore(&intf->counter_lock, flags);
1578 rv = -EINVAL;
1579 goto out_err;
1582 retries = 4;
1584 /* Default to 1 second retries. */
1585 if (retry_time_ms == 0)
1586 retry_time_ms = 1000;
1588 /* 11 for the header and 1 for the checksum. */
1589 if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) {
1590 spin_lock_irqsave(&intf->counter_lock, flags);
1591 intf->sent_invalid_commands++;
1592 spin_unlock_irqrestore(&intf->counter_lock, flags);
1593 rv = -EMSGSIZE;
1594 goto out_err;
1597 lan_addr = (struct ipmi_lan_addr *) addr;
1598 if (lan_addr->lun > 3) {
1599 spin_lock_irqsave(&intf->counter_lock, flags);
1600 intf->sent_invalid_commands++;
1601 spin_unlock_irqrestore(&intf->counter_lock, flags);
1602 rv = -EINVAL;
1603 goto out_err;
1606 memcpy(&recv_msg->addr, lan_addr, sizeof(*lan_addr));
1608 if (recv_msg->msg.netfn & 0x1) {
1609 /* It's a response, so use the user's sequence
1610 from msgid. */
1611 spin_lock_irqsave(&intf->counter_lock, flags);
1612 intf->sent_lan_responses++;
1613 spin_unlock_irqrestore(&intf->counter_lock, flags);
1614 format_lan_msg(smi_msg, msg, lan_addr, msgid,
1615 msgid, source_lun);
1617 /* Save the receive message so we can use it
1618 to deliver the response. */
1619 smi_msg->user_data = recv_msg;
1620 } else {
1621 /* It's a command, so get a sequence for it. */
1623 spin_lock_irqsave(&(intf->seq_lock), flags);
1625 spin_lock(&intf->counter_lock);
1626 intf->sent_lan_commands++;
1627 spin_unlock(&intf->counter_lock);
1629 /* Create a sequence number with the given timeout
1630 and retry count. */
1631 rv = intf_next_seq(intf,
1632 recv_msg,
1633 retry_time_ms,
1634 retries,
1635 0,
1636 &ipmb_seq,
1637 &seqid);
1638 if (rv) {
1639 /* We have used up all the sequence numbers,
1640 probably, so abort. */
1641 spin_unlock_irqrestore(&(intf->seq_lock),
1642 flags);
1643 goto out_err;
1646 /* Store the sequence number in the message,
1647 so that when the send message response
1648 comes back we can start the timer. */
1649 format_lan_msg(smi_msg, msg, lan_addr,
1650 STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
1651 ipmb_seq, source_lun);
1653 /* Copy the message into the recv message data, so we
1654 can retransmit it later if necessary. */
1655 memcpy(recv_msg->msg_data, smi_msg->data,
1656 smi_msg->data_size);
1657 recv_msg->msg.data = recv_msg->msg_data;
1658 recv_msg->msg.data_len = smi_msg->data_size;
1660 /* We don't unlock until here, because we need
1661 to copy the completed message into the
1662 recv_msg before we release the lock.
1663 Otherwise, race conditions may bite us. I
1664 know that's pretty paranoid, but I prefer
1665 to be correct. */
1666 spin_unlock_irqrestore(&(intf->seq_lock), flags);
1668 } else {
1669 /* Unknown address type. */
1670 spin_lock_irqsave(&intf->counter_lock, flags);
1671 intf->sent_invalid_commands++;
1672 spin_unlock_irqrestore(&intf->counter_lock, flags);
1673 rv = -EINVAL;
1674 goto out_err;
1677 #ifdef DEBUG_MSGING
1679 int m;
1680 for (m = 0; m < smi_msg->data_size; m++)
1681 printk(" %2.2x", smi_msg->data[m]);
1682 printk("\n");
1684 #endif
1686 handlers->sender(intf->send_info, smi_msg, priority);
1687 rcu_read_unlock();
1689 return 0;
1691 out_err:
1692 rcu_read_unlock();
1693 ipmi_free_smi_msg(smi_msg);
1694 ipmi_free_recv_msg(recv_msg);
1695 return rv;
1698 static int check_addr(ipmi_smi_t intf,
1699 struct ipmi_addr *addr,
1700 unsigned char *saddr,
1701 unsigned char *lun)
1703 if (addr->channel >= IPMI_MAX_CHANNELS)
1704 return -EINVAL;
1705 *lun = intf->channels[addr->channel].lun;
1706 *saddr = intf->channels[addr->channel].address;
1707 return 0;
1710 int ipmi_request_settime(ipmi_user_t user,
1711 struct ipmi_addr *addr,
1712 long msgid,
1713 struct kernel_ipmi_msg *msg,
1714 void *user_msg_data,
1715 int priority,
1716 int retries,
1717 unsigned int retry_time_ms)
1719 unsigned char saddr, lun;
1720 int rv;
1722 if (!user)
1723 return -EINVAL;
1724 rv = check_addr(user->intf, addr, &saddr, &lun);
1725 if (rv)
1726 return rv;
1727 return i_ipmi_request(user,
1728 user->intf,
1729 addr,
1730 msgid,
1731 msg,
1732 user_msg_data,
1733 NULL, NULL,
1734 priority,
1735 saddr,
1736 lun,
1737 retries,
1738 retry_time_ms);
1741 int ipmi_request_supply_msgs(ipmi_user_t user,
1742 struct ipmi_addr *addr,
1743 long msgid,
1744 struct kernel_ipmi_msg *msg,
1745 void *user_msg_data,
1746 void *supplied_smi,
1747 struct ipmi_recv_msg *supplied_recv,
1748 int priority)
1750 unsigned char saddr, lun;
1751 int rv;
1753 if (!user)
1754 return -EINVAL;
1755 rv = check_addr(user->intf, addr, &saddr, &lun);
1756 if (rv)
1757 return rv;
1758 return i_ipmi_request(user,
1759 user->intf,
1760 addr,
1761 msgid,
1762 msg,
1763 user_msg_data,
1764 supplied_smi,
1765 supplied_recv,
1766 priority,
1767 saddr,
1768 lun,
1769 -1, 0);
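/*
 * The trailing -1 and 0 ask i_ipmi_request() to use its default retry
 * count and retry interval (see the retries < 0 and retry_time_ms == 0
 * handling above).
 */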
1772 #ifdef CONFIG_PROC_FS
1773 static int ipmb_file_read_proc(char *page, char **start, off_t off,
1774 int count, int *eof, void *data)
1776 char *out = (char *) page;
1777 ipmi_smi_t intf = data;
1778 int i;
1779 int rv = 0;
1781 for (i = 0; i < IPMI_MAX_CHANNELS; i++)
1782 rv += sprintf(out+rv, "%x ", intf->channels[i].address);
1783 out[rv-1] = '\n'; /* Replace the final space with a newline */
1784 out[rv] = '\0';
1785 rv++;
1786 return rv;
1789 static int version_file_read_proc(char *page, char **start, off_t off,
1790 int count, int *eof, void *data)
1792 char *out = (char *) page;
1793 ipmi_smi_t intf = data;
1795 return sprintf(out, "%d.%d\n",
1796 ipmi_version_major(&intf->bmc->id),
1797 ipmi_version_minor(&intf->bmc->id));
1800 static int stat_file_read_proc(char *page, char **start, off_t off,
1801 int count, int *eof, void *data)
1803 char *out = (char *) page;
1804 ipmi_smi_t intf = data;
1806 out += sprintf(out, "sent_invalid_commands: %d\n",
1807 intf->sent_invalid_commands);
1808 out += sprintf(out, "sent_local_commands: %d\n",
1809 intf->sent_local_commands);
1810 out += sprintf(out, "handled_local_responses: %d\n",
1811 intf->handled_local_responses);
1812 out += sprintf(out, "unhandled_local_responses: %d\n",
1813 intf->unhandled_local_responses);
1814 out += sprintf(out, "sent_ipmb_commands: %d\n",
1815 intf->sent_ipmb_commands);
1816 out += sprintf(out, "sent_ipmb_command_errs: %d\n",
1817 intf->sent_ipmb_command_errs);
1818 out += sprintf(out, "retransmitted_ipmb_commands: %d\n",
1819 intf->retransmitted_ipmb_commands);
1820 out += sprintf(out, "timed_out_ipmb_commands: %d\n",
1821 intf->timed_out_ipmb_commands);
1822 out += sprintf(out, "timed_out_ipmb_broadcasts: %d\n",
1823 intf->timed_out_ipmb_broadcasts);
1824 out += sprintf(out, "sent_ipmb_responses: %d\n",
1825 intf->sent_ipmb_responses);
1826 out += sprintf(out, "handled_ipmb_responses: %d\n",
1827 intf->handled_ipmb_responses);
1828 out += sprintf(out, "invalid_ipmb_responses: %d\n",
1829 intf->invalid_ipmb_responses);
1830 out += sprintf(out, "unhandled_ipmb_responses: %d\n",
1831 intf->unhandled_ipmb_responses);
1832 out += sprintf(out, "sent_lan_commands: %d\n",
1833 intf->sent_lan_commands);
1834 out += sprintf(out, "sent_lan_command_errs: %d\n",
1835 intf->sent_lan_command_errs);
1836 out += sprintf(out, "retransmitted_lan_commands: %d\n",
1837 intf->retransmitted_lan_commands);
1838 out += sprintf(out, "timed_out_lan_commands: %d\n",
1839 intf->timed_out_lan_commands);
1840 out += sprintf(out, "sent_lan_responses: %d\n",
1841 intf->sent_lan_responses);
1842 out += sprintf(out, "handled_lan_responses: %d\n",
1843 intf->handled_lan_responses);
1844 out += sprintf(out, "invalid_lan_responses: %d\n",
1845 intf->invalid_lan_responses);
1846 out += sprintf(out, "unhandled_lan_responses: %d\n",
1847 intf->unhandled_lan_responses);
1848 out += sprintf(out, "handled_commands: %d\n",
1849 intf->handled_commands);
1850 out += sprintf(out, "invalid_commands: %d\n",
1851 intf->invalid_commands);
1852 out += sprintf(out, "unhandled_commands: %d\n",
1853 intf->unhandled_commands);
1854 out += sprintf(out, "invalid_events: %d\n",
1855 intf->invalid_events);
1856 out += sprintf(out, "events: %d\n",
1857 intf->events);
1859 return (out - ((char *) page));
1861 #endif /* CONFIG_PROC_FS */
1863 int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name,
1864 read_proc_t *read_proc, write_proc_t *write_proc,
1865 void *data, struct module *owner)
1867 int rv = 0;
1868 #ifdef CONFIG_PROC_FS
1869 struct proc_dir_entry *file;
1870 struct ipmi_proc_entry *entry;
1872 /* Create a list element. */
1873 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1874 if (!entry)
1875 return -ENOMEM;
1876 entry->name = kmalloc(strlen(name)+1, GFP_KERNEL);
1877 if (!entry->name) {
1878 kfree(entry);
1879 return -ENOMEM;
1881 strcpy(entry->name, name);
1883 file = create_proc_entry(name, 0, smi->proc_dir);
1884 if (!file) {
1885 kfree(entry->name);
1886 kfree(entry);
1887 rv = -ENOMEM;
1888 } else {
1889 file->nlink = 1;
1890 file->data = data;
1891 file->read_proc = read_proc;
1892 file->write_proc = write_proc;
1893 file->owner = owner;
1895 spin_lock(&smi->proc_entry_lock);
1896 /* Stick it on the list. */
1897 entry->next = smi->proc_entries;
1898 smi->proc_entries = entry;
1899 spin_unlock(&smi->proc_entry_lock);
1901 #endif /* CONFIG_PROC_FS */
1903 return rv;
1906 static int add_proc_entries(ipmi_smi_t smi, int num)
1908 int rv = 0;
1910 #ifdef CONFIG_PROC_FS
1911 sprintf(smi->proc_dir_name, "%d", num);
1912 smi->proc_dir = proc_mkdir(smi->proc_dir_name, proc_ipmi_root);
1913 if (!smi->proc_dir)
1914 rv = -ENOMEM;
1915 else {
1916 smi->proc_dir->owner = THIS_MODULE;
1919 if (rv == 0)
1920 rv = ipmi_smi_add_proc_entry(smi, "stats",
1921 stat_file_read_proc, NULL,
1922 smi, THIS_MODULE);
1924 if (rv == 0)
1925 rv = ipmi_smi_add_proc_entry(smi, "ipmb",
1926 ipmb_file_read_proc, NULL,
1927 smi, THIS_MODULE);
1929 if (rv == 0)
1930 rv = ipmi_smi_add_proc_entry(smi, "version",
1931 version_file_read_proc, NULL,
1932 smi, THIS_MODULE);
1933 #endif /* CONFIG_PROC_FS */
1935 return rv;
1938 static void remove_proc_entries(ipmi_smi_t smi)
1940 #ifdef CONFIG_PROC_FS
1941 struct ipmi_proc_entry *entry;
1943 spin_lock(&smi->proc_entry_lock);
1944 while (smi->proc_entries) {
1945 entry = smi->proc_entries;
1946 smi->proc_entries = entry->next;
1948 remove_proc_entry(entry->name, smi->proc_dir);
1949 kfree(entry->name);
1950 kfree(entry);
1952 spin_unlock(&smi->proc_entry_lock);
1953 remove_proc_entry(smi->proc_dir_name, proc_ipmi_root);
1954 #endif /* CONFIG_PROC_FS */
1957 static int __find_bmc_guid(struct device *dev, void *data)
1959 unsigned char *id = data;
1960 struct bmc_device *bmc = dev_get_drvdata(dev);
1961 return memcmp(bmc->guid, id, 16) == 0;
1964 static struct bmc_device *ipmi_find_bmc_guid(struct device_driver *drv,
1965 unsigned char *guid)
1967 struct device *dev;
1969 dev = driver_find_device(drv, NULL, guid, __find_bmc_guid);
1970 if (dev)
1971 return dev_get_drvdata(dev);
1972 else
1973 return NULL;
1976 struct prod_dev_id {
1977 unsigned int product_id;
1978 unsigned char device_id;
1981 static int __find_bmc_prod_dev_id(struct device *dev, void *data)
1983 struct prod_dev_id *id = data;
1984 struct bmc_device *bmc = dev_get_drvdata(dev);
1986 return (bmc->id.product_id == id->product_id
1987 && bmc->id.device_id == id->device_id);
1990 static struct bmc_device *ipmi_find_bmc_prod_dev_id(
1991 struct device_driver *drv,
1992 unsigned int product_id, unsigned char device_id)
1994 struct prod_dev_id id = {
1995 .product_id = product_id,
1996 .device_id = device_id,
1998 struct device *dev;
2000 dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id);
2001 if (dev)
2002 return dev_get_drvdata(dev);
2003 else
2004 return NULL;
2007 static ssize_t device_id_show(struct device *dev,
2008 struct device_attribute *attr,
2009 char *buf)
2011 struct bmc_device *bmc = dev_get_drvdata(dev);
2013 return snprintf(buf, 10, "%u\n", bmc->id.device_id);
2016 static ssize_t provides_dev_sdrs_show(struct device *dev,
2017 struct device_attribute *attr,
2018 char *buf)
2020 struct bmc_device *bmc = dev_get_drvdata(dev);
2022 return snprintf(buf, 10, "%u\n",
2023 (bmc->id.device_revision & 0x80) >> 7);
2026 static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
2027 char *buf)
2029 struct bmc_device *bmc = dev_get_drvdata(dev);
2031 return snprintf(buf, 20, "%u\n",
2032 bmc->id.device_revision & 0x0F);
2035 static ssize_t firmware_rev_show(struct device *dev,
2036 struct device_attribute *attr,
2037 char *buf)
2039 struct bmc_device *bmc = dev_get_drvdata(dev);
2041 return snprintf(buf, 20, "%u.%x\n", bmc->id.firmware_revision_1,
2042 bmc->id.firmware_revision_2);
2045 static ssize_t ipmi_version_show(struct device *dev,
2046 struct device_attribute *attr,
2047 char *buf)
2049 struct bmc_device *bmc = dev_get_drvdata(dev);
2051 return snprintf(buf, 20, "%u.%u\n",
2052 ipmi_version_major(&bmc->id),
2053 ipmi_version_minor(&bmc->id));
2056 static ssize_t add_dev_support_show(struct device *dev,
2057 struct device_attribute *attr,
2058 char *buf)
2060 struct bmc_device *bmc = dev_get_drvdata(dev);
2062 return snprintf(buf, 10, "0x%02x\n",
2063 bmc->id.additional_device_support);
2066 static ssize_t manufacturer_id_show(struct device *dev,
2067 struct device_attribute *attr,
2068 char *buf)
2070 struct bmc_device *bmc = dev_get_drvdata(dev);
2072 return snprintf(buf, 20, "0x%6.6x\n", bmc->id.manufacturer_id);
2075 static ssize_t product_id_show(struct device *dev,
2076 struct device_attribute *attr,
2077 char *buf)
2079 struct bmc_device *bmc = dev_get_drvdata(dev);
2081 return snprintf(buf, 10, "0x%4.4x\n", bmc->id.product_id);
2084 static ssize_t aux_firmware_rev_show(struct device *dev,
2085 struct device_attribute *attr,
2086 char *buf)
2088 struct bmc_device *bmc = dev_get_drvdata(dev);
2090 return snprintf(buf, 21, "0x%02x 0x%02x 0x%02x 0x%02x\n",
2091 bmc->id.aux_firmware_revision[3],
2092 bmc->id.aux_firmware_revision[2],
2093 bmc->id.aux_firmware_revision[1],
2094 bmc->id.aux_firmware_revision[0]);
2097 static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
2098 char *buf)
2100 struct bmc_device *bmc = dev_get_drvdata(dev);
2102 return snprintf(buf, 100, "%Lx%Lx\n",
2103 (long long) bmc->guid[0],
2104 (long long) bmc->guid[8]);
2107 static void remove_files(struct bmc_device *bmc)
2109 if (!bmc->dev)
2110 return;
2112 device_remove_file(&bmc->dev->dev,
2113 &bmc->device_id_attr);
2114 device_remove_file(&bmc->dev->dev,
2115 &bmc->provides_dev_sdrs_attr);
2116 device_remove_file(&bmc->dev->dev,
2117 &bmc->revision_attr);
2118 device_remove_file(&bmc->dev->dev,
2119 &bmc->firmware_rev_attr);
2120 device_remove_file(&bmc->dev->dev,
2121 &bmc->version_attr);
2122 device_remove_file(&bmc->dev->dev,
2123 &bmc->add_dev_support_attr);
2124 device_remove_file(&bmc->dev->dev,
2125 &bmc->manufacturer_id_attr);
2126 device_remove_file(&bmc->dev->dev,
2127 &bmc->product_id_attr);
2129 if (bmc->id.aux_firmware_revision_set)
2130 device_remove_file(&bmc->dev->dev,
2131 &bmc->aux_firmware_rev_attr);
2132 if (bmc->guid_set)
2133 device_remove_file(&bmc->dev->dev,
2134 &bmc->guid_attr);
2137 static void
2138 cleanup_bmc_device(struct kref *ref)
2140 struct bmc_device *bmc;
2142 bmc = container_of(ref, struct bmc_device, refcount);
2144 remove_files(bmc);
2145 if (bmc->dev)
2146 platform_device_unregister(bmc->dev);
2147 kfree(bmc);
2150 static void ipmi_bmc_unregister(ipmi_smi_t intf)
2152 struct bmc_device *bmc = intf->bmc;
2154 if (intf->sysfs_name) {
2155 sysfs_remove_link(&intf->si_dev->kobj, intf->sysfs_name);
2156 kfree(intf->sysfs_name);
2157 intf->sysfs_name = NULL;
2159 if (intf->my_dev_name) {
2160 sysfs_remove_link(&bmc->dev->dev.kobj, intf->my_dev_name);
2161 kfree(intf->my_dev_name);
2162 intf->my_dev_name = NULL;
2165 mutex_lock(&ipmidriver_mutex);
2166 kref_put(&bmc->refcount, cleanup_bmc_device);
2167 intf->bmc = NULL;
2168 mutex_unlock(&ipmidriver_mutex);
2171 static int create_files(struct bmc_device *bmc)
2173 int err;
2175 bmc->device_id_attr.attr.name = "device_id";
2176 bmc->device_id_attr.attr.owner = THIS_MODULE;
2177 bmc->device_id_attr.attr.mode = S_IRUGO;
2178 bmc->device_id_attr.show = device_id_show;
2180 bmc->provides_dev_sdrs_attr.attr.name = "provides_device_sdrs";
2181 bmc->provides_dev_sdrs_attr.attr.owner = THIS_MODULE;
2182 bmc->provides_dev_sdrs_attr.attr.mode = S_IRUGO;
2183 bmc->provides_dev_sdrs_attr.show = provides_dev_sdrs_show;
2185 bmc->revision_attr.attr.name = "revision";
2186 bmc->revision_attr.attr.owner = THIS_MODULE;
2187 bmc->revision_attr.attr.mode = S_IRUGO;
2188 bmc->revision_attr.show = revision_show;
2190 bmc->firmware_rev_attr.attr.name = "firmware_revision";
2191 bmc->firmware_rev_attr.attr.owner = THIS_MODULE;
2192 bmc->firmware_rev_attr.attr.mode = S_IRUGO;
2193 bmc->firmware_rev_attr.show = firmware_rev_show;
2195 bmc->version_attr.attr.name = "ipmi_version";
2196 bmc->version_attr.attr.owner = THIS_MODULE;
2197 bmc->version_attr.attr.mode = S_IRUGO;
2198 bmc->version_attr.show = ipmi_version_show;
2200 bmc->add_dev_support_attr.attr.name = "additional_device_support";
2201 bmc->add_dev_support_attr.attr.owner = THIS_MODULE;
2202 bmc->add_dev_support_attr.attr.mode = S_IRUGO;
2203 bmc->add_dev_support_attr.show = add_dev_support_show;
2205 bmc->manufacturer_id_attr.attr.name = "manufacturer_id";
2206 bmc->manufacturer_id_attr.attr.owner = THIS_MODULE;
2207 bmc->manufacturer_id_attr.attr.mode = S_IRUGO;
2208 bmc->manufacturer_id_attr.show = manufacturer_id_show;
2210 bmc->product_id_attr.attr.name = "product_id";
2211 bmc->product_id_attr.attr.owner = THIS_MODULE;
2212 bmc->product_id_attr.attr.mode = S_IRUGO;
2213 bmc->product_id_attr.show = product_id_show;
2215 bmc->guid_attr.attr.name = "guid";
2216 bmc->guid_attr.attr.owner = THIS_MODULE;
2217 bmc->guid_attr.attr.mode = S_IRUGO;
2218 bmc->guid_attr.show = guid_show;
2220 bmc->aux_firmware_rev_attr.attr.name = "aux_firmware_revision";
2221 bmc->aux_firmware_rev_attr.attr.owner = THIS_MODULE;
2222 bmc->aux_firmware_rev_attr.attr.mode = S_IRUGO;
2223 bmc->aux_firmware_rev_attr.show = aux_firmware_rev_show;
2225 err = device_create_file(&bmc->dev->dev,
2226 &bmc->device_id_attr);
2227 if (err) goto out;
2228 err = device_create_file(&bmc->dev->dev,
2229 &bmc->provides_dev_sdrs_attr);
2230 if (err) goto out_devid;
2231 err = device_create_file(&bmc->dev->dev,
2232 &bmc->revision_attr);
2233 if (err) goto out_sdrs;
2234 err = device_create_file(&bmc->dev->dev,
2235 &bmc->firmware_rev_attr);
2236 if (err) goto out_rev;
2237 err = device_create_file(&bmc->dev->dev,
2238 &bmc->version_attr);
2239 if (err) goto out_firm;
2240 err = device_create_file(&bmc->dev->dev,
2241 &bmc->add_dev_support_attr);
2242 if (err) goto out_version;
2243 err = device_create_file(&bmc->dev->dev,
2244 &bmc->manufacturer_id_attr);
2245 if (err) goto out_add_dev;
2246 err = device_create_file(&bmc->dev->dev,
2247 &bmc->product_id_attr);
2248 if (err) goto out_manu;
2249 if (bmc->id.aux_firmware_revision_set) {
2250 err = device_create_file(&bmc->dev->dev,
2251 &bmc->aux_firmware_rev_attr);
2252 if (err) goto out_prod_id;
2254 if (bmc->guid_set) {
2255 err = device_create_file(&bmc->dev->dev,
2256 &bmc->guid_attr);
2257 if (err) goto out_aux_firm;
2260 return 0;
2262 out_aux_firm:
2263 if (bmc->id.aux_firmware_revision_set)
2264 device_remove_file(&bmc->dev->dev,
2265 &bmc->aux_firmware_rev_attr);
2266 out_prod_id:
2267 device_remove_file(&bmc->dev->dev,
2268 &bmc->product_id_attr);
2269 out_manu:
2270 device_remove_file(&bmc->dev->dev,
2271 &bmc->manufacturer_id_attr);
2272 out_add_dev:
2273 device_remove_file(&bmc->dev->dev,
2274 &bmc->add_dev_support_attr);
2275 out_version:
2276 device_remove_file(&bmc->dev->dev,
2277 &bmc->version_attr);
2278 out_firm:
2279 device_remove_file(&bmc->dev->dev,
2280 &bmc->firmware_rev_attr);
2281 out_rev:
2282 device_remove_file(&bmc->dev->dev,
2283 &bmc->revision_attr);
2284 out_sdrs:
2285 device_remove_file(&bmc->dev->dev,
2286 &bmc->provides_dev_sdrs_attr);
2287 out_devid:
2288 device_remove_file(&bmc->dev->dev,
2289 &bmc->device_id_attr);
2290 out:
2291 return err;
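/*
 * A minimal sketch only (not part of this driver): create_files() above
 * wires each attribute up by hand because some of them are conditional on
 * aux_firmware_revision_set/guid_set.  The unconditional ones could instead
 * be declared with DEVICE_ATTR() and registered with one sysfs_create_group()
 * call; the names below are illustrative, not defined anywhere in this file.
 */
#if 0
static DEVICE_ATTR(device_id, S_IRUGO, device_id_show, NULL);
static DEVICE_ATTR(revision, S_IRUGO, revision_show, NULL);

static struct attribute *bmc_sketch_attrs[] = {
	&dev_attr_device_id.attr,
	&dev_attr_revision.attr,
	NULL
};

static struct attribute_group bmc_sketch_attr_group = {
	.attrs = bmc_sketch_attrs,
};

/* ...a single call would then replace the chain of device_create_file():
 *	err = sysfs_create_group(&bmc->dev->dev.kobj, &bmc_sketch_attr_group);
 */
#endif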
2294 static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum,
2295 const char *sysfs_name)
2297 int rv;
2298 struct bmc_device *bmc = intf->bmc;
2299 struct bmc_device *old_bmc;
2300 int size;
2301 char dummy[1];
2303 mutex_lock(&ipmidriver_mutex);
2306 * Try to find if there is a bmc_device struct
2307 * representing the interfaced BMC already
2309 if (bmc->guid_set)
2310 old_bmc = ipmi_find_bmc_guid(&ipmidriver, bmc->guid);
2311 else
2312 old_bmc = ipmi_find_bmc_prod_dev_id(&ipmidriver,
2313 bmc->id.product_id,
2314 bmc->id.device_id);
2317 * If there is already a bmc_device, free the new one,
2318 * otherwise register the new BMC device
2320 if (old_bmc) {
2321 kfree(bmc);
2322 intf->bmc = old_bmc;
2323 bmc = old_bmc;
2325 kref_get(&bmc->refcount);
2326 mutex_unlock(&ipmidriver_mutex);
2328 printk(KERN_INFO
2329 "ipmi: interfacing existing BMC (man_id: 0x%6.6x,"
2330 " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
2331 bmc->id.manufacturer_id,
2332 bmc->id.product_id,
2333 bmc->id.device_id);
2334 } else {
2335 char name[14];
2336 unsigned char orig_dev_id = bmc->id.device_id;
2337 int warn_printed = 0;
2339 snprintf(name, sizeof(name),
2340 "ipmi_bmc.%4.4x", bmc->id.product_id);
2342 while (ipmi_find_bmc_prod_dev_id(&ipmidriver,
2343 bmc->id.product_id,
2344 bmc->id.device_id))
2346 if (!warn_printed) {
2347 printk(KERN_WARNING PFX
2348 "This machine has two different BMCs"
2349 " with the same product id and device"
2350 " id. This is an error in the"
2351 " firmware, but incrementing the"
2352 " device id to work around the problem."
2353 " Prod ID = 0x%x, Dev ID = 0x%x\n",
2354 bmc->id.product_id, bmc->id.device_id);
2355 warn_printed = 1;
2357 bmc->id.device_id++; /* Wraps at 255 */
2358 if (bmc->id.device_id == orig_dev_id) {
2359 printk(KERN_ERR PFX
2360 "Out of device ids!\n");
2361 break;
2365 bmc->dev = platform_device_alloc(name, bmc->id.device_id);
2366 if (!bmc->dev) {
2367 mutex_unlock(&ipmidriver_mutex);
2368 printk(KERN_ERR
2369 "ipmi_msghandler:"
2370 " Unable to allocate platform device\n");
2371 return -ENOMEM;
2373 bmc->dev->dev.driver = &ipmidriver;
2374 dev_set_drvdata(&bmc->dev->dev, bmc);
2375 kref_init(&bmc->refcount);
2377 rv = platform_device_add(bmc->dev);
2378 mutex_unlock(&ipmidriver_mutex);
2379 if (rv) {
2380 platform_device_put(bmc->dev);
2381 bmc->dev = NULL;
2382 printk(KERN_ERR
2383 "ipmi_msghandler:"
2384 " Unable to register bmc device: %d\n",
2385 rv);
2386 /* Don't go to out_err; you can only do that if
2387 the device is registered already. */
2388 return rv;
2391 rv = create_files(bmc);
2392 if (rv) {
2393 mutex_lock(&ipmidriver_mutex);
2394 platform_device_unregister(bmc->dev);
2395 mutex_unlock(&ipmidriver_mutex);
2397 return rv;
2400 printk(KERN_INFO
2401 "ipmi: Found new BMC (man_id: 0x%6.6x, "
2402 " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
2403 bmc->id.manufacturer_id,
2404 bmc->id.product_id,
2405 bmc->id.device_id);
2409 * create symlink from system interface device to bmc device
2410 * and back.
2412 intf->sysfs_name = kstrdup(sysfs_name, GFP_KERNEL);
2413 if (!intf->sysfs_name) {
2414 rv = -ENOMEM;
2415 printk(KERN_ERR
2416 "ipmi_msghandler: allocate link to BMC: %d\n",
2417 rv);
2418 goto out_err;
2421 rv = sysfs_create_link(&intf->si_dev->kobj,
2422 &bmc->dev->dev.kobj, intf->sysfs_name);
2423 if (rv) {
2424 kfree(intf->sysfs_name);
2425 intf->sysfs_name = NULL;
2426 printk(KERN_ERR
2427 "ipmi_msghandler: Unable to create bmc symlink: %d\n",
2428 rv);
2429 goto out_err;
2432 size = snprintf(dummy, 0, "ipmi%d", ifnum);
2433 intf->my_dev_name = kmalloc(size+1, GFP_KERNEL);
2434 if (!intf->my_dev_name) {
2435 kfree(intf->sysfs_name);
2436 intf->sysfs_name = NULL;
2437 rv = -ENOMEM;
2438 printk(KERN_ERR
2439 "ipmi_msghandler: allocate link from BMC: %d\n",
2440 rv);
2441 goto out_err;
2443 snprintf(intf->my_dev_name, size+1, "ipmi%d", ifnum);
2445 rv = sysfs_create_link(&bmc->dev->dev.kobj, &intf->si_dev->kobj,
2446 intf->my_dev_name);
2447 if (rv) {
2448 kfree(intf->sysfs_name);
2449 intf->sysfs_name = NULL;
2450 kfree(intf->my_dev_name);
2451 intf->my_dev_name = NULL;
2452 printk(KERN_ERR
2453 "ipmi_msghandler:"
2454 " Unable to create symlink to bmc: %d\n",
2455 rv);
2456 goto out_err;
2459 return 0;
2461 out_err:
2462 ipmi_bmc_unregister(intf);
2463 return rv;
2466 static int
2467 send_guid_cmd(ipmi_smi_t intf, int chan)
2469 struct kernel_ipmi_msg msg;
2470 struct ipmi_system_interface_addr si;
2472 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
2473 si.channel = IPMI_BMC_CHANNEL;
2474 si.lun = 0;
2476 msg.netfn = IPMI_NETFN_APP_REQUEST;
2477 msg.cmd = IPMI_GET_DEVICE_GUID_CMD;
2478 msg.data = NULL;
2479 msg.data_len = 0;
2480 return i_ipmi_request(NULL,
2481 intf,
2482 (struct ipmi_addr *) &si,
2484 &msg,
2485 intf,
2486 NULL,
2487 NULL,
2489 intf->channels[0].address,
2490 intf->channels[0].lun,
2491 -1, 0);
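/*
 * guid_handler() below expects the Get Device GUID reply to carry a
 * completion code in data[0] followed by the 16-byte GUID, which is why it
 * insists on data_len >= 17 before accepting the response.
 */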
2494 static void
2495 guid_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
2497 if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
2498 || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
2499 || (msg->msg.cmd != IPMI_GET_DEVICE_GUID_CMD))
2500 /* Not for me */
2501 return;
2503 if (msg->msg.data[0] != 0) {
2504 /* Error from getting the GUID, the BMC doesn't have one. */
2505 intf->bmc->guid_set = 0;
2506 goto out;
2509 if (msg->msg.data_len < 17) {
2510 intf->bmc->guid_set = 0;
2511 printk(KERN_WARNING PFX
2512 "guid_handler: The GUID response from the BMC was too"
2513 " short, it was %d but should have been 17. Assuming"
2514 " GUID is not available.\n",
2515 msg->msg.data_len);
2516 goto out;
2519 memcpy(intf->bmc->guid, msg->msg.data, 16);
2520 intf->bmc->guid_set = 1;
2521 out:
2522 wake_up(&intf->waitq);
2525 static void
2526 get_guid(ipmi_smi_t intf)
2528 int rv;
2530 intf->bmc->guid_set = 0x2;
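/* guid_set doubles as a tri-state here: 2 = request outstanding,
   1 = GUID stored by guid_handler(), 0 = no GUID available.  That is why
   the wait below is for guid_set != 2. */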
2531 intf->null_user_handler = guid_handler;
2532 rv = send_guid_cmd(intf, 0);
2533 if (rv)
2534 /* Send failed, no GUID available. */
2535 intf->bmc->guid_set = 0;
2536 wait_event(intf->waitq, intf->bmc->guid_set != 2);
2537 intf->null_user_handler = NULL;
2540 static int
2541 send_channel_info_cmd(ipmi_smi_t intf, int chan)
2543 struct kernel_ipmi_msg msg;
2544 unsigned char data[1];
2545 struct ipmi_system_interface_addr si;
2547 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
2548 si.channel = IPMI_BMC_CHANNEL;
2549 si.lun = 0;
2551 msg.netfn = IPMI_NETFN_APP_REQUEST;
2552 msg.cmd = IPMI_GET_CHANNEL_INFO_CMD;
2553 msg.data = data;
2554 msg.data_len = 1;
2555 data[0] = chan;
2556 return i_ipmi_request(NULL,
2557 intf,
2558 (struct ipmi_addr *) &si,
2560 &msg,
2561 intf,
2562 NULL,
2563 NULL,
2565 intf->channels[0].address,
2566 intf->channels[0].lun,
2567 -1, 0);
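/*
 * channel_handler() below picks the Get Channel Info reply apart as:
 *
 *   data[0]  completion code (IPMI_INVALID_COMMAND_ERR => assume a single
 *            IPMB channel at zero)
 *   data[2]  channel medium   (low 7 bits)
 *   data[3]  channel protocol (low 5 bits)
 *
 * and then advances curr_channel to scan the next channel.
 */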
2570 static void
2571 channel_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
2573 int rv = 0;
2574 int chan;
2576 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
2577 && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
2578 && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD))
2580 /* It's the one we want */
2581 if (msg->msg.data[0] != 0) {
2582 /* Got an error from the channel, just go on. */
2584 if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) {
2585 /* If the MC does not support this
2586 command, that is legal. We just
2587 assume it has one IPMB channel at
2588 zero. */
2589 intf->channels[0].medium
2590 = IPMI_CHANNEL_MEDIUM_IPMB;
2591 intf->channels[0].protocol
2592 = IPMI_CHANNEL_PROTOCOL_IPMB;
2593 rv = -ENOSYS;
2595 intf->curr_channel = IPMI_MAX_CHANNELS;
2596 wake_up(&intf->waitq);
2597 goto out;
2599 goto next_channel;
2601 if (msg->msg.data_len < 4) {
2602 /* Message not big enough, just go on. */
2603 goto next_channel;
2605 chan = intf->curr_channel;
2606 intf->channels[chan].medium = msg->msg.data[2] & 0x7f;
2607 intf->channels[chan].protocol = msg->msg.data[3] & 0x1f;
2609 next_channel:
2610 intf->curr_channel++;
2611 if (intf->curr_channel >= IPMI_MAX_CHANNELS)
2612 wake_up(&intf->waitq);
2613 else
2614 rv = send_channel_info_cmd(intf, intf->curr_channel);
2616 if (rv) {
2617 /* Got an error somehow, just give up. */
2618 intf->curr_channel = IPMI_MAX_CHANNELS;
2619 wake_up(&intf->waitq);
2621 printk(KERN_WARNING PFX
2622 "Error sending channel information: %d\n",
2623 rv);
2626 out:
2627 return;
2630 int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
2631 void *send_info,
2632 struct ipmi_device_id *device_id,
2633 struct device *si_dev,
2634 const char *sysfs_name,
2635 unsigned char slave_addr)
2637 int i, j;
2638 int rv;
2639 ipmi_smi_t intf;
2640 ipmi_smi_t tintf;
2641 struct list_head *link;
2643 /* Make sure the driver is actually initialized; this handles
2644 problems with initialization order. */
2645 if (!initialized) {
2646 rv = ipmi_init_msghandler();
2647 if (rv)
2648 return rv;
2649 /* The init code doesn't return an error if it was turned
2650 off, but it won't initialize. Check that. */
2651 if (!initialized)
2652 return -ENODEV;
2655 intf = kmalloc(sizeof(*intf), GFP_KERNEL);
2656 if (!intf)
2657 return -ENOMEM;
2658 memset(intf, 0, sizeof(*intf));
2660 intf->ipmi_version_major = ipmi_version_major(device_id);
2661 intf->ipmi_version_minor = ipmi_version_minor(device_id);
2663 intf->bmc = kzalloc(sizeof(*intf->bmc), GFP_KERNEL);
2664 if (!intf->bmc) {
2665 kfree(intf);
2666 return -ENOMEM;
2668 intf->intf_num = -1; /* Mark it invalid for now. */
2669 kref_init(&intf->refcount);
2670 intf->bmc->id = *device_id;
2671 intf->si_dev = si_dev;
2672 for (j = 0; j < IPMI_MAX_CHANNELS; j++) {
2673 intf->channels[j].address = IPMI_BMC_SLAVE_ADDR;
2674 intf->channels[j].lun = 2;
2676 if (slave_addr != 0)
2677 intf->channels[0].address = slave_addr;
2678 INIT_LIST_HEAD(&intf->users);
2679 intf->handlers = handlers;
2680 intf->send_info = send_info;
2681 spin_lock_init(&intf->seq_lock);
2682 for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) {
2683 intf->seq_table[j].inuse = 0;
2684 intf->seq_table[j].seqid = 0;
2686 intf->curr_seq = 0;
2687 #ifdef CONFIG_PROC_FS
2688 spin_lock_init(&intf->proc_entry_lock);
2689 #endif
2690 spin_lock_init(&intf->waiting_msgs_lock);
2691 INIT_LIST_HEAD(&intf->waiting_msgs);
2692 spin_lock_init(&intf->events_lock);
2693 INIT_LIST_HEAD(&intf->waiting_events);
2694 intf->waiting_events_count = 0;
2695 mutex_init(&intf->cmd_rcvrs_mutex);
2696 spin_lock_init(&intf->maintenance_mode_lock);
2697 INIT_LIST_HEAD(&intf->cmd_rcvrs);
2698 init_waitqueue_head(&intf->waitq);
2700 spin_lock_init(&intf->counter_lock);
2701 intf->proc_dir = NULL;
2703 mutex_lock(&smi_watchers_mutex);
2704 mutex_lock(&ipmi_interfaces_mutex);
2705 /* Look for a hole in the numbers. */
2706 i = 0;
2707 link = &ipmi_interfaces;
2708 list_for_each_entry_rcu(tintf, &ipmi_interfaces, link) {
2709 if (tintf->intf_num != i) {
2710 link = &tintf->link;
2711 break;
2713 i++;
2715 /* Add the new interface in numeric order. */
2716 if (i == 0)
2717 list_add_rcu(&intf->link, &ipmi_interfaces);
2718 else
2719 list_add_tail_rcu(&intf->link, link);
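/* Example of the hole search above, as an illustration only: with existing
   interfaces numbered 0, 1 and 3, the loop stops at the entry numbered 3
   with i == 2, so the new interface becomes number 2 and is linked in just
   before that entry, keeping the list in numeric order. */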
2721 rv = handlers->start_processing(send_info, intf);
2722 if (rv)
2723 goto out;
2725 get_guid(intf);
2727 if ((intf->ipmi_version_major > 1)
2728 || ((intf->ipmi_version_major == 1)
2729 && (intf->ipmi_version_minor >= 5)))
2731 /* Start scanning the channels to see what is
2732 available. */
2733 intf->null_user_handler = channel_handler;
2734 intf->curr_channel = 0;
2735 rv = send_channel_info_cmd(intf, 0);
2736 if (rv)
2737 goto out;
2739 /* Wait for the channel info to be read. */
2740 wait_event(intf->waitq,
2741 intf->curr_channel >= IPMI_MAX_CHANNELS);
2742 intf->null_user_handler = NULL;
2743 } else {
2744 /* Assume a single IPMB channel at zero. */
2745 intf->channels[0].medium = IPMI_CHANNEL_MEDIUM_IPMB;
2746 intf->channels[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB;
2749 if (rv == 0)
2750 rv = add_proc_entries(intf, i);
2752 rv = ipmi_bmc_register(intf, i, sysfs_name);
2754 out:
2755 if (rv) {
2756 if (intf->proc_dir)
2757 remove_proc_entries(intf);
2758 intf->handlers = NULL;
2759 list_del_rcu(&intf->link);
2760 mutex_unlock(&ipmi_interfaces_mutex);
2761 mutex_unlock(&smi_watchers_mutex);
2762 synchronize_rcu();
2763 kref_put(&intf->refcount, intf_free);
2764 } else {
2765 /* After this point the interface is legal to use. */
2766 intf->intf_num = i;
2767 mutex_unlock(&ipmi_interfaces_mutex);
2768 call_smi_watchers(i, intf->si_dev);
2769 mutex_unlock(&smi_watchers_mutex);
2772 return rv;
2775 static void cleanup_smi_msgs(ipmi_smi_t intf)
2777 int i;
2778 struct seq_table *ent;
2780 /* No need for locks, the interface is down. */
2781 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
2782 ent = &(intf->seq_table[i]);
2783 if (!ent->inuse)
2784 continue;
2785 deliver_err_response(ent->recv_msg, IPMI_ERR_UNSPECIFIED);
2789 int ipmi_unregister_smi(ipmi_smi_t intf)
2791 struct ipmi_smi_watcher *w;
2792 int intf_num = intf->intf_num;
2794 ipmi_bmc_unregister(intf);
2796 mutex_lock(&smi_watchers_mutex);
2797 mutex_lock(&ipmi_interfaces_mutex);
2798 intf->intf_num = -1;
2799 intf->handlers = NULL;
2800 list_del_rcu(&intf->link);
2801 mutex_unlock(&ipmi_interfaces_mutex);
2802 synchronize_rcu();
2804 cleanup_smi_msgs(intf);
2806 remove_proc_entries(intf);
2808 /* Call all the watcher interfaces to tell them that
2809 an interface is gone. */
2810 list_for_each_entry(w, &smi_watchers, link)
2811 w->smi_gone(intf_num);
2812 mutex_unlock(&smi_watchers_mutex);
2814 kref_put(&intf->refcount, intf_free);
2815 return 0;
2818 static int handle_ipmb_get_msg_rsp(ipmi_smi_t intf,
2819 struct ipmi_smi_msg *msg)
2821 struct ipmi_ipmb_addr ipmb_addr;
2822 struct ipmi_recv_msg *recv_msg;
2823 unsigned long flags;
2826 /* This is 11, not 10, because the response must contain a
2827 * completion code. */
2828 if (msg->rsp_size < 11) {
2829 /* Message not big enough, just ignore it. */
2830 spin_lock_irqsave(&intf->counter_lock, flags);
2831 intf->invalid_ipmb_responses++;
2832 spin_unlock_irqrestore(&intf->counter_lock, flags);
2833 return 0;
2836 if (msg->rsp[2] != 0) {
2837 /* An error getting the response, just ignore it. */
2838 return 0;
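/*
 * Byte layout used below, as inferred from the parsing (offsets into rsp[]):
 *
 *   rsp[2]         completion code of the Get Message command
 *   rsp[3] & 0x0f  channel the response came in on
 *   rsp[4] >> 2    netfn of the embedded response
 *   rsp[6]         responder's IPMB slave address
 *   rsp[7]         sequence number (bits 7..2) and LUN (bits 1..0)
 *   rsp[8]         command byte
 *   rsp[9]..       response data, ending with the IPMB checksum
 */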
2841 ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE;
2842 ipmb_addr.slave_addr = msg->rsp[6];
2843 ipmb_addr.channel = msg->rsp[3] & 0x0f;
2844 ipmb_addr.lun = msg->rsp[7] & 3;
2846 /* It's a response from a remote entity. Look up the sequence
2847 number and handle the response. */
2848 if (intf_find_seq(intf,
2849 msg->rsp[7] >> 2,
2850 msg->rsp[3] & 0x0f,
2851 msg->rsp[8],
2852 (msg->rsp[4] >> 2) & (~1),
2853 (struct ipmi_addr *) &(ipmb_addr),
2854 &recv_msg))
2856 /* We were unable to find the sequence number,
2857 so just nuke the message. */
2858 spin_lock_irqsave(&intf->counter_lock, flags);
2859 intf->unhandled_ipmb_responses++;
2860 spin_unlock_irqrestore(&intf->counter_lock, flags);
2861 return 0;
2864 memcpy(recv_msg->msg_data,
2865 &(msg->rsp[9]),
2866 msg->rsp_size - 9);
2867 /* The other fields matched, so no need to set them, except
2868 for netfn, which needs to be the response that was
2869 returned, not the request value. */
2870 recv_msg->msg.netfn = msg->rsp[4] >> 2;
2871 recv_msg->msg.data = recv_msg->msg_data;
2872 recv_msg->msg.data_len = msg->rsp_size - 10;
2873 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
2874 spin_lock_irqsave(&intf->counter_lock, flags);
2875 intf->handled_ipmb_responses++;
2876 spin_unlock_irqrestore(&intf->counter_lock, flags);
2877 deliver_response(recv_msg);
2879 return 0;
2882 static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf,
2883 struct ipmi_smi_msg *msg)
2885 struct cmd_rcvr *rcvr;
2886 int rv = 0;
2887 unsigned char netfn;
2888 unsigned char cmd;
2889 unsigned char chan;
2890 ipmi_user_t user = NULL;
2891 struct ipmi_ipmb_addr *ipmb_addr;
2892 struct ipmi_recv_msg *recv_msg;
2893 unsigned long flags;
2894 struct ipmi_smi_handlers *handlers;
2896 if (msg->rsp_size < 10) {
2897 /* Message not big enough, just ignore it. */
2898 spin_lock_irqsave(&intf->counter_lock, flags);
2899 intf->invalid_commands++;
2900 spin_unlock_irqrestore(&intf->counter_lock, flags);
2901 return 0;
2904 if (msg->rsp[2] != 0) {
2905 /* An error getting the response, just ignore it. */
2906 return 0;
2909 netfn = msg->rsp[4] >> 2;
2910 cmd = msg->rsp[8];
2911 chan = msg->rsp[3] & 0xf;
2913 rcu_read_lock();
2914 rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
2915 if (rcvr) {
2916 user = rcvr->user;
2917 kref_get(&user->refcount);
2918 } else
2919 user = NULL;
2920 rcu_read_unlock();
2922 if (user == NULL) {
2923 /* We didn't find a user, deliver an error response. */
2924 spin_lock_irqsave(&intf->counter_lock, flags);
2925 intf->unhandled_commands++;
2926 spin_unlock_irqrestore(&intf->counter_lock, flags);
2928 msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
2929 msg->data[1] = IPMI_SEND_MSG_CMD;
2930 msg->data[2] = msg->rsp[3];
2931 msg->data[3] = msg->rsp[6];
2932 msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3);
2933 msg->data[5] = ipmb_checksum(&(msg->data[3]), 2);
2934 msg->data[6] = intf->channels[msg->rsp[3] & 0xf].address;
2935 /* rqseq/lun */
2936 msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3);
2937 msg->data[8] = msg->rsp[8]; /* cmd */
2938 msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE;
2939 msg->data[10] = ipmb_checksum(&(msg->data[6]), 4);
2940 msg->data_size = 11;
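/* ipmb_checksum() (defined earlier in this file) is the usual IPMB
   two's-complement checksum: the covered bytes plus the checksum byte sum
   to zero modulo 256.  For example, bytes 0x20 and 0x18 sum to 0x38, so
   the checksum byte is 0xc8. */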
2942 #ifdef DEBUG_MSGING
2944 int m;
2945 printk("Invalid command:");
2946 for (m = 0; m < msg->data_size; m++)
2947 printk(" %2.2x", msg->data[m]);
2948 printk("\n");
2950 #endif
2951 rcu_read_lock();
2952 handlers = intf->handlers;
2953 if (handlers) {
2954 handlers->sender(intf->send_info, msg, 0);
2955 /* We used the message, so return the value
2956 that causes it to not be freed or
2957 queued. */
2958 rv = -1;
2960 rcu_read_unlock();
2961 } else {
2962 /* Deliver the message to the user. */
2963 spin_lock_irqsave(&intf->counter_lock, flags);
2964 intf->handled_commands++;
2965 spin_unlock_irqrestore(&intf->counter_lock, flags);
2967 recv_msg = ipmi_alloc_recv_msg();
2968 if (!recv_msg) {
2969 /* We couldn't allocate memory for the
2970 message, so requeue it for handling
2971 later. */
2972 rv = 1;
2973 kref_put(&user->refcount, free_user);
2974 } else {
2975 /* Extract the source address from the data. */
2976 ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
2977 ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE;
2978 ipmb_addr->slave_addr = msg->rsp[6];
2979 ipmb_addr->lun = msg->rsp[7] & 3;
2980 ipmb_addr->channel = msg->rsp[3] & 0xf;
2982 /* Extract the rest of the message information
2983 from the IPMB header. */
2984 recv_msg->user = user;
2985 recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
2986 recv_msg->msgid = msg->rsp[7] >> 2;
2987 recv_msg->msg.netfn = msg->rsp[4] >> 2;
2988 recv_msg->msg.cmd = msg->rsp[8];
2989 recv_msg->msg.data = recv_msg->msg_data;
2991 /* We chop off 10, not 9 bytes because the checksum
2992 at the end also needs to be removed. */
2993 recv_msg->msg.data_len = msg->rsp_size - 10;
2994 memcpy(recv_msg->msg_data,
2995 &(msg->rsp[9]),
2996 msg->rsp_size - 10);
2997 deliver_response(recv_msg);
3001 return rv;
3004 static int handle_lan_get_msg_rsp(ipmi_smi_t intf,
3005 struct ipmi_smi_msg *msg)
3007 struct ipmi_lan_addr lan_addr;
3008 struct ipmi_recv_msg *recv_msg;
3009 unsigned long flags;
3012 /* This is 13, not 12, because the response must contain a
3013 * completion code. */
3014 if (msg->rsp_size < 13) {
3015 /* Message not big enough, just ignore it. */
3016 spin_lock_irqsave(&intf->counter_lock, flags);
3017 intf->invalid_lan_responses++;
3018 spin_unlock_irqrestore(&intf->counter_lock, flags);
3019 return 0;
3022 if (msg->rsp[2] != 0) {
3023 /* An error getting the response, just ignore it. */
3024 return 0;
3027 lan_addr.addr_type = IPMI_LAN_ADDR_TYPE;
3028 lan_addr.session_handle = msg->rsp[4];
3029 lan_addr.remote_SWID = msg->rsp[8];
3030 lan_addr.local_SWID = msg->rsp[5];
3031 lan_addr.channel = msg->rsp[3] & 0x0f;
3032 lan_addr.privilege = msg->rsp[3] >> 4;
3033 lan_addr.lun = msg->rsp[9] & 3;
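/* As with the IPMB case, the offsets above mirror the parsing below:
   rsp[3] carries channel (low nibble) and privilege (high nibble), rsp[4]
   the session handle, rsp[5]/rsp[8] the local/remote SWIDs, rsp[6] >> 2 the
   netfn, rsp[9] the sequence number and LUN, rsp[10] the command, and the
   payload starts at rsp[11]. */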
3035 /* It's a response from a remote entity. Look up the sequence
3036 number and handle the response. */
3037 if (intf_find_seq(intf,
3038 msg->rsp[9] >> 2,
3039 msg->rsp[3] & 0x0f,
3040 msg->rsp[10],
3041 (msg->rsp[6] >> 2) & (~1),
3042 (struct ipmi_addr *) &(lan_addr),
3043 &recv_msg))
3045 /* We were unable to find the sequence number,
3046 so just nuke the message. */
3047 spin_lock_irqsave(&intf->counter_lock, flags);
3048 intf->unhandled_lan_responses++;
3049 spin_unlock_irqrestore(&intf->counter_lock, flags);
3050 return 0;
3053 memcpy(recv_msg->msg_data,
3054 &(msg->rsp[11]),
3055 msg->rsp_size - 11);
3056 /* The other fields matched, so no need to set them, except
3057 for netfn, which needs to be the response that was
3058 returned, not the request value. */
3059 recv_msg->msg.netfn = msg->rsp[6] >> 2;
3060 recv_msg->msg.data = recv_msg->msg_data;
3061 recv_msg->msg.data_len = msg->rsp_size - 12;
3062 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
3063 spin_lock_irqsave(&intf->counter_lock, flags);
3064 intf->handled_lan_responses++;
3065 spin_unlock_irqrestore(&intf->counter_lock, flags);
3066 deliver_response(recv_msg);
3068 return 0;
3071 static int handle_lan_get_msg_cmd(ipmi_smi_t intf,
3072 struct ipmi_smi_msg *msg)
3074 struct cmd_rcvr *rcvr;
3075 int rv = 0;
3076 unsigned char netfn;
3077 unsigned char cmd;
3078 unsigned char chan;
3079 ipmi_user_t user = NULL;
3080 struct ipmi_lan_addr *lan_addr;
3081 struct ipmi_recv_msg *recv_msg;
3082 unsigned long flags;
3084 if (msg->rsp_size < 12) {
3085 /* Message not big enough, just ignore it. */
3086 spin_lock_irqsave(&intf->counter_lock, flags);
3087 intf->invalid_commands++;
3088 spin_unlock_irqrestore(&intf->counter_lock, flags);
3089 return 0;
3092 if (msg->rsp[2] != 0) {
3093 /* An error getting the response, just ignore it. */
3094 return 0;
3097 netfn = msg->rsp[6] >> 2;
3098 cmd = msg->rsp[10];
3099 chan = msg->rsp[3] & 0xf;
3101 rcu_read_lock();
3102 rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
3103 if (rcvr) {
3104 user = rcvr->user;
3105 kref_get(&user->refcount);
3106 } else
3107 user = NULL;
3108 rcu_read_unlock();
3110 if (user == NULL) {
3111 /* We didn't find a user, just give up. */
3112 spin_lock_irqsave(&intf->counter_lock, flags);
3113 intf->unhandled_commands++;
3114 spin_unlock_irqrestore(&intf->counter_lock, flags);
3116 rv = 0; /* Don't do anything with these messages, just
3117 allow them to be freed. */
3118 } else {
3119 /* Deliver the message to the user. */
3120 spin_lock_irqsave(&intf->counter_lock, flags);
3121 intf->handled_commands++;
3122 spin_unlock_irqrestore(&intf->counter_lock, flags);
3124 recv_msg = ipmi_alloc_recv_msg();
3125 if (!recv_msg) {
3126 /* We couldn't allocate memory for the
3127 message, so requeue it for handling
3128 later. */
3129 rv = 1;
3130 kref_put(&user->refcount, free_user);
3131 } else {
3132 /* Extract the source address from the data. */
3133 lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
3134 lan_addr->addr_type = IPMI_LAN_ADDR_TYPE;
3135 lan_addr->session_handle = msg->rsp[4];
3136 lan_addr->remote_SWID = msg->rsp[8];
3137 lan_addr->local_SWID = msg->rsp[5];
3138 lan_addr->lun = msg->rsp[9] & 3;
3139 lan_addr->channel = msg->rsp[3] & 0xf;
3140 lan_addr->privilege = msg->rsp[3] >> 4;
3142 /* Extract the rest of the message information
3143 from the LAN header. */
3144 recv_msg->user = user;
3145 recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
3146 recv_msg->msgid = msg->rsp[9] >> 2;
3147 recv_msg->msg.netfn = msg->rsp[6] >> 2;
3148 recv_msg->msg.cmd = msg->rsp[10];
3149 recv_msg->msg.data = recv_msg->msg_data;
3151 /* We chop off 12, not 11 bytes because the checksum
3152 at the end also needs to be removed. */
3153 recv_msg->msg.data_len = msg->rsp_size - 12;
3154 memcpy(recv_msg->msg_data,
3155 &(msg->rsp[11]),
3156 msg->rsp_size - 12);
3157 deliver_response(recv_msg);
3161 return rv;
3164 static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg,
3165 struct ipmi_smi_msg *msg)
3167 struct ipmi_system_interface_addr *smi_addr;
3169 recv_msg->msgid = 0;
3170 smi_addr = (struct ipmi_system_interface_addr *) &(recv_msg->addr);
3171 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3172 smi_addr->channel = IPMI_BMC_CHANNEL;
3173 smi_addr->lun = msg->rsp[0] & 3;
3174 recv_msg->recv_type = IPMI_ASYNC_EVENT_RECV_TYPE;
3175 recv_msg->msg.netfn = msg->rsp[0] >> 2;
3176 recv_msg->msg.cmd = msg->rsp[1];
3177 memcpy(recv_msg->msg_data, &(msg->rsp[3]), msg->rsp_size - 3);
3178 recv_msg->msg.data = recv_msg->msg_data;
3179 recv_msg->msg.data_len = msg->rsp_size - 3;
3182 static int handle_read_event_rsp(ipmi_smi_t intf,
3183 struct ipmi_smi_msg *msg)
3185 struct ipmi_recv_msg *recv_msg, *recv_msg2;
3186 struct list_head msgs;
3187 ipmi_user_t user;
3188 int rv = 0;
3189 int deliver_count = 0;
3190 unsigned long flags;
3192 if (msg->rsp_size < 19) {
3193 /* Message is too small to be an IPMB event. */
3194 spin_lock_irqsave(&intf->counter_lock, flags);
3195 intf->invalid_events++;
3196 spin_unlock_irqrestore(&intf->counter_lock, flags);
3197 return 0;
3200 if (msg->rsp[2] != 0) {
3201 /* An error getting the event, just ignore it. */
3202 return 0;
3205 INIT_LIST_HEAD(&msgs);
3207 spin_lock_irqsave(&intf->events_lock, flags);
3209 spin_lock(&intf->counter_lock);
3210 intf->events++;
3211 spin_unlock(&intf->counter_lock);
3213 /* Allocate and fill in one message for every user that is getting
3214 events. */
3215 rcu_read_lock();
3216 list_for_each_entry_rcu(user, &intf->users, link) {
3217 if (!user->gets_events)
3218 continue;
3220 recv_msg = ipmi_alloc_recv_msg();
3221 if (!recv_msg) {
3222 rcu_read_unlock();
3223 list_for_each_entry_safe(recv_msg, recv_msg2, &msgs,
3224 link) {
3225 list_del(&recv_msg->link);
3226 ipmi_free_recv_msg(recv_msg);
3228 /* We couldn't allocate memory for the
3229 message, so requeue it for handling
3230 later. */
3231 rv = 1;
3232 goto out;
3235 deliver_count++;
3237 copy_event_into_recv_msg(recv_msg, msg);
3238 recv_msg->user = user;
3239 kref_get(&user->refcount);
3240 list_add_tail(&(recv_msg->link), &msgs);
3242 rcu_read_unlock();
3244 if (deliver_count) {
3245 /* Now deliver all the messages. */
3246 list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) {
3247 list_del(&recv_msg->link);
3248 deliver_response(recv_msg);
3250 } else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) {
3251 /* No one to receive the message, so put it in the queue if there
3252 are not already too many things in it. */
3253 recv_msg = ipmi_alloc_recv_msg();
3254 if (!recv_msg) {
3255 /* We couldn't allocate memory for the
3256 message, so requeue it for handling
3257 later. */
3258 rv = 1;
3259 goto out;
3262 copy_event_into_recv_msg(recv_msg, msg);
3263 list_add_tail(&(recv_msg->link), &(intf->waiting_events));
3264 intf->waiting_events_count++;
3265 } else {
3266 /* There are too many things in the queue; discard this
3267 message. */
3268 printk(KERN_WARNING PFX "Event queue full, discarding an"
3269 " incoming event\n");
3272 out:
3273 spin_unlock_irqrestore(&(intf->events_lock), flags);
3275 return rv;
3278 static int handle_bmc_rsp(ipmi_smi_t intf,
3279 struct ipmi_smi_msg *msg)
3281 struct ipmi_recv_msg *recv_msg;
3282 unsigned long flags;
3283 struct ipmi_user *user;
3285 recv_msg = (struct ipmi_recv_msg *) msg->user_data;
3286 if (recv_msg == NULL)
3288 printk(KERN_WARNING"IPMI message received with no owner. This\n"
3289 "could be because of a malformed message, or\n"
3290 "because of a hardware error. Contact your\n"
3291 "hardware vender for assistance\n");
3292 return 0;
3295 user = recv_msg->user;
3296 /* Make sure the user still exists. */
3297 if (user && !user->valid) {
3298 /* The user for the message went away, so give up. */
3299 spin_lock_irqsave(&intf->counter_lock, flags);
3300 intf->unhandled_local_responses++;
3301 spin_unlock_irqrestore(&intf->counter_lock, flags);
3302 ipmi_free_recv_msg(recv_msg);
3303 } else {
3304 struct ipmi_system_interface_addr *smi_addr;
3306 spin_lock_irqsave(&intf->counter_lock, flags);
3307 intf->handled_local_responses++;
3308 spin_unlock_irqrestore(&intf->counter_lock, flags);
3309 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
3310 recv_msg->msgid = msg->msgid;
3311 smi_addr = ((struct ipmi_system_interface_addr *)
3312 &(recv_msg->addr));
3313 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3314 smi_addr->channel = IPMI_BMC_CHANNEL;
3315 smi_addr->lun = msg->rsp[0] & 3;
3316 recv_msg->msg.netfn = msg->rsp[0] >> 2;
3317 recv_msg->msg.cmd = msg->rsp[1];
3318 memcpy(recv_msg->msg_data,
3319 &(msg->rsp[2]),
3320 msg->rsp_size - 2);
3321 recv_msg->msg.data = recv_msg->msg_data;
3322 recv_msg->msg.data_len = msg->rsp_size - 2;
3323 deliver_response(recv_msg);
3326 return 0;
3329 /* Handle a new message. Return 1 if the message should be requeued,
3330 0 if the message should be freed, or -1 if the message should not
3331 be freed or requeued. */
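/* Dispatch, in order of the tests below: a response to a Send Message we
   issued is turned into a response-response for the user; a Get Message
   response is routed by channel medium (IPMB vs. LAN/async) and by whether
   the embedded netfn marks it as a response or a command; a Read Event
   Message Buffer response becomes an asynchronous event; anything else is
   treated as a response from the local BMC. */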
3332 static int handle_new_recv_msg(ipmi_smi_t intf,
3333 struct ipmi_smi_msg *msg)
3335 int requeue;
3336 int chan;
3338 #ifdef DEBUG_MSGING
3339 int m;
3340 printk("Recv:");
3341 for (m = 0; m < msg->rsp_size; m++)
3342 printk(" %2.2x", msg->rsp[m]);
3343 printk("\n");
3344 #endif
3345 if (msg->rsp_size < 2) {
3346 /* Message is too small to be correct. */
3347 printk(KERN_WARNING PFX "BMC returned to small a message"
3348 " for netfn %x cmd %x, got %d bytes\n",
3349 (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);
3351 /* Generate an error response for the message. */
3352 msg->rsp[0] = msg->data[0] | (1 << 2);
3353 msg->rsp[1] = msg->data[1];
3354 msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
3355 msg->rsp_size = 3;
3356 } else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))/* Netfn */
3357 || (msg->rsp[1] != msg->data[1])) /* Command */
3359 /* The response is not even marginally correct. */
3360 printk(KERN_WARNING PFX "BMC returned incorrect response,"
3361 " expected netfn %x cmd %x, got netfn %x cmd %x\n",
3362 (msg->data[0] >> 2) | 1, msg->data[1],
3363 msg->rsp[0] >> 2, msg->rsp[1]);
3365 /* Generate an error response for the message. */
3366 msg->rsp[0] = msg->data[0] | (1 << 2);
3367 msg->rsp[1] = msg->data[1];
3368 msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
3369 msg->rsp_size = 3;
3372 if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
3373 && (msg->rsp[1] == IPMI_SEND_MSG_CMD)
3374 && (msg->user_data != NULL))
3376 /* It's a response to a response we sent. For this we
3377 deliver a send message response to the user. */
3378 struct ipmi_recv_msg *recv_msg = msg->user_data;
3380 requeue = 0;
3381 if (msg->rsp_size < 2)
3382 /* Message is too small to be correct. */
3383 goto out;
3385 chan = msg->data[2] & 0x0f;
3386 if (chan >= IPMI_MAX_CHANNELS)
3387 /* Invalid channel number */
3388 goto out;
3390 if (!recv_msg)
3391 goto out;
3393 /* Make sure the user still exists. */
3394 if (!recv_msg->user || !recv_msg->user->valid)
3395 goto out;
3397 recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE;
3398 recv_msg->msg.data = recv_msg->msg_data;
3399 recv_msg->msg.data_len = 1;
3400 recv_msg->msg_data[0] = msg->rsp[2];
3401 deliver_response(recv_msg);
3402 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
3403 && (msg->rsp[1] == IPMI_GET_MSG_CMD))
3405 /* It's from the receive queue. */
3406 chan = msg->rsp[3] & 0xf;
3407 if (chan >= IPMI_MAX_CHANNELS) {
3408 /* Invalid channel number */
3409 requeue = 0;
3410 goto out;
3413 switch (intf->channels[chan].medium) {
3414 case IPMI_CHANNEL_MEDIUM_IPMB:
3415 if (msg->rsp[4] & 0x04) {
3416 /* It's a response, so find the
3417 requesting message and send it up. */
3418 requeue = handle_ipmb_get_msg_rsp(intf, msg);
3419 } else {
3420 /* It's a command to the SMS from some other
3421 entity. Handle that. */
3422 requeue = handle_ipmb_get_msg_cmd(intf, msg);
3424 break;
3426 case IPMI_CHANNEL_MEDIUM_8023LAN:
3427 case IPMI_CHANNEL_MEDIUM_ASYNC:
3428 if (msg->rsp[6] & 0x04) {
3429 /* It's a response, so find the
3430 requesting message and send it up. */
3431 requeue = handle_lan_get_msg_rsp(intf, msg);
3432 } else {
3433 /* It's a command to the SMS from some other
3434 entity. Handle that. */
3435 requeue = handle_lan_get_msg_cmd(intf, msg);
3437 break;
3439 default:
3440 /* We don't handle the channel type, so just
3441 * free the message. */
3442 requeue = 0;
3445 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
3446 && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD))
3448 /* It's an asynchronous event. */
3449 requeue = handle_read_event_rsp(intf, msg);
3450 } else {
3451 /* It's a response from the local BMC. */
3452 requeue = handle_bmc_rsp(intf, msg);
3455 out:
3456 return requeue;
3459 /* Handle a new message from the lower layer. */
3460 void ipmi_smi_msg_received(ipmi_smi_t intf,
3461 struct ipmi_smi_msg *msg)
3463 unsigned long flags;
3464 int rv;
3467 if ((msg->data_size >= 2)
3468 && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
3469 && (msg->data[1] == IPMI_SEND_MSG_CMD)
3470 && (msg->user_data == NULL))
3472 /* This is the local response to a command send; start
3473 the timer for these. The user_data will not be
3474 NULL if this is a response send, and we will let
3475 response sends just go through. */
3477 /* Check for errors; if we get certain errors (ones
3478 that mean basically we can try again later), we
3479 ignore them and start the timer. Otherwise we
3480 report the error immediately. */
3481 if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0)
3482 && (msg->rsp[2] != IPMI_NODE_BUSY_ERR)
3483 && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR)
3484 && (msg->rsp[2] != IPMI_BUS_ERR)
3485 && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR))
3487 int chan = msg->rsp[3] & 0xf;
3489 /* Got an error sending the message, handle it. */
3490 spin_lock_irqsave(&intf->counter_lock, flags);
3491 if (chan >= IPMI_MAX_CHANNELS)
3492 ; /* This shouldn't happen */
3493 else if ((intf->channels[chan].medium
3494 == IPMI_CHANNEL_MEDIUM_8023LAN)
3495 || (intf->channels[chan].medium
3496 == IPMI_CHANNEL_MEDIUM_ASYNC))
3497 intf->sent_lan_command_errs++;
3498 else
3499 intf->sent_ipmb_command_errs++;
3500 spin_unlock_irqrestore(&intf->counter_lock, flags);
3501 intf_err_seq(intf, msg->msgid, msg->rsp[2]);
3502 } else {
3503 /* The message was sent, start the timer. */
3504 intf_start_seq_timer(intf, msg->msgid);
3507 ipmi_free_smi_msg(msg);
3508 goto out;
3511 /* To preserve message order, if the list is not empty, we
3512 tack this message onto the end of the list. */
3513 spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
3514 if (!list_empty(&intf->waiting_msgs)) {
3515 list_add_tail(&msg->link, &intf->waiting_msgs);
3516 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3517 goto out;
3519 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3521 rv = handle_new_recv_msg(intf, msg);
3522 if (rv > 0) {
3523 /* Could not handle the message now, just add it to a
3524 list to handle later. */
3525 spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
3526 list_add_tail(&msg->link, &intf->waiting_msgs);
3527 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3528 } else if (rv == 0) {
3529 ipmi_free_smi_msg(msg);
3532 out:
3533 return;
3536 void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf)
3538 ipmi_user_t user;
3540 rcu_read_lock();
3541 list_for_each_entry_rcu(user, &intf->users, link) {
3542 if (!user->handler->ipmi_watchdog_pretimeout)
3543 continue;
3545 user->handler->ipmi_watchdog_pretimeout(user->handler_data);
3547 rcu_read_unlock();
3551 static struct ipmi_smi_msg *
3552 smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg,
3553 unsigned char seq, long seqid)
3555 struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg();
3556 if (!smi_msg)
3557 /* If we can't allocate the message, then just return; we
3558 get 4 retries, so this should be ok. */
3559 return NULL;
3561 memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len);
3562 smi_msg->data_size = recv_msg->msg.data_len;
3563 smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid);
3565 #ifdef DEBUG_MSGING
3567 int m;
3568 printk("Resend: ");
3569 for (m = 0; m < smi_msg->data_size; m++)
3570 printk(" %2.2x", smi_msg->data[m]);
3571 printk("\n");
3573 #endif
3574 return smi_msg;
3577 static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
3578 struct list_head *timeouts, long timeout_period,
3579 int slot, unsigned long *flags)
3581 struct ipmi_recv_msg *msg;
3582 struct ipmi_smi_handlers *handlers;
3584 if (intf->intf_num == -1)
3585 return;
3587 if (!ent->inuse)
3588 return;
3590 ent->timeout -= timeout_period;
3591 if (ent->timeout > 0)
3592 return;
3594 if (ent->retries_left == 0) {
3595 /* The message has used all its retries. */
3596 ent->inuse = 0;
3597 msg = ent->recv_msg;
3598 list_add_tail(&msg->link, timeouts);
3599 spin_lock(&intf->counter_lock);
3600 if (ent->broadcast)
3601 intf->timed_out_ipmb_broadcasts++;
3602 else if (ent->recv_msg->addr.addr_type == IPMI_LAN_ADDR_TYPE)
3603 intf->timed_out_lan_commands++;
3604 else
3605 intf->timed_out_ipmb_commands++;
3606 spin_unlock(&intf->counter_lock);
3607 } else {
3608 struct ipmi_smi_msg *smi_msg;
3609 /* More retries, send again. */
3611 /* Start with the max timer, set to normal
3612 timer after the message is sent. */
3613 ent->timeout = MAX_MSG_TIMEOUT;
3614 ent->retries_left--;
3615 spin_lock(&intf->counter_lock);
3616 if (ent->recv_msg->addr.addr_type == IPMI_LAN_ADDR_TYPE)
3617 intf->retransmitted_lan_commands++;
3618 else
3619 intf->retransmitted_ipmb_commands++;
3620 spin_unlock(&intf->counter_lock);
3622 smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot,
3623 ent->seqid);
3624 if (!smi_msg)
3625 return;
3627 spin_unlock_irqrestore(&intf->seq_lock, *flags);
3629 /* Send the new message. We send with a zero
3630 * priority. It timed out, I doubt time is
3631 * that critical now, and high priority
3632 * messages are really only for messages to the
3633 * local MC, which don't get resent. */
3634 handlers = intf->handlers;
3635 if (handlers)
3636 intf->handlers->sender(intf->send_info,
3637 smi_msg, 0);
3638 else
3639 ipmi_free_smi_msg(smi_msg);
3641 spin_lock_irqsave(&intf->seq_lock, *flags);
3645 static void ipmi_timeout_handler(long timeout_period)
3647 ipmi_smi_t intf;
3648 struct list_head timeouts;
3649 struct ipmi_recv_msg *msg, *msg2;
3650 struct ipmi_smi_msg *smi_msg, *smi_msg2;
3651 unsigned long flags;
3652 int i;
3654 INIT_LIST_HEAD(&timeouts);
3656 rcu_read_lock();
3657 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
3658 /* See if any waiting messages need to be processed. */
3659 spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
3660 list_for_each_entry_safe(smi_msg, smi_msg2,
3661 &intf->waiting_msgs, link) {
3662 if (!handle_new_recv_msg(intf, smi_msg)) {
3663 list_del(&smi_msg->link);
3664 ipmi_free_smi_msg(smi_msg);
3665 } else {
3666 /* To preserve message order, quit if we
3667 can't handle a message. */
3668 break;
3671 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3673 /* Go through the seq table and find any messages that
3674 have timed out, putting them in the timeouts
3675 list. */
3676 spin_lock_irqsave(&intf->seq_lock, flags);
3677 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
3678 check_msg_timeout(intf, &(intf->seq_table[i]),
3679 &timeouts, timeout_period, i,
3680 &flags);
3681 spin_unlock_irqrestore(&intf->seq_lock, flags);
3683 list_for_each_entry_safe(msg, msg2, &timeouts, link)
3684 deliver_err_response(msg, IPMI_TIMEOUT_COMPLETION_CODE);
3687 * Maintenance mode handling. Check the timeout
3688 * optimistically before we claim the lock. It may
3689 * mean a timeout gets missed occasionally, but that
3690 * only means the timeout gets extended by one period
3691 * in that case. No big deal, and it avoids the lock
3692 * most of the time.
3694 if (intf->auto_maintenance_timeout > 0) {
3695 spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
3696 if (intf->auto_maintenance_timeout > 0) {
3697 intf->auto_maintenance_timeout
3698 -= timeout_period;
3699 if (!intf->maintenance_mode
3700 && (intf->auto_maintenance_timeout <= 0))
3702 intf->maintenance_mode_enable = 0;
3703 maintenance_mode_update(intf);
3706 spin_unlock_irqrestore(&intf->maintenance_mode_lock,
3707 flags);
3710 rcu_read_unlock();
3713 static void ipmi_request_event(void)
3715 ipmi_smi_t intf;
3716 struct ipmi_smi_handlers *handlers;
3718 rcu_read_lock();
3719 /* Called from the timer, no need to check if handlers is
3720 * valid. */
3721 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
3722 /* No event requests when in maintenance mode. */
3723 if (intf->maintenance_mode_enable)
3724 continue;
3726 handlers = intf->handlers;
3727 if (handlers)
3728 handlers->request_events(intf->send_info);
3730 rcu_read_unlock();
3733 static struct timer_list ipmi_timer;
3735 /* Call every ~100 ms. */
3736 #define IPMI_TIMEOUT_TIME 100
3738 /* How many jiffies does it take to get to the timeout time. */
3739 #define IPMI_TIMEOUT_JIFFIES ((IPMI_TIMEOUT_TIME * HZ) / 1000)
3741 /* Request events from the queue every second (this is the number of
3742 IPMI_TIMEOUT_TIMES between event requests). Hopefully, in the
3743 future, IPMI will add a way to know immediately if an event is in
3744 the queue and this silliness can go away. */
3745 #define IPMI_REQUEST_EV_TIME (1000 / (IPMI_TIMEOUT_TIME))
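/* Worked example of the timing above: with IPMI_TIMEOUT_TIME = 100 ms the
   timer fires every (100 * HZ) / 1000 jiffies (25 jiffies at HZ=250), and
   IPMI_REQUEST_EV_TIME = 1000 / 100 = 10, so events are requested on every
   tenth tick, i.e. roughly once a second. */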
3747 static atomic_t stop_operation;
3748 static unsigned int ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
3750 static void ipmi_timeout(unsigned long data)
3752 if (atomic_read(&stop_operation))
3753 return;
3755 ticks_to_req_ev--;
3756 if (ticks_to_req_ev == 0) {
3757 ipmi_request_event();
3758 ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
3761 ipmi_timeout_handler(IPMI_TIMEOUT_TIME);
3763 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
3767 static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0);
3768 static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);
3770 /* FIXME - convert these to slabs. */
3771 static void free_smi_msg(struct ipmi_smi_msg *msg)
3773 atomic_dec(&smi_msg_inuse_count);
3774 kfree(msg);
3777 struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
3779 struct ipmi_smi_msg *rv;
3780 rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC);
3781 if (rv) {
3782 rv->done = free_smi_msg;
3783 rv->user_data = NULL;
3784 atomic_inc(&smi_msg_inuse_count);
3786 return rv;
3789 static void free_recv_msg(struct ipmi_recv_msg *msg)
3791 atomic_dec(&recv_msg_inuse_count);
3792 kfree(msg);
3795 struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
3797 struct ipmi_recv_msg *rv;
3799 rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC);
3800 if (rv) {
3801 rv->user = NULL;
3802 rv->done = free_recv_msg;
3803 atomic_inc(&recv_msg_inuse_count);
3805 return rv;
3808 void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
3810 if (msg->user)
3811 kref_put(&msg->user->refcount, free_user);
3812 msg->done(msg);
3815 #ifdef CONFIG_IPMI_PANIC_EVENT
3817 static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
3821 static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
3825 #ifdef CONFIG_IPMI_PANIC_STRING
3826 static void event_receiver_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
3828 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
3829 && (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE)
3830 && (msg->msg.cmd == IPMI_GET_EVENT_RECEIVER_CMD)
3831 && (msg->msg.data[0] == IPMI_CC_NO_ERROR))
3833 /* A get event receiver command, save it. */
3834 intf->event_receiver = msg->msg.data[1];
3835 intf->event_receiver_lun = msg->msg.data[2] & 0x3;
3839 static void device_id_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
3841 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
3842 && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
3843 && (msg->msg.cmd == IPMI_GET_DEVICE_ID_CMD)
3844 && (msg->msg.data[0] == IPMI_CC_NO_ERROR))
3846 /* A get device id command; save whether we are an event
3847 receiver or generator. */
3848 intf->local_sel_device = (msg->msg.data[6] >> 2) & 1;
3849 intf->local_event_generator = (msg->msg.data[6] >> 5) & 1;
3852 #endif
3854 static void send_panic_events(char *str)
3856 struct kernel_ipmi_msg msg;
3857 ipmi_smi_t intf;
3858 unsigned char data[16];
3859 struct ipmi_system_interface_addr *si;
3860 struct ipmi_addr addr;
3861 struct ipmi_smi_msg smi_msg;
3862 struct ipmi_recv_msg recv_msg;
3864 si = (struct ipmi_system_interface_addr *) &addr;
3865 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3866 si->channel = IPMI_BMC_CHANNEL;
3867 si->lun = 0;
3869 /* Fill in an event saying that we have failed. */
3870 msg.netfn = 0x04; /* Sensor or Event. */
3871 msg.cmd = 2; /* Platform event command. */
3872 msg.data = data;
3873 msg.data_len = 8;
3874 data[0] = 0x41; /* Kernel generator ID, IPMI table 5-4 */
3875 data[1] = 0x03; /* This is for IPMI 1.0. */
3876 data[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */
3877 data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */
3878 data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */
3880 /* Put a few breadcrumbs in. Hopefully later we can add more things
3881 to make the panic events more useful. */
3882 if (str) {
3883 data[3] = str[0];
3884 data[6] = str[1];
3885 data[7] = str[2];
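/* The eight data bytes above line up with the Platform Event ("Event
   Message") request fields -- generator id, event message format revision,
   sensor type, sensor number, event dir/type, event data 1-3 -- so three
   characters of the panic string ride along in the sensor-number and
   event-data 2/3 slots as breadcrumbs. */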
3888 smi_msg.done = dummy_smi_done_handler;
3889 recv_msg.done = dummy_recv_done_handler;
3891 /* For every registered interface, send the event. */
3892 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
3893 if (!intf->handlers)
3894 /* Interface is not ready. */
3895 continue;
3897 /* Send the event announcing the panic. */
3898 intf->handlers->set_run_to_completion(intf->send_info, 1);
3899 i_ipmi_request(NULL,
3900 intf,
3901 &addr,
3903 &msg,
3904 intf,
3905 &smi_msg,
3906 &recv_msg,
3908 intf->channels[0].address,
3909 intf->channels[0].lun,
3910 0, 1); /* Don't retry, and don't wait. */
3913 #ifdef CONFIG_IPMI_PANIC_STRING
3914 /* On every interface, dump a bunch of OEM events holding the
3915 string. */
3916 if (!str)
3917 return;
3919 /* For every registered interface, send the event. */
3920 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
3921 char *p = str;
3922 struct ipmi_ipmb_addr *ipmb;
3923 int j;
3925 if (intf->intf_num == -1)
3926 /* Interface was not ready yet. */
3927 continue;
3929 /* First job here is to figure out where to send the
3930 OEM events. There's no way in IPMI to send OEM
3931 events using an event send command, so we have to
3932 find the SEL to put them in and stick them in
3933 there. */
3935 /* Get capabilities from the get device id. */
3936 intf->local_sel_device = 0;
3937 intf->local_event_generator = 0;
3938 intf->event_receiver = 0;
3940 /* Request the device info from the local MC. */
3941 msg.netfn = IPMI_NETFN_APP_REQUEST;
3942 msg.cmd = IPMI_GET_DEVICE_ID_CMD;
3943 msg.data = NULL;
3944 msg.data_len = 0;
3945 intf->null_user_handler = device_id_fetcher;
3946 i_ipmi_request(NULL,
3947 intf,
3948 &addr,
3950 &msg,
3951 intf,
3952 &smi_msg,
3953 &recv_msg,
3955 intf->channels[0].address,
3956 intf->channels[0].lun,
3957 0, 1); /* Don't retry, and don't wait. */
3959 if (intf->local_event_generator) {
3960 /* Request the event receiver from the local MC. */
3961 msg.netfn = IPMI_NETFN_SENSOR_EVENT_REQUEST;
3962 msg.cmd = IPMI_GET_EVENT_RECEIVER_CMD;
3963 msg.data = NULL;
3964 msg.data_len = 0;
3965 intf->null_user_handler = event_receiver_fetcher;
3966 i_ipmi_request(NULL,
3967 intf,
3968 &addr,
3970 &msg,
3971 intf,
3972 &smi_msg,
3973 &recv_msg,
3975 intf->channels[0].address,
3976 intf->channels[0].lun,
3977 0, 1); /* no retry, and no wait. */
3979 intf->null_user_handler = NULL;
3981 /* Validate the event receiver. The low bit must not
3982 be 1 (it must be a valid IPMB address), it cannot
3983 be zero, and it must not be my address. */
3984 if (((intf->event_receiver & 1) == 0)
3985 && (intf->event_receiver != 0)
3986 && (intf->event_receiver != intf->channels[0].address))
3988 /* The event receiver is valid, send an IPMB
3989 message. */
3990 ipmb = (struct ipmi_ipmb_addr *) &addr;
3991 ipmb->addr_type = IPMI_IPMB_ADDR_TYPE;
3992 ipmb->channel = 0; /* FIXME - is this right? */
3993 ipmb->lun = intf->event_receiver_lun;
3994 ipmb->slave_addr = intf->event_receiver;
3995 } else if (intf->local_sel_device) {
3996 /* The event receiver was not valid (or was
3997 me), but I am an SEL device, just dump it
3998 in my SEL. */
3999 si = (struct ipmi_system_interface_addr *) &addr;
4000 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
4001 si->channel = IPMI_BMC_CHANNEL;
4002 si->lun = 0;
4003 } else
4004 continue; /* Nowhere to send the event. */
4007 msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */
4008 msg.cmd = IPMI_ADD_SEL_ENTRY_CMD;
4009 msg.data = data;
4010 msg.data_len = 16;
4012 j = 0;
4013 while (*p) {
4014 int size = strlen(p);
4016 if (size > 11)
4017 size = 11;
4018 data[0] = 0;
4019 data[1] = 0;
4020 data[2] = 0xf0; /* OEM event without timestamp. */
4021 data[3] = intf->channels[0].address;
4022 data[4] = j++; /* sequence # */
4023 /* Always give 11 bytes, so strncpy will fill
4024 it with zeroes for me. */
4025 strncpy(data+5, p, 11);
4026 p += size;
4028 i_ipmi_request(NULL,
4029 intf,
4030 &addr,
4032 &msg,
4033 intf,
4034 &smi_msg,
4035 &recv_msg,
4037 intf->channels[0].address,
4038 intf->channels[0].lun,
4039 0, 1); /* no retry, and no wait. */
4042 #endif /* CONFIG_IPMI_PANIC_STRING */
4044 #endif /* CONFIG_IPMI_PANIC_EVENT */
4046 static int has_panicked;
4048 static int panic_event(struct notifier_block *this,
4049 unsigned long event,
4050 void *ptr)
4052 ipmi_smi_t intf;
4054 if (has_panicked)
4055 return NOTIFY_DONE;
4056 has_panicked = 1;
4058 /* For every registered interface, set it to run to completion. */
4059 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
4060 if (!intf->handlers)
4061 /* Interface is not ready. */
4062 continue;
4064 intf->handlers->set_run_to_completion(intf->send_info, 1);
4067 #ifdef CONFIG_IPMI_PANIC_EVENT
4068 send_panic_events(ptr);
4069 #endif
4071 return NOTIFY_DONE;
4074 static struct notifier_block panic_block = {
4075 .notifier_call = panic_event,
4076 .next = NULL,
4077 .priority = 200 /* priority: INT_MAX >= x >= 0 */
4080 static int ipmi_init_msghandler(void)
4082 int rv;
4084 if (initialized)
4085 return 0;
4087 rv = driver_register(&ipmidriver);
4088 if (rv) {
4089 printk(KERN_ERR PFX "Could not register IPMI driver\n");
4090 return rv;
4093 printk(KERN_INFO "ipmi message handler version "
4094 IPMI_DRIVER_VERSION "\n");
4096 #ifdef CONFIG_PROC_FS
4097 proc_ipmi_root = proc_mkdir("ipmi", NULL);
4098 if (!proc_ipmi_root) {
4099 printk(KERN_ERR PFX "Unable to create IPMI proc dir");
4100 return -ENOMEM;
4103 proc_ipmi_root->owner = THIS_MODULE;
4104 #endif /* CONFIG_PROC_FS */
4106 setup_timer(&ipmi_timer, ipmi_timeout, 0);
4107 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
4109 atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
4111 initialized = 1;
4113 return 0;
4116 static __init int ipmi_init_msghandler_mod(void)
4118 ipmi_init_msghandler();
4119 return 0;
4122 static __exit void cleanup_ipmi(void)
4124 int count;
4126 if (!initialized)
4127 return;
4129 atomic_notifier_chain_unregister(&panic_notifier_list, &panic_block);
4131 /* This can't be called if any interfaces exist, so no worry about
4132 shutting down the interfaces. */
4134 /* Tell the timer to stop, then wait for it to stop. This avoids
4135 problems with race conditions removing the timer here. */
4136 atomic_inc(&stop_operation);
4137 del_timer_sync(&ipmi_timer);
4139 #ifdef CONFIG_PROC_FS
4140 remove_proc_entry(proc_ipmi_root->name, &proc_root);
4141 #endif /* CONFIG_PROC_FS */
4143 driver_unregister(&ipmidriver);
4145 initialized = 0;
4147 /* Check for buffer leaks. */
4148 count = atomic_read(&smi_msg_inuse_count);
4149 if (count != 0)
4150 printk(KERN_WARNING PFX "SMI message count %d at exit\n",
4151 count);
4152 count = atomic_read(&recv_msg_inuse_count);
4153 if (count != 0)
4154 printk(KERN_WARNING PFX "recv message count %d at exit\n",
4155 count);
4157 module_exit(cleanup_ipmi);
4159 module_init(ipmi_init_msghandler_mod);
4160 MODULE_LICENSE("GPL");
4161 MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
4162 MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI interface.");
4163 MODULE_VERSION(IPMI_DRIVER_VERSION);
4165 EXPORT_SYMBOL(ipmi_create_user);
4166 EXPORT_SYMBOL(ipmi_destroy_user);
4167 EXPORT_SYMBOL(ipmi_get_version);
4168 EXPORT_SYMBOL(ipmi_request_settime);
4169 EXPORT_SYMBOL(ipmi_request_supply_msgs);
4170 EXPORT_SYMBOL(ipmi_register_smi);
4171 EXPORT_SYMBOL(ipmi_unregister_smi);
4172 EXPORT_SYMBOL(ipmi_register_for_cmd);
4173 EXPORT_SYMBOL(ipmi_unregister_for_cmd);
4174 EXPORT_SYMBOL(ipmi_smi_msg_received);
4175 EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
4176 EXPORT_SYMBOL(ipmi_alloc_smi_msg);
4177 EXPORT_SYMBOL(ipmi_addr_length);
4178 EXPORT_SYMBOL(ipmi_validate_addr);
4179 EXPORT_SYMBOL(ipmi_set_gets_events);
4180 EXPORT_SYMBOL(ipmi_smi_watcher_register);
4181 EXPORT_SYMBOL(ipmi_smi_watcher_unregister);
4182 EXPORT_SYMBOL(ipmi_set_my_address);
4183 EXPORT_SYMBOL(ipmi_get_my_address);
4184 EXPORT_SYMBOL(ipmi_set_my_LUN);
4185 EXPORT_SYMBOL(ipmi_get_my_LUN);
4186 EXPORT_SYMBOL(ipmi_smi_add_proc_entry);
4187 EXPORT_SYMBOL(ipmi_user_set_run_to_completion);
4188 EXPORT_SYMBOL(ipmi_free_recv_msg);