1 /*
2 * ipmi_msghandler.c
4 * Incoming and outgoing message routing for an IPMI interface.
6 * Author: MontaVista Software, Inc.
7 * Corey Minyard <minyard@mvista.com>
8 * source@mvista.com
10 * Copyright 2002 MontaVista Software Inc.
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
18 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
19 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
23 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
24 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
25 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
26 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
27 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 * You should have received a copy of the GNU General Public License along
30 * with this program; if not, write to the Free Software Foundation, Inc.,
31 * 675 Mass Ave, Cambridge, MA 02139, USA.
34 #include <linux/config.h>
35 #include <linux/module.h>
36 #include <linux/errno.h>
37 #include <asm/system.h>
38 #include <linux/sched.h>
39 #include <linux/poll.h>
40 #include <linux/spinlock.h>
41 #include <linux/mutex.h>
42 #include <linux/slab.h>
43 #include <linux/ipmi.h>
44 #include <linux/ipmi_smi.h>
45 #include <linux/notifier.h>
46 #include <linux/init.h>
47 #include <linux/proc_fs.h>
48 #include <linux/rcupdate.h>
50 #define PFX "IPMI message handler: "
52 #define IPMI_DRIVER_VERSION "39.0"
54 static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
55 static int ipmi_init_msghandler(void);
57 static int initialized = 0;
59 #ifdef CONFIG_PROC_FS
60 struct proc_dir_entry *proc_ipmi_root = NULL;
61 EXPORT_SYMBOL(proc_ipmi_root);
62 #endif /* CONFIG_PROC_FS */
64 #define MAX_EVENTS_IN_QUEUE 25
66 /* Don't let a message sit in a queue forever; always time it with at least
67 the max message timer. This is in milliseconds. */
68 #define MAX_MSG_TIMEOUT 60000
72 * The main "user" data structure.
74 struct ipmi_user
76 struct list_head link;
78 /* Set to "0" when the user is destroyed. */
79 int valid;
81 struct kref refcount;
83 /* The upper layer that handles receive messages. */
84 struct ipmi_user_hndl *handler;
85 void *handler_data;
87 /* The interface this user is bound to. */
88 ipmi_smi_t intf;
90 /* Does this interface receive IPMI events? */
91 int gets_events;
94 struct cmd_rcvr
96 struct list_head link;
98 ipmi_user_t user;
99 unsigned char netfn;
100 unsigned char cmd;
103 * This is used to form a linked list during mass deletion.
104 * Since this is in an RCU list, we cannot use the link above
105 * or change any data until the RCU period completes. So we
106 * use this next variable during mass deletion so we can have
107 * a list and don't have to wait and restart the search on
108 * every individual deletion of a command. */
109 struct cmd_rcvr *next;
112 struct seq_table
114 unsigned int inuse : 1;
115 unsigned int broadcast : 1;
117 unsigned long timeout;
118 unsigned long orig_timeout;
119 unsigned int retries_left;
121 /* To verify on an incoming send message response that this is
122 the message that the response is for, we keep a sequence id
123 and increment it every time we send a message. */
124 long seqid;
126 /* This is held so we can properly respond to the message on a
127 timeout, and it is used to hold the temporary data for
128 retransmission, too. */
129 struct ipmi_recv_msg *recv_msg;
132 /* Store the information in a msgid (long) to allow us to find a
133 sequence table entry from the msgid. */
134 #define STORE_SEQ_IN_MSGID(seq, seqid) (((seq&0xff)<<26) | (seqid&0x3ffffff))
136 #define GET_SEQ_FROM_MSGID(msgid, seq, seqid) \
137 do { \
138 seq = ((msgid >> 26) & 0x3f); \
139 seqid = (msgid & 0x3fffff); \
140 } while (0)
142 #define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3fffff)
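/*
 * Worked example of the msgid packing above (added as an illustration,
 * not part of the original source): for seq = 5 and seqid = 0x123,
 *
 *	STORE_SEQ_IN_MSGID(5, 0x123) == (5 << 26) | 0x123 == 0x14000123
 *	GET_SEQ_FROM_MSGID(0x14000123, seq, seqid) -> seq = 5, seqid = 0x123
 *
 * NEXT_SEQID() keeps seqid within 22 bits, so the narrower mask used by
 * GET_SEQ_FROM_MSGID() still round-trips any seqid generated here.
 */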
144 struct ipmi_channel
146 unsigned char medium;
147 unsigned char protocol;
149 /* My slave address. This is initialized to IPMI_BMC_SLAVE_ADDR,
150 but may be changed by the user. */
151 unsigned char address;
153 /* My LUN. This should generally stay the SMS LUN, but just in
154 case... */
155 unsigned char lun;
158 #ifdef CONFIG_PROC_FS
159 struct ipmi_proc_entry
161 char *name;
162 struct ipmi_proc_entry *next;
164 #endif
166 struct bmc_device
168 struct platform_device *dev;
169 struct ipmi_device_id id;
170 unsigned char guid[16];
171 int guid_set;
173 struct kref refcount;
175 /* bmc device attributes */
176 struct device_attribute device_id_attr;
177 struct device_attribute provides_dev_sdrs_attr;
178 struct device_attribute revision_attr;
179 struct device_attribute firmware_rev_attr;
180 struct device_attribute version_attr;
181 struct device_attribute add_dev_support_attr;
182 struct device_attribute manufacturer_id_attr;
183 struct device_attribute product_id_attr;
184 struct device_attribute guid_attr;
185 struct device_attribute aux_firmware_rev_attr;
188 #define IPMI_IPMB_NUM_SEQ 64
189 #define IPMI_MAX_CHANNELS 16
190 struct ipmi_smi
192 /* What interface number are we? */
193 int intf_num;
195 struct kref refcount;
197 /* The list of upper layers that are using me. seq_lock
198 * protects this. */
199 struct list_head users;
201 /* Used for wake ups at startup. */
202 wait_queue_head_t waitq;
204 struct bmc_device *bmc;
205 char *my_dev_name;
207 /* This is the lower-layer's sender routine. */
208 struct ipmi_smi_handlers *handlers;
209 void *send_info;
211 #ifdef CONFIG_PROC_FS
212 /* A list of proc entries for this interface.  Additions and
213 removals are protected by proc_entry_lock below. */
215 spinlock_t proc_entry_lock;
216 struct ipmi_proc_entry *proc_entries;
217 #endif
219 /* Driver-model device for the system interface. */
220 struct device *si_dev;
222 /* A table of sequence numbers for this interface. We use the
223 sequence numbers for IPMB messages that go out of the
224 interface to match them up with their responses. A routine
225 is called periodically to time the items in this list. */
226 spinlock_t seq_lock;
227 struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
228 int curr_seq;
230 /* Messages that were delayed for some reason (out of memory,
231 for instance), will go in here to be processed later in a
232 periodic timer interrupt. */
233 spinlock_t waiting_msgs_lock;
234 struct list_head waiting_msgs;
236 /* The list of command receivers that are registered for commands
237 on this interface. */
238 struct mutex cmd_rcvrs_mutex;
239 struct list_head cmd_rcvrs;
241 /* Events that were queued because no one was there to receive
242 them. */
243 spinlock_t events_lock; /* For dealing with event stuff. */
244 struct list_head waiting_events;
245 unsigned int waiting_events_count; /* How many events in queue? */
247 /* The event receiver for my BMC, only really used at panic
248 shutdown as a place to store this. */
249 unsigned char event_receiver;
250 unsigned char event_receiver_lun;
251 unsigned char local_sel_device;
252 unsigned char local_event_generator;
254 /* A cheap hack: if this is non-null and a message to an
255 interface comes in with a NULL user, call this routine with
256 it. Note that the message will still be freed by the
257 caller. This only works on the system interface. */
258 void (*null_user_handler)(ipmi_smi_t intf, struct ipmi_recv_msg *msg);
260 /* When we are scanning the channels for an SMI, this will
261 tell which channel we are scanning. */
262 int curr_channel;
264 /* Channel information */
265 struct ipmi_channel channels[IPMI_MAX_CHANNELS];
267 /* Proc FS stuff. */
268 struct proc_dir_entry *proc_dir;
269 char proc_dir_name[10];
271 spinlock_t counter_lock; /* For making counters atomic. */
273 /* Commands we got that were invalid. */
274 unsigned int sent_invalid_commands;
276 /* Commands we sent to the MC. */
277 unsigned int sent_local_commands;
278 /* Responses from the MC that were delivered to a user. */
279 unsigned int handled_local_responses;
280 /* Responses from the MC that were not delivered to a user. */
281 unsigned int unhandled_local_responses;
283 /* Commands we sent out to the IPMB bus. */
284 unsigned int sent_ipmb_commands;
285 /* Commands sent on the IPMB that had errors on the SEND CMD */
286 unsigned int sent_ipmb_command_errs;
287 /* Each retransmit increments this count. */
288 unsigned int retransmitted_ipmb_commands;
289 /* When a message times out (runs out of retransmits) this is
290 incremented. */
291 unsigned int timed_out_ipmb_commands;
293 /* This is like above, but for broadcasts. Broadcasts are
294 *not* included in the above count (they are expected to
295 time out). */
296 unsigned int timed_out_ipmb_broadcasts;
298 /* Responses I have sent to the IPMB bus. */
299 unsigned int sent_ipmb_responses;
301 /* The response was delivered to the user. */
302 unsigned int handled_ipmb_responses;
303 /* The response had invalid data in it. */
304 unsigned int invalid_ipmb_responses;
305 /* The response didn't have anyone waiting for it. */
306 unsigned int unhandled_ipmb_responses;
308 /* Commands we sent out on the LAN. */
309 unsigned int sent_lan_commands;
310 /* Commands sent on the LAN that had errors on the SEND CMD */
311 unsigned int sent_lan_command_errs;
312 /* Each retransmit increments this count. */
313 unsigned int retransmitted_lan_commands;
314 /* When a message times out (runs out of retransmits) this is
315 incremented. */
316 unsigned int timed_out_lan_commands;
318 /* Responses I have sent on the LAN. */
319 unsigned int sent_lan_responses;
321 /* The response was delivered to the user. */
322 unsigned int handled_lan_responses;
323 /* The response had invalid data in it. */
324 unsigned int invalid_lan_responses;
325 /* The response didn't have anyone waiting for it. */
326 unsigned int unhandled_lan_responses;
328 /* The command was delivered to the user. */
329 unsigned int handled_commands;
330 /* The command had invalid data in it. */
331 unsigned int invalid_commands;
332 /* The command didn't have anyone waiting for it. */
333 unsigned int unhandled_commands;
335 /* Invalid data in an event. */
336 unsigned int invalid_events;
337 /* Events that were received with the proper format. */
338 unsigned int events;
340 #define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev)
342 /* Used to mark an interface entry that cannot be used but is not a
343 * free entry, either, primarily used at creation and deletion time so
344 * a slot doesn't get reused too quickly. */
345 #define IPMI_INVALID_INTERFACE_ENTRY ((ipmi_smi_t) ((long) 1))
346 #define IPMI_INVALID_INTERFACE(i) (((i) == NULL) \
347 || (i == IPMI_INVALID_INTERFACE_ENTRY))
350 * The driver model view of the IPMI messaging driver.
352 static struct device_driver ipmidriver = {
353 .name = "ipmi",
354 .bus = &platform_bus_type
356 static DEFINE_MUTEX(ipmidriver_mutex);
358 #define MAX_IPMI_INTERFACES 4
359 static ipmi_smi_t ipmi_interfaces[MAX_IPMI_INTERFACES];
361 /* Directly protects the ipmi_interfaces data structure. */
362 static DEFINE_SPINLOCK(interfaces_lock);
364 /* List of watchers that want to know when smi's are added and
365 deleted. */
366 static struct list_head smi_watchers = LIST_HEAD_INIT(smi_watchers);
367 static DECLARE_RWSEM(smi_watchers_sem);
370 static void free_recv_msg_list(struct list_head *q)
372 struct ipmi_recv_msg *msg, *msg2;
374 list_for_each_entry_safe(msg, msg2, q, link) {
375 list_del(&msg->link);
376 ipmi_free_recv_msg(msg);
380 static void clean_up_interface_data(ipmi_smi_t intf)
382 int i;
383 struct cmd_rcvr *rcvr, *rcvr2;
384 struct list_head list;
386 free_recv_msg_list(&intf->waiting_msgs);
387 free_recv_msg_list(&intf->waiting_events);
389 /* Wholesale remove all the entries from the list in the
390 * interface and wait for RCU to know that none are in use. */
391 mutex_lock(&intf->cmd_rcvrs_mutex);
392 list_add_rcu(&list, &intf->cmd_rcvrs);
393 list_del_rcu(&intf->cmd_rcvrs);
394 mutex_unlock(&intf->cmd_rcvrs_mutex);
395 synchronize_rcu();
397 list_for_each_entry_safe(rcvr, rcvr2, &list, link)
398 kfree(rcvr);
400 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
401 if ((intf->seq_table[i].inuse)
402 && (intf->seq_table[i].recv_msg))
404 ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
409 static void intf_free(struct kref *ref)
411 ipmi_smi_t intf = container_of(ref, struct ipmi_smi, refcount);
413 clean_up_interface_data(intf);
414 kfree(intf);
417 int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
419 int i;
420 unsigned long flags;
422 down_write(&smi_watchers_sem);
423 list_add(&(watcher->link), &smi_watchers);
424 up_write(&smi_watchers_sem);
425 spin_lock_irqsave(&interfaces_lock, flags);
426 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
427 ipmi_smi_t intf = ipmi_interfaces[i];
428 if (IPMI_INVALID_INTERFACE(intf))
429 continue;
430 spin_unlock_irqrestore(&interfaces_lock, flags);
431 watcher->new_smi(i, intf->si_dev);
432 spin_lock_irqsave(&interfaces_lock, flags);
434 spin_unlock_irqrestore(&interfaces_lock, flags);
435 return 0;
438 int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
440 down_write(&smi_watchers_sem);
441 list_del(&(watcher->link));
442 up_write(&smi_watchers_sem);
443 return 0;
446 static void
447 call_smi_watchers(int i, struct device *dev)
449 struct ipmi_smi_watcher *w;
451 down_read(&smi_watchers_sem);
452 list_for_each_entry(w, &smi_watchers, link) {
453 if (try_module_get(w->owner)) {
454 w->new_smi(i, dev);
455 module_put(w->owner);
458 up_read(&smi_watchers_sem);
461 static int
462 ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2)
464 if (addr1->addr_type != addr2->addr_type)
465 return 0;
467 if (addr1->channel != addr2->channel)
468 return 0;
470 if (addr1->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
471 struct ipmi_system_interface_addr *smi_addr1
472 = (struct ipmi_system_interface_addr *) addr1;
473 struct ipmi_system_interface_addr *smi_addr2
474 = (struct ipmi_system_interface_addr *) addr2;
475 return (smi_addr1->lun == smi_addr2->lun);
478 if ((addr1->addr_type == IPMI_IPMB_ADDR_TYPE)
479 || (addr1->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
481 struct ipmi_ipmb_addr *ipmb_addr1
482 = (struct ipmi_ipmb_addr *) addr1;
483 struct ipmi_ipmb_addr *ipmb_addr2
484 = (struct ipmi_ipmb_addr *) addr2;
486 return ((ipmb_addr1->slave_addr == ipmb_addr2->slave_addr)
487 && (ipmb_addr1->lun == ipmb_addr2->lun));
490 if (addr1->addr_type == IPMI_LAN_ADDR_TYPE) {
491 struct ipmi_lan_addr *lan_addr1
492 = (struct ipmi_lan_addr *) addr1;
493 struct ipmi_lan_addr *lan_addr2
494 = (struct ipmi_lan_addr *) addr2;
496 return ((lan_addr1->remote_SWID == lan_addr2->remote_SWID)
497 && (lan_addr1->local_SWID == lan_addr2->local_SWID)
498 && (lan_addr1->session_handle
499 == lan_addr2->session_handle)
500 && (lan_addr1->lun == lan_addr2->lun));
503 return 1;
506 int ipmi_validate_addr(struct ipmi_addr *addr, int len)
508 if (len < sizeof(struct ipmi_system_interface_addr)) {
509 return -EINVAL;
512 if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
513 if (addr->channel != IPMI_BMC_CHANNEL)
514 return -EINVAL;
515 return 0;
518 if ((addr->channel == IPMI_BMC_CHANNEL)
519 || (addr->channel >= IPMI_MAX_CHANNELS)
520 || (addr->channel < 0))
521 return -EINVAL;
523 if ((addr->addr_type == IPMI_IPMB_ADDR_TYPE)
524 || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
526 if (len < sizeof(struct ipmi_ipmb_addr)) {
527 return -EINVAL;
529 return 0;
532 if (addr->addr_type == IPMI_LAN_ADDR_TYPE) {
533 if (len < sizeof(struct ipmi_lan_addr)) {
534 return -EINVAL;
536 return 0;
539 return -EINVAL;
542 unsigned int ipmi_addr_length(int addr_type)
544 if (addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
545 return sizeof(struct ipmi_system_interface_addr);
547 if ((addr_type == IPMI_IPMB_ADDR_TYPE)
548 || (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
550 return sizeof(struct ipmi_ipmb_addr);
553 if (addr_type == IPMI_LAN_ADDR_TYPE)
554 return sizeof(struct ipmi_lan_addr);
556 return 0;
559 static void deliver_response(struct ipmi_recv_msg *msg)
561 if (!msg->user) {
562 ipmi_smi_t intf = msg->user_msg_data;
563 unsigned long flags;
565 /* Special handling for NULL users. */
566 if (intf->null_user_handler) {
567 intf->null_user_handler(intf, msg);
568 spin_lock_irqsave(&intf->counter_lock, flags);
569 intf->handled_local_responses++;
570 spin_unlock_irqrestore(&intf->counter_lock, flags);
571 } else {
572 /* No handler, so give up. */
573 spin_lock_irqsave(&intf->counter_lock, flags);
574 intf->unhandled_local_responses++;
575 spin_unlock_irqrestore(&intf->counter_lock, flags);
577 ipmi_free_recv_msg(msg);
578 } else {
579 ipmi_user_t user = msg->user;
580 user->handler->ipmi_recv_hndl(msg, user->handler_data);
584 /* Find the next sequence number not being used and add the given
585 message with the given timeout to the sequence table. This must be
586 called with the interface's seq_lock held. */
587 static int intf_next_seq(ipmi_smi_t intf,
588 struct ipmi_recv_msg *recv_msg,
589 unsigned long timeout,
590 int retries,
591 int broadcast,
592 unsigned char *seq,
593 long *seqid)
595 int rv = 0;
596 unsigned int i;
598 for (i = intf->curr_seq;
599 (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
600 i = (i+1)%IPMI_IPMB_NUM_SEQ)
602 if (!intf->seq_table[i].inuse)
603 break;
606 if (!intf->seq_table[i].inuse) {
607 intf->seq_table[i].recv_msg = recv_msg;
609 /* Start with the maximum timeout, when the send response
610 comes in we will start the real timer. */
611 intf->seq_table[i].timeout = MAX_MSG_TIMEOUT;
612 intf->seq_table[i].orig_timeout = timeout;
613 intf->seq_table[i].retries_left = retries;
614 intf->seq_table[i].broadcast = broadcast;
615 intf->seq_table[i].inuse = 1;
616 intf->seq_table[i].seqid = NEXT_SEQID(intf->seq_table[i].seqid);
617 *seq = i;
618 *seqid = intf->seq_table[i].seqid;
619 intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
620 } else {
621 rv = -EAGAIN;
624 return rv;
627 /* Return the receive message for the given sequence number and
628 release the sequence number so it can be reused. Some other data
629 is passed in to be sure the message matches up correctly (to help
630 guard against messages coming in after their timeout and the
631 sequence number being reused). */
632 static int intf_find_seq(ipmi_smi_t intf,
633 unsigned char seq,
634 short channel,
635 unsigned char cmd,
636 unsigned char netfn,
637 struct ipmi_addr *addr,
638 struct ipmi_recv_msg **recv_msg)
640 int rv = -ENODEV;
641 unsigned long flags;
643 if (seq >= IPMI_IPMB_NUM_SEQ)
644 return -EINVAL;
646 spin_lock_irqsave(&(intf->seq_lock), flags);
647 if (intf->seq_table[seq].inuse) {
648 struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;
650 if ((msg->addr.channel == channel)
651 && (msg->msg.cmd == cmd)
652 && (msg->msg.netfn == netfn)
653 && (ipmi_addr_equal(addr, &(msg->addr))))
655 *recv_msg = msg;
656 intf->seq_table[seq].inuse = 0;
657 rv = 0;
660 spin_unlock_irqrestore(&(intf->seq_lock), flags);
662 return rv;
666 /* Start the timer for a specific sequence table entry. */
667 static int intf_start_seq_timer(ipmi_smi_t intf,
668 long msgid)
670 int rv = -ENODEV;
671 unsigned long flags;
672 unsigned char seq;
673 unsigned long seqid;
676 GET_SEQ_FROM_MSGID(msgid, seq, seqid);
678 spin_lock_irqsave(&(intf->seq_lock), flags);
679 /* We do this verification because the user can be deleted
680 while a message is outstanding. */
681 if ((intf->seq_table[seq].inuse)
682 && (intf->seq_table[seq].seqid == seqid))
684 struct seq_table *ent = &(intf->seq_table[seq]);
685 ent->timeout = ent->orig_timeout;
686 rv = 0;
688 spin_unlock_irqrestore(&(intf->seq_lock), flags);
690 return rv;
693 /* Got an error for the send message for a specific sequence number. */
694 static int intf_err_seq(ipmi_smi_t intf,
695 long msgid,
696 unsigned int err)
698 int rv = -ENODEV;
699 unsigned long flags;
700 unsigned char seq;
701 unsigned long seqid;
702 struct ipmi_recv_msg *msg = NULL;
705 GET_SEQ_FROM_MSGID(msgid, seq, seqid);
707 spin_lock_irqsave(&(intf->seq_lock), flags);
708 /* We do this verification because the user can be deleted
709 while a message is outstanding. */
710 if ((intf->seq_table[seq].inuse)
711 && (intf->seq_table[seq].seqid == seqid))
713 struct seq_table *ent = &(intf->seq_table[seq]);
715 ent->inuse = 0;
716 msg = ent->recv_msg;
717 rv = 0;
719 spin_unlock_irqrestore(&(intf->seq_lock), flags);
721 if (msg) {
722 msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
723 msg->msg_data[0] = err;
724 msg->msg.netfn |= 1; /* Convert to a response. */
725 msg->msg.data_len = 1;
726 msg->msg.data = msg->msg_data;
727 deliver_response(msg);
730 return rv;
734 int ipmi_create_user(unsigned int if_num,
735 struct ipmi_user_hndl *handler,
736 void *handler_data,
737 ipmi_user_t *user)
739 unsigned long flags;
740 ipmi_user_t new_user;
741 int rv = 0;
742 ipmi_smi_t intf;
744 /* There is no module usecount here, because it's not
745 required. Since this can only be used by and called from
746 other modules, they will implicitly use this module, and
747 thus this can't be removed unless the other modules are
748 removed. */
750 if (handler == NULL)
751 return -EINVAL;
753 /* Make sure the driver is actually initialized, this handles
754 problems with initialization order. */
755 if (!initialized) {
756 rv = ipmi_init_msghandler();
757 if (rv)
758 return rv;
760 /* The init code doesn't return an error if it was turned
761 off, but it won't initialize. Check that. */
762 if (!initialized)
763 return -ENODEV;
766 new_user = kmalloc(sizeof(*new_user), GFP_KERNEL);
767 if (!new_user)
768 return -ENOMEM;
770 spin_lock_irqsave(&interfaces_lock, flags);
771 intf = ipmi_interfaces[if_num];
772 if ((if_num >= MAX_IPMI_INTERFACES) || IPMI_INVALID_INTERFACE(intf)) {
773 spin_unlock_irqrestore(&interfaces_lock, flags);
774 rv = -EINVAL;
775 goto out_kfree;
778 /* Note that each existing user holds a refcount to the interface. */
779 kref_get(&intf->refcount);
780 spin_unlock_irqrestore(&interfaces_lock, flags);
782 kref_init(&new_user->refcount);
783 new_user->handler = handler;
784 new_user->handler_data = handler_data;
785 new_user->intf = intf;
786 new_user->gets_events = 0;
788 if (!try_module_get(intf->handlers->owner)) {
789 rv = -ENODEV;
790 goto out_kref;
793 if (intf->handlers->inc_usecount) {
794 rv = intf->handlers->inc_usecount(intf->send_info);
795 if (rv) {
796 module_put(intf->handlers->owner);
797 goto out_kref;
801 new_user->valid = 1;
802 spin_lock_irqsave(&intf->seq_lock, flags);
803 list_add_rcu(&new_user->link, &intf->users);
804 spin_unlock_irqrestore(&intf->seq_lock, flags);
805 *user = new_user;
806 return 0;
808 out_kref:
809 kref_put(&intf->refcount, intf_free);
810 out_kfree:
811 kfree(new_user);
812 return rv;
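/*
 * Minimal usage sketch for the user API (hypothetical client code, not
 * part of this file).  A module that wants to talk to interface 0
 * supplies an ipmi_user_hndl and keeps the returned handle:
 *
 *	static void my_recv(struct ipmi_recv_msg *msg, void *handler_data)
 *	{
 *		... examine msg->msg and msg->addr, then release it ...
 *		ipmi_free_recv_msg(msg);
 *	}
 *
 *	static struct ipmi_user_hndl my_hndl = {
 *		.ipmi_recv_hndl = my_recv,
 *	};
 *	static ipmi_user_t my_user;
 *
 *	rv = ipmi_create_user(0, &my_hndl, NULL, &my_user);
 *	...
 *	rv = ipmi_destroy_user(my_user);
 *
 * The receive handler owns the message and must free it with
 * ipmi_free_recv_msg() when it is done.
 */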
815 static void free_user(struct kref *ref)
817 ipmi_user_t user = container_of(ref, struct ipmi_user, refcount);
818 kfree(user);
821 int ipmi_destroy_user(ipmi_user_t user)
823 ipmi_smi_t intf = user->intf;
824 int i;
825 unsigned long flags;
826 struct cmd_rcvr *rcvr;
827 struct cmd_rcvr *rcvrs = NULL;
829 user->valid = 0;
831 /* Remove the user from the interface's sequence table. */
832 spin_lock_irqsave(&intf->seq_lock, flags);
833 list_del_rcu(&user->link);
835 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
836 if (intf->seq_table[i].inuse
837 && (intf->seq_table[i].recv_msg->user == user))
839 intf->seq_table[i].inuse = 0;
842 spin_unlock_irqrestore(&intf->seq_lock, flags);
845 * Remove the user from the command receiver's table. First
846 * we build a list of everything (not using the standard link,
847 * since other things may be using it till we do
848 * synchronize_rcu()) then free everything in that list.
850 mutex_lock(&intf->cmd_rcvrs_mutex);
851 list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
852 if (rcvr->user == user) {
853 list_del_rcu(&rcvr->link);
854 rcvr->next = rcvrs;
855 rcvrs = rcvr;
858 mutex_unlock(&intf->cmd_rcvrs_mutex);
859 synchronize_rcu();
860 while (rcvrs) {
861 rcvr = rcvrs;
862 rcvrs = rcvr->next;
863 kfree(rcvr);
866 module_put(intf->handlers->owner);
867 if (intf->handlers->dec_usecount)
868 intf->handlers->dec_usecount(intf->send_info);
870 kref_put(&intf->refcount, intf_free);
872 kref_put(&user->refcount, free_user);
874 return 0;
877 void ipmi_get_version(ipmi_user_t user,
878 unsigned char *major,
879 unsigned char *minor)
881 *major = ipmi_version_major(&user->intf->bmc->id);
882 *minor = ipmi_version_minor(&user->intf->bmc->id);
885 int ipmi_set_my_address(ipmi_user_t user,
886 unsigned int channel,
887 unsigned char address)
889 if (channel >= IPMI_MAX_CHANNELS)
890 return -EINVAL;
891 user->intf->channels[channel].address = address;
892 return 0;
895 int ipmi_get_my_address(ipmi_user_t user,
896 unsigned int channel,
897 unsigned char *address)
899 if (channel >= IPMI_MAX_CHANNELS)
900 return -EINVAL;
901 *address = user->intf->channels[channel].address;
902 return 0;
905 int ipmi_set_my_LUN(ipmi_user_t user,
906 unsigned int channel,
907 unsigned char LUN)
909 if (channel >= IPMI_MAX_CHANNELS)
910 return -EINVAL;
911 user->intf->channels[channel].lun = LUN & 0x3;
912 return 0;
915 int ipmi_get_my_LUN(ipmi_user_t user,
916 unsigned int channel,
917 unsigned char *address)
919 if (channel >= IPMI_MAX_CHANNELS)
920 return -EINVAL;
921 *address = user->intf->channels[channel].lun;
922 return 0;
925 int ipmi_set_gets_events(ipmi_user_t user, int val)
927 unsigned long flags;
928 ipmi_smi_t intf = user->intf;
929 struct ipmi_recv_msg *msg, *msg2;
930 struct list_head msgs;
932 INIT_LIST_HEAD(&msgs);
934 spin_lock_irqsave(&intf->events_lock, flags);
935 user->gets_events = val;
937 if (val) {
938 /* Deliver any queued events. */
939 list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link)
940 list_move_tail(&msg->link, &msgs);
941 intf->waiting_events_count = 0;
944 /* Hold the events lock while doing this to preserve order. */
945 list_for_each_entry_safe(msg, msg2, &msgs, link) {
946 msg->user = user;
947 kref_get(&user->refcount);
948 deliver_response(msg);
951 spin_unlock_irqrestore(&intf->events_lock, flags);
953 return 0;
956 static struct cmd_rcvr *find_cmd_rcvr(ipmi_smi_t intf,
957 unsigned char netfn,
958 unsigned char cmd)
960 struct cmd_rcvr *rcvr;
962 list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
963 if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd))
964 return rcvr;
966 return NULL;
969 int ipmi_register_for_cmd(ipmi_user_t user,
970 unsigned char netfn,
971 unsigned char cmd)
973 ipmi_smi_t intf = user->intf;
974 struct cmd_rcvr *rcvr;
975 struct cmd_rcvr *entry;
976 int rv = 0;
979 rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL);
980 if (!rcvr)
981 return -ENOMEM;
982 rcvr->cmd = cmd;
983 rcvr->netfn = netfn;
984 rcvr->user = user;
986 mutex_lock(&intf->cmd_rcvrs_mutex);
987 /* Make sure the command/netfn is not already registered. */
988 entry = find_cmd_rcvr(intf, netfn, cmd);
989 if (entry) {
990 rv = -EBUSY;
991 goto out_unlock;
994 list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);
996 out_unlock:
997 mutex_unlock(&intf->cmd_rcvrs_mutex);
998 if (rv)
999 kfree(rcvr);
1001 return rv;
1004 int ipmi_unregister_for_cmd(ipmi_user_t user,
1005 unsigned char netfn,
1006 unsigned char cmd)
1008 ipmi_smi_t intf = user->intf;
1009 struct cmd_rcvr *rcvr;
1011 mutex_lock(&intf->cmd_rcvrs_mutex);
1012 /* Make sure the command/netfn is not already registered. */
1013 rcvr = find_cmd_rcvr(intf, netfn, cmd);
1014 if ((rcvr) && (rcvr->user == user)) {
1015 list_del_rcu(&rcvr->link);
1016 mutex_unlock(&intf->cmd_rcvrs_mutex);
1017 synchronize_rcu();
1018 kfree(rcvr);
1019 return 0;
1020 } else {
1021 mutex_unlock(&intf->cmd_rcvrs_mutex);
1022 return -ENOENT;
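/*
 * Illustration (hypothetical, not part of this file): a user that wants
 * incoming Get Device ID requests handed to it would register the
 * netfn/cmd pair and later unregister it:
 *
 *	rv = ipmi_register_for_cmd(my_user, IPMI_NETFN_APP_REQUEST,
 *				   IPMI_GET_DEVICE_ID_CMD);
 *	...
 *	ipmi_unregister_for_cmd(my_user, IPMI_NETFN_APP_REQUEST,
 *				IPMI_GET_DEVICE_ID_CMD);
 */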
1026 void ipmi_user_set_run_to_completion(ipmi_user_t user, int val)
1028 ipmi_smi_t intf = user->intf;
1029 intf->handlers->set_run_to_completion(intf->send_info, val);
1032 static unsigned char
1033 ipmb_checksum(unsigned char *data, int size)
1035 unsigned char csum = 0;
1037 for (; size > 0; size--, data++)
1038 csum += *data;
1040 return -csum;
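/*
 * Illustration (not part of the original source): ipmb_checksum()
 * returns the two's complement of the byte sum, so for the two header
 * bytes { 0x20, 0x18 } it returns (unsigned char)-(0x20 + 0x18) = 0xc8,
 * and 0x20 + 0x18 + 0xc8 == 0x100.  The sum over the protected bytes
 * plus the checksum is therefore zero modulo 256, which is how an IPMB
 * receiver verifies the header and the message body.
 */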
1043 static inline void format_ipmb_msg(struct ipmi_smi_msg *smi_msg,
1044 struct kernel_ipmi_msg *msg,
1045 struct ipmi_ipmb_addr *ipmb_addr,
1046 long msgid,
1047 unsigned char ipmb_seq,
1048 int broadcast,
1049 unsigned char source_address,
1050 unsigned char source_lun)
1052 int i = broadcast;
1054 /* Format the IPMB header data. */
1055 smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
1056 smi_msg->data[1] = IPMI_SEND_MSG_CMD;
1057 smi_msg->data[2] = ipmb_addr->channel;
1058 if (broadcast)
1059 smi_msg->data[3] = 0;
1060 smi_msg->data[i+3] = ipmb_addr->slave_addr;
1061 smi_msg->data[i+4] = (msg->netfn << 2) | (ipmb_addr->lun & 0x3);
1062 smi_msg->data[i+5] = ipmb_checksum(&(smi_msg->data[i+3]), 2);
1063 smi_msg->data[i+6] = source_address;
1064 smi_msg->data[i+7] = (ipmb_seq << 2) | source_lun;
1065 smi_msg->data[i+8] = msg->cmd;
1067 /* Now tack on the data to the message. */
1068 if (msg->data_len > 0)
1069 memcpy(&(smi_msg->data[i+9]), msg->data,
1070 msg->data_len);
1071 smi_msg->data_size = msg->data_len + 9;
1073 /* Now calculate the checksum and tack it on. */
1074 smi_msg->data[i+smi_msg->data_size]
1075 = ipmb_checksum(&(smi_msg->data[i+6]),
1076 smi_msg->data_size-6);
1078 /* Add on the checksum size and the offset from the
1079 broadcast. */
1080 smi_msg->data_size += 1 + i;
1082 smi_msg->msgid = msgid;
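/*
 * Summary of the framing built above (added for reference; derived from
 * the code, not from the original comments).  For a non-broadcast
 * request the SMI-level Send Message buffer ends up as:
 *
 *	[0] netfn/LUN byte for the Send Message command (APP request)
 *	[1] IPMI_SEND_MSG_CMD
 *	[2] channel
 *	[3] rsAddr (target slave address)
 *	[4] target netfn << 2 | target LUN
 *	[5] checksum over bytes 3-4
 *	[6] rqAddr (our source address)
 *	[7] ipmb_seq << 2 | source LUN
 *	[8] cmd
 *	[9...] message data, then a checksum over bytes 6 to the end
 *
 * A broadcast inserts a 0x00 byte at offset 3 and shifts the IPMB
 * fields by one byte.
 */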
1085 static inline void format_lan_msg(struct ipmi_smi_msg *smi_msg,
1086 struct kernel_ipmi_msg *msg,
1087 struct ipmi_lan_addr *lan_addr,
1088 long msgid,
1089 unsigned char ipmb_seq,
1090 unsigned char source_lun)
1092 /* Format the IPMB header data. */
1093 smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
1094 smi_msg->data[1] = IPMI_SEND_MSG_CMD;
1095 smi_msg->data[2] = lan_addr->channel;
1096 smi_msg->data[3] = lan_addr->session_handle;
1097 smi_msg->data[4] = lan_addr->remote_SWID;
1098 smi_msg->data[5] = (msg->netfn << 2) | (lan_addr->lun & 0x3);
1099 smi_msg->data[6] = ipmb_checksum(&(smi_msg->data[4]), 2);
1100 smi_msg->data[7] = lan_addr->local_SWID;
1101 smi_msg->data[8] = (ipmb_seq << 2) | source_lun;
1102 smi_msg->data[9] = msg->cmd;
1104 /* Now tack on the data to the message. */
1105 if (msg->data_len > 0)
1106 memcpy(&(smi_msg->data[10]), msg->data,
1107 msg->data_len);
1108 smi_msg->data_size = msg->data_len + 10;
1110 /* Now calculate the checksum and tack it on. */
1111 smi_msg->data[smi_msg->data_size]
1112 = ipmb_checksum(&(smi_msg->data[7]),
1113 smi_msg->data_size-7);
1115 /* Add on the checksum size. */
1117 smi_msg->data_size += 1;
1119 smi_msg->msgid = msgid;
1122 /* Separate from ipmi_request so that the user does not have to be
1123 supplied in certain circumstances (mainly at panic time). If
1124 messages are supplied, they will be freed, even if an error
1125 occurs. */
1126 static int i_ipmi_request(ipmi_user_t user,
1127 ipmi_smi_t intf,
1128 struct ipmi_addr *addr,
1129 long msgid,
1130 struct kernel_ipmi_msg *msg,
1131 void *user_msg_data,
1132 void *supplied_smi,
1133 struct ipmi_recv_msg *supplied_recv,
1134 int priority,
1135 unsigned char source_address,
1136 unsigned char source_lun,
1137 int retries,
1138 unsigned int retry_time_ms)
1140 int rv = 0;
1141 struct ipmi_smi_msg *smi_msg;
1142 struct ipmi_recv_msg *recv_msg;
1143 unsigned long flags;
1146 if (supplied_recv) {
1147 recv_msg = supplied_recv;
1148 } else {
1149 recv_msg = ipmi_alloc_recv_msg();
1150 if (recv_msg == NULL) {
1151 return -ENOMEM;
1154 recv_msg->user_msg_data = user_msg_data;
1156 if (supplied_smi) {
1157 smi_msg = (struct ipmi_smi_msg *) supplied_smi;
1158 } else {
1159 smi_msg = ipmi_alloc_smi_msg();
1160 if (smi_msg == NULL) {
1161 ipmi_free_recv_msg(recv_msg);
1162 return -ENOMEM;
1166 recv_msg->user = user;
1167 if (user)
1168 kref_get(&user->refcount);
1169 recv_msg->msgid = msgid;
1170 /* Store the message to send in the receive message so timeout
1171 responses can get the proper response data. */
1172 recv_msg->msg = *msg;
1174 if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
1175 struct ipmi_system_interface_addr *smi_addr;
1177 if (msg->netfn & 1) {
1178 /* Responses are not allowed to the SMI. */
1179 rv = -EINVAL;
1180 goto out_err;
1183 smi_addr = (struct ipmi_system_interface_addr *) addr;
1184 if (smi_addr->lun > 3) {
1185 spin_lock_irqsave(&intf->counter_lock, flags);
1186 intf->sent_invalid_commands++;
1187 spin_unlock_irqrestore(&intf->counter_lock, flags);
1188 rv = -EINVAL;
1189 goto out_err;
1192 memcpy(&recv_msg->addr, smi_addr, sizeof(*smi_addr));
1194 if ((msg->netfn == IPMI_NETFN_APP_REQUEST)
1195 && ((msg->cmd == IPMI_SEND_MSG_CMD)
1196 || (msg->cmd == IPMI_GET_MSG_CMD)
1197 || (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD)))
1199 /* We don't let the user do these, since we manage
1200 the sequence numbers. */
1201 spin_lock_irqsave(&intf->counter_lock, flags);
1202 intf->sent_invalid_commands++;
1203 spin_unlock_irqrestore(&intf->counter_lock, flags);
1204 rv = -EINVAL;
1205 goto out_err;
1208 if ((msg->data_len + 2) > IPMI_MAX_MSG_LENGTH) {
1209 spin_lock_irqsave(&intf->counter_lock, flags);
1210 intf->sent_invalid_commands++;
1211 spin_unlock_irqrestore(&intf->counter_lock, flags);
1212 rv = -EMSGSIZE;
1213 goto out_err;
1216 smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3);
1217 smi_msg->data[1] = msg->cmd;
1218 smi_msg->msgid = msgid;
1219 smi_msg->user_data = recv_msg;
1220 if (msg->data_len > 0)
1221 memcpy(&(smi_msg->data[2]), msg->data, msg->data_len);
1222 smi_msg->data_size = msg->data_len + 2;
1223 spin_lock_irqsave(&intf->counter_lock, flags);
1224 intf->sent_local_commands++;
1225 spin_unlock_irqrestore(&intf->counter_lock, flags);
1226 } else if ((addr->addr_type == IPMI_IPMB_ADDR_TYPE)
1227 || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
1229 struct ipmi_ipmb_addr *ipmb_addr;
1230 unsigned char ipmb_seq;
1231 long seqid;
1232 int broadcast = 0;
1234 if (addr->channel >= IPMI_MAX_CHANNELS) {
1235 spin_lock_irqsave(&intf->counter_lock, flags);
1236 intf->sent_invalid_commands++;
1237 spin_unlock_irqrestore(&intf->counter_lock, flags);
1238 rv = -EINVAL;
1239 goto out_err;
1242 if (intf->channels[addr->channel].medium
1243 != IPMI_CHANNEL_MEDIUM_IPMB)
1245 spin_lock_irqsave(&intf->counter_lock, flags);
1246 intf->sent_invalid_commands++;
1247 spin_unlock_irqrestore(&intf->counter_lock, flags);
1248 rv = -EINVAL;
1249 goto out_err;
1252 if (retries < 0) {
1253 if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)
1254 retries = 0; /* Don't retry broadcasts. */
1255 else
1256 retries = 4;
1258 if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) {
1259 /* Broadcasts add a zero at the beginning of the
1260 message, but are otherwise the same as an IPMB
1261 address. */
1262 addr->addr_type = IPMI_IPMB_ADDR_TYPE;
1263 broadcast = 1;
1267 /* Default to 1 second retries. */
1268 if (retry_time_ms == 0)
1269 retry_time_ms = 1000;
1271 /* 9 for the header and 1 for the checksum, plus
1272 possibly one for the broadcast. */
1273 if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) {
1274 spin_lock_irqsave(&intf->counter_lock, flags);
1275 intf->sent_invalid_commands++;
1276 spin_unlock_irqrestore(&intf->counter_lock, flags);
1277 rv = -EMSGSIZE;
1278 goto out_err;
1281 ipmb_addr = (struct ipmi_ipmb_addr *) addr;
1282 if (ipmb_addr->lun > 3) {
1283 spin_lock_irqsave(&intf->counter_lock, flags);
1284 intf->sent_invalid_commands++;
1285 spin_unlock_irqrestore(&intf->counter_lock, flags);
1286 rv = -EINVAL;
1287 goto out_err;
1290 memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr));
1292 if (recv_msg->msg.netfn & 0x1) {
1293 /* It's a response, so use the user's sequence
1294 from msgid. */
1295 spin_lock_irqsave(&intf->counter_lock, flags);
1296 intf->sent_ipmb_responses++;
1297 spin_unlock_irqrestore(&intf->counter_lock, flags);
1298 format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid,
1299 msgid, broadcast,
1300 source_address, source_lun);
1302 /* Save the receive message so we can use it
1303 to deliver the response. */
1304 smi_msg->user_data = recv_msg;
1305 } else {
1306 /* It's a command, so get a sequence for it. */
1308 spin_lock_irqsave(&(intf->seq_lock), flags);
1310 spin_lock(&intf->counter_lock);
1311 intf->sent_ipmb_commands++;
1312 spin_unlock(&intf->counter_lock);
1314 /* Create a sequence number with a 1 second
1315 timeout and 4 retries. */
1316 rv = intf_next_seq(intf,
1317 recv_msg,
1318 retry_time_ms,
1319 retries,
1320 broadcast,
1321 &ipmb_seq,
1322 &seqid);
1323 if (rv) {
1324 /* We have used up all the sequence numbers,
1325 probably, so abort. */
1326 spin_unlock_irqrestore(&(intf->seq_lock),
1327 flags);
1328 goto out_err;
1331 /* Store the sequence number in the message,
1332 so that when the send message response
1333 comes back we can start the timer. */
1334 format_ipmb_msg(smi_msg, msg, ipmb_addr,
1335 STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
1336 ipmb_seq, broadcast,
1337 source_address, source_lun);
1339 /* Copy the message into the recv message data, so we
1340 can retransmit it later if necessary. */
1341 memcpy(recv_msg->msg_data, smi_msg->data,
1342 smi_msg->data_size);
1343 recv_msg->msg.data = recv_msg->msg_data;
1344 recv_msg->msg.data_len = smi_msg->data_size;
1346 /* We don't unlock until here, because we need
1347 to copy the completed message into the
1348 recv_msg before we release the lock.
1349 Otherwise, race conditions may bite us. I
1350 know that's pretty paranoid, but I prefer
1351 to be correct. */
1352 spin_unlock_irqrestore(&(intf->seq_lock), flags);
1354 } else if (addr->addr_type == IPMI_LAN_ADDR_TYPE) {
1355 struct ipmi_lan_addr *lan_addr;
1356 unsigned char ipmb_seq;
1357 long seqid;
1359 if (addr->channel >= IPMI_MAX_CHANNELS) {
1360 spin_lock_irqsave(&intf->counter_lock, flags);
1361 intf->sent_invalid_commands++;
1362 spin_unlock_irqrestore(&intf->counter_lock, flags);
1363 rv = -EINVAL;
1364 goto out_err;
1367 if ((intf->channels[addr->channel].medium
1368 != IPMI_CHANNEL_MEDIUM_8023LAN)
1369 && (intf->channels[addr->channel].medium
1370 != IPMI_CHANNEL_MEDIUM_ASYNC))
1372 spin_lock_irqsave(&intf->counter_lock, flags);
1373 intf->sent_invalid_commands++;
1374 spin_unlock_irqrestore(&intf->counter_lock, flags);
1375 rv = -EINVAL;
1376 goto out_err;
1379 retries = 4;
1381 /* Default to 1 second retries. */
1382 if (retry_time_ms == 0)
1383 retry_time_ms = 1000;
1385 /* 11 for the header and 1 for the checksum. */
1386 if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) {
1387 spin_lock_irqsave(&intf->counter_lock, flags);
1388 intf->sent_invalid_commands++;
1389 spin_unlock_irqrestore(&intf->counter_lock, flags);
1390 rv = -EMSGSIZE;
1391 goto out_err;
1394 lan_addr = (struct ipmi_lan_addr *) addr;
1395 if (lan_addr->lun > 3) {
1396 spin_lock_irqsave(&intf->counter_lock, flags);
1397 intf->sent_invalid_commands++;
1398 spin_unlock_irqrestore(&intf->counter_lock, flags);
1399 rv = -EINVAL;
1400 goto out_err;
1403 memcpy(&recv_msg->addr, lan_addr, sizeof(*lan_addr));
1405 if (recv_msg->msg.netfn & 0x1) {
1406 /* It's a response, so use the user's sequence
1407 from msgid. */
1408 spin_lock_irqsave(&intf->counter_lock, flags);
1409 intf->sent_lan_responses++;
1410 spin_unlock_irqrestore(&intf->counter_lock, flags);
1411 format_lan_msg(smi_msg, msg, lan_addr, msgid,
1412 msgid, source_lun);
1414 /* Save the receive message so we can use it
1415 to deliver the response. */
1416 smi_msg->user_data = recv_msg;
1417 } else {
1418 /* It's a command, so get a sequence for it. */
1420 spin_lock_irqsave(&(intf->seq_lock), flags);
1422 spin_lock(&intf->counter_lock);
1423 intf->sent_lan_commands++;
1424 spin_unlock(&intf->counter_lock);
1426 /* Create a sequence number with a 1 second
1427 timeout and 4 retries. */
1428 rv = intf_next_seq(intf,
1429 recv_msg,
1430 retry_time_ms,
1431 retries,
1433 &ipmb_seq,
1434 &seqid);
1435 if (rv) {
1436 /* We have used up all the sequence numbers,
1437 probably, so abort. */
1438 spin_unlock_irqrestore(&(intf->seq_lock),
1439 flags);
1440 goto out_err;
1443 /* Store the sequence number in the message,
1444 so that when the send message response
1445 comes back we can start the timer. */
1446 format_lan_msg(smi_msg, msg, lan_addr,
1447 STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
1448 ipmb_seq, source_lun);
1450 /* Copy the message into the recv message data, so we
1451 can retransmit it later if necessary. */
1452 memcpy(recv_msg->msg_data, smi_msg->data,
1453 smi_msg->data_size);
1454 recv_msg->msg.data = recv_msg->msg_data;
1455 recv_msg->msg.data_len = smi_msg->data_size;
1457 /* We don't unlock until here, because we need
1458 to copy the completed message into the
1459 recv_msg before we release the lock.
1460 Otherwise, race conditions may bite us. I
1461 know that's pretty paranoid, but I prefer
1462 to be correct. */
1463 spin_unlock_irqrestore(&(intf->seq_lock), flags);
1465 } else {
1466 /* Unknown address type. */
1467 spin_lock_irqsave(&intf->counter_lock, flags);
1468 intf->sent_invalid_commands++;
1469 spin_unlock_irqrestore(&intf->counter_lock, flags);
1470 rv = -EINVAL;
1471 goto out_err;
1474 #ifdef DEBUG_MSGING
1476 int m;
1477 for (m = 0; m < smi_msg->data_size; m++)
1478 printk(" %2.2x", smi_msg->data[m]);
1479 printk("\n");
1481 #endif
1482 intf->handlers->sender(intf->send_info, smi_msg, priority);
1484 return 0;
1486 out_err:
1487 ipmi_free_smi_msg(smi_msg);
1488 ipmi_free_recv_msg(recv_msg);
1489 return rv;
1492 static int check_addr(ipmi_smi_t intf,
1493 struct ipmi_addr *addr,
1494 unsigned char *saddr,
1495 unsigned char *lun)
1497 if (addr->channel >= IPMI_MAX_CHANNELS)
1498 return -EINVAL;
1499 *lun = intf->channels[addr->channel].lun;
1500 *saddr = intf->channels[addr->channel].address;
1501 return 0;
1504 int ipmi_request_settime(ipmi_user_t user,
1505 struct ipmi_addr *addr,
1506 long msgid,
1507 struct kernel_ipmi_msg *msg,
1508 void *user_msg_data,
1509 int priority,
1510 int retries,
1511 unsigned int retry_time_ms)
1513 unsigned char saddr, lun;
1514 int rv;
1516 if (!user)
1517 return -EINVAL;
1518 rv = check_addr(user->intf, addr, &saddr, &lun);
1519 if (rv)
1520 return rv;
1521 return i_ipmi_request(user,
1522 user->intf,
1523 addr,
1524 msgid,
1525 msg,
1526 user_msg_data,
1527 NULL, NULL,
1528 priority,
1529 saddr,
1530 lun,
1531 retries,
1532 retry_time_ms);
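/*
 * Illustration (hypothetical caller, not part of this file): sending a
 * Get Device ID command to the local BMC through the system interface,
 * using the driver's default retry settings:
 *
 *	struct ipmi_system_interface_addr addr = {
 *		.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE,
 *		.channel   = IPMI_BMC_CHANNEL,
 *		.lun       = 0,
 *	};
 *	struct kernel_ipmi_msg msg = {
 *		.netfn    = IPMI_NETFN_APP_REQUEST,
 *		.cmd      = IPMI_GET_DEVICE_ID_CMD,
 *		.data     = NULL,
 *		.data_len = 0,
 *	};
 *
 *	rv = ipmi_request_settime(my_user, (struct ipmi_addr *) &addr,
 *				  0, &msg, NULL, 0, -1, 0);
 *
 * The response is delivered through the user's ipmi_recv_hndl callback.
 */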
1535 int ipmi_request_supply_msgs(ipmi_user_t user,
1536 struct ipmi_addr *addr,
1537 long msgid,
1538 struct kernel_ipmi_msg *msg,
1539 void *user_msg_data,
1540 void *supplied_smi,
1541 struct ipmi_recv_msg *supplied_recv,
1542 int priority)
1544 unsigned char saddr, lun;
1545 int rv;
1547 if (!user)
1548 return -EINVAL;
1549 rv = check_addr(user->intf, addr, &saddr, &lun);
1550 if (rv)
1551 return rv;
1552 return i_ipmi_request(user,
1553 user->intf,
1554 addr,
1555 msgid,
1556 msg,
1557 user_msg_data,
1558 supplied_smi,
1559 supplied_recv,
1560 priority,
1561 saddr,
1562 lun,
1563 -1, 0);
1566 static int ipmb_file_read_proc(char *page, char **start, off_t off,
1567 int count, int *eof, void *data)
1569 char *out = (char *) page;
1570 ipmi_smi_t intf = data;
1571 int i;
1572 int rv = 0;
1574 for (i = 0; i < IPMI_MAX_CHANNELS; i++)
1575 rv += sprintf(out+rv, "%x ", intf->channels[i].address);
1576 out[rv-1] = '\n'; /* Replace the final space with a newline */
1577 out[rv] = '\0';
1578 rv++;
1579 return rv;
1582 static int version_file_read_proc(char *page, char **start, off_t off,
1583 int count, int *eof, void *data)
1585 char *out = (char *) page;
1586 ipmi_smi_t intf = data;
1588 return sprintf(out, "%d.%d\n",
1589 ipmi_version_major(&intf->bmc->id),
1590 ipmi_version_minor(&intf->bmc->id));
1593 static int stat_file_read_proc(char *page, char **start, off_t off,
1594 int count, int *eof, void *data)
1596 char *out = (char *) page;
1597 ipmi_smi_t intf = data;
1599 out += sprintf(out, "sent_invalid_commands: %d\n",
1600 intf->sent_invalid_commands);
1601 out += sprintf(out, "sent_local_commands: %d\n",
1602 intf->sent_local_commands);
1603 out += sprintf(out, "handled_local_responses: %d\n",
1604 intf->handled_local_responses);
1605 out += sprintf(out, "unhandled_local_responses: %d\n",
1606 intf->unhandled_local_responses);
1607 out += sprintf(out, "sent_ipmb_commands: %d\n",
1608 intf->sent_ipmb_commands);
1609 out += sprintf(out, "sent_ipmb_command_errs: %d\n",
1610 intf->sent_ipmb_command_errs);
1611 out += sprintf(out, "retransmitted_ipmb_commands: %d\n",
1612 intf->retransmitted_ipmb_commands);
1613 out += sprintf(out, "timed_out_ipmb_commands: %d\n",
1614 intf->timed_out_ipmb_commands);
1615 out += sprintf(out, "timed_out_ipmb_broadcasts: %d\n",
1616 intf->timed_out_ipmb_broadcasts);
1617 out += sprintf(out, "sent_ipmb_responses: %d\n",
1618 intf->sent_ipmb_responses);
1619 out += sprintf(out, "handled_ipmb_responses: %d\n",
1620 intf->handled_ipmb_responses);
1621 out += sprintf(out, "invalid_ipmb_responses: %d\n",
1622 intf->invalid_ipmb_responses);
1623 out += sprintf(out, "unhandled_ipmb_responses: %d\n",
1624 intf->unhandled_ipmb_responses);
1625 out += sprintf(out, "sent_lan_commands: %d\n",
1626 intf->sent_lan_commands);
1627 out += sprintf(out, "sent_lan_command_errs: %d\n",
1628 intf->sent_lan_command_errs);
1629 out += sprintf(out, "retransmitted_lan_commands: %d\n",
1630 intf->retransmitted_lan_commands);
1631 out += sprintf(out, "timed_out_lan_commands: %d\n",
1632 intf->timed_out_lan_commands);
1633 out += sprintf(out, "sent_lan_responses: %d\n",
1634 intf->sent_lan_responses);
1635 out += sprintf(out, "handled_lan_responses: %d\n",
1636 intf->handled_lan_responses);
1637 out += sprintf(out, "invalid_lan_responses: %d\n",
1638 intf->invalid_lan_responses);
1639 out += sprintf(out, "unhandled_lan_responses: %d\n",
1640 intf->unhandled_lan_responses);
1641 out += sprintf(out, "handled_commands: %d\n",
1642 intf->handled_commands);
1643 out += sprintf(out, "invalid_commands: %d\n",
1644 intf->invalid_commands);
1645 out += sprintf(out, "unhandled_commands: %d\n",
1646 intf->unhandled_commands);
1647 out += sprintf(out, "invalid_events: %d\n",
1648 intf->invalid_events);
1649 out += sprintf(out, "events: %d\n",
1650 intf->events);
1652 return (out - ((char *) page));
1655 int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name,
1656 read_proc_t *read_proc, write_proc_t *write_proc,
1657 void *data, struct module *owner)
1659 int rv = 0;
1660 #ifdef CONFIG_PROC_FS
1661 struct proc_dir_entry *file;
1662 struct ipmi_proc_entry *entry;
1664 /* Create a list element. */
1665 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1666 if (!entry)
1667 return -ENOMEM;
1668 entry->name = kmalloc(strlen(name)+1, GFP_KERNEL);
1669 if (!entry->name) {
1670 kfree(entry);
1671 return -ENOMEM;
1673 strcpy(entry->name, name);
1675 file = create_proc_entry(name, 0, smi->proc_dir);
1676 if (!file) {
1677 kfree(entry->name);
1678 kfree(entry);
1679 rv = -ENOMEM;
1680 } else {
1681 file->nlink = 1;
1682 file->data = data;
1683 file->read_proc = read_proc;
1684 file->write_proc = write_proc;
1685 file->owner = owner;
1687 spin_lock(&smi->proc_entry_lock);
1688 /* Stick it on the list. */
1689 entry->next = smi->proc_entries;
1690 smi->proc_entries = entry;
1691 spin_unlock(&smi->proc_entry_lock);
1693 #endif /* CONFIG_PROC_FS */
1695 return rv;
1698 static int add_proc_entries(ipmi_smi_t smi, int num)
1700 int rv = 0;
1702 #ifdef CONFIG_PROC_FS
1703 sprintf(smi->proc_dir_name, "%d", num);
1704 smi->proc_dir = proc_mkdir(smi->proc_dir_name, proc_ipmi_root);
1705 if (!smi->proc_dir)
1706 rv = -ENOMEM;
1707 else {
1708 smi->proc_dir->owner = THIS_MODULE;
1711 if (rv == 0)
1712 rv = ipmi_smi_add_proc_entry(smi, "stats",
1713 stat_file_read_proc, NULL,
1714 smi, THIS_MODULE);
1716 if (rv == 0)
1717 rv = ipmi_smi_add_proc_entry(smi, "ipmb",
1718 ipmb_file_read_proc, NULL,
1719 smi, THIS_MODULE);
1721 if (rv == 0)
1722 rv = ipmi_smi_add_proc_entry(smi, "version",
1723 version_file_read_proc, NULL,
1724 smi, THIS_MODULE);
1725 #endif /* CONFIG_PROC_FS */
1727 return rv;
1730 static void remove_proc_entries(ipmi_smi_t smi)
1732 #ifdef CONFIG_PROC_FS
1733 struct ipmi_proc_entry *entry;
1735 spin_lock(&smi->proc_entry_lock);
1736 while (smi->proc_entries) {
1737 entry = smi->proc_entries;
1738 smi->proc_entries = entry->next;
1740 remove_proc_entry(entry->name, smi->proc_dir);
1741 kfree(entry->name);
1742 kfree(entry);
1744 spin_unlock(&smi->proc_entry_lock);
1745 remove_proc_entry(smi->proc_dir_name, proc_ipmi_root);
1746 #endif /* CONFIG_PROC_FS */
1749 static int __find_bmc_guid(struct device *dev, void *data)
1751 unsigned char *id = data;
1752 struct bmc_device *bmc = dev_get_drvdata(dev);
1753 return memcmp(bmc->guid, id, 16) == 0;
1756 static struct bmc_device *ipmi_find_bmc_guid(struct device_driver *drv,
1757 unsigned char *guid)
1759 struct device *dev;
1761 dev = driver_find_device(drv, NULL, guid, __find_bmc_guid);
1762 if (dev)
1763 return dev_get_drvdata(dev);
1764 else
1765 return NULL;
1768 struct prod_dev_id {
1769 unsigned int product_id;
1770 unsigned char device_id;
1773 static int __find_bmc_prod_dev_id(struct device *dev, void *data)
1775 struct prod_dev_id *id = data;
1776 struct bmc_device *bmc = dev_get_drvdata(dev);
1778 return (bmc->id.product_id == id->product_id
1780 && bmc->id.device_id == id->device_id);
1783 static struct bmc_device *ipmi_find_bmc_prod_dev_id(
1784 struct device_driver *drv,
1785 unsigned char product_id, unsigned char device_id)
1787 struct prod_dev_id id = {
1788 .product_id = product_id,
1789 .device_id = device_id,
1791 struct device *dev;
1793 dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id);
1794 if (dev)
1795 return dev_get_drvdata(dev);
1796 else
1797 return NULL;
1800 static ssize_t device_id_show(struct device *dev,
1801 struct device_attribute *attr,
1802 char *buf)
1804 struct bmc_device *bmc = dev_get_drvdata(dev);
1806 return snprintf(buf, 10, "%u\n", bmc->id.device_id);
1809 static ssize_t provides_dev_sdrs_show(struct device *dev,
1810 struct device_attribute *attr,
1811 char *buf)
1813 struct bmc_device *bmc = dev_get_drvdata(dev);
1815 return snprintf(buf, 10, "%u\n",
1816 (bmc->id.device_revision & 0x80) >> 7);
1819 static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
1820 char *buf)
1822 struct bmc_device *bmc = dev_get_drvdata(dev);
1824 return snprintf(buf, 20, "%u\n",
1825 bmc->id.device_revision & 0x0F);
1828 static ssize_t firmware_rev_show(struct device *dev,
1829 struct device_attribute *attr,
1830 char *buf)
1832 struct bmc_device *bmc = dev_get_drvdata(dev);
1834 return snprintf(buf, 20, "%u.%x\n", bmc->id.firmware_revision_1,
1835 bmc->id.firmware_revision_2);
1838 static ssize_t ipmi_version_show(struct device *dev,
1839 struct device_attribute *attr,
1840 char *buf)
1842 struct bmc_device *bmc = dev_get_drvdata(dev);
1844 return snprintf(buf, 20, "%u.%u\n",
1845 ipmi_version_major(&bmc->id),
1846 ipmi_version_minor(&bmc->id));
1849 static ssize_t add_dev_support_show(struct device *dev,
1850 struct device_attribute *attr,
1851 char *buf)
1853 struct bmc_device *bmc = dev_get_drvdata(dev);
1855 return snprintf(buf, 10, "0x%02x\n",
1856 bmc->id.additional_device_support);
1859 static ssize_t manufacturer_id_show(struct device *dev,
1860 struct device_attribute *attr,
1861 char *buf)
1863 struct bmc_device *bmc = dev_get_drvdata(dev);
1865 return snprintf(buf, 20, "0x%6.6x\n", bmc->id.manufacturer_id);
1868 static ssize_t product_id_show(struct device *dev,
1869 struct device_attribute *attr,
1870 char *buf)
1872 struct bmc_device *bmc = dev_get_drvdata(dev);
1874 return snprintf(buf, 10, "0x%4.4x\n", bmc->id.product_id);
1877 static ssize_t aux_firmware_rev_show(struct device *dev,
1878 struct device_attribute *attr,
1879 char *buf)
1881 struct bmc_device *bmc = dev_get_drvdata(dev);
1883 return snprintf(buf, 21, "0x%02x 0x%02x 0x%02x 0x%02x\n",
1884 bmc->id.aux_firmware_revision[3],
1885 bmc->id.aux_firmware_revision[2],
1886 bmc->id.aux_firmware_revision[1],
1887 bmc->id.aux_firmware_revision[0]);
1890 static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
1891 char *buf)
1893 struct bmc_device *bmc = dev_get_drvdata(dev);
1895 return snprintf(buf, 100, "%Lx%Lx\n",
1896 (long long) bmc->guid[0],
1897 (long long) bmc->guid[8]);
1900 static void
1901 cleanup_bmc_device(struct kref *ref)
1903 struct bmc_device *bmc;
1905 bmc = container_of(ref, struct bmc_device, refcount);
1907 device_remove_file(&bmc->dev->dev,
1908 &bmc->device_id_attr);
1909 device_remove_file(&bmc->dev->dev,
1910 &bmc->provides_dev_sdrs_attr);
1911 device_remove_file(&bmc->dev->dev,
1912 &bmc->revision_attr);
1913 device_remove_file(&bmc->dev->dev,
1914 &bmc->firmware_rev_attr);
1915 device_remove_file(&bmc->dev->dev,
1916 &bmc->version_attr);
1917 device_remove_file(&bmc->dev->dev,
1918 &bmc->add_dev_support_attr);
1919 device_remove_file(&bmc->dev->dev,
1920 &bmc->manufacturer_id_attr);
1921 device_remove_file(&bmc->dev->dev,
1922 &bmc->product_id_attr);
1923 if (bmc->id.aux_firmware_revision_set)
1924 device_remove_file(&bmc->dev->dev,
1925 &bmc->aux_firmware_rev_attr);
1926 if (bmc->guid_set)
1927 device_remove_file(&bmc->dev->dev,
1928 &bmc->guid_attr);
1929 platform_device_unregister(bmc->dev);
1930 kfree(bmc);
1933 static void ipmi_bmc_unregister(ipmi_smi_t intf)
1935 struct bmc_device *bmc = intf->bmc;
1937 sysfs_remove_link(&intf->si_dev->kobj, "bmc");
1938 if (intf->my_dev_name) {
1939 sysfs_remove_link(&bmc->dev->dev.kobj, intf->my_dev_name);
1940 kfree(intf->my_dev_name);
1941 intf->my_dev_name = NULL;
1944 mutex_lock(&ipmidriver_mutex);
1945 kref_put(&bmc->refcount, cleanup_bmc_device);
1946 mutex_unlock(&ipmidriver_mutex);
1949 static int ipmi_bmc_register(ipmi_smi_t intf)
1951 int rv;
1952 struct bmc_device *bmc = intf->bmc;
1953 struct bmc_device *old_bmc;
1954 int size;
1955 char dummy[1];
1957 mutex_lock(&ipmidriver_mutex);
1960 * Try to find if there is a bmc_device struct
1961 * representing the interfaced BMC already
1963 if (bmc->guid_set)
1964 old_bmc = ipmi_find_bmc_guid(&ipmidriver, bmc->guid);
1965 else
1966 old_bmc = ipmi_find_bmc_prod_dev_id(&ipmidriver,
1967 bmc->id.product_id,
1968 bmc->id.device_id);
1971 * If there is already a bmc_device, free the new one,
1972 * otherwise register the new BMC device
1974 if (old_bmc) {
1975 kfree(bmc);
1976 intf->bmc = old_bmc;
1977 bmc = old_bmc;
1979 kref_get(&bmc->refcount);
1980 mutex_unlock(&ipmidriver_mutex);
1982 printk(KERN_INFO
1983 "ipmi: interfacing existing BMC (man_id: 0x%6.6x,"
1984 " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
1985 bmc->id.manufacturer_id,
1986 bmc->id.product_id,
1987 bmc->id.device_id);
1988 } else {
1989 bmc->dev = platform_device_alloc("ipmi_bmc",
1990 bmc->id.device_id);
1991 if (!bmc->dev) {
1992 printk(KERN_ERR
1993 "ipmi_msghandler:"
1994 " Unable to allocate platform device\n");
1995 return -ENOMEM;
1997 bmc->dev->dev.driver = &ipmidriver;
1998 dev_set_drvdata(&bmc->dev->dev, bmc);
1999 kref_init(&bmc->refcount);
2001 rv = platform_device_register(bmc->dev);
2002 mutex_unlock(&ipmidriver_mutex);
2003 if (rv) {
2004 printk(KERN_ERR
2005 "ipmi_msghandler:"
2006 " Unable to register bmc device: %d\n",
2007 rv);
2008 /* Don't go to out_err, you can only do that if
2009 the device is registered already. */
2010 return rv;
2013 bmc->device_id_attr.attr.name = "device_id";
2014 bmc->device_id_attr.attr.owner = THIS_MODULE;
2015 bmc->device_id_attr.attr.mode = S_IRUGO;
2016 bmc->device_id_attr.show = device_id_show;
2018 bmc->provides_dev_sdrs_attr.attr.name = "provides_device_sdrs";
2019 bmc->provides_dev_sdrs_attr.attr.owner = THIS_MODULE;
2020 bmc->provides_dev_sdrs_attr.attr.mode = S_IRUGO;
2021 bmc->provides_dev_sdrs_attr.show = provides_dev_sdrs_show;
2024 bmc->revision_attr.attr.name = "revision";
2025 bmc->revision_attr.attr.owner = THIS_MODULE;
2026 bmc->revision_attr.attr.mode = S_IRUGO;
2027 bmc->revision_attr.show = revision_show;
2029 bmc->firmware_rev_attr.attr.name = "firmware_revision";
2030 bmc->firmware_rev_attr.attr.owner = THIS_MODULE;
2031 bmc->firmware_rev_attr.attr.mode = S_IRUGO;
2032 bmc->firmware_rev_attr.show = firmware_rev_show;
2034 bmc->version_attr.attr.name = "ipmi_version";
2035 bmc->version_attr.attr.owner = THIS_MODULE;
2036 bmc->version_attr.attr.mode = S_IRUGO;
2037 bmc->version_attr.show = ipmi_version_show;
2039 bmc->add_dev_support_attr.attr.name
2040 = "additional_device_support";
2041 bmc->add_dev_support_attr.attr.owner = THIS_MODULE;
2042 bmc->add_dev_support_attr.attr.mode = S_IRUGO;
2043 bmc->add_dev_support_attr.show = add_dev_support_show;
2045 bmc->manufacturer_id_attr.attr.name = "manufacturer_id";
2046 bmc->manufacturer_id_attr.attr.owner = THIS_MODULE;
2047 bmc->manufacturer_id_attr.attr.mode = S_IRUGO;
2048 bmc->manufacturer_id_attr.show = manufacturer_id_show;
2050 bmc->product_id_attr.attr.name = "product_id";
2051 bmc->product_id_attr.attr.owner = THIS_MODULE;
2052 bmc->product_id_attr.attr.mode = S_IRUGO;
2053 bmc->product_id_attr.show = product_id_show;
2055 bmc->guid_attr.attr.name = "guid";
2056 bmc->guid_attr.attr.owner = THIS_MODULE;
2057 bmc->guid_attr.attr.mode = S_IRUGO;
2058 bmc->guid_attr.show = guid_show;
2060 bmc->aux_firmware_rev_attr.attr.name = "aux_firmware_revision";
2061 bmc->aux_firmware_rev_attr.attr.owner = THIS_MODULE;
2062 bmc->aux_firmware_rev_attr.attr.mode = S_IRUGO;
2063 bmc->aux_firmware_rev_attr.show = aux_firmware_rev_show;
2065 device_create_file(&bmc->dev->dev,
2066 &bmc->device_id_attr);
2067 device_create_file(&bmc->dev->dev,
2068 &bmc->provides_dev_sdrs_attr);
2069 device_create_file(&bmc->dev->dev,
2070 &bmc->revision_attr);
2071 device_create_file(&bmc->dev->dev,
2072 &bmc->firmware_rev_attr);
2073 device_create_file(&bmc->dev->dev,
2074 &bmc->version_attr);
2075 device_create_file(&bmc->dev->dev,
2076 &bmc->add_dev_support_attr);
2077 device_create_file(&bmc->dev->dev,
2078 &bmc->manufacturer_id_attr);
2079 device_create_file(&bmc->dev->dev,
2080 &bmc->product_id_attr);
2081 if (bmc->id.aux_firmware_revision_set)
2082 device_create_file(&bmc->dev->dev,
2083 &bmc->aux_firmware_rev_attr);
2084 if (bmc->guid_set)
2085 device_create_file(&bmc->dev->dev,
2086 &bmc->guid_attr);
2088 printk(KERN_INFO
2089 "ipmi: Found new BMC (man_id: 0x%6.6x, "
2090 "prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
2091 bmc->id.manufacturer_id,
2092 bmc->id.product_id,
2093 bmc->id.device_id);
2097 * create symlink from system interface device to bmc device
2098 * and back.
2100 rv = sysfs_create_link(&intf->si_dev->kobj,
2101 &bmc->dev->dev.kobj, "bmc");
2102 if (rv) {
2103 printk(KERN_ERR
2104 "ipmi_msghandler: Unable to create bmc symlink: %d\n",
2105 rv);
2106 goto out_err;
2109 size = snprintf(dummy, 0, "ipmi%d", intf->intf_num);
2110 intf->my_dev_name = kmalloc(size+1, GFP_KERNEL);
2111 if (!intf->my_dev_name) {
2112 rv = -ENOMEM;
2113 printk(KERN_ERR
2114 "ipmi_msghandler: Unable to allocate link from BMC: %d\n",
2115 rv);
2116 goto out_err;
2118 snprintf(intf->my_dev_name, size+1, "ipmi%d", intf->intf_num);
2120 rv = sysfs_create_link(&bmc->dev->dev.kobj, &intf->si_dev->kobj,
2121 intf->my_dev_name);
2122 if (rv) {
2123 kfree(intf->my_dev_name);
2124 intf->my_dev_name = NULL;
2125 printk(KERN_ERR
2126 "ipmi_msghandler:"
2127 " Unable to create symlink to bmc: %d\n",
2128 rv);
2129 goto out_err;
2132 return 0;
2134 out_err:
2135 ipmi_bmc_unregister(intf);
2136 return rv;
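/* Send a Get Device GUID command to the BMC over the system
   interface; the response is picked up by guid_handler(). */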
2139 static int
2140 send_guid_cmd(ipmi_smi_t intf, int chan)
2142 struct kernel_ipmi_msg msg;
2143 struct ipmi_system_interface_addr si;
2145 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
2146 si.channel = IPMI_BMC_CHANNEL;
2147 si.lun = 0;
2149 msg.netfn = IPMI_NETFN_APP_REQUEST;
2150 msg.cmd = IPMI_GET_DEVICE_GUID_CMD;
2151 msg.data = NULL;
2152 msg.data_len = 0;
2153 return i_ipmi_request(NULL,
2154 intf,
2155 (struct ipmi_addr *) &si,
2157 &msg,
2158 intf,
2159 NULL,
2160 NULL,
2162 intf->channels[0].address,
2163 intf->channels[0].lun,
2164 -1, 0);
2167 static void
2168 guid_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
2170 if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
2171 || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
2172 || (msg->msg.cmd != IPMI_GET_DEVICE_GUID_CMD))
2173 /* Not for me */
2174 return;
2176 if (msg->msg.data[0] != 0) {
2177 /* Error from getting the GUID, the BMC doesn't have one. */
2178 intf->bmc->guid_set = 0;
2179 goto out;
2182 if (msg->msg.data_len < 17) {
2183 intf->bmc->guid_set = 0;
2184 printk(KERN_WARNING PFX
2185 "guid_handler: The GUID response from the BMC was too"
2186 " short; it was %d bytes but should have been 17. Assuming"
2187 " GUID is not available.\n",
2188 msg->msg.data_len);
2189 goto out;
2192 memcpy(intf->bmc->guid, msg->msg.data, 16);
2193 intf->bmc->guid_set = 1;
2194 out:
2195 wake_up(&intf->waitq);
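/* Fetch the BMC's GUID synchronously: install guid_handler() as the
   null user handler, send the command, and wait until the handler
   reports a result. */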
2198 static void
2199 get_guid(ipmi_smi_t intf)
2201 int rv;
2203 intf->bmc->guid_set = 0x2;
2204 intf->null_user_handler = guid_handler;
2205 rv = send_guid_cmd(intf, 0);
2206 if (rv)
2207 /* Send failed, no GUID available. */
2208 intf->bmc->guid_set = 0;
2209 wait_event(intf->waitq, intf->bmc->guid_set != 2);
2210 intf->null_user_handler = NULL;
2213 static int
2214 send_channel_info_cmd(ipmi_smi_t intf, int chan)
2216 struct kernel_ipmi_msg msg;
2217 unsigned char data[1];
2218 struct ipmi_system_interface_addr si;
2220 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
2221 si.channel = IPMI_BMC_CHANNEL;
2222 si.lun = 0;
2224 msg.netfn = IPMI_NETFN_APP_REQUEST;
2225 msg.cmd = IPMI_GET_CHANNEL_INFO_CMD;
2226 msg.data = data;
2227 msg.data_len = 1;
2228 data[0] = chan;
2229 return i_ipmi_request(NULL,
2230 intf,
2231 (struct ipmi_addr *) &si,
2233 &msg,
2234 intf,
2235 NULL,
2236 NULL,
2238 intf->channels[0].address,
2239 intf->channels[0].lun,
2240 -1, 0);
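/* Handle a Get Channel Info response: record the medium and protocol
   for the current channel, then either request the next channel or
   wake up the waiter when scanning is finished (or fails). */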
2243 static void
2244 channel_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
2246 int rv = 0;
2247 int chan;
2249 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
2250 && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
2251 && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD))
2253 /* It's the one we want */
2254 if (msg->msg.data[0] != 0) {
2255 /* Got an error from the channel, just go on. */
2257 if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) {
2258 /* If the MC does not support this
2259 command, that is legal. We just
2260 assume it has one IPMB channel at
2261 channel zero. */
2262 intf->channels[0].medium
2263 = IPMI_CHANNEL_MEDIUM_IPMB;
2264 intf->channels[0].protocol
2265 = IPMI_CHANNEL_PROTOCOL_IPMB;
2266 rv = -ENOSYS;
2268 intf->curr_channel = IPMI_MAX_CHANNELS;
2269 wake_up(&intf->waitq);
2270 goto out;
2272 goto next_channel;
2274 if (msg->msg.data_len < 4) {
2275 /* Message not big enough, just go on. */
2276 goto next_channel;
2278 chan = intf->curr_channel;
2279 intf->channels[chan].medium = msg->msg.data[2] & 0x7f;
2280 intf->channels[chan].protocol = msg->msg.data[3] & 0x1f;
2282 next_channel:
2283 intf->curr_channel++;
2284 if (intf->curr_channel >= IPMI_MAX_CHANNELS)
2285 wake_up(&intf->waitq);
2286 else
2287 rv = send_channel_info_cmd(intf, intf->curr_channel);
2289 if (rv) {
2290 /* Got an error somehow, just give up. */
2291 intf->curr_channel = IPMI_MAX_CHANNELS;
2292 wake_up(&intf->waitq);
2294 printk(KERN_WARNING PFX
2295 "Error sending channel information: %d\n",
2296 rv);
2299 out:
2300 return;
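/* Called by lower-level (system interface) drivers to register a new
   interface.  Sets up the interface structure, fetches the GUID,
   scans the channels on IPMI 1.5 and later, and registers the BMC,
   proc entries, and SMI watchers. */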
2303 int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
2304 void *send_info,
2305 struct ipmi_device_id *device_id,
2306 struct device *si_dev,
2307 unsigned char slave_addr)
2309 int i, j;
2310 int rv;
2311 ipmi_smi_t intf;
2312 unsigned long flags;
2313 int version_major;
2314 int version_minor;
2316 version_major = ipmi_version_major(device_id);
2317 version_minor = ipmi_version_minor(device_id);
2319 /* Make sure the driver is actually initialized; this handles
2320 problems with initialization order. */
2321 if (!initialized) {
2322 rv = ipmi_init_msghandler();
2323 if (rv)
2324 return rv;
2325 /* The init code doesn't return an error if it was turned
2326 off, but it won't initialize. Check that. */
2327 if (!initialized)
2328 return -ENODEV;
2331 intf = kmalloc(sizeof(*intf), GFP_KERNEL);
2332 if (!intf)
2333 return -ENOMEM;
2334 memset(intf, 0, sizeof(*intf));
2335 intf->bmc = kzalloc(sizeof(*intf->bmc), GFP_KERNEL);
2336 if (!intf->bmc) {
2337 kfree(intf);
2338 return -ENOMEM;
2340 intf->intf_num = -1;
2341 kref_init(&intf->refcount);
2342 intf->bmc->id = *device_id;
2343 intf->si_dev = si_dev;
2344 for (j = 0; j < IPMI_MAX_CHANNELS; j++) {
2345 intf->channels[j].address = IPMI_BMC_SLAVE_ADDR;
2346 intf->channels[j].lun = 2;
2348 if (slave_addr != 0)
2349 intf->channels[0].address = slave_addr;
2350 INIT_LIST_HEAD(&intf->users);
2351 intf->handlers = handlers;
2352 intf->send_info = send_info;
2353 spin_lock_init(&intf->seq_lock);
2354 for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) {
2355 intf->seq_table[j].inuse = 0;
2356 intf->seq_table[j].seqid = 0;
2358 intf->curr_seq = 0;
2359 #ifdef CONFIG_PROC_FS
2360 spin_lock_init(&intf->proc_entry_lock);
2361 #endif
2362 spin_lock_init(&intf->waiting_msgs_lock);
2363 INIT_LIST_HEAD(&intf->waiting_msgs);
2364 spin_lock_init(&intf->events_lock);
2365 INIT_LIST_HEAD(&intf->waiting_events);
2366 intf->waiting_events_count = 0;
2367 mutex_init(&intf->cmd_rcvrs_mutex);
2368 INIT_LIST_HEAD(&intf->cmd_rcvrs);
2369 init_waitqueue_head(&intf->waitq);
2371 spin_lock_init(&intf->counter_lock);
2372 intf->proc_dir = NULL;
2374 rv = -ENOMEM;
2375 spin_lock_irqsave(&interfaces_lock, flags);
2376 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
2377 if (ipmi_interfaces[i] == NULL) {
2378 intf->intf_num = i;
2379 /* Reserve the entry till we are done. */
2380 ipmi_interfaces[i] = IPMI_INVALID_INTERFACE_ENTRY;
2381 rv = 0;
2382 break;
2385 spin_unlock_irqrestore(&interfaces_lock, flags);
2386 if (rv)
2387 goto out;
2389 rv = handlers->start_processing(send_info, intf);
2390 if (rv)
2391 goto out;
2393 get_guid(intf);
2395 if ((version_major > 1)
2396 || ((version_major == 1) && (version_minor >= 5)))
2398 /* Start scanning the channels to see what is
2399 available. */
2400 intf->null_user_handler = channel_handler;
2401 intf->curr_channel = 0;
2402 rv = send_channel_info_cmd(intf, 0);
2403 if (rv)
2404 goto out;
2406 /* Wait for the channel info to be read. */
2407 wait_event(intf->waitq,
2408 intf->curr_channel >= IPMI_MAX_CHANNELS);
2409 intf->null_user_handler = NULL;
2410 } else {
2411 /* Assume a single IPMB channel at zero. */
2412 intf->channels[0].medium = IPMI_CHANNEL_MEDIUM_IPMB;
2413 intf->channels[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB;
2416 if (rv == 0)
2417 rv = add_proc_entries(intf, i);
2419 rv = ipmi_bmc_register(intf);
2421 out:
2422 if (rv) {
2423 if (intf->proc_dir)
2424 remove_proc_entries(intf);
2425 kref_put(&intf->refcount, intf_free);
2426 if (i < MAX_IPMI_INTERFACES) {
2427 spin_lock_irqsave(&interfaces_lock, flags);
2428 ipmi_interfaces[i] = NULL;
2429 spin_unlock_irqrestore(&interfaces_lock, flags);
2431 } else {
2432 spin_lock_irqsave(&interfaces_lock, flags);
2433 ipmi_interfaces[i] = intf;
2434 spin_unlock_irqrestore(&interfaces_lock, flags);
2435 call_smi_watchers(i, intf->si_dev);
2438 return rv;
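/* Unregister an interface: remove its BMC and proc entries, tell the
   SMI watchers it is gone, and release the interface slot. */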
2441 int ipmi_unregister_smi(ipmi_smi_t intf)
2443 int i;
2444 struct ipmi_smi_watcher *w;
2445 unsigned long flags;
2447 ipmi_bmc_unregister(intf);
2449 spin_lock_irqsave(&interfaces_lock, flags);
2450 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
2451 if (ipmi_interfaces[i] == intf) {
2452 /* Set the interface number reserved until we
2453 * are done. */
2454 ipmi_interfaces[i] = IPMI_INVALID_INTERFACE_ENTRY;
2455 intf->intf_num = -1;
2456 break;
2459 spin_unlock_irqrestore(&interfaces_lock,flags);
2461 if (i == MAX_IPMI_INTERFACES)
2462 return -ENODEV;
2464 remove_proc_entries(intf);
2466 /* Call all the watcher interfaces to tell them that
2467 an interface is gone. */
2468 down_read(&smi_watchers_sem);
2469 list_for_each_entry(w, &smi_watchers, link)
2470 w->smi_gone(i);
2471 up_read(&smi_watchers_sem);
2473 /* Allow the entry to be reused now. */
2474 spin_lock_irqsave(&interfaces_lock, flags);
2475 ipmi_interfaces[i] = NULL;
2476 spin_unlock_irqrestore(&interfaces_lock,flags);
2478 kref_put(&intf->refcount, intf_free);
2479 return 0;
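/* Handle a response that came in over IPMB from a remote node: match
   it to the pending request in the sequence table and deliver it to
   the requesting user. */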
2482 static int handle_ipmb_get_msg_rsp(ipmi_smi_t intf,
2483 struct ipmi_smi_msg *msg)
2485 struct ipmi_ipmb_addr ipmb_addr;
2486 struct ipmi_recv_msg *recv_msg;
2487 unsigned long flags;
2490 /* This is 11, not 10, because the response must contain a
2491 * completion code. */
2492 if (msg->rsp_size < 11) {
2493 /* Message not big enough, just ignore it. */
2494 spin_lock_irqsave(&intf->counter_lock, flags);
2495 intf->invalid_ipmb_responses++;
2496 spin_unlock_irqrestore(&intf->counter_lock, flags);
2497 return 0;
2500 if (msg->rsp[2] != 0) {
2501 /* An error getting the response, just ignore it. */
2502 return 0;
2505 ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE;
2506 ipmb_addr.slave_addr = msg->rsp[6];
2507 ipmb_addr.channel = msg->rsp[3] & 0x0f;
2508 ipmb_addr.lun = msg->rsp[7] & 3;
2510 /* It's a response from a remote entity. Look up the sequence
2511 number and handle the response. */
2512 if (intf_find_seq(intf,
2513 msg->rsp[7] >> 2,
2514 msg->rsp[3] & 0x0f,
2515 msg->rsp[8],
2516 (msg->rsp[4] >> 2) & (~1),
2517 (struct ipmi_addr *) &(ipmb_addr),
2518 &recv_msg))
2520 /* We were unable to find the sequence number,
2521 so just nuke the message. */
2522 spin_lock_irqsave(&intf->counter_lock, flags);
2523 intf->unhandled_ipmb_responses++;
2524 spin_unlock_irqrestore(&intf->counter_lock, flags);
2525 return 0;
2528 memcpy(recv_msg->msg_data,
2529 &(msg->rsp[9]),
2530 msg->rsp_size - 9);
2531 /* The other fields matched, so no need to set them, except
2532 for netfn, which needs to be the response that was
2533 returned, not the request value. */
2534 recv_msg->msg.netfn = msg->rsp[4] >> 2;
2535 recv_msg->msg.data = recv_msg->msg_data;
2536 recv_msg->msg.data_len = msg->rsp_size - 10;
2537 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
2538 spin_lock_irqsave(&intf->counter_lock, flags);
2539 intf->handled_ipmb_responses++;
2540 spin_unlock_irqrestore(&intf->counter_lock, flags);
2541 deliver_response(recv_msg);
2543 return 0;
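/* Handle a command received over IPMB.  If a user has registered for
   the netfn/cmd, deliver it; otherwise send back an "invalid command"
   completion code to the originator. */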
2546 static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf,
2547 struct ipmi_smi_msg *msg)
2549 struct cmd_rcvr *rcvr;
2550 int rv = 0;
2551 unsigned char netfn;
2552 unsigned char cmd;
2553 ipmi_user_t user = NULL;
2554 struct ipmi_ipmb_addr *ipmb_addr;
2555 struct ipmi_recv_msg *recv_msg;
2556 unsigned long flags;
2558 if (msg->rsp_size < 10) {
2559 /* Message not big enough, just ignore it. */
2560 spin_lock_irqsave(&intf->counter_lock, flags);
2561 intf->invalid_commands++;
2562 spin_unlock_irqrestore(&intf->counter_lock, flags);
2563 return 0;
2566 if (msg->rsp[2] != 0) {
2567 /* An error getting the response, just ignore it. */
2568 return 0;
2571 netfn = msg->rsp[4] >> 2;
2572 cmd = msg->rsp[8];
2574 rcu_read_lock();
2575 rcvr = find_cmd_rcvr(intf, netfn, cmd);
2576 if (rcvr) {
2577 user = rcvr->user;
2578 kref_get(&user->refcount);
2579 } else
2580 user = NULL;
2581 rcu_read_unlock();
2583 if (user == NULL) {
2584 /* We didn't find a user, deliver an error response. */
2585 spin_lock_irqsave(&intf->counter_lock, flags);
2586 intf->unhandled_commands++;
2587 spin_unlock_irqrestore(&intf->counter_lock, flags);
2589 msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
2590 msg->data[1] = IPMI_SEND_MSG_CMD;
2591 msg->data[2] = msg->rsp[3];
2592 msg->data[3] = msg->rsp[6];
2593 msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3);
2594 msg->data[5] = ipmb_checksum(&(msg->data[3]), 2);
2595 msg->data[6] = intf->channels[msg->rsp[3] & 0xf].address;
2596 /* rqseq/lun */
2597 msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3);
2598 msg->data[8] = msg->rsp[8]; /* cmd */
2599 msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE;
2600 msg->data[10] = ipmb_checksum(&(msg->data[6]), 4);
2601 msg->data_size = 11;
2603 #ifdef DEBUG_MSGING
2605 int m;
2606 printk("Invalid command:");
2607 for (m = 0; m < msg->data_size; m++)
2608 printk(" %2.2x", msg->data[m]);
2609 printk("\n");
2611 #endif
2612 intf->handlers->sender(intf->send_info, msg, 0);
2614 rv = -1; /* We used the message, so return the value that
2615 causes it to not be freed or queued. */
2616 } else {
2617 /* Deliver the message to the user. */
2618 spin_lock_irqsave(&intf->counter_lock, flags);
2619 intf->handled_commands++;
2620 spin_unlock_irqrestore(&intf->counter_lock, flags);
2622 recv_msg = ipmi_alloc_recv_msg();
2623 if (!recv_msg) {
2624 /* We couldn't allocate memory for the
2625 message, so requeue it for handling
2626 later. */
2627 rv = 1;
2628 kref_put(&user->refcount, free_user);
2629 } else {
2630 /* Extract the source address from the data. */
2631 ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
2632 ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE;
2633 ipmb_addr->slave_addr = msg->rsp[6];
2634 ipmb_addr->lun = msg->rsp[7] & 3;
2635 ipmb_addr->channel = msg->rsp[3] & 0xf;
2637 /* Extract the rest of the message information
2638 from the IPMB header.*/
2639 recv_msg->user = user;
2640 recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
2641 recv_msg->msgid = msg->rsp[7] >> 2;
2642 recv_msg->msg.netfn = msg->rsp[4] >> 2;
2643 recv_msg->msg.cmd = msg->rsp[8];
2644 recv_msg->msg.data = recv_msg->msg_data;
2646 /* We chop off 10, not 9 bytes because the checksum
2647 at the end also needs to be removed. */
2648 recv_msg->msg.data_len = msg->rsp_size - 10;
2649 memcpy(recv_msg->msg_data,
2650 &(msg->rsp[9]),
2651 msg->rsp_size - 10);
2652 deliver_response(recv_msg);
2656 return rv;
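/* Handle a response that came in over a LAN channel: match it to the
   pending request in the sequence table and deliver it to the
   requesting user. */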
2659 static int handle_lan_get_msg_rsp(ipmi_smi_t intf,
2660 struct ipmi_smi_msg *msg)
2662 struct ipmi_lan_addr lan_addr;
2663 struct ipmi_recv_msg *recv_msg;
2664 unsigned long flags;
2667 /* This is 13, not 12, because the response must contain a
2668 * completion code. */
2669 if (msg->rsp_size < 13) {
2670 /* Message not big enough, just ignore it. */
2671 spin_lock_irqsave(&intf->counter_lock, flags);
2672 intf->invalid_lan_responses++;
2673 spin_unlock_irqrestore(&intf->counter_lock, flags);
2674 return 0;
2677 if (msg->rsp[2] != 0) {
2678 /* An error getting the response, just ignore it. */
2679 return 0;
2682 lan_addr.addr_type = IPMI_LAN_ADDR_TYPE;
2683 lan_addr.session_handle = msg->rsp[4];
2684 lan_addr.remote_SWID = msg->rsp[8];
2685 lan_addr.local_SWID = msg->rsp[5];
2686 lan_addr.channel = msg->rsp[3] & 0x0f;
2687 lan_addr.privilege = msg->rsp[3] >> 4;
2688 lan_addr.lun = msg->rsp[9] & 3;
2690 /* It's a response from a remote entity. Look up the sequence
2691 number and handle the response. */
2692 if (intf_find_seq(intf,
2693 msg->rsp[9] >> 2,
2694 msg->rsp[3] & 0x0f,
2695 msg->rsp[10],
2696 (msg->rsp[6] >> 2) & (~1),
2697 (struct ipmi_addr *) &(lan_addr),
2698 &recv_msg))
2700 /* We were unable to find the sequence number,
2701 so just nuke the message. */
2702 spin_lock_irqsave(&intf->counter_lock, flags);
2703 intf->unhandled_lan_responses++;
2704 spin_unlock_irqrestore(&intf->counter_lock, flags);
2705 return 0;
2708 memcpy(recv_msg->msg_data,
2709 &(msg->rsp[11]),
2710 msg->rsp_size - 11);
2711 /* The other fields matched, so no need to set them, except
2712 for netfn, which needs to be the response that was
2713 returned, not the request value. */
2714 recv_msg->msg.netfn = msg->rsp[6] >> 2;
2715 recv_msg->msg.data = recv_msg->msg_data;
2716 recv_msg->msg.data_len = msg->rsp_size - 12;
2717 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
2718 spin_lock_irqsave(&intf->counter_lock, flags);
2719 intf->handled_lan_responses++;
2720 spin_unlock_irqrestore(&intf->counter_lock, flags);
2721 deliver_response(recv_msg);
2723 return 0;
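/* Handle a command received over a LAN channel.  If a user has
   registered for the netfn/cmd, deliver it; otherwise the message is
   simply dropped. */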
2726 static int handle_lan_get_msg_cmd(ipmi_smi_t intf,
2727 struct ipmi_smi_msg *msg)
2729 struct cmd_rcvr *rcvr;
2730 int rv = 0;
2731 unsigned char netfn;
2732 unsigned char cmd;
2733 ipmi_user_t user = NULL;
2734 struct ipmi_lan_addr *lan_addr;
2735 struct ipmi_recv_msg *recv_msg;
2736 unsigned long flags;
2738 if (msg->rsp_size < 12) {
2739 /* Message not big enough, just ignore it. */
2740 spin_lock_irqsave(&intf->counter_lock, flags);
2741 intf->invalid_commands++;
2742 spin_unlock_irqrestore(&intf->counter_lock, flags);
2743 return 0;
2746 if (msg->rsp[2] != 0) {
2747 /* An error getting the response, just ignore it. */
2748 return 0;
2751 netfn = msg->rsp[6] >> 2;
2752 cmd = msg->rsp[10];
2754 rcu_read_lock();
2755 rcvr = find_cmd_rcvr(intf, netfn, cmd);
2756 if (rcvr) {
2757 user = rcvr->user;
2758 kref_get(&user->refcount);
2759 } else
2760 user = NULL;
2761 rcu_read_unlock();
2763 if (user == NULL) {
2764 /* We didn't find a user, just give up. */
2765 spin_lock_irqsave(&intf->counter_lock, flags);
2766 intf->unhandled_commands++;
2767 spin_unlock_irqrestore(&intf->counter_lock, flags);
2769 rv = 0; /* Don't do anything with these messages, just
2770 allow them to be freed. */
2771 } else {
2772 /* Deliver the message to the user. */
2773 spin_lock_irqsave(&intf->counter_lock, flags);
2774 intf->handled_commands++;
2775 spin_unlock_irqrestore(&intf->counter_lock, flags);
2777 recv_msg = ipmi_alloc_recv_msg();
2778 if (!recv_msg) {
2779 /* We couldn't allocate memory for the
2780 message, so requeue it for handling
2781 later. */
2782 rv = 1;
2783 kref_put(&user->refcount, free_user);
2784 } else {
2785 /* Extract the source address from the data. */
2786 lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
2787 lan_addr->addr_type = IPMI_LAN_ADDR_TYPE;
2788 lan_addr->session_handle = msg->rsp[4];
2789 lan_addr->remote_SWID = msg->rsp[8];
2790 lan_addr->local_SWID = msg->rsp[5];
2791 lan_addr->lun = msg->rsp[9] & 3;
2792 lan_addr->channel = msg->rsp[3] & 0xf;
2793 lan_addr->privilege = msg->rsp[3] >> 4;
2795 /* Extract the rest of the message information
2796 from the IPMB header.*/
2797 recv_msg->user = user;
2798 recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
2799 recv_msg->msgid = msg->rsp[9] >> 2;
2800 recv_msg->msg.netfn = msg->rsp[6] >> 2;
2801 recv_msg->msg.cmd = msg->rsp[10];
2802 recv_msg->msg.data = recv_msg->msg_data;
2804 /* We chop off 12, not 11 bytes because the checksum
2805 at the end also needs to be removed. */
2806 recv_msg->msg.data_len = msg->rsp_size - 12;
2807 memcpy(recv_msg->msg_data,
2808 &(msg->rsp[11]),
2809 msg->rsp_size - 12);
2810 deliver_response(recv_msg);
2814 return rv;
2817 static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg,
2818 struct ipmi_smi_msg *msg)
2820 struct ipmi_system_interface_addr *smi_addr;
2822 recv_msg->msgid = 0;
2823 smi_addr = (struct ipmi_system_interface_addr *) &(recv_msg->addr);
2824 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
2825 smi_addr->channel = IPMI_BMC_CHANNEL;
2826 smi_addr->lun = msg->rsp[0] & 3;
2827 recv_msg->recv_type = IPMI_ASYNC_EVENT_RECV_TYPE;
2828 recv_msg->msg.netfn = msg->rsp[0] >> 2;
2829 recv_msg->msg.cmd = msg->rsp[1];
2830 memcpy(recv_msg->msg_data, &(msg->rsp[3]), msg->rsp_size - 3);
2831 recv_msg->msg.data = recv_msg->msg_data;
2832 recv_msg->msg.data_len = msg->rsp_size - 3;
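/* Handle an event read from the receive message buffer.  The event is
   copied to every user that has asked for events; if no one wants it,
   it is queued (up to MAX_EVENTS_IN_QUEUE) for later delivery. */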
2835 static int handle_read_event_rsp(ipmi_smi_t intf,
2836 struct ipmi_smi_msg *msg)
2838 struct ipmi_recv_msg *recv_msg, *recv_msg2;
2839 struct list_head msgs;
2840 ipmi_user_t user;
2841 int rv = 0;
2842 int deliver_count = 0;
2843 unsigned long flags;
2845 if (msg->rsp_size < 19) {
2846 /* Message is too small to be an IPMB event. */
2847 spin_lock_irqsave(&intf->counter_lock, flags);
2848 intf->invalid_events++;
2849 spin_unlock_irqrestore(&intf->counter_lock, flags);
2850 return 0;
2853 if (msg->rsp[2] != 0) {
2854 /* An error getting the event, just ignore it. */
2855 return 0;
2858 INIT_LIST_HEAD(&msgs);
2860 spin_lock_irqsave(&intf->events_lock, flags);
2862 spin_lock(&intf->counter_lock);
2863 intf->events++;
2864 spin_unlock(&intf->counter_lock);
2866 /* Allocate and fill in one message for every user that is getting
2867 events. */
2868 rcu_read_lock();
2869 list_for_each_entry_rcu(user, &intf->users, link) {
2870 if (!user->gets_events)
2871 continue;
2873 recv_msg = ipmi_alloc_recv_msg();
2874 if (!recv_msg) {
2875 rcu_read_unlock();
2876 list_for_each_entry_safe(recv_msg, recv_msg2, &msgs,
2877 link) {
2878 list_del(&recv_msg->link);
2879 ipmi_free_recv_msg(recv_msg);
2881 /* We couldn't allocate memory for the
2882 message, so requeue it for handling
2883 later. */
2884 rv = 1;
2885 goto out;
2888 deliver_count++;
2890 copy_event_into_recv_msg(recv_msg, msg);
2891 recv_msg->user = user;
2892 kref_get(&user->refcount);
2893 list_add_tail(&(recv_msg->link), &msgs);
2895 rcu_read_unlock();
2897 if (deliver_count) {
2898 /* Now deliver all the messages. */
2899 list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) {
2900 list_del(&recv_msg->link);
2901 deliver_response(recv_msg);
2903 } else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) {
2904 /* No one to receive the message; put it in the queue if there
2905 are not already too many things in the queue. */
2906 recv_msg = ipmi_alloc_recv_msg();
2907 if (!recv_msg) {
2908 /* We couldn't allocate memory for the
2909 message, so requeue it for handling
2910 later. */
2911 rv = 1;
2912 goto out;
2915 copy_event_into_recv_msg(recv_msg, msg);
2916 list_add_tail(&(recv_msg->link), &(intf->waiting_events));
2917 intf->waiting_events_count++;
2918 } else {
2919 /* There are too many things in the queue; discard this
2920 message. */
2921 printk(KERN_WARNING PFX "Event queue full, discarding an"
2922 " incoming event\n");
2925 out:
2926 spin_unlock_irqrestore(&(intf->events_lock), flags);
2928 return rv;
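/* Handle a response from the local BMC, delivering it to the user
   recorded in the message's user_data. */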
2931 static int handle_bmc_rsp(ipmi_smi_t intf,
2932 struct ipmi_smi_msg *msg)
2934 struct ipmi_recv_msg *recv_msg;
2935 unsigned long flags;
2936 struct ipmi_user *user;
2938 recv_msg = (struct ipmi_recv_msg *) msg->user_data;
2939 if (recv_msg == NULL)
2941 printk(KERN_WARNING "IPMI message received with no owner. This\n"
2942 "could be because of a malformed message, or\n"
2943 "because of a hardware error. Contact your\n"
2944 "hardware vendor for assistance.\n");
2945 return 0;
2948 user = recv_msg->user;
2949 /* Make sure the user still exists. */
2950 if (user && !user->valid) {
2951 /* The user for the message went away, so give up. */
2952 spin_lock_irqsave(&intf->counter_lock, flags);
2953 intf->unhandled_local_responses++;
2954 spin_unlock_irqrestore(&intf->counter_lock, flags);
2955 ipmi_free_recv_msg(recv_msg);
2956 } else {
2957 struct ipmi_system_interface_addr *smi_addr;
2959 spin_lock_irqsave(&intf->counter_lock, flags);
2960 intf->handled_local_responses++;
2961 spin_unlock_irqrestore(&intf->counter_lock, flags);
2962 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
2963 recv_msg->msgid = msg->msgid;
2964 smi_addr = ((struct ipmi_system_interface_addr *)
2965 &(recv_msg->addr));
2966 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
2967 smi_addr->channel = IPMI_BMC_CHANNEL;
2968 smi_addr->lun = msg->rsp[0] & 3;
2969 recv_msg->msg.netfn = msg->rsp[0] >> 2;
2970 recv_msg->msg.cmd = msg->rsp[1];
2971 memcpy(recv_msg->msg_data,
2972 &(msg->rsp[2]),
2973 msg->rsp_size - 2);
2974 recv_msg->msg.data = recv_msg->msg_data;
2975 recv_msg->msg.data_len = msg->rsp_size - 2;
2976 deliver_response(recv_msg);
2979 return 0;
2982 /* Handle a new message. Return 1 if the message should be requeued,
2983 0 if the message should be freed, or -1 if the message should not
2984 be freed or requeued. */
2985 static int handle_new_recv_msg(ipmi_smi_t intf,
2986 struct ipmi_smi_msg *msg)
2988 int requeue;
2989 int chan;
2991 #ifdef DEBUG_MSGING
2992 int m;
2993 printk("Recv:");
2994 for (m = 0; m < msg->rsp_size; m++)
2995 printk(" %2.2x", msg->rsp[m]);
2996 printk("\n");
2997 #endif
2998 if (msg->rsp_size < 2) {
2999 /* Message is too small to be correct. */
3000 printk(KERN_WARNING PFX "BMC returned too small a message"
3001 " for netfn %x cmd %x, got %d bytes\n",
3002 (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);
3004 /* Generate an error response for the message. */
3005 msg->rsp[0] = msg->data[0] | (1 << 2);
3006 msg->rsp[1] = msg->data[1];
3007 msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
3008 msg->rsp_size = 3;
3009 } else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))/* Netfn */
3010 || (msg->rsp[1] != msg->data[1])) /* Command */
3012 /* The response is not even marginally correct. */
3013 printk(KERN_WARNING PFX "BMC returned incorrect response,"
3014 " expected netfn %x cmd %x, got netfn %x cmd %x\n",
3015 (msg->data[0] >> 2) | 1, msg->data[1],
3016 msg->rsp[0] >> 2, msg->rsp[1]);
3018 /* Generate an error response for the message. */
3019 msg->rsp[0] = msg->data[0] | (1 << 2);
3020 msg->rsp[1] = msg->data[1];
3021 msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
3022 msg->rsp_size = 3;
3025 if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
3026 && (msg->rsp[1] == IPMI_SEND_MSG_CMD)
3027 && (msg->user_data != NULL))
3029 /* It's a response to a response we sent. For this we
3030 deliver a send message response to the user. */
3031 struct ipmi_recv_msg *recv_msg = msg->user_data;
3033 requeue = 0;
3034 if (msg->rsp_size < 2)
3035 /* Message is too small to be correct. */
3036 goto out;
3038 chan = msg->data[2] & 0x0f;
3039 if (chan >= IPMI_MAX_CHANNELS)
3040 /* Invalid channel number */
3041 goto out;
3043 if (!recv_msg)
3044 goto out;
3046 /* Make sure the user still exists. */
3047 if (!recv_msg->user || !recv_msg->user->valid)
3048 goto out;
3050 recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE;
3051 recv_msg->msg.data = recv_msg->msg_data;
3052 recv_msg->msg.data_len = 1;
3053 recv_msg->msg_data[0] = msg->rsp[2];
3054 deliver_response(recv_msg);
3055 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
3056 && (msg->rsp[1] == IPMI_GET_MSG_CMD))
3058 /* It's from the receive queue. */
3059 chan = msg->rsp[3] & 0xf;
3060 if (chan >= IPMI_MAX_CHANNELS) {
3061 /* Invalid channel number */
3062 requeue = 0;
3063 goto out;
3066 switch (intf->channels[chan].medium) {
3067 case IPMI_CHANNEL_MEDIUM_IPMB:
3068 if (msg->rsp[4] & 0x04) {
3069 /* It's a response, so find the
3070 requesting message and send it up. */
3071 requeue = handle_ipmb_get_msg_rsp(intf, msg);
3072 } else {
3073 /* It's a command to the SMS from some other
3074 entity. Handle that. */
3075 requeue = handle_ipmb_get_msg_cmd(intf, msg);
3077 break;
3079 case IPMI_CHANNEL_MEDIUM_8023LAN:
3080 case IPMI_CHANNEL_MEDIUM_ASYNC:
3081 if (msg->rsp[6] & 0x04) {
3082 /* It's a response, so find the
3083 requesting message and send it up. */
3084 requeue = handle_lan_get_msg_rsp(intf, msg);
3085 } else {
3086 /* It's a command to the SMS from some other
3087 entity. Handle that. */
3088 requeue = handle_lan_get_msg_cmd(intf, msg);
3090 break;
3092 default:
3093 /* We don't handle the channel type, so just
3094 * free the message. */
3095 requeue = 0;
3098 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
3099 && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD))
3101 /* It's an asynchronous event. */
3102 requeue = handle_read_event_rsp(intf, msg);
3103 } else {
3104 /* It's a response from the local BMC. */
3105 requeue = handle_bmc_rsp(intf, msg);
3108 out:
3109 return requeue;
3112 /* Handle a new message from the lower layer. */
3113 void ipmi_smi_msg_received(ipmi_smi_t intf,
3114 struct ipmi_smi_msg *msg)
3116 unsigned long flags;
3117 int rv;
3120 if ((msg->data_size >= 2)
3121 && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
3122 && (msg->data[1] == IPMI_SEND_MSG_CMD)
3123 && (msg->user_data == NULL))
3125 /* This is the local response to a command send, start
3126 the timer for these. The user_data will not be
3127 NULL if this is a response send, and we will let
3128 response sends just go through. */
3130 /* Check for errors, if we get certain errors (ones
3131 that mean basically we can try again later), we
3132 ignore them and start the timer. Otherwise we
3133 report the error immediately. */
3134 if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0)
3135 && (msg->rsp[2] != IPMI_NODE_BUSY_ERR)
3136 && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR))
3138 int chan = msg->rsp[3] & 0xf;
3140 /* Got an error sending the message, handle it. */
3141 spin_lock_irqsave(&intf->counter_lock, flags);
3142 if (chan >= IPMI_MAX_CHANNELS)
3143 ; /* This shouldn't happen */
3144 else if ((intf->channels[chan].medium
3145 == IPMI_CHANNEL_MEDIUM_8023LAN)
3146 || (intf->channels[chan].medium
3147 == IPMI_CHANNEL_MEDIUM_ASYNC))
3148 intf->sent_lan_command_errs++;
3149 else
3150 intf->sent_ipmb_command_errs++;
3151 spin_unlock_irqrestore(&intf->counter_lock, flags);
3152 intf_err_seq(intf, msg->msgid, msg->rsp[2]);
3153 } else {
3154 /* The message was sent, start the timer. */
3155 intf_start_seq_timer(intf, msg->msgid);
3158 ipmi_free_smi_msg(msg);
3159 goto out;
3162 /* To preserve message order, if the list is not empty, we
3163 tack this message onto the end of the list. */
3164 spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
3165 if (!list_empty(&intf->waiting_msgs)) {
3166 list_add_tail(&msg->link, &intf->waiting_msgs);
3167 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3168 goto out;
3170 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3172 rv = handle_new_recv_msg(intf, msg);
3173 if (rv > 0) {
3174 /* Could not handle the message now, just add it to a
3175 list to handle later. */
3176 spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
3177 list_add_tail(&msg->link, &intf->waiting_msgs);
3178 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3179 } else if (rv == 0) {
3180 ipmi_free_smi_msg(msg);
3183 out:
3184 return;
3187 void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf)
3189 ipmi_user_t user;
3191 rcu_read_lock();
3192 list_for_each_entry_rcu(user, &intf->users, link) {
3193 if (!user->handler->ipmi_watchdog_pretimeout)
3194 continue;
3196 user->handler->ipmi_watchdog_pretimeout(user->handler_data);
3198 rcu_read_unlock();
3201 static void
3202 handle_msg_timeout(struct ipmi_recv_msg *msg)
3204 msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
3205 msg->msg_data[0] = IPMI_TIMEOUT_COMPLETION_CODE;
3206 msg->msg.netfn |= 1; /* Convert to a response. */
3207 msg->msg.data_len = 1;
3208 msg->msg.data = msg->msg_data;
3209 deliver_response(msg);
3212 static struct ipmi_smi_msg *
3213 smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg,
3214 unsigned char seq, long seqid)
3216 struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg();
3217 if (!smi_msg)
3218 /* If we can't allocate the message, just return; we
3219 get 4 retries, so this should be OK. */
3220 return NULL;
3222 memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len);
3223 smi_msg->data_size = recv_msg->msg.data_len;
3224 smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid);
3226 #ifdef DEBUG_MSGING
3228 int m;
3229 printk("Resend: ");
3230 for (m = 0; m < smi_msg->data_size; m++)
3231 printk(" %2.2x", smi_msg->data[m]);
3232 printk("\n");
3234 #endif
3235 return smi_msg;
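/* Check one sequence-table entry for a timeout.  When the timeout
   expires the message is either retransmitted (if retries remain) or
   moved to the timeouts list for error delivery.  Called with the
   sequence lock held. */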
3238 static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
3239 struct list_head *timeouts, long timeout_period,
3240 int slot, unsigned long *flags)
3242 struct ipmi_recv_msg *msg;
3244 if (!ent->inuse)
3245 return;
3247 ent->timeout -= timeout_period;
3248 if (ent->timeout > 0)
3249 return;
3251 if (ent->retries_left == 0) {
3252 /* The message has used all its retries. */
3253 ent->inuse = 0;
3254 msg = ent->recv_msg;
3255 list_add_tail(&msg->link, timeouts);
3256 spin_lock(&intf->counter_lock);
3257 if (ent->broadcast)
3258 intf->timed_out_ipmb_broadcasts++;
3259 else if (ent->recv_msg->addr.addr_type == IPMI_LAN_ADDR_TYPE)
3260 intf->timed_out_lan_commands++;
3261 else
3262 intf->timed_out_ipmb_commands++;
3263 spin_unlock(&intf->counter_lock);
3264 } else {
3265 struct ipmi_smi_msg *smi_msg;
3266 /* More retries, send again. */
3268 /* Start with the max timer, set to normal
3269 timer after the message is sent. */
3270 ent->timeout = MAX_MSG_TIMEOUT;
3271 ent->retries_left--;
3272 spin_lock(&intf->counter_lock);
3273 if (ent->recv_msg->addr.addr_type == IPMI_LAN_ADDR_TYPE)
3274 intf->retransmitted_lan_commands++;
3275 else
3276 intf->retransmitted_ipmb_commands++;
3277 spin_unlock(&intf->counter_lock);
3279 smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot,
3280 ent->seqid);
3281 if (!smi_msg)
3282 return;
3284 spin_unlock_irqrestore(&intf->seq_lock, *flags);
3285 /* Send the new message. We send with a zero
3286 * priority. It timed out, I doubt time is
3287 * that critical now, and high priority
3288 * messages are really only for messages to the
3289 * local MC, which don't get resent. */
3290 intf->handlers->sender(intf->send_info,
3291 smi_msg, 0);
3292 spin_lock_irqsave(&intf->seq_lock, *flags);
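/* Periodic timeout processing: retry any queued receive messages and
   expire or resend pending requests on every registered interface. */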
3296 static void ipmi_timeout_handler(long timeout_period)
3298 ipmi_smi_t intf;
3299 struct list_head timeouts;
3300 struct ipmi_recv_msg *msg, *msg2;
3301 struct ipmi_smi_msg *smi_msg, *smi_msg2;
3302 unsigned long flags;
3303 int i, j;
3305 INIT_LIST_HEAD(&timeouts);
3307 spin_lock(&interfaces_lock);
3308 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
3309 intf = ipmi_interfaces[i];
3310 if (IPMI_INVALID_INTERFACE(intf))
3311 continue;
3312 kref_get(&intf->refcount);
3313 spin_unlock(&interfaces_lock);
3315 /* See if any waiting messages need to be processed. */
3316 spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
3317 list_for_each_entry_safe(smi_msg, smi_msg2,
3318 &intf->waiting_msgs, link) {
3319 if (!handle_new_recv_msg(intf, smi_msg)) {
3320 list_del(&smi_msg->link);
3321 ipmi_free_smi_msg(smi_msg);
3322 } else {
3323 /* To preserve message order, quit if we
3324 can't handle a message. */
3325 break;
3328 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3330 /* Go through the seq table and find any messages that
3331 have timed out, putting them in the timeouts
3332 list. */
3333 spin_lock_irqsave(&intf->seq_lock, flags);
3334 for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++)
3335 check_msg_timeout(intf, &(intf->seq_table[j]),
3336 &timeouts, timeout_period, j,
3337 &flags);
3338 spin_unlock_irqrestore(&intf->seq_lock, flags);
3340 list_for_each_entry_safe(msg, msg2, &timeouts, link)
3341 handle_msg_timeout(msg);
3343 kref_put(&intf->refcount, intf_free);
3344 spin_lock(&interfaces_lock);
3346 spin_unlock(&interfaces_lock);
3349 static void ipmi_request_event(void)
3351 ipmi_smi_t intf;
3352 int i;
3354 spin_lock(&interfaces_lock);
3355 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
3356 intf = ipmi_interfaces[i];
3357 if (IPMI_INVALID_INTERFACE(intf))
3358 continue;
3360 intf->handlers->request_events(intf->send_info);
3362 spin_unlock(&interfaces_lock);
3365 static struct timer_list ipmi_timer;
3367 /* Call every ~100 ms. */
3368 #define IPMI_TIMEOUT_TIME 100
3370 /* How many jiffies does it take to get to the timeout time. */
3371 #define IPMI_TIMEOUT_JIFFIES ((IPMI_TIMEOUT_TIME * HZ) / 1000)
3373 /* Request events from the queue every second (this is the number of
3374 IPMI_TIMEOUT_TIMES between event requests). Hopefully, in the
3375 future, IPMI will add a way to know immediately if an event is in
3376 the queue and this silliness can go away. */
3377 #define IPMI_REQUEST_EV_TIME (1000 / (IPMI_TIMEOUT_TIME))
3379 static atomic_t stop_operation;
3380 static unsigned int ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
3382 static void ipmi_timeout(unsigned long data)
3384 if (atomic_read(&stop_operation))
3385 return;
3387 ticks_to_req_ev--;
3388 if (ticks_to_req_ev == 0) {
3389 ipmi_request_event();
3390 ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
3393 ipmi_timeout_handler(IPMI_TIMEOUT_TIME);
3395 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
3399 static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0);
3400 static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);
3402 /* FIXME - convert these to slabs. */
3403 static void free_smi_msg(struct ipmi_smi_msg *msg)
3405 atomic_dec(&smi_msg_inuse_count);
3406 kfree(msg);
3409 struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
3411 struct ipmi_smi_msg *rv;
3412 rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC);
3413 if (rv) {
3414 rv->done = free_smi_msg;
3415 rv->user_data = NULL;
3416 atomic_inc(&smi_msg_inuse_count);
3418 return rv;
3421 static void free_recv_msg(struct ipmi_recv_msg *msg)
3423 atomic_dec(&recv_msg_inuse_count);
3424 kfree(msg);
3427 struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
3429 struct ipmi_recv_msg *rv;
3431 rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC);
3432 if (rv) {
3433 rv->done = free_recv_msg;
3434 atomic_inc(&recv_msg_inuse_count);
3436 return rv;
3439 void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
3441 if (msg->user)
3442 kref_put(&msg->user->refcount, free_user);
3443 msg->done(msg);
3446 #ifdef CONFIG_IPMI_PANIC_EVENT
3448 static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
3452 static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
3456 #ifdef CONFIG_IPMI_PANIC_STRING
3457 static void event_receiver_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
3459 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
3460 && (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE)
3461 && (msg->msg.cmd == IPMI_GET_EVENT_RECEIVER_CMD)
3462 && (msg->msg.data[0] == IPMI_CC_NO_ERROR))
3464 /* A get event receiver command, save it. */
3465 intf->event_receiver = msg->msg.data[1];
3466 intf->event_receiver_lun = msg->msg.data[2] & 0x3;
3470 static void device_id_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
3472 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
3473 && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
3474 && (msg->msg.cmd == IPMI_GET_DEVICE_ID_CMD)
3475 && (msg->msg.data[0] == IPMI_CC_NO_ERROR))
3477 /* A get device id command, save if we are an event
3478 receiver or generator. */
3479 intf->local_sel_device = (msg->msg.data[6] >> 2) & 1;
3480 intf->local_event_generator = (msg->msg.data[6] >> 5) & 1;
3483 #endif
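/* On a panic, send an OS Critical Stop event through every interface
   and, if configured, also log the panic string as OEM SEL records. */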
3485 static void send_panic_events(char *str)
3487 struct kernel_ipmi_msg msg;
3488 ipmi_smi_t intf;
3489 unsigned char data[16];
3490 int i;
3491 struct ipmi_system_interface_addr *si;
3492 struct ipmi_addr addr;
3493 struct ipmi_smi_msg smi_msg;
3494 struct ipmi_recv_msg recv_msg;
3496 si = (struct ipmi_system_interface_addr *) &addr;
3497 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3498 si->channel = IPMI_BMC_CHANNEL;
3499 si->lun = 0;
3501 /* Fill in an event telling that we have failed. */
3502 msg.netfn = 0x04; /* Sensor or Event. */
3503 msg.cmd = 2; /* Platform event command. */
3504 msg.data = data;
3505 msg.data_len = 8;
3506 data[0] = 0x41; /* Kernel generator ID, IPMI table 5-4 */
3507 data[1] = 0x03; /* This is for IPMI 1.0. */
3508 data[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */
3509 data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */
3510 data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */
3512 /* Put a few breadcrumbs in. Hopefully later we can add more things
3513 to make the panic events more useful. */
3514 if (str) {
3515 data[3] = str[0];
3516 data[6] = str[1];
3517 data[7] = str[2];
3520 smi_msg.done = dummy_smi_done_handler;
3521 recv_msg.done = dummy_recv_done_handler;
3523 /* For every registered interface, send the event. */
3524 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
3525 intf = ipmi_interfaces[i];
3526 if (IPMI_INVALID_INTERFACE(intf))
3527 continue;
3529 /* Send the event announcing the panic. */
3530 intf->handlers->set_run_to_completion(intf->send_info, 1);
3531 i_ipmi_request(NULL,
3532 intf,
3533 &addr,
3535 &msg,
3536 intf,
3537 &smi_msg,
3538 &recv_msg,
3540 intf->channels[0].address,
3541 intf->channels[0].lun,
3542 0, 1); /* Don't retry, and don't wait. */
3545 #ifdef CONFIG_IPMI_PANIC_STRING
3546 /* On every interface, dump a bunch of OEM events holding the
3547 string. */
3548 if (!str)
3549 return;
3551 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
3552 char *p = str;
3553 struct ipmi_ipmb_addr *ipmb;
3554 int j;
3556 intf = ipmi_interfaces[i];
3557 if (IPMI_INVALID_INTERFACE(intf))
3558 continue;
3560 /* First job here is to figure out where to send the
3561 OEM events. There's no way in IPMI to send OEM
3562 events using an event send command, so we have to
3563 find the SEL to put them in and stick them in
3564 there. */
3566 /* Get capabilities from the get device id. */
3567 intf->local_sel_device = 0;
3568 intf->local_event_generator = 0;
3569 intf->event_receiver = 0;
3571 /* Request the device info from the local MC. */
3572 msg.netfn = IPMI_NETFN_APP_REQUEST;
3573 msg.cmd = IPMI_GET_DEVICE_ID_CMD;
3574 msg.data = NULL;
3575 msg.data_len = 0;
3576 intf->null_user_handler = device_id_fetcher;
3577 i_ipmi_request(NULL,
3578 intf,
3579 &addr,
3581 &msg,
3582 intf,
3583 &smi_msg,
3584 &recv_msg,
3586 intf->channels[0].address,
3587 intf->channels[0].lun,
3588 0, 1); /* Don't retry, and don't wait. */
3590 if (intf->local_event_generator) {
3591 /* Request the event receiver from the local MC. */
3592 msg.netfn = IPMI_NETFN_SENSOR_EVENT_REQUEST;
3593 msg.cmd = IPMI_GET_EVENT_RECEIVER_CMD;
3594 msg.data = NULL;
3595 msg.data_len = 0;
3596 intf->null_user_handler = event_receiver_fetcher;
3597 i_ipmi_request(NULL,
3598 intf,
3599 &addr,
3601 &msg,
3602 intf,
3603 &smi_msg,
3604 &recv_msg,
3606 intf->channels[0].address,
3607 intf->channels[0].lun,
3608 0, 1); /* no retry, and no wait. */
3610 intf->null_user_handler = NULL;
3612 /* Validate the event receiver. The low bit must not
3613 be 1 (it must be a valid IPMB address), it cannot
3614 be zero, and it must not be my address. */
3615 if (((intf->event_receiver & 1) == 0)
3616 && (intf->event_receiver != 0)
3617 && (intf->event_receiver != intf->channels[0].address))
3619 /* The event receiver is valid, send an IPMB
3620 message. */
3621 ipmb = (struct ipmi_ipmb_addr *) &addr;
3622 ipmb->addr_type = IPMI_IPMB_ADDR_TYPE;
3623 ipmb->channel = 0; /* FIXME - is this right? */
3624 ipmb->lun = intf->event_receiver_lun;
3625 ipmb->slave_addr = intf->event_receiver;
3626 } else if (intf->local_sel_device) {
3627 /* The event receiver was not valid (or was
3628 me), but I am an SEL device, just dump it
3629 in my SEL. */
3630 si = (struct ipmi_system_interface_addr *) &addr;
3631 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3632 si->channel = IPMI_BMC_CHANNEL;
3633 si->lun = 0;
3634 } else
3635 continue; /* Nowhere to send the event. */
3638 msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */
3639 msg.cmd = IPMI_ADD_SEL_ENTRY_CMD;
3640 msg.data = data;
3641 msg.data_len = 16;
3643 j = 0;
3644 while (*p) {
3645 int size = strlen(p);
3647 if (size > 11)
3648 size = 11;
3649 data[0] = 0;
3650 data[1] = 0;
3651 data[2] = 0xf0; /* OEM event without timestamp. */
3652 data[3] = intf->channels[0].address;
3653 data[4] = j++; /* sequence # */
3654 /* Always give 11 bytes, so strncpy will fill
3655 it with zeroes for me. */
3656 strncpy(data+5, p, 11);
3657 p += size;
3659 i_ipmi_request(NULL,
3660 intf,
3661 &addr,
3663 &msg,
3664 intf,
3665 &smi_msg,
3666 &recv_msg,
3668 intf->channels[0].address,
3669 intf->channels[0].lun,
3670 0, 1); /* no retry, and no wait. */
3673 #endif /* CONFIG_IPMI_PANIC_STRING */
3675 #endif /* CONFIG_IPMI_PANIC_EVENT */
3677 static int has_panicked = 0;
3679 static int panic_event(struct notifier_block *this,
3680 unsigned long event,
3681 void *ptr)
3683 int i;
3684 ipmi_smi_t intf;
3686 if (has_panicked)
3687 return NOTIFY_DONE;
3688 has_panicked = 1;
3690 /* For every registered interface, set it to run to completion. */
3691 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
3692 intf = ipmi_interfaces[i];
3693 if (IPMI_INVALID_INTERFACE(intf))
3694 continue;
3696 intf->handlers->set_run_to_completion(intf->send_info, 1);
3699 #ifdef CONFIG_IPMI_PANIC_EVENT
3700 send_panic_events(ptr);
3701 #endif
3703 return NOTIFY_DONE;
3706 static struct notifier_block panic_block = {
3707 .notifier_call = panic_event,
3708 .next = NULL,
3709 .priority = 200 /* priority: INT_MAX >= x >= 0 */
3712 static int ipmi_init_msghandler(void)
3714 int i;
3715 int rv;
3717 if (initialized)
3718 return 0;
3720 rv = driver_register(&ipmidriver);
3721 if (rv) {
3722 printk(KERN_ERR PFX "Could not register IPMI driver\n");
3723 return rv;
3726 printk(KERN_INFO "ipmi message handler version "
3727 IPMI_DRIVER_VERSION "\n");
3729 for (i = 0; i < MAX_IPMI_INTERFACES; i++)
3730 ipmi_interfaces[i] = NULL;
3732 #ifdef CONFIG_PROC_FS
3733 proc_ipmi_root = proc_mkdir("ipmi", NULL);
3734 if (!proc_ipmi_root) {
3735 printk(KERN_ERR PFX "Unable to create IPMI proc dir\n");
3736 return -ENOMEM;
3739 proc_ipmi_root->owner = THIS_MODULE;
3740 #endif /* CONFIG_PROC_FS */
3742 init_timer(&ipmi_timer);
3743 ipmi_timer.data = 0;
3744 ipmi_timer.function = ipmi_timeout;
3745 ipmi_timer.expires = jiffies + IPMI_TIMEOUT_JIFFIES;
3746 add_timer(&ipmi_timer);
3748 atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
3750 initialized = 1;
3752 return 0;
3755 static __init int ipmi_init_msghandler_mod(void)
3757 ipmi_init_msghandler();
3758 return 0;
3761 static __exit void cleanup_ipmi(void)
3763 int count;
3765 if (!initialized)
3766 return;
3768 atomic_notifier_chain_unregister(&panic_notifier_list, &panic_block);
3770 /* This can't be called if any interfaces exist, so no worry about
3771 shutting down the interfaces. */
3773 /* Tell the timer to stop, then wait for it to stop. This avoids
3774 problems with race conditions removing the timer here. */
3775 atomic_inc(&stop_operation);
3776 del_timer_sync(&ipmi_timer);
3778 #ifdef CONFIG_PROC_FS
3779 remove_proc_entry(proc_ipmi_root->name, &proc_root);
3780 #endif /* CONFIG_PROC_FS */
3782 driver_unregister(&ipmidriver);
3784 initialized = 0;
3786 /* Check for buffer leaks. */
3787 count = atomic_read(&smi_msg_inuse_count);
3788 if (count != 0)
3789 printk(KERN_WARNING PFX "SMI message count %d at exit\n",
3790 count);
3791 count = atomic_read(&recv_msg_inuse_count);
3792 if (count != 0)
3793 printk(KERN_WARNING PFX "recv message count %d at exit\n",
3794 count);
3796 module_exit(cleanup_ipmi);
3798 module_init(ipmi_init_msghandler_mod);
3799 MODULE_LICENSE("GPL");
3800 MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
3801 MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI interface.");
3802 MODULE_VERSION(IPMI_DRIVER_VERSION);
3804 EXPORT_SYMBOL(ipmi_create_user);
3805 EXPORT_SYMBOL(ipmi_destroy_user);
3806 EXPORT_SYMBOL(ipmi_get_version);
3807 EXPORT_SYMBOL(ipmi_request_settime);
3808 EXPORT_SYMBOL(ipmi_request_supply_msgs);
3809 EXPORT_SYMBOL(ipmi_register_smi);
3810 EXPORT_SYMBOL(ipmi_unregister_smi);
3811 EXPORT_SYMBOL(ipmi_register_for_cmd);
3812 EXPORT_SYMBOL(ipmi_unregister_for_cmd);
3813 EXPORT_SYMBOL(ipmi_smi_msg_received);
3814 EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
3815 EXPORT_SYMBOL(ipmi_alloc_smi_msg);
3816 EXPORT_SYMBOL(ipmi_addr_length);
3817 EXPORT_SYMBOL(ipmi_validate_addr);
3818 EXPORT_SYMBOL(ipmi_set_gets_events);
3819 EXPORT_SYMBOL(ipmi_smi_watcher_register);
3820 EXPORT_SYMBOL(ipmi_smi_watcher_unregister);
3821 EXPORT_SYMBOL(ipmi_set_my_address);
3822 EXPORT_SYMBOL(ipmi_get_my_address);
3823 EXPORT_SYMBOL(ipmi_set_my_LUN);
3824 EXPORT_SYMBOL(ipmi_get_my_LUN);
3825 EXPORT_SYMBOL(ipmi_smi_add_proc_entry);
3826 EXPORT_SYMBOL(ipmi_user_set_run_to_completion);
3827 EXPORT_SYMBOL(ipmi_free_recv_msg);