/*
 * Incoming and outgoing message routing for an IPMI interface.
 *
 * Author: MontaVista Software, Inc.
 *         Corey Minyard <minyard@mvista.com>
 *
 * Copyright 2002 MontaVista Software Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <asm/system.h>
#include <linux/sched.h>
#include <linux/poll.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/ipmi.h>
#include <linux/ipmi_smi.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#define PFX "IPMI message handler: "

#define IPMI_DRIVER_VERSION "39.1"

static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
static int ipmi_init_msghandler(void);

static int initialized;

#ifdef CONFIG_PROC_FS
static struct proc_dir_entry *proc_ipmi_root;
#endif /* CONFIG_PROC_FS */
/* Remain in auto-maintenance mode for this amount of time (in ms). */
#define IPMI_MAINTENANCE_MODE_TIMEOUT 30000

#define MAX_EVENTS_IN_QUEUE 25

/* Don't let a message sit in a queue forever, always time it with at least
   the max message timer.  This is in milliseconds. */
#define MAX_MSG_TIMEOUT 60000
/*
 * The main "user" data structure.
 */
struct ipmi_user {
        struct list_head link;

        struct kref refcount;

        /* Set to "0" when the user is destroyed. */

        /* The upper layer that handles receive messages. */
        struct ipmi_user_hndl *handler;
        void                  *handler_data;

        /* The interface this user is bound to. */
        ipmi_smi_t intf;

        /* Does this interface receive IPMI events? */
        int gets_events;
};

struct cmd_rcvr {
        struct list_head link;

        ipmi_user_t   user;
        unsigned char netfn;
        unsigned char cmd;
        unsigned int  chans;

        /*
         * This is used to form a linked list during mass deletion.
         * Since this is in an RCU list, we cannot use the link above
         * or change any data until the RCU period completes.  So we
         * use this next variable during mass deletion so we can have
         * a list and don't have to wait and restart the search on
         * every individual deletion of a command. */
        struct cmd_rcvr *next;
};

struct seq_table {
        unsigned int inuse : 1;
        unsigned int broadcast : 1;

        unsigned long timeout;
        unsigned long orig_timeout;
        unsigned int  retries_left;

        /* To verify on an incoming send message response that this is
           the message that the response is for, we keep a sequence id
           and increment it every time we send a message. */
        long seqid;

        /* This is held so we can properly respond to the message on a
           timeout, and it is used to hold the temporary data for
           retransmission, too. */
        struct ipmi_recv_msg *recv_msg;
};

/* Store the information in a msgid (long) to allow us to find a
   sequence table entry from the msgid. */
#define STORE_SEQ_IN_MSGID(seq, seqid) (((seq&0xff)<<26) | (seqid&0x3ffffff))

#define GET_SEQ_FROM_MSGID(msgid, seq, seqid) \
        do {                                                            \
                seq = ((msgid >> 26) & 0x3f);                           \
                seqid = (msgid & 0x3fffff);                             \
        } while (0)

#define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3fffff)
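
/*
 * Worked example (illustrative only, not part of the driver): with
 * seq = 0x2a and seqid = 0x1234, STORE_SEQ_IN_MSGID() produces the
 * msgid 0xa8001234.  Feeding that msgid back through
 * GET_SEQ_FROM_MSGID() recovers seq = 0x2a and seqid = 0x1234, since
 * NEXT_SEQID() keeps every seqid within the mask used on extraction.
 * The msgid therefore carries everything needed to locate the
 * sequence table entry when the matching response arrives.
 */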
struct ipmi_channel {
        unsigned char medium;
        unsigned char protocol;

        /* My slave address.  This is initialized to IPMI_BMC_SLAVE_ADDR,
           but may be changed by the user. */
        unsigned char address;

        /* My LUN.  This should generally stay the SMS LUN, but just in
           case... */
        unsigned char lun;
};

#ifdef CONFIG_PROC_FS
struct ipmi_proc_entry {
        char                   *name;
        struct ipmi_proc_entry *next;
};
#endif

struct bmc_device {
        struct platform_device *dev;
        struct ipmi_device_id  id;
        unsigned char          guid[16];
        int                    guid_set;

        struct kref            refcount;

        /* bmc device attributes */
        struct device_attribute device_id_attr;
        struct device_attribute provides_dev_sdrs_attr;
        struct device_attribute revision_attr;
        struct device_attribute firmware_rev_attr;
        struct device_attribute version_attr;
        struct device_attribute add_dev_support_attr;
        struct device_attribute manufacturer_id_attr;
        struct device_attribute product_id_attr;
        struct device_attribute guid_attr;
        struct device_attribute aux_firmware_rev_attr;
};

#define IPMI_IPMB_NUM_SEQ	64
#define IPMI_MAX_CHANNELS	16
struct ipmi_smi {
        /* What interface number are we? */
        int intf_num;

        struct kref refcount;

        /* Used for a list of interfaces. */
        struct list_head link;

        /* The list of upper layers that are using me.  seq_lock
           protects this. */
        struct list_head users;

        /* Information to supply to users. */
        unsigned char ipmi_version_major;
        unsigned char ipmi_version_minor;

        /* Used for wake ups at startup. */
        wait_queue_head_t waitq;

        struct bmc_device *bmc;
        char *my_dev_name;
        char *sysfs_name;

        /* This is the lower-layer's sender routine.  Note that you
         * must either be holding the ipmi_interfaces_mutex or be in
         * an unpreemptible region to use this.  You must fetch the
         * value into a local variable and make sure it is not NULL. */
        struct ipmi_smi_handlers *handlers;
        void                     *send_info;

#ifdef CONFIG_PROC_FS
        /* A list of proc entries for this interface.  This does not
           need a lock, only one thread creates it and only one thread
           destroys it. */
        spinlock_t             proc_entry_lock;
        struct ipmi_proc_entry *proc_entries;
#endif

        /* Driver-model device for the system interface. */
        struct device *si_dev;

        /* A table of sequence numbers for this interface.  We use the
           sequence numbers for IPMB messages that go out of the
           interface to match them up with their responses.  A routine
           is called periodically to time the items in this list. */
        spinlock_t       seq_lock;
        struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
        int              curr_seq;

        /* Messages that were delayed for some reason (out of memory,
           for instance), will go in here to be processed later in a
           periodic timer interrupt. */
        spinlock_t       waiting_msgs_lock;
        struct list_head waiting_msgs;

        /* The list of command receivers that are registered for commands
           on this interface. */
        struct mutex     cmd_rcvrs_mutex;
        struct list_head cmd_rcvrs;

        /* Events that were queued because no one was there to receive
           them. */
        spinlock_t       events_lock; /* For dealing with event stuff. */
        struct list_head waiting_events;
        unsigned int     waiting_events_count; /* How many events in queue? */
        int              delivering_events;

        /* The event receiver for my BMC, only really used at panic
           shutdown as a place to store this. */
        unsigned char event_receiver;
        unsigned char event_receiver_lun;
        unsigned char local_sel_device;
        unsigned char local_event_generator;

        /* For handling of maintenance mode. */
        int maintenance_mode;
        int maintenance_mode_enable;
        int auto_maintenance_timeout;
        spinlock_t maintenance_mode_lock; /* Used in a timer... */

        /* A cheap hack, if this is non-null and a message to an
           interface comes in with a NULL user, call this routine with
           it.  Note that the message will still be freed by the
           caller.  This only works on the system interface. */
        void (*null_user_handler)(ipmi_smi_t intf, struct ipmi_recv_msg *msg);

        /* When we are scanning the channels for an SMI, this will
           tell which channel we are scanning. */
        int curr_channel;

        /* Channel information */
        struct ipmi_channel channels[IPMI_MAX_CHANNELS];

        struct proc_dir_entry *proc_dir;
        char                  proc_dir_name[10];

        spinlock_t counter_lock; /* For making counters atomic. */

        /* Commands we got that were invalid. */
        unsigned int sent_invalid_commands;

        /* Commands we sent to the MC. */
        unsigned int sent_local_commands;
        /* Responses from the MC that were delivered to a user. */
        unsigned int handled_local_responses;
        /* Responses from the MC that were not delivered to a user. */
        unsigned int unhandled_local_responses;

        /* Commands we sent out to the IPMB bus. */
        unsigned int sent_ipmb_commands;
        /* Commands sent on the IPMB that had errors on the SEND CMD */
        unsigned int sent_ipmb_command_errs;
        /* Each retransmit increments this count. */
        unsigned int retransmitted_ipmb_commands;
        /* When a message times out (runs out of retransmits) this is
           incremented. */
        unsigned int timed_out_ipmb_commands;

        /* This is like above, but for broadcasts.  Broadcasts are
           *not* included in the above count (they are expected to
           time out). */
        unsigned int timed_out_ipmb_broadcasts;

        /* Responses I have sent to the IPMB bus. */
        unsigned int sent_ipmb_responses;

        /* The response was delivered to the user. */
        unsigned int handled_ipmb_responses;
        /* The response had invalid data in it. */
        unsigned int invalid_ipmb_responses;
        /* The response didn't have anyone waiting for it. */
        unsigned int unhandled_ipmb_responses;

        /* Commands we sent out to the LAN interface. */
        unsigned int sent_lan_commands;
        /* Commands sent on the LAN that had errors on the SEND CMD */
        unsigned int sent_lan_command_errs;
        /* Each retransmit increments this count. */
        unsigned int retransmitted_lan_commands;
        /* When a message times out (runs out of retransmits) this is
           incremented. */
        unsigned int timed_out_lan_commands;

        /* Responses I have sent to the LAN interface. */
        unsigned int sent_lan_responses;

        /* The response was delivered to the user. */
        unsigned int handled_lan_responses;
        /* The response had invalid data in it. */
        unsigned int invalid_lan_responses;
        /* The response didn't have anyone waiting for it. */
        unsigned int unhandled_lan_responses;

        /* The command was delivered to the user. */
        unsigned int handled_commands;
        /* The command had invalid data in it. */
        unsigned int invalid_commands;
        /* The command didn't have anyone waiting for it. */
        unsigned int unhandled_commands;

        /* Invalid data in an event. */
        unsigned int invalid_events;
        /* Events that were received with the proper format. */
        unsigned int events;
};
#define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev)

/*
 * The driver model view of the IPMI messaging driver.
 */
static struct device_driver ipmidriver = {
        .name = "ipmi",
        .bus = &platform_bus_type
};
static DEFINE_MUTEX(ipmidriver_mutex);

static struct list_head ipmi_interfaces = LIST_HEAD_INIT(ipmi_interfaces);
static DEFINE_MUTEX(ipmi_interfaces_mutex);

/* List of watchers that want to know when smi's are added and
   deleted. */
static struct list_head smi_watchers = LIST_HEAD_INIT(smi_watchers);
static DEFINE_MUTEX(smi_watchers_mutex);
static void free_recv_msg_list(struct list_head *q)
{
        struct ipmi_recv_msg *msg, *msg2;

        list_for_each_entry_safe(msg, msg2, q, link) {
                list_del(&msg->link);
                ipmi_free_recv_msg(msg);
        }
}

static void free_smi_msg_list(struct list_head *q)
{
        struct ipmi_smi_msg *msg, *msg2;

        list_for_each_entry_safe(msg, msg2, q, link) {
                list_del(&msg->link);
                ipmi_free_smi_msg(msg);
        }
}
static void clean_up_interface_data(ipmi_smi_t intf)
{
        int              i;
        struct cmd_rcvr  *rcvr, *rcvr2;
        struct list_head list;

        free_smi_msg_list(&intf->waiting_msgs);
        free_recv_msg_list(&intf->waiting_events);

        /*
         * Wholesale remove all the entries from the list in the
         * interface and wait for RCU to know that none are in use.
         */
        mutex_lock(&intf->cmd_rcvrs_mutex);
        INIT_LIST_HEAD(&list);
        list_splice_init_rcu(&intf->cmd_rcvrs, &list, synchronize_rcu);
        mutex_unlock(&intf->cmd_rcvrs_mutex);

        list_for_each_entry_safe(rcvr, rcvr2, &list, link)
                kfree(rcvr);

        for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
                if ((intf->seq_table[i].inuse)
                    && (intf->seq_table[i].recv_msg))
                        ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
        }
}
static void intf_free(struct kref *ref)
{
        ipmi_smi_t intf = container_of(ref, struct ipmi_smi, refcount);

        clean_up_interface_data(intf);
        kfree(intf);
}
struct watcher_entry {
        int              intf_num;
        ipmi_smi_t       intf;
        struct list_head link;
};

int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
{
        ipmi_smi_t intf;
        struct list_head to_deliver = LIST_HEAD_INIT(to_deliver);
        struct watcher_entry *e, *e2;

        mutex_lock(&smi_watchers_mutex);

        mutex_lock(&ipmi_interfaces_mutex);

        /* Build a list of things to deliver. */
        list_for_each_entry(intf, &ipmi_interfaces, link) {
                if (intf->intf_num == -1)
                        continue;
                e = kmalloc(sizeof(*e), GFP_KERNEL);
                if (!e)
                        goto out_err;
                kref_get(&intf->refcount);
                e->intf = intf;
                e->intf_num = intf->intf_num;
                list_add_tail(&e->link, &to_deliver);
        }

        /* We will succeed, so add it to the list. */
        list_add(&watcher->link, &smi_watchers);

        mutex_unlock(&ipmi_interfaces_mutex);

        list_for_each_entry_safe(e, e2, &to_deliver, link) {
                list_del(&e->link);
                watcher->new_smi(e->intf_num, e->intf->si_dev);
                kref_put(&e->intf->refcount, intf_free);
                kfree(e);
        }

        mutex_unlock(&smi_watchers_mutex);

        return 0;

 out_err:
        mutex_unlock(&ipmi_interfaces_mutex);
        mutex_unlock(&smi_watchers_mutex);
        list_for_each_entry_safe(e, e2, &to_deliver, link) {
                list_del(&e->link);
                kref_put(&e->intf->refcount, intf_free);
                kfree(e);
        }
        return -ENOMEM;
}
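
/*
 * Illustrative sketch (not part of this file): a client module would
 * normally watch for interfaces coming and going with something like
 * the following.  The callback names and the "my_" identifiers are
 * hypothetical; only ipmi_smi_watcher_register()/unregister() and the
 * struct ipmi_smi_watcher callbacks come from this driver.
 *
 *	static void my_new_smi(int if_num, struct device *dev)
 *	{
 *		printk(KERN_INFO "IPMI interface %d appeared\n", if_num);
 *	}
 *
 *	static void my_smi_gone(int if_num)
 *	{
 *		printk(KERN_INFO "IPMI interface %d went away\n", if_num);
 *	}
 *
 *	static struct ipmi_smi_watcher my_watcher = {
 *		.owner    = THIS_MODULE,
 *		.new_smi  = my_new_smi,
 *		.smi_gone = my_smi_gone,
 *	};
 *
 *	rv = ipmi_smi_watcher_register(&my_watcher);
 *	...
 *	ipmi_smi_watcher_unregister(&my_watcher);
 */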
int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
{
        mutex_lock(&smi_watchers_mutex);
        list_del(&(watcher->link));
        mutex_unlock(&smi_watchers_mutex);
        return 0;
}
/*
 * Must be called with smi_watchers_mutex held.
 */
static void
call_smi_watchers(int i, struct device *dev)
{
        struct ipmi_smi_watcher *w;

        list_for_each_entry(w, &smi_watchers, link) {
                if (try_module_get(w->owner)) {
                        w->new_smi(i, dev);
                        module_put(w->owner);
                }
        }
}
519 ipmi_addr_equal(struct ipmi_addr
*addr1
, struct ipmi_addr
*addr2
)
521 if (addr1
->addr_type
!= addr2
->addr_type
)
524 if (addr1
->channel
!= addr2
->channel
)
527 if (addr1
->addr_type
== IPMI_SYSTEM_INTERFACE_ADDR_TYPE
) {
528 struct ipmi_system_interface_addr
*smi_addr1
529 = (struct ipmi_system_interface_addr
*) addr1
;
530 struct ipmi_system_interface_addr
*smi_addr2
531 = (struct ipmi_system_interface_addr
*) addr2
;
532 return (smi_addr1
->lun
== smi_addr2
->lun
);
535 if ((addr1
->addr_type
== IPMI_IPMB_ADDR_TYPE
)
536 || (addr1
->addr_type
== IPMI_IPMB_BROADCAST_ADDR_TYPE
))
538 struct ipmi_ipmb_addr
*ipmb_addr1
539 = (struct ipmi_ipmb_addr
*) addr1
;
540 struct ipmi_ipmb_addr
*ipmb_addr2
541 = (struct ipmi_ipmb_addr
*) addr2
;
543 return ((ipmb_addr1
->slave_addr
== ipmb_addr2
->slave_addr
)
544 && (ipmb_addr1
->lun
== ipmb_addr2
->lun
));
547 if (addr1
->addr_type
== IPMI_LAN_ADDR_TYPE
) {
548 struct ipmi_lan_addr
*lan_addr1
549 = (struct ipmi_lan_addr
*) addr1
;
550 struct ipmi_lan_addr
*lan_addr2
551 = (struct ipmi_lan_addr
*) addr2
;
553 return ((lan_addr1
->remote_SWID
== lan_addr2
->remote_SWID
)
554 && (lan_addr1
->local_SWID
== lan_addr2
->local_SWID
)
555 && (lan_addr1
->session_handle
556 == lan_addr2
->session_handle
)
557 && (lan_addr1
->lun
== lan_addr2
->lun
));
563 int ipmi_validate_addr(struct ipmi_addr
*addr
, int len
)
565 if (len
< sizeof(struct ipmi_system_interface_addr
)) {
569 if (addr
->addr_type
== IPMI_SYSTEM_INTERFACE_ADDR_TYPE
) {
570 if (addr
->channel
!= IPMI_BMC_CHANNEL
)
575 if ((addr
->channel
== IPMI_BMC_CHANNEL
)
576 || (addr
->channel
>= IPMI_MAX_CHANNELS
)
577 || (addr
->channel
< 0))
580 if ((addr
->addr_type
== IPMI_IPMB_ADDR_TYPE
)
581 || (addr
->addr_type
== IPMI_IPMB_BROADCAST_ADDR_TYPE
))
583 if (len
< sizeof(struct ipmi_ipmb_addr
)) {
589 if (addr
->addr_type
== IPMI_LAN_ADDR_TYPE
) {
590 if (len
< sizeof(struct ipmi_lan_addr
)) {
unsigned int ipmi_addr_length(int addr_type)
{
        if (addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
                return sizeof(struct ipmi_system_interface_addr);

        if ((addr_type == IPMI_IPMB_ADDR_TYPE)
            || (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
                return sizeof(struct ipmi_ipmb_addr);

        if (addr_type == IPMI_LAN_ADDR_TYPE)
                return sizeof(struct ipmi_lan_addr);

        return 0;
}
static void deliver_response(struct ipmi_recv_msg *msg)
{
        if (!msg->user) {
                ipmi_smi_t    intf = msg->user_msg_data;
                unsigned long flags;

                /* Special handling for NULL users. */
                if (intf->null_user_handler) {
                        intf->null_user_handler(intf, msg);
                        spin_lock_irqsave(&intf->counter_lock, flags);
                        intf->handled_local_responses++;
                        spin_unlock_irqrestore(&intf->counter_lock, flags);
                } else {
                        /* No handler, so give up. */
                        spin_lock_irqsave(&intf->counter_lock, flags);
                        intf->unhandled_local_responses++;
                        spin_unlock_irqrestore(&intf->counter_lock, flags);
                }
                ipmi_free_recv_msg(msg);
        } else {
                ipmi_user_t user = msg->user;

                user->handler->ipmi_recv_hndl(msg, user->handler_data);
        }
}
static void
deliver_err_response(struct ipmi_recv_msg *msg, int err)
{
        msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
        msg->msg_data[0] = err;
        msg->msg.netfn |= 1; /* Convert to a response. */
        msg->msg.data_len = 1;
        msg->msg.data = msg->msg_data;
        deliver_response(msg);
}
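
/*
 * Illustrative example: when a request runs out of retries or the send
 * itself fails, the driver uses deliver_err_response() to synthesize a
 * local response.  A request sent with netfn 0x06 (App request) and
 * cmd 0x01 (Get Device ID) would come back to the user with netfn 0x07
 * (the request netfn with the response bit set), data_len 1, and
 * data[0] holding the completion code passed in err (0xc3, the IPMI
 * timeout completion code, in the timeout case).
 */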
652 /* Find the next sequence number not being used and add the given
653 message with the given timeout to the sequence table. This must be
654 called with the interface's seq_lock held. */
655 static int intf_next_seq(ipmi_smi_t intf
,
656 struct ipmi_recv_msg
*recv_msg
,
657 unsigned long timeout
,
666 for (i
= intf
->curr_seq
;
667 (i
+1)%IPMI_IPMB_NUM_SEQ
!= intf
->curr_seq
;
668 i
= (i
+1)%IPMI_IPMB_NUM_SEQ
)
670 if (!intf
->seq_table
[i
].inuse
)
674 if (!intf
->seq_table
[i
].inuse
) {
675 intf
->seq_table
[i
].recv_msg
= recv_msg
;
677 /* Start with the maximum timeout, when the send response
678 comes in we will start the real timer. */
679 intf
->seq_table
[i
].timeout
= MAX_MSG_TIMEOUT
;
680 intf
->seq_table
[i
].orig_timeout
= timeout
;
681 intf
->seq_table
[i
].retries_left
= retries
;
682 intf
->seq_table
[i
].broadcast
= broadcast
;
683 intf
->seq_table
[i
].inuse
= 1;
684 intf
->seq_table
[i
].seqid
= NEXT_SEQID(intf
->seq_table
[i
].seqid
);
686 *seqid
= intf
->seq_table
[i
].seqid
;
687 intf
->curr_seq
= (i
+1)%IPMI_IPMB_NUM_SEQ
;
/* Return the receive message for the given sequence number and
   release the sequence number so it can be reused.  Some other data
   is passed in to be sure the message matches up correctly (to help
   guard against messages coming in after their timeout and the
   sequence number being reused). */
700 static int intf_find_seq(ipmi_smi_t intf
,
705 struct ipmi_addr
*addr
,
706 struct ipmi_recv_msg
**recv_msg
)
711 if (seq
>= IPMI_IPMB_NUM_SEQ
)
714 spin_lock_irqsave(&(intf
->seq_lock
), flags
);
715 if (intf
->seq_table
[seq
].inuse
) {
716 struct ipmi_recv_msg
*msg
= intf
->seq_table
[seq
].recv_msg
;
718 if ((msg
->addr
.channel
== channel
)
719 && (msg
->msg
.cmd
== cmd
)
720 && (msg
->msg
.netfn
== netfn
)
721 && (ipmi_addr_equal(addr
, &(msg
->addr
))))
724 intf
->seq_table
[seq
].inuse
= 0;
728 spin_unlock_irqrestore(&(intf
->seq_lock
), flags
);
734 /* Start the timer for a specific sequence table entry. */
735 static int intf_start_seq_timer(ipmi_smi_t intf
,
744 GET_SEQ_FROM_MSGID(msgid
, seq
, seqid
);
746 spin_lock_irqsave(&(intf
->seq_lock
), flags
);
747 /* We do this verification because the user can be deleted
748 while a message is outstanding. */
749 if ((intf
->seq_table
[seq
].inuse
)
750 && (intf
->seq_table
[seq
].seqid
== seqid
))
752 struct seq_table
*ent
= &(intf
->seq_table
[seq
]);
753 ent
->timeout
= ent
->orig_timeout
;
756 spin_unlock_irqrestore(&(intf
->seq_lock
), flags
);
761 /* Got an error for the send message for a specific sequence number. */
762 static int intf_err_seq(ipmi_smi_t intf
,
770 struct ipmi_recv_msg
*msg
= NULL
;
773 GET_SEQ_FROM_MSGID(msgid
, seq
, seqid
);
775 spin_lock_irqsave(&(intf
->seq_lock
), flags
);
776 /* We do this verification because the user can be deleted
777 while a message is outstanding. */
778 if ((intf
->seq_table
[seq
].inuse
)
779 && (intf
->seq_table
[seq
].seqid
== seqid
))
781 struct seq_table
*ent
= &(intf
->seq_table
[seq
]);
787 spin_unlock_irqrestore(&(intf
->seq_lock
), flags
);
790 deliver_err_response(msg
, err
);
796 int ipmi_create_user(unsigned int if_num
,
797 struct ipmi_user_hndl
*handler
,
802 ipmi_user_t new_user
;
806 /* There is no module usecount here, because it's not
807 required. Since this can only be used by and called from
808 other modules, they will implicitly use this module, and
809 thus this can't be removed unless the other modules are
        /* Make sure the driver is actually initialized; this handles
           problems with initialization order. */
818 rv
= ipmi_init_msghandler();
822 /* The init code doesn't return an error if it was turned
823 off, but it won't initialize. Check that. */
828 new_user
= kmalloc(sizeof(*new_user
), GFP_KERNEL
);
832 mutex_lock(&ipmi_interfaces_mutex
);
833 list_for_each_entry_rcu(intf
, &ipmi_interfaces
, link
) {
834 if (intf
->intf_num
== if_num
)
837 /* Not found, return an error */
842 /* Note that each existing user holds a refcount to the interface. */
843 kref_get(&intf
->refcount
);
845 kref_init(&new_user
->refcount
);
846 new_user
->handler
= handler
;
847 new_user
->handler_data
= handler_data
;
848 new_user
->intf
= intf
;
849 new_user
->gets_events
= 0;
851 if (!try_module_get(intf
->handlers
->owner
)) {
856 if (intf
->handlers
->inc_usecount
) {
857 rv
= intf
->handlers
->inc_usecount(intf
->send_info
);
859 module_put(intf
->handlers
->owner
);
864 /* Hold the lock so intf->handlers is guaranteed to be good
866 mutex_unlock(&ipmi_interfaces_mutex
);
869 spin_lock_irqsave(&intf
->seq_lock
, flags
);
870 list_add_rcu(&new_user
->link
, &intf
->users
);
871 spin_unlock_irqrestore(&intf
->seq_lock
, flags
);
876 kref_put(&intf
->refcount
, intf_free
);
878 mutex_unlock(&ipmi_interfaces_mutex
);
883 static void free_user(struct kref
*ref
)
885 ipmi_user_t user
= container_of(ref
, struct ipmi_user
, refcount
);
889 int ipmi_destroy_user(ipmi_user_t user
)
891 ipmi_smi_t intf
= user
->intf
;
894 struct cmd_rcvr
*rcvr
;
895 struct cmd_rcvr
*rcvrs
= NULL
;
899 /* Remove the user from the interface's sequence table. */
900 spin_lock_irqsave(&intf
->seq_lock
, flags
);
901 list_del_rcu(&user
->link
);
903 for (i
= 0; i
< IPMI_IPMB_NUM_SEQ
; i
++) {
904 if (intf
->seq_table
[i
].inuse
905 && (intf
->seq_table
[i
].recv_msg
->user
== user
))
907 intf
->seq_table
[i
].inuse
= 0;
908 ipmi_free_recv_msg(intf
->seq_table
[i
].recv_msg
);
911 spin_unlock_irqrestore(&intf
->seq_lock
, flags
);
914 * Remove the user from the command receiver's table. First
915 * we build a list of everything (not using the standard link,
916 * since other things may be using it till we do
917 * synchronize_rcu()) then free everything in that list.
919 mutex_lock(&intf
->cmd_rcvrs_mutex
);
920 list_for_each_entry_rcu(rcvr
, &intf
->cmd_rcvrs
, link
) {
921 if (rcvr
->user
== user
) {
922 list_del_rcu(&rcvr
->link
);
927 mutex_unlock(&intf
->cmd_rcvrs_mutex
);
935 mutex_lock(&ipmi_interfaces_mutex
);
936 if (intf
->handlers
) {
937 module_put(intf
->handlers
->owner
);
938 if (intf
->handlers
->dec_usecount
)
939 intf
->handlers
->dec_usecount(intf
->send_info
);
941 mutex_unlock(&ipmi_interfaces_mutex
);
943 kref_put(&intf
->refcount
, intf_free
);
945 kref_put(&user
->refcount
, free_user
);
950 void ipmi_get_version(ipmi_user_t user
,
951 unsigned char *major
,
952 unsigned char *minor
)
954 *major
= user
->intf
->ipmi_version_major
;
955 *minor
= user
->intf
->ipmi_version_minor
;
958 int ipmi_set_my_address(ipmi_user_t user
,
959 unsigned int channel
,
960 unsigned char address
)
962 if (channel
>= IPMI_MAX_CHANNELS
)
964 user
->intf
->channels
[channel
].address
= address
;
968 int ipmi_get_my_address(ipmi_user_t user
,
969 unsigned int channel
,
970 unsigned char *address
)
972 if (channel
>= IPMI_MAX_CHANNELS
)
974 *address
= user
->intf
->channels
[channel
].address
;
978 int ipmi_set_my_LUN(ipmi_user_t user
,
979 unsigned int channel
,
982 if (channel
>= IPMI_MAX_CHANNELS
)
984 user
->intf
->channels
[channel
].lun
= LUN
& 0x3;
988 int ipmi_get_my_LUN(ipmi_user_t user
,
989 unsigned int channel
,
990 unsigned char *address
)
992 if (channel
>= IPMI_MAX_CHANNELS
)
994 *address
= user
->intf
->channels
[channel
].lun
;
998 int ipmi_get_maintenance_mode(ipmi_user_t user
)
1001 unsigned long flags
;
1003 spin_lock_irqsave(&user
->intf
->maintenance_mode_lock
, flags
);
1004 mode
= user
->intf
->maintenance_mode
;
1005 spin_unlock_irqrestore(&user
->intf
->maintenance_mode_lock
, flags
);
1009 EXPORT_SYMBOL(ipmi_get_maintenance_mode
);
1011 static void maintenance_mode_update(ipmi_smi_t intf
)
1013 if (intf
->handlers
->set_maintenance_mode
)
1014 intf
->handlers
->set_maintenance_mode(
1015 intf
->send_info
, intf
->maintenance_mode_enable
);
1018 int ipmi_set_maintenance_mode(ipmi_user_t user
, int mode
)
1021 unsigned long flags
;
1022 ipmi_smi_t intf
= user
->intf
;
1024 spin_lock_irqsave(&intf
->maintenance_mode_lock
, flags
);
1025 if (intf
->maintenance_mode
!= mode
) {
1027 case IPMI_MAINTENANCE_MODE_AUTO
:
1028 intf
->maintenance_mode
= mode
;
1029 intf
->maintenance_mode_enable
1030 = (intf
->auto_maintenance_timeout
> 0);
1033 case IPMI_MAINTENANCE_MODE_OFF
:
1034 intf
->maintenance_mode
= mode
;
1035 intf
->maintenance_mode_enable
= 0;
1038 case IPMI_MAINTENANCE_MODE_ON
:
1039 intf
->maintenance_mode
= mode
;
1040 intf
->maintenance_mode_enable
= 1;
1048 maintenance_mode_update(intf
);
1051 spin_unlock_irqrestore(&intf
->maintenance_mode_lock
, flags
);
1055 EXPORT_SYMBOL(ipmi_set_maintenance_mode
);
1057 int ipmi_set_gets_events(ipmi_user_t user
, int val
)
1059 unsigned long flags
;
1060 ipmi_smi_t intf
= user
->intf
;
1061 struct ipmi_recv_msg
*msg
, *msg2
;
1062 struct list_head msgs
;
1064 INIT_LIST_HEAD(&msgs
);
1066 spin_lock_irqsave(&intf
->events_lock
, flags
);
1067 user
->gets_events
= val
;
1069 if (intf
->delivering_events
)
1071 * Another thread is delivering events for this, so
1072 * let it handle any new events.
1076 /* Deliver any queued events. */
1077 while (user
->gets_events
&& !list_empty(&intf
->waiting_events
)) {
1078 list_for_each_entry_safe(msg
, msg2
, &intf
->waiting_events
, link
)
1079 list_move_tail(&msg
->link
, &msgs
);
1080 intf
->waiting_events_count
= 0;
1082 intf
->delivering_events
= 1;
1083 spin_unlock_irqrestore(&intf
->events_lock
, flags
);
1085 list_for_each_entry_safe(msg
, msg2
, &msgs
, link
) {
1087 kref_get(&user
->refcount
);
1088 deliver_response(msg
);
1091 spin_lock_irqsave(&intf
->events_lock
, flags
);
1092 intf
->delivering_events
= 0;
1096 spin_unlock_irqrestore(&intf
->events_lock
, flags
);
1101 static struct cmd_rcvr
*find_cmd_rcvr(ipmi_smi_t intf
,
1102 unsigned char netfn
,
1106 struct cmd_rcvr
*rcvr
;
1108 list_for_each_entry_rcu(rcvr
, &intf
->cmd_rcvrs
, link
) {
1109 if ((rcvr
->netfn
== netfn
) && (rcvr
->cmd
== cmd
)
1110 && (rcvr
->chans
& (1 << chan
)))
1116 static int is_cmd_rcvr_exclusive(ipmi_smi_t intf
,
1117 unsigned char netfn
,
1121 struct cmd_rcvr
*rcvr
;
1123 list_for_each_entry_rcu(rcvr
, &intf
->cmd_rcvrs
, link
) {
1124 if ((rcvr
->netfn
== netfn
) && (rcvr
->cmd
== cmd
)
1125 && (rcvr
->chans
& chans
))
1131 int ipmi_register_for_cmd(ipmi_user_t user
,
1132 unsigned char netfn
,
1136 ipmi_smi_t intf
= user
->intf
;
1137 struct cmd_rcvr
*rcvr
;
1141 rcvr
= kmalloc(sizeof(*rcvr
), GFP_KERNEL
);
1145 rcvr
->netfn
= netfn
;
1146 rcvr
->chans
= chans
;
1149 mutex_lock(&intf
->cmd_rcvrs_mutex
);
1150 /* Make sure the command/netfn is not already registered. */
1151 if (!is_cmd_rcvr_exclusive(intf
, netfn
, cmd
, chans
)) {
1156 list_add_rcu(&rcvr
->link
, &intf
->cmd_rcvrs
);
1159 mutex_unlock(&intf
->cmd_rcvrs_mutex
);
1166 int ipmi_unregister_for_cmd(ipmi_user_t user
,
1167 unsigned char netfn
,
1171 ipmi_smi_t intf
= user
->intf
;
1172 struct cmd_rcvr
*rcvr
;
1173 struct cmd_rcvr
*rcvrs
= NULL
;
1174 int i
, rv
= -ENOENT
;
1176 mutex_lock(&intf
->cmd_rcvrs_mutex
);
1177 for (i
= 0; i
< IPMI_NUM_CHANNELS
; i
++) {
1178 if (((1 << i
) & chans
) == 0)
1180 rcvr
= find_cmd_rcvr(intf
, netfn
, cmd
, i
);
1183 if (rcvr
->user
== user
) {
1185 rcvr
->chans
&= ~chans
;
1186 if (rcvr
->chans
== 0) {
1187 list_del_rcu(&rcvr
->link
);
1193 mutex_unlock(&intf
->cmd_rcvrs_mutex
);
1203 void ipmi_user_set_run_to_completion(ipmi_user_t user
, int val
)
1205 ipmi_smi_t intf
= user
->intf
;
1207 intf
->handlers
->set_run_to_completion(intf
->send_info
, val
);
static unsigned char
ipmb_checksum(unsigned char *data, int size)
{
        unsigned char csum = 0;

        for (; size > 0; size--, data++)
                csum += *data;

        return -csum;
}
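
/*
 * Example (illustrative only): the IPMB checksum is a two's complement
 * sum, so adding the covered bytes and the checksum always yields zero
 * modulo 256.  For the two header bytes 0x20 (slave address) and 0x18
 * (netfn/LUN), ipmb_checksum() returns 0xc8, and
 * 0x20 + 0x18 + 0xc8 == 0x100 == 0 (mod 256), which is exactly what
 * the receiver verifies.
 */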
static inline void format_ipmb_msg(struct ipmi_smi_msg   *smi_msg,
                                   struct kernel_ipmi_msg *msg,
                                   struct ipmi_ipmb_addr *ipmb_addr,
                                   long                  msgid,
                                   unsigned char         ipmb_seq,
                                   int                   broadcast,
                                   unsigned char         source_address,
                                   unsigned char         source_lun)
{
        int i = broadcast;

        /* Format the IPMB header data. */
        smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
        smi_msg->data[1] = IPMI_SEND_MSG_CMD;
        smi_msg->data[2] = ipmb_addr->channel;
        if (broadcast)
                smi_msg->data[3] = 0;
        smi_msg->data[i+3] = ipmb_addr->slave_addr;
        smi_msg->data[i+4] = (msg->netfn << 2) | (ipmb_addr->lun & 0x3);
        smi_msg->data[i+5] = ipmb_checksum(&(smi_msg->data[i+3]), 2);
        smi_msg->data[i+6] = source_address;
        smi_msg->data[i+7] = (ipmb_seq << 2) | source_lun;
        smi_msg->data[i+8] = msg->cmd;

        /* Now tack on the data to the message. */
        if (msg->data_len > 0)
                memcpy(&(smi_msg->data[i+9]), msg->data,
                       msg->data_len);
        smi_msg->data_size = msg->data_len + 9;

        /* Now calculate the checksum and tack it on. */
        smi_msg->data[i+smi_msg->data_size]
                = ipmb_checksum(&(smi_msg->data[i+6]),
                                smi_msg->data_size-6);

        /* Add on the checksum size and the offset from the
           broadcast. */
        smi_msg->data_size += 1 + i;

        smi_msg->msgid = msgid;
}
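
/*
 * Resulting buffer sketch (illustrative, non-broadcast case, i == 0):
 *
 *	data[0]   netfn/LUN of the local Send Message (0x18 = App request)
 *	data[1]   IPMI_SEND_MSG_CMD (0x34)
 *	data[2]   destination channel
 *	data[3]   rsAddr (target slave address)
 *	data[4]   target netFn << 2 | rsLUN
 *	data[5]   checksum over data[3]..data[4]
 *	data[6]   rqAddr (source_address)
 *	data[7]   ipmb_seq << 2 | source_lun
 *	data[8]   cmd
 *	data[9]..         request payload, if any
 *	last byte         checksum over data[6]..end of payload
 *
 * A broadcast shifts everything from data[3] on by one byte and puts a
 * zero in data[3], which is what the i offset above accounts for.
 */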
1263 static inline void format_lan_msg(struct ipmi_smi_msg
*smi_msg
,
1264 struct kernel_ipmi_msg
*msg
,
1265 struct ipmi_lan_addr
*lan_addr
,
1267 unsigned char ipmb_seq
,
1268 unsigned char source_lun
)
1270 /* Format the IPMB header data. */
1271 smi_msg
->data
[0] = (IPMI_NETFN_APP_REQUEST
<< 2);
1272 smi_msg
->data
[1] = IPMI_SEND_MSG_CMD
;
1273 smi_msg
->data
[2] = lan_addr
->channel
;
1274 smi_msg
->data
[3] = lan_addr
->session_handle
;
1275 smi_msg
->data
[4] = lan_addr
->remote_SWID
;
1276 smi_msg
->data
[5] = (msg
->netfn
<< 2) | (lan_addr
->lun
& 0x3);
1277 smi_msg
->data
[6] = ipmb_checksum(&(smi_msg
->data
[4]), 2);
1278 smi_msg
->data
[7] = lan_addr
->local_SWID
;
1279 smi_msg
->data
[8] = (ipmb_seq
<< 2) | source_lun
;
1280 smi_msg
->data
[9] = msg
->cmd
;
1282 /* Now tack on the data to the message. */
1283 if (msg
->data_len
> 0)
1284 memcpy(&(smi_msg
->data
[10]), msg
->data
,
1286 smi_msg
->data_size
= msg
->data_len
+ 10;
1288 /* Now calculate the checksum and tack it on. */
1289 smi_msg
->data
[smi_msg
->data_size
]
1290 = ipmb_checksum(&(smi_msg
->data
[7]),
1291 smi_msg
->data_size
-7);
1293 /* Add on the checksum size and the offset from the
1295 smi_msg
->data_size
+= 1;
1297 smi_msg
->msgid
= msgid
;
1300 /* Separate from ipmi_request so that the user does not have to be
1301 supplied in certain circumstances (mainly at panic time). If
1302 messages are supplied, they will be freed, even if an error
1304 static int i_ipmi_request(ipmi_user_t user
,
1306 struct ipmi_addr
*addr
,
1308 struct kernel_ipmi_msg
*msg
,
1309 void *user_msg_data
,
1311 struct ipmi_recv_msg
*supplied_recv
,
1313 unsigned char source_address
,
1314 unsigned char source_lun
,
1316 unsigned int retry_time_ms
)
1319 struct ipmi_smi_msg
*smi_msg
;
1320 struct ipmi_recv_msg
*recv_msg
;
1321 unsigned long flags
;
1322 struct ipmi_smi_handlers
*handlers
;
1325 if (supplied_recv
) {
1326 recv_msg
= supplied_recv
;
1328 recv_msg
= ipmi_alloc_recv_msg();
1329 if (recv_msg
== NULL
) {
1333 recv_msg
->user_msg_data
= user_msg_data
;
1336 smi_msg
= (struct ipmi_smi_msg
*) supplied_smi
;
1338 smi_msg
= ipmi_alloc_smi_msg();
1339 if (smi_msg
== NULL
) {
1340 ipmi_free_recv_msg(recv_msg
);
1346 handlers
= intf
->handlers
;
1352 recv_msg
->user
= user
;
1354 kref_get(&user
->refcount
);
1355 recv_msg
->msgid
= msgid
;
1356 /* Store the message to send in the receive message so timeout
1357 responses can get the proper response data. */
1358 recv_msg
->msg
= *msg
;
1360 if (addr
->addr_type
== IPMI_SYSTEM_INTERFACE_ADDR_TYPE
) {
1361 struct ipmi_system_interface_addr
*smi_addr
;
1363 if (msg
->netfn
& 1) {
1364 /* Responses are not allowed to the SMI. */
1369 smi_addr
= (struct ipmi_system_interface_addr
*) addr
;
1370 if (smi_addr
->lun
> 3) {
1371 spin_lock_irqsave(&intf
->counter_lock
, flags
);
1372 intf
->sent_invalid_commands
++;
1373 spin_unlock_irqrestore(&intf
->counter_lock
, flags
);
1378 memcpy(&recv_msg
->addr
, smi_addr
, sizeof(*smi_addr
));
1380 if ((msg
->netfn
== IPMI_NETFN_APP_REQUEST
)
1381 && ((msg
->cmd
== IPMI_SEND_MSG_CMD
)
1382 || (msg
->cmd
== IPMI_GET_MSG_CMD
)
1383 || (msg
->cmd
== IPMI_READ_EVENT_MSG_BUFFER_CMD
)))
1385 /* We don't let the user do these, since we manage
1386 the sequence numbers. */
1387 spin_lock_irqsave(&intf
->counter_lock
, flags
);
1388 intf
->sent_invalid_commands
++;
1389 spin_unlock_irqrestore(&intf
->counter_lock
, flags
);
1394 if (((msg
->netfn
== IPMI_NETFN_APP_REQUEST
)
1395 && ((msg
->cmd
== IPMI_COLD_RESET_CMD
)
1396 || (msg
->cmd
== IPMI_WARM_RESET_CMD
)))
1397 || (msg
->netfn
== IPMI_NETFN_FIRMWARE_REQUEST
))
1399 spin_lock_irqsave(&intf
->maintenance_mode_lock
, flags
);
1400 intf
->auto_maintenance_timeout
1401 = IPMI_MAINTENANCE_MODE_TIMEOUT
;
1402 if (!intf
->maintenance_mode
1403 && !intf
->maintenance_mode_enable
)
1405 intf
->maintenance_mode_enable
= 1;
1406 maintenance_mode_update(intf
);
1408 spin_unlock_irqrestore(&intf
->maintenance_mode_lock
,
1412 if ((msg
->data_len
+ 2) > IPMI_MAX_MSG_LENGTH
) {
1413 spin_lock_irqsave(&intf
->counter_lock
, flags
);
1414 intf
->sent_invalid_commands
++;
1415 spin_unlock_irqrestore(&intf
->counter_lock
, flags
);
1420 smi_msg
->data
[0] = (msg
->netfn
<< 2) | (smi_addr
->lun
& 0x3);
1421 smi_msg
->data
[1] = msg
->cmd
;
1422 smi_msg
->msgid
= msgid
;
1423 smi_msg
->user_data
= recv_msg
;
1424 if (msg
->data_len
> 0)
1425 memcpy(&(smi_msg
->data
[2]), msg
->data
, msg
->data_len
);
1426 smi_msg
->data_size
= msg
->data_len
+ 2;
1427 spin_lock_irqsave(&intf
->counter_lock
, flags
);
1428 intf
->sent_local_commands
++;
1429 spin_unlock_irqrestore(&intf
->counter_lock
, flags
);
1430 } else if ((addr
->addr_type
== IPMI_IPMB_ADDR_TYPE
)
1431 || (addr
->addr_type
== IPMI_IPMB_BROADCAST_ADDR_TYPE
))
1433 struct ipmi_ipmb_addr
*ipmb_addr
;
1434 unsigned char ipmb_seq
;
1438 if (addr
->channel
>= IPMI_MAX_CHANNELS
) {
1439 spin_lock_irqsave(&intf
->counter_lock
, flags
);
1440 intf
->sent_invalid_commands
++;
1441 spin_unlock_irqrestore(&intf
->counter_lock
, flags
);
1446 if (intf
->channels
[addr
->channel
].medium
1447 != IPMI_CHANNEL_MEDIUM_IPMB
)
1449 spin_lock_irqsave(&intf
->counter_lock
, flags
);
1450 intf
->sent_invalid_commands
++;
1451 spin_unlock_irqrestore(&intf
->counter_lock
, flags
);
1457 if (addr
->addr_type
== IPMI_IPMB_BROADCAST_ADDR_TYPE
)
1458 retries
= 0; /* Don't retry broadcasts. */
1462 if (addr
->addr_type
== IPMI_IPMB_BROADCAST_ADDR_TYPE
) {
                        /* Broadcasts add a zero at the beginning of the
                           message, but otherwise are the same as an IPMB
                           address. */
1466 addr
->addr_type
= IPMI_IPMB_ADDR_TYPE
;
1471 /* Default to 1 second retries. */
1472 if (retry_time_ms
== 0)
1473 retry_time_ms
= 1000;
1475 /* 9 for the header and 1 for the checksum, plus
1476 possibly one for the broadcast. */
1477 if ((msg
->data_len
+ 10 + broadcast
) > IPMI_MAX_MSG_LENGTH
) {
1478 spin_lock_irqsave(&intf
->counter_lock
, flags
);
1479 intf
->sent_invalid_commands
++;
1480 spin_unlock_irqrestore(&intf
->counter_lock
, flags
);
1485 ipmb_addr
= (struct ipmi_ipmb_addr
*) addr
;
1486 if (ipmb_addr
->lun
> 3) {
1487 spin_lock_irqsave(&intf
->counter_lock
, flags
);
1488 intf
->sent_invalid_commands
++;
1489 spin_unlock_irqrestore(&intf
->counter_lock
, flags
);
1494 memcpy(&recv_msg
->addr
, ipmb_addr
, sizeof(*ipmb_addr
));
1496 if (recv_msg
->msg
.netfn
& 0x1) {
1497 /* It's a response, so use the user's sequence
1499 spin_lock_irqsave(&intf
->counter_lock
, flags
);
1500 intf
->sent_ipmb_responses
++;
1501 spin_unlock_irqrestore(&intf
->counter_lock
, flags
);
1502 format_ipmb_msg(smi_msg
, msg
, ipmb_addr
, msgid
,
1504 source_address
, source_lun
);
1506 /* Save the receive message so we can use it
1507 to deliver the response. */
1508 smi_msg
->user_data
= recv_msg
;
1510 /* It's a command, so get a sequence for it. */
1512 spin_lock_irqsave(&(intf
->seq_lock
), flags
);
1514 spin_lock(&intf
->counter_lock
);
1515 intf
->sent_ipmb_commands
++;
1516 spin_unlock(&intf
->counter_lock
);
1518 /* Create a sequence number with a 1 second
1519 timeout and 4 retries. */
1520 rv
= intf_next_seq(intf
,
1528 /* We have used up all the sequence numbers,
1529 probably, so abort. */
1530 spin_unlock_irqrestore(&(intf
->seq_lock
),
1535 /* Store the sequence number in the message,
1536 so that when the send message response
1537 comes back we can start the timer. */
1538 format_ipmb_msg(smi_msg
, msg
, ipmb_addr
,
1539 STORE_SEQ_IN_MSGID(ipmb_seq
, seqid
),
1540 ipmb_seq
, broadcast
,
1541 source_address
, source_lun
);
1543 /* Copy the message into the recv message data, so we
1544 can retransmit it later if necessary. */
1545 memcpy(recv_msg
->msg_data
, smi_msg
->data
,
1546 smi_msg
->data_size
);
1547 recv_msg
->msg
.data
= recv_msg
->msg_data
;
1548 recv_msg
->msg
.data_len
= smi_msg
->data_size
;
1550 /* We don't unlock until here, because we need
1551 to copy the completed message into the
1552 recv_msg before we release the lock.
1553 Otherwise, race conditions may bite us. I
1554 know that's pretty paranoid, but I prefer
1556 spin_unlock_irqrestore(&(intf
->seq_lock
), flags
);
1558 } else if (addr
->addr_type
== IPMI_LAN_ADDR_TYPE
) {
1559 struct ipmi_lan_addr
*lan_addr
;
1560 unsigned char ipmb_seq
;
1563 if (addr
->channel
>= IPMI_MAX_CHANNELS
) {
1564 spin_lock_irqsave(&intf
->counter_lock
, flags
);
1565 intf
->sent_invalid_commands
++;
1566 spin_unlock_irqrestore(&intf
->counter_lock
, flags
);
1571 if ((intf
->channels
[addr
->channel
].medium
1572 != IPMI_CHANNEL_MEDIUM_8023LAN
)
1573 && (intf
->channels
[addr
->channel
].medium
1574 != IPMI_CHANNEL_MEDIUM_ASYNC
))
1576 spin_lock_irqsave(&intf
->counter_lock
, flags
);
1577 intf
->sent_invalid_commands
++;
1578 spin_unlock_irqrestore(&intf
->counter_lock
, flags
);
1585 /* Default to 1 second retries. */
1586 if (retry_time_ms
== 0)
1587 retry_time_ms
= 1000;
1589 /* 11 for the header and 1 for the checksum. */
1590 if ((msg
->data_len
+ 12) > IPMI_MAX_MSG_LENGTH
) {
1591 spin_lock_irqsave(&intf
->counter_lock
, flags
);
1592 intf
->sent_invalid_commands
++;
1593 spin_unlock_irqrestore(&intf
->counter_lock
, flags
);
1598 lan_addr
= (struct ipmi_lan_addr
*) addr
;
1599 if (lan_addr
->lun
> 3) {
1600 spin_lock_irqsave(&intf
->counter_lock
, flags
);
1601 intf
->sent_invalid_commands
++;
1602 spin_unlock_irqrestore(&intf
->counter_lock
, flags
);
1607 memcpy(&recv_msg
->addr
, lan_addr
, sizeof(*lan_addr
));
1609 if (recv_msg
->msg
.netfn
& 0x1) {
1610 /* It's a response, so use the user's sequence
1612 spin_lock_irqsave(&intf
->counter_lock
, flags
);
1613 intf
->sent_lan_responses
++;
1614 spin_unlock_irqrestore(&intf
->counter_lock
, flags
);
1615 format_lan_msg(smi_msg
, msg
, lan_addr
, msgid
,
1618 /* Save the receive message so we can use it
1619 to deliver the response. */
1620 smi_msg
->user_data
= recv_msg
;
1622 /* It's a command, so get a sequence for it. */
1624 spin_lock_irqsave(&(intf
->seq_lock
), flags
);
1626 spin_lock(&intf
->counter_lock
);
1627 intf
->sent_lan_commands
++;
1628 spin_unlock(&intf
->counter_lock
);
1630 /* Create a sequence number with a 1 second
1631 timeout and 4 retries. */
1632 rv
= intf_next_seq(intf
,
1640 /* We have used up all the sequence numbers,
1641 probably, so abort. */
1642 spin_unlock_irqrestore(&(intf
->seq_lock
),
1647 /* Store the sequence number in the message,
1648 so that when the send message response
1649 comes back we can start the timer. */
1650 format_lan_msg(smi_msg
, msg
, lan_addr
,
1651 STORE_SEQ_IN_MSGID(ipmb_seq
, seqid
),
1652 ipmb_seq
, source_lun
);
1654 /* Copy the message into the recv message data, so we
1655 can retransmit it later if necessary. */
1656 memcpy(recv_msg
->msg_data
, smi_msg
->data
,
1657 smi_msg
->data_size
);
1658 recv_msg
->msg
.data
= recv_msg
->msg_data
;
1659 recv_msg
->msg
.data_len
= smi_msg
->data_size
;
1661 /* We don't unlock until here, because we need
1662 to copy the completed message into the
1663 recv_msg before we release the lock.
1664 Otherwise, race conditions may bite us. I
1665 know that's pretty paranoid, but I prefer
1667 spin_unlock_irqrestore(&(intf
->seq_lock
), flags
);
1670 /* Unknown address type. */
1671 spin_lock_irqsave(&intf
->counter_lock
, flags
);
1672 intf
->sent_invalid_commands
++;
1673 spin_unlock_irqrestore(&intf
->counter_lock
, flags
);
1681 for (m
= 0; m
< smi_msg
->data_size
; m
++)
1682 printk(" %2.2x", smi_msg
->data
[m
]);
1687 handlers
->sender(intf
->send_info
, smi_msg
, priority
);
1694 ipmi_free_smi_msg(smi_msg
);
1695 ipmi_free_recv_msg(recv_msg
);
1699 static int check_addr(ipmi_smi_t intf
,
1700 struct ipmi_addr
*addr
,
1701 unsigned char *saddr
,
1704 if (addr
->channel
>= IPMI_MAX_CHANNELS
)
1706 *lun
= intf
->channels
[addr
->channel
].lun
;
1707 *saddr
= intf
->channels
[addr
->channel
].address
;
1711 int ipmi_request_settime(ipmi_user_t user
,
1712 struct ipmi_addr
*addr
,
1714 struct kernel_ipmi_msg
*msg
,
1715 void *user_msg_data
,
1718 unsigned int retry_time_ms
)
1720 unsigned char saddr
, lun
;
1725 rv
= check_addr(user
->intf
, addr
, &saddr
, &lun
);
1728 return i_ipmi_request(user
,
1742 int ipmi_request_supply_msgs(ipmi_user_t user
,
1743 struct ipmi_addr
*addr
,
1745 struct kernel_ipmi_msg
*msg
,
1746 void *user_msg_data
,
1748 struct ipmi_recv_msg
*supplied_recv
,
1751 unsigned char saddr
, lun
;
1756 rv
= check_addr(user
->intf
, addr
, &saddr
, &lun
);
1759 return i_ipmi_request(user
,
1773 #ifdef CONFIG_PROC_FS
1774 static int ipmb_file_read_proc(char *page
, char **start
, off_t off
,
1775 int count
, int *eof
, void *data
)
1777 char *out
= (char *) page
;
1778 ipmi_smi_t intf
= data
;
1782 for (i
= 0; i
< IPMI_MAX_CHANNELS
; i
++)
1783 rv
+= sprintf(out
+rv
, "%x ", intf
->channels
[i
].address
);
1784 out
[rv
-1] = '\n'; /* Replace the final space with a newline */
1790 static int version_file_read_proc(char *page
, char **start
, off_t off
,
1791 int count
, int *eof
, void *data
)
1793 char *out
= (char *) page
;
1794 ipmi_smi_t intf
= data
;
1796 return sprintf(out
, "%d.%d\n",
1797 ipmi_version_major(&intf
->bmc
->id
),
1798 ipmi_version_minor(&intf
->bmc
->id
));
1801 static int stat_file_read_proc(char *page
, char **start
, off_t off
,
1802 int count
, int *eof
, void *data
)
1804 char *out
= (char *) page
;
1805 ipmi_smi_t intf
= data
;
1807 out
+= sprintf(out
, "sent_invalid_commands: %d\n",
1808 intf
->sent_invalid_commands
);
1809 out
+= sprintf(out
, "sent_local_commands: %d\n",
1810 intf
->sent_local_commands
);
1811 out
+= sprintf(out
, "handled_local_responses: %d\n",
1812 intf
->handled_local_responses
);
1813 out
+= sprintf(out
, "unhandled_local_responses: %d\n",
1814 intf
->unhandled_local_responses
);
1815 out
+= sprintf(out
, "sent_ipmb_commands: %d\n",
1816 intf
->sent_ipmb_commands
);
1817 out
+= sprintf(out
, "sent_ipmb_command_errs: %d\n",
1818 intf
->sent_ipmb_command_errs
);
1819 out
+= sprintf(out
, "retransmitted_ipmb_commands: %d\n",
1820 intf
->retransmitted_ipmb_commands
);
1821 out
+= sprintf(out
, "timed_out_ipmb_commands: %d\n",
1822 intf
->timed_out_ipmb_commands
);
1823 out
+= sprintf(out
, "timed_out_ipmb_broadcasts: %d\n",
1824 intf
->timed_out_ipmb_broadcasts
);
1825 out
+= sprintf(out
, "sent_ipmb_responses: %d\n",
1826 intf
->sent_ipmb_responses
);
1827 out
+= sprintf(out
, "handled_ipmb_responses: %d\n",
1828 intf
->handled_ipmb_responses
);
1829 out
+= sprintf(out
, "invalid_ipmb_responses: %d\n",
1830 intf
->invalid_ipmb_responses
);
1831 out
+= sprintf(out
, "unhandled_ipmb_responses: %d\n",
1832 intf
->unhandled_ipmb_responses
);
1833 out
+= sprintf(out
, "sent_lan_commands: %d\n",
1834 intf
->sent_lan_commands
);
1835 out
+= sprintf(out
, "sent_lan_command_errs: %d\n",
1836 intf
->sent_lan_command_errs
);
1837 out
+= sprintf(out
, "retransmitted_lan_commands: %d\n",
1838 intf
->retransmitted_lan_commands
);
1839 out
+= sprintf(out
, "timed_out_lan_commands: %d\n",
1840 intf
->timed_out_lan_commands
);
1841 out
+= sprintf(out
, "sent_lan_responses: %d\n",
1842 intf
->sent_lan_responses
);
1843 out
+= sprintf(out
, "handled_lan_responses: %d\n",
1844 intf
->handled_lan_responses
);
1845 out
+= sprintf(out
, "invalid_lan_responses: %d\n",
1846 intf
->invalid_lan_responses
);
1847 out
+= sprintf(out
, "unhandled_lan_responses: %d\n",
1848 intf
->unhandled_lan_responses
);
1849 out
+= sprintf(out
, "handled_commands: %d\n",
1850 intf
->handled_commands
);
1851 out
+= sprintf(out
, "invalid_commands: %d\n",
1852 intf
->invalid_commands
);
1853 out
+= sprintf(out
, "unhandled_commands: %d\n",
1854 intf
->unhandled_commands
);
1855 out
+= sprintf(out
, "invalid_events: %d\n",
1856 intf
->invalid_events
);
1857 out
+= sprintf(out
, "events: %d\n",
1860 return (out
- ((char *) page
));
1862 #endif /* CONFIG_PROC_FS */
1864 int ipmi_smi_add_proc_entry(ipmi_smi_t smi
, char *name
,
1865 read_proc_t
*read_proc
, write_proc_t
*write_proc
,
1866 void *data
, struct module
*owner
)
1869 #ifdef CONFIG_PROC_FS
1870 struct proc_dir_entry
*file
;
1871 struct ipmi_proc_entry
*entry
;
1873 /* Create a list element. */
1874 entry
= kmalloc(sizeof(*entry
), GFP_KERNEL
);
1877 entry
->name
= kmalloc(strlen(name
)+1, GFP_KERNEL
);
1882 strcpy(entry
->name
, name
);
1884 file
= create_proc_entry(name
, 0, smi
->proc_dir
);
1891 file
->read_proc
= read_proc
;
1892 file
->write_proc
= write_proc
;
1893 file
->owner
= owner
;
1895 spin_lock(&smi
->proc_entry_lock
);
1896 /* Stick it on the list. */
1897 entry
->next
= smi
->proc_entries
;
1898 smi
->proc_entries
= entry
;
1899 spin_unlock(&smi
->proc_entry_lock
);
1901 #endif /* CONFIG_PROC_FS */
1906 static int add_proc_entries(ipmi_smi_t smi
, int num
)
1910 #ifdef CONFIG_PROC_FS
1911 sprintf(smi
->proc_dir_name
, "%d", num
);
1912 smi
->proc_dir
= proc_mkdir(smi
->proc_dir_name
, proc_ipmi_root
);
1916 smi
->proc_dir
->owner
= THIS_MODULE
;
1920 rv
= ipmi_smi_add_proc_entry(smi
, "stats",
1921 stat_file_read_proc
, NULL
,
1925 rv
= ipmi_smi_add_proc_entry(smi
, "ipmb",
1926 ipmb_file_read_proc
, NULL
,
1930 rv
= ipmi_smi_add_proc_entry(smi
, "version",
1931 version_file_read_proc
, NULL
,
1933 #endif /* CONFIG_PROC_FS */
1938 static void remove_proc_entries(ipmi_smi_t smi
)
1940 #ifdef CONFIG_PROC_FS
1941 struct ipmi_proc_entry
*entry
;
1943 spin_lock(&smi
->proc_entry_lock
);
1944 while (smi
->proc_entries
) {
1945 entry
= smi
->proc_entries
;
1946 smi
->proc_entries
= entry
->next
;
1948 remove_proc_entry(entry
->name
, smi
->proc_dir
);
1952 spin_unlock(&smi
->proc_entry_lock
);
1953 remove_proc_entry(smi
->proc_dir_name
, proc_ipmi_root
);
1954 #endif /* CONFIG_PROC_FS */
1957 static int __find_bmc_guid(struct device
*dev
, void *data
)
1959 unsigned char *id
= data
;
1960 struct bmc_device
*bmc
= dev_get_drvdata(dev
);
1961 return memcmp(bmc
->guid
, id
, 16) == 0;
1964 static struct bmc_device
*ipmi_find_bmc_guid(struct device_driver
*drv
,
1965 unsigned char *guid
)
1969 dev
= driver_find_device(drv
, NULL
, guid
, __find_bmc_guid
);
1971 return dev_get_drvdata(dev
);
1976 struct prod_dev_id
{
1977 unsigned int product_id
;
1978 unsigned char device_id
;
1981 static int __find_bmc_prod_dev_id(struct device
*dev
, void *data
)
1983 struct prod_dev_id
*id
= data
;
1984 struct bmc_device
*bmc
= dev_get_drvdata(dev
);
1986 return (bmc
->id
.product_id
== id
->product_id
1987 && bmc
->id
.device_id
== id
->device_id
);
1990 static struct bmc_device
*ipmi_find_bmc_prod_dev_id(
1991 struct device_driver
*drv
,
1992 unsigned int product_id
, unsigned char device_id
)
1994 struct prod_dev_id id
= {
1995 .product_id
= product_id
,
1996 .device_id
= device_id
,
2000 dev
= driver_find_device(drv
, NULL
, &id
, __find_bmc_prod_dev_id
);
2002 return dev_get_drvdata(dev
);
2007 static ssize_t
device_id_show(struct device
*dev
,
2008 struct device_attribute
*attr
,
2011 struct bmc_device
*bmc
= dev_get_drvdata(dev
);
2013 return snprintf(buf
, 10, "%u\n", bmc
->id
.device_id
);
2016 static ssize_t
provides_dev_sdrs_show(struct device
*dev
,
2017 struct device_attribute
*attr
,
2020 struct bmc_device
*bmc
= dev_get_drvdata(dev
);
2022 return snprintf(buf
, 10, "%u\n",
2023 (bmc
->id
.device_revision
& 0x80) >> 7);
2026 static ssize_t
revision_show(struct device
*dev
, struct device_attribute
*attr
,
2029 struct bmc_device
*bmc
= dev_get_drvdata(dev
);
2031 return snprintf(buf
, 20, "%u\n",
2032 bmc
->id
.device_revision
& 0x0F);
2035 static ssize_t
firmware_rev_show(struct device
*dev
,
2036 struct device_attribute
*attr
,
2039 struct bmc_device
*bmc
= dev_get_drvdata(dev
);
2041 return snprintf(buf
, 20, "%u.%x\n", bmc
->id
.firmware_revision_1
,
2042 bmc
->id
.firmware_revision_2
);
2045 static ssize_t
ipmi_version_show(struct device
*dev
,
2046 struct device_attribute
*attr
,
2049 struct bmc_device
*bmc
= dev_get_drvdata(dev
);
2051 return snprintf(buf
, 20, "%u.%u\n",
2052 ipmi_version_major(&bmc
->id
),
2053 ipmi_version_minor(&bmc
->id
));
2056 static ssize_t
add_dev_support_show(struct device
*dev
,
2057 struct device_attribute
*attr
,
2060 struct bmc_device
*bmc
= dev_get_drvdata(dev
);
2062 return snprintf(buf
, 10, "0x%02x\n",
2063 bmc
->id
.additional_device_support
);
2066 static ssize_t
manufacturer_id_show(struct device
*dev
,
2067 struct device_attribute
*attr
,
2070 struct bmc_device
*bmc
= dev_get_drvdata(dev
);
2072 return snprintf(buf
, 20, "0x%6.6x\n", bmc
->id
.manufacturer_id
);
2075 static ssize_t
product_id_show(struct device
*dev
,
2076 struct device_attribute
*attr
,
2079 struct bmc_device
*bmc
= dev_get_drvdata(dev
);
2081 return snprintf(buf
, 10, "0x%4.4x\n", bmc
->id
.product_id
);
2084 static ssize_t
aux_firmware_rev_show(struct device
*dev
,
2085 struct device_attribute
*attr
,
2088 struct bmc_device
*bmc
= dev_get_drvdata(dev
);
2090 return snprintf(buf
, 21, "0x%02x 0x%02x 0x%02x 0x%02x\n",
2091 bmc
->id
.aux_firmware_revision
[3],
2092 bmc
->id
.aux_firmware_revision
[2],
2093 bmc
->id
.aux_firmware_revision
[1],
2094 bmc
->id
.aux_firmware_revision
[0]);
2097 static ssize_t
guid_show(struct device
*dev
, struct device_attribute
*attr
,
2100 struct bmc_device
*bmc
= dev_get_drvdata(dev
);
2102 return snprintf(buf
, 100, "%Lx%Lx\n",
2103 (long long) bmc
->guid
[0],
2104 (long long) bmc
->guid
[8]);
2107 static void remove_files(struct bmc_device
*bmc
)
2112 device_remove_file(&bmc
->dev
->dev
,
2113 &bmc
->device_id_attr
);
2114 device_remove_file(&bmc
->dev
->dev
,
2115 &bmc
->provides_dev_sdrs_attr
);
2116 device_remove_file(&bmc
->dev
->dev
,
2117 &bmc
->revision_attr
);
2118 device_remove_file(&bmc
->dev
->dev
,
2119 &bmc
->firmware_rev_attr
);
2120 device_remove_file(&bmc
->dev
->dev
,
2121 &bmc
->version_attr
);
2122 device_remove_file(&bmc
->dev
->dev
,
2123 &bmc
->add_dev_support_attr
);
2124 device_remove_file(&bmc
->dev
->dev
,
2125 &bmc
->manufacturer_id_attr
);
2126 device_remove_file(&bmc
->dev
->dev
,
2127 &bmc
->product_id_attr
);
2129 if (bmc
->id
.aux_firmware_revision_set
)
2130 device_remove_file(&bmc
->dev
->dev
,
2131 &bmc
->aux_firmware_rev_attr
);
2133 device_remove_file(&bmc
->dev
->dev
,
2138 cleanup_bmc_device(struct kref
*ref
)
2140 struct bmc_device
*bmc
;
2142 bmc
= container_of(ref
, struct bmc_device
, refcount
);
2145 platform_device_unregister(bmc
->dev
);
2149 static void ipmi_bmc_unregister(ipmi_smi_t intf
)
2151 struct bmc_device
*bmc
= intf
->bmc
;
2153 if (intf
->sysfs_name
) {
2154 sysfs_remove_link(&intf
->si_dev
->kobj
, intf
->sysfs_name
);
2155 kfree(intf
->sysfs_name
);
2156 intf
->sysfs_name
= NULL
;
2158 if (intf
->my_dev_name
) {
2159 sysfs_remove_link(&bmc
->dev
->dev
.kobj
, intf
->my_dev_name
);
2160 kfree(intf
->my_dev_name
);
2161 intf
->my_dev_name
= NULL
;
2164 mutex_lock(&ipmidriver_mutex
);
2165 kref_put(&bmc
->refcount
, cleanup_bmc_device
);
2167 mutex_unlock(&ipmidriver_mutex
);
2170 static int create_files(struct bmc_device
*bmc
)
2174 bmc
->device_id_attr
.attr
.name
= "device_id";
2175 bmc
->device_id_attr
.attr
.owner
= THIS_MODULE
;
2176 bmc
->device_id_attr
.attr
.mode
= S_IRUGO
;
2177 bmc
->device_id_attr
.show
		= device_id_show;

	bmc->provides_dev_sdrs_attr.attr.name = "provides_device_sdrs";
	bmc->provides_dev_sdrs_attr.attr.owner = THIS_MODULE;
	bmc->provides_dev_sdrs_attr.attr.mode = S_IRUGO;
	bmc->provides_dev_sdrs_attr.show = provides_dev_sdrs_show;

	bmc->revision_attr.attr.name = "revision";
	bmc->revision_attr.attr.owner = THIS_MODULE;
	bmc->revision_attr.attr.mode = S_IRUGO;
	bmc->revision_attr.show = revision_show;

	bmc->firmware_rev_attr.attr.name = "firmware_revision";
	bmc->firmware_rev_attr.attr.owner = THIS_MODULE;
	bmc->firmware_rev_attr.attr.mode = S_IRUGO;
	bmc->firmware_rev_attr.show = firmware_rev_show;

	bmc->version_attr.attr.name = "ipmi_version";
	bmc->version_attr.attr.owner = THIS_MODULE;
	bmc->version_attr.attr.mode = S_IRUGO;
	bmc->version_attr.show = ipmi_version_show;

	bmc->add_dev_support_attr.attr.name = "additional_device_support";
	bmc->add_dev_support_attr.attr.owner = THIS_MODULE;
	bmc->add_dev_support_attr.attr.mode = S_IRUGO;
	bmc->add_dev_support_attr.show = add_dev_support_show;

	bmc->manufacturer_id_attr.attr.name = "manufacturer_id";
	bmc->manufacturer_id_attr.attr.owner = THIS_MODULE;
	bmc->manufacturer_id_attr.attr.mode = S_IRUGO;
	bmc->manufacturer_id_attr.show = manufacturer_id_show;

	bmc->product_id_attr.attr.name = "product_id";
	bmc->product_id_attr.attr.owner = THIS_MODULE;
	bmc->product_id_attr.attr.mode = S_IRUGO;
	bmc->product_id_attr.show = product_id_show;

	bmc->guid_attr.attr.name = "guid";
	bmc->guid_attr.attr.owner = THIS_MODULE;
	bmc->guid_attr.attr.mode = S_IRUGO;
	bmc->guid_attr.show = guid_show;

	bmc->aux_firmware_rev_attr.attr.name = "aux_firmware_revision";
	bmc->aux_firmware_rev_attr.attr.owner = THIS_MODULE;
	bmc->aux_firmware_rev_attr.attr.mode = S_IRUGO;
	bmc->aux_firmware_rev_attr.show = aux_firmware_rev_show;

	err = device_create_file(&bmc->dev->dev, &bmc->device_id_attr);
	if (err) goto out;
	err = device_create_file(&bmc->dev->dev, &bmc->provides_dev_sdrs_attr);
	if (err) goto out_devid;
	err = device_create_file(&bmc->dev->dev, &bmc->revision_attr);
	if (err) goto out_sdrs;
	err = device_create_file(&bmc->dev->dev, &bmc->firmware_rev_attr);
	if (err) goto out_rev;
	err = device_create_file(&bmc->dev->dev, &bmc->version_attr);
	if (err) goto out_firm;
	err = device_create_file(&bmc->dev->dev, &bmc->add_dev_support_attr);
	if (err) goto out_version;
	err = device_create_file(&bmc->dev->dev, &bmc->manufacturer_id_attr);
	if (err) goto out_add_dev;
	err = device_create_file(&bmc->dev->dev, &bmc->product_id_attr);
	if (err) goto out_manu;
	if (bmc->id.aux_firmware_revision_set) {
		err = device_create_file(&bmc->dev->dev,
					 &bmc->aux_firmware_rev_attr);
		if (err) goto out_prod_id;
	}
	if (bmc->guid_set) {
		err = device_create_file(&bmc->dev->dev, &bmc->guid_attr);
		if (err) goto out_aux_firm;
	}

	return 0;

out_aux_firm:
	if (bmc->id.aux_firmware_revision_set)
		device_remove_file(&bmc->dev->dev,
				   &bmc->aux_firmware_rev_attr);
out_prod_id:
	device_remove_file(&bmc->dev->dev, &bmc->product_id_attr);
out_manu:
	device_remove_file(&bmc->dev->dev, &bmc->manufacturer_id_attr);
out_add_dev:
	device_remove_file(&bmc->dev->dev, &bmc->add_dev_support_attr);
out_version:
	device_remove_file(&bmc->dev->dev, &bmc->version_attr);
out_firm:
	device_remove_file(&bmc->dev->dev, &bmc->firmware_rev_attr);
out_rev:
	device_remove_file(&bmc->dev->dev, &bmc->revision_attr);
out_sdrs:
	device_remove_file(&bmc->dev->dev, &bmc->provides_dev_sdrs_attr);
out_devid:
	device_remove_file(&bmc->dev->dev, &bmc->device_id_attr);
out:
	return err;
}
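/*
 * Illustrative sketch only (not part of this driver): the per-attribute
 * boilerplate above could in principle be table driven.  The structure and
 * helper names below are hypothetical; they merely document the pattern
 * the hand-written code follows.
 */
#if 0
struct bmc_attr_setup {
	struct device_attribute *attr;
	const char              *name;
	ssize_t (*show)(struct device *dev, struct device_attribute *attr,
			char *buf);
};

static void setup_bmc_attrs(struct bmc_device *bmc)
{
	struct bmc_attr_setup attrs[] = {
		{ &bmc->device_id_attr,  "device_id",  device_id_show },
		{ &bmc->revision_attr,   "revision",   revision_show },
		{ &bmc->product_id_attr, "product_id", product_id_show },
		/* ...one entry per attribute... */
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(attrs); i++) {
		attrs[i].attr->attr.name  = attrs[i].name;
		attrs[i].attr->attr.owner = THIS_MODULE;
		attrs[i].attr->attr.mode  = S_IRUGO;
		attrs[i].attr->show       = attrs[i].show;
	}
}
#endif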
static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum,
			     const char *sysfs_name)
{
	int               rv;
	struct bmc_device *bmc = intf->bmc;
	struct bmc_device *old_bmc;
	int               size;
	char              dummy[1];

	mutex_lock(&ipmidriver_mutex);

	/*
	 * Try to find if there is a bmc_device struct
	 * representing the interfaced BMC already.
	 */
	if (bmc->guid_set)
		old_bmc = ipmi_find_bmc_guid(&ipmidriver, bmc->guid);
	else
		old_bmc = ipmi_find_bmc_prod_dev_id(&ipmidriver,
						    bmc->id.product_id,
						    bmc->id.device_id);

	/*
	 * If there is already a bmc_device, free the new one,
	 * otherwise register the new BMC device.
	 */
	if (old_bmc) {
		kfree(bmc);
		intf->bmc = old_bmc;
		bmc = old_bmc;

		kref_get(&bmc->refcount);
		mutex_unlock(&ipmidriver_mutex);

		printk(KERN_INFO
		       "ipmi: interfacing existing BMC (man_id: 0x%6.6x,"
		       " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
		       bmc->id.manufacturer_id,
		       bmc->id.product_id,
		       bmc->id.device_id);
	} else {
		char          name[14];
		unsigned char orig_dev_id = bmc->id.device_id;
		int           warn_printed = 0;

		snprintf(name, sizeof(name),
			 "ipmi_bmc.%4.4x", bmc->id.product_id);

		while (ipmi_find_bmc_prod_dev_id(&ipmidriver,
						 bmc->id.product_id,
						 bmc->id.device_id)) {
			if (!warn_printed) {
				printk(KERN_WARNING PFX
				       "This machine has two different BMCs"
				       " with the same product id and device"
				       " id.  This is an error in the"
				       " firmware, but incrementing the"
				       " device id to work around the problem."
				       " Prod ID = 0x%x, Dev ID = 0x%x\n",
				       bmc->id.product_id, bmc->id.device_id);
				warn_printed = 1;
			}
			bmc->id.device_id++; /* Wraps at 255 */
			if (bmc->id.device_id == orig_dev_id) {
				printk(KERN_ERR PFX "Out of device ids!\n");
				break;
			}
		}

		bmc->dev = platform_device_alloc(name, bmc->id.device_id);
		if (!bmc->dev) {
			mutex_unlock(&ipmidriver_mutex);
			printk(KERN_ERR "ipmi_msghandler:"
			       " Unable to allocate platform device\n");
			return -ENOMEM;
		}
		bmc->dev->dev.driver = &ipmidriver;
		dev_set_drvdata(&bmc->dev->dev, bmc);
		kref_init(&bmc->refcount);

		rv = platform_device_add(bmc->dev);
		mutex_unlock(&ipmidriver_mutex);
		if (rv) {
			platform_device_put(bmc->dev);
			bmc->dev = NULL;
			printk(KERN_ERR "ipmi_msghandler:"
			       " Unable to register bmc device: %d\n", rv);
			/* Don't go to out_err, you can only do that if
			   the device is registered already. */
			return rv;
		}

		rv = create_files(bmc);
		if (rv) {
			mutex_lock(&ipmidriver_mutex);
			platform_device_unregister(bmc->dev);
			mutex_unlock(&ipmidriver_mutex);
			return rv;
		}

		printk(KERN_INFO
		       "ipmi: Found new BMC (man_id: 0x%6.6x, "
		       " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
		       bmc->id.manufacturer_id,
		       bmc->id.product_id,
		       bmc->id.device_id);
	}

	/*
	 * Create symlink from system interface device to bmc device
	 * and back.
	 */
	intf->sysfs_name = kstrdup(sysfs_name, GFP_KERNEL);
	if (!intf->sysfs_name) {
		rv = -ENOMEM;
		printk(KERN_ERR
		       "ipmi_msghandler: allocate link to BMC: %d\n", rv);
		goto out_err;
	}

	rv = sysfs_create_link(&intf->si_dev->kobj,
			       &bmc->dev->dev.kobj, intf->sysfs_name);
	if (rv) {
		kfree(intf->sysfs_name);
		intf->sysfs_name = NULL;
		printk(KERN_ERR
		       "ipmi_msghandler: Unable to create bmc symlink: %d\n",
		       rv);
		goto out_err;
	}

	size = snprintf(dummy, 0, "ipmi%d", ifnum);
	intf->my_dev_name = kmalloc(size+1, GFP_KERNEL);
	if (!intf->my_dev_name) {
		kfree(intf->sysfs_name);
		intf->sysfs_name = NULL;
		rv = -ENOMEM;
		printk(KERN_ERR
		       "ipmi_msghandler: allocate link from BMC: %d\n", rv);
		goto out_err;
	}
	snprintf(intf->my_dev_name, size+1, "ipmi%d", ifnum);

	rv = sysfs_create_link(&bmc->dev->dev.kobj, &intf->si_dev->kobj,
			       intf->my_dev_name);
	if (rv) {
		kfree(intf->sysfs_name);
		intf->sysfs_name = NULL;
		kfree(intf->my_dev_name);
		intf->my_dev_name = NULL;
		printk(KERN_ERR "ipmi_msghandler:"
		       " Unable to create symlink to bmc: %d\n", rv);
		goto out_err;
	}

	return 0;

out_err:
	ipmi_bmc_unregister(intf);
	return rv;
}
static int
send_guid_cmd(ipmi_smi_t intf, int chan)
{
	struct kernel_ipmi_msg            msg;
	struct ipmi_system_interface_addr si;

	si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
	si.channel = IPMI_BMC_CHANNEL;
	si.lun = 0;

	msg.netfn = IPMI_NETFN_APP_REQUEST;
	msg.cmd = IPMI_GET_DEVICE_GUID_CMD;
	msg.data = NULL;
	msg.data_len = 0;
	return i_ipmi_request(NULL, intf, (struct ipmi_addr *) &si, 0, &msg,
			      intf, NULL, NULL, 0,
			      intf->channels[0].address,
			      intf->channels[0].lun, -1, 0);
}

static void
guid_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
{
	if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
	    || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
	    || (msg->msg.cmd != IPMI_GET_DEVICE_GUID_CMD))
		/* Not for me */
		return;

	if (msg->msg.data[0] != 0) {
		/* Error from getting the GUID, the BMC doesn't have one. */
		intf->bmc->guid_set = 0;
		goto out;
	}

	if (msg->msg.data_len < 17) {
		intf->bmc->guid_set = 0;
		printk(KERN_WARNING PFX
		       "guid_handler: The GUID response from the BMC was too"
		       " short, it was %d but should have been 17.  Assuming"
		       " GUID is not available.\n",
		       msg->msg.data_len);
		goto out;
	}

	memcpy(intf->bmc->guid, msg->msg.data, 16);
	intf->bmc->guid_set = 1;
 out:
	wake_up(&intf->waitq);
}

static void
get_guid(ipmi_smi_t intf)
{
	int rv;

	intf->bmc->guid_set = 0x2;
	intf->null_user_handler = guid_handler;
	rv = send_guid_cmd(intf, 0);
	if (rv)
		/* Send failed, no GUID available. */
		intf->bmc->guid_set = 0;
	wait_event(intf->waitq, intf->bmc->guid_set != 2);
	intf->null_user_handler = NULL;
}
static int
send_channel_info_cmd(ipmi_smi_t intf, int chan)
{
	struct kernel_ipmi_msg            msg;
	unsigned char                     data[1];
	struct ipmi_system_interface_addr si;

	si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
	si.channel = IPMI_BMC_CHANNEL;
	si.lun = 0;

	msg.netfn = IPMI_NETFN_APP_REQUEST;
	msg.cmd = IPMI_GET_CHANNEL_INFO_CMD;
	msg.data = data;
	msg.data_len = 1;
	data[0] = chan;
	return i_ipmi_request(NULL, intf, (struct ipmi_addr *) &si, 0, &msg,
			      intf, NULL, NULL, 0,
			      intf->channels[0].address,
			      intf->channels[0].lun, -1, 0);
}

static void
channel_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
{
	int chan;
	int rv = 0;

	if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
	    && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
	    && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD))
	{
		/* It's the one we want */
		if (msg->msg.data[0] != 0) {
			/* Got an error from the channel, just go on. */
			if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) {
				/* If the MC does not support this
				   command, that is legal.  We just
				   assume it has one IPMB at channel
				   zero. */
				intf->channels[0].medium
					= IPMI_CHANNEL_MEDIUM_IPMB;
				intf->channels[0].protocol
					= IPMI_CHANNEL_PROTOCOL_IPMB;

				intf->curr_channel = IPMI_MAX_CHANNELS;
				wake_up(&intf->waitq);
				goto out;
			}
			goto next_channel;
		}
		if (msg->msg.data_len < 4) {
			/* Message not big enough, just go on. */
			goto next_channel;
		}
		chan = intf->curr_channel;
		intf->channels[chan].medium = msg->msg.data[2] & 0x7f;
		intf->channels[chan].protocol = msg->msg.data[3] & 0x1f;

	next_channel:
		intf->curr_channel++;
		if (intf->curr_channel >= IPMI_MAX_CHANNELS)
			wake_up(&intf->waitq);
		else
			rv = send_channel_info_cmd(intf, intf->curr_channel);

		if (rv) {
			/* Got an error somehow, just give up. */
			intf->curr_channel = IPMI_MAX_CHANNELS;
			wake_up(&intf->waitq);

			printk(KERN_WARNING PFX
			       "Error sending channel information: %d\n",
			       rv);
		}
	}
 out:
	return;
}
int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
		      void                     *send_info,
		      struct ipmi_device_id    *device_id,
		      struct device            *si_dev,
		      const char               *sysfs_name,
		      unsigned char            slave_addr)
{
	int              i, j;
	int              rv;
	ipmi_smi_t       intf;
	ipmi_smi_t       tintf;
	struct list_head *link;

	/* Make sure the driver is actually initialized, this handles
	   problems with initialization order. */
	if (!initialized) {
		rv = ipmi_init_msghandler();
		if (rv)
			return rv;
		/* The init code doesn't return an error if it was turned
		   off, but it won't initialize.  Check that. */
		if (!initialized)
			return -ENODEV;
	}

	intf = kmalloc(sizeof(*intf), GFP_KERNEL);
	if (!intf)
		return -ENOMEM;
	memset(intf, 0, sizeof(*intf));

	intf->ipmi_version_major = ipmi_version_major(device_id);
	intf->ipmi_version_minor = ipmi_version_minor(device_id);

	intf->bmc = kzalloc(sizeof(*intf->bmc), GFP_KERNEL);
	if (!intf->bmc) {
		kfree(intf);
		return -ENOMEM;
	}
	intf->intf_num = -1; /* Mark it invalid for now. */
	kref_init(&intf->refcount);
	intf->bmc->id = *device_id;
	intf->si_dev = si_dev;
	for (j = 0; j < IPMI_MAX_CHANNELS; j++) {
		intf->channels[j].address = IPMI_BMC_SLAVE_ADDR;
		intf->channels[j].lun = 2;
	}
	if (slave_addr != 0)
		intf->channels[0].address = slave_addr;
	INIT_LIST_HEAD(&intf->users);
	intf->handlers = handlers;
	intf->send_info = send_info;
	spin_lock_init(&intf->seq_lock);
	for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) {
		intf->seq_table[j].inuse = 0;
		intf->seq_table[j].seqid = 0;
	}
#ifdef CONFIG_PROC_FS
	spin_lock_init(&intf->proc_entry_lock);
#endif
	spin_lock_init(&intf->waiting_msgs_lock);
	INIT_LIST_HEAD(&intf->waiting_msgs);
	spin_lock_init(&intf->events_lock);
	INIT_LIST_HEAD(&intf->waiting_events);
	intf->waiting_events_count = 0;
	mutex_init(&intf->cmd_rcvrs_mutex);
	spin_lock_init(&intf->maintenance_mode_lock);
	INIT_LIST_HEAD(&intf->cmd_rcvrs);
	init_waitqueue_head(&intf->waitq);

	spin_lock_init(&intf->counter_lock);
	intf->proc_dir = NULL;

	mutex_lock(&smi_watchers_mutex);
	mutex_lock(&ipmi_interfaces_mutex);
	/* Look for a hole in the numbers. */
	i = 0;
	link = &ipmi_interfaces;
	list_for_each_entry_rcu(tintf, &ipmi_interfaces, link) {
		if (tintf->intf_num != i) {
			link = &tintf->link;
			break;
		}
		i++;
	}
	/* Add the new interface in numeric order. */
	if (i == 0)
		list_add_rcu(&intf->link, &ipmi_interfaces);
	else
		list_add_tail_rcu(&intf->link, link);

	rv = handlers->start_processing(send_info, intf);
	if (rv)
		goto out;

	get_guid(intf);

	if ((intf->ipmi_version_major > 1)
	    || ((intf->ipmi_version_major == 1)
		&& (intf->ipmi_version_minor >= 5)))
	{
		/* Start scanning the channels to see what is
		   available. */
		intf->null_user_handler = channel_handler;
		intf->curr_channel = 0;
		rv = send_channel_info_cmd(intf, 0);
		if (rv)
			goto out;

		/* Wait for the channel info to be read. */
		wait_event(intf->waitq,
			   intf->curr_channel >= IPMI_MAX_CHANNELS);
		intf->null_user_handler = NULL;
	} else {
		/* Assume a single IPMB channel at zero. */
		intf->channels[0].medium = IPMI_CHANNEL_MEDIUM_IPMB;
		intf->channels[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB;
	}

	if (rv == 0)
		rv = add_proc_entries(intf, i);

	rv = ipmi_bmc_register(intf, i, sysfs_name);

 out:
	if (rv) {
		if (intf->proc_dir)
			remove_proc_entries(intf);
		intf->handlers = NULL;
		list_del_rcu(&intf->link);
		mutex_unlock(&ipmi_interfaces_mutex);
		mutex_unlock(&smi_watchers_mutex);
		synchronize_rcu();
		kref_put(&intf->refcount, intf_free);
	} else {
		/*
		 * Keep memory order straight for RCU readers.  Make
		 * sure everything else is committed to memory before
		 * setting intf_num to mark the interface valid.
		 */
		smp_wmb();
		intf->intf_num = i;
		mutex_unlock(&ipmi_interfaces_mutex);
		/* After this point the interface is legal to use. */
		call_smi_watchers(i, intf->si_dev);
		mutex_unlock(&smi_watchers_mutex);
	}

	return rv;
}
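/*
 * Illustrative sketch only (not part of this driver): roughly how a
 * low-level system-interface driver might register with this message
 * handler.  All "example_*" names are hypothetical; the handler entries
 * shown are the ones this file actually invokes (start_processing,
 * sender, request_events, set_run_to_completion).
 */
#if 0
static struct ipmi_smi_handlers example_handlers = {
	.start_processing      = example_start_processing,
	.sender                = example_sender,
	.request_events        = example_request_events,
	.set_run_to_completion = example_set_run_to_completion,
};

static int example_register(void *send_info, struct ipmi_device_id *id,
			    struct device *dev)
{
	/* A slave address of 0 means "use the default BMC slave address". */
	return ipmi_register_smi(&example_handlers, send_info, id, dev,
				 "example_si", 0);
}
#endif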
static void cleanup_smi_msgs(ipmi_smi_t intf)
{
	int              i;
	struct seq_table *ent;

	/* No need for locks, the interface is down. */
	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
		ent = &(intf->seq_table[i]);
		if (!ent->inuse)
			continue;
		deliver_err_response(ent->recv_msg, IPMI_ERR_UNSPECIFIED);
	}
}

int ipmi_unregister_smi(ipmi_smi_t intf)
{
	struct ipmi_smi_watcher *w;
	int    intf_num = intf->intf_num;

	ipmi_bmc_unregister(intf);

	mutex_lock(&smi_watchers_mutex);
	mutex_lock(&ipmi_interfaces_mutex);
	intf->intf_num = -1;
	intf->handlers = NULL;
	list_del_rcu(&intf->link);
	mutex_unlock(&ipmi_interfaces_mutex);
	synchronize_rcu();

	cleanup_smi_msgs(intf);

	remove_proc_entries(intf);

	/* Call all the watcher interfaces to tell them that
	   an interface is gone. */
	list_for_each_entry(w, &smi_watchers, link)
		w->smi_gone(intf_num);
	mutex_unlock(&smi_watchers_mutex);

	kref_put(&intf->refcount, intf_free);
	return 0;
}
static int handle_ipmb_get_msg_rsp(ipmi_smi_t          intf,
				   struct ipmi_smi_msg *msg)
{
	struct ipmi_ipmb_addr ipmb_addr;
	struct ipmi_recv_msg  *recv_msg;
	unsigned long         flags;

	/* This is 11, not 10, because the response must contain a
	 * completion code. */
	if (msg->rsp_size < 11) {
		/* Message not big enough, just ignore it. */
		spin_lock_irqsave(&intf->counter_lock, flags);
		intf->invalid_ipmb_responses++;
		spin_unlock_irqrestore(&intf->counter_lock, flags);
		return 0;
	}

	if (msg->rsp[2] != 0) {
		/* An error getting the response, just ignore it. */
		return 0;
	}

	ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE;
	ipmb_addr.slave_addr = msg->rsp[6];
	ipmb_addr.channel = msg->rsp[3] & 0x0f;
	ipmb_addr.lun = msg->rsp[7] & 3;

	/* It's a response from a remote entity.  Look up the sequence
	   number and handle the response. */
	if (intf_find_seq(intf,
			  msg->rsp[7] >> 2,
			  msg->rsp[3] & 0x0f,
			  msg->rsp[8],
			  (msg->rsp[4] >> 2) & (~1),
			  (struct ipmi_addr *) &(ipmb_addr),
			  &recv_msg)) {
		/* We were unable to find the sequence number,
		   so just nuke the message. */
		spin_lock_irqsave(&intf->counter_lock, flags);
		intf->unhandled_ipmb_responses++;
		spin_unlock_irqrestore(&intf->counter_lock, flags);
		return 0;
	}

	memcpy(recv_msg->msg_data,
	       &(msg->rsp[9]),
	       msg->rsp_size - 10);
	/* The other fields matched, so no need to set them, except
	   for netfn, which needs to be the response that was
	   returned, not the request value. */
	recv_msg->msg.netfn = msg->rsp[4] >> 2;
	recv_msg->msg.data = recv_msg->msg_data;
	recv_msg->msg.data_len = msg->rsp_size - 10;
	recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
	spin_lock_irqsave(&intf->counter_lock, flags);
	intf->handled_ipmb_responses++;
	spin_unlock_irqrestore(&intf->counter_lock, flags);
	deliver_response(recv_msg);

	return 0;
}
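/*
 * For reference (derived from the handlers above and below): in a Get
 * Message response carrying an IPMB message, rsp[2] is the completion
 * code, rsp[3] holds the channel, rsp[4] the embedded netfn/LUN, rsp[6]
 * the remote slave address, rsp[7] the sequence number and LUN, rsp[8]
 * the command, rsp[9..] the data, and the final byte is a checksum --
 * which is why 10 bytes of header and trailer are stripped off.
 */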
static int handle_ipmb_get_msg_cmd(ipmi_smi_t          intf,
				   struct ipmi_smi_msg *msg)
{
	struct cmd_rcvr          *rcvr;
	int                      rv = 0;
	unsigned char            netfn;
	unsigned char            cmd;
	unsigned char            chan;
	ipmi_user_t              user = NULL;
	struct ipmi_ipmb_addr    *ipmb_addr;
	struct ipmi_recv_msg     *recv_msg;
	unsigned long            flags;
	struct ipmi_smi_handlers *handlers;

	if (msg->rsp_size < 10) {
		/* Message not big enough, just ignore it. */
		spin_lock_irqsave(&intf->counter_lock, flags);
		intf->invalid_commands++;
		spin_unlock_irqrestore(&intf->counter_lock, flags);
		return 0;
	}

	if (msg->rsp[2] != 0) {
		/* An error getting the response, just ignore it. */
		return 0;
	}

	netfn = msg->rsp[4] >> 2;
	cmd = msg->rsp[8];
	chan = msg->rsp[3] & 0xf;

	rcu_read_lock();
	rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
	if (rcvr) {
		user = rcvr->user;
		kref_get(&user->refcount);
	}
	rcu_read_unlock();

	if (user == NULL) {
		/* We didn't find a user, deliver an error response. */
		spin_lock_irqsave(&intf->counter_lock, flags);
		intf->unhandled_commands++;
		spin_unlock_irqrestore(&intf->counter_lock, flags);

		msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
		msg->data[1] = IPMI_SEND_MSG_CMD;
		msg->data[2] = msg->rsp[3];
		msg->data[3] = msg->rsp[6];
		msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3);
		msg->data[5] = ipmb_checksum(&(msg->data[3]), 2);
		msg->data[6] = intf->channels[msg->rsp[3] & 0xf].address;
		/* rqseq/lun */
		msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3);
		msg->data[8] = msg->rsp[8]; /* cmd */
		msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE;
		msg->data[10] = ipmb_checksum(&(msg->data[6]), 4);
		msg->data_size = 11;

#ifdef DEBUG_MSGING
	{
		int m;
		printk("Invalid command:");
		for (m = 0; m < msg->data_size; m++)
			printk(" %2.2x", msg->data[m]);
		printk("\n");
	}
#endif
		rcu_read_lock();
		handlers = intf->handlers;
		if (handlers) {
			handlers->sender(intf->send_info, msg, 0);
			/* We used the message, so return the value
			   that causes it to not be freed or
			   queued. */
			rv = -1;
		}
		rcu_read_unlock();
	} else {
		/* Deliver the message to the user. */
		spin_lock_irqsave(&intf->counter_lock, flags);
		intf->handled_commands++;
		spin_unlock_irqrestore(&intf->counter_lock, flags);

		recv_msg = ipmi_alloc_recv_msg();
		if (!recv_msg) {
			/* We couldn't allocate memory for the
			   message, so requeue it for handling
			   later. */
			rv = 1;
			kref_put(&user->refcount, free_user);
		} else {
			/* Extract the source address from the data. */
			ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
			ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE;
			ipmb_addr->slave_addr = msg->rsp[6];
			ipmb_addr->lun = msg->rsp[7] & 3;
			ipmb_addr->channel = msg->rsp[3] & 0xf;

			/* Extract the rest of the message information
			   from the IPMB header. */
			recv_msg->user = user;
			recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
			recv_msg->msgid = msg->rsp[7] >> 2;
			recv_msg->msg.netfn = msg->rsp[4] >> 2;
			recv_msg->msg.cmd = msg->rsp[8];
			recv_msg->msg.data = recv_msg->msg_data;

			/* We chop off 10, not 9 bytes because the checksum
			   at the end also needs to be removed. */
			recv_msg->msg.data_len = msg->rsp_size - 10;
			memcpy(recv_msg->msg_data,
			       &(msg->rsp[9]),
			       msg->rsp_size - 10);
			deliver_response(recv_msg);
		}
	}

	return rv;
}
static int handle_lan_get_msg_rsp(ipmi_smi_t          intf,
				  struct ipmi_smi_msg *msg)
{
	struct ipmi_lan_addr  lan_addr;
	struct ipmi_recv_msg  *recv_msg;
	unsigned long         flags;

	/* This is 13, not 12, because the response must contain a
	 * completion code. */
	if (msg->rsp_size < 13) {
		/* Message not big enough, just ignore it. */
		spin_lock_irqsave(&intf->counter_lock, flags);
		intf->invalid_lan_responses++;
		spin_unlock_irqrestore(&intf->counter_lock, flags);
		return 0;
	}

	if (msg->rsp[2] != 0) {
		/* An error getting the response, just ignore it. */
		return 0;
	}

	lan_addr.addr_type = IPMI_LAN_ADDR_TYPE;
	lan_addr.session_handle = msg->rsp[4];
	lan_addr.remote_SWID = msg->rsp[8];
	lan_addr.local_SWID = msg->rsp[5];
	lan_addr.channel = msg->rsp[3] & 0x0f;
	lan_addr.privilege = msg->rsp[3] >> 4;
	lan_addr.lun = msg->rsp[9] & 3;

	/* It's a response from a remote entity.  Look up the sequence
	   number and handle the response. */
	if (intf_find_seq(intf,
			  msg->rsp[9] >> 2,
			  msg->rsp[3] & 0x0f,
			  msg->rsp[10],
			  (msg->rsp[6] >> 2) & (~1),
			  (struct ipmi_addr *) &(lan_addr),
			  &recv_msg)) {
		/* We were unable to find the sequence number,
		   so just nuke the message. */
		spin_lock_irqsave(&intf->counter_lock, flags);
		intf->unhandled_lan_responses++;
		spin_unlock_irqrestore(&intf->counter_lock, flags);
		return 0;
	}

	memcpy(recv_msg->msg_data,
	       &(msg->rsp[11]),
	       msg->rsp_size - 11);
	/* The other fields matched, so no need to set them, except
	   for netfn, which needs to be the response that was
	   returned, not the request value. */
	recv_msg->msg.netfn = msg->rsp[6] >> 2;
	recv_msg->msg.data = recv_msg->msg_data;
	recv_msg->msg.data_len = msg->rsp_size - 12;
	recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
	spin_lock_irqsave(&intf->counter_lock, flags);
	intf->handled_lan_responses++;
	spin_unlock_irqrestore(&intf->counter_lock, flags);
	deliver_response(recv_msg);

	return 0;
}
static int handle_lan_get_msg_cmd(ipmi_smi_t          intf,
				  struct ipmi_smi_msg *msg)
{
	struct cmd_rcvr       *rcvr;
	int                   rv = 0;
	unsigned char         netfn;
	unsigned char         cmd;
	unsigned char         chan;
	ipmi_user_t           user = NULL;
	struct ipmi_lan_addr  *lan_addr;
	struct ipmi_recv_msg  *recv_msg;
	unsigned long         flags;

	if (msg->rsp_size < 12) {
		/* Message not big enough, just ignore it. */
		spin_lock_irqsave(&intf->counter_lock, flags);
		intf->invalid_commands++;
		spin_unlock_irqrestore(&intf->counter_lock, flags);
		return 0;
	}

	if (msg->rsp[2] != 0) {
		/* An error getting the response, just ignore it. */
		return 0;
	}

	netfn = msg->rsp[6] >> 2;
	cmd = msg->rsp[10];
	chan = msg->rsp[3] & 0xf;

	rcu_read_lock();
	rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
	if (rcvr) {
		user = rcvr->user;
		kref_get(&user->refcount);
	}
	rcu_read_unlock();

	if (user == NULL) {
		/* We didn't find a user, just give up. */
		spin_lock_irqsave(&intf->counter_lock, flags);
		intf->unhandled_commands++;
		spin_unlock_irqrestore(&intf->counter_lock, flags);

		rv = 0; /* Don't do anything with these messages, just
			   allow them to be freed. */
	} else {
		/* Deliver the message to the user. */
		spin_lock_irqsave(&intf->counter_lock, flags);
		intf->handled_commands++;
		spin_unlock_irqrestore(&intf->counter_lock, flags);

		recv_msg = ipmi_alloc_recv_msg();
		if (!recv_msg) {
			/* We couldn't allocate memory for the
			   message, so requeue it for handling
			   later. */
			rv = 1;
			kref_put(&user->refcount, free_user);
		} else {
			/* Extract the source address from the data. */
			lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
			lan_addr->addr_type = IPMI_LAN_ADDR_TYPE;
			lan_addr->session_handle = msg->rsp[4];
			lan_addr->remote_SWID = msg->rsp[8];
			lan_addr->local_SWID = msg->rsp[5];
			lan_addr->lun = msg->rsp[9] & 3;
			lan_addr->channel = msg->rsp[3] & 0xf;
			lan_addr->privilege = msg->rsp[3] >> 4;

			/* Extract the rest of the message information
			   from the IPMB header. */
			recv_msg->user = user;
			recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
			recv_msg->msgid = msg->rsp[9] >> 2;
			recv_msg->msg.netfn = msg->rsp[6] >> 2;
			recv_msg->msg.cmd = msg->rsp[10];
			recv_msg->msg.data = recv_msg->msg_data;

			/* We chop off 12, not 11 bytes because the checksum
			   at the end also needs to be removed. */
			recv_msg->msg.data_len = msg->rsp_size - 12;
			memcpy(recv_msg->msg_data,
			       &(msg->rsp[11]),
			       msg->rsp_size - 12);
			deliver_response(recv_msg);
		}
	}

	return rv;
}
static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg,
				     struct ipmi_smi_msg  *msg)
{
	struct ipmi_system_interface_addr *smi_addr;

	recv_msg->msgid = 0;
	smi_addr = (struct ipmi_system_interface_addr *) &(recv_msg->addr);
	smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
	smi_addr->channel = IPMI_BMC_CHANNEL;
	smi_addr->lun = msg->rsp[0] & 3;
	recv_msg->recv_type = IPMI_ASYNC_EVENT_RECV_TYPE;
	recv_msg->msg.netfn = msg->rsp[0] >> 2;
	recv_msg->msg.cmd = msg->rsp[1];
	memcpy(recv_msg->msg_data, &(msg->rsp[3]), msg->rsp_size - 3);
	recv_msg->msg.data = recv_msg->msg_data;
	recv_msg->msg.data_len = msg->rsp_size - 3;
}
static int handle_read_event_rsp(ipmi_smi_t          intf,
				 struct ipmi_smi_msg *msg)
{
	struct ipmi_recv_msg *recv_msg, *recv_msg2;
	struct list_head     msgs;
	ipmi_user_t          user;
	int                  rv = 0;
	int                  deliver_count = 0;
	unsigned long        flags;

	if (msg->rsp_size < 19) {
		/* Message is too small to be an IPMB event. */
		spin_lock_irqsave(&intf->counter_lock, flags);
		intf->invalid_events++;
		spin_unlock_irqrestore(&intf->counter_lock, flags);
		return 0;
	}

	if (msg->rsp[2] != 0) {
		/* An error getting the event, just ignore it. */
		return 0;
	}

	INIT_LIST_HEAD(&msgs);

	spin_lock_irqsave(&intf->events_lock, flags);

	spin_lock(&intf->counter_lock);
	intf->events++;
	spin_unlock(&intf->counter_lock);

	/* Allocate and fill in one message for every user that is getting
	   events. */
	rcu_read_lock();
	list_for_each_entry_rcu(user, &intf->users, link) {
		if (!user->gets_events)
			continue;

		recv_msg = ipmi_alloc_recv_msg();
		if (!recv_msg) {
			rcu_read_unlock();
			list_for_each_entry_safe(recv_msg, recv_msg2, &msgs,
						 link) {
				list_del(&recv_msg->link);
				ipmi_free_recv_msg(recv_msg);
			}
			/* We couldn't allocate memory for the
			   message, so requeue it for handling
			   later. */
			rv = 1;
			goto out;
		}

		deliver_count++;

		copy_event_into_recv_msg(recv_msg, msg);
		recv_msg->user = user;
		kref_get(&user->refcount);
		list_add_tail(&(recv_msg->link), &msgs);
	}
	rcu_read_unlock();

	if (deliver_count) {
		/* Now deliver all the messages. */
		list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) {
			list_del(&recv_msg->link);
			deliver_response(recv_msg);
		}
	} else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) {
		/* No one to receive the message, put it in queue if there's
		   not already too many things in the queue. */
		recv_msg = ipmi_alloc_recv_msg();
		if (!recv_msg) {
			/* We couldn't allocate memory for the
			   message, so requeue it for handling
			   later. */
			rv = 1;
			goto out;
		}

		copy_event_into_recv_msg(recv_msg, msg);
		list_add_tail(&(recv_msg->link), &(intf->waiting_events));
		intf->waiting_events_count++;
	} else {
		/* There are too many things in the queue, discard this
		   message. */
		printk(KERN_WARNING PFX "Event queue full, discarding an"
		       " incoming event\n");
	}

 out:
	spin_unlock_irqrestore(&(intf->events_lock), flags);

	return rv;
}
static int handle_bmc_rsp(ipmi_smi_t          intf,
			  struct ipmi_smi_msg *msg)
{
	struct ipmi_recv_msg *recv_msg;
	unsigned long        flags;
	struct ipmi_user     *user;

	recv_msg = (struct ipmi_recv_msg *) msg->user_data;
	if (recv_msg == NULL)
	{
		printk(KERN_WARNING
		       "IPMI message received with no owner.  This\n"
		       "could be because of a malformed message, or\n"
		       "because of a hardware error.  Contact your\n"
		       "hardware vendor for assistance.\n");
		return 0;
	}

	user = recv_msg->user;
	/* Make sure the user still exists. */
	if (user && !user->valid) {
		/* The user for the message went away, so give up. */
		spin_lock_irqsave(&intf->counter_lock, flags);
		intf->unhandled_local_responses++;
		spin_unlock_irqrestore(&intf->counter_lock, flags);
		ipmi_free_recv_msg(recv_msg);
	} else {
		struct ipmi_system_interface_addr *smi_addr;

		spin_lock_irqsave(&intf->counter_lock, flags);
		intf->handled_local_responses++;
		spin_unlock_irqrestore(&intf->counter_lock, flags);
		recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
		recv_msg->msgid = msg->msgid;
		smi_addr = ((struct ipmi_system_interface_addr *)
			    &(recv_msg->addr));
		smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
		smi_addr->channel = IPMI_BMC_CHANNEL;
		smi_addr->lun = msg->rsp[0] & 3;
		recv_msg->msg.netfn = msg->rsp[0] >> 2;
		recv_msg->msg.cmd = msg->rsp[1];
		memcpy(recv_msg->msg_data,
		       &(msg->rsp[2]),
		       msg->rsp_size - 2);
		recv_msg->msg.data = recv_msg->msg_data;
		recv_msg->msg.data_len = msg->rsp_size - 2;
		deliver_response(recv_msg);
	}

	return 0;
}
/* Handle a new message.  Return 1 if the message should be requeued,
   0 if the message should be freed, or -1 if the message should not
   be freed or requeued. */
static int handle_new_recv_msg(ipmi_smi_t          intf,
			       struct ipmi_smi_msg *msg)
{
	int requeue;
	int chan;

#ifdef DEBUG_MSGING
	int m;
	printk("Recv:");
	for (m = 0; m < msg->rsp_size; m++)
		printk(" %2.2x", msg->rsp[m]);
	printk("\n");
#endif
	if (msg->rsp_size < 2) {
		/* Message is too small to be correct. */
		printk(KERN_WARNING PFX "BMC returned too small a message"
		       " for netfn %x cmd %x, got %d bytes\n",
		       (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);

		/* Generate an error response for the message. */
		msg->rsp[0] = msg->data[0] | (1 << 2);
		msg->rsp[1] = msg->data[1];
		msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
		msg->rsp_size = 3;
	} else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))/* Netfn */
		   || (msg->rsp[1] != msg->data[1]))		   /* Command */
	{
		/* The response is not even marginally correct. */
		printk(KERN_WARNING PFX "BMC returned incorrect response,"
		       " expected netfn %x cmd %x, got netfn %x cmd %x\n",
		       (msg->data[0] >> 2) | 1, msg->data[1],
		       msg->rsp[0] >> 2, msg->rsp[1]);

		/* Generate an error response for the message. */
		msg->rsp[0] = msg->data[0] | (1 << 2);
		msg->rsp[1] = msg->data[1];
		msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
		msg->rsp_size = 3;
	}

	if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
	    && (msg->rsp[1] == IPMI_SEND_MSG_CMD)
	    && (msg->user_data != NULL))
	{
		/* It's a response to a response we sent.  For this we
		   deliver a send message response to the user. */
		struct ipmi_recv_msg *recv_msg = msg->user_data;

		requeue = 0;
		if (msg->rsp_size < 2)
			/* Message is too small to be correct. */
			goto out;

		chan = msg->data[2] & 0x0f;
		if (chan >= IPMI_MAX_CHANNELS)
			/* Invalid channel number */
			goto out;

		/* Make sure the user still exists. */
		if (!recv_msg->user || !recv_msg->user->valid)
			goto out;

		recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE;
		recv_msg->msg.data = recv_msg->msg_data;
		recv_msg->msg.data_len = 1;
		recv_msg->msg_data[0] = msg->rsp[2];
		deliver_response(recv_msg);
	} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
		   && (msg->rsp[1] == IPMI_GET_MSG_CMD))
	{
		/* It's from the receive queue. */
		chan = msg->rsp[3] & 0xf;
		if (chan >= IPMI_MAX_CHANNELS) {
			/* Invalid channel number */
			requeue = 0;
			goto out;
		}

		switch (intf->channels[chan].medium) {
		case IPMI_CHANNEL_MEDIUM_IPMB:
			if (msg->rsp[4] & 0x04) {
				/* It's a response, so find the
				   requesting message and send it up. */
				requeue = handle_ipmb_get_msg_rsp(intf, msg);
			} else {
				/* It's a command to the SMS from some other
				   entity.  Handle that. */
				requeue = handle_ipmb_get_msg_cmd(intf, msg);
			}
			break;

		case IPMI_CHANNEL_MEDIUM_8023LAN:
		case IPMI_CHANNEL_MEDIUM_ASYNC:
			if (msg->rsp[6] & 0x04) {
				/* It's a response, so find the
				   requesting message and send it up. */
				requeue = handle_lan_get_msg_rsp(intf, msg);
			} else {
				/* It's a command to the SMS from some other
				   entity.  Handle that. */
				requeue = handle_lan_get_msg_cmd(intf, msg);
			}
			break;

		default:
			/* We don't handle the channel type, so just
			 * free the message. */
			requeue = 0;
		}

	} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
		   && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD))
	{
		/* It's an asynchronous event. */
		requeue = handle_read_event_rsp(intf, msg);
	} else {
		/* It's a response from the local BMC. */
		requeue = handle_bmc_rsp(intf, msg);
	}

 out:
	return requeue;
}
/* Handle a new message from the lower layer. */
void ipmi_smi_msg_received(ipmi_smi_t          intf,
			   struct ipmi_smi_msg *msg)
{
	unsigned long flags;
	int           rv;

	if ((msg->data_size >= 2)
	    && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
	    && (msg->data[1] == IPMI_SEND_MSG_CMD)
	    && (msg->user_data == NULL))
	{
		/* This is the local response to a command send, start
		   the timer for these.  The user_data will not be
		   NULL if this is a response send, and we will let
		   response sends just go through. */

		/* Check for errors, if we get certain errors (ones
		   that mean basically we can try again later), we
		   ignore them and start the timer.  Otherwise we
		   report the error immediately. */
		if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0)
		    && (msg->rsp[2] != IPMI_NODE_BUSY_ERR)
		    && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR)
		    && (msg->rsp[2] != IPMI_BUS_ERR)
		    && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR))
		{
			int chan = msg->rsp[3] & 0xf;

			/* Got an error sending the message, handle it. */
			spin_lock_irqsave(&intf->counter_lock, flags);
			if (chan >= IPMI_MAX_CHANNELS)
				; /* This shouldn't happen */
			else if ((intf->channels[chan].medium
				  == IPMI_CHANNEL_MEDIUM_8023LAN)
				 || (intf->channels[chan].medium
				     == IPMI_CHANNEL_MEDIUM_ASYNC))
				intf->sent_lan_command_errs++;
			else
				intf->sent_ipmb_command_errs++;
			spin_unlock_irqrestore(&intf->counter_lock, flags);
			intf_err_seq(intf, msg->msgid, msg->rsp[2]);
		} else {
			/* The message was sent, start the timer. */
			intf_start_seq_timer(intf, msg->msgid);
		}

		ipmi_free_smi_msg(msg);
		goto out;
	}

	/* To preserve message order, if the list is not empty, we
	   tack this message onto the end of the list. */
	spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
	if (!list_empty(&intf->waiting_msgs)) {
		list_add_tail(&msg->link, &intf->waiting_msgs);
		spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
		goto out;
	}
	spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);

	rv = handle_new_recv_msg(intf, msg);
	if (rv > 0) {
		/* Could not handle the message now, just add it to a
		   list to handle later. */
		spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
		list_add_tail(&msg->link, &intf->waiting_msgs);
		spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
	} else if (rv == 0) {
		ipmi_free_smi_msg(msg);
	}

 out:
	return;
}
void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf)
{
	ipmi_user_t user;

	rcu_read_lock();
	list_for_each_entry_rcu(user, &intf->users, link) {
		if (!user->handler->ipmi_watchdog_pretimeout)
			continue;

		user->handler->ipmi_watchdog_pretimeout(user->handler_data);
	}
	rcu_read_unlock();
}
static struct ipmi_smi_msg *
smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg,
		  unsigned char seq, long seqid)
{
	struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg();
	if (!smi_msg)
		/* If we can't allocate the message, then just return, we
		   get 4 retries, so this should be ok. */
		return NULL;

	memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len);
	smi_msg->data_size = recv_msg->msg.data_len;
	smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid);

#ifdef DEBUG_MSGING
	{
		int m;
		printk("Resend: ");
		for (m = 0; m < smi_msg->data_size; m++)
			printk(" %2.2x", smi_msg->data[m]);
		printk("\n");
	}
#endif
	return smi_msg;
}
static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
			      struct list_head *timeouts, long timeout_period,
			      int slot, unsigned long *flags)
{
	struct ipmi_recv_msg     *msg;
	struct ipmi_smi_handlers *handlers;

	if (intf->intf_num == -1)
		return;

	if (!ent->inuse)
		return;

	ent->timeout -= timeout_period;
	if (ent->timeout > 0)
		return;

	if (ent->retries_left == 0) {
		/* The message has used all its retries. */
		ent->inuse = 0;
		msg = ent->recv_msg;
		list_add_tail(&msg->link, timeouts);
		spin_lock(&intf->counter_lock);
		if (ent->broadcast)
			intf->timed_out_ipmb_broadcasts++;
		else if (ent->recv_msg->addr.addr_type == IPMI_LAN_ADDR_TYPE)
			intf->timed_out_lan_commands++;
		else
			intf->timed_out_ipmb_commands++;
		spin_unlock(&intf->counter_lock);
	} else {
		struct ipmi_smi_msg *smi_msg;
		/* More retries, send again. */

		/* Start with the max timer, set to normal
		   timer after the message is sent. */
		ent->timeout = MAX_MSG_TIMEOUT;
		ent->retries_left--;
		spin_lock(&intf->counter_lock);
		if (ent->recv_msg->addr.addr_type == IPMI_LAN_ADDR_TYPE)
			intf->retransmitted_lan_commands++;
		else
			intf->retransmitted_ipmb_commands++;
		spin_unlock(&intf->counter_lock);

		smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot,
					    ent->seqid);
		if (!smi_msg)
			return;

		spin_unlock_irqrestore(&intf->seq_lock, *flags);

		/* Send the new message.  We send with a zero
		 * priority.  It timed out, I doubt time is
		 * that critical now, and high priority
		 * messages are really only for messages to the
		 * local MC, which don't get resent. */
		handlers = intf->handlers;
		if (handlers)
			intf->handlers->sender(intf->send_info,
					       smi_msg, 0);
		else
			ipmi_free_smi_msg(smi_msg);

		spin_lock_irqsave(&intf->seq_lock, *flags);
	}
}
static void ipmi_timeout_handler(long timeout_period)
{
	ipmi_smi_t           intf;
	struct list_head     timeouts;
	struct ipmi_recv_msg *msg, *msg2;
	struct ipmi_smi_msg  *smi_msg, *smi_msg2;
	unsigned long        flags;
	int                  i;

	rcu_read_lock();
	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
		/* See if any waiting messages need to be processed. */
		spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
		list_for_each_entry_safe(smi_msg, smi_msg2,
					 &intf->waiting_msgs, link) {
			if (!handle_new_recv_msg(intf, smi_msg)) {
				list_del(&smi_msg->link);
				ipmi_free_smi_msg(smi_msg);
			} else {
				/* To preserve message order, quit if we
				   can't handle a message. */
				break;
			}
		}
		spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);

		/* Go through the seq table and find any messages that
		   have timed out, putting them in the timeouts
		   list. */
		INIT_LIST_HEAD(&timeouts);
		spin_lock_irqsave(&intf->seq_lock, flags);
		for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
			check_msg_timeout(intf, &(intf->seq_table[i]),
					  &timeouts, timeout_period, i,
					  &flags);
		spin_unlock_irqrestore(&intf->seq_lock, flags);

		list_for_each_entry_safe(msg, msg2, &timeouts, link)
			deliver_err_response(msg, IPMI_TIMEOUT_COMPLETION_CODE);

		/*
		 * Maintenance mode handling.  Check the timeout
		 * optimistically before we claim the lock.  It may
		 * mean a timeout gets missed occasionally, but that
		 * only means the timeout gets extended by one period
		 * in that case.  No big deal, and it avoids the lock
		 * most of the time.
		 */
		if (intf->auto_maintenance_timeout > 0) {
			spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
			if (intf->auto_maintenance_timeout > 0) {
				intf->auto_maintenance_timeout
					-= timeout_period;
				if (!intf->maintenance_mode
				    && (intf->auto_maintenance_timeout <= 0))
				{
					intf->maintenance_mode_enable = 0;
					maintenance_mode_update(intf);
				}
			}
			spin_unlock_irqrestore(&intf->maintenance_mode_lock,
					       flags);
		}
	}
	rcu_read_unlock();
}
static void ipmi_request_event(void)
{
	ipmi_smi_t               intf;
	struct ipmi_smi_handlers *handlers;

	rcu_read_lock();
	/* Called from the timer, no need to check if handlers is
	   valid. */
	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
		/* No event requests when in maintenance mode. */
		if (intf->maintenance_mode_enable)
			continue;

		handlers = intf->handlers;
		if (handlers)
			handlers->request_events(intf->send_info);
	}
	rcu_read_unlock();
}
static struct timer_list ipmi_timer;

/* Call every ~100 ms. */
#define IPMI_TIMEOUT_TIME	100

/* How many jiffies does it take to get to the timeout time. */
#define IPMI_TIMEOUT_JIFFIES	((IPMI_TIMEOUT_TIME * HZ) / 1000)

/* Request events from the queue every second (this is the number of
   IPMI_TIMEOUT_TIMES between event requests).  Hopefully, in the
   future, IPMI will add a way to know immediately if an event is in
   the queue and this silliness can go away. */
#define IPMI_REQUEST_EV_TIME	(1000 / (IPMI_TIMEOUT_TIME))

static atomic_t     stop_operation;
static unsigned int ticks_to_req_ev = IPMI_REQUEST_EV_TIME;

static void ipmi_timeout(unsigned long data)
{
	if (atomic_read(&stop_operation))
		return;

	ticks_to_req_ev--;
	if (ticks_to_req_ev == 0) {
		ipmi_request_event();
		ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
	}

	ipmi_timeout_handler(IPMI_TIMEOUT_TIME);

	mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
}
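/*
 * Worked example of the timing above: with HZ == 1000,
 * IPMI_TIMEOUT_JIFFIES is (100 * 1000) / 1000 == 100 jiffies, so the
 * timer fires roughly every 100 ms, and IPMI_REQUEST_EV_TIME is
 * 1000 / 100 == 10 firings, so events are polled for about once a second.
 */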
static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0);
static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);

/* FIXME - convert these to slabs. */
static void free_smi_msg(struct ipmi_smi_msg *msg)
{
	atomic_dec(&smi_msg_inuse_count);
	kfree(msg);
}

struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
{
	struct ipmi_smi_msg *rv;

	rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC);
	if (rv) {
		rv->done = free_smi_msg;
		rv->user_data = NULL;
		atomic_inc(&smi_msg_inuse_count);
	}
	return rv;
}

static void free_recv_msg(struct ipmi_recv_msg *msg)
{
	atomic_dec(&recv_msg_inuse_count);
	kfree(msg);
}

static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
{
	struct ipmi_recv_msg *rv;

	rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC);
	if (rv) {
		rv->user = NULL;
		rv->done = free_recv_msg;
		atomic_inc(&recv_msg_inuse_count);
	}
	return rv;
}

void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
{
	if (msg->user)
		kref_put(&msg->user->refcount, free_user);
	msg->done(msg);
}
#ifdef CONFIG_IPMI_PANIC_EVENT

static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
{
}

static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
{
}

#ifdef CONFIG_IPMI_PANIC_STRING
static void event_receiver_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
{
	if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
	    && (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE)
	    && (msg->msg.cmd == IPMI_GET_EVENT_RECEIVER_CMD)
	    && (msg->msg.data[0] == IPMI_CC_NO_ERROR))
	{
		/* A get event receiver command, save it. */
		intf->event_receiver = msg->msg.data[1];
		intf->event_receiver_lun = msg->msg.data[2] & 0x3;
	}
}

static void device_id_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
{
	if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
	    && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
	    && (msg->msg.cmd == IPMI_GET_DEVICE_ID_CMD)
	    && (msg->msg.data[0] == IPMI_CC_NO_ERROR))
	{
		/* A get device id command, save if we are an event
		   receiver or generator. */
		intf->local_sel_device = (msg->msg.data[6] >> 2) & 1;
		intf->local_event_generator = (msg->msg.data[6] >> 5) & 1;
	}
}
#endif
static void send_panic_events(char *str)
{
	struct kernel_ipmi_msg            msg;
	ipmi_smi_t                        intf;
	unsigned char                     data[16];
	struct ipmi_system_interface_addr *si;
	struct ipmi_addr                  addr;
	struct ipmi_smi_msg               smi_msg;
	struct ipmi_recv_msg              recv_msg;

	si = (struct ipmi_system_interface_addr *) &addr;
	si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
	si->channel = IPMI_BMC_CHANNEL;
	si->lun = 0;

	/* Fill in an event telling that we have failed. */
	msg.netfn = 0x04; /* Sensor or Event. */
	msg.cmd = 2; /* Platform event command. */
	msg.data = data;
	msg.data_len = 8;
	data[0] = 0x41; /* Kernel generator ID, IPMI table 5-4 */
	data[1] = 0x03; /* This is for IPMI 1.0. */
	data[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */
	data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */
	data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */

	/* Put a few breadcrumbs in.  Hopefully later we can add more things
	   to make the panic events more useful. */
	if (str) {
		data[3] = str[0];
		data[6] = str[1];
		data[7] = str[2];
	}

	smi_msg.done = dummy_smi_done_handler;
	recv_msg.done = dummy_recv_done_handler;

	/* For every registered interface, send the event. */
	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
		if (!intf->handlers)
			/* Interface is not ready. */
			continue;

		/* Send the event announcing the panic. */
		intf->handlers->set_run_to_completion(intf->send_info, 1);
		i_ipmi_request(NULL, intf, &addr, 0, &msg, intf,
			       &smi_msg, &recv_msg, 0,
			       intf->channels[0].address,
			       intf->channels[0].lun,
			       0, 1); /* Don't retry, and don't wait. */
	}

#ifdef CONFIG_IPMI_PANIC_STRING
	/* On every interface, dump a bunch of OEM events holding the
	   panic string. */
	if (!str)
		return;

	/* For every registered interface, send the event. */
	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
		char                  *p = str;
		struct ipmi_ipmb_addr *ipmb;
		int                   j;

		if (intf->intf_num == -1)
			/* Interface was not ready yet. */
			continue;

		/*
		 * intf_num is used as a marker to tell if the
		 * interface is valid.  Thus we need a read barrier to
		 * make sure data fetched before checking intf_num
		 * won't be used.
		 */
		smp_rmb();

		/* First job here is to figure out where to send the
		   OEM events.  There's no way in IPMI to send OEM
		   events using an event send command, so we have to
		   find the SEL to put them in and stick them in
		   there. */

		/* Get capabilities from the get device id. */
		intf->local_sel_device = 0;
		intf->local_event_generator = 0;
		intf->event_receiver = 0;

		/* Request the device info from the local MC. */
		msg.netfn = IPMI_NETFN_APP_REQUEST;
		msg.cmd = IPMI_GET_DEVICE_ID_CMD;
		msg.data = NULL;
		msg.data_len = 0;
		intf->null_user_handler = device_id_fetcher;
		i_ipmi_request(NULL, intf, &addr, 0, &msg, intf,
			       &smi_msg, &recv_msg, 0,
			       intf->channels[0].address,
			       intf->channels[0].lun,
			       0, 1); /* Don't retry, and don't wait. */

		if (intf->local_event_generator) {
			/* Request the event receiver from the local MC. */
			msg.netfn = IPMI_NETFN_SENSOR_EVENT_REQUEST;
			msg.cmd = IPMI_GET_EVENT_RECEIVER_CMD;
			msg.data = NULL;
			msg.data_len = 0;
			intf->null_user_handler = event_receiver_fetcher;
			i_ipmi_request(NULL, intf, &addr, 0, &msg, intf,
				       &smi_msg, &recv_msg, 0,
				       intf->channels[0].address,
				       intf->channels[0].lun,
				       0, 1); /* no retry, and no wait. */
		}
		intf->null_user_handler = NULL;

		/* Validate the event receiver.  The low bit must not
		   be 1 (it must be a valid IPMB address), it cannot
		   be zero, and it must not be my address. */
		if (((intf->event_receiver & 1) == 0)
		    && (intf->event_receiver != 0)
		    && (intf->event_receiver != intf->channels[0].address))
		{
			/* The event receiver is valid, send an IPMB
			   message. */
			ipmb = (struct ipmi_ipmb_addr *) &addr;
			ipmb->addr_type = IPMI_IPMB_ADDR_TYPE;
			ipmb->channel = 0; /* FIXME - is this right? */
			ipmb->lun = intf->event_receiver_lun;
			ipmb->slave_addr = intf->event_receiver;
		} else if (intf->local_sel_device) {
			/* The event receiver was not valid (or was
			   me), but I am an SEL device, just dump it
			   in my SEL. */
			si = (struct ipmi_system_interface_addr *) &addr;
			si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
			si->channel = IPMI_BMC_CHANNEL;
			si->lun = 0;
		} else
			continue; /* Nowhere to send the event. */

		msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */
		msg.cmd = IPMI_ADD_SEL_ENTRY_CMD;
		msg.data = data;
		msg.data_len = 16;

		j = 0;
		while (*p) {
			int size = strlen(p);

			if (size > 11)
				size = 11;
			data[0] = 0;
			data[1] = 0;
			data[2] = 0xf0; /* OEM event without timestamp. */
			data[3] = intf->channels[0].address;
			data[4] = j++; /* sequence # */
			/* Always give 11 bytes, so strncpy will fill
			   it with zeroes for me. */
			strncpy(data+5, p, 11);
			p += size;

			i_ipmi_request(NULL, intf, &addr, 0, &msg, intf,
				       &smi_msg, &recv_msg, 0,
				       intf->channels[0].address,
				       intf->channels[0].lun,
				       0, 1); /* no retry, and no wait. */
		}
	}
#endif /* CONFIG_IPMI_PANIC_STRING */
}
#endif /* CONFIG_IPMI_PANIC_EVENT */
static int has_panicked;

static int panic_event(struct notifier_block *this,
		       unsigned long         event,
		       void                  *ptr)
{
	ipmi_smi_t intf;

	if (has_panicked)
		return NOTIFY_DONE;
	has_panicked = 1;

	/* For every registered interface, set it to run to completion. */
	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
		if (!intf->handlers)
			/* Interface is not ready. */
			continue;

		intf->handlers->set_run_to_completion(intf->send_info, 1);
	}

#ifdef CONFIG_IPMI_PANIC_EVENT
	send_panic_events(ptr);
#endif

	return NOTIFY_DONE;
}

static struct notifier_block panic_block = {
	.notifier_call	= panic_event,
	.next		= NULL,
	.priority	= 200	/* priority: INT_MAX >= x >= 0 */
};
static int ipmi_init_msghandler(void)
{
	int rv;

	if (initialized)
		return 0;

	rv = driver_register(&ipmidriver);
	if (rv) {
		printk(KERN_ERR PFX "Could not register IPMI driver\n");
		return rv;
	}

	printk(KERN_INFO "ipmi message handler version "
	       IPMI_DRIVER_VERSION "\n");

#ifdef CONFIG_PROC_FS
	proc_ipmi_root = proc_mkdir("ipmi", NULL);
	if (!proc_ipmi_root) {
		printk(KERN_ERR PFX "Unable to create IPMI proc dir");
		return -ENOMEM;
	}

	proc_ipmi_root->owner = THIS_MODULE;
#endif /* CONFIG_PROC_FS */

	setup_timer(&ipmi_timer, ipmi_timeout, 0);
	mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);

	atomic_notifier_chain_register(&panic_notifier_list, &panic_block);

	initialized = 1;

	return 0;
}

static __init int ipmi_init_msghandler_mod(void)
{
	ipmi_init_msghandler();
	return 0;
}

static __exit void cleanup_ipmi(void)
{
	int count;

	if (!initialized)
		return;

	atomic_notifier_chain_unregister(&panic_notifier_list, &panic_block);

	/* This can't be called if any interfaces exist, so no worry about
	   shutting down the interfaces. */

	/* Tell the timer to stop, then wait for it to stop.  This avoids
	   problems with race conditions removing the timer here. */
	atomic_inc(&stop_operation);
	del_timer_sync(&ipmi_timer);

#ifdef CONFIG_PROC_FS
	remove_proc_entry(proc_ipmi_root->name, &proc_root);
#endif /* CONFIG_PROC_FS */

	driver_unregister(&ipmidriver);

	initialized = 0;

	/* Check for buffer leaks. */
	count = atomic_read(&smi_msg_inuse_count);
	if (count != 0)
		printk(KERN_WARNING PFX "SMI message count %d at exit\n",
		       count);
	count = atomic_read(&recv_msg_inuse_count);
	if (count != 0)
		printk(KERN_WARNING PFX "recv message count %d at exit\n",
		       count);
}
module_exit(cleanup_ipmi);

module_init(ipmi_init_msghandler_mod);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI interface.");
MODULE_VERSION(IPMI_DRIVER_VERSION);

EXPORT_SYMBOL(ipmi_create_user);
EXPORT_SYMBOL(ipmi_destroy_user);
EXPORT_SYMBOL(ipmi_get_version);
EXPORT_SYMBOL(ipmi_request_settime);
EXPORT_SYMBOL(ipmi_request_supply_msgs);
EXPORT_SYMBOL(ipmi_register_smi);
EXPORT_SYMBOL(ipmi_unregister_smi);
EXPORT_SYMBOL(ipmi_register_for_cmd);
EXPORT_SYMBOL(ipmi_unregister_for_cmd);
EXPORT_SYMBOL(ipmi_smi_msg_received);
EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
EXPORT_SYMBOL(ipmi_alloc_smi_msg);
EXPORT_SYMBOL(ipmi_addr_length);
EXPORT_SYMBOL(ipmi_validate_addr);
EXPORT_SYMBOL(ipmi_set_gets_events);
EXPORT_SYMBOL(ipmi_smi_watcher_register);
EXPORT_SYMBOL(ipmi_smi_watcher_unregister);
EXPORT_SYMBOL(ipmi_set_my_address);
EXPORT_SYMBOL(ipmi_get_my_address);
EXPORT_SYMBOL(ipmi_set_my_LUN);
EXPORT_SYMBOL(ipmi_get_my_LUN);
EXPORT_SYMBOL(ipmi_smi_add_proc_entry);
EXPORT_SYMBOL(ipmi_user_set_run_to_completion);
EXPORT_SYMBOL(ipmi_free_recv_msg);
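/*
 * Illustrative sketch only (not part of this driver): a minimal in-kernel
 * client of the exported interface above, sending a Get Device ID request
 * to the local BMC.  All "example_*" names are hypothetical; the response
 * arrives asynchronously through the receive handler.
 */
#if 0
static void example_recv(struct ipmi_recv_msg *msg, void *handler_data)
{
	/* Inspect msg->msg.data / msg->msg.data_len here. */
	ipmi_free_recv_msg(msg);
}

static struct ipmi_user_hndl example_hndl = {
	.ipmi_recv_hndl = example_recv,
};

static int example_send_get_device_id(unsigned int if_num)
{
	ipmi_user_t                       user;
	struct ipmi_system_interface_addr si;
	struct kernel_ipmi_msg            msg;
	int                               rv;

	rv = ipmi_create_user(if_num, &example_hndl, NULL, &user);
	if (rv)
		return rv;

	si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
	si.channel = IPMI_BMC_CHANNEL;
	si.lun = 0;

	msg.netfn = IPMI_NETFN_APP_REQUEST;
	msg.cmd = IPMI_GET_DEVICE_ID_CMD;
	msg.data = NULL;
	msg.data_len = 0;

	/* Default priority and retry handling. */
	rv = ipmi_request_settime(user, (struct ipmi_addr *) &si, 0,
				  &msg, NULL, 0, -1, 0);
	if (rv)
		ipmi_destroy_user(user);
	return rv;
}
#endif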