[PATCH] IPMI: Fix BT long busy
[linux-2.6/zen-sources.git] / drivers / char / ipmi / ipmi_si_intf.c
blob81a0c89598e790bc767b6552642da1dc69b3968f
1 /*
2 * ipmi_si.c
4 * The interface to the IPMI driver for the system interfaces (KCS, SMIC,
5 * BT).
7 * Author: MontaVista Software, Inc.
8 * Corey Minyard <minyard@mvista.com>
9 * source@mvista.com
11 * Copyright 2002 MontaVista Software Inc.
13 * This program is free software; you can redistribute it and/or modify it
14 * under the terms of the GNU General Public License as published by the
15 * Free Software Foundation; either version 2 of the License, or (at your
16 * option) any later version.
19 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
20 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
21 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
24 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
25 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
26 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
27 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
28 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 * You should have received a copy of the GNU General Public License along
31 * with this program; if not, write to the Free Software Foundation, Inc.,
32 * 675 Mass Ave, Cambridge, MA 02139, USA.
36 * This file holds the "policy" for the interface to the SMI state
37 * machine. It does the configuration, handles timers and interrupts,
38 * and drives the real SMI state machine.
41 #include <linux/module.h>
42 #include <linux/moduleparam.h>
43 #include <asm/system.h>
44 #include <linux/sched.h>
45 #include <linux/timer.h>
46 #include <linux/errno.h>
47 #include <linux/spinlock.h>
48 #include <linux/slab.h>
49 #include <linux/delay.h>
50 #include <linux/list.h>
51 #include <linux/pci.h>
52 #include <linux/ioport.h>
53 #include <linux/notifier.h>
54 #include <linux/mutex.h>
55 #include <linux/kthread.h>
56 #include <asm/irq.h>
57 #include <linux/interrupt.h>
58 #include <linux/rcupdate.h>
59 #include <linux/ipmi_smi.h>
60 #include <asm/io.h>
61 #include "ipmi_si_sm.h"
62 #include <linux/init.h>
63 #include <linux/dmi.h>
64 #include <linux/string.h>
65 #include <linux/ctype.h>
#define PFX "ipmi_si: "

/* Measure times between events in the driver. */
#undef DEBUG_TIMING

/* Call every 10 ms. */
#define SI_TIMEOUT_TIME_USEC	10000
#define SI_USEC_PER_JIFFY	(1000000/HZ)
#define SI_TIMEOUT_JIFFIES	(SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY)
#define SI_SHORT_TIMEOUT_USEC	250 /* .25ms when the SM request a
					short timeout */
/* States the per-interface state machine driver can be in. */
enum si_intf_state {
	SI_NORMAL,
	SI_GETTING_FLAGS,
	SI_GETTING_EVENTS,
	SI_CLEARING_FLAGS,
	SI_CLEARING_FLAGS_THEN_SET_IRQ,
	SI_GETTING_MESSAGES,
	SI_ENABLE_INTERRUPTS1,
	SI_ENABLE_INTERRUPTS2
	/* FIXME - add watchdog stuff. */
};

/* Some BT-specific defines we need here. */
#define IPMI_BT_INTMASK_REG		2
#define IPMI_BT_INTMASK_CLEAR_IRQ_BIT	2
#define IPMI_BT_INTMASK_ENABLE_IRQ_BIT	1
/* The supported system-interface flavors and their printable names. */
enum si_type {
	SI_KCS, SI_SMIC, SI_BT
};
static char *si_to_str[] = { "kcs", "smic", "bt" };
101 #define DEVICE_NAME "ipmi_si"
103 static struct device_driver ipmi_driver =
105 .name = DEVICE_NAME,
106 .bus = &platform_bus_type
109 struct smi_info
111 int intf_num;
112 ipmi_smi_t intf;
113 struct si_sm_data *si_sm;
114 struct si_sm_handlers *handlers;
115 enum si_type si_type;
116 spinlock_t si_lock;
117 spinlock_t msg_lock;
118 struct list_head xmit_msgs;
119 struct list_head hp_xmit_msgs;
120 struct ipmi_smi_msg *curr_msg;
121 enum si_intf_state si_state;
123 /* Used to handle the various types of I/O that can occur with
124 IPMI */
125 struct si_sm_io io;
126 int (*io_setup)(struct smi_info *info);
127 void (*io_cleanup)(struct smi_info *info);
128 int (*irq_setup)(struct smi_info *info);
129 void (*irq_cleanup)(struct smi_info *info);
130 unsigned int io_size;
131 char *addr_source; /* ACPI, PCI, SMBIOS, hardcode, default. */
132 void (*addr_source_cleanup)(struct smi_info *info);
133 void *addr_source_data;
135 /* Per-OEM handler, called from handle_flags().
136 Returns 1 when handle_flags() needs to be re-run
137 or 0 indicating it set si_state itself.
139 int (*oem_data_avail_handler)(struct smi_info *smi_info);
141 /* Flags from the last GET_MSG_FLAGS command, used when an ATTN
142 is set to hold the flags until we are done handling everything
143 from the flags. */
144 #define RECEIVE_MSG_AVAIL 0x01
145 #define EVENT_MSG_BUFFER_FULL 0x02
146 #define WDT_PRE_TIMEOUT_INT 0x08
147 #define OEM0_DATA_AVAIL 0x20
148 #define OEM1_DATA_AVAIL 0x40
149 #define OEM2_DATA_AVAIL 0x80
150 #define OEM_DATA_AVAIL (OEM0_DATA_AVAIL | \
151 OEM1_DATA_AVAIL | \
152 OEM2_DATA_AVAIL)
153 unsigned char msg_flags;
155 /* If set to true, this will request events the next time the
156 state machine is idle. */
157 atomic_t req_events;
159 /* If true, run the state machine to completion on every send
160 call. Generally used after a panic to make sure stuff goes
161 out. */
162 int run_to_completion;
164 /* The I/O port of an SI interface. */
165 int port;
167 /* The space between start addresses of the two ports. For
168 instance, if the first port is 0xca2 and the spacing is 4, then
169 the second port is 0xca6. */
170 unsigned int spacing;
172 /* zero if no irq; */
173 int irq;
175 /* The timer for this si. */
176 struct timer_list si_timer;
178 /* The time (in jiffies) the last timeout occurred at. */
179 unsigned long last_timeout_jiffies;
181 /* Used to gracefully stop the timer without race conditions. */
182 atomic_t stop_operation;
184 /* The driver will disable interrupts when it gets into a
185 situation where it cannot handle messages due to lack of
186 memory. Once that situation clears up, it will re-enable
187 interrupts. */
188 int interrupt_disabled;
190 /* From the get device id response... */
191 struct ipmi_device_id device_id;
193 /* Driver model stuff. */
194 struct device *dev;
195 struct platform_device *pdev;
197 /* True if we allocated the device, false if it came from
198 * someplace else (like PCI). */
199 int dev_registered;
201 /* Slave address, could be reported from DMI. */
202 unsigned char slave_addr;
204 /* Counters and things for the proc filesystem. */
205 spinlock_t count_lock;
206 unsigned long short_timeouts;
207 unsigned long long_timeouts;
208 unsigned long timeout_restarts;
209 unsigned long idles;
210 unsigned long interrupts;
211 unsigned long attentions;
212 unsigned long flag_fetches;
213 unsigned long hosed_count;
214 unsigned long complete_transactions;
215 unsigned long events;
216 unsigned long watchdog_pretimeouts;
217 unsigned long incoming_messages;
219 struct task_struct *thread;
221 struct list_head link;
224 #define SI_MAX_PARMS 4
226 static int force_kipmid[SI_MAX_PARMS];
227 static int num_force_kipmid;
229 static int unload_when_empty = 1;
231 static int try_smi_init(struct smi_info *smi);
232 static void cleanup_one_si(struct smi_info *to_clean);
234 static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list);
235 static int register_xaction_notifier(struct notifier_block * nb)
237 return atomic_notifier_chain_register(&xaction_notifier_list, nb);
240 static void deliver_recv_msg(struct smi_info *smi_info,
241 struct ipmi_smi_msg *msg)
243 /* Deliver the message to the upper layer with the lock
244 released. */
245 spin_unlock(&(smi_info->si_lock));
246 ipmi_smi_msg_received(smi_info->intf, msg);
247 spin_lock(&(smi_info->si_lock));
250 static void return_hosed_msg(struct smi_info *smi_info, int cCode)
252 struct ipmi_smi_msg *msg = smi_info->curr_msg;
254 if (cCode < 0 || cCode > IPMI_ERR_UNSPECIFIED)
255 cCode = IPMI_ERR_UNSPECIFIED;
256 /* else use it as is */
258 /* Make it a reponse */
259 msg->rsp[0] = msg->data[0] | 4;
260 msg->rsp[1] = msg->data[1];
261 msg->rsp[2] = cCode;
262 msg->rsp_size = 3;
264 smi_info->curr_msg = NULL;
265 deliver_recv_msg(smi_info, msg);
268 static enum si_sm_result start_next_msg(struct smi_info *smi_info)
270 int rv;
271 struct list_head *entry = NULL;
272 #ifdef DEBUG_TIMING
273 struct timeval t;
274 #endif
276 /* No need to save flags, we aleady have interrupts off and we
277 already hold the SMI lock. */
278 spin_lock(&(smi_info->msg_lock));
280 /* Pick the high priority queue first. */
281 if (!list_empty(&(smi_info->hp_xmit_msgs))) {
282 entry = smi_info->hp_xmit_msgs.next;
283 } else if (!list_empty(&(smi_info->xmit_msgs))) {
284 entry = smi_info->xmit_msgs.next;
287 if (!entry) {
288 smi_info->curr_msg = NULL;
289 rv = SI_SM_IDLE;
290 } else {
291 int err;
293 list_del(entry);
294 smi_info->curr_msg = list_entry(entry,
295 struct ipmi_smi_msg,
296 link);
297 #ifdef DEBUG_TIMING
298 do_gettimeofday(&t);
299 printk("**Start2: %d.%9.9d\n", t.tv_sec, t.tv_usec);
300 #endif
301 err = atomic_notifier_call_chain(&xaction_notifier_list,
302 0, smi_info);
303 if (err & NOTIFY_STOP_MASK) {
304 rv = SI_SM_CALL_WITHOUT_DELAY;
305 goto out;
307 err = smi_info->handlers->start_transaction(
308 smi_info->si_sm,
309 smi_info->curr_msg->data,
310 smi_info->curr_msg->data_size);
311 if (err) {
312 return_hosed_msg(smi_info, err);
315 rv = SI_SM_CALL_WITHOUT_DELAY;
317 out:
318 spin_unlock(&(smi_info->msg_lock));
320 return rv;
323 static void start_enable_irq(struct smi_info *smi_info)
325 unsigned char msg[2];
327 /* If we are enabling interrupts, we have to tell the
328 BMC to use them. */
329 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
330 msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
332 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
333 smi_info->si_state = SI_ENABLE_INTERRUPTS1;
336 static void start_clear_flags(struct smi_info *smi_info)
338 unsigned char msg[3];
340 /* Make sure the watchdog pre-timeout flag is not set at startup. */
341 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
342 msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
343 msg[2] = WDT_PRE_TIMEOUT_INT;
345 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
346 smi_info->si_state = SI_CLEARING_FLAGS;
349 /* When we have a situtaion where we run out of memory and cannot
350 allocate messages, we just leave them in the BMC and run the system
351 polled until we can allocate some memory. Once we have some
352 memory, we will re-enable the interrupt. */
353 static inline void disable_si_irq(struct smi_info *smi_info)
355 if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
356 disable_irq_nosync(smi_info->irq);
357 smi_info->interrupt_disabled = 1;
361 static inline void enable_si_irq(struct smi_info *smi_info)
363 if ((smi_info->irq) && (smi_info->interrupt_disabled)) {
364 enable_irq(smi_info->irq);
365 smi_info->interrupt_disabled = 0;
369 static void handle_flags(struct smi_info *smi_info)
371 retry:
372 if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) {
373 /* Watchdog pre-timeout */
374 spin_lock(&smi_info->count_lock);
375 smi_info->watchdog_pretimeouts++;
376 spin_unlock(&smi_info->count_lock);
378 start_clear_flags(smi_info);
379 smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
380 spin_unlock(&(smi_info->si_lock));
381 ipmi_smi_watchdog_pretimeout(smi_info->intf);
382 spin_lock(&(smi_info->si_lock));
383 } else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) {
384 /* Messages available. */
385 smi_info->curr_msg = ipmi_alloc_smi_msg();
386 if (!smi_info->curr_msg) {
387 disable_si_irq(smi_info);
388 smi_info->si_state = SI_NORMAL;
389 return;
391 enable_si_irq(smi_info);
393 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
394 smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
395 smi_info->curr_msg->data_size = 2;
397 smi_info->handlers->start_transaction(
398 smi_info->si_sm,
399 smi_info->curr_msg->data,
400 smi_info->curr_msg->data_size);
401 smi_info->si_state = SI_GETTING_MESSAGES;
402 } else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) {
403 /* Events available. */
404 smi_info->curr_msg = ipmi_alloc_smi_msg();
405 if (!smi_info->curr_msg) {
406 disable_si_irq(smi_info);
407 smi_info->si_state = SI_NORMAL;
408 return;
410 enable_si_irq(smi_info);
412 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
413 smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
414 smi_info->curr_msg->data_size = 2;
416 smi_info->handlers->start_transaction(
417 smi_info->si_sm,
418 smi_info->curr_msg->data,
419 smi_info->curr_msg->data_size);
420 smi_info->si_state = SI_GETTING_EVENTS;
421 } else if (smi_info->msg_flags & OEM_DATA_AVAIL &&
422 smi_info->oem_data_avail_handler) {
423 if (smi_info->oem_data_avail_handler(smi_info))
424 goto retry;
425 } else {
426 smi_info->si_state = SI_NORMAL;
430 static void handle_transaction_done(struct smi_info *smi_info)
432 struct ipmi_smi_msg *msg;
433 #ifdef DEBUG_TIMING
434 struct timeval t;
436 do_gettimeofday(&t);
437 printk("**Done: %d.%9.9d\n", t.tv_sec, t.tv_usec);
438 #endif
439 switch (smi_info->si_state) {
440 case SI_NORMAL:
441 if (!smi_info->curr_msg)
442 break;
444 smi_info->curr_msg->rsp_size
445 = smi_info->handlers->get_result(
446 smi_info->si_sm,
447 smi_info->curr_msg->rsp,
448 IPMI_MAX_MSG_LENGTH);
450 /* Do this here becase deliver_recv_msg() releases the
451 lock, and a new message can be put in during the
452 time the lock is released. */
453 msg = smi_info->curr_msg;
454 smi_info->curr_msg = NULL;
455 deliver_recv_msg(smi_info, msg);
456 break;
458 case SI_GETTING_FLAGS:
460 unsigned char msg[4];
461 unsigned int len;
463 /* We got the flags from the SMI, now handle them. */
464 len = smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
465 if (msg[2] != 0) {
466 /* Error fetching flags, just give up for
467 now. */
468 smi_info->si_state = SI_NORMAL;
469 } else if (len < 4) {
470 /* Hmm, no flags. That's technically illegal, but
471 don't use uninitialized data. */
472 smi_info->si_state = SI_NORMAL;
473 } else {
474 smi_info->msg_flags = msg[3];
475 handle_flags(smi_info);
477 break;
480 case SI_CLEARING_FLAGS:
481 case SI_CLEARING_FLAGS_THEN_SET_IRQ:
483 unsigned char msg[3];
485 /* We cleared the flags. */
486 smi_info->handlers->get_result(smi_info->si_sm, msg, 3);
487 if (msg[2] != 0) {
488 /* Error clearing flags */
489 printk(KERN_WARNING
490 "ipmi_si: Error clearing flags: %2.2x\n",
491 msg[2]);
493 if (smi_info->si_state == SI_CLEARING_FLAGS_THEN_SET_IRQ)
494 start_enable_irq(smi_info);
495 else
496 smi_info->si_state = SI_NORMAL;
497 break;
500 case SI_GETTING_EVENTS:
502 smi_info->curr_msg->rsp_size
503 = smi_info->handlers->get_result(
504 smi_info->si_sm,
505 smi_info->curr_msg->rsp,
506 IPMI_MAX_MSG_LENGTH);
508 /* Do this here becase deliver_recv_msg() releases the
509 lock, and a new message can be put in during the
510 time the lock is released. */
511 msg = smi_info->curr_msg;
512 smi_info->curr_msg = NULL;
513 if (msg->rsp[2] != 0) {
514 /* Error getting event, probably done. */
515 msg->done(msg);
517 /* Take off the event flag. */
518 smi_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL;
519 handle_flags(smi_info);
520 } else {
521 spin_lock(&smi_info->count_lock);
522 smi_info->events++;
523 spin_unlock(&smi_info->count_lock);
525 /* Do this before we deliver the message
526 because delivering the message releases the
527 lock and something else can mess with the
528 state. */
529 handle_flags(smi_info);
531 deliver_recv_msg(smi_info, msg);
533 break;
536 case SI_GETTING_MESSAGES:
538 smi_info->curr_msg->rsp_size
539 = smi_info->handlers->get_result(
540 smi_info->si_sm,
541 smi_info->curr_msg->rsp,
542 IPMI_MAX_MSG_LENGTH);
544 /* Do this here becase deliver_recv_msg() releases the
545 lock, and a new message can be put in during the
546 time the lock is released. */
547 msg = smi_info->curr_msg;
548 smi_info->curr_msg = NULL;
549 if (msg->rsp[2] != 0) {
550 /* Error getting event, probably done. */
551 msg->done(msg);
553 /* Take off the msg flag. */
554 smi_info->msg_flags &= ~RECEIVE_MSG_AVAIL;
555 handle_flags(smi_info);
556 } else {
557 spin_lock(&smi_info->count_lock);
558 smi_info->incoming_messages++;
559 spin_unlock(&smi_info->count_lock);
561 /* Do this before we deliver the message
562 because delivering the message releases the
563 lock and something else can mess with the
564 state. */
565 handle_flags(smi_info);
567 deliver_recv_msg(smi_info, msg);
569 break;
572 case SI_ENABLE_INTERRUPTS1:
574 unsigned char msg[4];
576 /* We got the flags from the SMI, now handle them. */
577 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
578 if (msg[2] != 0) {
579 printk(KERN_WARNING
580 "ipmi_si: Could not enable interrupts"
581 ", failed get, using polled mode.\n");
582 smi_info->si_state = SI_NORMAL;
583 } else {
584 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
585 msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
586 msg[2] = msg[3] | 1; /* enable msg queue int */
587 smi_info->handlers->start_transaction(
588 smi_info->si_sm, msg, 3);
589 smi_info->si_state = SI_ENABLE_INTERRUPTS2;
591 break;
594 case SI_ENABLE_INTERRUPTS2:
596 unsigned char msg[4];
598 /* We got the flags from the SMI, now handle them. */
599 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
600 if (msg[2] != 0) {
601 printk(KERN_WARNING
602 "ipmi_si: Could not enable interrupts"
603 ", failed set, using polled mode.\n");
605 smi_info->si_state = SI_NORMAL;
606 break;
611 /* Called on timeouts and events. Timeouts should pass the elapsed
612 time, interrupts should pass in zero. */
613 static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
614 int time)
616 enum si_sm_result si_sm_result;
618 restart:
619 /* There used to be a loop here that waited a little while
620 (around 25us) before giving up. That turned out to be
621 pointless, the minimum delays I was seeing were in the 300us
622 range, which is far too long to wait in an interrupt. So
623 we just run until the state machine tells us something
624 happened or it needs a delay. */
625 si_sm_result = smi_info->handlers->event(smi_info->si_sm, time);
626 time = 0;
627 while (si_sm_result == SI_SM_CALL_WITHOUT_DELAY)
629 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
632 if (si_sm_result == SI_SM_TRANSACTION_COMPLETE)
634 spin_lock(&smi_info->count_lock);
635 smi_info->complete_transactions++;
636 spin_unlock(&smi_info->count_lock);
638 handle_transaction_done(smi_info);
639 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
641 else if (si_sm_result == SI_SM_HOSED)
643 spin_lock(&smi_info->count_lock);
644 smi_info->hosed_count++;
645 spin_unlock(&smi_info->count_lock);
647 /* Do the before return_hosed_msg, because that
648 releases the lock. */
649 smi_info->si_state = SI_NORMAL;
650 if (smi_info->curr_msg != NULL) {
651 /* If we were handling a user message, format
652 a response to send to the upper layer to
653 tell it about the error. */
654 return_hosed_msg(smi_info, IPMI_ERR_UNSPECIFIED);
656 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
659 /* We prefer handling attn over new messages. */
660 if (si_sm_result == SI_SM_ATTN)
662 unsigned char msg[2];
664 spin_lock(&smi_info->count_lock);
665 smi_info->attentions++;
666 spin_unlock(&smi_info->count_lock);
668 /* Got a attn, send down a get message flags to see
669 what's causing it. It would be better to handle
670 this in the upper layer, but due to the way
671 interrupts work with the SMI, that's not really
672 possible. */
673 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
674 msg[1] = IPMI_GET_MSG_FLAGS_CMD;
676 smi_info->handlers->start_transaction(
677 smi_info->si_sm, msg, 2);
678 smi_info->si_state = SI_GETTING_FLAGS;
679 goto restart;
682 /* If we are currently idle, try to start the next message. */
683 if (si_sm_result == SI_SM_IDLE) {
684 spin_lock(&smi_info->count_lock);
685 smi_info->idles++;
686 spin_unlock(&smi_info->count_lock);
688 si_sm_result = start_next_msg(smi_info);
689 if (si_sm_result != SI_SM_IDLE)
690 goto restart;
693 if ((si_sm_result == SI_SM_IDLE)
694 && (atomic_read(&smi_info->req_events)))
696 /* We are idle and the upper layer requested that I fetch
697 events, so do so. */
698 atomic_set(&smi_info->req_events, 0);
700 smi_info->curr_msg = ipmi_alloc_smi_msg();
701 if (!smi_info->curr_msg)
702 goto out;
704 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
705 smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
706 smi_info->curr_msg->data_size = 2;
708 smi_info->handlers->start_transaction(
709 smi_info->si_sm,
710 smi_info->curr_msg->data,
711 smi_info->curr_msg->data_size);
712 smi_info->si_state = SI_GETTING_EVENTS;
713 goto restart;
715 out:
716 return si_sm_result;
719 static void sender(void *send_info,
720 struct ipmi_smi_msg *msg,
721 int priority)
723 struct smi_info *smi_info = send_info;
724 enum si_sm_result result;
725 unsigned long flags;
726 #ifdef DEBUG_TIMING
727 struct timeval t;
728 #endif
730 if (atomic_read(&smi_info->stop_operation)) {
731 msg->rsp[0] = msg->data[0] | 4;
732 msg->rsp[1] = msg->data[1];
733 msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
734 msg->rsp_size = 3;
735 deliver_recv_msg(smi_info, msg);
736 return;
739 spin_lock_irqsave(&(smi_info->msg_lock), flags);
740 #ifdef DEBUG_TIMING
741 do_gettimeofday(&t);
742 printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec);
743 #endif
745 if (smi_info->run_to_completion) {
746 /* If we are running to completion, then throw it in
747 the list and run transactions until everything is
748 clear. Priority doesn't matter here. */
749 list_add_tail(&(msg->link), &(smi_info->xmit_msgs));
751 /* We have to release the msg lock and claim the smi
752 lock in this case, because of race conditions. */
753 spin_unlock_irqrestore(&(smi_info->msg_lock), flags);
755 spin_lock_irqsave(&(smi_info->si_lock), flags);
756 result = smi_event_handler(smi_info, 0);
757 while (result != SI_SM_IDLE) {
758 udelay(SI_SHORT_TIMEOUT_USEC);
759 result = smi_event_handler(smi_info,
760 SI_SHORT_TIMEOUT_USEC);
762 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
763 return;
764 } else {
765 if (priority > 0) {
766 list_add_tail(&(msg->link), &(smi_info->hp_xmit_msgs));
767 } else {
768 list_add_tail(&(msg->link), &(smi_info->xmit_msgs));
771 spin_unlock_irqrestore(&(smi_info->msg_lock), flags);
773 spin_lock_irqsave(&(smi_info->si_lock), flags);
774 if ((smi_info->si_state == SI_NORMAL)
775 && (smi_info->curr_msg == NULL))
777 start_next_msg(smi_info);
779 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
782 static void set_run_to_completion(void *send_info, int i_run_to_completion)
784 struct smi_info *smi_info = send_info;
785 enum si_sm_result result;
786 unsigned long flags;
788 spin_lock_irqsave(&(smi_info->si_lock), flags);
790 smi_info->run_to_completion = i_run_to_completion;
791 if (i_run_to_completion) {
792 result = smi_event_handler(smi_info, 0);
793 while (result != SI_SM_IDLE) {
794 udelay(SI_SHORT_TIMEOUT_USEC);
795 result = smi_event_handler(smi_info,
796 SI_SHORT_TIMEOUT_USEC);
800 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
803 static int ipmi_thread(void *data)
805 struct smi_info *smi_info = data;
806 unsigned long flags;
807 enum si_sm_result smi_result;
809 set_user_nice(current, 19);
810 while (!kthread_should_stop()) {
811 spin_lock_irqsave(&(smi_info->si_lock), flags);
812 smi_result = smi_event_handler(smi_info, 0);
813 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
814 if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
815 /* do nothing */
817 else if (smi_result == SI_SM_CALL_WITH_DELAY)
818 schedule();
819 else
820 schedule_timeout_interruptible(1);
822 return 0;
static void poll(void *send_info)
{
	struct smi_info *smi_info = send_info;

	/*
	 * Make sure there is some delay in the poll loop so we can
	 * drive time forward and timeout things.
	 */
	udelay(10);
	smi_event_handler(smi_info, 10);
}
838 static void request_events(void *send_info)
840 struct smi_info *smi_info = send_info;
842 if (atomic_read(&smi_info->stop_operation))
843 return;
845 atomic_set(&smi_info->req_events, 1);
848 static int initialized = 0;
850 static void smi_timeout(unsigned long data)
852 struct smi_info *smi_info = (struct smi_info *) data;
853 enum si_sm_result smi_result;
854 unsigned long flags;
855 unsigned long jiffies_now;
856 long time_diff;
857 #ifdef DEBUG_TIMING
858 struct timeval t;
859 #endif
861 if (atomic_read(&smi_info->stop_operation))
862 return;
864 spin_lock_irqsave(&(smi_info->si_lock), flags);
865 #ifdef DEBUG_TIMING
866 do_gettimeofday(&t);
867 printk("**Timer: %d.%9.9d\n", t.tv_sec, t.tv_usec);
868 #endif
869 jiffies_now = jiffies;
870 time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
871 * SI_USEC_PER_JIFFY);
872 smi_result = smi_event_handler(smi_info, time_diff);
874 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
876 smi_info->last_timeout_jiffies = jiffies_now;
878 if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
879 /* Running with interrupts, only do long timeouts. */
880 smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
881 spin_lock_irqsave(&smi_info->count_lock, flags);
882 smi_info->long_timeouts++;
883 spin_unlock_irqrestore(&smi_info->count_lock, flags);
884 goto do_add_timer;
887 /* If the state machine asks for a short delay, then shorten
888 the timer timeout. */
889 if (smi_result == SI_SM_CALL_WITH_DELAY) {
890 spin_lock_irqsave(&smi_info->count_lock, flags);
891 smi_info->short_timeouts++;
892 spin_unlock_irqrestore(&smi_info->count_lock, flags);
893 smi_info->si_timer.expires = jiffies + 1;
894 } else {
895 spin_lock_irqsave(&smi_info->count_lock, flags);
896 smi_info->long_timeouts++;
897 spin_unlock_irqrestore(&smi_info->count_lock, flags);
898 smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
901 do_add_timer:
902 add_timer(&(smi_info->si_timer));
905 static irqreturn_t si_irq_handler(int irq, void *data)
907 struct smi_info *smi_info = data;
908 unsigned long flags;
909 #ifdef DEBUG_TIMING
910 struct timeval t;
911 #endif
913 spin_lock_irqsave(&(smi_info->si_lock), flags);
915 spin_lock(&smi_info->count_lock);
916 smi_info->interrupts++;
917 spin_unlock(&smi_info->count_lock);
919 if (atomic_read(&smi_info->stop_operation))
920 goto out;
922 #ifdef DEBUG_TIMING
923 do_gettimeofday(&t);
924 printk("**Interrupt: %d.%9.9d\n", t.tv_sec, t.tv_usec);
925 #endif
926 smi_event_handler(smi_info, 0);
927 out:
928 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
929 return IRQ_HANDLED;
932 static irqreturn_t si_bt_irq_handler(int irq, void *data)
934 struct smi_info *smi_info = data;
935 /* We need to clear the IRQ flag for the BT interface. */
936 smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
937 IPMI_BT_INTMASK_CLEAR_IRQ_BIT
938 | IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
939 return si_irq_handler(irq, data);
942 static int smi_start_processing(void *send_info,
943 ipmi_smi_t intf)
945 struct smi_info *new_smi = send_info;
946 int enable = 0;
948 new_smi->intf = intf;
950 /* Set up the timer that drives the interface. */
951 setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi);
952 new_smi->last_timeout_jiffies = jiffies;
953 mod_timer(&new_smi->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
956 * Check if the user forcefully enabled the daemon.
958 if (new_smi->intf_num < num_force_kipmid)
959 enable = force_kipmid[new_smi->intf_num];
961 * The BT interface is efficient enough to not need a thread,
962 * and there is no need for a thread if we have interrupts.
964 else if ((new_smi->si_type != SI_BT) && (!new_smi->irq))
965 enable = 1;
967 if (enable) {
968 new_smi->thread = kthread_run(ipmi_thread, new_smi,
969 "kipmi%d", new_smi->intf_num);
970 if (IS_ERR(new_smi->thread)) {
971 printk(KERN_NOTICE "ipmi_si_intf: Could not start"
972 " kernel thread due to error %ld, only using"
973 " timers to drive the interface\n",
974 PTR_ERR(new_smi->thread));
975 new_smi->thread = NULL;
979 return 0;
982 static void set_maintenance_mode(void *send_info, int enable)
984 struct smi_info *smi_info = send_info;
986 if (!enable)
987 atomic_set(&smi_info->req_events, 0);
990 static struct ipmi_smi_handlers handlers =
992 .owner = THIS_MODULE,
993 .start_processing = smi_start_processing,
994 .sender = sender,
995 .request_events = request_events,
996 .set_maintenance_mode = set_maintenance_mode,
997 .set_run_to_completion = set_run_to_completion,
998 .poll = poll,
1001 /* There can be 4 IO ports passed in (with or without IRQs), 4 addresses,
1002 a default IO port, and 1 ACPI/SPMI address. That sets SI_MAX_DRIVERS */
1004 static LIST_HEAD(smi_infos);
1005 static DEFINE_MUTEX(smi_infos_lock);
1006 static int smi_num; /* Used to sequence the SMIs */
1008 #define DEFAULT_REGSPACING 1
1010 static int si_trydefaults = 1;
1011 static char *si_type[SI_MAX_PARMS];
1012 #define MAX_SI_TYPE_STR 30
1013 static char si_type_str[MAX_SI_TYPE_STR];
1014 static unsigned long addrs[SI_MAX_PARMS];
1015 static int num_addrs;
1016 static unsigned int ports[SI_MAX_PARMS];
1017 static int num_ports;
1018 static int irqs[SI_MAX_PARMS];
1019 static int num_irqs;
1020 static int regspacings[SI_MAX_PARMS];
1021 static int num_regspacings = 0;
1022 static int regsizes[SI_MAX_PARMS];
1023 static int num_regsizes = 0;
1024 static int regshifts[SI_MAX_PARMS];
1025 static int num_regshifts = 0;
1026 static int slave_addrs[SI_MAX_PARMS];
1027 static int num_slave_addrs = 0;
1029 #define IPMI_IO_ADDR_SPACE 0
1030 #define IPMI_MEM_ADDR_SPACE 1
1031 static char *addr_space_to_str[] = { "I/O", "mem" };
1033 static int hotmod_handler(const char *val, struct kernel_param *kp);
1035 module_param_call(hotmod, hotmod_handler, NULL, NULL, 0200);
1036 MODULE_PARM_DESC(hotmod, "Add and remove interfaces. See"
1037 " Documentation/IPMI.txt in the kernel sources for the"
1038 " gory details.");
1040 module_param_named(trydefaults, si_trydefaults, bool, 0);
1041 MODULE_PARM_DESC(trydefaults, "Setting this to 'false' will disable the"
1042 " default scan of the KCS and SMIC interface at the standard"
1043 " address");
1044 module_param_string(type, si_type_str, MAX_SI_TYPE_STR, 0);
1045 MODULE_PARM_DESC(type, "Defines the type of each interface, each"
1046 " interface separated by commas. The types are 'kcs',"
1047 " 'smic', and 'bt'. For example si_type=kcs,bt will set"
1048 " the first interface to kcs and the second to bt");
1049 module_param_array(addrs, long, &num_addrs, 0);
1050 MODULE_PARM_DESC(addrs, "Sets the memory address of each interface, the"
1051 " addresses separated by commas. Only use if an interface"
1052 " is in memory. Otherwise, set it to zero or leave"
1053 " it blank.");
1054 module_param_array(ports, int, &num_ports, 0);
1055 MODULE_PARM_DESC(ports, "Sets the port address of each interface, the"
1056 " addresses separated by commas. Only use if an interface"
1057 " is a port. Otherwise, set it to zero or leave"
1058 " it blank.");
1059 module_param_array(irqs, int, &num_irqs, 0);
1060 MODULE_PARM_DESC(irqs, "Sets the interrupt of each interface, the"
1061 " addresses separated by commas. Only use if an interface"
1062 " has an interrupt. Otherwise, set it to zero or leave"
1063 " it blank.");
1064 module_param_array(regspacings, int, &num_regspacings, 0);
1065 MODULE_PARM_DESC(regspacings, "The number of bytes between the start address"
1066 " and each successive register used by the interface. For"
1067 " instance, if the start address is 0xca2 and the spacing"
1068 " is 2, then the second address is at 0xca4. Defaults"
1069 " to 1.");
1070 module_param_array(regsizes, int, &num_regsizes, 0);
1071 MODULE_PARM_DESC(regsizes, "The size of the specific IPMI register in bytes."
1072 " This should generally be 1, 2, 4, or 8 for an 8-bit,"
1073 " 16-bit, 32-bit, or 64-bit register. Use this if you"
1074 " the 8-bit IPMI register has to be read from a larger"
1075 " register.");
1076 module_param_array(regshifts, int, &num_regshifts, 0);
1077 MODULE_PARM_DESC(regshifts, "The amount to shift the data read from the."
1078 " IPMI register, in bits. For instance, if the data"
1079 " is read from a 32-bit word and the IPMI data is in"
1080 " bit 8-15, then the shift would be 8");
1081 module_param_array(slave_addrs, int, &num_slave_addrs, 0);
1082 MODULE_PARM_DESC(slave_addrs, "Set the default IPMB slave address for"
1083 " the controller. Normally this is 0x20, but can be"
1084 " overridden by this parm. This is an array indexed"
1085 " by interface number.");
1086 module_param_array(force_kipmid, int, &num_force_kipmid, 0);
1087 MODULE_PARM_DESC(force_kipmid, "Force the kipmi daemon to be enabled (1) or"
1088 " disabled(0). Normally the IPMI driver auto-detects"
1089 " this, but the value may be overridden by this parm.");
1090 module_param(unload_when_empty, int, 0);
1091 MODULE_PARM_DESC(unload_when_empty, "Unload the module if no interfaces are"
1092 " specified or found, default is 1. Setting to 0"
1093 " is useful for hot add of devices using hotmod.");
1096 static void std_irq_cleanup(struct smi_info *info)
1098 if (info->si_type == SI_BT)
1099 /* Disable the interrupt in the BT interface. */
1100 info->io.outputb(&info->io, IPMI_BT_INTMASK_REG, 0);
1101 free_irq(info->irq, info);
1104 static int std_irq_setup(struct smi_info *info)
1106 int rv;
1108 if (!info->irq)
1109 return 0;
1111 if (info->si_type == SI_BT) {
1112 rv = request_irq(info->irq,
1113 si_bt_irq_handler,
1114 IRQF_DISABLED,
1115 DEVICE_NAME,
1116 info);
1117 if (!rv)
1118 /* Enable the interrupt in the BT interface. */
1119 info->io.outputb(&info->io, IPMI_BT_INTMASK_REG,
1120 IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
1121 } else
1122 rv = request_irq(info->irq,
1123 si_irq_handler,
1124 IRQF_DISABLED,
1125 DEVICE_NAME,
1126 info);
1127 if (rv) {
1128 printk(KERN_WARNING
1129 "ipmi_si: %s unable to claim interrupt %d,"
1130 " running polled\n",
1131 DEVICE_NAME, info->irq);
1132 info->irq = 0;
1133 } else {
1134 info->irq_cleanup = std_irq_cleanup;
1135 printk(" Using irq %d\n", info->irq);
1138 return rv;
1141 static unsigned char port_inb(struct si_sm_io *io, unsigned int offset)
1143 unsigned int addr = io->addr_data;
1145 return inb(addr + (offset * io->regspacing));
1148 static void port_outb(struct si_sm_io *io, unsigned int offset,
1149 unsigned char b)
1151 unsigned int addr = io->addr_data;
1153 outb(b, addr + (offset * io->regspacing));
1156 static unsigned char port_inw(struct si_sm_io *io, unsigned int offset)
1158 unsigned int addr = io->addr_data;
1160 return (inw(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
1163 static void port_outw(struct si_sm_io *io, unsigned int offset,
1164 unsigned char b)
1166 unsigned int addr = io->addr_data;
1168 outw(b << io->regshift, addr + (offset * io->regspacing));
1171 static unsigned char port_inl(struct si_sm_io *io, unsigned int offset)
1173 unsigned int addr = io->addr_data;
1175 return (inl(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
1178 static void port_outl(struct si_sm_io *io, unsigned int offset,
1179 unsigned char b)
1181 unsigned int addr = io->addr_data;
1183 outl(b << io->regshift, addr+(offset * io->regspacing));
1186 static void port_cleanup(struct smi_info *info)
1188 unsigned int addr = info->io.addr_data;
1189 int idx;
1191 if (addr) {
1192 for (idx = 0; idx < info->io_size; idx++) {
1193 release_region(addr + idx * info->io.regspacing,
1194 info->io.regsize);
1199 static int port_setup(struct smi_info *info)
1201 unsigned int addr = info->io.addr_data;
1202 int idx;
1204 if (!addr)
1205 return -ENODEV;
1207 info->io_cleanup = port_cleanup;
1209 /* Figure out the actual inb/inw/inl/etc routine to use based
1210 upon the register size. */
1211 switch (info->io.regsize) {
1212 case 1:
1213 info->io.inputb = port_inb;
1214 info->io.outputb = port_outb;
1215 break;
1216 case 2:
1217 info->io.inputb = port_inw;
1218 info->io.outputb = port_outw;
1219 break;
1220 case 4:
1221 info->io.inputb = port_inl;
1222 info->io.outputb = port_outl;
1223 break;
1224 default:
1225 printk("ipmi_si: Invalid register size: %d\n",
1226 info->io.regsize);
1227 return -EINVAL;
1230 /* Some BIOSes reserve disjoint I/O regions in their ACPI
1231 * tables. This causes problems when trying to register the
1232 * entire I/O region. Therefore we must register each I/O
1233 * port separately.
1235 for (idx = 0; idx < info->io_size; idx++) {
1236 if (request_region(addr + idx * info->io.regspacing,
1237 info->io.regsize, DEVICE_NAME) == NULL) {
1238 /* Undo allocations */
1239 while (idx--) {
1240 release_region(addr + idx * info->io.regspacing,
1241 info->io.regsize);
1243 return -EIO;
1246 return 0;
1249 static unsigned char intf_mem_inb(struct si_sm_io *io, unsigned int offset)
1251 return readb((io->addr)+(offset * io->regspacing));
1254 static void intf_mem_outb(struct si_sm_io *io, unsigned int offset,
1255 unsigned char b)
1257 writeb(b, (io->addr)+(offset * io->regspacing));
1260 static unsigned char intf_mem_inw(struct si_sm_io *io, unsigned int offset)
1262 return (readw((io->addr)+(offset * io->regspacing)) >> io->regshift)
1263 & 0xff;
1266 static void intf_mem_outw(struct si_sm_io *io, unsigned int offset,
1267 unsigned char b)
1269 writeb(b << io->regshift, (io->addr)+(offset * io->regspacing));
1272 static unsigned char intf_mem_inl(struct si_sm_io *io, unsigned int offset)
1274 return (readl((io->addr)+(offset * io->regspacing)) >> io->regshift)
1275 & 0xff;
1278 static void intf_mem_outl(struct si_sm_io *io, unsigned int offset,
1279 unsigned char b)
1281 writel(b << io->regshift, (io->addr)+(offset * io->regspacing));
#ifdef readq
/* 64-bit MMIO accessors, only on architectures providing readq/writeq. */
static unsigned char mem_inq(struct si_sm_io *io, unsigned int offset)
{
	return (readq(io->addr + (offset * io->regspacing)) >> io->regshift)
		& 0xff;
}

static void mem_outq(struct si_sm_io *io, unsigned int offset,
		     unsigned char b)
{
	writeq(b << io->regshift, io->addr + (offset * io->regspacing));
}
#endif
1298 static void mem_cleanup(struct smi_info *info)
1300 unsigned long addr = info->io.addr_data;
1301 int mapsize;
1303 if (info->io.addr) {
1304 iounmap(info->io.addr);
1306 mapsize = ((info->io_size * info->io.regspacing)
1307 - (info->io.regspacing - info->io.regsize));
1309 release_mem_region(addr, mapsize);
1313 static int mem_setup(struct smi_info *info)
1315 unsigned long addr = info->io.addr_data;
1316 int mapsize;
1318 if (!addr)
1319 return -ENODEV;
1321 info->io_cleanup = mem_cleanup;
1323 /* Figure out the actual readb/readw/readl/etc routine to use based
1324 upon the register size. */
1325 switch (info->io.regsize) {
1326 case 1:
1327 info->io.inputb = intf_mem_inb;
1328 info->io.outputb = intf_mem_outb;
1329 break;
1330 case 2:
1331 info->io.inputb = intf_mem_inw;
1332 info->io.outputb = intf_mem_outw;
1333 break;
1334 case 4:
1335 info->io.inputb = intf_mem_inl;
1336 info->io.outputb = intf_mem_outl;
1337 break;
1338 #ifdef readq
1339 case 8:
1340 info->io.inputb = mem_inq;
1341 info->io.outputb = mem_outq;
1342 break;
1343 #endif
1344 default:
1345 printk("ipmi_si: Invalid register size: %d\n",
1346 info->io.regsize);
1347 return -EINVAL;
1350 /* Calculate the total amount of memory to claim. This is an
1351 * unusual looking calculation, but it avoids claiming any
1352 * more memory than it has to. It will claim everything
1353 * between the first address to the end of the last full
1354 * register. */
1355 mapsize = ((info->io_size * info->io.regspacing)
1356 - (info->io.regspacing - info->io.regsize));
1358 if (request_mem_region(addr, mapsize, DEVICE_NAME) == NULL)
1359 return -EIO;
1361 info->io.addr = ioremap(addr, mapsize);
1362 if (info->io.addr == NULL) {
1363 release_mem_region(addr, mapsize);
1364 return -EIO;
1366 return 0;
/*
 * Parms come in as <op1>[:op2[:op3...]].  ops are:
 *   add|remove,kcs|bt|smic,mem|i/o,<address>[,<opt1>[,<opt2>[,...]]]
 * Options are:
 *   rsp=<regspacing>
 *   rsi=<regsize>
 *   rsh=<regshift>
 *   irq=<irq>
 *   ipmb=<ipmb addr>
 */
enum hotmod_op { HM_ADD, HM_REMOVE };

/* One name/value pair in a NULL-name-terminated keyword table below. */
struct hotmod_vals {
	char *name;
	int  val;
};

/* Operation keywords for the first hotmod field. */
static struct hotmod_vals hotmod_ops[] = {
	{ "add",	HM_ADD },
	{ "remove",	HM_REMOVE },
	{ NULL }
};

/* Interface-type keywords for the second hotmod field. */
static struct hotmod_vals hotmod_si[] = {
	{ "kcs",	SI_KCS },
	{ "smic",	SI_SMIC },
	{ "bt",		SI_BT },
	{ NULL }
};

/* Address-space keywords for the third hotmod field. */
static struct hotmod_vals hotmod_as[] = {
	{ "mem",	IPMI_MEM_ADDR_SPACE },
	{ "i/o",	IPMI_IO_ADDR_SPACE },
	{ NULL }
};
/*
 * Compare two NUL-terminated strings ignoring ASCII case, strcmp-style:
 * returns 0 on equality, <0 if s1 sorts first, >0 if s2 sorts first.
 * A string that is a strict prefix of the other sorts first.
 *
 * Fix: despite its name, the original compared case-sensitively
 * (plain *s1 != *s2), so e.g. si_type=KCS never matched "kcs".
 */
static int ipmi_strcasecmp(const char *s1, const char *s2)
{
	while (*s1 || *s2) {
		int c1, c2;

		if (!*s1)
			return -1;
		if (!*s2)
			return 1;
		c1 = tolower(*(const unsigned char *) s1);
		c2 = tolower(*(const unsigned char *) s2);
		if (c1 != c2)
			return c1 - c2;
		s1++;
		s2++;
	}
	return 0;
}
1414 static int parse_str(struct hotmod_vals *v, int *val, char *name, char **curr)
1416 char *s;
1417 int i;
1419 s = strchr(*curr, ',');
1420 if (!s) {
1421 printk(KERN_WARNING PFX "No hotmod %s given.\n", name);
1422 return -EINVAL;
1424 *s = '\0';
1425 s++;
1426 for (i = 0; hotmod_ops[i].name; i++) {
1427 if (ipmi_strcasecmp(*curr, v[i].name) == 0) {
1428 *val = v[i].val;
1429 *curr = s;
1430 return 0;
1434 printk(KERN_WARNING PFX "Invalid hotmod %s '%s'\n", name, *curr);
1435 return -EINVAL;
/*
 * Module-parameter "set" handler for hotmod: parses one or more
 * colon-separated add/remove operations (syntax in the comment above
 * hotmod_ops) and adds or removes interfaces accordingly.  Returns 0
 * on success, -EINVAL/-ENOMEM on error.  The input is duplicated
 * because parsing mutates it in place.
 */
static int hotmod_handler(const char *val, struct kernel_param *kp)
{
	char *str = kstrdup(val, GFP_KERNEL);
	int  rv = -EINVAL;
	char *next, *curr, *s, *n, *o;
	enum hotmod_op op;
	enum si_type si_type;
	int  addr_space;
	unsigned long addr;
	int regspacing;
	int regsize;
	int regshift;
	int irq;
	int ipmb;
	int ival;
	struct smi_info *info;

	if (!str)
		return -ENOMEM;

	/* Kill any trailing spaces, as we can get a "\n" from echo. */
	ival = strlen(str) - 1;
	while ((ival >= 0) && isspace(str[ival])) {
		str[ival] = '\0';
		ival--;
	}

	/* One loop iteration per colon-separated operation. */
	for (curr = str; curr; curr = next) {
		/* Per-operation option defaults. */
		regspacing = 1;
		regsize = 1;
		regshift = 0;
		irq = 0;
		ipmb = 0x20;

		next = strchr(curr, ':');
		if (next) {
			*next = '\0';
			next++;
		}

		rv = parse_str(hotmod_ops, &ival, "operation", &curr);
		if (rv)
			break;
		op = ival;

		rv = parse_str(hotmod_si, &ival, "interface type", &curr);
		if (rv)
			break;
		si_type = ival;

		rv = parse_str(hotmod_as, &addr_space, "address space", &curr);
		if (rv)
			break;

		/* Split off the address; s points at the options, if any. */
		s = strchr(curr, ',');
		if (s) {
			*s = '\0';
			s++;
		}
		addr = simple_strtoul(curr, &n, 0);
		if ((*n != '\0') || (*curr == '\0')) {
			printk(KERN_WARNING PFX "Invalid hotmod address"
			       " '%s'\n", curr);
			break;
		}

		/* Parse the optional key=value pairs. */
		while (s) {
			curr = s;
			s = strchr(curr, ',');
			if (s) {
				*s = '\0';
				s++;
			}
			o = strchr(curr, '=');
			if (o) {
				*o = '\0';
				o++;
			}
/* Match one integer option; 'o' is its value string (NULL if absent). */
#define HOTMOD_INT_OPT(name, val) \
			if (ipmi_strcasecmp(curr, name) == 0) { \
				if (!o) { \
					printk(KERN_WARNING PFX \
					       "No option given for '%s'\n", \
					       curr); \
					goto out; \
				} \
				val = simple_strtoul(o, &n, 0); \
				if ((*n != '\0') || (*o == '\0')) { \
					printk(KERN_WARNING PFX \
					       "Bad option given for '%s'\n", \
					       curr); \
					goto out; \
				} \
			}

			HOTMOD_INT_OPT("rsp", regspacing)
			else HOTMOD_INT_OPT("rsi", regsize)
			else HOTMOD_INT_OPT("rsh", regshift)
			else HOTMOD_INT_OPT("irq", irq)
			else HOTMOD_INT_OPT("ipmb", ipmb)
			else {
				printk(KERN_WARNING PFX
				       "Invalid hotmod option '%s'\n",
				       curr);
				goto out;
			}
#undef HOTMOD_INT_OPT
		}

		if (op == HM_ADD) {
			info = kzalloc(sizeof(*info), GFP_KERNEL);
			if (!info) {
				rv = -ENOMEM;
				goto out;
			}

			info->addr_source = "hotmod";
			info->si_type = si_type;
			info->io.addr_data = addr;
			info->io.addr_type = addr_space;
			if (addr_space == IPMI_MEM_ADDR_SPACE)
				info->io_setup = mem_setup;
			else
				info->io_setup = port_setup;

			info->io.addr = NULL;
			info->io.regspacing = regspacing;
			if (!info->io.regspacing)
				info->io.regspacing = DEFAULT_REGSPACING;
			info->io.regsize = regsize;
			if (!info->io.regsize)
				info->io.regsize = DEFAULT_REGSPACING;
			info->io.regshift = regshift;
			info->irq = irq;
			if (info->irq)
				info->irq_setup = std_irq_setup;
			info->slave_addr = ipmb;

			try_smi_init(info);
		} else {
			/* remove: drop every registered interface matching
			   the given type/space/address triple. */
			struct smi_info *e, *tmp_e;

			mutex_lock(&smi_infos_lock);
			list_for_each_entry_safe(e, tmp_e, &smi_infos, link) {
				if (e->io.addr_type != addr_space)
					continue;
				if (e->si_type != si_type)
					continue;
				if (e->io.addr_data == addr)
					cleanup_one_si(e);
			}
			mutex_unlock(&smi_infos_lock);
		}
	}
 out:
	kfree(str);
	return rv;
}
/*
 * Register interfaces described by the module parameters (type, ports,
 * addrs, irqs, regspacings, ...).  A slot is considered configured when
 * either its port or its memory address is non-zero; invalid slots are
 * warned about and skipped.
 */
static __devinit void hardcode_find_bmc(void)
{
	int i;
	struct smi_info *info;

	for (i = 0; i < SI_MAX_PARMS; i++) {
		if (!ports[i] && !addrs[i])
			continue;

		info = kzalloc(sizeof(*info), GFP_KERNEL);
		if (!info)
			return;

		info->addr_source = "hardcoded";

		/* Unspecified type defaults to KCS. */
		if (!si_type[i] || ipmi_strcasecmp(si_type[i], "kcs") == 0) {
			info->si_type = SI_KCS;
		} else if (ipmi_strcasecmp(si_type[i], "smic") == 0) {
			info->si_type = SI_SMIC;
		} else if (ipmi_strcasecmp(si_type[i], "bt") == 0) {
			info->si_type = SI_BT;
		} else {
			printk(KERN_WARNING
			       "ipmi_si: Interface type specified "
			       "for interface %d, was invalid: %s\n",
			       i, si_type[i]);
			kfree(info);
			continue;
		}

		if (ports[i]) {
			/* An I/O port */
			info->io_setup = port_setup;
			info->io.addr_data = ports[i];
			info->io.addr_type = IPMI_IO_ADDR_SPACE;
		} else if (addrs[i]) {
			/* A memory port */
			info->io_setup = mem_setup;
			info->io.addr_data = addrs[i];
			info->io.addr_type = IPMI_MEM_ADDR_SPACE;
		} else {
			printk(KERN_WARNING
			       "ipmi_si: Interface type specified "
			       "for interface %d, "
			       "but port and address were not set or "
			       "set to zero.\n", i);
			kfree(info);
			continue;
		}

		info->io.addr = NULL;
		info->io.regspacing = regspacings[i];
		if (!info->io.regspacing)
			info->io.regspacing = DEFAULT_REGSPACING;
		info->io.regsize = regsizes[i];
		if (!info->io.regsize)
			info->io.regsize = DEFAULT_REGSPACING;
		info->io.regshift = regshifts[i];
		info->irq = irqs[i];
		if (info->irq)
			info->irq_setup = std_irq_setup;

		try_smi_init(info);
	}
}
1664 #ifdef CONFIG_ACPI
1666 #include <linux/acpi.h>
/* Once we get an ACPI failure, we don't try any more, because we go
   through the tables sequentially.  Once we don't find a table, there
   are no more. */
/* Statics are zero-initialized; the explicit "= 0" was redundant. */
static int acpi_failure;
1673 /* For GPE-type interrupts. */
1674 static u32 ipmi_acpi_gpe(void *context)
1676 struct smi_info *smi_info = context;
1677 unsigned long flags;
1678 #ifdef DEBUG_TIMING
1679 struct timeval t;
1680 #endif
1682 spin_lock_irqsave(&(smi_info->si_lock), flags);
1684 spin_lock(&smi_info->count_lock);
1685 smi_info->interrupts++;
1686 spin_unlock(&smi_info->count_lock);
1688 if (atomic_read(&smi_info->stop_operation))
1689 goto out;
1691 #ifdef DEBUG_TIMING
1692 do_gettimeofday(&t);
1693 printk("**ACPI_GPE: %d.%9.9d\n", t.tv_sec, t.tv_usec);
1694 #endif
1695 smi_event_handler(smi_info, 0);
1696 out:
1697 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
1699 return ACPI_INTERRUPT_HANDLED;
1702 static void acpi_gpe_irq_cleanup(struct smi_info *info)
1704 if (!info->irq)
1705 return;
1707 acpi_remove_gpe_handler(NULL, info->irq, &ipmi_acpi_gpe);
1710 static int acpi_gpe_irq_setup(struct smi_info *info)
1712 acpi_status status;
1714 if (!info->irq)
1715 return 0;
1717 /* FIXME - is level triggered right? */
1718 status = acpi_install_gpe_handler(NULL,
1719 info->irq,
1720 ACPI_GPE_LEVEL_TRIGGERED,
1721 &ipmi_acpi_gpe,
1722 info);
1723 if (status != AE_OK) {
1724 printk(KERN_WARNING
1725 "ipmi_si: %s unable to claim ACPI GPE %d,"
1726 " running polled\n",
1727 DEVICE_NAME, info->irq);
1728 info->irq = 0;
1729 return -EINVAL;
1730 } else {
1731 info->irq_cleanup = acpi_gpe_irq_cleanup;
1732 printk(" Using ACPI GPE %d\n", info->irq);
1733 return 0;
/*
 * Defined at
 * http://h21007.www2.hp.com/dspp/files/unprotected/devresource/Docs/TechPapers/IA64/hpspmi.pdf
 */
struct SPMITable {
	s8	Signature[4];
	u32	Length;
	u8	Revision;
	u8	Checksum;
	s8	OEMID[6];
	s8	OEMTableID[8];
	s8	OEMRevision[4];
	s8	CreatorID[4];
	s8	CreatorRevision[4];
	u8	InterfaceType;	/* 1=KCS, 2=SMIC, 3=BT (see try_init_acpi) */
	u8	IPMIlegacy;
	s16	SpecificationRevision;

	/*
	 * Bit 0 - SCI interrupt supported
	 * Bit 1 - I/O APIC/SAPIC
	 */
	u8	InterruptType;

	/* If bit 0 of InterruptType is set, then this is the SCI
	   interrupt in the GPEx_STS register. */
	u8	GPE;

	s16	Reserved;

	/* If bit 1 of InterruptType is set, then this is the I/O
	   APIC/SAPIC interrupt. */
	u32	GlobalSystemInterrupt;

	/* The actual register address. */
	struct acpi_generic_address addr;

	u8	UID[4];

	s8      spmi_id[1]; /* A '\0' terminated array starts here. */
};
1779 static __devinit int try_init_acpi(struct SPMITable *spmi)
1781 struct smi_info *info;
1782 char *io_type;
1783 u8 addr_space;
1785 if (spmi->IPMIlegacy != 1) {
1786 printk(KERN_INFO "IPMI: Bad SPMI legacy %d\n", spmi->IPMIlegacy);
1787 return -ENODEV;
1790 if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
1791 addr_space = IPMI_MEM_ADDR_SPACE;
1792 else
1793 addr_space = IPMI_IO_ADDR_SPACE;
1795 info = kzalloc(sizeof(*info), GFP_KERNEL);
1796 if (!info) {
1797 printk(KERN_ERR "ipmi_si: Could not allocate SI data (3)\n");
1798 return -ENOMEM;
1801 info->addr_source = "ACPI";
1803 /* Figure out the interface type. */
1804 switch (spmi->InterfaceType)
1806 case 1: /* KCS */
1807 info->si_type = SI_KCS;
1808 break;
1809 case 2: /* SMIC */
1810 info->si_type = SI_SMIC;
1811 break;
1812 case 3: /* BT */
1813 info->si_type = SI_BT;
1814 break;
1815 default:
1816 printk(KERN_INFO "ipmi_si: Unknown ACPI/SPMI SI type %d\n",
1817 spmi->InterfaceType);
1818 kfree(info);
1819 return -EIO;
1822 if (spmi->InterruptType & 1) {
1823 /* We've got a GPE interrupt. */
1824 info->irq = spmi->GPE;
1825 info->irq_setup = acpi_gpe_irq_setup;
1826 } else if (spmi->InterruptType & 2) {
1827 /* We've got an APIC/SAPIC interrupt. */
1828 info->irq = spmi->GlobalSystemInterrupt;
1829 info->irq_setup = std_irq_setup;
1830 } else {
1831 /* Use the default interrupt setting. */
1832 info->irq = 0;
1833 info->irq_setup = NULL;
1836 if (spmi->addr.register_bit_width) {
1837 /* A (hopefully) properly formed register bit width. */
1838 info->io.regspacing = spmi->addr.register_bit_width / 8;
1839 } else {
1840 info->io.regspacing = DEFAULT_REGSPACING;
1842 info->io.regsize = info->io.regspacing;
1843 info->io.regshift = spmi->addr.register_bit_offset;
1845 if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
1846 io_type = "memory";
1847 info->io_setup = mem_setup;
1848 info->io.addr_type = IPMI_IO_ADDR_SPACE;
1849 } else if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
1850 io_type = "I/O";
1851 info->io_setup = port_setup;
1852 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
1853 } else {
1854 kfree(info);
1855 printk("ipmi_si: Unknown ACPI I/O Address type\n");
1856 return -EIO;
1858 info->io.addr_data = spmi->addr.address;
1860 try_smi_init(info);
1862 return 0;
1865 static __devinit void acpi_find_bmc(void)
1867 acpi_status status;
1868 struct SPMITable *spmi;
1869 int i;
1871 if (acpi_disabled)
1872 return;
1874 if (acpi_failure)
1875 return;
1877 for (i = 0; ; i++) {
1878 status = acpi_get_firmware_table("SPMI", i+1,
1879 ACPI_LOGICAL_ADDRESSING,
1880 (struct acpi_table_header **)
1881 &spmi);
1882 if (status != AE_OK)
1883 return;
1885 try_init_acpi(spmi);
1888 #endif
1890 #ifdef CONFIG_DMI
1891 struct dmi_ipmi_data
1893 u8 type;
1894 u8 addr_space;
1895 unsigned long base_addr;
1896 u8 irq;
1897 u8 offset;
1898 u8 slave_addr;
/*
 * Decode a raw DMI type-38 record into *dmi.  Records at least 0x12
 * bytes long follow the current SMBIOS spec (spacing/IRQ fields);
 * shorter ones use the old layout.  Returns 0 on success, -EIO for an
 * unrecognized register-spacing encoding.
 *
 * NOTE(review): the base address is read with
 * memcpy(..., sizeof(unsigned long)), so only 4 of the 8 address bytes
 * are read on 32-bit kernels — presumably fine for addresses below 4G;
 * confirm against the SMBIOS type-38 layout.
 */
static int __devinit decode_dmi(struct dmi_header *dm,
				struct dmi_ipmi_data *dmi)
{
	u8              *data = (u8 *)dm;
	unsigned long  	base_addr;
	u8		reg_spacing;
	u8              len = dm->length;

	dmi->type = data[4];

	memcpy(&base_addr, data+8, sizeof(unsigned long));
	if (len >= 0x11) {
		/* Bit 0 of the address selects I/O vs memory space. */
		if (base_addr & 1) {
			/* I/O */
			base_addr &= 0xFFFE;
			dmi->addr_space = IPMI_IO_ADDR_SPACE;
		}
		else {
			/* Memory */
			dmi->addr_space = IPMI_MEM_ADDR_SPACE;
		}
		/* If bit 4 of byte 0x10 is set, then the lsb for the address
		   is odd. */
		dmi->base_addr = base_addr | ((data[0x10] & 0x10) >> 4);

		dmi->irq = data[0x11];

		/* The top two bits of byte 0x10 hold the register spacing. */
		reg_spacing = (data[0x10] & 0xC0) >> 6;
		switch(reg_spacing){
		case 0x00: /* Byte boundaries */
		    dmi->offset = 1;
		    break;
		case 0x01: /* 32-bit boundaries */
		    dmi->offset = 4;
		    break;
		case 0x02: /* 16-byte boundaries */
		    dmi->offset = 16;
		    break;
		default:
		    /* Some other interface, just ignore it. */
		    return -EIO;
		}
	} else {
		/* Old DMI spec. */
		/* Note that technically, the lower bit of the base
		 * address should be 1 if the address is I/O and 0 if
		 * the address is in memory.  So many systems get that
		 * wrong (and all that I have seen are I/O) so we just
		 * ignore that bit and assume I/O.  Systems that use
		 * memory should use the newer spec, anyway. */
		dmi->base_addr = base_addr & 0xfffe;
		dmi->addr_space = IPMI_IO_ADDR_SPACE;
		dmi->offset = 1;
	}

	dmi->slave_addr = data[6];

	return 0;
}
1962 static __devinit void try_init_dmi(struct dmi_ipmi_data *ipmi_data)
1964 struct smi_info *info;
1966 info = kzalloc(sizeof(*info), GFP_KERNEL);
1967 if (!info) {
1968 printk(KERN_ERR
1969 "ipmi_si: Could not allocate SI data\n");
1970 return;
1973 info->addr_source = "SMBIOS";
1975 switch (ipmi_data->type) {
1976 case 0x01: /* KCS */
1977 info->si_type = SI_KCS;
1978 break;
1979 case 0x02: /* SMIC */
1980 info->si_type = SI_SMIC;
1981 break;
1982 case 0x03: /* BT */
1983 info->si_type = SI_BT;
1984 break;
1985 default:
1986 return;
1989 switch (ipmi_data->addr_space) {
1990 case IPMI_MEM_ADDR_SPACE:
1991 info->io_setup = mem_setup;
1992 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
1993 break;
1995 case IPMI_IO_ADDR_SPACE:
1996 info->io_setup = port_setup;
1997 info->io.addr_type = IPMI_IO_ADDR_SPACE;
1998 break;
2000 default:
2001 kfree(info);
2002 printk(KERN_WARNING
2003 "ipmi_si: Unknown SMBIOS I/O Address type: %d.\n",
2004 ipmi_data->addr_space);
2005 return;
2007 info->io.addr_data = ipmi_data->base_addr;
2009 info->io.regspacing = ipmi_data->offset;
2010 if (!info->io.regspacing)
2011 info->io.regspacing = DEFAULT_REGSPACING;
2012 info->io.regsize = DEFAULT_REGSPACING;
2013 info->io.regshift = 0;
2015 info->slave_addr = ipmi_data->slave_addr;
2017 info->irq = ipmi_data->irq;
2018 if (info->irq)
2019 info->irq_setup = std_irq_setup;
2021 try_smi_init(info);
2024 static void __devinit dmi_find_bmc(void)
2026 struct dmi_device *dev = NULL;
2027 struct dmi_ipmi_data data;
2028 int rv;
2030 while ((dev = dmi_find_device(DMI_DEV_TYPE_IPMI, NULL, dev))) {
2031 memset(&data, 0, sizeof(data));
2032 rv = decode_dmi((struct dmi_header *) dev->device_data, &data);
2033 if (!rv)
2034 try_init_dmi(&data);
2037 #endif /* CONFIG_DMI */
2039 #ifdef CONFIG_PCI
2041 #define PCI_ERMC_CLASSCODE 0x0C0700
2042 #define PCI_ERMC_CLASSCODE_MASK 0xffffff00
2043 #define PCI_ERMC_CLASSCODE_TYPE_MASK 0xff
2044 #define PCI_ERMC_CLASSCODE_TYPE_SMIC 0x00
2045 #define PCI_ERMC_CLASSCODE_TYPE_KCS 0x01
2046 #define PCI_ERMC_CLASSCODE_TYPE_BT 0x02
2048 #define PCI_HP_VENDOR_ID 0x103C
2049 #define PCI_MMC_DEVICE_ID 0x121A
2050 #define PCI_MMC_ADDR_CW 0x10
2052 static void ipmi_pci_cleanup(struct smi_info *info)
2054 struct pci_dev *pdev = info->addr_source_data;
2056 pci_disable_device(pdev);
2059 static int __devinit ipmi_pci_probe(struct pci_dev *pdev,
2060 const struct pci_device_id *ent)
2062 int rv;
2063 int class_type = pdev->class & PCI_ERMC_CLASSCODE_TYPE_MASK;
2064 struct smi_info *info;
2065 int first_reg_offset = 0;
2067 info = kzalloc(sizeof(*info), GFP_KERNEL);
2068 if (!info)
2069 return -ENOMEM;
2071 info->addr_source = "PCI";
2073 switch (class_type) {
2074 case PCI_ERMC_CLASSCODE_TYPE_SMIC:
2075 info->si_type = SI_SMIC;
2076 break;
2078 case PCI_ERMC_CLASSCODE_TYPE_KCS:
2079 info->si_type = SI_KCS;
2080 break;
2082 case PCI_ERMC_CLASSCODE_TYPE_BT:
2083 info->si_type = SI_BT;
2084 break;
2086 default:
2087 kfree(info);
2088 printk(KERN_INFO "ipmi_si: %s: Unknown IPMI type: %d\n",
2089 pci_name(pdev), class_type);
2090 return -ENOMEM;
2093 rv = pci_enable_device(pdev);
2094 if (rv) {
2095 printk(KERN_ERR "ipmi_si: %s: couldn't enable PCI device\n",
2096 pci_name(pdev));
2097 kfree(info);
2098 return rv;
2101 info->addr_source_cleanup = ipmi_pci_cleanup;
2102 info->addr_source_data = pdev;
2104 if (pdev->subsystem_vendor == PCI_HP_VENDOR_ID)
2105 first_reg_offset = 1;
2107 if (pci_resource_flags(pdev, 0) & IORESOURCE_IO) {
2108 info->io_setup = port_setup;
2109 info->io.addr_type = IPMI_IO_ADDR_SPACE;
2110 } else {
2111 info->io_setup = mem_setup;
2112 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
2114 info->io.addr_data = pci_resource_start(pdev, 0);
2116 info->io.regspacing = DEFAULT_REGSPACING;
2117 info->io.regsize = DEFAULT_REGSPACING;
2118 info->io.regshift = 0;
2120 info->irq = pdev->irq;
2121 if (info->irq)
2122 info->irq_setup = std_irq_setup;
2124 info->dev = &pdev->dev;
2126 return try_smi_init(info);
2129 static void __devexit ipmi_pci_remove(struct pci_dev *pdev)
#ifdef CONFIG_PM
/* No device state to save/restore; stubs keep the PM core happy. */
static int ipmi_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	return 0;
}

static int ipmi_pci_resume(struct pci_dev *pdev)
{
	return 0;
}
#endif
2145 static struct pci_device_id ipmi_pci_devices[] = {
2146 { PCI_DEVICE(PCI_HP_VENDOR_ID, PCI_MMC_DEVICE_ID) },
2147 { PCI_DEVICE_CLASS(PCI_ERMC_CLASSCODE, PCI_ERMC_CLASSCODE_MASK) }
2149 MODULE_DEVICE_TABLE(pci, ipmi_pci_devices);
2151 static struct pci_driver ipmi_pci_driver = {
2152 .name = DEVICE_NAME,
2153 .id_table = ipmi_pci_devices,
2154 .probe = ipmi_pci_probe,
2155 .remove = __devexit_p(ipmi_pci_remove),
2156 #ifdef CONFIG_PM
2157 .suspend = ipmi_pci_suspend,
2158 .resume = ipmi_pci_resume,
2159 #endif
2161 #endif /* CONFIG_PCI */
/*
 * Issue a Get Device ID over the (not yet registered) interface by
 * driving the state machine synchronously, and record the decoded
 * result in smi_info->device_id.  Used during probing to verify that
 * something IPMI-ish is really there.  Returns 0 on success, -ENODEV
 * if the state machine hosed, -EINVAL on a short or failed response,
 * -ENOMEM on allocation failure.
 */
static int try_get_dev_id(struct smi_info *smi_info)
{
	unsigned char         msg[2];
	unsigned char         *resp;
	unsigned long         resp_len;
	enum si_sm_result     smi_result;
	int                   rv = 0;

	resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
	if (!resp)
		return -ENOMEM;

	/* Do a Get Device ID command, since it comes back with some
	   useful info. */
	msg[0] = IPMI_NETFN_APP_REQUEST << 2;
	msg[1] = IPMI_GET_DEVICE_ID_CMD;
	smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);

	/* Poll the state machine to completion, sleeping a tick when
	   it asks for a delay. */
	smi_result = smi_info->handlers->event(smi_info->si_sm, 0);
	for (;;)
	{
		if (smi_result == SI_SM_CALL_WITH_DELAY ||
		    smi_result == SI_SM_CALL_WITH_TICK_DELAY) {
			schedule_timeout_uninterruptible(1);
			smi_result = smi_info->handlers->event(
				smi_info->si_sm, 100);
		}
		else if (smi_result == SI_SM_CALL_WITHOUT_DELAY)
		{
			smi_result = smi_info->handlers->event(
				smi_info->si_sm, 0);
		}
		else
			break;
	}
	if (smi_result == SI_SM_HOSED) {
		/* We couldn't get the state machine to run, so whatever's at
		   the port is probably not an IPMI SMI interface. */
		rv = -ENODEV;
		goto out;
	}

	/* Otherwise, we got some data. */
	resp_len = smi_info->handlers->get_result(smi_info->si_sm,
						  resp, IPMI_MAX_MSG_LENGTH);
	if (resp_len < 14) {
		/* That's odd, it should be longer. */
		rv = -EINVAL;
		goto out;
	}

	if ((resp[1] != IPMI_GET_DEVICE_ID_CMD) || (resp[2] != 0)) {
		/* That's odd, it shouldn't be able to fail. */
		rv = -EINVAL;
		goto out;
	}

	/* Record info from the get device id, in case we need it. */
	ipmi_demangle_device_id(resp+3, resp_len-3, &smi_info->device_id);

 out:
	kfree(resp);
	return rv;
}
2229 static int type_file_read_proc(char *page, char **start, off_t off,
2230 int count, int *eof, void *data)
2232 struct smi_info *smi = data;
2234 return sprintf(page, "%s\n", si_to_str[smi->si_type]);
/* /proc read handler: dump the per-interface statistics counters. */
static int stat_file_read_proc(char *page, char **start, off_t off,
			       int count, int *eof, void *data)
{
	char            *out = (char *) page;
	struct smi_info *smi = data;

	out += sprintf(out, "interrupts_enabled: %d\n",
		       smi->irq && !smi->interrupt_disabled);
	out += sprintf(out, "short_timeouts: %ld\n",
		       smi->short_timeouts);
	out += sprintf(out, "long_timeouts: %ld\n",
		       smi->long_timeouts);
	out += sprintf(out, "timeout_restarts: %ld\n",
		       smi->timeout_restarts);
	out += sprintf(out, "idles: %ld\n",
		       smi->idles);
	out += sprintf(out, "interrupts: %ld\n",
		       smi->interrupts);
	out += sprintf(out, "attentions: %ld\n",
		       smi->attentions);
	out += sprintf(out, "flag_fetches: %ld\n",
		       smi->flag_fetches);
	out += sprintf(out, "hosed_count: %ld\n",
		       smi->hosed_count);
	out += sprintf(out, "complete_transactions: %ld\n",
		       smi->complete_transactions);
	out += sprintf(out, "events: %ld\n",
		       smi->events);
	out += sprintf(out, "watchdog_pretimeouts: %ld\n",
		       smi->watchdog_pretimeouts);
	out += sprintf(out, "incoming_messages: %ld\n",
		       smi->incoming_messages);

	return out - page;
}
2273 static int param_read_proc(char *page, char **start, off_t off,
2274 int count, int *eof, void *data)
2276 struct smi_info *smi = data;
2278 return sprintf(page,
2279 "%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n",
2280 si_to_str[smi->si_type],
2281 addr_space_to_str[smi->io.addr_type],
2282 smi->io.addr_data,
2283 smi->io.regspacing,
2284 smi->io.regsize,
2285 smi->io.regshift,
2286 smi->irq,
2287 smi->slave_addr);
2291 * oem_data_avail_to_receive_msg_avail
2292 * @info - smi_info structure with msg_flags set
2294 * Converts flags from OEM_DATA_AVAIL to RECEIVE_MSG_AVAIL
2295 * Returns 1 indicating need to re-run handle_flags().
2297 static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info)
2299 smi_info->msg_flags = ((smi_info->msg_flags & ~OEM_DATA_AVAIL) |
2300 RECEIVE_MSG_AVAIL);
2301 return 1;
2305 * setup_dell_poweredge_oem_data_handler
2306 * @info - smi_info.device_id must be populated
2308 * Systems that match, but have firmware version < 1.40 may assert
2309 * OEM0_DATA_AVAIL on their own, without being told via Set Flags that
2310 * it's safe to do so. Such systems will de-assert OEM1_DATA_AVAIL
2311 * upon receipt of IPMI_GET_MSG_CMD, so we should treat these flags
2312 * as RECEIVE_MSG_AVAIL instead.
2314 * As Dell has no plans to release IPMI 1.5 firmware that *ever*
2315 * assert the OEM[012] bits, and if it did, the driver would have to
2316 * change to handle that properly, we don't actually check for the
2317 * firmware version.
2318 * Device ID = 0x20 BMC on PowerEdge 8G servers
2319 * Device Revision = 0x80
2320 * Firmware Revision1 = 0x01 BMC version 1.40
2321 * Firmware Revision2 = 0x40 BCD encoded
2322 * IPMI Version = 0x51 IPMI 1.5
2323 * Manufacturer ID = A2 02 00 Dell IANA
2325 * Additionally, PowerEdge systems with IPMI < 1.5 may also assert
2326 * OEM0_DATA_AVAIL and needs to be treated as RECEIVE_MSG_AVAIL.
2329 #define DELL_POWEREDGE_8G_BMC_DEVICE_ID 0x20
2330 #define DELL_POWEREDGE_8G_BMC_DEVICE_REV 0x80
2331 #define DELL_POWEREDGE_8G_BMC_IPMI_VERSION 0x51
2332 #define DELL_IANA_MFR_ID 0x0002a2
2333 static void setup_dell_poweredge_oem_data_handler(struct smi_info *smi_info)
2335 struct ipmi_device_id *id = &smi_info->device_id;
2336 if (id->manufacturer_id == DELL_IANA_MFR_ID) {
2337 if (id->device_id == DELL_POWEREDGE_8G_BMC_DEVICE_ID &&
2338 id->device_revision == DELL_POWEREDGE_8G_BMC_DEVICE_REV &&
2339 id->ipmi_version == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) {
2340 smi_info->oem_data_avail_handler =
2341 oem_data_avail_to_receive_msg_avail;
2343 else if (ipmi_version_major(id) < 1 ||
2344 (ipmi_version_major(id) == 1 &&
2345 ipmi_version_minor(id) < 5)) {
2346 smi_info->oem_data_avail_handler =
2347 oem_data_avail_to_receive_msg_avail;
2352 #define CANNOT_RETURN_REQUESTED_LENGTH 0xCA
2353 static void return_hosed_msg_badsize(struct smi_info *smi_info)
2355 struct ipmi_smi_msg *msg = smi_info->curr_msg;
2357 /* Make it a reponse */
2358 msg->rsp[0] = msg->data[0] | 4;
2359 msg->rsp[1] = msg->data[1];
2360 msg->rsp[2] = CANNOT_RETURN_REQUESTED_LENGTH;
2361 msg->rsp_size = 3;
2362 smi_info->curr_msg = NULL;
2363 deliver_recv_msg(smi_info, msg);
2367 * dell_poweredge_bt_xaction_handler
2368 * @info - smi_info.device_id must be populated
2370 * Dell PowerEdge servers with the BT interface (x6xx and 1750) will
2371 * not respond to a Get SDR command if the length of the data
2372 * requested is exactly 0x3A, which leads to command timeouts and no
2373 * data returned. This intercepts such commands, and causes userspace
2374 * callers to try again with a different-sized buffer, which succeeds.
2377 #define STORAGE_NETFN 0x0A
2378 #define STORAGE_CMD_GET_SDR 0x23
2379 static int dell_poweredge_bt_xaction_handler(struct notifier_block *self,
2380 unsigned long unused,
2381 void *in)
2383 struct smi_info *smi_info = in;
2384 unsigned char *data = smi_info->curr_msg->data;
2385 unsigned int size = smi_info->curr_msg->data_size;
2386 if (size >= 8 &&
2387 (data[0]>>2) == STORAGE_NETFN &&
2388 data[1] == STORAGE_CMD_GET_SDR &&
2389 data[7] == 0x3A) {
2390 return_hosed_msg_badsize(smi_info);
2391 return NOTIFY_STOP;
2393 return NOTIFY_DONE;
2396 static struct notifier_block dell_poweredge_bt_xaction_notifier = {
2397 .notifier_call = dell_poweredge_bt_xaction_handler,
2401 * setup_dell_poweredge_bt_xaction_handler
2402 * @info - smi_info.device_id must be filled in already
2404 * Fills in smi_info.device_id.start_transaction_pre_hook
2405 * when we know what function to use there.
2407 static void
2408 setup_dell_poweredge_bt_xaction_handler(struct smi_info *smi_info)
2410 struct ipmi_device_id *id = &smi_info->device_id;
2411 if (id->manufacturer_id == DELL_IANA_MFR_ID &&
2412 smi_info->si_type == SI_BT)
2413 register_xaction_notifier(&dell_poweredge_bt_xaction_notifier);
/*
 * setup_oem_data_handler
 * @smi_info - smi_info.device_id must be filled in already
 *
 * Installs smi_info->oem_data_avail_handler when a platform-specific
 * quirk applies; currently the only such quirk is the Dell PowerEdge
 * one.  (The old comment pointed at a nonexistent field on
 * device_id — the handler lives directly on smi_info.)
 */
static void setup_oem_data_handler(struct smi_info *smi_info)
{
	setup_dell_poweredge_oem_data_handler(smi_info);
}
/*
 * Install any transaction pre-hooks this platform needs; today that
 * is only the Dell PowerEdge BT workaround.
 */
static void setup_xaction_handlers(struct smi_info *smi_info)
{
	setup_dell_poweredge_bt_xaction_handler(smi_info);
}
2434 static inline void wait_for_timer_and_thread(struct smi_info *smi_info)
2436 if (smi_info->intf) {
2437 /* The timer and thread are only running if the
2438 interface has been started up and registered. */
2439 if (smi_info->thread != NULL)
2440 kthread_stop(smi_info->thread);
2441 del_timer_sync(&smi_info->si_timer);
2445 static __devinitdata struct ipmi_default_vals
2447 int type;
2448 int port;
2449 } ipmi_defaults[] =
2451 { .type = SI_KCS, .port = 0xca2 },
2452 { .type = SI_SMIC, .port = 0xca9 },
2453 { .type = SI_BT, .port = 0xe4 },
2454 { .port = 0 }
2457 static __devinit void default_find_bmc(void)
2459 struct smi_info *info;
2460 int i;
2462 for (i = 0; ; i++) {
2463 if (!ipmi_defaults[i].port)
2464 break;
2466 info = kzalloc(sizeof(*info), GFP_KERNEL);
2467 if (!info)
2468 return;
2470 info->addr_source = NULL;
2472 info->si_type = ipmi_defaults[i].type;
2473 info->io_setup = port_setup;
2474 info->io.addr_data = ipmi_defaults[i].port;
2475 info->io.addr_type = IPMI_IO_ADDR_SPACE;
2477 info->io.addr = NULL;
2478 info->io.regspacing = DEFAULT_REGSPACING;
2479 info->io.regsize = DEFAULT_REGSPACING;
2480 info->io.regshift = 0;
2482 if (try_smi_init(info) == 0) {
2483 /* Found one... */
2484 printk(KERN_INFO "ipmi_si: Found default %s state"
2485 " machine at %s address 0x%lx\n",
2486 si_to_str[info->si_type],
2487 addr_space_to_str[info->io.addr_type],
2488 info->io.addr_data);
2489 return;
2494 static int is_new_interface(struct smi_info *info)
2496 struct smi_info *e;
2498 list_for_each_entry(e, &smi_infos, link) {
2499 if (e->io.addr_type != info->io.addr_type)
2500 continue;
2501 if (e->io.addr_data == info->io.addr_data)
2502 return 0;
2505 return 1;
2508 static int try_smi_init(struct smi_info *new_smi)
2510 int rv;
2512 if (new_smi->addr_source) {
2513 printk(KERN_INFO "ipmi_si: Trying %s-specified %s state"
2514 " machine at %s address 0x%lx, slave address 0x%x,"
2515 " irq %d\n",
2516 new_smi->addr_source,
2517 si_to_str[new_smi->si_type],
2518 addr_space_to_str[new_smi->io.addr_type],
2519 new_smi->io.addr_data,
2520 new_smi->slave_addr, new_smi->irq);
2523 mutex_lock(&smi_infos_lock);
2524 if (!is_new_interface(new_smi)) {
2525 printk(KERN_WARNING "ipmi_si: duplicate interface\n");
2526 rv = -EBUSY;
2527 goto out_err;
2530 /* So we know not to free it unless we have allocated one. */
2531 new_smi->intf = NULL;
2532 new_smi->si_sm = NULL;
2533 new_smi->handlers = NULL;
2535 switch (new_smi->si_type) {
2536 case SI_KCS:
2537 new_smi->handlers = &kcs_smi_handlers;
2538 break;
2540 case SI_SMIC:
2541 new_smi->handlers = &smic_smi_handlers;
2542 break;
2544 case SI_BT:
2545 new_smi->handlers = &bt_smi_handlers;
2546 break;
2548 default:
2549 /* No support for anything else yet. */
2550 rv = -EIO;
2551 goto out_err;
2554 /* Allocate the state machine's data and initialize it. */
2555 new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
2556 if (!new_smi->si_sm) {
2557 printk(" Could not allocate state machine memory\n");
2558 rv = -ENOMEM;
2559 goto out_err;
2561 new_smi->io_size = new_smi->handlers->init_data(new_smi->si_sm,
2562 &new_smi->io);
2564 /* Now that we know the I/O size, we can set up the I/O. */
2565 rv = new_smi->io_setup(new_smi);
2566 if (rv) {
2567 printk(" Could not set up I/O space\n");
2568 goto out_err;
2571 spin_lock_init(&(new_smi->si_lock));
2572 spin_lock_init(&(new_smi->msg_lock));
2573 spin_lock_init(&(new_smi->count_lock));
2575 /* Do low-level detection first. */
2576 if (new_smi->handlers->detect(new_smi->si_sm)) {
2577 if (new_smi->addr_source)
2578 printk(KERN_INFO "ipmi_si: Interface detection"
2579 " failed\n");
2580 rv = -ENODEV;
2581 goto out_err;
2584 /* Attempt a get device id command. If it fails, we probably
2585 don't have a BMC here. */
2586 rv = try_get_dev_id(new_smi);
2587 if (rv) {
2588 if (new_smi->addr_source)
2589 printk(KERN_INFO "ipmi_si: There appears to be no BMC"
2590 " at this location\n");
2591 goto out_err;
2594 setup_oem_data_handler(new_smi);
2595 setup_xaction_handlers(new_smi);
2597 /* Try to claim any interrupts. */
2598 if (new_smi->irq_setup)
2599 new_smi->irq_setup(new_smi);
2601 INIT_LIST_HEAD(&(new_smi->xmit_msgs));
2602 INIT_LIST_HEAD(&(new_smi->hp_xmit_msgs));
2603 new_smi->curr_msg = NULL;
2604 atomic_set(&new_smi->req_events, 0);
2605 new_smi->run_to_completion = 0;
2607 new_smi->interrupt_disabled = 0;
2608 atomic_set(&new_smi->stop_operation, 0);
2609 new_smi->intf_num = smi_num;
2610 smi_num++;
2612 /* Start clearing the flags before we enable interrupts or the
2613 timer to avoid racing with the timer. */
2614 start_clear_flags(new_smi);
2615 /* IRQ is defined to be set when non-zero. */
2616 if (new_smi->irq)
2617 new_smi->si_state = SI_CLEARING_FLAGS_THEN_SET_IRQ;
2619 if (!new_smi->dev) {
2620 /* If we don't already have a device from something
2621 * else (like PCI), then register a new one. */
2622 new_smi->pdev = platform_device_alloc("ipmi_si",
2623 new_smi->intf_num);
2624 if (rv) {
2625 printk(KERN_ERR
2626 "ipmi_si_intf:"
2627 " Unable to allocate platform device\n");
2628 goto out_err;
2630 new_smi->dev = &new_smi->pdev->dev;
2631 new_smi->dev->driver = &ipmi_driver;
2633 rv = platform_device_add(new_smi->pdev);
2634 if (rv) {
2635 printk(KERN_ERR
2636 "ipmi_si_intf:"
2637 " Unable to register system interface device:"
2638 " %d\n",
2639 rv);
2640 goto out_err;
2642 new_smi->dev_registered = 1;
2645 rv = ipmi_register_smi(&handlers,
2646 new_smi,
2647 &new_smi->device_id,
2648 new_smi->dev,
2649 "bmc",
2650 new_smi->slave_addr);
2651 if (rv) {
2652 printk(KERN_ERR
2653 "ipmi_si: Unable to register device: error %d\n",
2654 rv);
2655 goto out_err_stop_timer;
2658 rv = ipmi_smi_add_proc_entry(new_smi->intf, "type",
2659 type_file_read_proc, NULL,
2660 new_smi, THIS_MODULE);
2661 if (rv) {
2662 printk(KERN_ERR
2663 "ipmi_si: Unable to create proc entry: %d\n",
2664 rv);
2665 goto out_err_stop_timer;
2668 rv = ipmi_smi_add_proc_entry(new_smi->intf, "si_stats",
2669 stat_file_read_proc, NULL,
2670 new_smi, THIS_MODULE);
2671 if (rv) {
2672 printk(KERN_ERR
2673 "ipmi_si: Unable to create proc entry: %d\n",
2674 rv);
2675 goto out_err_stop_timer;
2678 rv = ipmi_smi_add_proc_entry(new_smi->intf, "params",
2679 param_read_proc, NULL,
2680 new_smi, THIS_MODULE);
2681 if (rv) {
2682 printk(KERN_ERR
2683 "ipmi_si: Unable to create proc entry: %d\n",
2684 rv);
2685 goto out_err_stop_timer;
2688 list_add_tail(&new_smi->link, &smi_infos);
2690 mutex_unlock(&smi_infos_lock);
2692 printk(" IPMI %s interface initialized\n",si_to_str[new_smi->si_type]);
2694 return 0;
2696 out_err_stop_timer:
2697 atomic_inc(&new_smi->stop_operation);
2698 wait_for_timer_and_thread(new_smi);
2700 out_err:
2701 if (new_smi->intf)
2702 ipmi_unregister_smi(new_smi->intf);
2704 if (new_smi->irq_cleanup)
2705 new_smi->irq_cleanup(new_smi);
2707 /* Wait until we know that we are out of any interrupt
2708 handlers might have been running before we freed the
2709 interrupt. */
2710 synchronize_sched();
2712 if (new_smi->si_sm) {
2713 if (new_smi->handlers)
2714 new_smi->handlers->cleanup(new_smi->si_sm);
2715 kfree(new_smi->si_sm);
2717 if (new_smi->addr_source_cleanup)
2718 new_smi->addr_source_cleanup(new_smi);
2719 if (new_smi->io_cleanup)
2720 new_smi->io_cleanup(new_smi);
2722 if (new_smi->dev_registered)
2723 platform_device_unregister(new_smi->pdev);
2725 kfree(new_smi);
2727 mutex_unlock(&smi_infos_lock);
2729 return rv;
2732 static __devinit int init_ipmi_si(void)
2734 int i;
2735 char *str;
2736 int rv;
2738 if (initialized)
2739 return 0;
2740 initialized = 1;
2742 /* Register the device drivers. */
2743 rv = driver_register(&ipmi_driver);
2744 if (rv) {
2745 printk(KERN_ERR
2746 "init_ipmi_si: Unable to register driver: %d\n",
2747 rv);
2748 return rv;
2752 /* Parse out the si_type string into its components. */
2753 str = si_type_str;
2754 if (*str != '\0') {
2755 for (i = 0; (i < SI_MAX_PARMS) && (*str != '\0'); i++) {
2756 si_type[i] = str;
2757 str = strchr(str, ',');
2758 if (str) {
2759 *str = '\0';
2760 str++;
2761 } else {
2762 break;
2767 printk(KERN_INFO "IPMI System Interface driver.\n");
2769 hardcode_find_bmc();
2771 #ifdef CONFIG_DMI
2772 dmi_find_bmc();
2773 #endif
2775 #ifdef CONFIG_ACPI
2776 if (si_trydefaults)
2777 acpi_find_bmc();
2778 #endif
2780 #ifdef CONFIG_PCI
2781 rv = pci_register_driver(&ipmi_pci_driver);
2782 if (rv){
2783 printk(KERN_ERR
2784 "init_ipmi_si: Unable to register PCI driver: %d\n",
2785 rv);
2787 #endif
2789 if (si_trydefaults) {
2790 mutex_lock(&smi_infos_lock);
2791 if (list_empty(&smi_infos)) {
2792 /* No BMC was found, try defaults. */
2793 mutex_unlock(&smi_infos_lock);
2794 default_find_bmc();
2795 } else {
2796 mutex_unlock(&smi_infos_lock);
2800 mutex_lock(&smi_infos_lock);
2801 if (unload_when_empty && list_empty(&smi_infos)) {
2802 mutex_unlock(&smi_infos_lock);
2803 #ifdef CONFIG_PCI
2804 pci_unregister_driver(&ipmi_pci_driver);
2805 #endif
2806 driver_unregister(&ipmi_driver);
2807 printk("ipmi_si: Unable to find any System Interface(s)\n");
2808 return -ENODEV;
2809 } else {
2810 mutex_unlock(&smi_infos_lock);
2811 return 0;
2814 module_init(init_ipmi_si);
2816 static void cleanup_one_si(struct smi_info *to_clean)
2818 int rv;
2819 unsigned long flags;
2821 if (!to_clean)
2822 return;
2824 list_del(&to_clean->link);
2826 /* Tell the timer and interrupt handlers that we are shutting
2827 down. */
2828 spin_lock_irqsave(&(to_clean->si_lock), flags);
2829 spin_lock(&(to_clean->msg_lock));
2831 atomic_inc(&to_clean->stop_operation);
2833 if (to_clean->irq_cleanup)
2834 to_clean->irq_cleanup(to_clean);
2836 spin_unlock(&(to_clean->msg_lock));
2837 spin_unlock_irqrestore(&(to_clean->si_lock), flags);
2839 /* Wait until we know that we are out of any interrupt
2840 handlers might have been running before we freed the
2841 interrupt. */
2842 synchronize_sched();
2844 wait_for_timer_and_thread(to_clean);
2846 /* Interrupts and timeouts are stopped, now make sure the
2847 interface is in a clean state. */
2848 while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
2849 poll(to_clean);
2850 schedule_timeout_uninterruptible(1);
2853 rv = ipmi_unregister_smi(to_clean->intf);
2854 if (rv) {
2855 printk(KERN_ERR
2856 "ipmi_si: Unable to unregister device: errno=%d\n",
2857 rv);
2860 to_clean->handlers->cleanup(to_clean->si_sm);
2862 kfree(to_clean->si_sm);
2864 if (to_clean->addr_source_cleanup)
2865 to_clean->addr_source_cleanup(to_clean);
2866 if (to_clean->io_cleanup)
2867 to_clean->io_cleanup(to_clean);
2869 if (to_clean->dev_registered)
2870 platform_device_unregister(to_clean->pdev);
2872 kfree(to_clean);
2875 static __exit void cleanup_ipmi_si(void)
2877 struct smi_info *e, *tmp_e;
2879 if (!initialized)
2880 return;
2882 #ifdef CONFIG_PCI
2883 pci_unregister_driver(&ipmi_pci_driver);
2884 #endif
2886 mutex_lock(&smi_infos_lock);
2887 list_for_each_entry_safe(e, tmp_e, &smi_infos, link)
2888 cleanup_one_si(e);
2889 mutex_unlock(&smi_infos_lock);
2891 driver_unregister(&ipmi_driver);
2893 module_exit(cleanup_ipmi_si);
2895 MODULE_LICENSE("GPL");
2896 MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
2897 MODULE_DESCRIPTION("Interface to the IPMI driver for the KCS, SMIC, and BT system interfaces.");