[PATCH] ipmi: kcs error0 delay
drivers/char/ipmi/ipmi_si_intf.c
/*
 * ipmi_si.c
 *
 * The interface to the IPMI driver for the system interfaces (KCS, SMIC,
 * BT).
 *
 * Author: MontaVista Software, Inc.
 *         Corey Minyard <minyard@mvista.com>
 *         source@mvista.com
 *
 * Copyright 2002 MontaVista Software Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * This file holds the "policy" for the interface to the SMI state
 * machine.  It does the configuration, handles timers and interrupts,
 * and drives the real SMI state machine.
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <asm/system.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/ioport.h>
#include <linux/notifier.h>
#include <asm/irq.h>
#ifdef CONFIG_HIGH_RES_TIMERS
#include <linux/hrtime.h>
# if defined(schedule_next_int)
/* Old high-res timer code, do translations. */
#  define get_arch_cycles(a) quick_update_jiffies_sub(a)
#  define arch_cycles_per_jiffy cycles_per_jiffies
# endif
static inline void add_usec_to_timer(struct timer_list *t, long v)
{
        t->arch_cycle_expires += nsec_to_arch_cycle(v * 1000);
        while (t->arch_cycle_expires >= arch_cycles_per_jiffy)
        {
                t->expires++;
                t->arch_cycle_expires -= arch_cycles_per_jiffy;
        }
}
#endif
#include <linux/interrupt.h>
#include <linux/rcupdate.h>
#include <linux/ipmi_smi.h>
#include <asm/io.h>
#include "ipmi_si_sm.h"
#include <linux/init.h>
#include <linux/dmi.h>
/* Measure times between events in the driver. */
#undef DEBUG_TIMING

/* Call every 10 ms. */
#define SI_TIMEOUT_TIME_USEC	10000
#define SI_USEC_PER_JIFFY	(1000000/HZ)
#define SI_TIMEOUT_JIFFIES	(SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY)
#define SI_SHORT_TIMEOUT_USEC  250 /* .25ms when the SM requests a
                                      short timeout */
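/* Worked example: with HZ=1000, SI_USEC_PER_JIFFY is 1000 and
   SI_TIMEOUT_JIFFIES is 10, i.e. the 10 ms poll interval expressed in
   ticks of the kernel's timer, whatever HZ happens to be. */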
enum si_intf_state {
        SI_NORMAL,
        SI_GETTING_FLAGS,
        SI_GETTING_EVENTS,
        SI_CLEARING_FLAGS,
        SI_CLEARING_FLAGS_THEN_SET_IRQ,
        SI_GETTING_MESSAGES,
        SI_ENABLE_INTERRUPTS1,
        SI_ENABLE_INTERRUPTS2
        /* FIXME - add watchdog stuff. */
};
/* Some BT-specific defines we need here. */
#define IPMI_BT_INTMASK_REG		2
#define IPMI_BT_INTMASK_CLEAR_IRQ_BIT	2
#define IPMI_BT_INTMASK_ENABLE_IRQ_BIT	1
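/* Writing IPMI_BT_INTMASK_CLEAR_IRQ_BIT to the intmask register acks a
   pending BT interrupt; IPMI_BT_INTMASK_ENABLE_IRQ_BIT is written along
   with it so the interrupt stays enabled (see si_bt_irq_handler below). */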
enum si_type {
    SI_KCS, SI_SMIC, SI_BT
};
struct ipmi_device_id {
        unsigned char device_id;
        unsigned char device_revision;
        unsigned char firmware_revision_1;
        unsigned char firmware_revision_2;
        unsigned char ipmi_version;
        unsigned char additional_device_support;
        unsigned char manufacturer_id[3];
        unsigned char product_id[2];
        unsigned char aux_firmware_revision[4];
} __attribute__((packed));
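/* Per the IPMI Get Device ID response, the version byte packs the
   major revision into the low nibble and the minor revision into the
   high nibble; e.g. 0x51 decodes as IPMI 1.5, matching the Dell
   comment further down in this file. */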
#define ipmi_version_major(v) ((v)->ipmi_version & 0xf)
#define ipmi_version_minor(v) ((v)->ipmi_version >> 4)
struct smi_info
{
        ipmi_smi_t             intf;
        struct si_sm_data      *si_sm;
        struct si_sm_handlers  *handlers;
        enum si_type           si_type;
        spinlock_t             si_lock;
        spinlock_t             msg_lock;
        struct list_head       xmit_msgs;
        struct list_head       hp_xmit_msgs;
        struct ipmi_smi_msg    *curr_msg;
        enum si_intf_state     si_state;

        /* Used to handle the various types of I/O that can occur with
           IPMI */
        struct si_sm_io io;
        int (*io_setup)(struct smi_info *info);
        void (*io_cleanup)(struct smi_info *info);
        int (*irq_setup)(struct smi_info *info);
        void (*irq_cleanup)(struct smi_info *info);
        unsigned int io_size;

        /* Per-OEM handler, called from handle_flags().
           Returns 1 when handle_flags() needs to be re-run
           or 0 indicating it set si_state itself. */
        int (*oem_data_avail_handler)(struct smi_info *smi_info);

        /* Flags from the last GET_MSG_FLAGS command, used when an ATTN
           is set to hold the flags until we are done handling everything
           from the flags. */
#define RECEIVE_MSG_AVAIL	0x01
#define EVENT_MSG_BUFFER_FULL	0x02
#define WDT_PRE_TIMEOUT_INT	0x08
#define OEM0_DATA_AVAIL		0x20
#define OEM1_DATA_AVAIL		0x40
#define OEM2_DATA_AVAIL		0x80
#define OEM_DATA_AVAIL		(OEM0_DATA_AVAIL | \
                                 OEM1_DATA_AVAIL | \
                                 OEM2_DATA_AVAIL)
        unsigned char       msg_flags;

        /* If set to true, this will request events the next time the
           state machine is idle. */
        atomic_t            req_events;

        /* If true, run the state machine to completion on every send
           call.  Generally used after a panic to make sure stuff goes
           out. */
        int                 run_to_completion;

        /* The I/O port of an SI interface. */
        int                 port;

        /* The space between start addresses of the two ports.  For
           instance, if the first port is 0xca2 and the spacing is 4, then
           the second port is 0xca6. */
        unsigned int        spacing;

        /* zero if no irq; */
        int                 irq;

        /* The timer for this si. */
        struct timer_list   si_timer;

        /* The time (in jiffies) the last timeout occurred at. */
        unsigned long       last_timeout_jiffies;

        /* Used to gracefully stop the timer without race conditions. */
        volatile int        stop_operation;
        volatile int        timer_stopped;

        /* The driver will disable interrupts when it gets into a
           situation where it cannot handle messages due to lack of
           memory.  Once that situation clears up, it will re-enable
           interrupts. */
        int interrupt_disabled;

        struct ipmi_device_id device_id;

        /* Slave address, could be reported from DMI. */
        unsigned char slave_addr;

        /* Counters and things for the proc filesystem. */
        spinlock_t count_lock;
        unsigned long short_timeouts;
        unsigned long long_timeouts;
        unsigned long timeout_restarts;
        unsigned long idles;
        unsigned long interrupts;
        unsigned long attentions;
        unsigned long flag_fetches;
        unsigned long hosed_count;
        unsigned long complete_transactions;
        unsigned long events;
        unsigned long watchdog_pretimeouts;
        unsigned long incoming_messages;
};
static struct notifier_block *xaction_notifier_list;
static int register_xaction_notifier(struct notifier_block * nb)
{
        return notifier_chain_register(&xaction_notifier_list, nb);
}

static void si_restart_short_timer(struct smi_info *smi_info);
static void deliver_recv_msg(struct smi_info *smi_info,
                             struct ipmi_smi_msg *msg)
{
        /* Deliver the message to the upper layer with the lock
           released. */
        spin_unlock(&(smi_info->si_lock));
        ipmi_smi_msg_received(smi_info->intf, msg);
        spin_lock(&(smi_info->si_lock));
}
static void return_hosed_msg(struct smi_info *smi_info)
{
        struct ipmi_smi_msg *msg = smi_info->curr_msg;
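        /* data[0] holds (netfn << 2) | LUN, and a response netfn is the
           request netfn plus one, so OR-ing in 4 (1 << 2) turns the
           request netfn byte into the matching response netfn byte. */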
        /* Make it a response */
        msg->rsp[0] = msg->data[0] | 4;
        msg->rsp[1] = msg->data[1];
        msg->rsp[2] = 0xFF; /* Unknown error. */
        msg->rsp_size = 3;

        smi_info->curr_msg = NULL;
        deliver_recv_msg(smi_info, msg);
}
static enum si_sm_result start_next_msg(struct smi_info *smi_info)
{
        int              rv;
        struct list_head *entry = NULL;
#ifdef DEBUG_TIMING
        struct timeval t;
#endif

        /* No need to save flags, we already have interrupts off and we
           already hold the SMI lock. */
        spin_lock(&(smi_info->msg_lock));

        /* Pick the high priority queue first. */
        if (! list_empty(&(smi_info->hp_xmit_msgs))) {
                entry = smi_info->hp_xmit_msgs.next;
        } else if (! list_empty(&(smi_info->xmit_msgs))) {
                entry = smi_info->xmit_msgs.next;
        }

        if (! entry) {
                smi_info->curr_msg = NULL;
                rv = SI_SM_IDLE;
        } else {
                int err;

                list_del(entry);
                smi_info->curr_msg = list_entry(entry,
                                                struct ipmi_smi_msg,
                                                link);
#ifdef DEBUG_TIMING
                do_gettimeofday(&t);
                printk("**Start2: %d.%9.9d\n", t.tv_sec, t.tv_usec);
#endif
                err = notifier_call_chain(&xaction_notifier_list, 0, smi_info);
                if (err & NOTIFY_STOP_MASK) {
                        rv = SI_SM_CALL_WITHOUT_DELAY;
                        goto out;
                }
                err = smi_info->handlers->start_transaction(
                        smi_info->si_sm,
                        smi_info->curr_msg->data,
                        smi_info->curr_msg->data_size);
                if (err) {
                        return_hosed_msg(smi_info);
                }

                rv = SI_SM_CALL_WITHOUT_DELAY;
        }
 out:
        spin_unlock(&(smi_info->msg_lock));

        return rv;
}
static void start_enable_irq(struct smi_info *smi_info)
{
        unsigned char msg[2];

        /* If we are enabling interrupts, we have to tell the
           BMC to use them. */
        msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
        msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;

        smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
        smi_info->si_state = SI_ENABLE_INTERRUPTS1;
}
static void start_clear_flags(struct smi_info *smi_info)
{
        unsigned char msg[3];

        /* Make sure the watchdog pre-timeout flag is not set at startup. */
        msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
        msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
        msg[2] = WDT_PRE_TIMEOUT_INT;

        smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
        smi_info->si_state = SI_CLEARING_FLAGS;
}
/* When we have a situation where we run out of memory and cannot
   allocate messages, we just leave them in the BMC and run the system
   polled until we can allocate some memory.  Once we have some
   memory, we will re-enable the interrupt. */
static inline void disable_si_irq(struct smi_info *smi_info)
{
        if ((smi_info->irq) && (! smi_info->interrupt_disabled)) {
                disable_irq_nosync(smi_info->irq);
                smi_info->interrupt_disabled = 1;
        }
}

static inline void enable_si_irq(struct smi_info *smi_info)
{
        if ((smi_info->irq) && (smi_info->interrupt_disabled)) {
                enable_irq(smi_info->irq);
                smi_info->interrupt_disabled = 0;
        }
}
static void handle_flags(struct smi_info *smi_info)
{
 retry:
        if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) {
                /* Watchdog pre-timeout */
                spin_lock(&smi_info->count_lock);
                smi_info->watchdog_pretimeouts++;
                spin_unlock(&smi_info->count_lock);

                start_clear_flags(smi_info);
                smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
                spin_unlock(&(smi_info->si_lock));
                ipmi_smi_watchdog_pretimeout(smi_info->intf);
                spin_lock(&(smi_info->si_lock));
        } else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) {
                /* Messages available. */
                smi_info->curr_msg = ipmi_alloc_smi_msg();
                if (! smi_info->curr_msg) {
                        disable_si_irq(smi_info);
                        smi_info->si_state = SI_NORMAL;
                        return;
                }
                enable_si_irq(smi_info);

                smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
                smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
                smi_info->curr_msg->data_size = 2;

                smi_info->handlers->start_transaction(
                        smi_info->si_sm,
                        smi_info->curr_msg->data,
                        smi_info->curr_msg->data_size);
                smi_info->si_state = SI_GETTING_MESSAGES;
        } else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) {
                /* Events available. */
                smi_info->curr_msg = ipmi_alloc_smi_msg();
                if (! smi_info->curr_msg) {
                        disable_si_irq(smi_info);
                        smi_info->si_state = SI_NORMAL;
                        return;
                }
                enable_si_irq(smi_info);

                smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
                smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
                smi_info->curr_msg->data_size = 2;

                smi_info->handlers->start_transaction(
                        smi_info->si_sm,
                        smi_info->curr_msg->data,
                        smi_info->curr_msg->data_size);
                smi_info->si_state = SI_GETTING_EVENTS;
        } else if (smi_info->msg_flags & OEM_DATA_AVAIL) {
                if (smi_info->oem_data_avail_handler)
                        if (smi_info->oem_data_avail_handler(smi_info))
                                goto retry;
        } else {
                smi_info->si_state = SI_NORMAL;
        }
}
static void handle_transaction_done(struct smi_info *smi_info)
{
        struct ipmi_smi_msg *msg;
#ifdef DEBUG_TIMING
        struct timeval t;

        do_gettimeofday(&t);
        printk("**Done: %d.%9.9d\n", t.tv_sec, t.tv_usec);
#endif
        switch (smi_info->si_state) {
        case SI_NORMAL:
                if (! smi_info->curr_msg)
                        break;

                smi_info->curr_msg->rsp_size
                        = smi_info->handlers->get_result(
                                smi_info->si_sm,
                                smi_info->curr_msg->rsp,
                                IPMI_MAX_MSG_LENGTH);

                /* Do this here because deliver_recv_msg() releases the
                   lock, and a new message can be put in during the
                   time the lock is released. */
                msg = smi_info->curr_msg;
                smi_info->curr_msg = NULL;
                deliver_recv_msg(smi_info, msg);
                break;

        case SI_GETTING_FLAGS:
        {
                unsigned char msg[4];
                unsigned int  len;

                /* We got the flags from the SMI, now handle them. */
                len = smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
                if (msg[2] != 0) {
                        /* Error fetching flags, just give up for
                           now. */
                        smi_info->si_state = SI_NORMAL;
                } else if (len < 4) {
                        /* Hmm, no flags.  That's technically illegal, but
                           don't use uninitialized data. */
                        smi_info->si_state = SI_NORMAL;
                } else {
                        smi_info->msg_flags = msg[3];
                        handle_flags(smi_info);
                }
                break;
        }

        case SI_CLEARING_FLAGS:
        case SI_CLEARING_FLAGS_THEN_SET_IRQ:
        {
                unsigned char msg[3];

                /* We cleared the flags. */
                smi_info->handlers->get_result(smi_info->si_sm, msg, 3);
                if (msg[2] != 0) {
                        /* Error clearing flags */
                        printk(KERN_WARNING
                               "ipmi_si: Error clearing flags: %2.2x\n",
                               msg[2]);
                }
                if (smi_info->si_state == SI_CLEARING_FLAGS_THEN_SET_IRQ)
                        start_enable_irq(smi_info);
                else
                        smi_info->si_state = SI_NORMAL;
                break;
        }

        case SI_GETTING_EVENTS:
        {
                smi_info->curr_msg->rsp_size
                        = smi_info->handlers->get_result(
                                smi_info->si_sm,
                                smi_info->curr_msg->rsp,
                                IPMI_MAX_MSG_LENGTH);

                /* Do this here because deliver_recv_msg() releases the
                   lock, and a new message can be put in during the
                   time the lock is released. */
                msg = smi_info->curr_msg;
                smi_info->curr_msg = NULL;
                if (msg->rsp[2] != 0) {
                        /* Error getting event, probably done. */
                        msg->done(msg);

                        /* Take off the event flag. */
                        smi_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL;
                        handle_flags(smi_info);
                } else {
                        spin_lock(&smi_info->count_lock);
                        smi_info->events++;
                        spin_unlock(&smi_info->count_lock);

                        /* Do this before we deliver the message
                           because delivering the message releases the
                           lock and something else can mess with the
                           state. */
                        handle_flags(smi_info);

                        deliver_recv_msg(smi_info, msg);
                }
                break;
        }

        case SI_GETTING_MESSAGES:
        {
                smi_info->curr_msg->rsp_size
                        = smi_info->handlers->get_result(
                                smi_info->si_sm,
                                smi_info->curr_msg->rsp,
                                IPMI_MAX_MSG_LENGTH);

                /* Do this here because deliver_recv_msg() releases the
                   lock, and a new message can be put in during the
                   time the lock is released. */
                msg = smi_info->curr_msg;
                smi_info->curr_msg = NULL;
                if (msg->rsp[2] != 0) {
                        /* Error getting message, probably done. */
                        msg->done(msg);

                        /* Take off the msg flag. */
                        smi_info->msg_flags &= ~RECEIVE_MSG_AVAIL;
                        handle_flags(smi_info);
                } else {
                        spin_lock(&smi_info->count_lock);
                        smi_info->incoming_messages++;
                        spin_unlock(&smi_info->count_lock);

                        /* Do this before we deliver the message
                           because delivering the message releases the
                           lock and something else can mess with the
                           state. */
                        handle_flags(smi_info);

                        deliver_recv_msg(smi_info, msg);
                }
                break;
        }

        case SI_ENABLE_INTERRUPTS1:
        {
                unsigned char msg[4];

                /* We got the flags from the SMI, now handle them. */
                smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
                if (msg[2] != 0) {
                        printk(KERN_WARNING
                               "ipmi_si: Could not enable interrupts"
                               ", failed get, using polled mode.\n");
                        smi_info->si_state = SI_NORMAL;
                } else {
                        msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
                        msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
                        msg[2] = msg[3] | 1; /* enable msg queue int */
                        smi_info->handlers->start_transaction(
                                smi_info->si_sm, msg, 3);
                        smi_info->si_state = SI_ENABLE_INTERRUPTS2;
                }
                break;
        }

        case SI_ENABLE_INTERRUPTS2:
        {
                unsigned char msg[4];

                /* We got the flags from the SMI, now handle them. */
                smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
                if (msg[2] != 0) {
                        printk(KERN_WARNING
                               "ipmi_si: Could not enable interrupts"
                               ", failed set, using polled mode.\n");
                }
                smi_info->si_state = SI_NORMAL;
                break;
        }
        }
}
/* Called on timeouts and events.  Timeouts should pass the elapsed
   time, interrupts should pass in zero. */
static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
                                           int time)
{
        enum si_sm_result si_sm_result;

 restart:
        /* There used to be a loop here that waited a little while
           (around 25us) before giving up.  That turned out to be
           pointless, the minimum delays I was seeing were in the 300us
           range, which is far too long to wait in an interrupt.  So
           we just run until the state machine tells us something
           happened or it needs a delay. */
        si_sm_result = smi_info->handlers->event(smi_info->si_sm, time);
        time = 0;
        while (si_sm_result == SI_SM_CALL_WITHOUT_DELAY)
        {
                si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
        }

        if (si_sm_result == SI_SM_TRANSACTION_COMPLETE)
        {
                spin_lock(&smi_info->count_lock);
                smi_info->complete_transactions++;
                spin_unlock(&smi_info->count_lock);

                handle_transaction_done(smi_info);
                si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
        }
        else if (si_sm_result == SI_SM_HOSED)
        {
                spin_lock(&smi_info->count_lock);
                smi_info->hosed_count++;
                spin_unlock(&smi_info->count_lock);

                /* Do this before return_hosed_msg, because that
                   releases the lock. */
                smi_info->si_state = SI_NORMAL;
                if (smi_info->curr_msg != NULL) {
                        /* If we were handling a user message, format
                           a response to send to the upper layer to
                           tell it about the error. */
                        return_hosed_msg(smi_info);
                }
                si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
        }

        /* We prefer handling attn over new messages. */
        if (si_sm_result == SI_SM_ATTN)
        {
                unsigned char msg[2];

                spin_lock(&smi_info->count_lock);
                smi_info->attentions++;
                spin_unlock(&smi_info->count_lock);

                /* Got an attn, send down a get message flags to see
                   what's causing it.  It would be better to handle
                   this in the upper layer, but due to the way
                   interrupts work with the SMI, that's not really
                   possible. */
                msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
                msg[1] = IPMI_GET_MSG_FLAGS_CMD;

                smi_info->handlers->start_transaction(
                        smi_info->si_sm, msg, 2);
                smi_info->si_state = SI_GETTING_FLAGS;
                goto restart;
        }

        /* If we are currently idle, try to start the next message. */
        if (si_sm_result == SI_SM_IDLE) {
                spin_lock(&smi_info->count_lock);
                smi_info->idles++;
                spin_unlock(&smi_info->count_lock);

                si_sm_result = start_next_msg(smi_info);
                if (si_sm_result != SI_SM_IDLE)
                        goto restart;
        }

        if ((si_sm_result == SI_SM_IDLE)
            && (atomic_read(&smi_info->req_events)))
        {
                /* We are idle and the upper layer requested that I fetch
                   events, so do so. */
                unsigned char msg[2];

                spin_lock(&smi_info->count_lock);
                smi_info->flag_fetches++;
                spin_unlock(&smi_info->count_lock);

                atomic_set(&smi_info->req_events, 0);
                msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
                msg[1] = IPMI_GET_MSG_FLAGS_CMD;

                smi_info->handlers->start_transaction(
                        smi_info->si_sm, msg, 2);
                smi_info->si_state = SI_GETTING_FLAGS;
                goto restart;
        }

        return si_sm_result;
}
static void sender(void                *send_info,
                   struct ipmi_smi_msg *msg,
                   int                 priority)
{
        struct smi_info   *smi_info = send_info;
        enum si_sm_result result;
        unsigned long     flags;
#ifdef DEBUG_TIMING
        struct timeval    t;
#endif

        spin_lock_irqsave(&(smi_info->msg_lock), flags);
#ifdef DEBUG_TIMING
        do_gettimeofday(&t);
        printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec);
#endif

        if (smi_info->run_to_completion) {
                /* If we are running to completion, then throw it in
                   the list and run transactions until everything is
                   clear.  Priority doesn't matter here. */
                list_add_tail(&(msg->link), &(smi_info->xmit_msgs));

                /* We have to release the msg lock and claim the smi
                   lock in this case, because of race conditions. */
                spin_unlock_irqrestore(&(smi_info->msg_lock), flags);

                spin_lock_irqsave(&(smi_info->si_lock), flags);
                result = smi_event_handler(smi_info, 0);
                while (result != SI_SM_IDLE) {
                        udelay(SI_SHORT_TIMEOUT_USEC);
                        result = smi_event_handler(smi_info,
                                                   SI_SHORT_TIMEOUT_USEC);
                }
                spin_unlock_irqrestore(&(smi_info->si_lock), flags);
                return;
        } else {
                if (priority > 0) {
                        list_add_tail(&(msg->link), &(smi_info->hp_xmit_msgs));
                } else {
                        list_add_tail(&(msg->link), &(smi_info->xmit_msgs));
                }
        }
        spin_unlock_irqrestore(&(smi_info->msg_lock), flags);

        spin_lock_irqsave(&(smi_info->si_lock), flags);
        if ((smi_info->si_state == SI_NORMAL)
            && (smi_info->curr_msg == NULL))
        {
                start_next_msg(smi_info);
                si_restart_short_timer(smi_info);
        }
        spin_unlock_irqrestore(&(smi_info->si_lock), flags);
}
static void set_run_to_completion(void *send_info, int i_run_to_completion)
{
        struct smi_info   *smi_info = send_info;
        enum si_sm_result result;
        unsigned long     flags;

        spin_lock_irqsave(&(smi_info->si_lock), flags);

        smi_info->run_to_completion = i_run_to_completion;
        if (i_run_to_completion) {
                result = smi_event_handler(smi_info, 0);
                while (result != SI_SM_IDLE) {
                        udelay(SI_SHORT_TIMEOUT_USEC);
                        result = smi_event_handler(smi_info,
                                                   SI_SHORT_TIMEOUT_USEC);
                }
        }

        spin_unlock_irqrestore(&(smi_info->si_lock), flags);
}
static void poll(void *send_info)
{
        struct smi_info *smi_info = send_info;

        smi_event_handler(smi_info, 0);
}

static void request_events(void *send_info)
{
        struct smi_info *smi_info = send_info;

        atomic_set(&smi_info->req_events, 1);
}

static int initialized = 0;
/* Must be called with interrupts off and with the si_lock held. */
static void si_restart_short_timer(struct smi_info *smi_info)
{
#if defined(CONFIG_HIGH_RES_TIMERS)
        unsigned long flags;
        unsigned long jiffies_now;
        unsigned long seq;

        if (del_timer(&(smi_info->si_timer))) {
                /* If we don't delete the timer, then it will go off
                   immediately, anyway.  So we only process if we
                   actually delete the timer. */

                do {
                        seq = read_seqbegin_irqsave(&xtime_lock, flags);
                        jiffies_now = jiffies;
                        smi_info->si_timer.expires = jiffies_now;
                        smi_info->si_timer.arch_cycle_expires
                                = get_arch_cycles(jiffies_now);
                } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));

                add_usec_to_timer(&smi_info->si_timer, SI_SHORT_TIMEOUT_USEC);

                add_timer(&(smi_info->si_timer));
                spin_lock_irqsave(&smi_info->count_lock, flags);
                smi_info->timeout_restarts++;
                spin_unlock_irqrestore(&smi_info->count_lock, flags);
        }
#endif
}
static void smi_timeout(unsigned long data)
{
        struct smi_info   *smi_info = (struct smi_info *) data;
        enum si_sm_result smi_result;
        unsigned long     flags;
        unsigned long     jiffies_now;
        long              time_diff;
#ifdef DEBUG_TIMING
        struct timeval    t;
#endif

        if (smi_info->stop_operation) {
                smi_info->timer_stopped = 1;
                return;
        }

        spin_lock_irqsave(&(smi_info->si_lock), flags);
#ifdef DEBUG_TIMING
        do_gettimeofday(&t);
        printk("**Timer: %d.%9.9d\n", t.tv_sec, t.tv_usec);
#endif
        jiffies_now = jiffies;
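        /* Convert the jiffies elapsed since the last run into the
           microseconds the state machine is driven with; the signed
           subtraction keeps the delta usable across a jiffies wrap. */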
        time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
                     * SI_USEC_PER_JIFFY);
        smi_result = smi_event_handler(smi_info, time_diff);

        spin_unlock_irqrestore(&(smi_info->si_lock), flags);

        smi_info->last_timeout_jiffies = jiffies_now;

        if ((smi_info->irq) && (! smi_info->interrupt_disabled)) {
                /* Running with interrupts, only do long timeouts. */
                smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
                spin_lock_irqsave(&smi_info->count_lock, flags);
                smi_info->long_timeouts++;
                spin_unlock_irqrestore(&smi_info->count_lock, flags);
                goto do_add_timer;
        }

        /* If the state machine asks for a short delay, then shorten
           the timer timeout. */
        if (smi_result == SI_SM_CALL_WITH_DELAY) {
#if defined(CONFIG_HIGH_RES_TIMERS)
                unsigned long seq;
#endif
                spin_lock_irqsave(&smi_info->count_lock, flags);
                smi_info->short_timeouts++;
                spin_unlock_irqrestore(&smi_info->count_lock, flags);
#if defined(CONFIG_HIGH_RES_TIMERS)
                do {
                        seq = read_seqbegin_irqsave(&xtime_lock, flags);
                        smi_info->si_timer.expires = jiffies;
                        smi_info->si_timer.arch_cycle_expires
                                = get_arch_cycles(smi_info->si_timer.expires);
                } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
                add_usec_to_timer(&smi_info->si_timer, SI_SHORT_TIMEOUT_USEC);
#else
                smi_info->si_timer.expires = jiffies + 1;
#endif
        } else {
                spin_lock_irqsave(&smi_info->count_lock, flags);
                smi_info->long_timeouts++;
                spin_unlock_irqrestore(&smi_info->count_lock, flags);
                smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
#if defined(CONFIG_HIGH_RES_TIMERS)
                smi_info->si_timer.arch_cycle_expires = 0;
#endif
        }

 do_add_timer:
        add_timer(&(smi_info->si_timer));
}
static irqreturn_t si_irq_handler(int irq, void *data, struct pt_regs *regs)
{
        struct smi_info *smi_info = data;
        unsigned long   flags;
#ifdef DEBUG_TIMING
        struct timeval  t;
#endif

        spin_lock_irqsave(&(smi_info->si_lock), flags);

        spin_lock(&smi_info->count_lock);
        smi_info->interrupts++;
        spin_unlock(&smi_info->count_lock);

        if (smi_info->stop_operation)
                goto out;

#ifdef DEBUG_TIMING
        do_gettimeofday(&t);
        printk("**Interrupt: %d.%9.9d\n", t.tv_sec, t.tv_usec);
#endif
        smi_event_handler(smi_info, 0);
 out:
        spin_unlock_irqrestore(&(smi_info->si_lock), flags);
        return IRQ_HANDLED;
}
static irqreturn_t si_bt_irq_handler(int irq, void *data, struct pt_regs *regs)
{
        struct smi_info *smi_info = data;
        /* We need to clear the IRQ flag for the BT interface. */
        smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
                             IPMI_BT_INTMASK_CLEAR_IRQ_BIT
                             | IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
        return si_irq_handler(irq, data, regs);
}
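/* The callbacks this driver hands to the upper IPMI message-handler
   layer; registration itself (via ipmi_register_smi()) happens later
   in this file, outside this excerpt. */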
static struct ipmi_smi_handlers handlers =
{
        .owner                  = THIS_MODULE,
        .sender                 = sender,
        .request_events         = request_events,
        .set_run_to_completion  = set_run_to_completion,
        .poll                   = poll,
};
/* There can be 4 IO ports passed in (with or without IRQs), 4 addresses,
   a default IO port, and 1 ACPI/SPMI address.  That sets SI_MAX_DRIVERS */

#define SI_MAX_PARMS 4
#define SI_MAX_DRIVERS ((SI_MAX_PARMS * 2) + 2)
static struct smi_info *smi_infos[SI_MAX_DRIVERS] =
{ NULL, NULL, NULL, NULL };

#define DEVICE_NAME "ipmi_si"

#define DEFAULT_KCS_IO_PORT	0xca2
#define DEFAULT_SMIC_IO_PORT	0xca9
#define DEFAULT_BT_IO_PORT	0xe4
#define DEFAULT_REGSPACING	1

static int           si_trydefaults = 1;
static char          *si_type[SI_MAX_PARMS];
#define MAX_SI_TYPE_STR 30
static char          si_type_str[MAX_SI_TYPE_STR];
static unsigned long addrs[SI_MAX_PARMS];
static int num_addrs;
static unsigned int  ports[SI_MAX_PARMS];
static int num_ports;
static int           irqs[SI_MAX_PARMS];
static int num_irqs;
static int           regspacings[SI_MAX_PARMS];
static int num_regspacings = 0;
static int           regsizes[SI_MAX_PARMS];
static int num_regsizes = 0;
static int           regshifts[SI_MAX_PARMS];
static int num_regshifts = 0;
static int slave_addrs[SI_MAX_PARMS];
static int num_slave_addrs = 0;
module_param_named(trydefaults, si_trydefaults, bool, 0);
MODULE_PARM_DESC(trydefaults, "Setting this to 'false' will disable the"
                 " default scan of the KCS and SMIC interface at the standard"
                 " address");
module_param_string(type, si_type_str, MAX_SI_TYPE_STR, 0);
MODULE_PARM_DESC(type, "Defines the type of each interface, each"
                 " interface separated by commas.  The types are 'kcs',"
                 " 'smic', and 'bt'.  For example si_type=kcs,bt will set"
                 " the first interface to kcs and the second to bt");
module_param_array(addrs, long, &num_addrs, 0);
MODULE_PARM_DESC(addrs, "Sets the memory address of each interface, the"
                 " addresses separated by commas.  Only use if an interface"
                 " is in memory.  Otherwise, set it to zero or leave"
                 " it blank.");
module_param_array(ports, int, &num_ports, 0);
MODULE_PARM_DESC(ports, "Sets the port address of each interface, the"
                 " addresses separated by commas.  Only use if an interface"
                 " is a port.  Otherwise, set it to zero or leave"
                 " it blank.");
module_param_array(irqs, int, &num_irqs, 0);
MODULE_PARM_DESC(irqs, "Sets the interrupt of each interface, the"
                 " addresses separated by commas.  Only use if an interface"
                 " has an interrupt.  Otherwise, set it to zero or leave"
                 " it blank.");
module_param_array(regspacings, int, &num_regspacings, 0);
MODULE_PARM_DESC(regspacings, "The number of bytes between the start address"
                 " and each successive register used by the interface.  For"
                 " instance, if the start address is 0xca2 and the spacing"
                 " is 2, then the second address is at 0xca4.  Defaults"
                 " to 1.");
module_param_array(regsizes, int, &num_regsizes, 0);
MODULE_PARM_DESC(regsizes, "The size of the specific IPMI register in bytes."
                 " This should generally be 1, 2, 4, or 8 for an 8-bit,"
                 " 16-bit, 32-bit, or 64-bit register.  Use this if"
                 " the 8-bit IPMI register has to be read from a larger"
                 " register.");
module_param_array(regshifts, int, &num_regshifts, 0);
MODULE_PARM_DESC(regshifts, "The amount to shift the data read from the"
                 " IPMI register, in bits.  For instance, if the data"
                 " is read from a 32-bit word and the IPMI data is in"
                 " bit 8-15, then the shift would be 8");
module_param_array(slave_addrs, int, &num_slave_addrs, 0);
MODULE_PARM_DESC(slave_addrs, "Set the default IPMB slave address for"
                 " the controller.  Normally this is 0x20, but can be"
                 " overridden by this parm.  This is an array indexed"
                 " by interface number.");
#define IPMI_MEM_ADDR_SPACE 1
#define IPMI_IO_ADDR_SPACE  2

#if defined(CONFIG_ACPI) || defined(CONFIG_X86) || defined(CONFIG_PCI)
static int is_new_interface(int intf, u8 addr_space, unsigned long base_addr)
{
        int i;

        for (i = 0; i < SI_MAX_PARMS; ++i) {
                /* Don't check our address. */
                if (i == intf)
                        continue;
                if (si_type[i] != NULL) {
                        if ((addr_space == IPMI_MEM_ADDR_SPACE &&
                             base_addr == addrs[i]) ||
                            (addr_space == IPMI_IO_ADDR_SPACE &&
                             base_addr == ports[i]))
                                return 0;
                }
                else
                        break;
        }

        return 1;
}
#endif
static int std_irq_setup(struct smi_info *info)
{
        int rv;

        if (! info->irq)
                return 0;

        if (info->si_type == SI_BT) {
                rv = request_irq(info->irq,
                                 si_bt_irq_handler,
                                 SA_INTERRUPT,
                                 DEVICE_NAME,
                                 info);
                if (! rv)
                        /* Enable the interrupt in the BT interface. */
                        info->io.outputb(&info->io, IPMI_BT_INTMASK_REG,
                                         IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
        } else
                rv = request_irq(info->irq,
                                 si_irq_handler,
                                 SA_INTERRUPT,
                                 DEVICE_NAME,
                                 info);
        if (rv) {
                printk(KERN_WARNING
                       "ipmi_si: %s unable to claim interrupt %d,"
                       " running polled\n",
                       DEVICE_NAME, info->irq);
                info->irq = 0;
        } else {
                printk("  Using irq %d\n", info->irq);
        }

        return rv;
}

static void std_irq_cleanup(struct smi_info *info)
{
        if (! info->irq)
                return;

        if (info->si_type == SI_BT)
                /* Disable the interrupt in the BT interface. */
                info->io.outputb(&info->io, IPMI_BT_INTMASK_REG, 0);
        free_irq(info->irq, info);
}
static unsigned char port_inb(struct si_sm_io *io, unsigned int offset)
{
        unsigned int *addr = io->info;

        return inb((*addr)+(offset * io->regspacing));
}

static void port_outb(struct si_sm_io *io, unsigned int offset,
                      unsigned char b)
{
        unsigned int *addr = io->info;

        outb(b, (*addr)+(offset * io->regspacing));
}
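/* For interfaces whose 8-bit IPMI registers live inside wider (16- or
   32-bit) registers, the wide value is read, shifted down by regshift,
   and masked to a byte; writes shift the byte back up the same way. */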
static unsigned char port_inw(struct si_sm_io *io, unsigned int offset)
{
        unsigned int *addr = io->info;

        return (inw((*addr)+(offset * io->regspacing)) >> io->regshift) & 0xff;
}

static void port_outw(struct si_sm_io *io, unsigned int offset,
                      unsigned char b)
{
        unsigned int *addr = io->info;

        outw(b << io->regshift, (*addr)+(offset * io->regspacing));
}

static unsigned char port_inl(struct si_sm_io *io, unsigned int offset)
{
        unsigned int *addr = io->info;

        return (inl((*addr)+(offset * io->regspacing)) >> io->regshift) & 0xff;
}

static void port_outl(struct si_sm_io *io, unsigned int offset,
                      unsigned char b)
{
        unsigned int *addr = io->info;

        outl(b << io->regshift, (*addr)+(offset * io->regspacing));
}
static void port_cleanup(struct smi_info *info)
{
        unsigned int *addr = info->io.info;
        int           mapsize;

        if (addr && (*addr)) {
                mapsize = ((info->io_size * info->io.regspacing)
                           - (info->io.regspacing - info->io.regsize));

                release_region (*addr, mapsize);
        }
        kfree(info);
}
static int port_setup(struct smi_info *info)
{
        unsigned int *addr = info->io.info;
        int           mapsize;

        if (! addr || (! *addr))
                return -ENODEV;

        info->io_cleanup = port_cleanup;

        /* Figure out the actual inb/inw/inl/etc routine to use based
           upon the register size. */
        switch (info->io.regsize) {
        case 1:
                info->io.inputb = port_inb;
                info->io.outputb = port_outb;
                break;
        case 2:
                info->io.inputb = port_inw;
                info->io.outputb = port_outw;
                break;
        case 4:
                info->io.inputb = port_inl;
                info->io.outputb = port_outl;
                break;
        default:
                printk("ipmi_si: Invalid register size: %d\n",
                       info->io.regsize);
                return -EINVAL;
        }

        /* Calculate the total amount of memory to claim.  This is an
         * unusual looking calculation, but it avoids claiming any
         * more memory than it has to.  It will claim everything
         * between the first address to the end of the last full
         * register. */
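        /* For example, with io_size 2, regspacing 4, and regsize 1 the
         * result is 2*4 - (4 - 1) = 5 bytes: both one-byte registers
         * plus the gap between them, and nothing past the last one. */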
        mapsize = ((info->io_size * info->io.regspacing)
                   - (info->io.regspacing - info->io.regsize));

        if (request_region(*addr, mapsize, DEVICE_NAME) == NULL)
                return -EIO;
        return 0;
}
static int try_init_port(int intf_num, struct smi_info **new_info)
{
        struct smi_info *info;

        if (! ports[intf_num])
                return -ENODEV;

        if (! is_new_interface(intf_num, IPMI_IO_ADDR_SPACE,
                               ports[intf_num]))
                return -ENODEV;

        info = kmalloc(sizeof(*info), GFP_KERNEL);
        if (! info) {
                printk(KERN_ERR "ipmi_si: Could not allocate SI data (1)\n");
                return -ENOMEM;
        }
        memset(info, 0, sizeof(*info));

        info->io_setup = port_setup;
        info->io.info = &(ports[intf_num]);
        info->io.addr = NULL;
        info->io.regspacing = regspacings[intf_num];
        if (! info->io.regspacing)
                info->io.regspacing = DEFAULT_REGSPACING;
        info->io.regsize = regsizes[intf_num];
        if (! info->io.regsize)
                info->io.regsize = DEFAULT_REGSPACING;
        info->io.regshift = regshifts[intf_num];
        info->irq = 0;
        info->irq_setup = NULL;
        *new_info = info;

        if (si_type[intf_num] == NULL)
                si_type[intf_num] = "kcs";

        printk("ipmi_si: Trying \"%s\" at I/O port 0x%x\n",
               si_type[intf_num], ports[intf_num]);
        return 0;
}
static unsigned char mem_inb(struct si_sm_io *io, unsigned int offset)
{
        return readb((io->addr)+(offset * io->regspacing));
}

static void mem_outb(struct si_sm_io *io, unsigned int offset,
                     unsigned char b)
{
        writeb(b, (io->addr)+(offset * io->regspacing));
}

static unsigned char mem_inw(struct si_sm_io *io, unsigned int offset)
{
        return (readw((io->addr)+(offset * io->regspacing)) >> io->regshift)
                & 0xff;
}

static void mem_outw(struct si_sm_io *io, unsigned int offset,
                     unsigned char b)
{
        writeb(b << io->regshift, (io->addr)+(offset * io->regspacing));
}

static unsigned char mem_inl(struct si_sm_io *io, unsigned int offset)
{
        return (readl((io->addr)+(offset * io->regspacing)) >> io->regshift)
                & 0xff;
}

static void mem_outl(struct si_sm_io *io, unsigned int offset,
                     unsigned char b)
{
        writel(b << io->regshift, (io->addr)+(offset * io->regspacing));
}

#ifdef readq
static unsigned char mem_inq(struct si_sm_io *io, unsigned int offset)
{
        return (readq((io->addr)+(offset * io->regspacing)) >> io->regshift)
                & 0xff;
}

static void mem_outq(struct si_sm_io *io, unsigned int offset,
                     unsigned char b)
{
        writeq(b << io->regshift, (io->addr)+(offset * io->regspacing));
}
#endif
static void mem_cleanup(struct smi_info *info)
{
        unsigned long *addr = info->io.info;
        int           mapsize;

        if (info->io.addr) {
                iounmap(info->io.addr);

                mapsize = ((info->io_size * info->io.regspacing)
                           - (info->io.regspacing - info->io.regsize));

                release_mem_region(*addr, mapsize);
        }
        kfree(info);
}
static int mem_setup(struct smi_info *info)
{
        unsigned long *addr = info->io.info;
        int           mapsize;

        if (! addr || (! *addr))
                return -ENODEV;

        info->io_cleanup = mem_cleanup;

        /* Figure out the actual readb/readw/readl/etc routine to use based
           upon the register size. */
        switch (info->io.regsize) {
        case 1:
                info->io.inputb = mem_inb;
                info->io.outputb = mem_outb;
                break;
        case 2:
                info->io.inputb = mem_inw;
                info->io.outputb = mem_outw;
                break;
        case 4:
                info->io.inputb = mem_inl;
                info->io.outputb = mem_outl;
                break;
#ifdef readq
        case 8:
                info->io.inputb = mem_inq;
                info->io.outputb = mem_outq;
                break;
#endif
        default:
                printk("ipmi_si: Invalid register size: %d\n",
                       info->io.regsize);
                return -EINVAL;
        }

        /* Calculate the total amount of memory to claim.  This is an
         * unusual looking calculation, but it avoids claiming any
         * more memory than it has to.  It will claim everything
         * between the first address to the end of the last full
         * register. */
        mapsize = ((info->io_size * info->io.regspacing)
                   - (info->io.regspacing - info->io.regsize));

        if (request_mem_region(*addr, mapsize, DEVICE_NAME) == NULL)
                return -EIO;

        info->io.addr = ioremap(*addr, mapsize);
        if (info->io.addr == NULL) {
                release_mem_region(*addr, mapsize);
                return -EIO;
        }
        return 0;
}
static int try_init_mem(int intf_num, struct smi_info **new_info)
{
        struct smi_info *info;

        if (! addrs[intf_num])
                return -ENODEV;

        if (! is_new_interface(intf_num, IPMI_MEM_ADDR_SPACE,
                               addrs[intf_num]))
                return -ENODEV;

        info = kmalloc(sizeof(*info), GFP_KERNEL);
        if (! info) {
                printk(KERN_ERR "ipmi_si: Could not allocate SI data (2)\n");
                return -ENOMEM;
        }
        memset(info, 0, sizeof(*info));

        info->io_setup = mem_setup;
        info->io.info = &addrs[intf_num];
        info->io.addr = NULL;
        info->io.regspacing = regspacings[intf_num];
        if (! info->io.regspacing)
                info->io.regspacing = DEFAULT_REGSPACING;
        info->io.regsize = regsizes[intf_num];
        if (! info->io.regsize)
                info->io.regsize = DEFAULT_REGSPACING;
        info->io.regshift = regshifts[intf_num];
        info->irq = 0;
        info->irq_setup = NULL;
        *new_info = info;

        if (si_type[intf_num] == NULL)
                si_type[intf_num] = "kcs";

        printk("ipmi_si: Trying \"%s\" at memory address 0x%lx\n",
               si_type[intf_num], addrs[intf_num]);
        return 0;
}
#ifdef CONFIG_ACPI

#include <linux/acpi.h>

/* Once we get an ACPI failure, we don't try any more, because we go
   through the tables sequentially.  Once we don't find a table, there
   are no more. */
static int acpi_failure = 0;

/* For GPE-type interrupts. */
static u32 ipmi_acpi_gpe(void *context)
{
        struct smi_info *smi_info = context;
        unsigned long   flags;
#ifdef DEBUG_TIMING
        struct timeval t;
#endif

        spin_lock_irqsave(&(smi_info->si_lock), flags);

        spin_lock(&smi_info->count_lock);
        smi_info->interrupts++;
        spin_unlock(&smi_info->count_lock);

        if (smi_info->stop_operation)
                goto out;

#ifdef DEBUG_TIMING
        do_gettimeofday(&t);
        printk("**ACPI_GPE: %d.%9.9d\n", t.tv_sec, t.tv_usec);
#endif
        smi_event_handler(smi_info, 0);
 out:
        spin_unlock_irqrestore(&(smi_info->si_lock), flags);

        return ACPI_INTERRUPT_HANDLED;
}

static int acpi_gpe_irq_setup(struct smi_info *info)
{
        acpi_status status;

        if (! info->irq)
                return 0;

        /* FIXME - is level triggered right? */
        status = acpi_install_gpe_handler(NULL,
                                          info->irq,
                                          ACPI_GPE_LEVEL_TRIGGERED,
                                          &ipmi_acpi_gpe,
                                          info);
        if (status != AE_OK) {
                printk(KERN_WARNING
                       "ipmi_si: %s unable to claim ACPI GPE %d,"
                       " running polled\n",
                       DEVICE_NAME, info->irq);
                info->irq = 0;
                return -EINVAL;
        } else {
                printk("  Using ACPI GPE %d\n", info->irq);
                return 0;
        }
}

static void acpi_gpe_irq_cleanup(struct smi_info *info)
{
        if (! info->irq)
                return;

        acpi_remove_gpe_handler(NULL, info->irq, &ipmi_acpi_gpe);
}
/*
 * Defined at
 * http://h21007.www2.hp.com/dspp/files/unprotected/devresource/Docs/TechPapers/IA64/hpspmi.pdf
 */
struct SPMITable {
        s8	Signature[4];
        u32	Length;
        u8	Revision;
        u8	Checksum;
        s8	OEMID[6];
        s8	OEMTableID[8];
        s8	OEMRevision[4];
        s8	CreatorID[4];
        s8	CreatorRevision[4];
        u8	InterfaceType;
        u8	IPMIlegacy;
        s16	SpecificationRevision;

        /*
         * Bit 0 - SCI interrupt supported
         * Bit 1 - I/O APIC/SAPIC
         */
        u8	InterruptType;

        /* If bit 0 of InterruptType is set, then this is the SCI
           interrupt in the GPEx_STS register. */
        u8	GPE;

        s16	Reserved;

        /* If bit 1 of InterruptType is set, then this is the I/O
           APIC/SAPIC interrupt. */
        u32	GlobalSystemInterrupt;

        /* The actual register address. */
        struct acpi_generic_address addr;

        u8	UID[4];

        s8      spmi_id[1]; /* A '\0' terminated array starts here. */
};
static int try_init_acpi(int intf_num, struct smi_info **new_info)
{
        struct smi_info  *info;
        acpi_status      status;
        struct SPMITable *spmi;
        char             *io_type;
        u8               addr_space;

        if (acpi_disabled)
                return -ENODEV;

        if (acpi_failure)
                return -ENODEV;

        status = acpi_get_firmware_table("SPMI", intf_num+1,
                                         ACPI_LOGICAL_ADDRESSING,
                                         (struct acpi_table_header **) &spmi);
        if (status != AE_OK) {
                acpi_failure = 1;
                return -ENODEV;
        }

        if (spmi->IPMIlegacy != 1) {
                printk(KERN_INFO "IPMI: Bad SPMI legacy %d\n", spmi->IPMIlegacy);
                return -ENODEV;
        }

        if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
                addr_space = IPMI_MEM_ADDR_SPACE;
        else
                addr_space = IPMI_IO_ADDR_SPACE;
        if (! is_new_interface(-1, addr_space, spmi->addr.address))
                return -ENODEV;

        if (! spmi->addr.register_bit_width) {
                acpi_failure = 1;
                return -ENODEV;
        }

        /* Figure out the interface type. */
        switch (spmi->InterfaceType)
        {
        case 1:	/* KCS */
                si_type[intf_num] = "kcs";
                break;

        case 2:	/* SMIC */
                si_type[intf_num] = "smic";
                break;

        case 3:	/* BT */
                si_type[intf_num] = "bt";
                break;

        default:
                printk(KERN_INFO "ipmi_si: Unknown ACPI/SPMI SI type %d\n",
                       spmi->InterfaceType);
                return -EIO;
        }

        info = kmalloc(sizeof(*info), GFP_KERNEL);
        if (! info) {
                printk(KERN_ERR "ipmi_si: Could not allocate SI data (3)\n");
                return -ENOMEM;
        }
        memset(info, 0, sizeof(*info));

        if (spmi->InterruptType & 1) {
                /* We've got a GPE interrupt. */
                info->irq = spmi->GPE;
                info->irq_setup = acpi_gpe_irq_setup;
                info->irq_cleanup = acpi_gpe_irq_cleanup;
        } else if (spmi->InterruptType & 2) {
                /* We've got an APIC/SAPIC interrupt. */
                info->irq = spmi->GlobalSystemInterrupt;
                info->irq_setup = std_irq_setup;
                info->irq_cleanup = std_irq_cleanup;
        } else {
                /* Use the default interrupt setting. */
                info->irq = 0;
                info->irq_setup = NULL;
        }

        if (spmi->addr.register_bit_width) {
                /* A (hopefully) properly formed register bit width. */
                regspacings[intf_num] = spmi->addr.register_bit_width / 8;
                info->io.regspacing = spmi->addr.register_bit_width / 8;
        } else {
                /* Some broken systems get this wrong and set the value
                 * to zero.  Assume it is the default spacing.  If that
                 * is wrong, too bad, the vendor should fix the tables. */
                regspacings[intf_num] = DEFAULT_REGSPACING;
                info->io.regspacing = DEFAULT_REGSPACING;
        }
        regsizes[intf_num] = regspacings[intf_num];
        info->io.regsize = regsizes[intf_num];
        regshifts[intf_num] = spmi->addr.register_bit_offset;
        info->io.regshift = regshifts[intf_num];

        if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
                io_type = "memory";
                info->io_setup = mem_setup;
                addrs[intf_num] = spmi->addr.address;
                info->io.info = &(addrs[intf_num]);
        } else if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
                io_type = "I/O";
                info->io_setup = port_setup;
                ports[intf_num] = spmi->addr.address;
                info->io.info = &(ports[intf_num]);
        } else {
                kfree(info);
                printk("ipmi_si: Unknown ACPI I/O Address type\n");
                return -EIO;
        }

        *new_info = info;

        printk("ipmi_si: ACPI/SPMI specifies \"%s\" %s SI @ 0x%lx\n",
               si_type[intf_num], io_type, (unsigned long) spmi->addr.address);
        return 0;
}
#endif
#ifdef CONFIG_X86
typedef struct dmi_ipmi_data
{
        u8              type;
        u8              addr_space;
        unsigned long   base_addr;
        u8              irq;
        u8              offset;
        u8              slave_addr;
} dmi_ipmi_data_t;

static dmi_ipmi_data_t dmi_data[SI_MAX_DRIVERS];
static int dmi_data_entries;
static int __init decode_dmi(struct dmi_header *dm, int intf_num)
{
        u8              *data = (u8 *)dm;
        unsigned long   base_addr;
        u8              reg_spacing;
        u8              len = dm->length;
        dmi_ipmi_data_t *ipmi_data = dmi_data+intf_num;
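        /* This walks an SMBIOS "IPMI Device Information" (type 38)
           record; per that spec, byte 4 is the interface type, byte 6
           the BMC I2C slave address, bytes 8-15 the base address, byte
           0x10 the base address modifier, and byte 0x11 the interrupt
           number. */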
        ipmi_data->type = data[4];

        memcpy(&base_addr, data+8, sizeof(unsigned long));
        if (len >= 0x11) {
                if (base_addr & 1) {
                        /* I/O */
                        base_addr &= 0xFFFE;
                        ipmi_data->addr_space = IPMI_IO_ADDR_SPACE;
                }
                else {
                        /* Memory */
                        ipmi_data->addr_space = IPMI_MEM_ADDR_SPACE;
                }
                /* If bit 4 of byte 0x10 is set, then the lsb for the address
                   is odd. */
                ipmi_data->base_addr = base_addr | ((data[0x10] & 0x10) >> 4);

                ipmi_data->irq = data[0x11];

                /* The top two bits of byte 0x10 hold the register spacing. */
                reg_spacing = (data[0x10] & 0xC0) >> 6;
                switch(reg_spacing){
                case 0x00: /* Byte boundaries */
                        ipmi_data->offset = 1;
                        break;
                case 0x01: /* 32-bit boundaries */
                        ipmi_data->offset = 4;
                        break;
                case 0x02: /* 16-byte boundaries */
                        ipmi_data->offset = 16;
                        break;
                default:
                        /* Some other interface, just ignore it. */
                        return -EIO;
                }
        } else {
                /* Old DMI spec. */
                /* Note that technically, the lower bit of the base
                 * address should be 1 if the address is I/O and 0 if
                 * the address is in memory.  So many systems get that
                 * wrong (and all that I have seen are I/O) so we just
                 * ignore that bit and assume I/O.  Systems that use
                 * memory should use the newer spec, anyway. */
                ipmi_data->base_addr = base_addr & 0xfffe;
                ipmi_data->addr_space = IPMI_IO_ADDR_SPACE;
                ipmi_data->offset = 1;
        }

        ipmi_data->slave_addr = data[6];

        if (is_new_interface(-1, ipmi_data->addr_space,ipmi_data->base_addr)) {
                dmi_data_entries++;
                return 0;
        }

        memset(ipmi_data, 0, sizeof(dmi_ipmi_data_t));

        return -1;
}
static void __init dmi_find_bmc(void)
{
        struct dmi_device *dev = NULL;
        int               intf_num = 0;

        while ((dev = dmi_find_device(DMI_DEV_TYPE_IPMI, NULL, dev))) {
                if (intf_num >= SI_MAX_DRIVERS)
                        break;

                decode_dmi((struct dmi_header *) dev->device_data, intf_num++);
        }
}
static int try_init_smbios(int intf_num, struct smi_info **new_info)
{
        struct smi_info *info;
        dmi_ipmi_data_t *ipmi_data = dmi_data+intf_num;
        char            *io_type;

        if (intf_num >= dmi_data_entries)
                return -ENODEV;

        switch (ipmi_data->type) {
        case 0x01: /* KCS */
                si_type[intf_num] = "kcs";
                break;
        case 0x02: /* SMIC */
                si_type[intf_num] = "smic";
                break;
        case 0x03: /* BT */
                si_type[intf_num] = "bt";
                break;
        default:
                return -EIO;
        }

        info = kmalloc(sizeof(*info), GFP_KERNEL);
        if (! info) {
                printk(KERN_ERR "ipmi_si: Could not allocate SI data (4)\n");
                return -ENOMEM;
        }
        memset(info, 0, sizeof(*info));

        if (ipmi_data->addr_space == 1) {
                io_type = "memory";
                info->io_setup = mem_setup;
                addrs[intf_num] = ipmi_data->base_addr;
                info->io.info = &(addrs[intf_num]);
        } else if (ipmi_data->addr_space == 2) {
                io_type = "I/O";
                info->io_setup = port_setup;
                ports[intf_num] = ipmi_data->base_addr;
                info->io.info = &(ports[intf_num]);
        } else {
                kfree(info);
                printk("ipmi_si: Unknown SMBIOS I/O Address type.\n");
                return -EIO;
        }

        regspacings[intf_num] = ipmi_data->offset;
        info->io.regspacing = regspacings[intf_num];
        if (! info->io.regspacing)
                info->io.regspacing = DEFAULT_REGSPACING;
        info->io.regsize = DEFAULT_REGSPACING;
        info->io.regshift = regshifts[intf_num];

        info->slave_addr = ipmi_data->slave_addr;

        irqs[intf_num] = ipmi_data->irq;

        *new_info = info;

        printk("ipmi_si: Found SMBIOS-specified state machine at %s"
               " address 0x%lx, slave address 0x%x\n",
               io_type, (unsigned long)ipmi_data->base_addr,
               ipmi_data->slave_addr);
        return 0;
}
#endif /* CONFIG_X86 */
#ifdef CONFIG_PCI

#define PCI_ERMC_CLASSCODE  0x0C0700
#define PCI_HP_VENDOR_ID    0x103C
#define PCI_MMC_DEVICE_ID   0x121A
#define PCI_MMC_ADDR_CW     0x10

/* Avoid more than one attempt to probe pci smic. */
static int pci_smic_checked = 0;

static int find_pci_smic(int intf_num, struct smi_info **new_info)
{
        struct smi_info *info;
        int             error;
        struct pci_dev  *pci_dev = NULL;
        u16             base_addr;
        int             fe_rmc = 0;

        if (pci_smic_checked)
                return -ENODEV;

        pci_smic_checked = 1;

        pci_dev = pci_get_device(PCI_HP_VENDOR_ID, PCI_MMC_DEVICE_ID, NULL);
        if (! pci_dev) {
                pci_dev = pci_get_class(PCI_ERMC_CLASSCODE, NULL);
                if (pci_dev && (pci_dev->subsystem_vendor == PCI_HP_VENDOR_ID))
                        fe_rmc = 1;
                else
                        return -ENODEV;
        }

        error = pci_read_config_word(pci_dev, PCI_MMC_ADDR_CW, &base_addr);
        if (error)
        {
                pci_dev_put(pci_dev);
                printk(KERN_ERR
                       "ipmi_si: pci_read_config_word() failed (%d).\n",
                       error);
                return -ENODEV;
        }

        /* Bit 0: 1 specifies programmed I/O, 0 specifies memory mapped I/O */
        if (! (base_addr & 0x0001))
        {
                pci_dev_put(pci_dev);
                printk(KERN_ERR
                       "ipmi_si: memory mapped I/O not supported for PCI"
                       " smic.\n");
                return -ENODEV;
        }

        base_addr &= 0xFFFE;
        if (! fe_rmc)
                /* Data register starts at base address + 1 in eRMC */
                ++base_addr;

        if (! is_new_interface(-1, IPMI_IO_ADDR_SPACE, base_addr)) {
                pci_dev_put(pci_dev);
                return -ENODEV;
        }

        info = kmalloc(sizeof(*info), GFP_KERNEL);
        if (! info) {
                pci_dev_put(pci_dev);
                printk(KERN_ERR "ipmi_si: Could not allocate SI data (5)\n");
                return -ENOMEM;
        }
        memset(info, 0, sizeof(*info));

        info->io_setup = port_setup;
        ports[intf_num] = base_addr;
        info->io.info = &(ports[intf_num]);
        info->io.regspacing = regspacings[intf_num];
        if (! info->io.regspacing)
                info->io.regspacing = DEFAULT_REGSPACING;
        info->io.regsize = DEFAULT_REGSPACING;
        info->io.regshift = regshifts[intf_num];

        *new_info = info;

        irqs[intf_num] = pci_dev->irq;
        si_type[intf_num] = "smic";

        printk("ipmi_si: Found PCI SMIC at I/O address 0x%lx\n",
               (long unsigned int) base_addr);

        pci_dev_put(pci_dev);
        return 0;
}
#endif /* CONFIG_PCI */
static int try_init_plug_and_play(int intf_num, struct smi_info **new_info)
{
#ifdef CONFIG_PCI
        if (find_pci_smic(intf_num, new_info) == 0)
                return 0;
#endif
        /* Include other methods here. */

        return -ENODEV;
}
static int try_get_dev_id(struct smi_info *smi_info)
{
        unsigned char      msg[2];
        unsigned char      *resp;
        unsigned long      resp_len;
        enum si_sm_result  smi_result;
        int                rv = 0;

        resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
        if (! resp)
                return -ENOMEM;

        /* Do a Get Device ID command, since it comes back with some
           useful info. */
        msg[0] = IPMI_NETFN_APP_REQUEST << 2;
        msg[1] = IPMI_GET_DEVICE_ID_CMD;
        smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);

        smi_result = smi_info->handlers->event(smi_info->si_sm, 0);
        for (;;)
        {
                if (smi_result == SI_SM_CALL_WITH_DELAY ||
                    smi_result == SI_SM_CALL_WITH_TICK_DELAY) {
                        schedule_timeout_uninterruptible(1);
                        smi_result = smi_info->handlers->event(
                                smi_info->si_sm, 100);
                }
                else if (smi_result == SI_SM_CALL_WITHOUT_DELAY)
                {
                        smi_result = smi_info->handlers->event(
                                smi_info->si_sm, 0);
                }
                else
                        break;
        }
        if (smi_result == SI_SM_HOSED) {
                /* We couldn't get the state machine to run, so whatever's at
                   the port is probably not an IPMI SMI interface. */
                rv = -ENODEV;
                goto out;
        }

        /* Otherwise, we got some data. */
        resp_len = smi_info->handlers->get_result(smi_info->si_sm,
                                                  resp, IPMI_MAX_MSG_LENGTH);
        if (resp_len < 6) {
                /* That's odd, it should be longer. */
                rv = -EINVAL;
                goto out;
        }

        if ((resp[1] != IPMI_GET_DEVICE_ID_CMD) || (resp[2] != 0)) {
                /* That's odd, it shouldn't be able to fail. */
                rv = -EINVAL;
                goto out;
        }

        /* Record info from the get device id, in case we need it. */
        memcpy(&smi_info->device_id, &resp[3],
               min_t(unsigned long, resp_len-3, sizeof(smi_info->device_id)));

 out:
        kfree(resp);
        return rv;
}
static int type_file_read_proc(char *page, char **start, off_t off,
                               int count, int *eof, void *data)
{
        char            *out = (char *) page;
        struct smi_info *smi = data;

        switch (smi->si_type) {
        case SI_KCS:
                return sprintf(out, "kcs\n");
        case SI_SMIC:
                return sprintf(out, "smic\n");
        case SI_BT:
                return sprintf(out, "bt\n");
        default:
                return 0;
        }
}
static int stat_file_read_proc(char *page, char **start, off_t off,
			       int count, int *eof, void *data)
{
	char            *out = (char *) page;
	struct smi_info *smi = data;

	out += sprintf(out, "interrupts_enabled: %d\n",
		       smi->irq && ! smi->interrupt_disabled);
	out += sprintf(out, "short_timeouts: %ld\n",
		       smi->short_timeouts);
	out += sprintf(out, "long_timeouts: %ld\n",
		       smi->long_timeouts);
	out += sprintf(out, "timeout_restarts: %ld\n",
		       smi->timeout_restarts);
	out += sprintf(out, "idles: %ld\n",
		       smi->idles);
	out += sprintf(out, "interrupts: %ld\n",
		       smi->interrupts);
	out += sprintf(out, "attentions: %ld\n",
		       smi->attentions);
	out += sprintf(out, "flag_fetches: %ld\n",
		       smi->flag_fetches);
	out += sprintf(out, "hosed_count: %ld\n",
		       smi->hosed_count);
	out += sprintf(out, "complete_transactions: %ld\n",
		       smi->complete_transactions);
	out += sprintf(out, "events: %ld\n",
		       smi->events);
	out += sprintf(out, "watchdog_pretimeouts: %ld\n",
		       smi->watchdog_pretimeouts);
	out += sprintf(out, "incoming_messages: %ld\n",
		       smi->incoming_messages);

	return (out - ((char *) page));
}
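/*
 * Illustrative (not captured) output from reading the "si_stats"
 * proc file created below in init_one_smi():
 *
 *   interrupts_enabled: 1
 *   short_timeouts: 12
 *   long_timeouts: 4096
 *   timeout_restarts: 0
 *   idles: 0
 *   interrupts: 530
 *   attentions: 0
 *   flag_fetches: 0
 *   hosed_count: 0
 *   complete_transactions: 530
 *   events: 2
 *   watchdog_pretimeouts: 0
 *   incoming_messages: 0
 */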
/*
 * oem_data_avail_to_receive_msg_avail
 * @info - smi_info structure with msg_flags set
 *
 * Converts flags from OEM_DATA_AVAIL to RECEIVE_MSG_AVAIL
 * Returns 1 indicating need to re-run handle_flags().
 */
static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info)
{
	smi_info->msg_flags = ((smi_info->msg_flags & ~OEM_DATA_AVAIL) |
			       RECEIVE_MSG_AVAIL);
	return 1;
}
/*
 * setup_dell_poweredge_oem_data_handler
 * @info - smi_info.device_id must be populated
 *
 * Systems that match, but have firmware version < 1.40, may assert
 * OEM0_DATA_AVAIL on their own, without being told via Set Flags that
 * it's safe to do so.  Such systems will de-assert OEM1_DATA_AVAIL
 * upon receipt of IPMI_GET_MSG_CMD, so we should treat these flags
 * as RECEIVE_MSG_AVAIL instead.
 *
 * As Dell has no plans to release IPMI 1.5 firmware that *ever*
 * asserts the OEM[012] bits, and if it did, the driver would have to
 * change to handle that properly, we don't actually check for the
 * firmware version.
 * Device ID = 0x20                BMC on PowerEdge 8G servers
 * Device Revision = 0x80
 * Firmware Revision1 = 0x01       BMC version 1.40
 * Firmware Revision2 = 0x40       BCD encoded
 * IPMI Version = 0x51             IPMI 1.5
 * Manufacturer ID = A2 02 00      Dell IANA
 *
 * Additionally, PowerEdge systems with IPMI < 1.5 may also assert
 * OEM0_DATA_AVAIL and need to be treated as RECEIVE_MSG_AVAIL.
 */
#define DELL_POWEREDGE_8G_BMC_DEVICE_ID  0x20
#define DELL_POWEREDGE_8G_BMC_DEVICE_REV 0x80
#define DELL_POWEREDGE_8G_BMC_IPMI_VERSION 0x51
#define DELL_IANA_MFR_ID {0xA2, 0x02, 0x00}
static void setup_dell_poweredge_oem_data_handler(struct smi_info *smi_info)
{
	struct ipmi_device_id *id = &smi_info->device_id;
	const char mfr[3] = DELL_IANA_MFR_ID;

	if (! memcmp(mfr, id->manufacturer_id, sizeof(mfr))) {
		if (id->device_id       == DELL_POWEREDGE_8G_BMC_DEVICE_ID &&
		    id->device_revision == DELL_POWEREDGE_8G_BMC_DEVICE_REV &&
		    id->ipmi_version    == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) {
			smi_info->oem_data_avail_handler =
				oem_data_avail_to_receive_msg_avail;
		}
		else if (ipmi_version_major(id) < 1 ||
			 (ipmi_version_major(id) == 1 &&
			  ipmi_version_minor(id) < 5)) {
			smi_info->oem_data_avail_handler =
				oem_data_avail_to_receive_msg_avail;
		}
	}
}
#define CANNOT_RETURN_REQUESTED_LENGTH 0xCA
static void return_hosed_msg_badsize(struct smi_info *smi_info)
{
	struct ipmi_smi_msg *msg = smi_info->curr_msg;

	/* Make it a response */
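	/* The netfn lives in the top six bits of data[0] (netfn << 2),
	   and a response netfn is the request netfn plus one, so
	   OR-ing in 4 turns the request's netfn into the matching
	   response netfn. */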
	msg->rsp[0] = msg->data[0] | 4;
	msg->rsp[1] = msg->data[1];
	msg->rsp[2] = CANNOT_RETURN_REQUESTED_LENGTH;
	msg->rsp_size = 3;

	smi_info->curr_msg = NULL;
	deliver_recv_msg(smi_info, msg);
}
/*
 * dell_poweredge_bt_xaction_handler
 * @info - smi_info.device_id must be populated
 *
 * Dell PowerEdge servers with the BT interface (x6xx and 1750) will
 * not respond to a Get SDR command if the length of the data
 * requested is exactly 0x3A, which leads to command timeouts and no
 * data returned.  This intercepts such commands, and causes userspace
 * callers to try again with a different-sized buffer, which succeeds.
 */
#define STORAGE_NETFN 0x0A
#define STORAGE_CMD_GET_SDR 0x23
static int dell_poweredge_bt_xaction_handler(struct notifier_block *self,
					     unsigned long unused,
					     void *in)
{
	struct smi_info *smi_info = in;
	unsigned char   *data = smi_info->curr_msg->data;
	unsigned int    size  = smi_info->curr_msg->data_size;
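	/* In a Get SDR request the netfn sits in the top six bits of
	   data[0], data[1] is the command, and data[7] is the
	   bytes-to-read field, the request length that the firmware
	   mishandles when it is exactly 0x3A. */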
	if (size >= 8 &&
	    (data[0]>>2) == STORAGE_NETFN &&
	    data[1] == STORAGE_CMD_GET_SDR &&
	    data[7] == 0x3A) {
		return_hosed_msg_badsize(smi_info);
		return NOTIFY_STOP;
	}
	return NOTIFY_DONE;
}
static struct notifier_block dell_poweredge_bt_xaction_notifier = {
	.notifier_call	= dell_poweredge_bt_xaction_handler,
};
/*
 * setup_dell_poweredge_bt_xaction_handler
 * @info - smi_info.device_id must be filled in already
 *
 * Registers dell_poweredge_bt_xaction_notifier when we know the
 * transaction intercept is needed for this system.
 */
static void
setup_dell_poweredge_bt_xaction_handler(struct smi_info *smi_info)
{
	struct ipmi_device_id *id = &smi_info->device_id;
	const char mfr[3] = DELL_IANA_MFR_ID;

	if (! memcmp(mfr, id->manufacturer_id, sizeof(mfr)) &&
	    smi_info->si_type == SI_BT)
		register_xaction_notifier(&dell_poweredge_bt_xaction_notifier);
}
/*
 * setup_oem_data_handler
 * @info - smi_info.device_id must be filled in already
 *
 * Fills in smi_info.oem_data_avail_handler
 * when we know what function to use there.
 */
static void setup_oem_data_handler(struct smi_info *smi_info)
{
	setup_dell_poweredge_oem_data_handler(smi_info);
}

static void setup_xaction_handlers(struct smi_info *smi_info)
{
	setup_dell_poweredge_bt_xaction_handler(smi_info);
}
/* Returns 0 if initialized, or negative on an error. */
static int init_one_smi(int intf_num, struct smi_info **smi)
{
	int             rv;
	struct smi_info *new_smi;
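	/* Probe methods in order, most specific first: explicitly
	   configured memory or port addresses, then (only when
	   defaults are allowed) ACPI, SMBIOS, and plug and play. */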
	rv = try_init_mem(intf_num, &new_smi);
	if (rv)
		rv = try_init_port(intf_num, &new_smi);
#ifdef CONFIG_ACPI
	if (rv && si_trydefaults)
		rv = try_init_acpi(intf_num, &new_smi);
#endif
#ifdef CONFIG_X86
	if (rv && si_trydefaults)
		rv = try_init_smbios(intf_num, &new_smi);
#endif
	if (rv && si_trydefaults)
		rv = try_init_plug_and_play(intf_num, &new_smi);

	if (rv)
		return rv;
	/* So we know not to free it unless we have allocated one. */
	new_smi->intf = NULL;
	new_smi->si_sm = NULL;
	new_smi->handlers = NULL;

	if (! new_smi->irq_setup) {
		new_smi->irq = irqs[intf_num];
		new_smi->irq_setup = std_irq_setup;
		new_smi->irq_cleanup = std_irq_cleanup;
	}

	/* Default to KCS if no type is specified. */
	if (si_type[intf_num] == NULL) {
		if (si_trydefaults)
			si_type[intf_num] = "kcs";
		else {
			rv = -EINVAL;
			goto out_err;
		}
	}

	/* Set up the state machine to use. */
	if (strcmp(si_type[intf_num], "kcs") == 0) {
		new_smi->handlers = &kcs_smi_handlers;
		new_smi->si_type = SI_KCS;
	} else if (strcmp(si_type[intf_num], "smic") == 0) {
		new_smi->handlers = &smic_smi_handlers;
		new_smi->si_type = SI_SMIC;
	} else if (strcmp(si_type[intf_num], "bt") == 0) {
		new_smi->handlers = &bt_smi_handlers;
		new_smi->si_type = SI_BT;
	} else {
		/* No support for anything else yet. */
		rv = -EIO;
		goto out_err;
	}

	/* Allocate the state machine's data and initialize it. */
	new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
	if (! new_smi->si_sm) {
		printk(" Could not allocate state machine memory\n");
		rv = -ENOMEM;
		goto out_err;
	}
	new_smi->io_size = new_smi->handlers->init_data(new_smi->si_sm,
							&new_smi->io);
	/* Now that we know the I/O size, we can set up the I/O. */
	rv = new_smi->io_setup(new_smi);
	if (rv) {
		printk(" Could not set up I/O space\n");
		goto out_err;
	}

	spin_lock_init(&(new_smi->si_lock));
	spin_lock_init(&(new_smi->msg_lock));
	spin_lock_init(&(new_smi->count_lock));

	/* Do low-level detection first. */
	if (new_smi->handlers->detect(new_smi->si_sm)) {
		rv = -ENODEV;
		goto out_err;
	}

	/* Attempt a get device id command.  If it fails, we probably
	   don't have a SMI here. */
	rv = try_get_dev_id(new_smi);
	if (rv)
		goto out_err;

	setup_oem_data_handler(new_smi);
	setup_xaction_handlers(new_smi);

	/* Try to claim any interrupts. */
	new_smi->irq_setup(new_smi);

	INIT_LIST_HEAD(&(new_smi->xmit_msgs));
	INIT_LIST_HEAD(&(new_smi->hp_xmit_msgs));
	new_smi->curr_msg = NULL;
	atomic_set(&new_smi->req_events, 0);
	new_smi->run_to_completion = 0;

	new_smi->interrupt_disabled = 0;
	new_smi->timer_stopped = 0;
	new_smi->stop_operation = 0;

	/* Start clearing the flags before we enable interrupts or the
	   timer to avoid racing with the timer. */
	start_clear_flags(new_smi);
	/* IRQ is defined to be set when non-zero. */
	if (new_smi->irq)
		new_smi->si_state = SI_CLEARING_FLAGS_THEN_SET_IRQ;

	/* The ipmi_register_smi() code does some operations to
	   determine the channel information, so we must be ready to
	   handle operations before it is called.  This means we have
	   to stop the timer if we get an error after this point. */
	init_timer(&(new_smi->si_timer));
	new_smi->si_timer.data = (long) new_smi;
	new_smi->si_timer.function = smi_timeout;
	new_smi->last_timeout_jiffies = jiffies;
	new_smi->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
	add_timer(&(new_smi->si_timer));
	rv = ipmi_register_smi(&handlers,
			       new_smi,
			       ipmi_version_major(&new_smi->device_id),
			       ipmi_version_minor(&new_smi->device_id),
			       new_smi->slave_addr,
			       &(new_smi->intf));
	if (rv) {
		printk(KERN_ERR
		       "ipmi_si: Unable to register device: error %d\n",
		       rv);
		goto out_err_stop_timer;
	}

	rv = ipmi_smi_add_proc_entry(new_smi->intf, "type",
				     type_file_read_proc, NULL,
				     new_smi, THIS_MODULE);
	if (rv) {
		printk(KERN_ERR
		       "ipmi_si: Unable to create proc entry: %d\n",
		       rv);
		goto out_err_stop_timer;
	}

	rv = ipmi_smi_add_proc_entry(new_smi->intf, "si_stats",
				     stat_file_read_proc, NULL,
				     new_smi, THIS_MODULE);
	if (rv) {
		printk(KERN_ERR
		       "ipmi_si: Unable to create proc entry: %d\n",
		       rv);
		goto out_err_stop_timer;
	}

	*smi = new_smi;

	printk(" IPMI %s interface initialized\n", si_type[intf_num]);

	return 0;
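	/* The error paths below unwind in reverse order of setup:
	   stop the timer first, then unregister the interface,
	   release the IRQ, wait out any handlers still running, and
	   finally free the state machine and the I/O mapping. */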
 out_err_stop_timer:
	new_smi->stop_operation = 1;

	/* Wait for the timer to stop.  This avoids problems with race
	   conditions removing the timer here. */
	while (!new_smi->timer_stopped)
		schedule_timeout_uninterruptible(1);

 out_err:
	if (new_smi->intf)
		ipmi_unregister_smi(new_smi->intf);

	new_smi->irq_cleanup(new_smi);

	/* Wait until we know that any interrupt handlers that might
	   have been running are done, before we free the interrupt. */
	synchronize_sched();

	if (new_smi->si_sm) {
		if (new_smi->handlers)
			new_smi->handlers->cleanup(new_smi->si_sm);
		kfree(new_smi->si_sm);
	}
	new_smi->io_cleanup(new_smi);

	return rv;
}
static __init int init_ipmi_si(void)
{
	int  rv = 0;
	int  pos = 0;
	int  i;
	char *str;

	if (initialized)
		return 0;
	initialized = 1;

	/* Parse out the si_type string into its components. */
	str = si_type_str;
	if (*str != '\0') {
		for (i = 0; (i < SI_MAX_PARMS) && (*str != '\0'); i++) {
			si_type[i] = str;
			str = strchr(str, ',');
			if (str) {
				*str = '\0';
				str++;
			} else {
				break;
			}
		}
	}
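	/* Illustrative example: with si_type_str set to "kcs,smic"
	   the loop above leaves si_type[0] = "kcs" and si_type[1] =
	   "smic"; the commas are overwritten in place with NUL
	   terminators. */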
	printk(KERN_INFO "IPMI System Interface driver.\n");

#ifdef CONFIG_X86
	dmi_find_bmc();
#endif

	rv = init_one_smi(0, &(smi_infos[pos]));
	if (rv && ! ports[0] && si_trydefaults) {
		/* If we are trying defaults and the initial port is
		   not set, then set it. */
		si_type[0] = "kcs";
		ports[0] = DEFAULT_KCS_IO_PORT;
		rv = init_one_smi(0, &(smi_infos[pos]));
		if (rv) {
			/* No KCS - try SMIC */
			si_type[0] = "smic";
			ports[0] = DEFAULT_SMIC_IO_PORT;
			rv = init_one_smi(0, &(smi_infos[pos]));
		}
		if (rv) {
			/* No SMIC - try BT */
			si_type[0] = "bt";
			ports[0] = DEFAULT_BT_IO_PORT;
			rv = init_one_smi(0, &(smi_infos[pos]));
		}
	}
	if (rv == 0)
		pos++;
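	/* pos only advances on success, so smi_infos[] stays densely
	   packed from the front.  The default-port cascade above is
	   only tried for the first interface. */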
	for (i = 1; i < SI_MAX_PARMS; i++) {
		rv = init_one_smi(i, &(smi_infos[pos]));
		if (rv == 0)
			pos++;
	}

	if (smi_infos[0] == NULL) {
		printk("ipmi_si: Unable to find any System Interface(s)\n");
		return -ENODEV;
	}

	return 0;
}
module_init(init_ipmi_si);
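/*
 * Illustrative usage, assuming the module parameter names declared
 * earlier in this file (e.g. "type" and "ports"):
 *
 *   modprobe ipmi_si type=kcs ports=0xca2
 *
 * With no parameters and defaults enabled, the init code above falls
 * back to probing the standard KCS, SMIC, and BT ports in turn.
 */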
static void __exit cleanup_one_si(struct smi_info *to_clean)
{
	int           rv;
	unsigned long flags;

	if (! to_clean)
		return;

	/* Tell the timer and interrupt handlers that we are shutting
	   down. */
	spin_lock_irqsave(&(to_clean->si_lock), flags);
	spin_lock(&(to_clean->msg_lock));

	to_clean->stop_operation = 1;

	to_clean->irq_cleanup(to_clean);

	spin_unlock(&(to_clean->msg_lock));
	spin_unlock_irqrestore(&(to_clean->si_lock), flags);

	/* Wait until we know that any interrupt handlers that might
	   have been running are done, before we free the interrupt. */
	synchronize_sched();

	/* Wait for the timer to stop.  This avoids problems with race
	   conditions removing the timer here. */
	while (!to_clean->timer_stopped)
		schedule_timeout_uninterruptible(1);

	/* Interrupts and timeouts are stopped, now make sure the
	   interface is in a clean state. */
	while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
		poll(to_clean);
		schedule_timeout_uninterruptible(1);
	}

	rv = ipmi_unregister_smi(to_clean->intf);
	if (rv) {
		printk(KERN_ERR
		       "ipmi_si: Unable to unregister device: errno=%d\n",
		       rv);
	}

	to_clean->handlers->cleanup(to_clean->si_sm);

	kfree(to_clean->si_sm);

	to_clean->io_cleanup(to_clean);
}
static __exit void cleanup_ipmi_si(void)
{
	int i;

	if (! initialized)
		return;

	for (i = 0; i < SI_MAX_DRIVERS; i++) {
		cleanup_one_si(smi_infos[i]);
	}
}
module_exit(cleanup_ipmi_si);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
MODULE_DESCRIPTION("Interface to the IPMI driver for the KCS, SMIC, and BT system interfaces.");