drivers/misc/hpilo.c
/*
 * Driver for the HP iLO management processor.
 *
 * Copyright (C) 2008 Hewlett-Packard Development Company, L.P.
 *	David Altobelli <david.altobelli@hp.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/file.h>
#include <linux/cdev.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include "hpilo.h"

static struct class *ilo_class;
static unsigned int ilo_major;
static char ilo_hwdev[MAX_ILO_DEV];
static inline int get_entry_id(int entry)
{
	return (entry & ENTRY_MASK_DESCRIPTOR) >> ENTRY_BITPOS_DESCRIPTOR;
}

static inline int get_entry_len(int entry)
{
	return ((entry & ENTRY_MASK_QWORDS) >> ENTRY_BITPOS_QWORDS) << 3;
}

static inline int mk_entry(int id, int len)
{
	int qlen = len & 7 ? (len >> 3) + 1 : len >> 3;
	return id << ENTRY_BITPOS_DESCRIPTOR | qlen << ENTRY_BITPOS_QWORDS;
}

static inline int desc_mem_sz(int nr_entry)
{
	return nr_entry << L2_QENTRY_SZ;
}

/*
 * FIFO queues, shared with hardware.
 *
 * If a queue has empty slots, an entry is added to the queue tail,
 * and that entry is marked as occupied.
 * Entries can be dequeued from the head of the list, when the device
 * has marked the entry as consumed.
 *
 * Returns true on successful queue/dequeue, false on failure.
 */
static int fifo_enqueue(struct ilo_hwinfo *hw, char *fifobar, int entry)
{
	struct fifo *fifo_q = FIFOBARTOHANDLE(fifobar);
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&hw->fifo_lock, flags);
	if (!(fifo_q->fifobar[(fifo_q->tail + 1) & fifo_q->imask]
	      & ENTRY_MASK_O)) {
		fifo_q->fifobar[fifo_q->tail & fifo_q->imask] |=
			(entry & ENTRY_MASK_NOSTATE) | fifo_q->merge;
		fifo_q->tail += 1;
		ret = 1;
	}
	spin_unlock_irqrestore(&hw->fifo_lock, flags);

	return ret;
}
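/* Remove the entry at the queue head once the device has marked it consumed. */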
static int fifo_dequeue(struct ilo_hwinfo *hw, char *fifobar, int *entry)
{
	struct fifo *fifo_q = FIFOBARTOHANDLE(fifobar);
	unsigned long flags;
	int ret = 0;
	u64 c;

	spin_lock_irqsave(&hw->fifo_lock, flags);
	c = fifo_q->fifobar[fifo_q->head & fifo_q->imask];
	if (c & ENTRY_MASK_C) {
		if (entry)
			*entry = c & ENTRY_MASK_NOSTATE;

		fifo_q->fifobar[fifo_q->head & fifo_q->imask] =
			(c | ENTRY_MASK) + 1;
		fifo_q->head += 1;
		ret = 1;
	}
	spin_unlock_irqrestore(&hw->fifo_lock, flags);

	return ret;
}
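/* Report whether a consumed entry is waiting at the queue head, without dequeuing it. */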
static int fifo_check_recv(struct ilo_hwinfo *hw, char *fifobar)
{
	struct fifo *fifo_q = FIFOBARTOHANDLE(fifobar);
	unsigned long flags;
	int ret = 0;
	u64 c;

	spin_lock_irqsave(&hw->fifo_lock, flags);
	c = fifo_q->fifobar[fifo_q->head & fifo_q->imask];
	if (c & ENTRY_MASK_C)
		ret = 1;
	spin_unlock_irqrestore(&hw->fifo_lock, flags);

	return ret;
}
static int ilo_pkt_enqueue(struct ilo_hwinfo *hw, struct ccb *ccb,
			   int dir, int id, int len)
{
	char *fifobar;
	int entry;

	if (dir == SENDQ)
		fifobar = ccb->ccb_u1.send_fifobar;
	else
		fifobar = ccb->ccb_u3.recv_fifobar;

	entry = mk_entry(id, len);
	return fifo_enqueue(hw, fifobar, entry);
}
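/*
 * Pull a completed entry off the send or receive queue and, if requested,
 * return its packet id, length, and a pointer into descriptor memory.
 */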
static int ilo_pkt_dequeue(struct ilo_hwinfo *hw, struct ccb *ccb,
			   int dir, int *id, int *len, void **pkt)
{
	char *fifobar, *desc;
	int entry = 0, pkt_id = 0;
	int ret;

	if (dir == SENDQ) {
		fifobar = ccb->ccb_u1.send_fifobar;
		desc = ccb->ccb_u2.send_desc;
	} else {
		fifobar = ccb->ccb_u3.recv_fifobar;
		desc = ccb->ccb_u4.recv_desc;
	}

	ret = fifo_dequeue(hw, fifobar, &entry);
	if (ret) {
		pkt_id = get_entry_id(entry);
		if (id)
			*id = pkt_id;
		if (len)
			*len = get_entry_len(entry);
		if (pkt)
			*pkt = (void *)(desc + desc_mem_sz(pkt_id));
	}

	return ret;
}

static int ilo_pkt_recv(struct ilo_hwinfo *hw, struct ccb *ccb)
{
	char *fifobar = ccb->ccb_u3.recv_fifobar;

	return fifo_check_recv(hw, fifobar);
}
static inline void doorbell_set(struct ccb *ccb)
{
	iowrite8(1, ccb->ccb_u5.db_base);
}

static inline void doorbell_clr(struct ccb *ccb)
{
	iowrite8(2, ccb->ccb_u5.db_base);
}

static inline int ctrl_set(int l2sz, int idxmask, int desclim)
{
	int active = 0, go = 1;
	return l2sz << CTRL_BITPOS_L2SZ |
	       idxmask << CTRL_BITPOS_FIFOINDEXMASK |
	       desclim << CTRL_BITPOS_DESCLIMIT |
	       active << CTRL_BITPOS_A |
	       go << CTRL_BITPOS_G;
}

static void ctrl_setup(struct ccb *ccb, int nr_desc, int l2desc_sz)
{
	/* for simplicity, use the same parameters for send and recv ctrls */
	ccb->send_ctrl = ctrl_set(l2desc_sz, nr_desc-1, nr_desc-1);
	ccb->recv_ctrl = ctrl_set(l2desc_sz, nr_desc-1, nr_desc-1);
}

static inline int fifo_sz(int nr_entry)
{
	/* size of a fifo is determined by the number of entries it contains */
	return (nr_entry * sizeof(u64)) + FIFOHANDLESIZE;
}
static void fifo_setup(void *base_addr, int nr_entry)
{
	struct fifo *fifo_q = base_addr;
	int i;

	/* set up an empty fifo */
	fifo_q->head = 0;
	fifo_q->tail = 0;
	fifo_q->reset = 0;
	fifo_q->nrents = nr_entry;
	fifo_q->imask = nr_entry - 1;
	fifo_q->merge = ENTRY_MASK_O;

	for (i = 0; i < nr_entry; i++)
		fifo_q->fifobar[i] = 0;
}
static void ilo_ccb_close(struct pci_dev *pdev, struct ccb_data *data)
{
	struct ccb *driver_ccb = &data->driver_ccb;
	struct ccb __iomem *device_ccb = data->mapped_ccb;
	int retries;

	/* complicated dance to tell the hw we are stopping */
	doorbell_clr(driver_ccb);
	iowrite32(ioread32(&device_ccb->send_ctrl) & ~(1 << CTRL_BITPOS_G),
		  &device_ccb->send_ctrl);
	iowrite32(ioread32(&device_ccb->recv_ctrl) & ~(1 << CTRL_BITPOS_G),
		  &device_ccb->recv_ctrl);

	/* give iLO some time to process stop request */
	for (retries = MAX_WAIT; retries > 0; retries--) {
		doorbell_set(driver_ccb);
		udelay(WAIT_TIME);
		if (!(ioread32(&device_ccb->send_ctrl) & (1 << CTRL_BITPOS_A))
		    &&
		    !(ioread32(&device_ccb->recv_ctrl) & (1 << CTRL_BITPOS_A)))
			break;
	}
	if (retries == 0)
		dev_err(&pdev->dev, "Closing, but controller still active\n");

	/* clear the hw ccb */
	memset_io(device_ccb, 0, sizeof(struct ccb));

	/* free resources used to back send/recv queues */
	pci_free_consistent(pdev, data->dma_size, data->dma_va, data->dma_pa);
}
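/*
 * Allocate one DMA region and carve it into the send/recv FIFOs and
 * descriptor memory, building a driver-side ccb with virtual addresses
 * and an iLO-side ccb with the corresponding physical addresses.
 */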
static int ilo_ccb_setup(struct ilo_hwinfo *hw, struct ccb_data *data, int slot)
{
	char *dma_va;
	dma_addr_t dma_pa;
	struct ccb *driver_ccb, *ilo_ccb;

	driver_ccb = &data->driver_ccb;
	ilo_ccb = &data->ilo_ccb;

	data->dma_size = 2 * fifo_sz(NR_QENTRY) +
			 2 * desc_mem_sz(NR_QENTRY) +
			 ILO_START_ALIGN + ILO_CACHE_SZ;

	data->dma_va = pci_alloc_consistent(hw->ilo_dev, data->dma_size,
					    &data->dma_pa);
	if (!data->dma_va)
		return -ENOMEM;

	dma_va = (char *)data->dma_va;
	dma_pa = data->dma_pa;

	memset(dma_va, 0, data->dma_size);

	dma_va = (char *)roundup((unsigned long)dma_va, ILO_START_ALIGN);
	dma_pa = roundup(dma_pa, ILO_START_ALIGN);

	/*
	 * Create two ccb's, one with virt addrs, one with phys addrs.
	 * Copy the phys addr ccb to device shared mem.
	 */
	ctrl_setup(driver_ccb, NR_QENTRY, L2_QENTRY_SZ);
	ctrl_setup(ilo_ccb, NR_QENTRY, L2_QENTRY_SZ);

	fifo_setup(dma_va, NR_QENTRY);
	driver_ccb->ccb_u1.send_fifobar = dma_va + FIFOHANDLESIZE;
	ilo_ccb->ccb_u1.send_fifobar_pa = dma_pa + FIFOHANDLESIZE;
	dma_va += fifo_sz(NR_QENTRY);
	dma_pa += fifo_sz(NR_QENTRY);

	dma_va = (char *)roundup((unsigned long)dma_va, ILO_CACHE_SZ);
	dma_pa = roundup(dma_pa, ILO_CACHE_SZ);

	fifo_setup(dma_va, NR_QENTRY);
	driver_ccb->ccb_u3.recv_fifobar = dma_va + FIFOHANDLESIZE;
	ilo_ccb->ccb_u3.recv_fifobar_pa = dma_pa + FIFOHANDLESIZE;
	dma_va += fifo_sz(NR_QENTRY);
	dma_pa += fifo_sz(NR_QENTRY);

	driver_ccb->ccb_u2.send_desc = dma_va;
	ilo_ccb->ccb_u2.send_desc_pa = dma_pa;
	dma_pa += desc_mem_sz(NR_QENTRY);
	dma_va += desc_mem_sz(NR_QENTRY);

	driver_ccb->ccb_u4.recv_desc = dma_va;
	ilo_ccb->ccb_u4.recv_desc_pa = dma_pa;

	driver_ccb->channel = slot;
	ilo_ccb->channel = slot;

	driver_ccb->ccb_u5.db_base = hw->db_vaddr + (slot << L2_DB_SIZE);
	ilo_ccb->ccb_u5.db_base = NULL; /* hw ccb's doorbell is not used */

	return 0;
}
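/*
 * Publish the physical-address ccb to device shared memory and prime
 * the send and receive queues with packets.
 */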
static void ilo_ccb_open(struct ilo_hwinfo *hw, struct ccb_data *data, int slot)
{
	int pkt_id, pkt_sz;
	struct ccb *driver_ccb = &data->driver_ccb;

	/* copy the ccb with physical addrs to device memory */
	data->mapped_ccb = (struct ccb __iomem *)
				(hw->ram_vaddr + (slot * ILOHW_CCB_SZ));
	memcpy_toio(data->mapped_ccb, &data->ilo_ccb, sizeof(struct ccb));

	/* put packets on the send and receive queues */
	pkt_sz = 0;
	for (pkt_id = 0; pkt_id < NR_QENTRY; pkt_id++) {
		ilo_pkt_enqueue(hw, driver_ccb, SENDQ, pkt_id, pkt_sz);
		doorbell_set(driver_ccb);
	}

	pkt_sz = desc_mem_sz(1);
	for (pkt_id = 0; pkt_id < NR_QENTRY; pkt_id++)
		ilo_pkt_enqueue(hw, driver_ccb, RECVQ, pkt_id, pkt_sz);

	/* the ccb is ready to use */
	doorbell_clr(driver_ccb);
}
static int ilo_ccb_verify(struct ilo_hwinfo *hw, struct ccb_data *data)
{
	int pkt_id, i;
	struct ccb *driver_ccb = &data->driver_ccb;

	/* make sure iLO is really handling requests */
	for (i = MAX_WAIT; i > 0; i--) {
		if (ilo_pkt_dequeue(hw, driver_ccb, SENDQ, &pkt_id, NULL, NULL))
			break;
		udelay(WAIT_TIME);
	}

	if (i == 0) {
		dev_err(&hw->ilo_dev->dev, "Open could not dequeue a packet\n");
		return -EBUSY;
	}

	ilo_pkt_enqueue(hw, driver_ccb, SENDQ, pkt_id, 0);
	doorbell_set(driver_ccb);
	return 0;
}

static inline int is_channel_reset(struct ccb *ccb)
{
	/* check for this particular channel needing a reset */
	return FIFOBARTOHANDLE(ccb->ccb_u1.send_fifobar)->reset;
}

static inline void set_channel_reset(struct ccb *ccb)
{
	/* set a flag indicating this channel needs a reset */
	FIFOBARTOHANDLE(ccb->ccb_u1.send_fifobar)->reset = 1;
}

static inline int get_device_outbound(struct ilo_hwinfo *hw)
{
	return ioread32(&hw->mmio_vaddr[DB_OUT]);
}

static inline int is_db_reset(int db_out)
{
	return db_out & (1 << DB_RESET);
}

static inline int is_device_reset(struct ilo_hwinfo *hw)
{
	/* check for global reset condition */
	return is_db_reset(get_device_outbound(hw));
}

static inline void clear_pending_db(struct ilo_hwinfo *hw, int clr)
{
	iowrite32(clr, &hw->mmio_vaddr[DB_OUT]);
}

static inline void clear_device(struct ilo_hwinfo *hw)
{
	/* clear the device (reset bits, pending channel entries) */
	clear_pending_db(hw, -1);
}

static inline void ilo_enable_interrupts(struct ilo_hwinfo *hw)
{
	iowrite8(ioread8(&hw->mmio_vaddr[DB_IRQ]) | 1, &hw->mmio_vaddr[DB_IRQ]);
}

static inline void ilo_disable_interrupts(struct ilo_hwinfo *hw)
{
	iowrite8(ioread8(&hw->mmio_vaddr[DB_IRQ]) & ~1,
		 &hw->mmio_vaddr[DB_IRQ]);
}

static void ilo_set_reset(struct ilo_hwinfo *hw)
{
	int slot;

	/*
	 * Mapped memory is zeroed on ilo reset, so set a per ccb flag
	 * to indicate that this ccb needs to be closed and reopened.
	 */
	for (slot = 0; slot < MAX_CCB; slot++) {
		if (!hw->ccb_alloc[slot])
			continue;
		set_channel_reset(&hw->ccb_alloc[slot]->driver_ccb);
	}
}
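/*
 * Wait briefly for a completed packet on the receive queue, copy as much
 * of it as the caller asked for, then return the packet to the device.
 */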
static ssize_t ilo_read(struct file *fp, char __user *buf,
			size_t len, loff_t *off)
{
	int err, found, cnt, pkt_id, pkt_len;
	struct ccb_data *data = fp->private_data;
	struct ccb *driver_ccb = &data->driver_ccb;
	struct ilo_hwinfo *hw = data->ilo_hw;
	void *pkt;

	if (is_channel_reset(driver_ccb)) {
		/*
		 * If the device has been reset, applications
		 * need to close and reopen all ccbs.
		 */
		return -ENODEV;
	}

	/*
	 * This function is to be called when data is expected
	 * in the channel, and will return an error if no packet is found
	 * during the loop below.  The sleep/retry logic is to allow
	 * applications to call read() immediately post write(),
	 * and give iLO some time to process the sent packet.
	 */
	cnt = 20;
	do {
		/* look for a received packet */
		found = ilo_pkt_dequeue(hw, driver_ccb, RECVQ, &pkt_id,
					&pkt_len, &pkt);
		if (found)
			break;
		cnt--;
		msleep(100);
	} while (!found && cnt);

	if (!found)
		return -EAGAIN;

	/* only copy the length of the received packet */
	if (pkt_len < len)
		len = pkt_len;

	err = copy_to_user(buf, pkt, len);

	/* return the received packet to the queue */
	ilo_pkt_enqueue(hw, driver_ccb, RECVQ, pkt_id, desc_mem_sz(1));

	return err ? -EFAULT : len;
}
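/*
 * Take a free packet from the send queue, copy in the user's data
 * (truncating to the packet size), and hand it back to the device.
 */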
static ssize_t ilo_write(struct file *fp, const char __user *buf,
			 size_t len, loff_t *off)
{
	int err, pkt_id, pkt_len;
	struct ccb_data *data = fp->private_data;
	struct ccb *driver_ccb = &data->driver_ccb;
	struct ilo_hwinfo *hw = data->ilo_hw;
	void *pkt;

	if (is_channel_reset(driver_ccb))
		return -ENODEV;

	/* get a packet to send the user command */
	if (!ilo_pkt_dequeue(hw, driver_ccb, SENDQ, &pkt_id, &pkt_len, &pkt))
		return -EBUSY;

	/* limit the length to the length of the packet */
	if (pkt_len < len)
		len = pkt_len;

	/* on failure, set the len to 0 to return empty packet to the device */
	err = copy_from_user(pkt, buf, len);
	if (err)
		len = 0;

	/* send the packet */
	ilo_pkt_enqueue(hw, driver_ccb, SENDQ, pkt_id, len);
	doorbell_set(driver_ccb);

	return err ? -EFAULT : len;
}
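/* Report POLLIN when a received packet is waiting, POLLERR after a channel reset. */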
static unsigned int ilo_poll(struct file *fp, poll_table *wait)
{
	struct ccb_data *data = fp->private_data;
	struct ccb *driver_ccb = &data->driver_ccb;

	poll_wait(fp, &data->ccb_waitq, wait);

	if (is_channel_reset(driver_ccb))
		return POLLERR;
	else if (ilo_pkt_recv(data->ilo_hw, driver_ccb))
		return POLLIN | POLLRDNORM;

	return 0;
}
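/* Drop one reference to the per-slot ccb; the last close tears it down. */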
static int ilo_close(struct inode *ip, struct file *fp)
{
	int slot;
	struct ccb_data *data;
	struct ilo_hwinfo *hw;
	unsigned long flags;

	slot = iminor(ip) % MAX_CCB;
	hw = container_of(ip->i_cdev, struct ilo_hwinfo, cdev);

	spin_lock(&hw->open_lock);

	if (hw->ccb_alloc[slot]->ccb_cnt == 1) {

		data = fp->private_data;

		spin_lock_irqsave(&hw->alloc_lock, flags);
		hw->ccb_alloc[slot] = NULL;
		spin_unlock_irqrestore(&hw->alloc_lock, flags);

		ilo_ccb_close(hw->ilo_dev, data);

		kfree(data);
	} else
		hw->ccb_alloc[slot]->ccb_cnt--;

	spin_unlock(&hw->open_lock);

	return 0;
}
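/*
 * First open of a slot allocates and verifies a new ccb; later opens
 * share it unless either side requested O_EXCL.
 */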
static int ilo_open(struct inode *ip, struct file *fp)
{
	int slot, error;
	struct ccb_data *data;
	struct ilo_hwinfo *hw;
	unsigned long flags;

	slot = iminor(ip) % MAX_CCB;
	hw = container_of(ip->i_cdev, struct ilo_hwinfo, cdev);

	/* new ccb allocation */
	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	spin_lock(&hw->open_lock);

	/* each fd private_data holds sw/hw view of ccb */
	if (hw->ccb_alloc[slot] == NULL) {
		/* create a channel control block for this minor */
		error = ilo_ccb_setup(hw, data, slot);
		if (error) {
			kfree(data);
			goto out;
		}

		data->ccb_cnt = 1;
		data->ccb_excl = fp->f_flags & O_EXCL;
		data->ilo_hw = hw;
		init_waitqueue_head(&data->ccb_waitq);

		/* write the ccb to hw */
		spin_lock_irqsave(&hw->alloc_lock, flags);
		ilo_ccb_open(hw, data, slot);
		hw->ccb_alloc[slot] = data;
		spin_unlock_irqrestore(&hw->alloc_lock, flags);

		/* make sure the channel is functional */
		error = ilo_ccb_verify(hw, data);
		if (error) {

			spin_lock_irqsave(&hw->alloc_lock, flags);
			hw->ccb_alloc[slot] = NULL;
			spin_unlock_irqrestore(&hw->alloc_lock, flags);

			ilo_ccb_close(hw->ilo_dev, data);

			kfree(data);
			goto out;
		}

	} else {
		kfree(data);
		if (fp->f_flags & O_EXCL || hw->ccb_alloc[slot]->ccb_excl) {
			/*
			 * The channel exists, and either this open
			 * or a previous open of this channel wants
			 * exclusive access.
			 */
			error = -EBUSY;
		} else {
			hw->ccb_alloc[slot]->ccb_cnt++;
			error = 0;
		}
	}
out:
	spin_unlock(&hw->open_lock);

	if (!error)
		fp->private_data = hw->ccb_alloc[slot];

	return error;
}
static const struct file_operations ilo_fops = {
	.owner		= THIS_MODULE,
	.read		= ilo_read,
	.write		= ilo_write,
	.poll		= ilo_poll,
	.open 		= ilo_open,
	.release 	= ilo_close,
	.llseek		= noop_llseek,
};
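/*
 * Interrupt handler: wake any waiters on channels the device flagged in
 * the outbound doorbell, or on every channel after a device reset.
 */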
static irqreturn_t ilo_isr(int irq, void *data)
{
	struct ilo_hwinfo *hw = data;
	int pending, i;

	spin_lock(&hw->alloc_lock);

	/* check for ccbs which have data */
	pending = get_device_outbound(hw);
	if (!pending) {
		spin_unlock(&hw->alloc_lock);
		return IRQ_NONE;
	}

	if (is_db_reset(pending)) {
		/* wake up all ccbs if the device was reset */
		pending = -1;
		ilo_set_reset(hw);
	}

	for (i = 0; i < MAX_CCB; i++) {
		if (!hw->ccb_alloc[i])
			continue;
		if (pending & (1 << i))
			wake_up_interruptible(&hw->ccb_alloc[i]->ccb_waitq);
	}

	/* clear the device of the channels that have been handled */
	clear_pending_db(hw, pending);

	spin_unlock(&hw->alloc_lock);

	return IRQ_HANDLED;
}
static void ilo_unmap_device(struct pci_dev *pdev, struct ilo_hwinfo *hw)
{
	pci_iounmap(pdev, hw->db_vaddr);
	pci_iounmap(pdev, hw->ram_vaddr);
	pci_iounmap(pdev, hw->mmio_vaddr);
}

static int __devinit ilo_map_device(struct pci_dev *pdev, struct ilo_hwinfo *hw)
{
	int error = -ENOMEM;

	/* map the memory mapped i/o registers */
	hw->mmio_vaddr = pci_iomap(pdev, 1, 0);
	if (hw->mmio_vaddr == NULL) {
		dev_err(&pdev->dev, "Error mapping mmio\n");
		goto out;
	}

	/* map the adapter shared memory region */
	hw->ram_vaddr = pci_iomap(pdev, 2, MAX_CCB * ILOHW_CCB_SZ);
	if (hw->ram_vaddr == NULL) {
		dev_err(&pdev->dev, "Error mapping shared mem\n");
		goto mmio_free;
	}

	/* map the doorbell aperture */
	hw->db_vaddr = pci_iomap(pdev, 3, MAX_CCB * ONE_DB_SIZE);
	if (hw->db_vaddr == NULL) {
		dev_err(&pdev->dev, "Error mapping doorbell\n");
		goto ram_free;
	}

	return 0;
ram_free:
	pci_iounmap(pdev, hw->ram_vaddr);
mmio_free:
	pci_iounmap(pdev, hw->mmio_vaddr);
out:
	return error;
}
static void ilo_remove(struct pci_dev *pdev)
{
	int i, minor;
	struct ilo_hwinfo *ilo_hw = pci_get_drvdata(pdev);

	clear_device(ilo_hw);

	minor = MINOR(ilo_hw->cdev.dev);
	for (i = minor; i < minor + MAX_CCB; i++)
		device_destroy(ilo_class, MKDEV(ilo_major, i));

	cdev_del(&ilo_hw->cdev);
	ilo_disable_interrupts(ilo_hw);
	free_irq(pdev->irq, ilo_hw);
	ilo_unmap_device(pdev, ilo_hw);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	kfree(ilo_hw);
	ilo_hwdev[(minor / MAX_CCB)] = 0;
}
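/*
 * Probe: claim a free device number, map the BARs, hook up the interrupt,
 * and create one character device node per ccb slot.
 */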
static int __devinit ilo_probe(struct pci_dev *pdev,
			       const struct pci_device_id *ent)
{
	int devnum, minor, start, error;
	struct ilo_hwinfo *ilo_hw;

	/* find a free range for device files */
	for (devnum = 0; devnum < MAX_ILO_DEV; devnum++) {
		if (ilo_hwdev[devnum] == 0) {
			ilo_hwdev[devnum] = 1;
			break;
		}
	}

	if (devnum == MAX_ILO_DEV) {
		dev_err(&pdev->dev, "Error finding free device\n");
		return -ENODEV;
	}

	/* track global allocations for this device */
	error = -ENOMEM;
	ilo_hw = kzalloc(sizeof(*ilo_hw), GFP_KERNEL);
	if (!ilo_hw)
		goto out;

	ilo_hw->ilo_dev = pdev;
	spin_lock_init(&ilo_hw->alloc_lock);
	spin_lock_init(&ilo_hw->fifo_lock);
	spin_lock_init(&ilo_hw->open_lock);

	error = pci_enable_device(pdev);
	if (error)
		goto free;

	pci_set_master(pdev);

	error = pci_request_regions(pdev, ILO_NAME);
	if (error)
		goto disable;

	error = ilo_map_device(pdev, ilo_hw);
	if (error)
		goto free_regions;

	pci_set_drvdata(pdev, ilo_hw);
	clear_device(ilo_hw);

	error = request_irq(pdev->irq, ilo_isr, IRQF_SHARED, "hpilo", ilo_hw);
	if (error)
		goto unmap;

	ilo_enable_interrupts(ilo_hw);

	cdev_init(&ilo_hw->cdev, &ilo_fops);
	ilo_hw->cdev.owner = THIS_MODULE;
	start = devnum * MAX_CCB;
	error = cdev_add(&ilo_hw->cdev, MKDEV(ilo_major, start), MAX_CCB);
	if (error) {
		dev_err(&pdev->dev, "Could not add cdev\n");
		goto remove_isr;
	}

	for (minor = 0 ; minor < MAX_CCB; minor++) {
		struct device *dev;
		dev = device_create(ilo_class, &pdev->dev,
				    MKDEV(ilo_major, minor), NULL,
				    "hpilo!d%dccb%d", devnum, minor);
		if (IS_ERR(dev))
			dev_err(&pdev->dev, "Could not create files\n");
	}

	return 0;
remove_isr:
	ilo_disable_interrupts(ilo_hw);
	free_irq(pdev->irq, ilo_hw);
unmap:
	ilo_unmap_device(pdev, ilo_hw);
free_regions:
	pci_release_regions(pdev);
disable:
	pci_disable_device(pdev);
free:
	kfree(ilo_hw);
out:
	ilo_hwdev[devnum] = 0;
	return error;
}
static struct pci_device_id ilo_devices[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_COMPAQ, 0xB204) },
	{ PCI_DEVICE(PCI_VENDOR_ID_HP, 0x3307) },
	{ }
};
MODULE_DEVICE_TABLE(pci, ilo_devices);

static struct pci_driver ilo_driver = {
	.name 	  = ILO_NAME,
	.id_table = ilo_devices,
	.probe 	  = ilo_probe,
	.remove   = __devexit_p(ilo_remove),
};
static int __init ilo_init(void)
{
	int error;
	dev_t dev;

	ilo_class = class_create(THIS_MODULE, "iLO");
	if (IS_ERR(ilo_class)) {
		error = PTR_ERR(ilo_class);
		goto out;
	}

	error = alloc_chrdev_region(&dev, 0, MAX_OPEN, ILO_NAME);
	if (error)
		goto class_destroy;

	ilo_major = MAJOR(dev);

	error = pci_register_driver(&ilo_driver);
	if (error)
		goto chr_remove;

	return 0;
chr_remove:
	unregister_chrdev_region(dev, MAX_OPEN);
class_destroy:
	class_destroy(ilo_class);
out:
	return error;
}

static void __exit ilo_exit(void)
{
	pci_unregister_driver(&ilo_driver);
	unregister_chrdev_region(MKDEV(ilo_major, 0), MAX_OPEN);
	class_destroy(ilo_class);
}

MODULE_VERSION("1.2");
MODULE_ALIAS(ILO_NAME);
MODULE_DESCRIPTION(ILO_NAME);
MODULE_AUTHOR("David Altobelli <david.altobelli@hp.com>");
MODULE_LICENSE("GPL v2");

module_init(ilo_init);
module_exit(ilo_exit);