Merge branch 'mini2440-dev-unlikely' into mini2440-dev
[linux-2.6/mini2440.git] / drivers / staging / b3dfg / b3dfg.c
blobcda26bb493b3d3185177e8d3041f4ebf106e0f79
1 /*
2 * Brontes PCI frame grabber driver
4 * Copyright (C) 2008 3M Company
5 * Contact: Justin Bronder <jsbronder@brontes3d.com>
6 * Original Authors: Daniel Drake <ddrake@brontes3d.com>
7 * Duane Griffin <duaneg@dghda.com>
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 #include <linux/device.h>
25 #include <linux/fs.h>
26 #include <linux/interrupt.h>
27 #include <linux/spinlock.h>
28 #include <linux/ioctl.h>
29 #include <linux/kernel.h>
30 #include <linux/module.h>
31 #include <linux/pci.h>
32 #include <linux/types.h>
33 #include <linux/cdev.h>
34 #include <linux/list.h>
35 #include <linux/poll.h>
36 #include <linux/wait.h>
37 #include <linux/mm.h>
38 #include <linux/uaccess.h>
39 #include <linux/sched.h>
/* Number of triplet buffers to allocate; read-only module parameter (0444),
 * so it can only be set at load time. Must be >= 2 (checked in module_init). */
static unsigned int b3dfg_nbuf = 2;

module_param_named(buffer_count, b3dfg_nbuf, uint, 0444);

MODULE_PARM_DESC(buffer_count, "Number of buffers (min 2, default 2)");

MODULE_AUTHOR("Daniel Drake <ddrake@brontes3d.com>");
MODULE_DESCRIPTION("Brontes frame grabber driver");
MODULE_LICENSE("GPL");
#define DRIVER_NAME "b3dfg"
#define B3DFG_MAX_DEVS 4
/* Each buffer holds one triplet: three consecutive frames. */
#define B3DFG_FRAMES_PER_BUFFER 3

/* BAR 0 exposes the register file; its size is validated in probe. */
#define B3DFG_BAR_REGS 0
#define B3DFG_REGS_LENGTH 0x10000

/* ioctl interface exported to userspace. */
#define B3DFG_IOC_MAGIC		0xb3 /* dfg :-) */
#define B3DFG_IOCGFRMSZ		_IOR(B3DFG_IOC_MAGIC, 1, int)
#define B3DFG_IOCTNUMBUFS	_IO(B3DFG_IOC_MAGIC, 2)
#define B3DFG_IOCTTRANS		_IO(B3DFG_IOC_MAGIC, 3)
#define B3DFG_IOCTQUEUEBUF	_IO(B3DFG_IOC_MAGIC, 4)
#define B3DFG_IOCTPOLLBUF	_IOWR(B3DFG_IOC_MAGIC, 5, struct b3dfg_poll)
#define B3DFG_IOCTWAITBUF	_IOWR(B3DFG_IOC_MAGIC, 6, struct b3dfg_wait)
#define B3DFG_IOCGWANDSTAT	_IOR(B3DFG_IOC_MAGIC, 7, int)
/* Register map: byte offsets into BAR 0. */
enum {
	/* number of 4kb pages per frame */
	B3D_REG_FRM_SIZE = 0x0,

	/* bit 0: set to enable interrupts
	 * bit 1: set to enable cable status change interrupts */
	B3D_REG_HW_CTRL = 0x4,

	/* bit 0-1 - 1-based ID of next pending frame transfer (0 = none)
	 * bit 2 indicates the previous DMA transfer has completed
	 * bit 3 indicates wand cable status change
	 * bit 8:15 - counter of number of discarded triplets */
	B3D_REG_DMA_STS = 0x8,

	/* bit 0: wand status (1 = present, 0 = disconnected) */
	B3D_REG_WAND_STS = 0xc,

	/* bus address for DMA transfers. lower 2 bits must be zero because DMA
	 * works with 32 bit word size. */
	B3D_REG_EC220_DMA_ADDR = 0x8000,

	/* bit 20:0 - number of 32 bit words to be transferred
	 * bit 21:31 - reserved */
	B3D_REG_EC220_TRF_SIZE = 0x8004,

	/* bit 0 - error bit
	 * bit 1 - interrupt bit (set to generate interrupt at end of transfer)
	 * bit 2 - start bit (set to start transfer)
	 * bit 3 - direction (0 = DMA_TO_DEVICE, 1 = DMA_FROM_DEVICE)
	 * bit 4:31 - reserved */
	B3D_REG_EC220_DMA_STS = 0x8008,
};
/* Buffer life cycle: POLLED (idle/consumed by userspace) -> PENDING (queued
 * for DMA) -> POPULATED (full triplet received) -> POLLED again on read. */
enum b3dfg_buffer_state {
	B3DFG_BUFFER_POLLED = 0,
	B3DFG_BUFFER_PENDING,
	B3DFG_BUFFER_POPULATED,
};
/* One user-visible buffer: a triplet of kmalloc'd frames plus queue linkage. */
struct b3dfg_buffer {
	unsigned char *frame[B3DFG_FRAMES_PER_BUFFER];
	struct list_head list;	/* linkage on b3dfg_dev.buffer_queue */
	u8 state;		/* enum b3dfg_buffer_state */
};
/* Per-device state for one frame grabber board. */
struct b3dfg_dev {

	/* no protection needed: all finalized at initialization time */
	struct pci_dev *pdev;
	struct cdev chardev;
	struct device *dev;
	void __iomem *regs;
	unsigned int frame_size;

	/*
	 * Protects buffer state, including buffer_queue, triplet_ready,
	 * cur_dma_frame_idx & cur_dma_frame_addr.
	 */
	spinlock_t buffer_lock;
	struct b3dfg_buffer *buffers;
	struct list_head buffer_queue;

	/* Last frame in triplet transferred (-1 if none). */
	int cur_dma_frame_idx;

	/* Current frame's address for DMA. */
	dma_addr_t cur_dma_frame_addr;

	/*
	 * Protects cstate_tstamp.
	 * Nests inside buffer_lock.
	 */
	spinlock_t cstate_lock;
	unsigned long cstate_tstamp;

	/*
	 * Protects triplets_dropped.
	 * Nests inside buffer_lock.
	 */
	spinlock_t triplets_dropped_lock;
	unsigned int triplets_dropped;

	wait_queue_head_t buffer_waitqueue;

	/* 1 while the board is actively transferring triplets */
	unsigned int transmission_enabled:1;
	/* 1 when hardware had a triplet ready but no buffer was queued */
	unsigned int triplet_ready:1;
};
155 static u8 b3dfg_devices[B3DFG_MAX_DEVS];
157 static struct class *b3dfg_class;
158 static dev_t b3dfg_devt;
160 static const struct pci_device_id b3dfg_ids[] __devinitdata = {
161 { PCI_DEVICE(0x0b3d, 0x0001) },
162 { },
165 MODULE_DEVICE_TABLE(pci, b3dfg_ids);
/***** user-visible types *****/

/* Exchanged with userspace via B3DFG_IOCTPOLLBUF. */
struct b3dfg_poll {
	int buffer_idx;			/* in: buffer to poll */
	unsigned int triplets_dropped;	/* out: drop count since last read */
};

/* Exchanged with userspace via B3DFG_IOCTWAITBUF. */
struct b3dfg_wait {
	int buffer_idx;			/* in: buffer to wait on */
	unsigned int timeout;		/* in: ms (0 = forever); out: ms left */
	unsigned int triplets_dropped;	/* out: drop count since last read */
};
/**** register I/O ****/

/* Read a 32-bit board register at byte offset @reg. */
static u32 b3dfg_read32(struct b3dfg_dev *fgdev, u16 reg)
{
	return ioread32(fgdev->regs + reg);
}

/* Write a 32-bit board register at byte offset @reg. */
static void b3dfg_write32(struct b3dfg_dev *fgdev, u16 reg, u32 value)
{
	iowrite32(value, fgdev->regs + reg);
}
192 /**** buffer management ****/
195 * Program EC220 for transfer of a specific frame.
196 * Called with buffer_lock held.
198 static int setup_frame_transfer(struct b3dfg_dev *fgdev,
199 struct b3dfg_buffer *buf, int frame)
201 unsigned char *frm_addr;
202 dma_addr_t frm_addr_dma;
203 unsigned int frm_size = fgdev->frame_size;
205 frm_addr = buf->frame[frame];
206 frm_addr_dma = pci_map_single(fgdev->pdev, frm_addr,
207 frm_size, PCI_DMA_FROMDEVICE);
208 if (pci_dma_mapping_error(fgdev->pdev, frm_addr_dma))
209 return -ENOMEM;
211 fgdev->cur_dma_frame_addr = frm_addr_dma;
212 fgdev->cur_dma_frame_idx = frame;
214 b3dfg_write32(fgdev, B3D_REG_EC220_DMA_ADDR,
215 cpu_to_le32(frm_addr_dma));
216 b3dfg_write32(fgdev, B3D_REG_EC220_TRF_SIZE,
217 cpu_to_le32(frm_size >> 2));
218 b3dfg_write32(fgdev, B3D_REG_EC220_DMA_STS, 0xf);
220 return 0;
223 /* Caller should hold buffer lock */
224 static void dequeue_all_buffers(struct b3dfg_dev *fgdev)
226 int i;
227 for (i = 0; i < b3dfg_nbuf; i++) {
228 struct b3dfg_buffer *buf = &fgdev->buffers[i];
229 buf->state = B3DFG_BUFFER_POLLED;
230 list_del_init(&buf->list);
234 /* queue a buffer to receive data */
235 static int queue_buffer(struct b3dfg_dev *fgdev, int bufidx)
237 struct device *dev = &fgdev->pdev->dev;
238 struct b3dfg_buffer *buf;
239 unsigned long flags;
240 int r = 0;
242 spin_lock_irqsave(&fgdev->buffer_lock, flags);
243 if (bufidx < 0 || bufidx >= b3dfg_nbuf) {
244 dev_dbg(dev, "Invalid buffer index, %d\n", bufidx);
245 r = -ENOENT;
246 goto out;
248 buf = &fgdev->buffers[bufidx];
250 if (unlikely(buf->state == B3DFG_BUFFER_PENDING)) {
251 dev_dbg(dev, "buffer %d is already queued\n", bufidx);
252 r = -EINVAL;
253 goto out;
256 buf->state = B3DFG_BUFFER_PENDING;
257 list_add_tail(&buf->list, &fgdev->buffer_queue);
259 if (fgdev->transmission_enabled && fgdev->triplet_ready) {
260 dev_dbg(dev, "triplet is ready, pushing immediately\n");
261 fgdev->triplet_ready = 0;
262 r = setup_frame_transfer(fgdev, buf, 0);
263 if (r)
264 dev_err(dev, "unable to map DMA buffer\n");
267 out:
268 spin_unlock_irqrestore(&fgdev->buffer_lock, flags);
269 return r;
/* non-blocking buffer poll. returns 1 if data is present in the buffer,
 * 0 otherwise.  On success the drop counter is reported to userspace and
 * reset, and the buffer returns to the POLLED (consumed) state. */
static int poll_buffer(struct b3dfg_dev *fgdev, void __user *arg)
{
	struct device *dev = &fgdev->pdev->dev;
	struct b3dfg_poll p;
	struct b3dfg_buffer *buf;
	unsigned long flags;
	int r = 1;
	int arg_out = 0;	/* copy results back only if buffer was ready */

	if (copy_from_user(&p, arg, sizeof(p)))
		return -EFAULT;

	/* NOTE(review): transmission_enabled is read without buffer_lock
	 * here; a racing disable is benign but worth confirming. */
	if (unlikely(!fgdev->transmission_enabled)) {
		dev_dbg(dev, "cannot poll, transmission disabled\n");
		return -EINVAL;
	}

	if (p.buffer_idx < 0 || p.buffer_idx >= b3dfg_nbuf)
		return -ENOENT;

	buf = &fgdev->buffers[p.buffer_idx];

	spin_lock_irqsave(&fgdev->buffer_lock, flags);

	if (likely(buf->state == B3DFG_BUFFER_POPULATED)) {
		arg_out = 1;
		buf->state = B3DFG_BUFFER_POLLED;

		/* IRQs already disabled by spin_lock_irqsave above. */
		spin_lock(&fgdev->triplets_dropped_lock);
		p.triplets_dropped = fgdev->triplets_dropped;
		fgdev->triplets_dropped = 0;
		spin_unlock(&fgdev->triplets_dropped_lock);
	} else {
		r = 0;
	}

	spin_unlock_irqrestore(&fgdev->buffer_lock, flags);

	if (arg_out && copy_to_user(arg, &p, sizeof(p)))
		r = -EFAULT;

	return r;
}
319 static unsigned long get_cstate_change(struct b3dfg_dev *fgdev)
321 unsigned long flags, when;
323 spin_lock_irqsave(&fgdev->cstate_lock, flags);
324 when = fgdev->cstate_tstamp;
325 spin_unlock_irqrestore(&fgdev->cstate_lock, flags);
326 return when;
/* Wait-queue condition: true when a wait on @buf can complete, i.e.
 * transmission was stopped, the buffer filled, or the cable state changed
 * since timestamp @when.  Takes buffer_lock then cstate_lock, matching the
 * nesting order documented on struct b3dfg_dev. */
static int is_event_ready(struct b3dfg_dev *fgdev, struct b3dfg_buffer *buf,
			  unsigned long when)
{
	int result;
	unsigned long flags;

	spin_lock_irqsave(&fgdev->buffer_lock, flags);
	spin_lock(&fgdev->cstate_lock);
	result = (!fgdev->transmission_enabled ||
		  buf->state == B3DFG_BUFFER_POPULATED ||
		  when != fgdev->cstate_tstamp);
	spin_unlock(&fgdev->cstate_lock);
	spin_unlock_irqrestore(&fgdev->buffer_lock, flags);

	return result;
}
/* sleep until a specific buffer becomes populated.
 *
 * Returns the remaining timeout in ms (>0) via both the return value and
 * w.timeout on success; -ETIMEDOUT if the wait expired; -ERESTARTSYS if
 * interrupted by a signal; -EINVAL if transmission stopped or the cable
 * state changed while waiting; -EFAULT/-ENOENT for bad user args. */
static int wait_buffer(struct b3dfg_dev *fgdev, void __user *arg)
{
	struct device *dev = &fgdev->pdev->dev;
	struct b3dfg_wait w;
	struct b3dfg_buffer *buf;
	unsigned long flags, when;
	int r;

	if (copy_from_user(&w, arg, sizeof(w)))
		return -EFAULT;

	if (!fgdev->transmission_enabled) {
		dev_dbg(dev, "cannot wait, transmission disabled\n");
		return -EINVAL;
	}

	if (w.buffer_idx < 0 || w.buffer_idx >= b3dfg_nbuf)
		return -ENOENT;

	buf = &fgdev->buffers[w.buffer_idx];

	spin_lock_irqsave(&fgdev->buffer_lock, flags);

	/* Fast path: data is already there, report without sleeping. */
	if (buf->state == B3DFG_BUFFER_POPULATED) {
		r = w.timeout;
		goto out_triplets_dropped;
	}

	spin_unlock_irqrestore(&fgdev->buffer_lock, flags);

	/* Remember the cable-state generation so we can detect an unplug
	 * that happens while we sleep. */
	when = get_cstate_change(fgdev);
	if (w.timeout > 0) {
		r = wait_event_interruptible_timeout(fgdev->buffer_waitqueue,
			is_event_ready(fgdev, buf, when),
			(w.timeout * HZ) / 1000);

		if (unlikely(r < 0))
			goto out;

		/* convert remaining jiffies back to ms for userspace */
		w.timeout = r * 1000 / HZ;
	} else {
		r = wait_event_interruptible(fgdev->buffer_waitqueue,
			is_event_ready(fgdev, buf, when));

		if (unlikely(r)) {
			r = -ERESTARTSYS;
			goto out;
		}
	}

	/* TODO: Inform the user via field(s) in w? */
	if (!fgdev->transmission_enabled || when != get_cstate_change(fgdev)) {
		r = -EINVAL;
		goto out;
	}

	spin_lock_irqsave(&fgdev->buffer_lock, flags);

	/* Woken but buffer still empty: the timeout must have expired. */
	if (buf->state != B3DFG_BUFFER_POPULATED) {
		r = -ETIMEDOUT;
		goto out_unlock;
	}

	buf->state = B3DFG_BUFFER_POLLED;

out_triplets_dropped:

	/* IRQs already disabled by spin_lock_irqsave above. */
	spin_lock(&fgdev->triplets_dropped_lock);
	w.triplets_dropped = fgdev->triplets_dropped;
	fgdev->triplets_dropped = 0;
	spin_unlock(&fgdev->triplets_dropped_lock);

out_unlock:
	spin_unlock_irqrestore(&fgdev->buffer_lock, flags);
	if (copy_to_user(arg, &w, sizeof(w)))
		r = -EFAULT;
out:
	return r;
}
428 /* mmap page fault handler */
429 static int b3dfg_vma_fault(struct vm_area_struct *vma,
430 struct vm_fault *vmf)
432 struct b3dfg_dev *fgdev = vma->vm_file->private_data;
433 unsigned long off = vmf->pgoff << PAGE_SHIFT;
434 unsigned int frame_size = fgdev->frame_size;
435 unsigned int buf_size = frame_size * B3DFG_FRAMES_PER_BUFFER;
436 unsigned char *addr;
438 /* determine which buffer the offset lies within */
439 unsigned int buf_idx = off / buf_size;
440 /* and the offset into the buffer */
441 unsigned int buf_off = off % buf_size;
443 /* determine which frame inside the buffer the offset lies in */
444 unsigned int frm_idx = buf_off / frame_size;
445 /* and the offset into the frame */
446 unsigned int frm_off = buf_off % frame_size;
448 if (unlikely(buf_idx >= b3dfg_nbuf))
449 return VM_FAULT_SIGBUS;
451 addr = fgdev->buffers[buf_idx].frame[frm_idx] + frm_off;
452 vm_insert_pfn(vma, (unsigned long)vmf->virtual_address,
453 virt_to_phys(addr) >> PAGE_SHIFT);
455 return VM_FAULT_NOPAGE;
/* VMA operations: pages are supplied on demand by b3dfg_vma_fault(). */
static struct vm_operations_struct b3dfg_vm_ops = {
	.fault = b3dfg_vma_fault,
};
/* Report wand presence (bit 0 of the WAND_STS register) to userspace. */
static int get_wand_status(struct b3dfg_dev *fgdev, int __user *arg)
{
	u32 wndstat = b3dfg_read32(fgdev, B3D_REG_WAND_STS);

	dev_dbg(&fgdev->pdev->dev, "wand status %x\n", wndstat);
	return __put_user(wndstat & 0x1, arg);
}
/* Start triplet transmission: verify the wand is attached and we are a PCI
 * bus master, reset per-session state under buffer_lock, then enable DMA
 * and cable-status interrupts.  Idempotent w.r.t. racing callers. */
static int enable_transmission(struct b3dfg_dev *fgdev)
{
	u16 command;
	unsigned long flags;
	struct device *dev = &fgdev->pdev->dev;

	dev_dbg(dev, "enable transmission\n");

	/* check the cable is plugged in. */
	if (!b3dfg_read32(fgdev, B3D_REG_WAND_STS)) {
		dev_dbg(dev, "cannot start transmission without wand\n");
		return -EINVAL;
	}

	/*
	 * Check we're a bus master.
	 * TODO: I think we can remove this having added the pci_set_master call
	 */
	pci_read_config_word(fgdev->pdev, PCI_COMMAND, &command);
	if (!(command & PCI_COMMAND_MASTER)) {
		dev_err(dev, "not a bus master, force-enabling\n");
		pci_write_config_word(fgdev->pdev, PCI_COMMAND,
			command | PCI_COMMAND_MASTER);
	}

	spin_lock_irqsave(&fgdev->buffer_lock, flags);

	/* Handle racing enable_transmission calls. */
	if (fgdev->transmission_enabled) {
		spin_unlock_irqrestore(&fgdev->buffer_lock, flags);
		goto out;
	}

	spin_lock(&fgdev->triplets_dropped_lock);
	fgdev->triplets_dropped = 0;
	spin_unlock(&fgdev->triplets_dropped_lock);

	fgdev->triplet_ready = 0;
	fgdev->cur_dma_frame_idx = -1;
	fgdev->transmission_enabled = 1;

	spin_unlock_irqrestore(&fgdev->buffer_lock, flags);

	/* Enable DMA and cable status interrupts. */
	b3dfg_write32(fgdev, B3D_REG_HW_CTRL, 0x03);

out:
	return 0;
}
/* Stop triplet transmission: clear the enabled flag and disable interrupts
 * under buffer_lock (so the IRQ handler sees a consistent state), drop all
 * queued buffers, then wake any sleepers so they notice the shutdown. */
static void disable_transmission(struct b3dfg_dev *fgdev)
{
	struct device *dev = &fgdev->pdev->dev;
	unsigned long flags;
	u32 tmp;

	dev_dbg(dev, "disable transmission\n");

	/* guarantee that no more interrupts will be serviced */
	spin_lock_irqsave(&fgdev->buffer_lock, flags);
	fgdev->transmission_enabled = 0;

	b3dfg_write32(fgdev, B3D_REG_HW_CTRL, 0);

	/* FIXME: temporary debugging only. if the board stops transmitting,
	 * hitting ctrl+c and seeing this message is useful for determining
	 * the state of the board. */
	tmp = b3dfg_read32(fgdev, B3D_REG_DMA_STS);
	dev_dbg(dev, "DMA_STS reads %x after TX stopped\n", tmp);

	dequeue_all_buffers(fgdev);
	spin_unlock_irqrestore(&fgdev->buffer_lock, flags);

	wake_up_interruptible(&fgdev->buffer_waitqueue);
}
545 static int set_transmission(struct b3dfg_dev *fgdev, int enabled)
547 int res = 0;
549 if (enabled && !fgdev->transmission_enabled)
550 res = enable_transmission(fgdev);
551 else if (!enabled && fgdev->transmission_enabled)
552 disable_transmission(fgdev);
554 return res;
/* Called in interrupt context.  Resets driver state after a wand unplug:
 * masks interrupts, stops transmission, unmaps any in-flight DMA frame and
 * drops all queued buffers. */
static void handle_cstate_unplug(struct b3dfg_dev *fgdev)
{
	/* Disable all interrupts. */
	b3dfg_write32(fgdev, B3D_REG_HW_CTRL, 0);

	/* Stop transmission. */
	spin_lock(&fgdev->buffer_lock);
	fgdev->transmission_enabled = 0;

	fgdev->cur_dma_frame_idx = -1;
	fgdev->triplet_ready = 0;
	/* A frame may be mid-transfer; release its DMA mapping. */
	if (fgdev->cur_dma_frame_addr) {
		pci_unmap_single(fgdev->pdev, fgdev->cur_dma_frame_addr,
				 fgdev->frame_size, PCI_DMA_FROMDEVICE);
		fgdev->cur_dma_frame_addr = 0;
	}
	dequeue_all_buffers(fgdev);
	spin_unlock(&fgdev->buffer_lock);
}
/* Called in interrupt context.  Reacts to a cable (wand) status change. */
static void handle_cstate_change(struct b3dfg_dev *fgdev)
{
	u32 cstate = b3dfg_read32(fgdev, B3D_REG_WAND_STS);
	unsigned long when;
	struct device *dev = &fgdev->pdev->dev;

	dev_dbg(dev, "cable state change: %u\n", cstate);

	/*
	 * When the wand is unplugged we reset our state. The hardware will
	 * have done the same internally.
	 *
	 * Note we should never see a cable *plugged* event, as interrupts
	 * should only be enabled when transmitting, which requires the cable
	 * to be plugged. If we do see one it probably means the cable has been
	 * unplugged and re-plugged very rapidly. Possibly because it has a
	 * broken wire and is momentarily losing contact.
	 *
	 * TODO: At the moment if you plug in the cable then enable transmission
	 * the hardware will raise a couple of spurious interrupts, so
	 * just ignore them for now.
	 *
	 * Once the hardware is fixed we should complain and treat it as an
	 * unplug. Or at least track how frequently it is happening and do
	 * so if too many come in.
	 */
	if (cstate) {
		dev_warn(dev, "ignoring unexpected plug event\n");
		return;
	}
	handle_cstate_unplug(fgdev);

	/*
	 * Record cable state change timestamp & wake anyone waiting
	 * on a cable state change. Be paranoid about ensuring events
	 * are not missed if we somehow get two interrupts in a jiffy.
	 */
	spin_lock(&fgdev->cstate_lock);
	when = jiffies_64;
	if (when <= fgdev->cstate_tstamp)
		when = fgdev->cstate_tstamp + 1;
	fgdev->cstate_tstamp = when;
	wake_up_interruptible(&fgdev->buffer_waitqueue);
	spin_unlock(&fgdev->cstate_lock);
}
/* Called with buffer_lock held.  Finishes the frame whose DMA just
 * completed: unmaps it, and if it was the last frame of the triplet, marks
 * the head buffer populated and wakes any waiters. */
static void transfer_complete(struct b3dfg_dev *fgdev)
{
	struct b3dfg_buffer *buf;
	struct device *dev = &fgdev->pdev->dev;

	pci_unmap_single(fgdev->pdev, fgdev->cur_dma_frame_addr,
			 fgdev->frame_size, PCI_DMA_FROMDEVICE);
	fgdev->cur_dma_frame_addr = 0;

	/* The buffer being filled is always the head of the queue. */
	buf = list_entry(fgdev->buffer_queue.next, struct b3dfg_buffer, list);

	dev_dbg(dev, "handle frame completion\n");
	if (fgdev->cur_dma_frame_idx == B3DFG_FRAMES_PER_BUFFER - 1) {

		/* last frame of that triplet completed */
		dev_dbg(dev, "triplet completed\n");
		buf->state = B3DFG_BUFFER_POPULATED;
		list_del_init(&buf->list);
		wake_up_interruptible(&fgdev->buffer_waitqueue);
	}
}
649 * Called with buffer_lock held.
651 * Note that idx is the (1-based) *next* frame to be transferred, while
652 * cur_dma_frame_idx is the (0-based) *last* frame to have been transferred (or
653 * -1 if none). Thus there should be a difference of 2 between them.
655 static bool setup_next_frame_transfer(struct b3dfg_dev *fgdev, int idx)
657 struct b3dfg_buffer *buf;
658 struct device *dev = &fgdev->pdev->dev;
659 bool need_ack = 1;
661 dev_dbg(dev, "program DMA transfer for next frame: %d\n", idx);
663 buf = list_entry(fgdev->buffer_queue.next, struct b3dfg_buffer, list);
664 if (idx == fgdev->cur_dma_frame_idx + 2) {
665 if (setup_frame_transfer(fgdev, buf, idx - 1))
666 dev_err(dev, "unable to map DMA buffer\n");
667 need_ack = 0;
668 } else {
669 dev_err(dev, "frame mismatch, got %d, expected %d\n",
670 idx, fgdev->cur_dma_frame_idx + 2);
672 /* FIXME: handle dropped triplets here */
675 return need_ack;
/* Shared interrupt handler.  Reads DMA_STS once and, in order: records
 * hardware-reported drops, handles cable unplug, completes a finished frame
 * transfer, and programs the next frame (or notes a ready triplet when no
 * buffer is queued).  Acks the interrupt unless starting a new transfer
 * implicitly acked it. */
static irqreturn_t b3dfg_intr(int irq, void *dev_id)
{
	struct b3dfg_dev *fgdev = dev_id;
	struct device *dev = &fgdev->pdev->dev;
	u32 sts;
	u8 dropped;
	bool need_ack = 1;
	irqreturn_t res = IRQ_HANDLED;

	sts = b3dfg_read32(fgdev, B3D_REG_DMA_STS);
	/* A zero status means the interrupt was not ours (shared line). */
	if (unlikely(sts == 0)) {
		dev_warn(dev, "ignore interrupt, DMA status is 0\n");
		res = IRQ_NONE;
		goto out;
	}

	if (unlikely(!fgdev->transmission_enabled)) {
		dev_warn(dev, "ignore interrupt, TX disabled\n");
		res = IRQ_HANDLED;
		goto out;
	}

	/* Handle dropped frames, as reported by the hardware. */
	dropped = (sts >> 8) & 0xff;
	dev_dbg(dev, "intr: DMA_STS=%08x (drop=%d comp=%d next=%d)\n",
		sts, dropped, !!(sts & 0x4), sts & 0x3);
	if (unlikely(dropped > 0)) {
		spin_lock(&fgdev->triplets_dropped_lock);
		fgdev->triplets_dropped += dropped;
		spin_unlock(&fgdev->triplets_dropped_lock);
	}

	/* Handle a cable state change (i.e. the wand being unplugged). */
	if (sts & 0x08) {
		handle_cstate_change(fgdev);
		goto out;
	}

	spin_lock(&fgdev->buffer_lock);
	if (unlikely(list_empty(&fgdev->buffer_queue))) {

		/* FIXME need more sanity checking here */
		dev_info(dev, "buffer not ready for next transfer\n");
		/* remember the data so queue_buffer() can start it later */
		fgdev->triplet_ready = 1;
		goto out_unlock;
	}

	/* Has a frame transfer been completed? */
	if (sts & 0x4) {
		u32 dma_status = b3dfg_read32(fgdev, B3D_REG_EC220_DMA_STS);

		/* Check for DMA errors reported by the hardware. */
		if (unlikely(dma_status & 0x1)) {
			dev_err(dev, "EC220 error: %08x\n", dma_status);

			/* FIXME flesh out error handling */
			goto out_unlock;
		}

		/* Sanity check, we should have a frame index at this point. */
		if (unlikely(fgdev->cur_dma_frame_idx == -1)) {
			dev_err(dev, "completed but no last idx?\n");

			/* FIXME flesh out error handling */
			goto out_unlock;
		}

		transfer_complete(fgdev);
	}

	/* Is there another frame transfer pending? */
	if (sts & 0x3)
		need_ack = setup_next_frame_transfer(fgdev, sts & 0x3);
	else
		fgdev->cur_dma_frame_idx = -1;

out_unlock:
	spin_unlock(&fgdev->buffer_lock);
out:
	if (need_ack) {
		dev_dbg(dev, "acknowledging interrupt\n");
		b3dfg_write32(fgdev, B3D_REG_EC220_DMA_STS, 0x0b);
	}
	return res;
}
764 static int b3dfg_open(struct inode *inode, struct file *filp)
766 struct b3dfg_dev *fgdev =
767 container_of(inode->i_cdev, struct b3dfg_dev, chardev);
769 dev_dbg(&fgdev->pdev->dev, "open\n");
770 filp->private_data = fgdev;
771 return 0;
774 static int b3dfg_release(struct inode *inode, struct file *filp)
776 struct b3dfg_dev *fgdev = filp->private_data;
777 dev_dbg(&fgdev->pdev->dev, "release\n");
778 disable_transmission(fgdev);
779 return 0;
782 static long b3dfg_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
784 struct b3dfg_dev *fgdev = filp->private_data;
786 switch (cmd) {
787 case B3DFG_IOCGFRMSZ:
788 return __put_user(fgdev->frame_size, (int __user *) arg);
789 case B3DFG_IOCGWANDSTAT:
790 return get_wand_status(fgdev, (int __user *) arg);
791 case B3DFG_IOCTTRANS:
792 return set_transmission(fgdev, (int) arg);
793 case B3DFG_IOCTQUEUEBUF:
794 return queue_buffer(fgdev, (int) arg);
795 case B3DFG_IOCTPOLLBUF:
796 return poll_buffer(fgdev, (void __user *) arg);
797 case B3DFG_IOCTWAITBUF:
798 return wait_buffer(fgdev, (void __user *) arg);
799 default:
800 dev_dbg(&fgdev->pdev->dev, "unrecognised ioctl %x\n", cmd);
801 return -EINVAL;
/* poll/select: readable when any buffer is populated; POLLERR when
 * transmission stopped or the cable state changed during the call. */
static unsigned int b3dfg_poll(struct file *filp, poll_table *poll_table)
{
	struct b3dfg_dev *fgdev = filp->private_data;
	unsigned long flags, when;
	int i;
	int r = 0;

	/* snapshot cable-state generation before registering on the queue */
	when = get_cstate_change(fgdev);
	poll_wait(filp, &fgdev->buffer_waitqueue, poll_table);

	spin_lock_irqsave(&fgdev->buffer_lock, flags);
	for (i = 0; i < b3dfg_nbuf; i++) {
		if (fgdev->buffers[i].state == B3DFG_BUFFER_POPULATED) {
			r = POLLIN | POLLRDNORM;
			break;
		}
	}
	spin_unlock_irqrestore(&fgdev->buffer_lock, flags);

	/* TODO: Confirm this is how we want to communicate the change. */
	if (!fgdev->transmission_enabled || when != get_cstate_change(fgdev))
		r = POLLERR;

	return r;
}
831 static int b3dfg_mmap(struct file *filp, struct vm_area_struct *vma)
833 struct b3dfg_dev *fgdev = filp->private_data;
834 unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
835 unsigned long vsize = vma->vm_end - vma->vm_start;
836 unsigned long bufdatalen = b3dfg_nbuf * fgdev->frame_size * 3;
837 unsigned long psize = bufdatalen - offset;
838 int r = 0;
840 if (vsize <= psize) {
841 vma->vm_flags |= VM_IO | VM_RESERVED | VM_CAN_NONLINEAR |
842 VM_PFNMAP;
843 vma->vm_ops = &b3dfg_vm_ops;
844 } else {
845 r = -EINVAL;
848 return r;
851 static struct file_operations b3dfg_fops = {
852 .owner = THIS_MODULE,
853 .open = b3dfg_open,
854 .release = b3dfg_release,
855 .unlocked_ioctl = b3dfg_ioctl,
856 .poll = b3dfg_poll,
857 .mmap = b3dfg_mmap,
860 static void free_all_frame_buffers(struct b3dfg_dev *fgdev)
862 int i, j;
863 for (i = 0; i < b3dfg_nbuf; i++)
864 for (j = 0; j < B3DFG_FRAMES_PER_BUFFER; j++)
865 kfree(fgdev->buffers[i].frame[j]);
866 kfree(fgdev->buffers);
869 /* initialize device and any data structures. called before any interrupts
870 * are enabled. */
871 static int b3dfg_init_dev(struct b3dfg_dev *fgdev)
873 int i, j;
874 u32 frm_size = b3dfg_read32(fgdev, B3D_REG_FRM_SIZE);
876 /* Disable interrupts. In abnormal circumstances (e.g. after a crash)
877 * the board may still be transmitting from the previous session. If we
878 * ensure that interrupts are disabled before we later enable them, we
879 * are sure to capture a triplet from the start, rather than starting
880 * from frame 2 or 3. Disabling interrupts causes the FG to throw away
881 * all buffered data and stop buffering more until interrupts are
882 * enabled again.
884 b3dfg_write32(fgdev, B3D_REG_HW_CTRL, 0);
886 fgdev->frame_size = frm_size * 4096;
887 fgdev->buffers = kzalloc(sizeof(struct b3dfg_buffer) * b3dfg_nbuf,
888 GFP_KERNEL);
889 if (!fgdev->buffers)
890 goto err_no_buf;
891 for (i = 0; i < b3dfg_nbuf; i++) {
892 struct b3dfg_buffer *buf = &fgdev->buffers[i];
893 for (j = 0; j < B3DFG_FRAMES_PER_BUFFER; j++) {
894 buf->frame[j] = kmalloc(fgdev->frame_size, GFP_KERNEL);
895 if (!buf->frame[j])
896 goto err_no_mem;
898 INIT_LIST_HEAD(&buf->list);
901 INIT_LIST_HEAD(&fgdev->buffer_queue);
902 init_waitqueue_head(&fgdev->buffer_waitqueue);
903 spin_lock_init(&fgdev->buffer_lock);
904 spin_lock_init(&fgdev->cstate_lock);
905 spin_lock_init(&fgdev->triplets_dropped_lock);
906 return 0;
908 err_no_mem:
909 free_all_frame_buffers(fgdev);
910 err_no_buf:
911 return -ENOMEM;
914 /* find next free minor number, returns -1 if none are availabile */
915 static int get_free_minor(void)
917 int i;
918 for (i = 0; i < B3DFG_MAX_DEVS; i++) {
919 if (b3dfg_devices[i] == 0)
920 return i;
922 return -1;
/* PCI probe: allocate per-device state, register the char device, map the
 * register BAR, allocate frame buffers, and hook up the interrupt handler.
 * Error paths unwind in strict reverse order of acquisition. */
static int __devinit b3dfg_probe(struct pci_dev *pdev,
	const struct pci_device_id *id)
{
	struct b3dfg_dev *fgdev = kzalloc(sizeof(*fgdev), GFP_KERNEL);
	int r = 0;
	int minor = get_free_minor();
	/* NOTE(review): devno is computed before minor is validated; MKDEV
	 * with -1 is harmless arithmetic but devno is only used after the
	 * check below — confirm intentional. */
	dev_t devno = MKDEV(MAJOR(b3dfg_devt), minor);
	unsigned long res_len;
	resource_size_t res_base;

	if (fgdev == NULL)
		return -ENOMEM;

	if (minor < 0) {
		dev_err(&pdev->dev, "too many devices found!\n");
		r = -EIO;
		goto err_free;
	}

	/* claim the minor slot before anything that can fail below */
	b3dfg_devices[minor] = 1;
	dev_info(&pdev->dev, "probe device with IRQ %d\n", pdev->irq);

	cdev_init(&fgdev->chardev, &b3dfg_fops);
	fgdev->chardev.owner = THIS_MODULE;

	r = cdev_add(&fgdev->chardev, devno, 1);
	if (r) {
		dev_err(&pdev->dev, "cannot add char device\n");
		goto err_release_minor;
	}

	/* drvdata of pdev->dev is NULL at this point, so the device is
	 * created with no driver data attached */
	fgdev->dev = device_create(
		b3dfg_class,
		&pdev->dev,
		devno,
		dev_get_drvdata(&pdev->dev),
		DRIVER_NAME "%d", minor);

	if (IS_ERR(fgdev->dev)) {
		dev_err(&pdev->dev, "cannot create device\n");
		r = PTR_ERR(fgdev->dev);
		goto err_del_cdev;
	}

	r = pci_enable_device(pdev);
	if (r) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto err_dev_unreg;
	}

	/* sanity-check BAR 0 before mapping it */
	res_len = pci_resource_len(pdev, B3DFG_BAR_REGS);
	if (res_len != B3DFG_REGS_LENGTH) {
		dev_err(&pdev->dev, "invalid register resource size\n");
		r = -EIO;
		goto err_disable;
	}

	if (pci_resource_flags(pdev, B3DFG_BAR_REGS)
				!= (IORESOURCE_MEM | IORESOURCE_SIZEALIGN)) {
		dev_err(&pdev->dev, "invalid resource flags\n");
		r = -EIO;
		goto err_disable;
	}
	r = pci_request_regions(pdev, DRIVER_NAME);
	if (r) {
		dev_err(&pdev->dev, "cannot obtain PCI resources\n");
		goto err_disable;
	}

	pci_set_master(pdev);

	/* board DMA is limited to 32-bit addressing */
	r = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (r) {
		dev_err(&pdev->dev, "no usable DMA configuration\n");
		goto err_free_res;
	}

	res_base = pci_resource_start(pdev, B3DFG_BAR_REGS);
	fgdev->regs = ioremap_nocache(res_base, res_len);
	if (!fgdev->regs) {
		dev_err(&pdev->dev, "regs ioremap failed\n");
		r = -EIO;
		goto err_free_res;
	}

	fgdev->pdev = pdev;
	pci_set_drvdata(pdev, fgdev);
	r = b3dfg_init_dev(fgdev);
	if (r < 0) {
		dev_err(&pdev->dev, "failed to initalize device\n");
		goto err_unmap;
	}

	/* interrupts may fire from here on; state is fully initialized */
	r = request_irq(pdev->irq, b3dfg_intr, IRQF_SHARED, DRIVER_NAME, fgdev);
	if (r) {
		dev_err(&pdev->dev, "couldn't request irq %d\n", pdev->irq);
		goto err_free_bufs;
	}

	return 0;

err_free_bufs:
	free_all_frame_buffers(fgdev);
err_unmap:
	iounmap(fgdev->regs);
err_free_res:
	pci_release_regions(pdev);
err_disable:
	pci_disable_device(pdev);
err_dev_unreg:
	device_destroy(b3dfg_class, devno);
err_del_cdev:
	cdev_del(&fgdev->chardev);
err_release_minor:
	b3dfg_devices[minor] = 0;
err_free:
	kfree(fgdev);
	return r;
}
/* PCI remove: tear down in reverse order of probe.  free_irq() comes first
 * so no interrupt can run against state being freed below. */
static void __devexit b3dfg_remove(struct pci_dev *pdev)
{
	struct b3dfg_dev *fgdev = pci_get_drvdata(pdev);
	unsigned int minor = MINOR(fgdev->chardev.dev);

	dev_dbg(&pdev->dev, "remove\n");

	free_irq(pdev->irq, fgdev);
	iounmap(fgdev->regs);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	device_destroy(b3dfg_class, MKDEV(MAJOR(b3dfg_devt), minor));
	cdev_del(&fgdev->chardev);
	free_all_frame_buffers(fgdev);
	kfree(fgdev);
	/* release the minor slot last, after all uses of fgdev */
	b3dfg_devices[minor] = 0;
}
/* PCI driver glue; __devexit_p discards remove on non-hotplug builds. */
static struct pci_driver b3dfg_driver = {
	.name = DRIVER_NAME,
	.id_table = b3dfg_ids,
	.probe = b3dfg_probe,
	.remove = __devexit_p(b3dfg_remove),
};
1070 static int __init b3dfg_module_init(void)
1072 int r;
1074 if (b3dfg_nbuf < 2) {
1075 printk(KERN_ERR DRIVER_NAME
1076 ": buffer_count is out of range (must be >= 2)");
1077 return -EINVAL;
1080 printk(KERN_INFO DRIVER_NAME ": loaded\n");
1082 b3dfg_class = class_create(THIS_MODULE, DRIVER_NAME);
1083 if (IS_ERR(b3dfg_class))
1084 return PTR_ERR(b3dfg_class);
1086 r = alloc_chrdev_region(&b3dfg_devt, 0, B3DFG_MAX_DEVS, DRIVER_NAME);
1087 if (r)
1088 goto err1;
1090 r = pci_register_driver(&b3dfg_driver);
1091 if (r)
1092 goto err2;
1094 return r;
1096 err2:
1097 unregister_chrdev_region(b3dfg_devt, B3DFG_MAX_DEVS);
1098 err1:
1099 class_destroy(b3dfg_class);
1100 return r;
/* Module exit: unregister in reverse order of b3dfg_module_init(). */
static void __exit b3dfg_module_exit(void)
{
	printk(KERN_INFO DRIVER_NAME ": unloaded\n");
	pci_unregister_driver(&b3dfg_driver);
	unregister_chrdev_region(b3dfg_devt, B3DFG_MAX_DEVS);
	class_destroy(b3dfg_class);
}

module_init(b3dfg_module_init);
module_exit(b3dfg_module_exit);