Import 2.3.1pre2
[davej-history.git] / drivers / misc / parport_share.c
blobf413c0a655a78ca3396d6bbbbe26081636d17bd0
/* $Id: parport_share.c,v 1.15 1998/01/11 12:06:17 philip Exp $
 * Parallel-port resource manager code.
 *
 * Authors: David Campbell <campbell@tirian.che.curtin.edu.au>
 *          Tim Waugh <tim@cyberelk.demon.co.uk>
 *          Jose Renau <renau@acm.org>
 *          Philip Blundell <philb@gnu.org>
 *          Andrea Arcangeli
 *
 * based on work by Grant Guenther <grant@torque.net>
 *                  and Philip Blundell
 */
14 #undef PARPORT_DEBUG_SHARING /* undef for production */
16 #include <linux/config.h>
18 #include <linux/tasks.h>
20 #include <linux/parport.h>
21 #include <linux/delay.h>
22 #include <linux/errno.h>
23 #include <linux/interrupt.h>
24 #include <linux/ioport.h>
25 #include <linux/kernel.h>
26 #include <linux/malloc.h>
27 #include <linux/sched.h>
29 #include <asm/spinlock.h>
30 #include <asm/irq.h>
32 #ifdef CONFIG_KMOD
33 #include <linux/kmod.h>
34 #endif
36 #undef PARPORT_PARANOID
38 #define PARPORT_DEFAULT_TIMESLICE (HZ/5)
40 static struct parport *portlist = NULL, *portlist_tail = NULL;
41 spinlock_t parportlist_lock = SPIN_LOCK_UNLOCKED;
43 void (*parport_probe_hook)(struct parport *port) = NULL;
45 /* Return a list of all the ports we know about. */
46 struct parport *parport_enumerate(void)
48 #ifdef CONFIG_KMOD
49 if (portlist == NULL) {
50 request_module("parport_lowlevel");
51 #ifdef CONFIG_PNP_PARPORT_MODULE
52 request_module("parport_probe");
53 #endif /* CONFIG_PNP_PARPORT_MODULE */
55 #endif /* CONFIG_KMOD */
56 return portlist;
59 struct parport *parport_register_port(unsigned long base, int irq, int dma,
60 struct parport_operations *ops)
62 struct parport *tmp;
63 int portnum;
64 char *name;
66 /* Check for a previously registered port.
67 NOTE: we will ignore irq and dma if we find a previously
68 registered device. */
69 for (tmp = portlist; tmp; tmp = tmp->next) {
70 if (tmp->base == base)
71 return tmp;
74 tmp = kmalloc(sizeof(struct parport), GFP_KERNEL);
75 if (!tmp) {
76 printk(KERN_WARNING "parport: memory squeeze\n");
77 return NULL;
80 /* Search for the lowest free parport number. */
81 for (portnum = 0; ; portnum++) {
82 struct parport *itr = portlist;
83 while (itr) {
84 if (itr->number == portnum)
85 /* No good, already used. */
86 break;
87 else
88 itr = itr->next;
91 if (itr == NULL)
92 /* Got to the end of the list. */
93 break;
96 /* Init our structure */
97 memset(tmp, 0, sizeof(struct parport));
98 tmp->base = base;
99 tmp->irq = irq;
100 tmp->dma = dma;
101 tmp->modes = 0;
102 tmp->next = NULL;
103 tmp->devices = tmp->cad = NULL;
104 tmp->flags = 0;
105 tmp->ops = ops;
106 tmp->number = portnum;
107 memset (&tmp->probe_info, 0, sizeof (struct parport_device_info));
108 tmp->cad_lock = RW_LOCK_UNLOCKED;
109 spin_lock_init(&tmp->waitlist_lock);
110 spin_lock_init(&tmp->pardevice_lock);
112 name = kmalloc(15, GFP_KERNEL);
113 if (!name) {
114 printk(KERN_ERR "parport: memory squeeze\n");
115 kfree(tmp);
116 return NULL;
118 sprintf(name, "parport%d", portnum);
119 tmp->name = name;
122 * Chain the entry to our list.
124 * This function must not run from an irq handler so we don' t need
125 * to clear irq on the local CPU. -arca
127 spin_lock(&parportlist_lock);
128 if (portlist_tail)
129 portlist_tail->next = tmp;
130 portlist_tail = tmp;
131 if (!portlist)
132 portlist = tmp;
133 spin_unlock(&parportlist_lock);
135 tmp->probe_info.class = PARPORT_CLASS_LEGACY; /* assume the worst */
136 tmp->waithead = tmp->waittail = NULL;
138 return tmp;
141 void parport_unregister_port(struct parport *port)
143 struct parport *p;
145 spin_lock(&parportlist_lock);
146 if (portlist == port) {
147 if ((portlist = port->next) == NULL)
148 portlist_tail = NULL;
149 } else {
150 for (p = portlist; (p != NULL) && (p->next != port);
151 p=p->next);
152 if (p) {
153 if ((p->next = port->next) == NULL)
154 portlist_tail = p;
156 else printk (KERN_WARNING
157 "%s not found in port list!\n", port->name);
159 spin_unlock(&parportlist_lock);
160 if (port->probe_info.class_name)
161 kfree (port->probe_info.class_name);
162 if (port->probe_info.mfr)
163 kfree (port->probe_info.mfr);
164 if (port->probe_info.model)
165 kfree (port->probe_info.model);
166 if (port->probe_info.cmdset)
167 kfree (port->probe_info.cmdset);
168 if (port->probe_info.description)
169 kfree (port->probe_info.description);
170 kfree(port->name);
171 kfree(port);
174 void parport_quiesce(struct parport *port)
176 if (port->devices) {
177 printk(KERN_WARNING "%s: attempt to quiesce active port.\n",
178 port->name);
179 return;
182 if (port->flags & PARPORT_FLAG_COMA) {
183 printk(KERN_WARNING "%s: attempt to quiesce comatose port.\n",
184 port->name);
185 return;
188 port->ops->release_resources(port);
190 port->flags |= PARPORT_FLAG_COMA;
193 struct pardevice *parport_register_device(struct parport *port, const char *name,
194 int (*pf)(void *), void (*kf)(void *),
195 void (*irq_func)(int, void *, struct pt_regs *),
196 int flags, void *handle)
198 struct pardevice *tmp;
200 if (port->flags & PARPORT_FLAG_EXCL) {
201 /* An exclusive device is registered. */
202 printk (KERN_DEBUG "%s: no more devices allowed\n",
203 port->name);
204 return NULL;
207 if (flags & PARPORT_DEV_LURK) {
208 if (!pf || !kf) {
209 printk(KERN_INFO "%s: refused to register lurking device (%s) without callbacks\n", port->name, name);
210 return NULL;
214 tmp = kmalloc(sizeof(struct pardevice), GFP_KERNEL);
215 if (tmp == NULL) {
216 printk(KERN_WARNING "%s: memory squeeze, couldn't register %s.\n", port->name, name);
217 return NULL;
220 tmp->state = kmalloc(sizeof(struct parport_state), GFP_KERNEL);
221 if (tmp->state == NULL) {
222 printk(KERN_WARNING "%s: memory squeeze, couldn't register %s.\n", port->name, name);
223 kfree(tmp);
224 return NULL;
227 /* We may need to claw back the port hardware. */
228 if (port->flags & PARPORT_FLAG_COMA) {
229 if (port->ops->claim_resources(port)) {
230 printk(KERN_WARNING
231 "%s: unable to get hardware to register %s.\n",
232 port->name, name);
233 kfree (tmp->state);
234 kfree (tmp);
235 return NULL;
237 port->flags &= ~PARPORT_FLAG_COMA;
240 tmp->name = name;
241 tmp->port = port;
242 tmp->preempt = pf;
243 tmp->wakeup = kf;
244 tmp->private = handle;
245 tmp->flags = flags;
246 tmp->irq_func = irq_func;
247 port->ops->init_state(tmp->state);
248 tmp->waiting = 0;
250 /* Chain this onto the list */
251 tmp->prev = NULL;
253 * This function must not run from an irq handler so we don' t need
254 * to clear irq on the local CPU. -arca
256 spin_lock(&port->pardevice_lock);
258 if (flags & PARPORT_DEV_EXCL) {
259 if (port->devices) {
260 spin_unlock (&port->pardevice_lock);
261 kfree (tmp->state);
262 kfree (tmp);
263 printk (KERN_DEBUG
264 "%s: cannot grant exclusive access for "
265 "device %s\n", port->name, name);
266 return NULL;
268 port->flags |= PARPORT_FLAG_EXCL;
271 tmp->next = port->devices;
272 if (port->devices)
273 port->devices->prev = tmp;
274 port->devices = tmp;
275 spin_unlock(&port->pardevice_lock);
277 inc_parport_count();
278 port->ops->inc_use_count();
280 init_waitqueue_head(&tmp->wait_q);
281 tmp->timeslice = PARPORT_DEFAULT_TIMESLICE;
282 tmp->waitnext = tmp->waitprev = NULL;
284 return tmp;
287 void parport_unregister_device(struct pardevice *dev)
289 struct parport *port;
291 #ifdef PARPORT_PARANOID
292 if (dev == NULL) {
293 printk(KERN_ERR "parport_unregister_device: passed NULL\n");
294 return;
296 #endif
298 port = dev->port;
300 if (port->cad == dev) {
301 printk(KERN_WARNING "%s: refused to unregister "
302 "currently active device %s.\n", port->name, dev->name);
303 return;
306 spin_lock(&port->pardevice_lock);
307 if (dev->next)
308 dev->next->prev = dev->prev;
309 if (dev->prev)
310 dev->prev->next = dev->next;
311 else
312 port->devices = dev->next;
314 if (dev->flags & PARPORT_DEV_EXCL)
315 port->flags &= ~PARPORT_FLAG_EXCL;
317 spin_unlock(&port->pardevice_lock);
319 kfree(dev->state);
320 kfree(dev);
322 dec_parport_count();
323 port->ops->dec_use_count();
325 /* If there are no more devices, put the port to sleep. */
326 if (!port->devices)
327 parport_quiesce(port);
329 return;
332 int parport_claim(struct pardevice *dev)
334 struct pardevice *oldcad;
335 struct parport *port = dev->port;
336 unsigned long flags;
338 if (port->cad == dev) {
339 printk(KERN_INFO "%s: %s already owner\n",
340 dev->port->name,dev->name);
341 return 0;
344 try_again:
345 /* Preempt any current device */
346 if ((oldcad = port->cad) != NULL) {
347 if (oldcad->preempt) {
348 if (oldcad->preempt(oldcad->private))
349 goto blocked;
350 port->ops->save_state(port, dev->state);
351 } else
352 goto blocked;
354 if (port->cad != oldcad) {
355 printk(KERN_WARNING
356 "%s: %s released port when preempted!\n",
357 port->name, oldcad->name);
358 if (port->cad)
359 goto blocked;
363 /* Can't fail from now on, so mark ourselves as no longer waiting. */
364 if (dev->waiting & 1) {
365 dev->waiting = 0;
367 /* Take ourselves out of the wait list again. */
368 spin_lock_irqsave (&port->waitlist_lock, flags);
369 if (dev->waitprev)
370 dev->waitprev->waitnext = dev->waitnext;
371 else
372 port->waithead = dev->waitnext;
373 if (dev->waitnext)
374 dev->waitnext->waitprev = dev->waitprev;
375 else
376 port->waittail = dev->waitprev;
377 spin_unlock_irqrestore (&port->waitlist_lock, flags);
378 dev->waitprev = dev->waitnext = NULL;
381 if (oldcad && port->irq != PARPORT_IRQ_NONE && !oldcad->irq_func)
383 * If there was an irq pending it should hopefully happen
384 * before return from enable_irq(). -arca
386 enable_irq(port->irq);
389 * Avoid running irq handlers if the pardevice doesn' t use it. -arca
391 if (port->irq != PARPORT_IRQ_NONE && !dev->irq_func)
392 disable_irq(port->irq);
394 /* Now we do the change of devices */
395 write_lock_irqsave(&port->cad_lock, flags);
396 port->cad = dev;
397 write_unlock_irqrestore(&port->cad_lock, flags);
399 /* Restore control registers */
400 port->ops->restore_state(port, dev->state);
401 dev->time = jiffies;
402 return 0;
404 blocked:
405 /* If this is the first time we tried to claim the port, register an
406 interest. This is only allowed for devices sleeping in
407 parport_claim_or_block(), or those with a wakeup function. */
408 if (dev->waiting & 2 || dev->wakeup) {
409 spin_lock_irqsave (&port->waitlist_lock, flags);
410 if (port->cad == NULL) {
411 /* The port got released in the meantime. */
412 spin_unlock_irqrestore (&port->waitlist_lock, flags);
413 goto try_again;
415 if (test_and_set_bit(0, &dev->waiting) == 0) {
416 /* First add ourselves to the end of the wait list. */
417 dev->waitnext = NULL;
418 dev->waitprev = port->waittail;
419 if (port->waittail) {
420 port->waittail->waitnext = dev;
421 port->waittail = dev;
422 } else
423 port->waithead = port->waittail = dev;
425 spin_unlock_irqrestore (&port->waitlist_lock, flags);
427 return -EAGAIN;
430 int parport_claim_or_block(struct pardevice *dev)
432 int r;
434 /* Signal to parport_claim() that we can wait even without a
435 wakeup function. */
436 dev->waiting = 2;
438 /* Try to claim the port. If this fails, we need to sleep. */
439 r = parport_claim(dev);
440 if (r == -EAGAIN) {
441 unsigned long flags;
442 #ifdef PARPORT_DEBUG_SHARING
443 printk(KERN_DEBUG "%s: parport_claim() returned -EAGAIN\n", dev->name);
444 #endif
445 save_flags (flags);
446 cli();
447 /* If dev->waiting is clear now, an interrupt
448 gave us the port and we would deadlock if we slept. */
449 if (dev->waiting) {
450 sleep_on(&dev->wait_q);
451 r = 1;
452 } else {
453 r = 0;
454 #ifdef PARPORT_DEBUG_SHARING
455 printk(KERN_DEBUG "%s: didn't sleep in parport_claim_or_block()\n",
456 dev->name);
457 #endif
459 restore_flags(flags);
460 #ifdef PARPORT_DEBUG_SHARING
461 if (dev->port->cad != dev)
462 printk(KERN_DEBUG "%s: exiting parport_claim_or_block but %s owns port!\n", dev->name, dev->port->cad?dev->port->cad->name:"nobody");
463 #endif
465 dev->waiting = 0;
466 return r;
469 void parport_release(struct pardevice *dev)
471 struct parport *port = dev->port;
472 struct pardevice *pd;
473 unsigned long flags;
475 /* Make sure that dev is the current device */
476 if (port->cad != dev) {
477 printk(KERN_WARNING "%s: %s tried to release parport "
478 "when not owner\n", port->name, dev->name);
479 return;
481 write_lock_irqsave(&port->cad_lock, flags);
482 port->cad = NULL;
483 write_unlock_irqrestore(&port->cad_lock, flags);
486 * Reenable irq and so discard the eventually pending irq while
487 * cad is NULL. -arca
489 if (port->irq != PARPORT_IRQ_NONE && !dev->irq_func)
490 enable_irq(port->irq);
492 /* Save control registers */
493 port->ops->save_state(port, dev->state);
495 /* If anybody is waiting, find out who's been there longest and
496 then wake them up. (Note: no locking required) */
497 for (pd = port->waithead; pd; pd = pd->waitnext) {
498 if (pd->waiting & 2) { /* sleeping in claim_or_block */
499 parport_claim(pd);
500 if (waitqueue_active(&pd->wait_q))
501 wake_up(&pd->wait_q);
502 return;
503 } else if (pd->wakeup) {
504 pd->wakeup(pd->private);
505 if (dev->port->cad)
506 return;
507 } else {
508 printk(KERN_ERR "%s: don't know how to wake %s\n", port->name, pd->name);
512 /* Nobody was waiting, so walk the list to see if anyone is
513 interested in being woken up. */
514 for (pd = port->devices; (port->cad == NULL) && pd; pd = pd->next) {
515 if (pd->wakeup && pd != dev)
516 pd->wakeup(pd->private);
520 void parport_parse_irqs(int nports, const char *irqstr[], int irqval[])
522 unsigned int i;
523 for (i = 0; i < nports && irqstr[i]; i++) {
524 if (!strncmp(irqstr[i], "auto", 4))
525 irqval[i] = PARPORT_IRQ_AUTO;
526 else if (!strncmp(irqstr[i], "none", 4))
527 irqval[i] = PARPORT_IRQ_NONE;
528 else {
529 char *ep;
530 unsigned long r = simple_strtoul(irqstr[i], &ep, 0);
531 if (ep != irqstr[i])
532 irqval[i] = r;
533 else {
534 printk("parport: bad irq specifier `%s'\n", irqstr[i]);
535 return;