/* $Id: parport_share.c,v 1.15 1998/01/11 12:06:17 philip Exp $
 * Parallel-port resource manager code.
 *
 * Authors: David Campbell <campbell@tirian.che.curtin.edu.au>
 *          Tim Waugh <tim@cyberelk.demon.co.uk>
 *          Jose Renau <renau@acm.org>
 *          Philip Blundell <philb@gnu.org>
 *          Andrea Arcangeli
 *
 * based on work by Grant Guenther <grant@torque.net>
 * and Philip Blundell
 */

#undef PARPORT_DEBUG_SHARING            /* undef for production */

#include <linux/config.h>

#include <linux/tasks.h>

#include <linux/parport.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/malloc.h>
#include <linux/sched.h>

#include <asm/spinlock.h>
#include <asm/irq.h>

#ifdef CONFIG_KMOD
#include <linux/kmod.h>
#endif

#undef PARPORT_PARANOID

#define PARPORT_DEFAULT_TIMESLICE       (HZ/5)

static struct parport *portlist = NULL, *portlist_tail = NULL;
spinlock_t parportlist_lock = SPIN_LOCK_UNLOCKED;

static struct parport_driver *driver_chain = NULL;
spinlock_t driverlist_lock = SPIN_LOCK_UNLOCKED;

static void call_driver_chain (int attach, struct parport *port)
{
        struct parport_driver *drv;

        for (drv = driver_chain; drv; drv = drv->next) {
                if (attach)
                        drv->attach (port);
                else
                        drv->detach (port);
        }
}

int parport_register_driver (struct parport_driver *drv)
{
        struct parport *port;

        spin_lock (&driverlist_lock);
        drv->next = driver_chain;
        driver_chain = drv;
        spin_unlock (&driverlist_lock);

        for (port = portlist; port; port = port->next)
                drv->attach (port);

        return 0;
}

void parport_unregister_driver (struct parport_driver *arg)
{
        struct parport_driver *drv = driver_chain, *olddrv = NULL;

        while (drv) {
                if (drv == arg) {
                        spin_lock (&driverlist_lock);
                        if (olddrv)
                                olddrv->next = drv->next;
                        else
                                driver_chain = drv->next;
                        spin_unlock (&driverlist_lock);
                        return;
                }
                olddrv = drv;
                drv = drv->next;
        }
}

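/*
 * Example (illustrative sketch only, never compiled): how a high-level
 * driver might hook into the driver chain above.  The example_attach /
 * example_detach callbacks and the example_driver object are made up for
 * this sketch; only parport_register_driver() and
 * parport_unregister_driver() come from this file.
 */
#if 0
static void example_attach (struct parport *port)
{
        printk (KERN_INFO "example: found %s at 0x%lx\n",
                port->name, port->base);
}

static void example_detach (struct parport *port)
{
        printk (KERN_INFO "example: %s is going away\n", port->name);
}

static struct parport_driver example_driver;

static int example_init (void)
{
        example_driver.attach = example_attach;
        example_driver.detach = example_detach;
        /* attach() is called for every existing port now, and for every
           port announced later via parport_announce_port(). */
        return parport_register_driver (&example_driver);
}

static void example_exit (void)
{
        parport_unregister_driver (&example_driver);
}
#endif
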
void (*parport_probe_hook)(struct parport *port) = NULL;

/* Return a list of all the ports we know about. */
struct parport *parport_enumerate(void)
{
#ifdef CONFIG_KMOD
        if (portlist == NULL) {
                request_module("parport_lowlevel");
#ifdef CONFIG_PNP_PARPORT_MODULE
                request_module("parport_probe");
#endif /* CONFIG_PNP_PARPORT_MODULE */
        }
#endif /* CONFIG_KMOD */
        return portlist;
}

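/*
 * Example (illustrative sketch only, never compiled): walking the list
 * returned by parport_enumerate().  The list is chained through
 * port->next; the printk format is just for illustration.
 */
#if 0
static void example_list_ports (void)
{
        struct parport *port;

        for (port = parport_enumerate (); port; port = port->next)
                printk (KERN_INFO "%s: base 0x%lx, irq %d, dma %d\n",
                        port->name, port->base, port->irq, port->dma);
}
#endif
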
struct parport *parport_register_port(unsigned long base, int irq, int dma,
                                      struct parport_operations *ops)
{
        struct parport *tmp;
        int portnum;
        char *name;

        /* Check for a previously registered port.
           NOTE: we will ignore irq and dma if we find a previously
           registered device.  */
        for (tmp = portlist; tmp; tmp = tmp->next) {
                if (tmp->base == base)
                        return tmp;
        }

        tmp = kmalloc(sizeof(struct parport), GFP_KERNEL);
        if (!tmp) {
                printk(KERN_WARNING "parport: memory squeeze\n");
                return NULL;
        }

        /* Search for the lowest free parport number. */
        for (portnum = 0; ; portnum++) {
                struct parport *itr = portlist;
                while (itr) {
                        if (itr->number == portnum)
                                /* No good, already used. */
                                break;
                        else
                                itr = itr->next;
                }

                if (itr == NULL)
                        /* Got to the end of the list. */
                        break;
        }

        /* Init our structure */
        memset(tmp, 0, sizeof(struct parport));
        tmp->base = base;
        tmp->irq = irq;
        tmp->dma = dma;
        tmp->modes = 0;
        tmp->next = NULL;
        tmp->devices = tmp->cad = NULL;
        tmp->flags = 0;
        tmp->ops = ops;
        tmp->number = portnum;
        memset (&tmp->probe_info, 0, sizeof (struct parport_device_info));
        tmp->cad_lock = RW_LOCK_UNLOCKED;
        spin_lock_init(&tmp->waitlist_lock);
        spin_lock_init(&tmp->pardevice_lock);

        name = kmalloc(15, GFP_KERNEL);
        if (!name) {
                printk(KERN_ERR "parport: memory squeeze\n");
                kfree(tmp);
                return NULL;
        }
        sprintf(name, "parport%d", portnum);
        tmp->name = name;

        /*
         * Chain the entry onto our list.
         *
         * This function must not run from an irq handler, so we don't need
         * to disable irqs on the local CPU. -arca
         */
        spin_lock(&parportlist_lock);
        if (portlist_tail)
                portlist_tail->next = tmp;
        portlist_tail = tmp;
        if (!portlist)
                portlist = tmp;
        spin_unlock(&parportlist_lock);

        tmp->probe_info.class = PARPORT_CLASS_LEGACY;  /* assume the worst */
        tmp->waithead = tmp->waittail = NULL;

        return tmp;
}

void parport_announce_port (struct parport *port)
{
        /* Let drivers know that a new port has arrived. */
        call_driver_chain (1, port);
}

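/*
 * Example (illustrative sketch only, never compiled): the sequence a
 * low-level port driver would use to publish a port.  The base address,
 * irq/dma choices and the example_ops structure are made up for this
 * sketch; a real driver fills in a complete struct parport_operations
 * for its hardware before registering.
 */
#if 0
static struct parport_operations example_ops;   /* filled in elsewhere */

static int example_probe_one (void)
{
        struct parport *port;

        port = parport_register_port (0x378, PARPORT_IRQ_NONE,
                                      PARPORT_DMA_NONE, &example_ops);
        if (!port)
                return -ENOMEM;

        /* Only after the port is fully set up do we tell the world. */
        parport_announce_port (port);
        return 0;
}
#endif
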
void parport_unregister_port(struct parport *port)
{
        struct parport *p;

        /* Spread the word. */
        call_driver_chain (0, port);

        spin_lock(&parportlist_lock);
        if (portlist == port) {
                if ((portlist = port->next) == NULL)
                        portlist_tail = NULL;
        } else {
                for (p = portlist; (p != NULL) && (p->next != port);
                     p = p->next);
                if (p) {
                        if ((p->next = port->next) == NULL)
                                portlist_tail = p;
                }
                else printk (KERN_WARNING
                             "%s not found in port list!\n", port->name);
        }
        spin_unlock(&parportlist_lock);

        if (port->probe_info.class_name)
                kfree (port->probe_info.class_name);
        if (port->probe_info.mfr)
                kfree (port->probe_info.mfr);
        if (port->probe_info.model)
                kfree (port->probe_info.model);
        if (port->probe_info.cmdset)
                kfree (port->probe_info.cmdset);
        if (port->probe_info.description)
                kfree (port->probe_info.description);

        kfree(port->name);
        kfree(port);
}

void parport_quiesce(struct parport *port)
{
        if (port->devices) {
                printk(KERN_WARNING "%s: attempt to quiesce active port.\n",
                       port->name);
                return;
        }

        if (port->flags & PARPORT_FLAG_COMA) {
                printk(KERN_WARNING "%s: attempt to quiesce comatose port.\n",
                       port->name);
                return;
        }

        port->ops->release_resources(port);
        port->flags |= PARPORT_FLAG_COMA;
}

struct pardevice *parport_register_device(struct parport *port, const char *name,
                          int (*pf)(void *), void (*kf)(void *),
                          void (*irq_func)(int, void *, struct pt_regs *),
                          int flags, void *handle)
{
        struct pardevice *tmp;

        if (port->flags & PARPORT_FLAG_EXCL) {
                /* An exclusive device is registered. */
                printk (KERN_DEBUG "%s: no more devices allowed\n",
                        port->name);
                return NULL;
        }

        if (flags & PARPORT_DEV_LURK) {
                if (!pf || !kf) {
                        printk(KERN_INFO "%s: refused to register lurking device (%s) without callbacks\n", port->name, name);
                        return NULL;
                }
        }

        tmp = kmalloc(sizeof(struct pardevice), GFP_KERNEL);
        if (tmp == NULL) {
                printk(KERN_WARNING "%s: memory squeeze, couldn't register %s.\n", port->name, name);
                return NULL;
        }

        tmp->state = kmalloc(sizeof(struct parport_state), GFP_KERNEL);
        if (tmp->state == NULL) {
                printk(KERN_WARNING "%s: memory squeeze, couldn't register %s.\n", port->name, name);
                kfree(tmp);
                return NULL;
        }

        /* We may need to claw back the port hardware. */
        if (port->flags & PARPORT_FLAG_COMA) {
                if (port->ops->claim_resources(port)) {
                        printk(KERN_WARNING
                               "%s: unable to get hardware to register %s.\n",
                               port->name, name);
                        kfree (tmp->state);
                        kfree (tmp);
                        return NULL;
                }
                port->flags &= ~PARPORT_FLAG_COMA;
        }

        tmp->name = name;
        tmp->port = port;
        tmp->preempt = pf;
        tmp->wakeup = kf;
        tmp->private = handle;
        tmp->flags = flags;
        tmp->irq_func = irq_func;
        port->ops->init_state(tmp->state);
        tmp->waiting = 0;

        /* Chain this onto the list */
        tmp->prev = NULL;
        /*
         * This function must not run from an irq handler, so we don't need
         * to disable irqs on the local CPU. -arca
         */
        spin_lock(&port->pardevice_lock);

        if (flags & PARPORT_DEV_EXCL) {
                if (port->devices) {
                        spin_unlock (&port->pardevice_lock);
                        kfree (tmp->state);
                        kfree (tmp);
                        printk (KERN_DEBUG
                                "%s: cannot grant exclusive access for "
                                "device %s\n", port->name, name);
                        return NULL;
                }
                port->flags |= PARPORT_FLAG_EXCL;
        }

        tmp->next = port->devices;
        if (port->devices)
                port->devices->prev = tmp;
        port->devices = tmp;
        spin_unlock(&port->pardevice_lock);

        inc_parport_count();
        port->ops->inc_use_count();

        init_waitqueue_head(&tmp->wait_q);
        tmp->timeslice = PARPORT_DEFAULT_TIMESLICE;
        tmp->waitnext = tmp->waitprev = NULL;

        return tmp;
}

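/*
 * Example (illustrative sketch only, never compiled): registering a device
 * on a port.  The example_preempt/example_wakeup callbacks, the device
 * name "example" and the example_private handle are made up for this
 * sketch; the argument order matches parport_register_device() above.
 */
#if 0
static int example_preempt (void *handle)
{
        /* Return non-zero to refuse to give up the port right now. */
        return 0;
}

static void example_wakeup (void *handle)
{
        /* The port has become free; we could try parport_claim() here. */
}

static struct pardevice *example_attach_device (struct parport *port,
                                                void *example_private)
{
        return parport_register_device (port, "example",
                                        example_preempt, example_wakeup,
                                        NULL /* no irq handler */,
                                        0 /* no special flags */,
                                        example_private);
}
#endif
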
void parport_unregister_device(struct pardevice *dev)
{
        struct parport *port;

#ifdef PARPORT_PARANOID
        if (dev == NULL) {
                printk(KERN_ERR "parport_unregister_device: passed NULL\n");
                return;
        }
#endif

        port = dev->port;

        if (port->cad == dev) {
                printk(KERN_WARNING "%s: refused to unregister "
                       "currently active device %s.\n", port->name, dev->name);
                return;
        }

        spin_lock(&port->pardevice_lock);
        if (dev->next)
                dev->next->prev = dev->prev;
        if (dev->prev)
                dev->prev->next = dev->next;
        else
                port->devices = dev->next;

        if (dev->flags & PARPORT_DEV_EXCL)
                port->flags &= ~PARPORT_FLAG_EXCL;

        spin_unlock(&port->pardevice_lock);

        kfree(dev->state);
        kfree(dev);

        dec_parport_count();
        port->ops->dec_use_count();

        /* If there are no more devices, put the port to sleep. */
        if (!port->devices)
                parport_quiesce(port);

        return;
}

int parport_claim(struct pardevice *dev)
{
        struct pardevice *oldcad;
        struct parport *port = dev->port;
        unsigned long flags;

        if (port->cad == dev) {
                printk(KERN_INFO "%s: %s already owner\n",
                       dev->port->name, dev->name);
                return 0;
        }

try_again:
        /* Preempt any current device */
        if ((oldcad = port->cad) != NULL) {
                if (oldcad->preempt) {
                        if (oldcad->preempt(oldcad->private))
                                goto blocked;
                        port->ops->save_state(port, dev->state);
                } else
                        goto blocked;

                if (port->cad != oldcad) {
                        printk(KERN_WARNING
                               "%s: %s released port when preempted!\n",
                               port->name, oldcad->name);
                        if (port->cad)
                                goto blocked;
                }
        }

        /* Can't fail from now on, so mark ourselves as no longer waiting. */
        if (dev->waiting & 1) {
                dev->waiting = 0;

                /* Take ourselves out of the wait list again. */
                spin_lock_irqsave (&port->waitlist_lock, flags);
                if (dev->waitprev)
                        dev->waitprev->waitnext = dev->waitnext;
                else
                        port->waithead = dev->waitnext;
                if (dev->waitnext)
                        dev->waitnext->waitprev = dev->waitprev;
                else
                        port->waittail = dev->waitprev;
                spin_unlock_irqrestore (&port->waitlist_lock, flags);
                dev->waitprev = dev->waitnext = NULL;
        }

        if (oldcad && port->irq != PARPORT_IRQ_NONE && !oldcad->irq_func)
                /*
                 * If there was an irq pending, it should hopefully be
                 * delivered before enable_irq() returns. -arca
                 */
                enable_irq(port->irq);

        /*
         * Avoid running the irq handler if the pardevice doesn't use it. -arca
         */
        if (port->irq != PARPORT_IRQ_NONE && !dev->irq_func)
                disable_irq(port->irq);

        /* Now we do the change of devices */
        write_lock_irqsave(&port->cad_lock, flags);
        port->cad = dev;
        write_unlock_irqrestore(&port->cad_lock, flags);

        /* Restore control registers */
        port->ops->restore_state(port, dev->state);
        dev->time = jiffies;
        return 0;

blocked:
        /* If this is the first time we tried to claim the port, register an
           interest.  This is only allowed for devices sleeping in
           parport_claim_or_block(), or those with a wakeup function. */
        if (dev->waiting & 2 || dev->wakeup) {
                spin_lock_irqsave (&port->waitlist_lock, flags);
                if (port->cad == NULL) {
                        /* The port got released in the meantime. */
                        spin_unlock_irqrestore (&port->waitlist_lock, flags);
                        goto try_again;
                }
                if (test_and_set_bit(0, &dev->waiting) == 0) {
                        /* First add ourselves to the end of the wait list. */
                        dev->waitnext = NULL;
                        dev->waitprev = port->waittail;
                        if (port->waittail) {
                                port->waittail->waitnext = dev;
                                port->waittail = dev;
                        } else
                                port->waithead = port->waittail = dev;
                }
                spin_unlock_irqrestore (&port->waitlist_lock, flags);
        }

        return -EAGAIN;
}

int parport_claim_or_block(struct pardevice *dev)
{
        int r;

        /* Signal to parport_claim() that we can wait even without a
           wakeup function. */
        dev->waiting = 2;

        /* Try to claim the port.  If this fails, we need to sleep. */
        r = parport_claim(dev);
        if (r == -EAGAIN) {
                unsigned long flags;
#ifdef PARPORT_DEBUG_SHARING
                printk(KERN_DEBUG "%s: parport_claim() returned -EAGAIN\n", dev->name);
#endif
                save_flags (flags);
                cli();
                /* If dev->waiting is clear now, an interrupt
                   gave us the port and we would deadlock if we slept. */
                if (dev->waiting) {
                        sleep_on(&dev->wait_q);
                        r = 1;
                } else {
                        r = 0;
#ifdef PARPORT_DEBUG_SHARING
                        printk(KERN_DEBUG "%s: didn't sleep in parport_claim_or_block()\n",
                               dev->name);
#endif
                }
                restore_flags(flags);
#ifdef PARPORT_DEBUG_SHARING
                if (dev->port->cad != dev)
                        printk(KERN_DEBUG "%s: exiting parport_claim_or_block but %s owns port!\n", dev->name, dev->port->cad ? dev->port->cad->name : "nobody");
#endif
        }
        dev->waiting = 0;
        return r;
}

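/*
 * Example (illustrative sketch only, never compiled): the usual
 * claim/use/release cycle for a registered pardevice.  The "dev" argument
 * and the do-the-transfer step are placeholders; real code would drive
 * the hardware through dev->port->ops while it owns the port.
 */
#if 0
static void example_transfer (struct pardevice *dev)
{
        /* parport_claim_or_block() returns only once we own the port
           (1 if it had to sleep, 0 if the port was free immediately). */
        parport_claim_or_block (dev);

        /* We are now port->cad: owner of the port until we release it. */
        /* ... perform the transfer through dev->port->ops ... */

        parport_release (dev);
}
#endif
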
void parport_release(struct pardevice *dev)
{
        struct parport *port = dev->port;
        struct pardevice *pd;
        unsigned long flags;

        /* Make sure that dev is the current device */
        if (port->cad != dev) {
                printk(KERN_WARNING "%s: %s tried to release parport "
                       "when not owner\n", port->name, dev->name);
                return;
        }

        write_lock_irqsave(&port->cad_lock, flags);
        port->cad = NULL;
        write_unlock_irqrestore(&port->cad_lock, flags);

        /*
         * Re-enable the irq, so that any irq left pending is delivered
         * (and discarded) while cad is still NULL. -arca
         */
        if (port->irq != PARPORT_IRQ_NONE && !dev->irq_func)
                enable_irq(port->irq);

        /* Save control registers */
        port->ops->save_state(port, dev->state);

        /* If anybody is waiting, find out who's been there longest and
           then wake them up. (Note: no locking required) */
        for (pd = port->waithead; pd; pd = pd->waitnext) {
                if (pd->waiting & 2) { /* sleeping in claim_or_block */
                        parport_claim(pd);
                        if (waitqueue_active(&pd->wait_q))
                                wake_up(&pd->wait_q);
                        return;
                } else if (pd->wakeup) {
                        pd->wakeup(pd->private);
                        if (dev->port->cad)
                                return;
                } else {
                        printk(KERN_ERR "%s: don't know how to wake %s\n", port->name, pd->name);
                }
        }

        /* Nobody was waiting, so walk the list to see if anyone is
           interested in being woken up. */
        for (pd = port->devices; (port->cad == NULL) && pd; pd = pd->next) {
                if (pd->wakeup && pd != dev)
                        pd->wakeup(pd->private);
        }
}

static int parport_parse_params (int nports, const char *str[], int val[],
                                 int automatic, int none)
{
        unsigned int i;

        for (i = 0; i < nports && str[i]; i++) {
                if (!strncmp(str[i], "auto", 4))
                        val[i] = automatic;
                else if (!strncmp(str[i], "none", 4))
                        val[i] = none;
                else {
                        char *ep;
                        unsigned long r = simple_strtoul(str[i], &ep, 0);
                        if (ep != str[i])
                                val[i] = r;
                        else {
                                printk("parport: bad specifier `%s'\n", str[i]);
                                return -1;
                        }
                }
        }

        return 0;
}

int parport_parse_irqs(int nports, const char *irqstr[], int irqval[])
{
        return parport_parse_params (nports, irqstr, irqval, PARPORT_IRQ_AUTO,
                                     PARPORT_IRQ_NONE);
}

int parport_parse_dmas(int nports, const char *dmastr[], int dmaval[])
{
        return parport_parse_params (nports, dmastr, dmaval, PARPORT_DMA_AUTO,
                                     PARPORT_DMA_NONE);
}
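
/*
 * Example (illustrative sketch only, never compiled): how a low-level
 * driver might turn module-parameter strings into irq/dma numbers with
 * the helpers above.  The irq_str/dma_str arrays are made-up inputs.
 */
#if 0
static const char *irq_str[2] = { "7", "auto" };
static const char *dma_str[2] = { "none", "3" };
static int irq_val[2], dma_val[2];

static int example_parse (void)
{
        if (parport_parse_irqs (2, irq_str, irq_val) ||
            parport_parse_dmas (2, dma_str, dma_val))
                return -EINVAL;         /* a bad specifier was rejected */
        /* irq_val[1] is now PARPORT_IRQ_AUTO, dma_val[0] PARPORT_DMA_NONE. */
        return 0;
}
#endif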