drivers/parport/share.c (Linux 2.3.32, davej-history.git)
/* $Id: parport_share.c,v 1.15 1998/01/11 12:06:17 philip Exp $
 * Parallel-port resource manager code.
 *
 * Authors: David Campbell <campbell@tirian.che.curtin.edu.au>
 *          Tim Waugh <tim@cyberelk.demon.co.uk>
 *          Jose Renau <renau@acm.org>
 *          Philip Blundell <philb@gnu.org>
 *          Andrea Arcangeli
 *
 * based on work by Grant Guenther <grant@torque.net>
 *          and Philip Blundell
 */

#undef PARPORT_DEBUG_SHARING		/* undef for production */

#include <linux/config.h>
#include <linux/string.h>
#include <linux/threads.h>
#include <linux/parport.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/malloc.h>
#include <linux/sched.h>
#include <linux/kmod.h>

#include <linux/spinlock.h>
#include <asm/irq.h>

#undef PARPORT_PARANOID

#define PARPORT_DEFAULT_TIMESLICE	(HZ/5)

unsigned long parport_default_timeslice = PARPORT_DEFAULT_TIMESLICE;
int parport_default_spintime = DEFAULT_SPIN_TIME;

static struct parport *portlist = NULL, *portlist_tail = NULL;
spinlock_t parportlist_lock = SPIN_LOCK_UNLOCKED;

static struct parport_driver *driver_chain = NULL;
spinlock_t driverlist_lock = SPIN_LOCK_UNLOCKED;

/* What you can do to a port that's gone away.. */
static void dead_write_lines (struct parport *p, unsigned char b){}
static unsigned char dead_read_lines (struct parport *p) { return 0; }
static unsigned char dead_frob_lines (struct parport *p, unsigned char b,
                             unsigned char c) { return 0; }
static void dead_onearg (struct parport *p){}
static void dead_initstate (struct pardevice *d, struct parport_state *s) { }
static void dead_state (struct parport *p, struct parport_state *s) { }
static void dead_noargs (void) { }
static size_t dead_write (struct parport *p, const void *b, size_t l, int f)
{ return 0; }
static size_t dead_read (struct parport *p, void *b, size_t l, int f)
{ return 0; }
static struct parport_operations dead_ops = {
        dead_write_lines,       /* data */
        dead_read_lines,
        dead_write_lines,       /* control */
        dead_read_lines,
        dead_frob_lines,
        dead_read_lines,        /* status */
        dead_onearg,            /* enable_irq */
        dead_onearg,            /* disable_irq */
        dead_onearg,            /* data_forward */
        dead_onearg,            /* data_reverse */
        dead_initstate,         /* init_state */
        dead_state,
        dead_state,
        dead_noargs,            /* xxx_use_count */
        dead_noargs,
        dead_write,             /* epp */
        dead_read,
        dead_write,
        dead_read,
        dead_write,             /* ecp */
        dead_read,
        dead_write,
        dead_write,             /* compat */
        dead_read,              /* nibble */
        dead_read               /* byte */
};
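
/* Tell every registered high-level driver about a port coming or going.
 * attach is non-zero for a newly announced port, zero for one that is
 * being unregistered. */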
static void call_driver_chain(int attach, struct parport *port)
{
        struct parport_driver *drv;

        for (drv = driver_chain; drv; drv = drv->next) {
                if (attach)
                        drv->attach (port);
                else
                        drv->detach (port);
        }
}
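
/* Register a high-level driver: add it to the driver chain and call its
 * attach callback for every port already known. */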
int parport_register_driver (struct parport_driver *drv)
{
        struct parport *port;

        spin_lock (&driverlist_lock);
        drv->next = driver_chain;
        driver_chain = drv;
        spin_unlock (&driverlist_lock);

        for (port = portlist; port; port = port->next)
                drv->attach (port);

        /* For compatibility with 2.2, check the (obsolete) parport_lowlevel
         * alias in case some people haven't changed to post-install rules
         * yet.  parport_enumerate (itself deprecated) will printk a
         * friendly reminder. */
        if (!portlist)
                parport_enumerate ();

        return 0;
}
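
/* Remove a driver from the chain.  No detach callbacks are made here; the
 * caller is expected to have detached from its ports already. */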
void parport_unregister_driver (struct parport_driver *arg)
{
        struct parport_driver *drv = driver_chain, *olddrv = NULL;

        while (drv) {
                if (drv == arg) {
                        spin_lock (&driverlist_lock);
                        if (olddrv)
                                olddrv->next = drv->next;
                        else
                                driver_chain = drv->next;
                        spin_unlock (&driverlist_lock);
                        return;
                }
                olddrv = drv;
                drv = drv->next;
        }
}
/* Return a list of all the ports we know about. */
struct parport *parport_enumerate(void)
{
        /* Attempt to make things work on 2.2 systems. */
        if (!portlist) {
                request_module ("parport_lowlevel");
                if (portlist)
                        /* The user has a parport_lowlevel alias in
                         * modules.conf.  Warn them that it won't work
                         * for long. */
                        printk (KERN_WARNING
                                "parport: 'parport_lowlevel' is deprecated; "
                                "see parport.txt\n");
        }

        return portlist;
}
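
/* Allocate and initialise a struct parport for a newly-found port and add
 * it to the global port list.  High-level drivers are not told about it
 * until parport_announce_port() is called. */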
struct parport *parport_register_port(unsigned long base, int irq, int dma,
                                      struct parport_operations *ops)
{
        struct parport *tmp;
        int portnum;
        int device;
        char *name;

        tmp = kmalloc(sizeof(struct parport), GFP_KERNEL);
        if (!tmp) {
                printk(KERN_WARNING "parport: memory squeeze\n");
                return NULL;
        }

        /* Search for the lowest free parport number. */
        for (portnum = 0; ; portnum++) {
                struct parport *itr = portlist;
                while (itr) {
                        if (itr->number == portnum)
                                /* No good, already used. */
                                break;
                        else
                                itr = itr->next;
                }

                if (itr == NULL)
                        /* Got to the end of the list. */
                        break;
        }

        /* Init our structure */
        memset(tmp, 0, sizeof(struct parport));
        tmp->base = base;
        tmp->irq = irq;
        tmp->dma = dma;
        tmp->muxport = tmp->daisy = tmp->muxsel = -1;
        tmp->modes = 0;
        tmp->next = NULL;
        tmp->devices = tmp->cad = NULL;
        tmp->flags = 0;
        tmp->ops = ops;
        tmp->portnum = tmp->number = portnum;
        tmp->physport = tmp;
        memset (tmp->probe_info, 0, 5 * sizeof (struct parport_device_info));
        tmp->cad_lock = RW_LOCK_UNLOCKED;
        spin_lock_init(&tmp->waitlist_lock);
        spin_lock_init(&tmp->pardevice_lock);
        tmp->ieee1284.mode = IEEE1284_MODE_COMPAT;
        tmp->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
        init_MUTEX_LOCKED (&tmp->ieee1284.irq); /* actually a semaphore at 0 */
        tmp->spintime = parport_default_spintime;

        name = kmalloc(15, GFP_KERNEL);
        if (!name) {
                printk(KERN_ERR "parport: memory squeeze\n");
                kfree(tmp);
                return NULL;
        }
        sprintf(name, "parport%d", portnum);
        tmp->name = name;

        /*
         * Chain the entry to our list.
         *
         * This function must not run from an irq handler so we don't need
         * to clear irq on the local CPU. -arca
         */
        spin_lock(&parportlist_lock);
        if (portlist_tail)
                portlist_tail->next = tmp;
        portlist_tail = tmp;
        if (!portlist)
                portlist = tmp;
        spin_unlock(&parportlist_lock);

        for (device = 0; device < 5; device++)
                /* assume the worst */
                tmp->probe_info[device].class = PARPORT_CLASS_LEGACY;

        tmp->waithead = tmp->waittail = NULL;

        return tmp;
}
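
/* Announce a registered port: analyse any IEEE 1284.3 daisy chain and then
 * let the high-level drivers know the port exists. */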
void parport_announce_port (struct parport *port)
{
#ifdef CONFIG_PARPORT_1284
        /* Analyse the IEEE1284.3 topology of the port. */
        parport_daisy_init (port);
#endif

        /* Let drivers know that a new port has arrived. */
        call_driver_chain (1, port);
}
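
/* Free everything a port owns: its probe information strings, its name,
 * and the structure itself.  Called once no devices remain on the port. */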
static void free_port (struct parport *port)
{
        int d;
        for (d = 0; d < 5; d++) {
                if (port->probe_info[d].class_name)
                        kfree (port->probe_info[d].class_name);
                if (port->probe_info[d].mfr)
                        kfree (port->probe_info[d].mfr);
                if (port->probe_info[d].model)
                        kfree (port->probe_info[d].model);
                if (port->probe_info[d].cmdset)
                        kfree (port->probe_info[d].cmdset);
                if (port->probe_info[d].description)
                        kfree (port->probe_info[d].description);
        }

        kfree(port->name);
        kfree(port);
}
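
/* Take a port out of service: switch its operations to the dead_ops stubs,
 * tell the drivers, and unlink it from the port list.  The structure is
 * only freed immediately if no devices are still registered on it. */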
void parport_unregister_port(struct parport *port)
{
        struct parport *p;

        port->ops = &dead_ops;

        /* Spread the word. */
        call_driver_chain (0, port);

#ifdef CONFIG_PARPORT_1284
        /* Forget the IEEE1284.3 topology of the port. */
        parport_daisy_fini (port);
#endif

        spin_lock(&parportlist_lock);
        if (portlist == port) {
                if ((portlist = port->next) == NULL)
                        portlist_tail = NULL;
        } else {
                for (p = portlist; (p != NULL) && (p->next != port);
                     p=p->next);
                if (p) {
                        if ((p->next = port->next) == NULL)
                                portlist_tail = p;
                }
                else printk (KERN_WARNING
                             "%s not found in port list!\n", port->name);
        }
        spin_unlock(&parportlist_lock);

        if (!port->devices)
                free_port (port);
}
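
/* Register a device on a port.  pf (preempt) and kf (wake-up) are the
 * callbacks used for port sharing, irq_func is the device's interrupt
 * callback, and handle is the cookie passed back to those callbacks. */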
struct pardevice *parport_register_device(struct parport *port, const char *name,
                          int (*pf)(void *), void (*kf)(void *),
                          void (*irq_func)(int, void *, struct pt_regs *),
                          int flags, void *handle)
{
        struct pardevice *tmp;

        if (port->physport->flags & PARPORT_FLAG_EXCL) {
                /* An exclusive device is registered. */
                printk (KERN_DEBUG "%s: no more devices allowed\n",
                        port->name);
                return NULL;
        }

        if (flags & PARPORT_DEV_LURK) {
                if (!pf || !kf) {
                        printk(KERN_INFO "%s: refused to register lurking device (%s) without callbacks\n", port->name, name);
                        return NULL;
                }
        }

        tmp = kmalloc(sizeof(struct pardevice), GFP_KERNEL);
        if (tmp == NULL) {
                printk(KERN_WARNING "%s: memory squeeze, couldn't register %s.\n", port->name, name);
                return NULL;
        }

        tmp->state = kmalloc(sizeof(struct parport_state), GFP_KERNEL);
        if (tmp->state == NULL) {
                printk(KERN_WARNING "%s: memory squeeze, couldn't register %s.\n", port->name, name);
                kfree(tmp);
                return NULL;
        }

        tmp->name = name;
        tmp->port = port;
        tmp->daisy = -1;
        tmp->preempt = pf;
        tmp->wakeup = kf;
        tmp->private = handle;
        tmp->flags = flags;
        tmp->irq_func = irq_func;
        tmp->waiting = 0;
        tmp->timeout = 5 * HZ;

        /* Chain this onto the list */
        tmp->prev = NULL;
        /*
         * This function must not run from an irq handler so we don't need
         * to clear irq on the local CPU. -arca
         */
        spin_lock(&port->physport->pardevice_lock);

        if (flags & PARPORT_DEV_EXCL) {
                if (port->physport->devices) {
                        spin_unlock (&port->physport->pardevice_lock);
                        kfree (tmp->state);
                        kfree (tmp);
                        printk (KERN_DEBUG
                                "%s: cannot grant exclusive access for "
                                "device %s\n", port->name, name);
                        return NULL;
                }
                port->flags |= PARPORT_FLAG_EXCL;
        }

        tmp->next = port->physport->devices;
        if (port->physport->devices)
                port->physport->devices->prev = tmp;
        port->physport->devices = tmp;
        spin_unlock(&port->physport->pardevice_lock);

        inc_parport_count();
        port->ops->inc_use_count();

        init_waitqueue_head(&tmp->wait_q);
        tmp->timeslice = parport_default_timeslice;
        tmp->waitnext = tmp->waitprev = NULL;

        /*
         * This has to be run as last thing since init_state may need other
         * pardevice fields. -arca
         */
        port->ops->init_state(tmp, tmp->state);
        parport_device_proc_register(tmp);
        return tmp;
}
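
/* Undo parport_register_device().  If the device still owns the port it is
 * released first; if the port itself has already gone away and this was
 * its last device, the port structure is freed as well. */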
void parport_unregister_device(struct pardevice *dev)
{
        struct parport *port;

#ifdef PARPORT_PARANOID
        if (dev == NULL) {
                printk(KERN_ERR "parport_unregister_device: passed NULL\n");
                return;
        }
#endif

        parport_device_proc_unregister(dev);

        port = dev->port->physport;

        if (port->cad == dev) {
                printk(KERN_DEBUG "%s: %s forgot to release port\n",
                       port->name, dev->name);
                parport_release (dev);
        }

        spin_lock(&port->pardevice_lock);
        if (dev->next)
                dev->next->prev = dev->prev;
        if (dev->prev)
                dev->prev->next = dev->next;
        else
                port->devices = dev->next;

        if (dev->flags & PARPORT_DEV_EXCL)
                port->flags &= ~PARPORT_FLAG_EXCL;

        spin_unlock(&port->pardevice_lock);

        kfree(dev->state);
        kfree(dev);

        dec_parport_count();
        port->ops->dec_use_count();

        /* If this was the last device on a port that's already gone away,
         * free up the resources. */
        if (port->ops == &dead_ops && !port->devices)
                free_port (port);
}
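
/* Try to claim the port for a device without sleeping.  Returns 0 on
 * success, or -EAGAIN if the current owner could not be preempted; in the
 * latter case the device may be added to the port's wait list. */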
int parport_claim(struct pardevice *dev)
{
        struct pardevice *oldcad;
        struct parport *port = dev->port->physport;
        unsigned long flags;

        if (port->cad == dev) {
                printk(KERN_INFO "%s: %s already owner\n",
                       dev->port->name,dev->name);
                return 0;
        }

try_again:
        /* Preempt any current device */
        if ((oldcad = port->cad) != NULL) {
                if (oldcad->preempt) {
                        if (oldcad->preempt(oldcad->private))
                                goto blocked;
                        port->ops->save_state(port, dev->state);
                } else
                        goto blocked;

                if (port->cad != oldcad) {
                        printk(KERN_WARNING
                               "%s: %s released port when preempted!\n",
                               port->name, oldcad->name);
                        if (port->cad)
                                goto blocked;
                }
        }

        /* Can't fail from now on, so mark ourselves as no longer waiting. */
        if (dev->waiting & 1) {
                dev->waiting = 0;

                /* Take ourselves out of the wait list again. */
                spin_lock_irqsave (&port->waitlist_lock, flags);
                if (dev->waitprev)
                        dev->waitprev->waitnext = dev->waitnext;
                else
                        port->waithead = dev->waitnext;
                if (dev->waitnext)
                        dev->waitnext->waitprev = dev->waitprev;
                else
                        port->waittail = dev->waitprev;
                spin_unlock_irqrestore (&port->waitlist_lock, flags);
                dev->waitprev = dev->waitnext = NULL;
        }

        /* Now we do the change of devices */
        write_lock_irqsave(&port->cad_lock, flags);
        port->cad = dev;
        write_unlock_irqrestore(&port->cad_lock, flags);

#ifdef CONFIG_PARPORT_1284
        /* If it's a mux port, select it. */
        if (dev->port->muxport >= 0) {
                /* FIXME */
                port->muxsel = dev->port->muxport;
        }

        /* If it's a daisy chain device, select it. */
        if (dev->daisy >= 0) {
                /* This could be lazier. */
                if (!parport_daisy_select (port, dev->daisy,
                                           IEEE1284_MODE_COMPAT))
                        port->daisy = dev->daisy;
        }
#endif /* IEEE1284.3 support */

        /* Restore control registers */
        port->ops->restore_state(port, dev->state);
        dev->time = jiffies;
        return 0;

blocked:
        /* If this is the first time we tried to claim the port, register an
           interest.  This is only allowed for devices sleeping in
           parport_claim_or_block(), or those with a wakeup function. */
        if (dev->waiting & 2 || dev->wakeup) {
                spin_lock_irqsave (&port->waitlist_lock, flags);
                if (port->cad == NULL) {
                        /* The port got released in the meantime. */
                        spin_unlock_irqrestore (&port->waitlist_lock, flags);
                        goto try_again;
                }
                if (test_and_set_bit(0, &dev->waiting) == 0) {
                        /* First add ourselves to the end of the wait list. */
                        dev->waitnext = NULL;
                        dev->waitprev = port->waittail;
                        if (port->waittail) {
                                port->waittail->waitnext = dev;
                                port->waittail = dev;
                        } else
                                port->waithead = port->waittail = dev;
                }
                spin_unlock_irqrestore (&port->waitlist_lock, flags);
        }
        return -EAGAIN;
}
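
/* Claim the port, sleeping if necessary until it becomes free.  Returns 1
 * if the caller had to sleep for the port, 0 if it was claimed without
 * sleeping. */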
int parport_claim_or_block(struct pardevice *dev)
{
        int r;

        /* Signal to parport_claim() that we can wait even without a
           wakeup function. */
        dev->waiting = 2;

        /* Try to claim the port.  If this fails, we need to sleep. */
        r = parport_claim(dev);
        if (r == -EAGAIN) {
                unsigned long flags;
#ifdef PARPORT_DEBUG_SHARING
                printk(KERN_DEBUG "%s: parport_claim() returned -EAGAIN\n", dev->name);
#endif
                save_flags (flags);
                cli();
                /* If dev->waiting is clear now, an interrupt
                   gave us the port and we would deadlock if we slept. */
                if (dev->waiting) {
                        sleep_on(&dev->wait_q);
                        r = 1;
                } else {
                        r = 0;
#ifdef PARPORT_DEBUG_SHARING
                        printk(KERN_DEBUG "%s: didn't sleep in parport_claim_or_block()\n",
                               dev->name);
#endif
                }
                restore_flags(flags);
#ifdef PARPORT_DEBUG_SHARING
                if (dev->port->physport->cad != dev)
                        printk(KERN_DEBUG "%s: exiting parport_claim_or_block "
                               "but %s owns port!\n", dev->name,
                               dev->port->physport->cad ?
                               dev->port->physport->cad->name:"nobody");
#endif
        }
        dev->waiting = 0;
        return r;
}
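
/* Give up ownership of the port.  The longest-waiting device, if any, is
 * handed the port or woken up; failing that, every other device with a
 * wakeup callback is offered it. */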
void parport_release(struct pardevice *dev)
{
        struct parport *port = dev->port->physport;
        struct pardevice *pd;
        unsigned long flags;

        /* Make sure that dev is the current device */
        if (port->cad != dev) {
                printk(KERN_WARNING "%s: %s tried to release parport "
                       "when not owner\n", port->name, dev->name);
                return;
        }

#ifdef CONFIG_PARPORT_1284
        /* If this is on a mux port, deselect it. */
        if (dev->port->muxport >= 0) {
                /* FIXME */
                port->muxsel = -1;
        }

        /* If this is a daisy device, deselect it. */
        if (dev->daisy >= 0) {
                parport_daisy_deselect_all (port);
                port->daisy = -1;
        }
#endif

        write_lock_irqsave(&port->cad_lock, flags);
        port->cad = NULL;
        write_unlock_irqrestore(&port->cad_lock, flags);

        /* Save control registers */
        port->ops->save_state(port, dev->state);

        /* If anybody is waiting, find out who's been there longest and
           then wake them up. (Note: no locking required) */
        for (pd = port->waithead; pd; pd = pd->waitnext) {
                if (pd->waiting & 2) { /* sleeping in claim_or_block */
                        parport_claim(pd);
                        if (waitqueue_active(&pd->wait_q))
                                wake_up(&pd->wait_q);
                        return;
                } else if (pd->wakeup) {
                        pd->wakeup(pd->private);
                        if (dev->port->cad)
                                return;
                } else {
                        printk(KERN_ERR "%s: don't know how to wake %s\n", port->name, pd->name);
                }
        }

        /* Nobody was waiting, so walk the list to see if anyone is
           interested in being woken up. */
        for (pd = port->devices; (port->cad == NULL) && pd; pd = pd->next) {
                if (pd->wakeup && pd != dev)
                        pd->wakeup(pd->private);
        }
}
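
/* Turn an array of option strings ("auto", "none", "nofifo" or a number)
 * into the corresponding numeric values.  Keywords are matched by their
 * first four characters only.  Returns 0 on success, -1 on a bad
 * specifier. */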
static int parport_parse_params (int nports, const char *str[], int val[],
                                 int automatic, int none, int nofifo)
{
        unsigned int i;
        for (i = 0; i < nports && str[i]; i++) {
                if (!strncmp(str[i], "auto", 4))
                        val[i] = automatic;
                else if (!strncmp(str[i], "none", 4))
                        val[i] = none;
                else if (nofifo && !strncmp(str[i], "nofifo", 4))
                        val[i] = nofifo;
                else {
                        char *ep;
                        unsigned long r = simple_strtoul(str[i], &ep, 0);
                        if (ep != str[i])
                                val[i] = r;
                        else {
                                printk("parport: bad specifier `%s'\n", str[i]);
                                return -1;
                        }
                }
        }

        return 0;
}
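
/* Helpers for low-level port drivers parsing their irq= and dma= options. */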
int parport_parse_irqs(int nports, const char *irqstr[], int irqval[])
{
        return parport_parse_params (nports, irqstr, irqval, PARPORT_IRQ_AUTO,
                                     PARPORT_IRQ_NONE, 0);
}
int parport_parse_dmas(int nports, const char *dmastr[], int dmaval[])
{
        return parport_parse_params (nports, dmastr, dmaval, PARPORT_DMA_AUTO,
                                     PARPORT_DMA_NONE, PARPORT_DMA_NOFIFO);
}