1 /* $Id: parport_share.c,v 1.15 1998/01/11 12:06:17 philip Exp $
2 * Parallel-port resource manager code.
4 * Authors: David Campbell <campbell@tirian.che.curtin.edu.au>
5 * Tim Waugh <tim@cyberelk.demon.co.uk>
6 * Jose Renau <renau@acm.org>
7 * Philip Blundell <philb@gnu.org>
10 * based on work by Grant Guenther <grant@torque.net>
14 #undef PARPORT_DEBUG_SHARING /* undef for production */
16 #include <linux/config.h>
17 #include <linux/string.h>
18 #include <linux/threads.h>
19 #include <linux/parport.h>
20 #include <linux/delay.h>
21 #include <linux/errno.h>
22 #include <linux/interrupt.h>
23 #include <linux/ioport.h>
24 #include <linux/kernel.h>
25 #include <linux/malloc.h>
26 #include <linux/sched.h>
28 #include <asm/spinlock.h>
31 #undef PARPORT_PARANOID
/* Default length of a client's timeslice before it should yield the port
 * to a waiting peer (in jiffies). */
#define PARPORT_DEFAULT_TIMESLICE (HZ/5)

/* Module-wide tunables: per-device default timeslice and the number of
 * microseconds to busy-wait ("spin") before sleeping in the lowlevel
 * drivers.  Both are copied into each new port/device at registration. */
unsigned long parport_default_timeslice = PARPORT_DEFAULT_TIMESLICE;
int parport_default_spintime = DEFAULT_SPIN_TIME;
/* All known ports, kept as a singly-linked list with a tail pointer so
 * new ports can be appended cheaply.  Guarded by parportlist_lock. */
static struct parport *portlist = NULL, *portlist_tail = NULL;
spinlock_t parportlist_lock = SPIN_LOCK_UNLOCKED;

/* All registered client drivers (singly linked, newest first).
 * Guarded by driverlist_lock. */
static struct parport_driver *driver_chain = NULL;
spinlock_t driverlist_lock = SPIN_LOCK_UNLOCKED;
44 static void call_driver_chain(int attach
, struct parport
*port
)
46 struct parport_driver
*drv
;
48 for (drv
= driver_chain
; drv
; drv
= drv
->next
) {
56 int parport_register_driver (struct parport_driver
*drv
)
60 spin_lock (&driverlist_lock
);
61 drv
->next
= driver_chain
;
63 spin_unlock (&driverlist_lock
);
65 for (port
= portlist
; port
; port
= port
->next
)
71 void parport_unregister_driver (struct parport_driver
*arg
)
73 struct parport_driver
*drv
= driver_chain
, *olddrv
= NULL
;
77 spin_lock (&driverlist_lock
);
79 olddrv
->next
= drv
->next
;
81 driver_chain
= drv
->next
;
82 spin_unlock (&driverlist_lock
);
90 /* Return a list of all the ports we know about. */
91 struct parport
*parport_enumerate(void)
96 struct parport
*parport_register_port(unsigned long base
, int irq
, int dma
,
97 struct parport_operations
*ops
)
104 tmp
= kmalloc(sizeof(struct parport
), GFP_KERNEL
);
106 printk(KERN_WARNING
"parport: memory squeeze\n");
110 /* Search for the lowest free parport number. */
111 for (portnum
= 0; ; portnum
++) {
112 struct parport
*itr
= portlist
;
114 if (itr
->number
== portnum
)
115 /* No good, already used. */
122 /* Got to the end of the list. */
126 /* Init our structure */
127 memset(tmp
, 0, sizeof(struct parport
));
131 tmp
->muxport
= tmp
->daisy
= tmp
->muxsel
= -1;
134 tmp
->devices
= tmp
->cad
= NULL
;
137 tmp
->portnum
= tmp
->number
= portnum
;
139 memset (tmp
->probe_info
, 0, 5 * sizeof (struct parport_device_info
));
140 tmp
->cad_lock
= RW_LOCK_UNLOCKED
;
141 spin_lock_init(&tmp
->waitlist_lock
);
142 spin_lock_init(&tmp
->pardevice_lock
);
143 tmp
->ieee1284
.mode
= IEEE1284_MODE_COMPAT
;
144 tmp
->ieee1284
.phase
= IEEE1284_PH_FWD_IDLE
;
145 init_MUTEX_LOCKED (&tmp
->ieee1284
.irq
); /* actually a semaphore at 0 */
146 tmp
->spintime
= parport_default_spintime
;
148 name
= kmalloc(15, GFP_KERNEL
);
150 printk(KERN_ERR
"parport: memory squeeze\n");
154 sprintf(name
, "parport%d", portnum
);
158 * Chain the entry to our list.
160 * This function must not run from an irq handler so we don' t need
161 * to clear irq on the local CPU. -arca
163 spin_lock(&parportlist_lock
);
165 portlist_tail
->next
= tmp
;
169 spin_unlock(&parportlist_lock
);
171 for (device
= 0; device
< 5; device
++)
172 /* assume the worst */
173 tmp
->probe_info
[device
].class = PARPORT_CLASS_LEGACY
;
175 tmp
->waithead
= tmp
->waittail
= NULL
;
/* Make a registered port visible to client drivers.  Called by the
 * lowlevel driver once the port is fully set up. */
void parport_announce_port (struct parport *port)
{
#ifdef CONFIG_PARPORT_1284
	/* Analyse the IEEE1284.3 topology of the port. */
	parport_daisy_init (port);
#endif

	/* Let drivers know that a new port has arrived. */
	call_driver_chain (1, port);
}
191 void parport_unregister_port(struct parport
*port
)
196 /* Spread the word. */
197 call_driver_chain (0, port
);
199 #ifdef CONFIG_PARPORT_1284
200 /* Forget the IEEE1284.3 topology of the port. */
201 parport_daisy_fini (port
);
204 spin_lock(&parportlist_lock
);
205 if (portlist
== port
) {
206 if ((portlist
= port
->next
) == NULL
)
207 portlist_tail
= NULL
;
209 for (p
= portlist
; (p
!= NULL
) && (p
->next
!= port
);
212 if ((p
->next
= port
->next
) == NULL
)
215 else printk (KERN_WARNING
216 "%s not found in port list!\n", port
->name
);
218 spin_unlock(&parportlist_lock
);
220 for (d
= 0; d
< 5; d
++) {
221 if (port
->probe_info
[d
].class_name
)
222 kfree (port
->probe_info
[d
].class_name
);
223 if (port
->probe_info
[d
].mfr
)
224 kfree (port
->probe_info
[d
].mfr
);
225 if (port
->probe_info
[d
].model
)
226 kfree (port
->probe_info
[d
].model
);
227 if (port
->probe_info
[d
].cmdset
)
228 kfree (port
->probe_info
[d
].cmdset
);
229 if (port
->probe_info
[d
].description
)
230 kfree (port
->probe_info
[d
].description
);
237 struct pardevice
*parport_register_device(struct parport
*port
, const char *name
,
238 int (*pf
)(void *), void (*kf
)(void *),
239 void (*irq_func
)(int, void *, struct pt_regs
*),
240 int flags
, void *handle
)
242 struct pardevice
*tmp
;
244 if (port
->physport
->flags
& PARPORT_FLAG_EXCL
) {
245 /* An exclusive device is registered. */
246 printk (KERN_DEBUG
"%s: no more devices allowed\n",
251 if (flags
& PARPORT_DEV_LURK
) {
253 printk(KERN_INFO
"%s: refused to register lurking device (%s) without callbacks\n", port
->name
, name
);
258 tmp
= kmalloc(sizeof(struct pardevice
), GFP_KERNEL
);
260 printk(KERN_WARNING
"%s: memory squeeze, couldn't register %s.\n", port
->name
, name
);
264 tmp
->state
= kmalloc(sizeof(struct parport_state
), GFP_KERNEL
);
265 if (tmp
->state
== NULL
) {
266 printk(KERN_WARNING
"%s: memory squeeze, couldn't register %s.\n", port
->name
, name
);
276 tmp
->private = handle
;
278 tmp
->irq_func
= irq_func
;
280 tmp
->timeout
= 5 * HZ
;
282 /* Chain this onto the list */
285 * This function must not run from an irq handler so we don' t need
286 * to clear irq on the local CPU. -arca
288 spin_lock(&port
->physport
->pardevice_lock
);
290 if (flags
& PARPORT_DEV_EXCL
) {
291 if (port
->physport
->devices
) {
292 spin_unlock (&port
->physport
->pardevice_lock
);
296 "%s: cannot grant exclusive access for "
297 "device %s\n", port
->name
, name
);
300 port
->flags
|= PARPORT_FLAG_EXCL
;
303 tmp
->next
= port
->physport
->devices
;
304 if (port
->physport
->devices
)
305 port
->physport
->devices
->prev
= tmp
;
306 port
->physport
->devices
= tmp
;
307 spin_unlock(&port
->physport
->pardevice_lock
);
310 port
->ops
->inc_use_count();
312 init_waitqueue_head(&tmp
->wait_q
);
313 tmp
->timeslice
= parport_default_timeslice
;
314 tmp
->waitnext
= tmp
->waitprev
= NULL
;
317 * This has to be run as last thing since init_state may need other
318 * pardevice fields. -arca
320 port
->ops
->init_state(tmp
, tmp
->state
);
321 parport_device_proc_register(tmp
);
325 void parport_unregister_device(struct pardevice
*dev
)
327 struct parport
*port
;
329 #ifdef PARPORT_PARANOID
331 printk(KERN_ERR
"parport_unregister_device: passed NULL\n");
336 parport_device_proc_unregister(dev
);
338 port
= dev
->port
->physport
;
340 if (port
->cad
== dev
) {
341 printk(KERN_DEBUG
"%s: %s forgot to release port\n",
342 port
->name
, dev
->name
);
343 parport_release (dev
);
346 spin_lock(&port
->pardevice_lock
);
348 dev
->next
->prev
= dev
->prev
;
350 dev
->prev
->next
= dev
->next
;
352 port
->devices
= dev
->next
;
354 if (dev
->flags
& PARPORT_DEV_EXCL
)
355 port
->flags
&= ~PARPORT_FLAG_EXCL
;
357 spin_unlock(&port
->pardevice_lock
);
363 port
->ops
->dec_use_count();
366 int parport_claim(struct pardevice
*dev
)
368 struct pardevice
*oldcad
;
369 struct parport
*port
= dev
->port
->physport
;
372 if (port
->cad
== dev
) {
373 printk(KERN_INFO
"%s: %s already owner\n",
374 dev
->port
->name
,dev
->name
);
379 /* Preempt any current device */
380 if ((oldcad
= port
->cad
) != NULL
) {
381 if (oldcad
->preempt
) {
382 if (oldcad
->preempt(oldcad
->private))
384 port
->ops
->save_state(port
, dev
->state
);
388 if (port
->cad
!= oldcad
) {
390 "%s: %s released port when preempted!\n",
391 port
->name
, oldcad
->name
);
397 /* Can't fail from now on, so mark ourselves as no longer waiting. */
398 if (dev
->waiting
& 1) {
401 /* Take ourselves out of the wait list again. */
402 spin_lock_irqsave (&port
->waitlist_lock
, flags
);
404 dev
->waitprev
->waitnext
= dev
->waitnext
;
406 port
->waithead
= dev
->waitnext
;
408 dev
->waitnext
->waitprev
= dev
->waitprev
;
410 port
->waittail
= dev
->waitprev
;
411 spin_unlock_irqrestore (&port
->waitlist_lock
, flags
);
412 dev
->waitprev
= dev
->waitnext
= NULL
;
415 /* Now we do the change of devices */
416 write_lock_irqsave(&port
->cad_lock
, flags
);
418 write_unlock_irqrestore(&port
->cad_lock
, flags
);
420 #ifdef CONFIG_PARPORT_1284
421 /* If it's a mux port, select it. */
422 if (dev
->port
->muxport
>= 0) {
424 port
->muxsel
= dev
->port
->muxport
;
427 /* If it's a daisy chain device, select it. */
428 if (dev
->daisy
>= 0) {
429 /* This could be lazier. */
430 if (!parport_daisy_select (port
, dev
->daisy
,
431 IEEE1284_MODE_COMPAT
))
432 port
->daisy
= dev
->daisy
;
434 #endif /* IEEE1284.3 support */
436 /* Restore control registers */
437 port
->ops
->restore_state(port
, dev
->state
);
442 /* If this is the first time we tried to claim the port, register an
443 interest. This is only allowed for devices sleeping in
444 parport_claim_or_block(), or those with a wakeup function. */
445 if (dev
->waiting
& 2 || dev
->wakeup
) {
446 spin_lock_irqsave (&port
->waitlist_lock
, flags
);
447 if (port
->cad
== NULL
) {
448 /* The port got released in the meantime. */
449 spin_unlock_irqrestore (&port
->waitlist_lock
, flags
);
452 if (test_and_set_bit(0, &dev
->waiting
) == 0) {
453 /* First add ourselves to the end of the wait list. */
454 dev
->waitnext
= NULL
;
455 dev
->waitprev
= port
->waittail
;
456 if (port
->waittail
) {
457 port
->waittail
->waitnext
= dev
;
458 port
->waittail
= dev
;
460 port
->waithead
= port
->waittail
= dev
;
462 spin_unlock_irqrestore (&port
->waitlist_lock
, flags
);
467 int parport_claim_or_block(struct pardevice
*dev
)
471 /* Signal to parport_claim() that we can wait even without a
475 /* Try to claim the port. If this fails, we need to sleep. */
476 r
= parport_claim(dev
);
479 #ifdef PARPORT_DEBUG_SHARING
480 printk(KERN_DEBUG
"%s: parport_claim() returned -EAGAIN\n", dev
->name
);
484 /* If dev->waiting is clear now, an interrupt
485 gave us the port and we would deadlock if we slept. */
487 sleep_on(&dev
->wait_q
);
491 #ifdef PARPORT_DEBUG_SHARING
492 printk(KERN_DEBUG
"%s: didn't sleep in parport_claim_or_block()\n",
496 restore_flags(flags
);
497 #ifdef PARPORT_DEBUG_SHARING
498 if (dev
->port
->physport
->cad
!= dev
)
499 printk(KERN_DEBUG
"%s: exiting parport_claim_or_block "
500 "but %s owns port!\n", dev
->name
,
501 dev
->port
->physport
->cad
?
502 dev
->port
->physport
->cad
->name
:"nobody");
509 void parport_release(struct pardevice
*dev
)
511 struct parport
*port
= dev
->port
->physport
;
512 struct pardevice
*pd
;
515 /* Make sure that dev is the current device */
516 if (port
->cad
!= dev
) {
517 printk(KERN_WARNING
"%s: %s tried to release parport "
518 "when not owner\n", port
->name
, dev
->name
);
522 #ifdef CONFIG_PARPORT_1284
523 /* If this is on a mux port, deselect it. */
524 if (dev
->port
->muxport
>= 0) {
529 /* If this is a daisy device, deselect it. */
530 if (dev
->daisy
>= 0) {
531 parport_daisy_deselect_all (port
);
536 write_lock_irqsave(&port
->cad_lock
, flags
);
538 write_unlock_irqrestore(&port
->cad_lock
, flags
);
540 /* Save control registers */
541 port
->ops
->save_state(port
, dev
->state
);
543 /* If anybody is waiting, find out who's been there longest and
544 then wake them up. (Note: no locking required) */
545 for (pd
= port
->waithead
; pd
; pd
= pd
->waitnext
) {
546 if (pd
->waiting
& 2) { /* sleeping in claim_or_block */
548 if (waitqueue_active(&pd
->wait_q
))
549 wake_up(&pd
->wait_q
);
551 } else if (pd
->wakeup
) {
552 pd
->wakeup(pd
->private);
556 printk(KERN_ERR
"%s: don't know how to wake %s\n", port
->name
, pd
->name
);
560 /* Nobody was waiting, so walk the list to see if anyone is
561 interested in being woken up. */
562 for (pd
= port
->devices
; (port
->cad
== NULL
) && pd
; pd
= pd
->next
) {
563 if (pd
->wakeup
&& pd
!= dev
)
564 pd
->wakeup(pd
->private);
/* Parse up to nports module-parameter strings into val[].  "auto" maps
 * to the automatic sentinel, "none" to the none sentinel, anything else
 * is parsed as a number (any C base).  Stops at the first NULL string.
 * Returns 0 on success, -1 on an unparseable specifier. */
static int parport_parse_params (int nports, const char *str[], int val[],
				 int automatic, int none)
{
	int i;

	for (i = 0; i < nports && str[i]; i++) {
		if (!strncmp(str[i], "auto", 4))
			val[i] = automatic;
		else if (!strncmp(str[i], "none", 4))
			val[i] = none;
		else {
			char *ep;
			unsigned long r = simple_strtoul(str[i], &ep, 0);
			if (ep != str[i])
				val[i] = r;
			else {
				printk("parport: bad specifier `%s'\n", str[i]);
				return -1;
			}
		}
	}

	return 0;
}
592 int parport_parse_irqs(int nports
, const char *irqstr
[], int irqval
[])
594 return parport_parse_params (nports
, irqstr
, irqval
, PARPORT_IRQ_AUTO
,
598 int parport_parse_dmas(int nports
, const char *dmastr
[], int dmaval
[])
600 return parport_parse_params (nports
, dmastr
, dmaval
, PARPORT_DMA_AUTO
,