/* drivers/ieee1394/iso.c */

/*
 * IEEE 1394 for Linux
 *
 * kernel ISO transmission/reception
 *
 * Copyright (C) 2002 Maas Digital LLC
 *
 * This code is licensed under the GPL.  See the file COPYING in the root
 * directory of the kernel sources for details.
 */

#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/slab.h>

#include "hosts.h"
#include "iso.h"

/**
 * hpsb_iso_stop - stop DMA
 */
void hpsb_iso_stop(struct hpsb_iso *iso)
{
        if (!(iso->flags & HPSB_ISO_DRIVER_STARTED))
                return;

        iso->host->driver->isoctl(iso, iso->type == HPSB_ISO_XMIT ?
                                  XMIT_STOP : RECV_STOP, 0);
        iso->flags &= ~HPSB_ISO_DRIVER_STARTED;
}

/**
 * hpsb_iso_shutdown - deallocate buffer and DMA context
 */
void hpsb_iso_shutdown(struct hpsb_iso *iso)
{
        if (iso->flags & HPSB_ISO_DRIVER_INIT) {
                hpsb_iso_stop(iso);
                iso->host->driver->isoctl(iso, iso->type == HPSB_ISO_XMIT ?
                                          XMIT_SHUTDOWN : RECV_SHUTDOWN, 0);
                iso->flags &= ~HPSB_ISO_DRIVER_INIT;
        }

        dma_region_free(&iso->data_buf);
        kfree(iso);
}

static struct hpsb_iso *hpsb_iso_common_init(struct hpsb_host *host,
                                             enum hpsb_iso_type type,
                                             unsigned int data_buf_size,
                                             unsigned int buf_packets,
                                             int channel, int dma_mode,
                                             int irq_interval,
                                             void (*callback) (struct hpsb_iso *))
{
        struct hpsb_iso *iso;
        int dma_direction;

        /* make sure driver supports the ISO API */
        if (!host->driver->isoctl) {
                printk(KERN_INFO
                       "ieee1394: host driver '%s' does not support the rawiso API\n",
                       host->driver->name);
                return NULL;
        }

        /* sanitize parameters */

        if (buf_packets < 2)
                buf_packets = 2;

        if ((dma_mode < HPSB_ISO_DMA_DEFAULT)
            || (dma_mode > HPSB_ISO_DMA_PACKET_PER_BUFFER))
                dma_mode = HPSB_ISO_DMA_DEFAULT;

        if ((irq_interval < 0) || (irq_interval > buf_packets / 4))
                irq_interval = buf_packets / 4;
        if (irq_interval == 0)  /* really interrupt for each packet */
                irq_interval = 1;

        if (channel < -1 || channel >= 64)
                return NULL;

        /* channel = -1 is OK for multi-channel recv but not for xmit */
        if (type == HPSB_ISO_XMIT && channel < 0)
                return NULL;

        /* allocate and write the struct hpsb_iso */

        iso = kmalloc(sizeof(*iso) +
                      buf_packets * sizeof(struct hpsb_iso_packet_info),
                      GFP_KERNEL);
        if (!iso)
                return NULL;

        iso->infos = (struct hpsb_iso_packet_info *)(iso + 1);

        iso->type = type;
        iso->host = host;
        iso->hostdata = NULL;
        iso->callback = callback;
        init_waitqueue_head(&iso->waitq);
        iso->channel = channel;
        iso->irq_interval = irq_interval;
        iso->dma_mode = dma_mode;
        dma_region_init(&iso->data_buf);
        iso->buf_size = PAGE_ALIGN(data_buf_size);
        iso->buf_packets = buf_packets;
        iso->pkt_dma = 0;
        iso->first_packet = 0;
        spin_lock_init(&iso->lock);

        if (iso->type == HPSB_ISO_XMIT) {
                iso->n_ready_packets = iso->buf_packets;
                dma_direction = PCI_DMA_TODEVICE;
        } else {
                iso->n_ready_packets = 0;
                dma_direction = PCI_DMA_FROMDEVICE;
        }

        atomic_set(&iso->overflows, 0);
        iso->bytes_discarded = 0;
        iso->flags = 0;
        iso->prebuffer = 0;

        /* allocate the packet buffer */
        if (dma_region_alloc(&iso->data_buf, iso->buf_size, host->pdev,
                             dma_direction))
                goto err;

        return iso;

err:
        hpsb_iso_shutdown(iso);
        return NULL;
}

/**
 * hpsb_iso_n_ready - returns number of packets ready to send or receive
 */
int hpsb_iso_n_ready(struct hpsb_iso *iso)
{
        unsigned long flags;
        int val;

        spin_lock_irqsave(&iso->lock, flags);
        val = iso->n_ready_packets;
        spin_unlock_irqrestore(&iso->lock, flags);

        return val;
}

/**
 * hpsb_iso_xmit_init - allocate the buffer and DMA context
 */
struct hpsb_iso *hpsb_iso_xmit_init(struct hpsb_host *host,
                                    unsigned int data_buf_size,
                                    unsigned int buf_packets,
                                    int channel,
                                    int speed,
                                    int irq_interval,
                                    void (*callback) (struct hpsb_iso *))
{
        struct hpsb_iso *iso = hpsb_iso_common_init(host, HPSB_ISO_XMIT,
                                                    data_buf_size, buf_packets,
                                                    channel,
                                                    HPSB_ISO_DMA_DEFAULT,
                                                    irq_interval, callback);
        if (!iso)
                return NULL;

        iso->speed = speed;

        /* tell the driver to start working */
        if (host->driver->isoctl(iso, XMIT_INIT, 0))
                goto err;

        iso->flags |= HPSB_ISO_DRIVER_INIT;
        return iso;

err:
        hpsb_iso_shutdown(iso);
        return NULL;
}

/**
 * hpsb_iso_recv_init - allocate the buffer and DMA context
 *
 * Note, if channel = -1, multi-channel receive is enabled.
 */
struct hpsb_iso *hpsb_iso_recv_init(struct hpsb_host *host,
                                    unsigned int data_buf_size,
                                    unsigned int buf_packets,
                                    int channel,
                                    int dma_mode,
                                    int irq_interval,
                                    void (*callback) (struct hpsb_iso *))
{
        struct hpsb_iso *iso = hpsb_iso_common_init(host, HPSB_ISO_RECV,
                                                    data_buf_size, buf_packets,
                                                    channel, dma_mode,
                                                    irq_interval, callback);
        if (!iso)
                return NULL;

        /* tell the driver to start working */
        if (host->driver->isoctl(iso, RECV_INIT, 0))
                goto err;

        iso->flags |= HPSB_ISO_DRIVER_INIT;
        return iso;

err:
        hpsb_iso_shutdown(iso);
        return NULL;
}

/**
 * hpsb_iso_recv_listen_channel
 *
 * multi-channel only
 */
int hpsb_iso_recv_listen_channel(struct hpsb_iso *iso, unsigned char channel)
{
        if (iso->type != HPSB_ISO_RECV || iso->channel != -1 || channel >= 64)
                return -EINVAL;
        return iso->host->driver->isoctl(iso, RECV_LISTEN_CHANNEL, channel);
}

/**
 * hpsb_iso_recv_unlisten_channel
 *
 * multi-channel only
 */
int hpsb_iso_recv_unlisten_channel(struct hpsb_iso *iso, unsigned char channel)
{
        if (iso->type != HPSB_ISO_RECV || iso->channel != -1 || channel >= 64)
                return -EINVAL;
        return iso->host->driver->isoctl(iso, RECV_UNLISTEN_CHANNEL, channel);
}

/**
 * hpsb_iso_recv_set_channel_mask
 *
 * multi-channel only
 */
int hpsb_iso_recv_set_channel_mask(struct hpsb_iso *iso, u64 mask)
{
        if (iso->type != HPSB_ISO_RECV || iso->channel != -1)
                return -EINVAL;
        return iso->host->driver->isoctl(iso, RECV_SET_CHANNEL_MASK,
                                         (unsigned long)&mask);
}

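/*
 * Illustrative sketch (not part of the original driver): one way a client
 * could use the multi-channel receive entry points above.  The host pointer,
 * the buffer geometry and my_recv_callback() are placeholders assumed for
 * the example, not symbols defined by this stack.
 *
 *	struct hpsb_iso *iso;
 *
 *	iso = hpsb_iso_recv_init(host, 16 * PAGE_SIZE, 64, -1,
 *				 HPSB_ISO_DMA_DEFAULT, -1, my_recv_callback);
 *	if (!iso)
 *		return -ENOMEM;
 *
 *	Since channel == -1 above, individual channels can now be enabled:
 *
 *	hpsb_iso_recv_listen_channel(iso, 5);
 *	hpsb_iso_recv_listen_channel(iso, 6);
 *
 *	Start DMA at any cycle, matching all tag values:
 *
 *	hpsb_iso_recv_start(iso, -1, -1, 0);
 */
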
/**
 * hpsb_iso_recv_flush - check for arrival of new packets
 *
 * check for arrival of new packets immediately (even if irq_interval
 * has not yet been reached)
 */
int hpsb_iso_recv_flush(struct hpsb_iso *iso)
{
        if (iso->type != HPSB_ISO_RECV)
                return -EINVAL;
        return iso->host->driver->isoctl(iso, RECV_FLUSH, 0);
}

static int do_iso_xmit_start(struct hpsb_iso *iso, int cycle)
{
        int retval = iso->host->driver->isoctl(iso, XMIT_START, cycle);
        if (retval)
                return retval;

        iso->flags |= HPSB_ISO_DRIVER_STARTED;
        return retval;
}

/**
 * hpsb_iso_xmit_start - start DMA
 */
int hpsb_iso_xmit_start(struct hpsb_iso *iso, int cycle, int prebuffer)
{
        if (iso->type != HPSB_ISO_XMIT)
                return -1;

        if (iso->flags & HPSB_ISO_DRIVER_STARTED)
                return 0;

        if (cycle < -1)
                cycle = -1;
        else if (cycle >= 8000)
                cycle %= 8000;

        iso->xmit_cycle = cycle;

        if (prebuffer < 0)
                prebuffer = iso->buf_packets - 1;
        else if (prebuffer == 0)
                prebuffer = 1;

        if (prebuffer >= iso->buf_packets)
                prebuffer = iso->buf_packets - 1;

        iso->prebuffer = prebuffer;

        /* remember the starting cycle; DMA will commence from
           xmit_queue_packets() once enough packets have been buffered */
        iso->start_cycle = cycle;

        return 0;
}

/**
 * hpsb_iso_recv_start - start DMA
 */
int hpsb_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync)
{
        int retval = 0;
        int isoctl_args[3];

        if (iso->type != HPSB_ISO_RECV)
                return -1;

        if (iso->flags & HPSB_ISO_DRIVER_STARTED)
                return 0;

        if (cycle < -1)
                cycle = -1;
        else if (cycle >= 8000)
                cycle %= 8000;

        isoctl_args[0] = cycle;

        if (tag_mask < 0)
                /* match all tags */
                tag_mask = 0xF;
        isoctl_args[1] = tag_mask;

        isoctl_args[2] = sync;

        retval = iso->host->driver->isoctl(iso, RECV_START,
                                           (unsigned long)&isoctl_args[0]);
        if (retval)
                return retval;

        iso->flags |= HPSB_ISO_DRIVER_STARTED;
        return retval;
}

/* check to make sure the user has not supplied bogus values of offset/len
 * that would cause the kernel to access memory outside the buffer */
static int hpsb_iso_check_offset_len(struct hpsb_iso *iso,
                                     unsigned int offset, unsigned short len,
                                     unsigned int *out_offset,
                                     unsigned short *out_len)
{
        if (offset >= iso->buf_size)
                return -EFAULT;

        /* make sure the packet does not go beyond the end of the buffer */
        if (offset + len > iso->buf_size)
                return -EFAULT;

        /* check for wrap-around */
        if (offset + len < offset)
                return -EFAULT;

        /* now we can trust 'offset' and 'len' */
        *out_offset = offset;
        *out_len = len;

        return 0;
}

/**
 * hpsb_iso_xmit_queue_packet - queue a packet for transmission.
 *
 * @offset is relative to the beginning of the DMA buffer, where the packet's
 * data payload should already have been placed.
 */
int hpsb_iso_xmit_queue_packet(struct hpsb_iso *iso, u32 offset, u16 len,
                               u8 tag, u8 sy)
{
        struct hpsb_iso_packet_info *info;
        unsigned long flags;
        int rv;

        if (iso->type != HPSB_ISO_XMIT)
                return -EINVAL;

        /* is there space in the buffer? */
        if (iso->n_ready_packets <= 0)
                return -EBUSY;

        info = &iso->infos[iso->first_packet];

        /* check for bogus offset/length */
        if (hpsb_iso_check_offset_len(iso, offset, len, &info->offset,
                                      &info->len))
                return -EFAULT;

        info->tag = tag;
        info->sy = sy;

        spin_lock_irqsave(&iso->lock, flags);

        rv = iso->host->driver->isoctl(iso, XMIT_QUEUE, (unsigned long)info);
        if (rv)
                goto out;

        /* increment cursors */
        iso->first_packet = (iso->first_packet + 1) % iso->buf_packets;
        iso->xmit_cycle = (iso->xmit_cycle + 1) % 8000;
        iso->n_ready_packets--;

        if (iso->prebuffer != 0) {
                iso->prebuffer--;
                if (iso->prebuffer <= 0) {
                        iso->prebuffer = 0;
                        rv = do_iso_xmit_start(iso, iso->start_cycle);
                }
        }

out:
        spin_unlock_irqrestore(&iso->lock, flags);
        return rv;
}

/**
 * hpsb_iso_xmit_sync - wait until all queued packets have been transmitted
 */
int hpsb_iso_xmit_sync(struct hpsb_iso *iso)
{
        if (iso->type != HPSB_ISO_XMIT)
                return -EINVAL;

        return wait_event_interruptible(iso->waitq,
                                        hpsb_iso_n_ready(iso) ==
                                        iso->buf_packets);
}

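/*
 * Illustrative sketch (not part of the original driver): a minimal transmit
 * sequence built from the functions above.  It assumes each payload has
 * already been copied into the DMA buffer at its offset (e.g. through a
 * mapping of iso->data_buf); host, PKT_SIZE, the speed constant and
 * my_xmit_callback() are placeholders assumed for the example.
 *
 *	struct hpsb_iso *iso;
 *	unsigned int i;
 *
 *	iso = hpsb_iso_xmit_init(host, 64 * PKT_SIZE, 64, 5,
 *				 IEEE1394_SPEED_400, -1, my_xmit_callback);
 *	if (!iso)
 *		return -ENOMEM;
 *
 *	Arm the context; actual DMA begins from hpsb_iso_xmit_queue_packet()
 *	once 16 packets have been prebuffered:
 *
 *	hpsb_iso_xmit_start(iso, -1, 16);
 *
 *	for (i = 0; i < 64; i++)
 *		if (hpsb_iso_xmit_queue_packet(iso, i * PKT_SIZE, PKT_SIZE,
 *					       0, 0))
 *			break;
 *
 *	Wait until every queued packet has gone out, then tear down:
 *
 *	hpsb_iso_xmit_sync(iso);
 *	hpsb_iso_stop(iso);
 *	hpsb_iso_shutdown(iso);
 */
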
/**
 * hpsb_iso_packet_sent
 *
 * Available to low-level drivers.
 *
 * Call after a packet has been transmitted to the bus (interrupt context is
 * OK).  @cycle is the _exact_ cycle the packet was sent on.  @error should be
 * non-zero if some sort of error occurred when sending the packet.
 */
void hpsb_iso_packet_sent(struct hpsb_iso *iso, int cycle, int error)
{
        unsigned long flags;
        spin_lock_irqsave(&iso->lock, flags);

        /* predict the cycle of the next packet to be queued */

        /* jump ahead by the number of packets that are already buffered */
        cycle += iso->buf_packets - iso->n_ready_packets;
        cycle %= 8000;

        iso->xmit_cycle = cycle;
        iso->n_ready_packets++;
        iso->pkt_dma = (iso->pkt_dma + 1) % iso->buf_packets;

        if (iso->n_ready_packets == iso->buf_packets || error != 0) {
                /* the buffer has run empty! */
                atomic_inc(&iso->overflows);
        }

        spin_unlock_irqrestore(&iso->lock, flags);
}

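/*
 * Worked example for the cycle prediction above (added for illustration,
 * not in the original source): if hpsb_iso_packet_sent() runs with
 * cycle == 7995 while 10 packets are still accounted as queued
 * (iso->buf_packets - iso->n_ready_packets == 10, counting the packet just
 * reported), the remaining 9 packets occupy cycles 7996..7999 and 0..4, so
 * the next packet the caller queues is predicted for cycle
 * (7995 + 10) % 8000 == 5, which is the value stored in iso->xmit_cycle.
 */
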
/**
 * hpsb_iso_packet_received
 *
 * Available to low-level drivers.
 *
 * Call after a packet has been received (interrupt context is OK).
 */
void hpsb_iso_packet_received(struct hpsb_iso *iso, u32 offset, u16 len,
                              u16 total_len, u16 cycle, u8 channel, u8 tag,
                              u8 sy)
{
        unsigned long flags;
        spin_lock_irqsave(&iso->lock, flags);

        if (iso->n_ready_packets == iso->buf_packets) {
                /* overflow! */
                atomic_inc(&iso->overflows);
                /* Record size of this discarded packet */
                iso->bytes_discarded += total_len;
        } else {
                struct hpsb_iso_packet_info *info = &iso->infos[iso->pkt_dma];
                info->offset = offset;
                info->len = len;
                info->total_len = total_len;
                info->cycle = cycle;
                info->channel = channel;
                info->tag = tag;
                info->sy = sy;

                iso->pkt_dma = (iso->pkt_dma + 1) % iso->buf_packets;
                iso->n_ready_packets++;
        }

        spin_unlock_irqrestore(&iso->lock, flags);
}

/**
 * hpsb_iso_recv_release_packets - release packets, reuse buffer
 *
 * @n_packets have been read out of the buffer, re-use the buffer space
 */
int hpsb_iso_recv_release_packets(struct hpsb_iso *iso, unsigned int n_packets)
{
        unsigned long flags;
        unsigned int i;
        int rv = 0;

        if (iso->type != HPSB_ISO_RECV)
                return -1;

        spin_lock_irqsave(&iso->lock, flags);
        for (i = 0; i < n_packets; i++) {
                rv = iso->host->driver->isoctl(iso, RECV_RELEASE,
                                               (unsigned long)&iso->infos[iso->first_packet]);
                if (rv)
                        break;

                iso->first_packet = (iso->first_packet + 1) % iso->buf_packets;
                iso->n_ready_packets--;

                /* release memory from packets discarded when queue was full */
                if (iso->n_ready_packets == 0) {        /* Release only after all prior packets handled */
                        if (iso->bytes_discarded != 0) {
                                struct hpsb_iso_packet_info inf;
                                inf.total_len = iso->bytes_discarded;
                                iso->host->driver->isoctl(iso, RECV_RELEASE,
                                                          (unsigned long)&inf);
                                iso->bytes_discarded = 0;
                        }
                }
        }
        spin_unlock_irqrestore(&iso->lock, flags);
        return rv;
}

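/*
 * Illustrative sketch (not part of the original driver): a receive consumer
 * that drains whatever has arrived and then returns the slots to the DMA
 * ring.  Deferring this work out of the wake callback (e.g. to a workqueue)
 * is omitted for brevity, and my_handle_packet() is a placeholder assumed
 * for the example.
 *
 *	unsigned int i, n = hpsb_iso_n_ready(iso);
 *
 *	for (i = 0; i < n; i++) {
 *		struct hpsb_iso_packet_info *info =
 *		    &iso->infos[(iso->first_packet + i) % iso->buf_packets];
 *
 *		my_handle_packet(iso, info);
 *	}
 *
 *	hpsb_iso_recv_release_packets(iso, n);
 */
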
/**
 * hpsb_iso_wake
 *
 * Available to low-level drivers.
 *
 * Call to wake waiting processes after buffer space has opened up.
 */
void hpsb_iso_wake(struct hpsb_iso *iso)
{
        wake_up_interruptible(&iso->waitq);

        if (iso->callback)
                iso->callback(iso);
}