/*
 * IEEE 1394 for Linux
 *
 * kernel ISO transmission/reception
 *
 * Copyright (C) 2002 Maas Digital LLC
 *
 * This code is licensed under the GPL.  See the file COPYING in the root
 * directory of the kernel sources for details.
 */
12 #include <linux/pci.h>
13 #include <linux/slab.h>
18 void hpsb_iso_stop(struct hpsb_iso
*iso
)
20 if (!(iso
->flags
& HPSB_ISO_DRIVER_STARTED
))
23 iso
->host
->driver
->isoctl(iso
, iso
->type
== HPSB_ISO_XMIT
?
24 XMIT_STOP
: RECV_STOP
, 0);
25 iso
->flags
&= ~HPSB_ISO_DRIVER_STARTED
;
28 void hpsb_iso_shutdown(struct hpsb_iso
*iso
)
30 if (iso
->flags
& HPSB_ISO_DRIVER_INIT
) {
32 iso
->host
->driver
->isoctl(iso
, iso
->type
== HPSB_ISO_XMIT
?
33 XMIT_SHUTDOWN
: RECV_SHUTDOWN
, 0);
34 iso
->flags
&= ~HPSB_ISO_DRIVER_INIT
;
37 dma_region_free(&iso
->data_buf
);
41 static struct hpsb_iso
*hpsb_iso_common_init(struct hpsb_host
*host
,
42 enum hpsb_iso_type type
,
43 unsigned int data_buf_size
,
44 unsigned int buf_packets
,
45 int channel
, int dma_mode
,
47 void (*callback
) (struct hpsb_iso
53 /* make sure driver supports the ISO API */
54 if (!host
->driver
->isoctl
) {
56 "ieee1394: host driver '%s' does not support the rawiso API\n",
61 /* sanitize parameters */
66 if ((dma_mode
< HPSB_ISO_DMA_DEFAULT
)
67 || (dma_mode
> HPSB_ISO_DMA_PACKET_PER_BUFFER
))
68 dma_mode
= HPSB_ISO_DMA_DEFAULT
;
70 if ((irq_interval
< 0) || (irq_interval
> buf_packets
/ 4))
71 irq_interval
= buf_packets
/ 4;
72 if (irq_interval
== 0) /* really interrupt for each packet */
75 if (channel
< -1 || channel
>= 64)
78 /* channel = -1 is OK for multi-channel recv but not for xmit */
79 if (type
== HPSB_ISO_XMIT
&& channel
< 0)
82 /* allocate and write the struct hpsb_iso */
85 kmalloc(sizeof(*iso
) +
86 buf_packets
* sizeof(struct hpsb_iso_packet_info
),
91 iso
->infos
= (struct hpsb_iso_packet_info
*)(iso
+ 1);
96 iso
->callback
= callback
;
97 init_waitqueue_head(&iso
->waitq
);
98 iso
->channel
= channel
;
99 iso
->irq_interval
= irq_interval
;
100 iso
->dma_mode
= dma_mode
;
101 dma_region_init(&iso
->data_buf
);
102 iso
->buf_size
= PAGE_ALIGN(data_buf_size
);
103 iso
->buf_packets
= buf_packets
;
105 iso
->first_packet
= 0;
106 spin_lock_init(&iso
->lock
);
108 if (iso
->type
== HPSB_ISO_XMIT
) {
109 iso
->n_ready_packets
= iso
->buf_packets
;
110 dma_direction
= PCI_DMA_TODEVICE
;
112 iso
->n_ready_packets
= 0;
113 dma_direction
= PCI_DMA_FROMDEVICE
;
116 atomic_set(&iso
->overflows
, 0);
117 iso
->bytes_discarded
= 0;
121 /* allocate the packet buffer */
123 (&iso
->data_buf
, iso
->buf_size
, host
->pdev
, dma_direction
))
129 hpsb_iso_shutdown(iso
);
133 int hpsb_iso_n_ready(struct hpsb_iso
*iso
)
138 spin_lock_irqsave(&iso
->lock
, flags
);
139 val
= iso
->n_ready_packets
;
140 spin_unlock_irqrestore(&iso
->lock
, flags
);
145 struct hpsb_iso
*hpsb_iso_xmit_init(struct hpsb_host
*host
,
146 unsigned int data_buf_size
,
147 unsigned int buf_packets
,
151 void (*callback
) (struct hpsb_iso
*))
153 struct hpsb_iso
*iso
= hpsb_iso_common_init(host
, HPSB_ISO_XMIT
,
154 data_buf_size
, buf_packets
,
156 HPSB_ISO_DMA_DEFAULT
,
157 irq_interval
, callback
);
163 /* tell the driver to start working */
164 if (host
->driver
->isoctl(iso
, XMIT_INIT
, 0))
167 iso
->flags
|= HPSB_ISO_DRIVER_INIT
;
171 hpsb_iso_shutdown(iso
);
175 struct hpsb_iso
*hpsb_iso_recv_init(struct hpsb_host
*host
,
176 unsigned int data_buf_size
,
177 unsigned int buf_packets
,
181 void (*callback
) (struct hpsb_iso
*))
183 struct hpsb_iso
*iso
= hpsb_iso_common_init(host
, HPSB_ISO_RECV
,
184 data_buf_size
, buf_packets
,
186 irq_interval
, callback
);
190 /* tell the driver to start working */
191 if (host
->driver
->isoctl(iso
, RECV_INIT
, 0))
194 iso
->flags
|= HPSB_ISO_DRIVER_INIT
;
198 hpsb_iso_shutdown(iso
);
202 int hpsb_iso_recv_listen_channel(struct hpsb_iso
*iso
, unsigned char channel
)
204 if (iso
->type
!= HPSB_ISO_RECV
|| iso
->channel
!= -1 || channel
>= 64)
206 return iso
->host
->driver
->isoctl(iso
, RECV_LISTEN_CHANNEL
, channel
);
209 int hpsb_iso_recv_unlisten_channel(struct hpsb_iso
*iso
, unsigned char channel
)
211 if (iso
->type
!= HPSB_ISO_RECV
|| iso
->channel
!= -1 || channel
>= 64)
213 return iso
->host
->driver
->isoctl(iso
, RECV_UNLISTEN_CHANNEL
, channel
);
216 int hpsb_iso_recv_set_channel_mask(struct hpsb_iso
*iso
, u64 mask
)
218 if (iso
->type
!= HPSB_ISO_RECV
|| iso
->channel
!= -1)
220 return iso
->host
->driver
->isoctl(iso
, RECV_SET_CHANNEL_MASK
,
221 (unsigned long)&mask
);
224 int hpsb_iso_recv_flush(struct hpsb_iso
*iso
)
226 if (iso
->type
!= HPSB_ISO_RECV
)
228 return iso
->host
->driver
->isoctl(iso
, RECV_FLUSH
, 0);
231 static int do_iso_xmit_start(struct hpsb_iso
*iso
, int cycle
)
233 int retval
= iso
->host
->driver
->isoctl(iso
, XMIT_START
, cycle
);
237 iso
->flags
|= HPSB_ISO_DRIVER_STARTED
;
241 int hpsb_iso_xmit_start(struct hpsb_iso
*iso
, int cycle
, int prebuffer
)
243 if (iso
->type
!= HPSB_ISO_XMIT
)
246 if (iso
->flags
& HPSB_ISO_DRIVER_STARTED
)
251 else if (cycle
>= 8000)
254 iso
->xmit_cycle
= cycle
;
257 prebuffer
= iso
->buf_packets
- 1;
258 else if (prebuffer
== 0)
261 if (prebuffer
>= iso
->buf_packets
)
262 prebuffer
= iso
->buf_packets
- 1;
264 iso
->prebuffer
= prebuffer
;
266 /* remember the starting cycle; DMA will commence from xmit_queue_packets()
267 once enough packets have been buffered */
268 iso
->start_cycle
= cycle
;
273 int hpsb_iso_recv_start(struct hpsb_iso
*iso
, int cycle
, int tag_mask
, int sync
)
278 if (iso
->type
!= HPSB_ISO_RECV
)
281 if (iso
->flags
& HPSB_ISO_DRIVER_STARTED
)
286 else if (cycle
>= 8000)
289 isoctl_args
[0] = cycle
;
294 isoctl_args
[1] = tag_mask
;
296 isoctl_args
[2] = sync
;
299 iso
->host
->driver
->isoctl(iso
, RECV_START
,
300 (unsigned long)&isoctl_args
[0]);
304 iso
->flags
|= HPSB_ISO_DRIVER_STARTED
;
308 /* check to make sure the user has not supplied bogus values of offset/len
309 that would cause the kernel to access memory outside the buffer */
311 static int hpsb_iso_check_offset_len(struct hpsb_iso
*iso
,
312 unsigned int offset
, unsigned short len
,
313 unsigned int *out_offset
,
314 unsigned short *out_len
)
316 if (offset
>= iso
->buf_size
)
319 /* make sure the packet does not go beyond the end of the buffer */
320 if (offset
+ len
> iso
->buf_size
)
323 /* check for wrap-around */
324 if (offset
+ len
< offset
)
327 /* now we can trust 'offset' and 'length' */
328 *out_offset
= offset
;
334 int hpsb_iso_xmit_queue_packet(struct hpsb_iso
*iso
, u32 offset
, u16 len
,
337 struct hpsb_iso_packet_info
*info
;
341 if (iso
->type
!= HPSB_ISO_XMIT
)
344 /* is there space in the buffer? */
345 if (iso
->n_ready_packets
<= 0) {
349 info
= &iso
->infos
[iso
->first_packet
];
351 /* check for bogus offset/length */
352 if (hpsb_iso_check_offset_len
353 (iso
, offset
, len
, &info
->offset
, &info
->len
))
359 spin_lock_irqsave(&iso
->lock
, flags
);
361 rv
= iso
->host
->driver
->isoctl(iso
, XMIT_QUEUE
, (unsigned long)info
);
365 /* increment cursors */
366 iso
->first_packet
= (iso
->first_packet
+ 1) % iso
->buf_packets
;
367 iso
->xmit_cycle
= (iso
->xmit_cycle
+ 1) % 8000;
368 iso
->n_ready_packets
--;
370 if (iso
->prebuffer
!= 0) {
372 if (iso
->prebuffer
<= 0) {
374 rv
= do_iso_xmit_start(iso
, iso
->start_cycle
);
379 spin_unlock_irqrestore(&iso
->lock
, flags
);
383 int hpsb_iso_xmit_sync(struct hpsb_iso
*iso
)
385 if (iso
->type
!= HPSB_ISO_XMIT
)
388 return wait_event_interruptible(iso
->waitq
,
389 hpsb_iso_n_ready(iso
) ==
393 void hpsb_iso_packet_sent(struct hpsb_iso
*iso
, int cycle
, int error
)
396 spin_lock_irqsave(&iso
->lock
, flags
);
398 /* predict the cycle of the next packet to be queued */
400 /* jump ahead by the number of packets that are already buffered */
401 cycle
+= iso
->buf_packets
- iso
->n_ready_packets
;
404 iso
->xmit_cycle
= cycle
;
405 iso
->n_ready_packets
++;
406 iso
->pkt_dma
= (iso
->pkt_dma
+ 1) % iso
->buf_packets
;
408 if (iso
->n_ready_packets
== iso
->buf_packets
|| error
!= 0) {
409 /* the buffer has run empty! */
410 atomic_inc(&iso
->overflows
);
413 spin_unlock_irqrestore(&iso
->lock
, flags
);
416 void hpsb_iso_packet_received(struct hpsb_iso
*iso
, u32 offset
, u16 len
,
417 u16 total_len
, u16 cycle
, u8 channel
, u8 tag
,
421 spin_lock_irqsave(&iso
->lock
, flags
);
423 if (iso
->n_ready_packets
== iso
->buf_packets
) {
425 atomic_inc(&iso
->overflows
);
426 /* Record size of this discarded packet */
427 iso
->bytes_discarded
+= total_len
;
429 struct hpsb_iso_packet_info
*info
= &iso
->infos
[iso
->pkt_dma
];
430 info
->offset
= offset
;
432 info
->total_len
= total_len
;
434 info
->channel
= channel
;
438 iso
->pkt_dma
= (iso
->pkt_dma
+ 1) % iso
->buf_packets
;
439 iso
->n_ready_packets
++;
442 spin_unlock_irqrestore(&iso
->lock
, flags
);
445 int hpsb_iso_recv_release_packets(struct hpsb_iso
*iso
, unsigned int n_packets
)
451 if (iso
->type
!= HPSB_ISO_RECV
)
454 spin_lock_irqsave(&iso
->lock
, flags
);
455 for (i
= 0; i
< n_packets
; i
++) {
456 rv
= iso
->host
->driver
->isoctl(iso
, RECV_RELEASE
,
457 (unsigned long)&iso
->infos
[iso
->
462 iso
->first_packet
= (iso
->first_packet
+ 1) % iso
->buf_packets
;
463 iso
->n_ready_packets
--;
465 /* release memory from packets discarded when queue was full */
466 if (iso
->n_ready_packets
== 0) { /* Release only after all prior packets handled */
467 if (iso
->bytes_discarded
!= 0) {
468 struct hpsb_iso_packet_info inf
;
469 inf
.total_len
= iso
->bytes_discarded
;
470 iso
->host
->driver
->isoctl(iso
, RECV_RELEASE
,
471 (unsigned long)&inf
);
472 iso
->bytes_discarded
= 0;
476 spin_unlock_irqrestore(&iso
->lock
, flags
);
480 void hpsb_iso_wake(struct hpsb_iso
*iso
)
482 wake_up_interruptible(&iso
->waitq
);