4 * kernel ISO transmission/reception
6 * Copyright (C) 2002 Maas Digital LLC
8 * This code is licensed under the GPL. See the file COPYING in the root
9 * directory of the kernel sources for details.
12 #include <linux/pci.h>
13 #include <linux/sched.h>
14 #include <linux/slab.h>
19 void hpsb_iso_stop(struct hpsb_iso
*iso
)
21 if (!(iso
->flags
& HPSB_ISO_DRIVER_STARTED
))
24 iso
->host
->driver
->isoctl(iso
, iso
->type
== HPSB_ISO_XMIT
?
25 XMIT_STOP
: RECV_STOP
, 0);
26 iso
->flags
&= ~HPSB_ISO_DRIVER_STARTED
;
29 void hpsb_iso_shutdown(struct hpsb_iso
*iso
)
31 if (iso
->flags
& HPSB_ISO_DRIVER_INIT
) {
33 iso
->host
->driver
->isoctl(iso
, iso
->type
== HPSB_ISO_XMIT
?
34 XMIT_SHUTDOWN
: RECV_SHUTDOWN
, 0);
35 iso
->flags
&= ~HPSB_ISO_DRIVER_INIT
;
38 dma_region_free(&iso
->data_buf
);
42 static struct hpsb_iso
*hpsb_iso_common_init(struct hpsb_host
*host
,
43 enum hpsb_iso_type type
,
44 unsigned int data_buf_size
,
45 unsigned int buf_packets
,
46 int channel
, int dma_mode
,
48 void (*callback
) (struct hpsb_iso
54 /* make sure driver supports the ISO API */
55 if (!host
->driver
->isoctl
) {
57 "ieee1394: host driver '%s' does not support the rawiso API\n",
62 /* sanitize parameters */
67 if ((dma_mode
< HPSB_ISO_DMA_DEFAULT
)
68 || (dma_mode
> HPSB_ISO_DMA_PACKET_PER_BUFFER
))
69 dma_mode
= HPSB_ISO_DMA_DEFAULT
;
71 if ((irq_interval
< 0) || (irq_interval
> buf_packets
/ 4))
72 irq_interval
= buf_packets
/ 4;
73 if (irq_interval
== 0) /* really interrupt for each packet */
76 if (channel
< -1 || channel
>= 64)
79 /* channel = -1 is OK for multi-channel recv but not for xmit */
80 if (type
== HPSB_ISO_XMIT
&& channel
< 0)
83 /* allocate and write the struct hpsb_iso */
86 kmalloc(sizeof(*iso
) +
87 buf_packets
* sizeof(struct hpsb_iso_packet_info
),
92 iso
->infos
= (struct hpsb_iso_packet_info
*)(iso
+ 1);
97 iso
->callback
= callback
;
98 init_waitqueue_head(&iso
->waitq
);
99 iso
->channel
= channel
;
100 iso
->irq_interval
= irq_interval
;
101 iso
->dma_mode
= dma_mode
;
102 dma_region_init(&iso
->data_buf
);
103 iso
->buf_size
= PAGE_ALIGN(data_buf_size
);
104 iso
->buf_packets
= buf_packets
;
106 iso
->first_packet
= 0;
107 spin_lock_init(&iso
->lock
);
109 if (iso
->type
== HPSB_ISO_XMIT
) {
110 iso
->n_ready_packets
= iso
->buf_packets
;
111 dma_direction
= PCI_DMA_TODEVICE
;
113 iso
->n_ready_packets
= 0;
114 dma_direction
= PCI_DMA_FROMDEVICE
;
117 atomic_set(&iso
->overflows
, 0);
118 iso
->bytes_discarded
= 0;
122 /* allocate the packet buffer */
124 (&iso
->data_buf
, iso
->buf_size
, host
->pdev
, dma_direction
))
130 hpsb_iso_shutdown(iso
);
134 int hpsb_iso_n_ready(struct hpsb_iso
*iso
)
139 spin_lock_irqsave(&iso
->lock
, flags
);
140 val
= iso
->n_ready_packets
;
141 spin_unlock_irqrestore(&iso
->lock
, flags
);
146 struct hpsb_iso
*hpsb_iso_xmit_init(struct hpsb_host
*host
,
147 unsigned int data_buf_size
,
148 unsigned int buf_packets
,
152 void (*callback
) (struct hpsb_iso
*))
154 struct hpsb_iso
*iso
= hpsb_iso_common_init(host
, HPSB_ISO_XMIT
,
155 data_buf_size
, buf_packets
,
157 HPSB_ISO_DMA_DEFAULT
,
158 irq_interval
, callback
);
164 /* tell the driver to start working */
165 if (host
->driver
->isoctl(iso
, XMIT_INIT
, 0))
168 iso
->flags
|= HPSB_ISO_DRIVER_INIT
;
172 hpsb_iso_shutdown(iso
);
176 struct hpsb_iso
*hpsb_iso_recv_init(struct hpsb_host
*host
,
177 unsigned int data_buf_size
,
178 unsigned int buf_packets
,
182 void (*callback
) (struct hpsb_iso
*))
184 struct hpsb_iso
*iso
= hpsb_iso_common_init(host
, HPSB_ISO_RECV
,
185 data_buf_size
, buf_packets
,
187 irq_interval
, callback
);
191 /* tell the driver to start working */
192 if (host
->driver
->isoctl(iso
, RECV_INIT
, 0))
195 iso
->flags
|= HPSB_ISO_DRIVER_INIT
;
199 hpsb_iso_shutdown(iso
);
203 int hpsb_iso_recv_listen_channel(struct hpsb_iso
*iso
, unsigned char channel
)
205 if (iso
->type
!= HPSB_ISO_RECV
|| iso
->channel
!= -1 || channel
>= 64)
207 return iso
->host
->driver
->isoctl(iso
, RECV_LISTEN_CHANNEL
, channel
);
210 int hpsb_iso_recv_unlisten_channel(struct hpsb_iso
*iso
, unsigned char channel
)
212 if (iso
->type
!= HPSB_ISO_RECV
|| iso
->channel
!= -1 || channel
>= 64)
214 return iso
->host
->driver
->isoctl(iso
, RECV_UNLISTEN_CHANNEL
, channel
);
217 int hpsb_iso_recv_set_channel_mask(struct hpsb_iso
*iso
, u64 mask
)
219 if (iso
->type
!= HPSB_ISO_RECV
|| iso
->channel
!= -1)
221 return iso
->host
->driver
->isoctl(iso
, RECV_SET_CHANNEL_MASK
,
222 (unsigned long)&mask
);
225 int hpsb_iso_recv_flush(struct hpsb_iso
*iso
)
227 if (iso
->type
!= HPSB_ISO_RECV
)
229 return iso
->host
->driver
->isoctl(iso
, RECV_FLUSH
, 0);
232 static int do_iso_xmit_start(struct hpsb_iso
*iso
, int cycle
)
234 int retval
= iso
->host
->driver
->isoctl(iso
, XMIT_START
, cycle
);
238 iso
->flags
|= HPSB_ISO_DRIVER_STARTED
;
242 int hpsb_iso_xmit_start(struct hpsb_iso
*iso
, int cycle
, int prebuffer
)
244 if (iso
->type
!= HPSB_ISO_XMIT
)
247 if (iso
->flags
& HPSB_ISO_DRIVER_STARTED
)
252 else if (cycle
>= 8000)
255 iso
->xmit_cycle
= cycle
;
258 prebuffer
= iso
->buf_packets
- 1;
259 else if (prebuffer
== 0)
262 if (prebuffer
>= iso
->buf_packets
)
263 prebuffer
= iso
->buf_packets
- 1;
265 iso
->prebuffer
= prebuffer
;
267 /* remember the starting cycle; DMA will commence from xmit_queue_packets()
268 once enough packets have been buffered */
269 iso
->start_cycle
= cycle
;
274 int hpsb_iso_recv_start(struct hpsb_iso
*iso
, int cycle
, int tag_mask
, int sync
)
279 if (iso
->type
!= HPSB_ISO_RECV
)
282 if (iso
->flags
& HPSB_ISO_DRIVER_STARTED
)
287 else if (cycle
>= 8000)
290 isoctl_args
[0] = cycle
;
295 isoctl_args
[1] = tag_mask
;
297 isoctl_args
[2] = sync
;
300 iso
->host
->driver
->isoctl(iso
, RECV_START
,
301 (unsigned long)&isoctl_args
[0]);
305 iso
->flags
|= HPSB_ISO_DRIVER_STARTED
;
309 /* check to make sure the user has not supplied bogus values of offset/len
310 that would cause the kernel to access memory outside the buffer */
312 static int hpsb_iso_check_offset_len(struct hpsb_iso
*iso
,
313 unsigned int offset
, unsigned short len
,
314 unsigned int *out_offset
,
315 unsigned short *out_len
)
317 if (offset
>= iso
->buf_size
)
320 /* make sure the packet does not go beyond the end of the buffer */
321 if (offset
+ len
> iso
->buf_size
)
324 /* check for wrap-around */
325 if (offset
+ len
< offset
)
328 /* now we can trust 'offset' and 'length' */
329 *out_offset
= offset
;
335 int hpsb_iso_xmit_queue_packet(struct hpsb_iso
*iso
, u32 offset
, u16 len
,
338 struct hpsb_iso_packet_info
*info
;
342 if (iso
->type
!= HPSB_ISO_XMIT
)
345 /* is there space in the buffer? */
346 if (iso
->n_ready_packets
<= 0) {
350 info
= &iso
->infos
[iso
->first_packet
];
352 /* check for bogus offset/length */
353 if (hpsb_iso_check_offset_len
354 (iso
, offset
, len
, &info
->offset
, &info
->len
))
360 spin_lock_irqsave(&iso
->lock
, flags
);
362 rv
= iso
->host
->driver
->isoctl(iso
, XMIT_QUEUE
, (unsigned long)info
);
366 /* increment cursors */
367 iso
->first_packet
= (iso
->first_packet
+ 1) % iso
->buf_packets
;
368 iso
->xmit_cycle
= (iso
->xmit_cycle
+ 1) % 8000;
369 iso
->n_ready_packets
--;
371 if (iso
->prebuffer
!= 0) {
373 if (iso
->prebuffer
<= 0) {
375 rv
= do_iso_xmit_start(iso
, iso
->start_cycle
);
380 spin_unlock_irqrestore(&iso
->lock
, flags
);
384 int hpsb_iso_xmit_sync(struct hpsb_iso
*iso
)
386 if (iso
->type
!= HPSB_ISO_XMIT
)
389 return wait_event_interruptible(iso
->waitq
,
390 hpsb_iso_n_ready(iso
) ==
394 void hpsb_iso_packet_sent(struct hpsb_iso
*iso
, int cycle
, int error
)
397 spin_lock_irqsave(&iso
->lock
, flags
);
399 /* predict the cycle of the next packet to be queued */
401 /* jump ahead by the number of packets that are already buffered */
402 cycle
+= iso
->buf_packets
- iso
->n_ready_packets
;
405 iso
->xmit_cycle
= cycle
;
406 iso
->n_ready_packets
++;
407 iso
->pkt_dma
= (iso
->pkt_dma
+ 1) % iso
->buf_packets
;
409 if (iso
->n_ready_packets
== iso
->buf_packets
|| error
!= 0) {
410 /* the buffer has run empty! */
411 atomic_inc(&iso
->overflows
);
414 spin_unlock_irqrestore(&iso
->lock
, flags
);
417 void hpsb_iso_packet_received(struct hpsb_iso
*iso
, u32 offset
, u16 len
,
418 u16 total_len
, u16 cycle
, u8 channel
, u8 tag
,
422 spin_lock_irqsave(&iso
->lock
, flags
);
424 if (iso
->n_ready_packets
== iso
->buf_packets
) {
426 atomic_inc(&iso
->overflows
);
427 /* Record size of this discarded packet */
428 iso
->bytes_discarded
+= total_len
;
430 struct hpsb_iso_packet_info
*info
= &iso
->infos
[iso
->pkt_dma
];
431 info
->offset
= offset
;
433 info
->total_len
= total_len
;
435 info
->channel
= channel
;
439 iso
->pkt_dma
= (iso
->pkt_dma
+ 1) % iso
->buf_packets
;
440 iso
->n_ready_packets
++;
443 spin_unlock_irqrestore(&iso
->lock
, flags
);
446 int hpsb_iso_recv_release_packets(struct hpsb_iso
*iso
, unsigned int n_packets
)
452 if (iso
->type
!= HPSB_ISO_RECV
)
455 spin_lock_irqsave(&iso
->lock
, flags
);
456 for (i
= 0; i
< n_packets
; i
++) {
457 rv
= iso
->host
->driver
->isoctl(iso
, RECV_RELEASE
,
458 (unsigned long)&iso
->infos
[iso
->
463 iso
->first_packet
= (iso
->first_packet
+ 1) % iso
->buf_packets
;
464 iso
->n_ready_packets
--;
466 /* release memory from packets discarded when queue was full */
467 if (iso
->n_ready_packets
== 0) { /* Release only after all prior packets handled */
468 if (iso
->bytes_discarded
!= 0) {
469 struct hpsb_iso_packet_info inf
;
470 inf
.total_len
= iso
->bytes_discarded
;
471 iso
->host
->driver
->isoctl(iso
, RECV_RELEASE
,
472 (unsigned long)&inf
);
473 iso
->bytes_discarded
= 0;
477 spin_unlock_irqrestore(&iso
->lock
, flags
);
/* Wake sleepers on iso->waitq (e.g. hpsb_iso_xmit_sync).
 * NOTE(review): this definition continues past the visible chunk — the
 * remainder of the body (closing brace and any trailing statements) is
 * not shown here, so only the visible wake_up call is documented. */
481 void hpsb_iso_wake(struct hpsb_iso
*iso
)
483 wake_up_interruptible(&iso
->waitq
);