/*
 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
 *
 * Implements upper edge functions for Bridge driver channel module.
 *
 * Copyright (C) 2005-2006 Texas Instruments, Inc.
 *
 * This package is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 * The lower edge functions must be implemented by the Bridge driver
 * writer, and are declared in chnl_sm.h.
 *
 * Care is taken in this code to prevent simultaneous access to channel
 * queues from:
 *      2. io_dpc(), scheduled from the io_isr() as an event.
 *
 * This is done primarily by:
 *      - state flags in the channel object; and
 *      - ensuring the IO_Dispatch() routine, which is called from both
 *        CHNL_AddIOReq() and the DPC (if implemented), is not re-entered.
 *
 * There is an important invariant condition which must be maintained per
 * channel outside of bridge_chnl_get_ioc() and IO_Dispatch(), violation of
 * which may cause timeouts and/or failure of function sync_wait_on_event.
 * This invariant condition is:
 *
 *      LST_Empty(pchnl->pio_completions) ==> pchnl->sync_event is reset
 *      !LST_Empty(pchnl->pio_completions) ==> pchnl->sync_event is set.
 */
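
/*
 * Illustrative sketch (editorial note, not part of the original driver):
 * the invariant above is what keeps sync_wait_on_event() in
 * bridge_chnl_get_ioc() honest. Any code path that changes pio_completions
 * is expected to leave the event tracking the list state, roughly:
 *
 *      if (LST_IS_EMPTY(pchnl->pio_completions))
 *              sync_reset_event(pchnl->sync_event);
 *      else
 *              sync_set_event(pchnl->sync_event);
 *
 * bridge_chnl_get_ioc() below re-establishes exactly this pairing after it
 * dequeues a completion.
 */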

#include <linux/types.h>

/*  ----------------------------------- OS */
#include <dspbridge/host_os.h>

/*  ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>

/*  ----------------------------------- Trace & Debug */
#include <dspbridge/dbc.h>

/*  ----------------------------------- OS Adaptation Layer */
#include <dspbridge/cfg.h>
#include <dspbridge/sync.h>

/*  ----------------------------------- Bridge Driver */
#include <dspbridge/dspdefs.h>
#include <dspbridge/dspchnl.h>

/*  ----------------------------------- Platform Manager */
#include <dspbridge/dev.h>

/*  ----------------------------------- Others */
#include <dspbridge/io_sm.h>

/*  ----------------------------------- Define for This */
#define USERMODE_ADDR   PAGE_OFFSET

#define MAILBOX_IRQ INT_MAIL_MPU_IRQ

/*  ----------------------------------- Function Prototypes */
static struct lst_list *create_chirp_list(u32 chirps);

static void free_chirp_list(struct lst_list *chirp_list);

static struct chnl_irp *make_new_chirp(void);

static int search_free_channel(struct chnl_mgr *chnl_mgr_obj,
                               u32 *chnl_id);
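
/*
 * Terminology note: "chirp" is the shorthand used throughout this module for
 * a channel I/O request packet (struct chnl_irp). Chirps circulate between a
 * channel's free_packets_list, pio_requests and pio_completions queues.
 */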

/*
 *  ======== bridge_chnl_add_io_req ========
 *      Enqueue an I/O request for data transfer on a channel to the DSP.
 *      The direction (mode) is specified in the channel object. Note the DSP
 *      address is specified for channels opened in direct I/O mode.
 */
int bridge_chnl_add_io_req(struct chnl_object *chnl_obj, void *host_buf,
                           u32 byte_size, u32 buf_size,
                           u32 dw_dsp_addr, u32 dw_arg)
{
        struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
        struct chnl_irp *chnl_packet_obj = NULL;
        struct bridge_dev_context *dev_ctxt;
        struct dev_object *dev_obj;
        struct chnl_mgr *chnl_mgr_obj = pchnl->chnl_mgr_obj;
        u8 *host_sys_buf = NULL;
        bool sched_dpc = false;

        is_eos = (byte_size == 0);

        if (!host_buf || !pchnl) {
        } else if (is_eos && CHNL_IS_INPUT(pchnl->chnl_mode)) {

        /* Check the channel state: only queue chirp if channel state
         * allows it. */
        dw_state = pchnl->dw_state;
        if (dw_state != CHNL_STATEREADY) {
                if (dw_state & CHNL_STATECANCEL)
                else if ((dw_state & CHNL_STATEEOS) &&
                         CHNL_IS_OUTPUT(pchnl->chnl_mode))
                /* No other possible states left */

        dev_obj = dev_get_first();
        dev_get_bridge_context(dev_obj, &dev_ctxt);

        if (pchnl->chnl_type == CHNL_PCPY && pchnl->chnl_id > 1 && host_buf) {
                if (!(host_buf < (void *)USERMODE_ADDR)) {
                        host_sys_buf = host_buf;

                /* if addr in user mode, then copy to kernel space */
                host_sys_buf = kmalloc(buf_size, GFP_KERNEL);
                if (host_sys_buf == NULL) {

                if (CHNL_IS_OUTPUT(pchnl->chnl_mode)) {
                        status = copy_from_user(host_sys_buf, host_buf,

        /* Mailbox IRQ is disabled to avoid race condition with DMA/ZCPY
         * channels. DPCCS is held to avoid race conditions with PCPY channels.
         * If DPC is scheduled in process context (iosm_schedule) and any
         * non-mailbox interrupt occurs, that DPC will run and break CS. Hence
         * we disable ALL DPCs. We will try to disable ONLY IO DPC later. */
        spin_lock_bh(&chnl_mgr_obj->chnl_mgr_lock);
        omap_mbox_disable_irq(dev_ctxt->mbox, IRQ_RX);
        if (pchnl->chnl_type == CHNL_PCPY) {
                /* This is a processor-copy channel. */
                if (!status && CHNL_IS_OUTPUT(pchnl->chnl_mode)) {
                        /* Check buffer size on output channels for fit. */
                            io_buf_size(pchnl->chnl_mgr_obj->hio_mgr))

        /* Get a free chirp: */
        chnl_packet_obj =
            (struct chnl_irp *)lst_get_head(pchnl->free_packets_list);
        if (chnl_packet_obj == NULL)

        /* Enqueue the chirp on the chnl's IORequest queue: */
        chnl_packet_obj->host_user_buf = chnl_packet_obj->host_sys_buf =
            host_buf;
        if (pchnl->chnl_type == CHNL_PCPY && pchnl->chnl_id > 1)
                chnl_packet_obj->host_sys_buf = host_sys_buf;

        /* Note: for dma chans dw_dsp_addr contains dsp address */
        DBC_ASSERT(chnl_mgr_obj->word_size != 0);
        chnl_packet_obj->dsp_tx_addr =
            dw_dsp_addr / chnl_mgr_obj->word_size;
        chnl_packet_obj->byte_size = byte_size;
        chnl_packet_obj->buf_size = buf_size;
        /* Only valid for output channel */
        chnl_packet_obj->dw_arg = dw_arg;
        chnl_packet_obj->status = (is_eos ? CHNL_IOCSTATEOS :
                                   CHNL_IOCSTATCOMPLETE);
        lst_put_tail(pchnl->pio_requests,
                     (struct list_head *)chnl_packet_obj);
        DBC_ASSERT(pchnl->cio_reqs <= pchnl->chnl_packets);

        /* If end of stream, update the channel state to prevent
         * more IO requests. */
        pchnl->dw_state |= CHNL_STATEEOS;

        /* Legacy DSM Processor-Copy */
        DBC_ASSERT(pchnl->chnl_type == CHNL_PCPY);
        /* Request IO from the DSP */
        io_request_chnl(chnl_mgr_obj->hio_mgr, pchnl,
                        (CHNL_IS_INPUT(pchnl->chnl_mode) ? IO_INPUT :
                         IO_OUTPUT), &mb_val);

        omap_mbox_enable_irq(dev_ctxt->mbox, IRQ_RX);
        spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock);

        sm_interrupt_dsp(dev_ctxt, mb_val);

        /* Schedule a DPC, to do the actual data transfer */
        iosm_schedule(chnl_mgr_obj->hio_mgr);
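
/*
 * Illustrative usage (hypothetical caller, not part of this file): a client
 * normally pairs bridge_chnl_add_io_req() with bridge_chnl_get_ioc() to
 * reclaim the buffer once the transfer completes, e.g.:
 *
 *      struct chnl_ioc ioc;
 *
 *      status = bridge_chnl_add_io_req(chnl, buf, bytes, buf_size, 0, arg);
 *      if (!status)
 *              status = bridge_chnl_get_ioc(chnl, CHNL_IOCINFINITE, &ioc);
 *
 * The dw_dsp_addr argument (0 here) is only meaningful for channels opened
 * in direct I/O mode, as noted in the function header above.
 */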

/*
 *  ======== bridge_chnl_cancel_io ========
 *      Return all I/O requests to the client which have not yet been
 *      transferred. The channel's I/O completion object is
 *      signalled, and all the I/O requests are queued as IOC's, with the
 *      status field set to CHNL_IOCSTATCANCEL.
 *      This call is typically used in abort situations, and is a prelude to
 *      closing the channel.
 */
int bridge_chnl_cancel_io(struct chnl_object *chnl_obj)
{
        struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
        struct chnl_irp *chnl_packet_obj;
        struct chnl_mgr *chnl_mgr_obj = NULL;

        if (pchnl && pchnl->chnl_mgr_obj) {
                chnl_id = pchnl->chnl_id;
                chnl_mode = pchnl->chnl_mode;
                chnl_mgr_obj = pchnl->chnl_mgr_obj;
        }

        /* Mark this channel as cancelled, to prevent further IORequests or
         * dispatching. */
        spin_lock_bh(&chnl_mgr_obj->chnl_mgr_lock);
        pchnl->dw_state |= CHNL_STATECANCEL;
        if (LST_IS_EMPTY(pchnl->pio_requests))

        if (pchnl->chnl_type == CHNL_PCPY) {
                /* Indicate we have no more buffers available for transfer: */
                if (CHNL_IS_INPUT(pchnl->chnl_mode)) {
                        io_cancel_chnl(chnl_mgr_obj->hio_mgr, chnl_id);
                }
                /* Record that we no longer have output buffers
                 * available: */
                chnl_mgr_obj->dw_output_mask &= ~(1 << chnl_id);
        }
        /* Move all IOR's to IOC queue: */
        while (!LST_IS_EMPTY(pchnl->pio_requests)) {
                chnl_packet_obj =
                    (struct chnl_irp *)lst_get_head(pchnl->pio_requests);
                if (chnl_packet_obj) {
                        chnl_packet_obj->byte_size = 0;
                        chnl_packet_obj->status |= CHNL_IOCSTATCANCEL;
                        lst_put_tail(pchnl->pio_completions,
                                     (struct list_head *)chnl_packet_obj);
                }
        }
        DBC_ASSERT(pchnl->cio_reqs >= 0);

        spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock);
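
/*
 * Note: bridge_chnl_close(), bridge_chnl_flush_io() and bridge_chnl_idle()
 * below all rely on this routine (directly or via one another) to drain
 * pio_requests before a channel is flushed, idled or torn down.
 */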

/*
 *  ======== bridge_chnl_close ========
 *      Ensures all pending I/O on this channel is cancelled, discards all
 *      queued I/O completion notifications, then frees the resources allocated
 *      for this channel, and makes the corresponding logical channel id
 *      available for subsequent use.
 */
int bridge_chnl_close(struct chnl_object *chnl_obj)
{
        struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;

        /* Cancel IO: this ensures no further IO requests or
         * dispatching. */
        status = bridge_chnl_cancel_io(chnl_obj);

        /* Assert I/O on this channel is now cancelled: Protects
         * from io_dpc(). */
        DBC_ASSERT((pchnl->dw_state & CHNL_STATECANCEL));
        /* Invalidate channel object: Protects from
         * CHNL_GetIOCompletion(). */
        /* Free the slot in the channel manager: */
        pchnl->chnl_mgr_obj->ap_channel[pchnl->chnl_id] = NULL;
        spin_lock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock);
        pchnl->chnl_mgr_obj->open_channels -= 1;
        spin_unlock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock);
        if (pchnl->ntfy_obj) {
                ntfy_delete(pchnl->ntfy_obj);
                kfree(pchnl->ntfy_obj);
                pchnl->ntfy_obj = NULL;
        }
        /* Reset channel event: (NOTE: user_event freed in user
         * context). */
        if (pchnl->sync_event) {
                sync_reset_event(pchnl->sync_event);
                kfree(pchnl->sync_event);
                pchnl->sync_event = NULL;
        }
        /* Free I/O request and I/O completion queues: */
        if (pchnl->pio_completions) {
                free_chirp_list(pchnl->pio_completions);
                pchnl->pio_completions = NULL;
        }
        if (pchnl->pio_requests) {
                free_chirp_list(pchnl->pio_requests);
                pchnl->pio_requests = NULL;
        }
        if (pchnl->free_packets_list) {
                free_chirp_list(pchnl->free_packets_list);
                pchnl->free_packets_list = NULL;
        }
        /* Release channel object. */

        DBC_ENSURE(status || !pchnl);

/*
 *  ======== bridge_chnl_create ========
 *      Create a channel manager object, responsible for opening new channels
 *      and closing old ones for a given board.
 */
int bridge_chnl_create(struct chnl_mgr **channel_mgr,
                       struct dev_object *hdev_obj,
                       const struct chnl_mgrattrs *mgr_attrts)
{
        struct chnl_mgr *chnl_mgr_obj = NULL;

        /* Check DBC requirements: */
        DBC_REQUIRE(channel_mgr != NULL);
        DBC_REQUIRE(mgr_attrts != NULL);
        DBC_REQUIRE(mgr_attrts->max_channels > 0);
        DBC_REQUIRE(mgr_attrts->max_channels <= CHNL_MAXCHANNELS);
        DBC_REQUIRE(mgr_attrts->word_size != 0);

        /* Allocate channel manager object */
        chnl_mgr_obj = kzalloc(sizeof(struct chnl_mgr), GFP_KERNEL);

        /*
         * The max_channels attr must equal the # of supported chnls for
         * each transport (# chnls for PCPY = DDMA = ZCPY): i.e.
         * mgr_attrts->max_channels = CHNL_MAXCHANNELS =
         * DDMA_MAXDDMACHNLS = DDMA_MAXZCPYCHNLS.
         */
        DBC_ASSERT(mgr_attrts->max_channels == CHNL_MAXCHANNELS);
        max_channels = CHNL_MAXCHANNELS + CHNL_MAXCHANNELS * CHNL_PCPY;
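        /*
         * For illustration only (the macro values below are assumptions, not
         * taken from this file): if CHNL_MAXCHANNELS were 16 and CHNL_PCPY
         * were 1, this would reserve 16 + 16 * 1 = 32 slots, i.e. one bank of
         * channel slots per supported transport, as the comment above
         * requires.
         */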
        /* Create array of channels */
        chnl_mgr_obj->ap_channel = kzalloc(sizeof(struct chnl_object *)
                                           * max_channels, GFP_KERNEL);
        if (chnl_mgr_obj->ap_channel) {
                /* Initialize chnl_mgr object */
                chnl_mgr_obj->dw_type = CHNL_TYPESM;
                chnl_mgr_obj->word_size = mgr_attrts->word_size;
                /* Total # chnls supported */
                chnl_mgr_obj->max_channels = max_channels;
                chnl_mgr_obj->open_channels = 0;
                chnl_mgr_obj->dw_output_mask = 0;
                chnl_mgr_obj->dw_last_output = 0;
                chnl_mgr_obj->hdev_obj = hdev_obj;
                spin_lock_init(&chnl_mgr_obj->chnl_mgr_lock);
        }

                bridge_chnl_destroy(chnl_mgr_obj);

        /* Return channel manager object to caller... */
        *channel_mgr = chnl_mgr_obj;

/*
 *  ======== bridge_chnl_destroy ========
 *      Close all open channels, and destroy the channel manager.
 */
int bridge_chnl_destroy(struct chnl_mgr *hchnl_mgr)
{
        struct chnl_mgr *chnl_mgr_obj = hchnl_mgr;

        /* Close all open channels: */
        for (chnl_id = 0; chnl_id < chnl_mgr_obj->max_channels;
             chnl_id++) {
                bridge_chnl_close(chnl_mgr_obj->ap_channel[chnl_id]);
                dev_dbg(bridge, "%s: Error status 0x%x\n",

        /* Free channel manager object: */
        kfree(chnl_mgr_obj->ap_channel);

        /* Set hchnl_mgr to NULL in device object. */
        dev_set_chnl_mgr(chnl_mgr_obj->hdev_obj, NULL);
        /* Free this Chnl Mgr object: */

/*
 *  ======== bridge_chnl_flush_io ========
 *      Flushes all the outstanding data requests on a channel.
 */
int bridge_chnl_flush_io(struct chnl_object *chnl_obj, u32 timeout)
{
        struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
        struct chnl_mgr *chnl_mgr_obj;
        struct chnl_ioc chnl_ioc_obj;

        if ((timeout == CHNL_IOCNOWAIT)
            && CHNL_IS_OUTPUT(pchnl->chnl_mode)) {

        chnl_mode = pchnl->chnl_mode;
        chnl_mgr_obj = pchnl->chnl_mgr_obj;

        /* Note: Currently, if another thread continues to add IO
         * requests to this channel, this function will continue to
         * flush all such queued IO requests. */
        if (CHNL_IS_OUTPUT(chnl_mode)
            && (pchnl->chnl_type == CHNL_PCPY)) {
                /* Wait for IO completions, up to the specified
                 * timeout: */
                while (!LST_IS_EMPTY(pchnl->pio_requests) && !status) {
                        status = bridge_chnl_get_ioc(chnl_obj,
                                                     timeout, &chnl_ioc_obj);

                        if (chnl_ioc_obj.status & CHNL_IOCSTATTIMEOUT)

        status = bridge_chnl_cancel_io(chnl_obj);
        /* Now, leave the channel in the ready state: */
        pchnl->dw_state &= ~CHNL_STATECANCEL;

        DBC_ENSURE(status || LST_IS_EMPTY(pchnl->pio_requests));

/*
 *  ======== bridge_chnl_get_info ========
 *      Retrieve information related to a channel.
 */
int bridge_chnl_get_info(struct chnl_object *chnl_obj,
                         struct chnl_info *channel_info)
{
        struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;

        if (channel_info != NULL) {
                /* Return the requested information: */
                channel_info->hchnl_mgr = pchnl->chnl_mgr_obj;
                channel_info->event_obj = pchnl->user_event;
                channel_info->cnhl_id = pchnl->chnl_id;
                channel_info->dw_mode = pchnl->chnl_mode;
                channel_info->bytes_tx = pchnl->bytes_moved;
                channel_info->process = pchnl->process;
                channel_info->sync_event = pchnl->sync_event;
                channel_info->cio_cs = pchnl->cio_cs;
                channel_info->cio_reqs = pchnl->cio_reqs;
                channel_info->dw_state = pchnl->dw_state;

/*
 *  ======== bridge_chnl_get_ioc ========
 *      Optionally wait for I/O completion on a channel. Dequeue an I/O
 *      completion record, which contains information about the completed
 *      I/O request.
 *      Note: Ensures Channel Invariant (see notes above).
 */
int bridge_chnl_get_ioc(struct chnl_object *chnl_obj, u32 timeout,
                        struct chnl_ioc *chan_ioc)
{
        struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
        struct chnl_irp *chnl_packet_obj;
        bool dequeue_ioc = true;
        struct chnl_ioc ioc = { NULL, 0, 0, 0, 0 };
        u8 *host_sys_buf = NULL;
        struct bridge_dev_context *dev_ctxt;
        struct dev_object *dev_obj;

        if (!chan_ioc || !pchnl) {
        } else if (timeout == CHNL_IOCNOWAIT) {
                if (LST_IS_EMPTY(pchnl->pio_completions))

        dev_obj = dev_get_first();
        dev_get_bridge_context(dev_obj, &dev_ctxt);

        ioc.status = CHNL_IOCSTATCOMPLETE;
        if (timeout !=
            CHNL_IOCNOWAIT && LST_IS_EMPTY(pchnl->pio_completions)) {
                if (timeout == CHNL_IOCINFINITE)
                        timeout = SYNC_INFINITE;

                stat_sync = sync_wait_on_event(pchnl->sync_event, timeout);
                if (stat_sync == -ETIME) {
                        /* No response from DSP */
                        ioc.status |= CHNL_IOCSTATTIMEOUT;
                } else if (stat_sync == -EPERM) {
                        /* This can occur when the user mode thread is
                         * aborted (^C), or when _VWIN32_WaitSingleObject()
                         * fails due to unknown causes. */
                        /* Even though Wait failed, there may be something in
                         * the queue: */
                        if (LST_IS_EMPTY(pchnl->pio_completions)) {
                                ioc.status |= CHNL_IOCSTATCANCEL;
                        }
                }
        }
        /* See comment in AddIOReq */
        spin_lock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock);
        omap_mbox_disable_irq(dev_ctxt->mbox, IRQ_RX);

        /* Dequeue IOC and set chan_ioc; */
        DBC_ASSERT(!LST_IS_EMPTY(pchnl->pio_completions));
        chnl_packet_obj =
            (struct chnl_irp *)lst_get_head(pchnl->pio_completions);
        /* Update chan_ioc from channel state and chirp: */
        if (chnl_packet_obj) {
                /* If this is a zero-copy channel, then set IOC's pbuf
                 * to the DSP's address. This DSP address will get
                 * translated to user's virtual addr later. */
                host_sys_buf = chnl_packet_obj->host_sys_buf;
                ioc.pbuf = chnl_packet_obj->host_user_buf;
                ioc.byte_size = chnl_packet_obj->byte_size;
                ioc.buf_size = chnl_packet_obj->buf_size;
                ioc.dw_arg = chnl_packet_obj->dw_arg;
                ioc.status |= chnl_packet_obj->status;
                /* Place the used chirp on the free list: */
                lst_put_tail(pchnl->free_packets_list,
                             (struct list_head *)chnl_packet_obj);
        }

        /* Ensure invariant: If any IOC's are queued for this channel... */
        if (!LST_IS_EMPTY(pchnl->pio_completions)) {
                /* Since DSPStream_Reclaim() does not take a timeout
                 * parameter, we pass the stream's timeout value to
                 * bridge_chnl_get_ioc. We cannot determine whether or not
                 * we have waited in User mode. Since the stream's timeout
                 * value may be non-zero, we still have to set the event.
                 * Therefore, this optimization is taken out.
                 *
                 * if (timeout == CHNL_IOCNOWAIT) {
                 *      ... ensure event is set..
                 *      sync_set_event(pchnl->sync_event);
                 * }
                 */
                sync_set_event(pchnl->sync_event);
        } else {
                /* else, if list is empty, ensure event is reset. */
                sync_reset_event(pchnl->sync_event);
        }
        omap_mbox_enable_irq(dev_ctxt->mbox, IRQ_RX);
        spin_unlock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock);
        if (dequeue_ioc
            && (pchnl->chnl_type == CHNL_PCPY && pchnl->chnl_id > 1)) {
                if (!(ioc.pbuf < (void *)USERMODE_ADDR))

                /* If the addr is in user mode, then copy it */
                if (!host_sys_buf || !ioc.pbuf) {

                if (!CHNL_IS_INPUT(pchnl->chnl_mode))

                status = copy_to_user(ioc.pbuf, host_sys_buf, ioc.byte_size);
                if (current->flags & PF_EXITING)

        /* Update User's IOC block: */
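
/*
 * Note: the sync_set_event()/sync_reset_event() pairing above is what
 * re-establishes the channel invariant described at the top of this file
 * once a completion has been dequeued. The copy_to_user() path only applies
 * when the client buffer is a user-mode address (below USERMODE_ADDR),
 * mirroring the copy_from_user() bounce in bridge_chnl_add_io_req().
 */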

/*
 *  ======== bridge_chnl_get_mgr_info ========
 *      Retrieve information related to the channel manager.
 */
int bridge_chnl_get_mgr_info(struct chnl_mgr *hchnl_mgr, u32 ch_id,
                             struct chnl_mgrinfo *mgr_info)
{
        struct chnl_mgr *chnl_mgr_obj = (struct chnl_mgr *)hchnl_mgr;

        if (mgr_info != NULL) {
                if (ch_id <= CHNL_MAXCHANNELS) {
                        /* Return the requested information: */
                            chnl_mgr_obj->ap_channel[ch_id];
                        mgr_info->open_channels =
                            chnl_mgr_obj->open_channels;
                        mgr_info->dw_type = chnl_mgr_obj->dw_type;
                        /* total # of chnls */
                        mgr_info->max_channels =
                            chnl_mgr_obj->max_channels;

/*
 *  ======== bridge_chnl_idle ========
 *      Idles a particular channel.
 */
int bridge_chnl_idle(struct chnl_object *chnl_obj, u32 timeout,
                     bool flush_data)
{
        struct chnl_mgr *chnl_mgr_obj;

        DBC_REQUIRE(chnl_obj);

        chnl_mode = chnl_obj->chnl_mode;
        chnl_mgr_obj = chnl_obj->chnl_mgr_obj;

        if (CHNL_IS_OUTPUT(chnl_mode) && !flush_data) {
                /* Wait for IO completions, up to the specified timeout: */
                status = bridge_chnl_flush_io(chnl_obj, timeout);
        } else {
                status = bridge_chnl_cancel_io(chnl_obj);
        }

        /* Reset the byte count and put channel back in ready state. */
        chnl_obj->bytes_moved = 0;
        chnl_obj->dw_state &= ~CHNL_STATECANCEL;

/*
 *  ======== bridge_chnl_open ========
 *      Open a new half-duplex channel to the DSP board.
 */
int bridge_chnl_open(struct chnl_object **chnl,
                     struct chnl_mgr *hchnl_mgr, s8 chnl_mode,
                     u32 ch_id, const struct chnl_attr *pattrs)
{
        struct chnl_mgr *chnl_mgr_obj = hchnl_mgr;
        struct chnl_object *pchnl = NULL;
        struct sync_object *sync_event = NULL;

        /* Ensure DBC requirements: */
        DBC_REQUIRE(chnl != NULL);
        DBC_REQUIRE(pattrs != NULL);
        DBC_REQUIRE(hchnl_mgr != NULL);

        if (pattrs->uio_reqs == 0) {

        if (ch_id != CHNL_PICKFREE) {
                if (ch_id >= chnl_mgr_obj->max_channels)
                else if (chnl_mgr_obj->ap_channel[ch_id] != NULL)
        } else {
                /* Check for free channel */
                search_free_channel(chnl_mgr_obj, &ch_id);
        }

        DBC_ASSERT(ch_id < chnl_mgr_obj->max_channels);
        /* Create channel object: */
        pchnl = kzalloc(sizeof(struct chnl_object), GFP_KERNEL);

        /* Protect queues from io_dpc: */
        pchnl->dw_state = CHNL_STATECANCEL;
        /* Allocate initial IOR and IOC queues: */
        pchnl->free_packets_list = create_chirp_list(pattrs->uio_reqs);
        pchnl->pio_requests = create_chirp_list(0);
        pchnl->pio_completions = create_chirp_list(0);
        pchnl->chnl_packets = pattrs->uio_reqs;

        sync_event = kzalloc(sizeof(struct sync_object), GFP_KERNEL);
        sync_init_event(sync_event);

        pchnl->ntfy_obj = kmalloc(sizeof(struct ntfy_object),
                                  GFP_KERNEL);
        ntfy_init(pchnl->ntfy_obj);

        if (pchnl->pio_completions && pchnl->pio_requests &&
            pchnl->free_packets_list) {
                /* Initialize CHNL object fields: */
                pchnl->chnl_mgr_obj = chnl_mgr_obj;
                pchnl->chnl_id = ch_id;
                pchnl->chnl_mode = chnl_mode;
                pchnl->user_event = sync_event;
                pchnl->sync_event = sync_event;
                /* Get the process handle */
                pchnl->process = current->tgid;
                pchnl->bytes_moved = 0;
                /* Default to proc-copy */
                pchnl->chnl_type = CHNL_PCPY;
        }

        if (pchnl->pio_completions) {
                free_chirp_list(pchnl->pio_completions);
                pchnl->pio_completions = NULL;
        }
        if (pchnl->pio_requests) {
                free_chirp_list(pchnl->pio_requests);
                pchnl->pio_requests = NULL;
        }
        if (pchnl->free_packets_list) {
                free_chirp_list(pchnl->free_packets_list);
                pchnl->free_packets_list = NULL;
        }
        if (pchnl->ntfy_obj) {
                ntfy_delete(pchnl->ntfy_obj);
                kfree(pchnl->ntfy_obj);
                pchnl->ntfy_obj = NULL;
        }

        /* Insert channel object in channel manager: */
        chnl_mgr_obj->ap_channel[pchnl->chnl_id] = pchnl;
        spin_lock_bh(&chnl_mgr_obj->chnl_mgr_lock);
        chnl_mgr_obj->open_channels++;
        spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock);
        /* Return result... */
        pchnl->dw_state = CHNL_STATEREADY;

        DBC_ENSURE((!status && pchnl) || (*chnl == NULL));
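
/*
 * Note on channel state: bridge_chnl_open() deliberately leaves dw_state at
 * CHNL_STATECANCEL while the queues are being built and only switches it to
 * CHNL_STATEREADY at the very end. bridge_chnl_add_io_req() refuses to queue
 * chirps on any channel whose state is not CHNL_STATEREADY, so a half-built
 * channel can never receive I/O.
 */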

/*
 *  ======== bridge_chnl_register_notify ========
 *      Registers for events on a particular channel.
 */
int bridge_chnl_register_notify(struct chnl_object *chnl_obj,
                                u32 event_mask, u32 notify_type,
                                struct dsp_notification *hnotification)
{
        DBC_ASSERT(!(event_mask & ~(DSP_STREAMDONE | DSP_STREAMIOCOMPLETION)));

        status = ntfy_register(chnl_obj->ntfy_obj, hnotification,
                               event_mask, notify_type);

        status = ntfy_unregister(chnl_obj->ntfy_obj, hnotification);

/*
 *  ======== create_chirp_list ========
 *      Initialize a queue of channel I/O Request/Completion packets.
 *      chirps: Number of Chirps to allocate.
 *      Returns: Pointer to queue of IRPs, or NULL.
 */
static struct lst_list *create_chirp_list(u32 chirps)
{
        struct lst_list *chirp_list;
        struct chnl_irp *chnl_packet_obj;

        chirp_list = kzalloc(sizeof(struct lst_list), GFP_KERNEL);

        INIT_LIST_HEAD(&chirp_list->head);
        /* Make N chirps and place on queue. */
        for (i = 0; (i < chirps)
             && ((chnl_packet_obj = make_new_chirp()) != NULL); i++) {
                lst_put_tail(chirp_list,
                             (struct list_head *)chnl_packet_obj);
        }

        /* If we couldn't allocate all chirps, free those allocated: */
        free_chirp_list(chirp_list);
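
/*
 * Note: the chirps created here are recycled for the life of a channel:
 * bridge_chnl_add_io_req() takes one from free_packets_list and queues it on
 * pio_requests; the I/O dispatch side (outside this file) or
 * bridge_chnl_cancel_io() moves it to pio_completions; and
 * bridge_chnl_get_ioc() finally returns it to free_packets_list.
 */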

/*
 *  ======== free_chirp_list ========
 *      Free the queue of Chirps.
 */
static void free_chirp_list(struct lst_list *chirp_list)
{
        DBC_REQUIRE(chirp_list != NULL);

        while (!LST_IS_EMPTY(chirp_list))
                kfree(lst_get_head(chirp_list));

/*
 *  ======== make_new_chirp ========
 *      Allocate the memory for a new channel IRP.
 */
static struct chnl_irp *make_new_chirp(void)
{
        struct chnl_irp *chnl_packet_obj;

        chnl_packet_obj = kzalloc(sizeof(struct chnl_irp), GFP_KERNEL);
        if (chnl_packet_obj != NULL) {
                /* lst_init_elem only resets the list's member values. */
                lst_init_elem(&chnl_packet_obj->link);
        }

        return chnl_packet_obj;

/*
 *  ======== search_free_channel ========
 *      Search for a free channel slot in the array of channel pointers.
 */
static int search_free_channel(struct chnl_mgr *chnl_mgr_obj,
                               u32 *chnl_id)
{
        int status = -ENOSR;

        DBC_REQUIRE(chnl_mgr_obj);

        for (i = 0; i < chnl_mgr_obj->max_channels; i++) {
                if (chnl_mgr_obj->ap_channel[i] == NULL) {