/******************************************************************************

    AudioScience HPI driver
    Copyright (C) 1997-2010  AudioScience Inc. <support@audioscience.com>

    This program is free software; you can redistribute it and/or modify
    it under the terms of version 2 of the GNU General Public License as
    published by the Free Software Foundation;

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA

 Extended Message Function With Response Caching

 (C) Copyright AudioScience Inc. 2002
*****************************************************************************/
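
/*
 * This layer sits between the generic HPI entry point and the adapter
 * back ends.  Adapter, mixer and stream OPEN responses are captured once
 * at adapter-create time (see adapter_prepare()) and cached in the
 * rESP_HPI_* tables below, so later opens can be answered without another
 * trip to the hardware.
 */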
#define SOURCEFILE_NAME "hpimsgx.c"
#include "hpi_internal.h"
#include "hpimsginit.h"
#include "hpimsgx.h"
#include "hpidebug.h"

static struct pci_device_id asihpi_pci_tbl[] = {
#include "hpipcida.h"
};

static struct hpios_spinlock msgx_lock;

static hpi_handler_func *hpi_entry_points[HPI_MAX_ADAPTERS];
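
/*
 * hpi_entry_points[] maps an adapter index to the back-end handler
 * (for example HPI_6000 or HPI_6205) that services messages for that
 * adapter.  The mapping is set up in HPIMSGX__init() from the PCI ID
 * table above, whose driver_data field carries the handler function.
 */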

static hpi_handler_func *hpi_lookup_entry_point_function(const struct hpi_pci
	*pci_info)
{

	int i;

	for (i = 0; asihpi_pci_tbl[i].vendor != 0; i++) {
		if (asihpi_pci_tbl[i].vendor != PCI_ANY_ID
			&& asihpi_pci_tbl[i].vendor != pci_info->vendor_id)
			continue;
		if (asihpi_pci_tbl[i].device != PCI_ANY_ID
			&& asihpi_pci_tbl[i].device != pci_info->device_id)
			continue;
		if (asihpi_pci_tbl[i].subvendor != PCI_ANY_ID
			&& asihpi_pci_tbl[i].subvendor !=
			pci_info->subsys_vendor_id)
			continue;
		if (asihpi_pci_tbl[i].subdevice != PCI_ANY_ID
			&& asihpi_pci_tbl[i].subdevice !=
			pci_info->subsys_device_id)
			continue;

		HPI_DEBUG_LOG(DEBUG, " %x,%lu\n", i,
			asihpi_pci_tbl[i].driver_data);
		return (hpi_handler_func *) asihpi_pci_tbl[i].driver_data;
	}

	return NULL;
}
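
/*
 * Dispatch a message to the back end registered for the destination
 * adapter, or fail it with HPI_ERROR_PROCESSING_MESSAGE if no entry
 * point has been registered for that adapter index.
 */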

static inline void hw_entry_point(struct hpi_message *phm,
	struct hpi_response *phr)
{

	hpi_handler_func *ep;

	if (phm->adapter_index < HPI_MAX_ADAPTERS) {
		ep = (hpi_handler_func *) hpi_entry_points[phm->
			adapter_index];
		if (ep) {
			HPI_DEBUG_MESSAGE(DEBUG, phm);
			ep(phm, phr);
			HPI_DEBUG_RESPONSE(phr);
			return;
		}
	}
	hpi_init_response(phr, phm->object, phm->function,
		HPI_ERROR_PROCESSING_MESSAGE);
}

static void adapter_open(struct hpi_message *phm, struct hpi_response *phr);
static void adapter_close(struct hpi_message *phm, struct hpi_response *phr);

static void mixer_open(struct hpi_message *phm, struct hpi_response *phr);
static void mixer_close(struct hpi_message *phm, struct hpi_response *phr);

static void outstream_open(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner);
static void outstream_close(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner);
static void instream_open(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner);
static void instream_close(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner);

static void HPIMSGX__reset(u16 adapter_index);
static u16 HPIMSGX__init(struct hpi_message *phm, struct hpi_response *phr);
static void HPIMSGX__cleanup(u16 adapter_index, void *h_owner);

#ifndef DISABLE_PRAGMA_PACK1
#pragma pack(push, 1)
#endif

struct hpi_subsys_response {
	struct hpi_response_header h;
	struct hpi_subsys_res s;
};

struct hpi_adapter_response {
	struct hpi_response_header h;
	struct hpi_adapter_res a;
};

struct hpi_mixer_response {
	struct hpi_response_header h;
	struct hpi_mixer_res m;
};

struct hpi_stream_response {
	struct hpi_response_header h;
	struct hpi_stream_res d;
};

struct adapter_info {
	u16 type;
	u16 num_instreams;
	u16 num_outstreams;
};

struct asi_open_state {
	int open_flag;
	void *h_owner;
};

#ifndef DISABLE_PRAGMA_PACK1
#pragma pack(pop)
#endif

/* Globals */
static struct hpi_adapter_response rESP_HPI_ADAPTER_OPEN[HPI_MAX_ADAPTERS];

static struct hpi_stream_response
	rESP_HPI_OSTREAM_OPEN[HPI_MAX_ADAPTERS][HPI_MAX_STREAMS];

static struct hpi_stream_response
	rESP_HPI_ISTREAM_OPEN[HPI_MAX_ADAPTERS][HPI_MAX_STREAMS];

static struct hpi_mixer_response rESP_HPI_MIXER_OPEN[HPI_MAX_ADAPTERS];

static struct hpi_subsys_response gRESP_HPI_SUBSYS_FIND_ADAPTERS;

static struct adapter_info aDAPTER_INFO[HPI_MAX_ADAPTERS];

/* use these to keep track of opens from user mode apps/DLLs */
static struct asi_open_state
	outstream_user_open[HPI_MAX_ADAPTERS][HPI_MAX_STREAMS];

static struct asi_open_state
	instream_user_open[HPI_MAX_ADAPTERS][HPI_MAX_STREAMS];
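
/*
 * Route subsystem-level messages: version and adapter-list queries are
 * answered locally from the cached data, driver load/unload goes through
 * HPI_COMMON(), and adapter create/delete is handled by HPIMSGX__init()
 * and HPIMSGX__cleanup().
 */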

static void subsys_message(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{
	switch (phm->function) {
	case HPI_SUBSYS_GET_VERSION:
		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM,
			HPI_SUBSYS_GET_VERSION, 0);
		phr->u.s.version = HPI_VER >> 8;	/* return major.minor */
		phr->u.s.data = HPI_VER;	/* return major.minor.release */
		break;
	case HPI_SUBSYS_OPEN:
		/*do not propagate the message down the chain */
		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM, HPI_SUBSYS_OPEN, 0);
		break;
	case HPI_SUBSYS_CLOSE:
		/*do not propagate the message down the chain */
		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM, HPI_SUBSYS_CLOSE,
			0);
		HPIMSGX__cleanup(HPIMSGX_ALLADAPTERS, h_owner);
		break;
	case HPI_SUBSYS_DRIVER_LOAD:
		/* Initialize this module's internal state */
		hpios_msgxlock_init(&msgx_lock);
		memset(&hpi_entry_points, 0, sizeof(hpi_entry_points));
		hpios_locked_mem_init();
		/* Init subsys_findadapters response to no-adapters */
		HPIMSGX__reset(HPIMSGX_ALLADAPTERS);
		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM,
			HPI_SUBSYS_DRIVER_LOAD, 0);
		/* individual HPIs don't implement driver load */
		HPI_COMMON(phm, phr);
		break;
	case HPI_SUBSYS_DRIVER_UNLOAD:
		HPI_COMMON(phm, phr);
		HPIMSGX__cleanup(HPIMSGX_ALLADAPTERS, h_owner);
		hpios_locked_mem_free_all();
		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM,
			HPI_SUBSYS_DRIVER_UNLOAD, 0);
		return;

	case HPI_SUBSYS_GET_INFO:
		HPI_COMMON(phm, phr);
		break;

	case HPI_SUBSYS_FIND_ADAPTERS:
		memcpy(phr, &gRESP_HPI_SUBSYS_FIND_ADAPTERS,
			sizeof(gRESP_HPI_SUBSYS_FIND_ADAPTERS));
		break;
	case HPI_SUBSYS_GET_NUM_ADAPTERS:
		memcpy(phr, &gRESP_HPI_SUBSYS_FIND_ADAPTERS,
			sizeof(gRESP_HPI_SUBSYS_FIND_ADAPTERS));
		phr->function = HPI_SUBSYS_GET_NUM_ADAPTERS;
		break;
	case HPI_SUBSYS_GET_ADAPTER:
		{
			int count = phm->adapter_index;
			int index = 0;
			hpi_init_response(phr, HPI_OBJ_SUBSYSTEM,
				HPI_SUBSYS_GET_ADAPTER, 0);

			/* This is complicated by the fact that we want to
			 * "skip" 0's in the adapter list.
			 * First, make sure we are pointing to a
			 * non-zero adapter type.
			 */
			while (gRESP_HPI_SUBSYS_FIND_ADAPTERS.
				s.aw_adapter_list[index] == 0) {
				index++;
				if (index >= HPI_MAX_ADAPTERS)
					break;
			}
			while (count) {
				/* move on to the next adapter */
				index++;
				if (index >= HPI_MAX_ADAPTERS)
					break;
				while (gRESP_HPI_SUBSYS_FIND_ADAPTERS.
					s.aw_adapter_list[index] == 0) {
					index++;
					if (index >= HPI_MAX_ADAPTERS)
						break;
				}
				count--;
			}

			if (index < HPI_MAX_ADAPTERS) {
				phr->u.s.adapter_index = (u16)index;
				phr->u.s.aw_adapter_list[0] =
					gRESP_HPI_SUBSYS_FIND_ADAPTERS.
					s.aw_adapter_list[index];
			} else {
				phr->u.s.adapter_index = 0;
				phr->u.s.aw_adapter_list[0] = 0;
				phr->error = HPI_ERROR_BAD_ADAPTER_NUMBER;
			}
			break;
		}
	case HPI_SUBSYS_CREATE_ADAPTER:
		HPIMSGX__init(phm, phr);
		break;
	case HPI_SUBSYS_DELETE_ADAPTER:
		HPIMSGX__cleanup(phm->adapter_index, h_owner);
		{
			struct hpi_message hm;
			struct hpi_response hr;
			/* call to HPI_ADAPTER_CLOSE */
			hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
				HPI_ADAPTER_CLOSE);
			hm.adapter_index = phm->adapter_index;
			hw_entry_point(&hm, &hr);
		}
		hw_entry_point(phm, phr);
		gRESP_HPI_SUBSYS_FIND_ADAPTERS.s.
			aw_adapter_list[phm->adapter_index] = 0;
		hpi_entry_points[phm->adapter_index] = NULL;
		break;
	default:
		hw_entry_point(phm, phr);
		break;
	}
}

static void adapter_message(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{
	switch (phm->function) {
	case HPI_ADAPTER_OPEN:
		adapter_open(phm, phr);
		break;
	case HPI_ADAPTER_CLOSE:
		adapter_close(phm, phr);
		break;
	default:
		hw_entry_point(phm, phr);
		break;
	}
}

static void mixer_message(struct hpi_message *phm, struct hpi_response *phr)
{
	switch (phm->function) {
	case HPI_MIXER_OPEN:
		mixer_open(phm, phr);
		break;
	case HPI_MIXER_CLOSE:
		mixer_close(phm, phr);
		break;
	default:
		hw_entry_point(phm, phr);
		break;
	}
}

static void outstream_message(struct hpi_message *phm,
	struct hpi_response *phr, void *h_owner)
{
	if (phm->obj_index >= aDAPTER_INFO[phm->adapter_index].num_outstreams) {
		hpi_init_response(phr, HPI_OBJ_OSTREAM, phm->function,
			HPI_ERROR_INVALID_OBJ_INDEX);
		return;
	}

	switch (phm->function) {
	case HPI_OSTREAM_OPEN:
		outstream_open(phm, phr, h_owner);
		break;
	case HPI_OSTREAM_CLOSE:
		outstream_close(phm, phr, h_owner);
		break;
	default:
		hw_entry_point(phm, phr);
		break;
	}
}

static void instream_message(struct hpi_message *phm,
	struct hpi_response *phr, void *h_owner)
{
	if (phm->obj_index >= aDAPTER_INFO[phm->adapter_index].num_instreams) {
		hpi_init_response(phr, HPI_OBJ_ISTREAM, phm->function,
			HPI_ERROR_INVALID_OBJ_INDEX);
		return;
	}

	switch (phm->function) {
	case HPI_ISTREAM_OPEN:
		instream_open(phm, phr, h_owner);
		break;
	case HPI_ISTREAM_CLOSE:
		instream_close(phm, phr, h_owner);
		break;
	default:
		hw_entry_point(phm, phr);
		break;
	}
}

/* NOTE: HPI_Message() must be defined in the driver as a wrapper for
 * HPI_MessageEx so that functions in hpifunc.c compile.
 */
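
/*
 * For illustration only; the real wrapper lives outside this file, and the
 * name hpi_send_recv and the owner token used here are assumptions rather
 * than this driver's confirmed definitions.  Such a wrapper can be as thin
 * as:
 *
 *	void hpi_send_recv(struct hpi_message *phm, struct hpi_response *phr)
 *	{
 *		hpi_send_recv_ex(phm, phr, HOWNER_KERNEL);
 *	}
 */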

void hpi_send_recv_ex(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{
	HPI_DEBUG_MESSAGE(DEBUG, phm);

	if (phm->type != HPI_TYPE_MESSAGE) {
		hpi_init_response(phr, phm->object, phm->function,
			HPI_ERROR_INVALID_TYPE);
		return;
	}

	if (phm->adapter_index >= HPI_MAX_ADAPTERS
		&& phm->adapter_index != HPIMSGX_ALLADAPTERS) {
		hpi_init_response(phr, phm->object, phm->function,
			HPI_ERROR_BAD_ADAPTER_NUMBER);
		return;
	}

	switch (phm->object) {
	case HPI_OBJ_SUBSYSTEM:
		subsys_message(phm, phr, h_owner);
		break;

	case HPI_OBJ_ADAPTER:
		adapter_message(phm, phr, h_owner);
		break;

	case HPI_OBJ_MIXER:
		mixer_message(phm, phr);
		break;

	case HPI_OBJ_OSTREAM:
		outstream_message(phm, phr, h_owner);
		break;

	case HPI_OBJ_ISTREAM:
		instream_message(phm, phr, h_owner);
		break;

	default:
		hw_entry_point(phm, phr);
		break;
	}
	HPI_DEBUG_RESPONSE(phr);

	if (phr->error >= HPI_ERROR_BACKEND_BASE) {
		void *ep = NULL;
		char *ep_name;

		HPI_DEBUG_MESSAGE(ERROR, phm);

		if (phm->adapter_index < HPI_MAX_ADAPTERS)
			ep = hpi_entry_points[phm->adapter_index];

		/* Don't need this? Have adapter index in debug info
		   Know at driver load time index->backend mapping */
		if (ep == HPI_6000)
			ep_name = "HPI_6000";
		else if (ep == HPI_6205)
			ep_name = "HPI_6205";
		else
			ep_name = "unknown";

		HPI_DEBUG_LOG(ERROR, "HPI %s response - error# %d\n", ep_name,
			phr->error);

		if (hpi_debug_level >= HPI_DEBUG_LEVEL_VERBOSE)
			hpi_debug_data((u16 *)phm,
				sizeof(*phm) / sizeof(u16));
	}
}
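
/*
 * The open/close helpers below answer from the responses cached by
 * adapter_prepare(); only stream opens and closes touch the hardware
 * again, and then only to issue a reset.
 */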

static void adapter_open(struct hpi_message *phm, struct hpi_response *phr)
{
	HPI_DEBUG_LOG(VERBOSE, "adapter_open\n");
	memcpy(phr, &rESP_HPI_ADAPTER_OPEN[phm->adapter_index],
		sizeof(rESP_HPI_ADAPTER_OPEN[0]));
}

static void adapter_close(struct hpi_message *phm, struct hpi_response *phr)
{
	HPI_DEBUG_LOG(VERBOSE, "adapter_close\n");
	hpi_init_response(phr, HPI_OBJ_ADAPTER, HPI_ADAPTER_CLOSE, 0);
}

static void mixer_open(struct hpi_message *phm, struct hpi_response *phr)
{
	memcpy(phr, &rESP_HPI_MIXER_OPEN[phm->adapter_index],
		sizeof(rESP_HPI_MIXER_OPEN[0]));
}

static void mixer_close(struct hpi_message *phm, struct hpi_response *phr)
{
	hpi_init_response(phr, HPI_OBJ_MIXER, HPI_MIXER_CLOSE, 0);
}

static void instream_open(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{

	struct hpi_message hm;
	struct hpi_response hr;

	hpi_init_response(phr, HPI_OBJ_ISTREAM, HPI_ISTREAM_OPEN, 0);

	hpios_msgxlock_lock(&msgx_lock);

	if (instream_user_open[phm->adapter_index][phm->obj_index].open_flag)
		phr->error = HPI_ERROR_OBJ_ALREADY_OPEN;
	else if (rESP_HPI_ISTREAM_OPEN[phm->adapter_index]
		[phm->obj_index].h.error)
		memcpy(phr,
			&rESP_HPI_ISTREAM_OPEN[phm->adapter_index][phm->
				obj_index],
			sizeof(rESP_HPI_ISTREAM_OPEN[0][0]));
	else {
		instream_user_open[phm->adapter_index][phm->
			obj_index].open_flag = 1;
		hpios_msgxlock_un_lock(&msgx_lock);

		/* issue a reset */
		hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
			HPI_ISTREAM_RESET);
		hm.adapter_index = phm->adapter_index;
		hm.obj_index = phm->obj_index;
		hw_entry_point(&hm, &hr);

		hpios_msgxlock_lock(&msgx_lock);
		if (hr.error) {
			instream_user_open[phm->adapter_index][phm->
				obj_index].open_flag = 0;
			phr->error = hr.error;
		} else {
			instream_user_open[phm->adapter_index][phm->
				obj_index].open_flag = 1;
			instream_user_open[phm->adapter_index][phm->
				obj_index].h_owner = h_owner;
			memcpy(phr,
				&rESP_HPI_ISTREAM_OPEN[phm->adapter_index]
				[phm->obj_index],
				sizeof(rESP_HPI_ISTREAM_OPEN[0][0]));
		}
	}
	hpios_msgxlock_un_lock(&msgx_lock);
}

static void instream_close(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{

	struct hpi_message hm;
	struct hpi_response hr;

	hpi_init_response(phr, HPI_OBJ_ISTREAM, HPI_ISTREAM_CLOSE, 0);

	hpios_msgxlock_lock(&msgx_lock);
	if (h_owner ==
		instream_user_open[phm->adapter_index][phm->
			obj_index].h_owner) {
		/* HPI_DEBUG_LOG(INFO,"closing adapter %d "
		   "instream %d owned by %p\n",
		   phm->wAdapterIndex, phm->wObjIndex, hOwner); */
		instream_user_open[phm->adapter_index][phm->
			obj_index].h_owner = NULL;
		hpios_msgxlock_un_lock(&msgx_lock);
		/* issue a reset */
		hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
			HPI_ISTREAM_RESET);
		hm.adapter_index = phm->adapter_index;
		hm.obj_index = phm->obj_index;
		hw_entry_point(&hm, &hr);
		hpios_msgxlock_lock(&msgx_lock);
		if (hr.error) {
			instream_user_open[phm->adapter_index][phm->
				obj_index].h_owner = h_owner;
			phr->error = hr.error;
		} else {
			instream_user_open[phm->adapter_index][phm->
				obj_index].open_flag = 0;
			instream_user_open[phm->adapter_index][phm->
				obj_index].h_owner = NULL;
		}
	} else {
		HPI_DEBUG_LOG(WARNING,
			"%p trying to close %d instream %d owned by %p\n",
			h_owner, phm->adapter_index, phm->obj_index,
			instream_user_open[phm->adapter_index][phm->
				obj_index].h_owner);
		phr->error = HPI_ERROR_OBJ_NOT_OPEN;
	}
	hpios_msgxlock_un_lock(&msgx_lock);
}

static void outstream_open(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{

	struct hpi_message hm;
	struct hpi_response hr;

	hpi_init_response(phr, HPI_OBJ_OSTREAM, HPI_OSTREAM_OPEN, 0);

	hpios_msgxlock_lock(&msgx_lock);

	if (outstream_user_open[phm->adapter_index][phm->obj_index].open_flag)
		phr->error = HPI_ERROR_OBJ_ALREADY_OPEN;
	else if (rESP_HPI_OSTREAM_OPEN[phm->adapter_index]
		[phm->obj_index].h.error)
		memcpy(phr,
			&rESP_HPI_OSTREAM_OPEN[phm->adapter_index][phm->
				obj_index],
			sizeof(rESP_HPI_OSTREAM_OPEN[0][0]));
	else {
		outstream_user_open[phm->adapter_index][phm->
			obj_index].open_flag = 1;
		hpios_msgxlock_un_lock(&msgx_lock);

		/* issue a reset */
		hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
			HPI_OSTREAM_RESET);
		hm.adapter_index = phm->adapter_index;
		hm.obj_index = phm->obj_index;
		hw_entry_point(&hm, &hr);

		hpios_msgxlock_lock(&msgx_lock);
		if (hr.error) {
			outstream_user_open[phm->adapter_index][phm->
				obj_index].open_flag = 0;
			phr->error = hr.error;
		} else {
			outstream_user_open[phm->adapter_index][phm->
				obj_index].open_flag = 1;
			outstream_user_open[phm->adapter_index][phm->
				obj_index].h_owner = h_owner;
			memcpy(phr,
				&rESP_HPI_OSTREAM_OPEN[phm->adapter_index]
				[phm->obj_index],
				sizeof(rESP_HPI_OSTREAM_OPEN[0][0]));
		}
	}
	hpios_msgxlock_un_lock(&msgx_lock);
}

static void outstream_close(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{

	struct hpi_message hm;
	struct hpi_response hr;

	hpi_init_response(phr, HPI_OBJ_OSTREAM, HPI_OSTREAM_CLOSE, 0);

	hpios_msgxlock_lock(&msgx_lock);

	if (h_owner ==
		outstream_user_open[phm->adapter_index][phm->
			obj_index].h_owner) {
		/* HPI_DEBUG_LOG(INFO,"closing adapter %d "
		   "outstream %d owned by %p\n",
		   phm->wAdapterIndex, phm->wObjIndex, hOwner); */
		outstream_user_open[phm->adapter_index][phm->
			obj_index].h_owner = NULL;
		hpios_msgxlock_un_lock(&msgx_lock);
		/* issue a reset */
		hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
			HPI_OSTREAM_RESET);
		hm.adapter_index = phm->adapter_index;
		hm.obj_index = phm->obj_index;
		hw_entry_point(&hm, &hr);
		hpios_msgxlock_lock(&msgx_lock);
		if (hr.error) {
			outstream_user_open[phm->adapter_index][phm->
				obj_index].h_owner = h_owner;
			phr->error = hr.error;
		} else {
			outstream_user_open[phm->adapter_index][phm->
				obj_index].open_flag = 0;
			outstream_user_open[phm->adapter_index][phm->
				obj_index].h_owner = NULL;
		}
	} else {
		HPI_DEBUG_LOG(WARNING,
			"%p trying to close %d outstream %d owned by %p\n",
			h_owner, phm->adapter_index, phm->obj_index,
			outstream_user_open[phm->adapter_index][phm->
				obj_index].h_owner);
		phr->error = HPI_ERROR_OBJ_NOT_OPEN;
	}
	hpios_msgxlock_un_lock(&msgx_lock);
}
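
/*
 * adapter_prepare() runs once per newly created adapter: it opens the
 * adapter, queries its stream counts, pre-opens every stream and the
 * mixer, and caches those responses for the open helpers above.
 */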

static u16 adapter_prepare(u16 adapter)
{
	struct hpi_message hm;
	struct hpi_response hr;

	/* Open the adapter and streams */
	u16 i;

	/* call to HPI_ADAPTER_OPEN */
	hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
		HPI_ADAPTER_OPEN);
	hm.adapter_index = adapter;
	hw_entry_point(&hm, &hr);
	memcpy(&rESP_HPI_ADAPTER_OPEN[adapter], &hr,
		sizeof(rESP_HPI_ADAPTER_OPEN[0]));
	if (hr.error)
		return hr.error;

	/* call to HPI_ADAPTER_GET_INFO */
	hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
		HPI_ADAPTER_GET_INFO);
	hm.adapter_index = adapter;
	hw_entry_point(&hm, &hr);
	if (hr.error)
		return hr.error;

	aDAPTER_INFO[adapter].num_outstreams = hr.u.a.num_outstreams;
	aDAPTER_INFO[adapter].num_instreams = hr.u.a.num_instreams;
	aDAPTER_INFO[adapter].type = hr.u.a.adapter_type;

	gRESP_HPI_SUBSYS_FIND_ADAPTERS.s.aw_adapter_list[adapter] =
		hr.u.a.adapter_type;
	gRESP_HPI_SUBSYS_FIND_ADAPTERS.s.num_adapters++;
	if (gRESP_HPI_SUBSYS_FIND_ADAPTERS.s.num_adapters > HPI_MAX_ADAPTERS)
		gRESP_HPI_SUBSYS_FIND_ADAPTERS.s.num_adapters =
			HPI_MAX_ADAPTERS;

	/* call to HPI_OSTREAM_OPEN */
	for (i = 0; i < aDAPTER_INFO[adapter].num_outstreams; i++) {
		hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
			HPI_OSTREAM_OPEN);
		hm.adapter_index = adapter;
		hm.obj_index = i;
		hw_entry_point(&hm, &hr);
		memcpy(&rESP_HPI_OSTREAM_OPEN[adapter][i], &hr,
			sizeof(rESP_HPI_OSTREAM_OPEN[0][0]));
		outstream_user_open[adapter][i].open_flag = 0;
		outstream_user_open[adapter][i].h_owner = NULL;
	}

	/* call to HPI_ISTREAM_OPEN */
	for (i = 0; i < aDAPTER_INFO[adapter].num_instreams; i++) {
		hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
			HPI_ISTREAM_OPEN);
		hm.adapter_index = adapter;
		hm.obj_index = i;
		hw_entry_point(&hm, &hr);
		memcpy(&rESP_HPI_ISTREAM_OPEN[adapter][i], &hr,
			sizeof(rESP_HPI_ISTREAM_OPEN[0][0]));
		instream_user_open[adapter][i].open_flag = 0;
		instream_user_open[adapter][i].h_owner = NULL;
	}

	/* call to HPI_MIXER_OPEN */
	hpi_init_message_response(&hm, &hr, HPI_OBJ_MIXER, HPI_MIXER_OPEN);
	hm.adapter_index = adapter;
	hw_entry_point(&hm, &hr);
	memcpy(&rESP_HPI_MIXER_OPEN[adapter], &hr,
		sizeof(rESP_HPI_MIXER_OPEN[0]));

	return gRESP_HPI_SUBSYS_FIND_ADAPTERS.h.error;
}
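
/*
 * Reset the cached responses, either for one adapter or for the whole
 * table (HPIMSGX_ALLADAPTERS), so every object reports bad-adapter or
 * invalid-object until an adapter is created again.
 */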

static void HPIMSGX__reset(u16 adapter_index)
{
	int i;
	u16 adapter;
	struct hpi_response hr;

	if (adapter_index == HPIMSGX_ALLADAPTERS) {
		/* reset all responses to contain errors */
		hpi_init_response(&hr, HPI_OBJ_SUBSYSTEM,
			HPI_SUBSYS_FIND_ADAPTERS, 0);
		memcpy(&gRESP_HPI_SUBSYS_FIND_ADAPTERS, &hr,
			sizeof(gRESP_HPI_SUBSYS_FIND_ADAPTERS));

		for (adapter = 0; adapter < HPI_MAX_ADAPTERS; adapter++) {

			hpi_init_response(&hr, HPI_OBJ_ADAPTER,
				HPI_ADAPTER_OPEN, HPI_ERROR_BAD_ADAPTER);
			memcpy(&rESP_HPI_ADAPTER_OPEN[adapter], &hr,
				sizeof(rESP_HPI_ADAPTER_OPEN[adapter]));

			hpi_init_response(&hr, HPI_OBJ_MIXER, HPI_MIXER_OPEN,
				HPI_ERROR_INVALID_OBJ);
			memcpy(&rESP_HPI_MIXER_OPEN[adapter], &hr,
				sizeof(rESP_HPI_MIXER_OPEN[adapter]));

			for (i = 0; i < HPI_MAX_STREAMS; i++) {
				hpi_init_response(&hr, HPI_OBJ_OSTREAM,
					HPI_OSTREAM_OPEN,
					HPI_ERROR_INVALID_OBJ);
				memcpy(&rESP_HPI_OSTREAM_OPEN[adapter][i],
					&hr,
					sizeof(rESP_HPI_OSTREAM_OPEN[adapter]
						[i]));
				hpi_init_response(&hr, HPI_OBJ_ISTREAM,
					HPI_ISTREAM_OPEN,
					HPI_ERROR_INVALID_OBJ);
				memcpy(&rESP_HPI_ISTREAM_OPEN[adapter][i],
					&hr,
					sizeof(rESP_HPI_ISTREAM_OPEN[adapter]
						[i]));
			}
		}
	} else if (adapter_index < HPI_MAX_ADAPTERS) {
		rESP_HPI_ADAPTER_OPEN[adapter_index].h.error =
			HPI_ERROR_BAD_ADAPTER;
		rESP_HPI_MIXER_OPEN[adapter_index].h.error =
			HPI_ERROR_INVALID_OBJ;
		for (i = 0; i < HPI_MAX_STREAMS; i++) {
			rESP_HPI_OSTREAM_OPEN[adapter_index][i].h.error =
				HPI_ERROR_INVALID_OBJ;
			rESP_HPI_ISTREAM_OPEN[adapter_index][i].h.error =
				HPI_ERROR_INVALID_OBJ;
		}
		if (gRESP_HPI_SUBSYS_FIND_ADAPTERS.
			s.aw_adapter_list[adapter_index]) {
			gRESP_HPI_SUBSYS_FIND_ADAPTERS.
				s.aw_adapter_list[adapter_index] = 0;
			gRESP_HPI_SUBSYS_FIND_ADAPTERS.s.num_adapters--;
		}
	}
}

static u16 HPIMSGX__init(struct hpi_message *phm,
	/* HPI_SUBSYS_CREATE_ADAPTER structure with */
	/* resource list or NULL=find all */
	struct hpi_response *phr
	/* response from HPI_ADAPTER_GET_INFO */
	)
{
	hpi_handler_func *entry_point_func;
	struct hpi_response hr;

	if (gRESP_HPI_SUBSYS_FIND_ADAPTERS.s.num_adapters >= HPI_MAX_ADAPTERS)
		return HPI_ERROR_BAD_ADAPTER_NUMBER;

	/* Init response here so we can pass in previous adapter list */
	hpi_init_response(&hr, phm->object, phm->function,
		HPI_ERROR_INVALID_OBJ);
	memcpy(hr.u.s.aw_adapter_list,
		gRESP_HPI_SUBSYS_FIND_ADAPTERS.s.aw_adapter_list,
		sizeof(gRESP_HPI_SUBSYS_FIND_ADAPTERS.s.aw_adapter_list));

	entry_point_func =
		hpi_lookup_entry_point_function(phm->u.s.resource.r.pci);

	if (entry_point_func) {
		HPI_DEBUG_MESSAGE(DEBUG, phm);
		entry_point_func(phm, &hr);
	} else {
		phr->error = HPI_ERROR_PROCESSING_MESSAGE;
		return phr->error;
	}
	if (hr.error == 0) {
		/* the adapter was created successfully
		   save the mapping for future use */
		hpi_entry_points[hr.u.s.adapter_index] = entry_point_func;
		/* prepare adapter (pre-open streams etc.) */
		HPI_DEBUG_LOG(DEBUG,
			"HPI_SUBSYS_CREATE_ADAPTER successful,"
			" preparing adapter\n");
		adapter_prepare(hr.u.s.adapter_index);
	}
	memcpy(phr, &hr, hr.size);
	return phr->error;
}
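
/*
 * Release everything still held by h_owner: any streams left open by that
 * client are reset, their host buffers freed and group assignments
 * cleared, then the open-state bookkeeping is cleared.
 */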

static void HPIMSGX__cleanup(u16 adapter_index, void *h_owner)
{
	int i, adapter, adapter_limit;

	if (!h_owner)
		return;

	if (adapter_index == HPIMSGX_ALLADAPTERS) {
		adapter = 0;
		adapter_limit = HPI_MAX_ADAPTERS;
	} else {
		adapter = adapter_index;
		adapter_limit = adapter + 1;
	}

	for (; adapter < adapter_limit; adapter++) {
		/* printk(KERN_INFO "Cleanup adapter #%d\n",wAdapter); */
		for (i = 0; i < HPI_MAX_STREAMS; i++) {
			if (h_owner ==
				outstream_user_open[adapter][i].h_owner) {
				struct hpi_message hm;
				struct hpi_response hr;

				HPI_DEBUG_LOG(DEBUG,
					"close adapter %d ostream %d\n",
					adapter, i);

				hpi_init_message_response(&hm, &hr,
					HPI_OBJ_OSTREAM, HPI_OSTREAM_RESET);
				hm.adapter_index = (u16)adapter;
				hm.obj_index = (u16)i;
				hw_entry_point(&hm, &hr);

				hm.function = HPI_OSTREAM_HOSTBUFFER_FREE;
				hw_entry_point(&hm, &hr);

				hm.function = HPI_OSTREAM_GROUP_RESET;
				hw_entry_point(&hm, &hr);

				outstream_user_open[adapter][i].open_flag = 0;
				outstream_user_open[adapter][i].h_owner =
					NULL;
			}
			if (h_owner == instream_user_open[adapter][i].h_owner) {
				struct hpi_message hm;
				struct hpi_response hr;

				HPI_DEBUG_LOG(DEBUG,
					"close adapter %d istream %d\n",
					adapter, i);

				hpi_init_message_response(&hm, &hr,
					HPI_OBJ_ISTREAM, HPI_ISTREAM_RESET);
				hm.adapter_index = (u16)adapter;
				hm.obj_index = (u16)i;
				hw_entry_point(&hm, &hr);

				hm.function = HPI_ISTREAM_HOSTBUFFER_FREE;
				hw_entry_point(&hm, &hr);

				hm.function = HPI_ISTREAM_GROUP_RESET;
				hw_entry_point(&hm, &hr);

				instream_user_open[adapter][i].open_flag = 0;
				instream_user_open[adapter][i].h_owner = NULL;