/******************************************************************************
 *
 * Module Name: dsmethod - Parser/Interpreter interface - control method parsing
 *
 *****************************************************************************/

/*
 * Copyright (C) 2000 - 2005, R. Byron Moore
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 */
#include <acpi/acpi.h>
#include <acpi/acparser.h>
#include <acpi/amlcode.h>
#include <acpi/acdispat.h>
#include <acpi/acinterp.h>
#include <acpi/acnamesp.h>

#define _COMPONENT          ACPI_DISPATCHER
ACPI_MODULE_NAME("dsmethod")
/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_parse_method
 *
 * PARAMETERS:  Node            - Method node
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Parse the AML that is associated with the method.
 *
 * MUTEX:       Assumes parser is locked
 *
 ******************************************************************************/
acpi_status acpi_ds_parse_method(struct acpi_namespace_node *node)
{
	acpi_status status;
	union acpi_operand_object *obj_desc;
	union acpi_parse_object *op;
	struct acpi_walk_state *walk_state;

	ACPI_FUNCTION_TRACE_PTR("ds_parse_method", node);

	/* Parameter Validation */

	if (!node) {
		return_ACPI_STATUS(AE_NULL_ENTRY);
	}

	ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
			  "**** Parsing [%4.4s] **** named_obj=%p\n",
			  acpi_ut_get_node_name(node), node));

	/* Extract the method object from the method Node */

	obj_desc = acpi_ns_get_attached_object(node);
	if (!obj_desc) {
		return_ACPI_STATUS(AE_NULL_OBJECT);
	}

	/* Create a mutex for the method if there is a concurrency limit */

	if ((obj_desc->method.concurrency != ACPI_INFINITE_CONCURRENCY) &&
	    (!obj_desc->method.semaphore)) {
		status = acpi_os_create_semaphore(obj_desc->method.concurrency,
						  obj_desc->method.concurrency,
						  &obj_desc->method.semaphore);
		if (ACPI_FAILURE(status)) {
			return_ACPI_STATUS(status);
		}
	}

	/*
	 * Allocate a new parser op to be the root of the parsed
	 * method tree
	 */
	op = acpi_ps_alloc_op(AML_METHOD_OP);
	if (!op) {
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	/* Init new op with the method name and pointer back to the Node */

	acpi_ps_set_name(op, node->name.integer);
	op->common.node = node;

	/*
	 * Get a new owner_id for objects created by this method. Namespace
	 * objects (such as Operation Regions) can be created during the
	 * first pass parse.
	 */
	status = acpi_ut_allocate_owner_id(&obj_desc->method.owner_id);
	if (ACPI_FAILURE(status)) {
		goto cleanup;
	}

	/* Create and initialize a new walk state */

	walk_state =
	    acpi_ds_create_walk_state(obj_desc->method.owner_id, NULL, NULL,
				      NULL);
	if (!walk_state) {
		status = AE_NO_MEMORY;
		goto cleanup2;
	}

	status = acpi_ds_init_aml_walk(walk_state, op, node,
				       obj_desc->method.aml_start,
				       obj_desc->method.aml_length, NULL, 1);
	if (ACPI_FAILURE(status)) {
		acpi_ds_delete_walk_state(walk_state);
		goto cleanup2;
	}

	/*
	 * Parse the method, first pass
	 *
	 * The first pass load is where newly declared named objects are added into
	 * the namespace. Actual evaluation of the named objects (what would be
	 * called a "second pass") happens during the actual execution of the
	 * method so that operands to the named objects can take on dynamic
	 * run-time values.
	 */
	status = acpi_ps_parse_aml(walk_state);
	if (ACPI_FAILURE(status)) {
		goto cleanup2;
	}

	ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
			  "**** [%4.4s] Parsed **** named_obj=%p Op=%p\n",
			  acpi_ut_get_node_name(node), node, op));

	/*
	 * Delete the parse tree. We simply re-parse the method for every
	 * execution since there isn't much overhead (compared to keeping lots
	 * of parse trees around)
	 */
	acpi_ns_delete_namespace_subtree(node);
	acpi_ns_delete_namespace_by_owner(obj_desc->method.owner_id);
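
	/*
	 * Note: the namespace objects created by this first-pass parse are
	 * removed again here, and both the success path and the error paths
	 * converge on the labels below, which release the owner ID allocated
	 * above and delete the root parse Op.
	 */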

      cleanup2:
	acpi_ut_release_owner_id(&obj_desc->method.owner_id);

      cleanup:
	acpi_ps_delete_parse_tree(op);
	return_ACPI_STATUS(status);
}
/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_begin_method_execution
 *
 * PARAMETERS:  method_node         - Node of the method
 *              obj_desc            - The method object
 *              calling_method_node - Caller of this method (if non-null)
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Prepare a method for execution. Parses the method if necessary,
 *              increments the thread count, and waits at the method semaphore
 *              for clearance to execute.
 *
 ******************************************************************************/
acpi_status
acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
			       union acpi_operand_object *obj_desc,
			       struct acpi_namespace_node *calling_method_node)
{
	acpi_status status = AE_OK;

	ACPI_FUNCTION_TRACE_PTR("ds_begin_method_execution", method_node);

	if (!method_node) {
		return_ACPI_STATUS(AE_NULL_ENTRY);
	}

	/* Prevent wraparound of thread count */

	if (obj_desc->method.thread_count == ACPI_UINT8_MAX) {
		ACPI_REPORT_ERROR(("Method reached maximum reentrancy limit (255)\n"));
		return_ACPI_STATUS(AE_AML_METHOD_LIMIT);
	}

	/*
	 * If there is a concurrency limit on this method, we need to
	 * obtain a unit from the method semaphore.
	 */
	if (obj_desc->method.semaphore) {
		/*
		 * Allow recursive method calls, up to the reentrancy/concurrency
		 * limit imposed by the SERIALIZED rule and the sync_level method
		 * parameter.
		 *
		 * The point of this code is to avoid permanently blocking a
		 * thread that is making recursive method calls.
		 */
		if (method_node == calling_method_node) {
			if (obj_desc->method.thread_count >=
			    obj_desc->method.concurrency) {
				return_ACPI_STATUS(AE_AML_METHOD_LIMIT);
			}
		}

		/*
		 * Get a unit from the method semaphore. This releases the
		 * interpreter if we block
		 */
		status =
		    acpi_ex_system_wait_semaphore(obj_desc->method.semaphore,
						  ACPI_WAIT_FOREVER);
	}
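
	/*
	 * Illustrative example (not part of this file): a method declared as
	 *
	 *     Method (FOO, 1, Serialized) { ... }
	 *
	 * carries a concurrency limit of 1, so the semaphore wait above admits
	 * only one thread at a time; a not_serialized method normally has no
	 * semaphore and skips that block entirely.
	 */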

	/*
	 * Allocate an Owner ID for this method, only if this is the first thread
	 * to begin concurrent execution. We only need one owner_id, even if the
	 * method is invoked recursively.
	 */
	if (!obj_desc->method.owner_id) {
		status = acpi_ut_allocate_owner_id(&obj_desc->method.owner_id);
		if (ACPI_FAILURE(status)) {
			return_ACPI_STATUS(status);
		}
	}

	/*
	 * Increment the method parse tree thread count since it has been
	 * reentered one more time (even if it is the same thread)
	 */
	obj_desc->method.thread_count++;
	return_ACPI_STATUS(status);
}
/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_call_control_method
 *
 * PARAMETERS:  Thread              - Info for this thread
 *              this_walk_state     - Current walk state
 *              Op                  - Current Op to be walked
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Transfer execution to a called control method
 *
 ******************************************************************************/
acpi_status
acpi_ds_call_control_method(struct acpi_thread_state *thread,
			    struct acpi_walk_state *this_walk_state,
			    union acpi_parse_object *op)
{
	acpi_status status;
	struct acpi_namespace_node *method_node;
	struct acpi_walk_state *next_walk_state = NULL;
	union acpi_operand_object *obj_desc;
	struct acpi_parameter_info info;
	u32 i;

	ACPI_FUNCTION_TRACE_PTR("ds_call_control_method", this_walk_state);

	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
			  "Execute method %p, currentstate=%p\n",
			  this_walk_state->prev_op, this_walk_state));

	/*
	 * Get the namespace entry for the control method we are about to call
	 */
	method_node = this_walk_state->method_call_node;
	if (!method_node) {
		return_ACPI_STATUS(AE_NULL_ENTRY);
	}

	obj_desc = acpi_ns_get_attached_object(method_node);
	if (!obj_desc) {
		return_ACPI_STATUS(AE_NULL_OBJECT);
	}

	/* Init for new method, wait on concurrency semaphore */

	status = acpi_ds_begin_method_execution(method_node, obj_desc,
						this_walk_state->method_node);
	if (ACPI_FAILURE(status)) {
		goto cleanup;
	}
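
	/*
	 * Note: methods flagged AML_METHOD_INTERNAL_ONLY are implemented
	 * directly in C via the method.implementation callback invoked further
	 * below, rather than by AML, so the parse branch that follows is
	 * skipped for them.
	 */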
	if (!(obj_desc->method.method_flags & AML_METHOD_INTERNAL_ONLY)) {
		/* 1) Parse: Create a new walk state for the preempting walk */

		next_walk_state =
		    acpi_ds_create_walk_state(obj_desc->method.owner_id, op,
					      obj_desc, NULL);
		if (!next_walk_state) {
			return_ACPI_STATUS(AE_NO_MEMORY);
		}

		/* Create and init a Root Node */

		op = acpi_ps_create_scope_op();
		if (!op) {
			status = AE_NO_MEMORY;
			goto cleanup;
		}

		status = acpi_ds_init_aml_walk(next_walk_state, op, method_node,
					       obj_desc->method.aml_start,
					       obj_desc->method.aml_length,
					       NULL, 1);
		if (ACPI_FAILURE(status)) {
			acpi_ds_delete_walk_state(next_walk_state);
			goto cleanup;
		}

		/* Begin AML parse */

		status = acpi_ps_parse_aml(next_walk_state);
		acpi_ps_delete_parse_tree(op);
	}

	/* 2) Execute: Create a new state for the preempting walk */

	next_walk_state = acpi_ds_create_walk_state(obj_desc->method.owner_id,
						    NULL, obj_desc, thread);
	if (!next_walk_state) {
		status = AE_NO_MEMORY;
		goto cleanup;
	}

	/*
	 * The resolved arguments were put on the previous walk state's operand
	 * stack. Operands on the previous walk state stack always
	 * start at index 0. Also, null terminate the list of arguments
	 */
	this_walk_state->operands[this_walk_state->num_operands] = NULL;

	info.parameters = &this_walk_state->operands[0];
	info.parameter_type = ACPI_PARAM_ARGS;

	status = acpi_ds_init_aml_walk(next_walk_state, NULL, method_node,
				       obj_desc->method.aml_start,
				       obj_desc->method.aml_length, &info, 3);
	if (ACPI_FAILURE(status)) {
		goto cleanup;
	}
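
	/*
	 * The trailing pass-number argument differs between the two
	 * acpi_ds_init_aml_walk() calls in this file: the first-pass load in
	 * acpi_ds_parse_method() passes 1, while the call above passes 3 for
	 * the execute pass.
	 */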

	/*
	 * Delete the operands on the previous walkstate operand stack
	 * (they were copied to new objects)
	 */
	for (i = 0; i < obj_desc->method.param_count; i++) {
		acpi_ut_remove_reference(this_walk_state->operands[i]);
		this_walk_state->operands[i] = NULL;
	}

	/* Clear the operand stack */

	this_walk_state->num_operands = 0;
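
	/*
	 * At this point the caller's operand stack is empty; copies of the
	 * argument objects were handed to next_walk_state through the info
	 * structure above.
	 */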

	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
			  "Starting nested execution, newstate=%p\n",
			  next_walk_state));

	if (obj_desc->method.method_flags & AML_METHOD_INTERNAL_ONLY) {
		status = obj_desc->method.implementation(next_walk_state);
	}

	return_ACPI_STATUS(status);

      cleanup:
	/* Decrement the thread count on the method parse tree */

	if (next_walk_state && (next_walk_state->method_desc)) {
		next_walk_state->method_desc->method.thread_count--;
	}

	/* On error, we must delete the new walk state */

	acpi_ds_terminate_control_method(next_walk_state);
	acpi_ds_delete_walk_state(next_walk_state);
	return_ACPI_STATUS(status);
}
/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_restart_control_method
 *
 * PARAMETERS:  walk_state          - State for preempted method (caller)
 *              return_desc         - Return value from the called method
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Restart a method that was preempted by another (nested) method
 *              invocation. Handle the return value (if any) from the callee.
 *
 ******************************************************************************/
acpi_status
acpi_ds_restart_control_method(struct acpi_walk_state *walk_state,
			       union acpi_operand_object *return_desc)
{
	acpi_status status;

	ACPI_FUNCTION_TRACE_PTR("ds_restart_control_method", walk_state);

	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
			  "****Restart [%4.4s] Op %p return_value_from_callee %p\n",
			  (char *)&walk_state->method_node->name,
			  walk_state->method_call_op, return_desc));

	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
			  " return_from_this_method_used?=%X res_stack %p Walk %p\n",
			  walk_state->return_used,
			  walk_state->results, walk_state));

	/* Did the called method return a value? */

	if (return_desc) {
		/* Are we actually going to use the return value? */

		if (walk_state->return_used) {
			/* Save the return value from the previous method */

			status = acpi_ds_result_push(return_desc, walk_state);
			if (ACPI_FAILURE(status)) {
				acpi_ut_remove_reference(return_desc);
				return_ACPI_STATUS(status);
			}

			/*
			 * Save as THIS method's return value in case it is returned
			 * immediately to yet another method
			 */
			walk_state->return_desc = return_desc;
		}

		/*
		 * The following code is the
		 * optional support for a so-called "implicit return". Some AML code
		 * assumes that the last value of the method is "implicitly" returned
		 * to the caller. Just save the last result as the return value.
		 * NOTE: this is optional because the ASL language does not actually
		 * support this behavior.
		 */
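		/*
		 * Illustrative example (not part of this file): AML such as
		 *
		 *     Method (GETV) { Store (0x0F, Local0) }
		 *
		 * has no explicit Return, yet some callers expect the last
		 * computed value (here 0x0F) to be available; the implicit
		 * return path below preserves that last result for them.
		 */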
		else if (!acpi_ds_do_implicit_return
			 (return_desc, walk_state, FALSE)) {
			/*
			 * Delete the return value if it will not be used by the
			 * calling method
			 */
			acpi_ut_remove_reference(return_desc);
		}
	}

	return_ACPI_STATUS(AE_OK);
}
/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_terminate_control_method
 *
 * PARAMETERS:  walk_state          - State of the method
 *
 * RETURN:      None
 *
 * DESCRIPTION: Terminate a control method. Delete everything that the method
 *              created, delete all locals and arguments, and delete the parse
 *              tree if requested.
 *
 ******************************************************************************/
void acpi_ds_terminate_control_method(struct acpi_walk_state *walk_state)
{
	union acpi_operand_object *obj_desc;
	struct acpi_namespace_node *method_node;
	acpi_status status;

	ACPI_FUNCTION_TRACE_PTR("ds_terminate_control_method", walk_state);

	if (!walk_state) {
		return_VOID;
	}

	/* The current method object was saved in the walk state */

	obj_desc = walk_state->method_desc;
	if (!obj_desc) {
		return_VOID;
	}

	/* Delete all arguments and locals */

	acpi_ds_method_data_delete_all(walk_state);

	/*
	 * Lock the parser while we terminate this method.
	 * If this is the last thread executing the method,
	 * we have additional cleanup to perform
	 */
	status = acpi_ut_acquire_mutex(ACPI_MTX_PARSER);
	if (ACPI_FAILURE(status)) {
		return_VOID;
	}

	/* Signal completion of the execution of this method if necessary */

	if (walk_state->method_desc->method.semaphore) {
		status =
		    acpi_os_signal_semaphore(walk_state->method_desc->method.
					     semaphore, 1);
		if (ACPI_FAILURE(status)) {
			ACPI_REPORT_ERROR(("Could not signal method semaphore\n"));

			/* Ignore error and continue cleanup */
		}
	}

	if (walk_state->method_desc->method.thread_count) {
		ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
				  "*** Not deleting method namespace, there are still %d threads\n",
				  walk_state->method_desc->method.
				  thread_count));
	} else {		/* This is the last executing thread */

		/*
		 * Support to dynamically change a method from not_serialized to
		 * Serialized if it appears that the method is written foolishly and
		 * does not support multiple thread execution. The best example of this
		 * is if such a method creates namespace objects and blocks. A second
		 * thread will fail with an AE_ALREADY_EXISTS exception
		 *
		 * This code is here because we must wait until the last thread exits
		 * before creating the synchronization semaphore.
		 */
		if ((walk_state->method_desc->method.concurrency == 1) &&
		    (!walk_state->method_desc->method.semaphore)) {
			status = acpi_os_create_semaphore(1, 1,
							  &walk_state->
							  method_desc->method.
							  semaphore);
		}
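
		/*
		 * Once this semaphore exists, later invocations of the method
		 * take the concurrency-limited path in
		 * acpi_ds_begin_method_execution(), effectively serializing a
		 * method that was not declared Serialized.
		 */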

		/*
		 * There are no more threads executing this method. Perform
		 * additional cleanup.
		 *
		 * The method Node is stored in the walk state
		 */
		method_node = walk_state->method_node;

		/*
		 * Delete any namespace entries created immediately underneath
		 * the method
		 */
		status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
		if (ACPI_FAILURE(status)) {
			goto exit;
		}

		if (method_node->child) {
			acpi_ns_delete_namespace_subtree(method_node);
		}

		/*
		 * Delete any namespace entries created anywhere else within
		 * the namespace
		 */
		acpi_ns_delete_namespace_by_owner(walk_state->method_desc->
						  method.owner_id);
		status = acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
		acpi_ut_release_owner_id(&walk_state->method_desc->method.
					 owner_id);
	}

      exit:
	(void)acpi_ut_release_mutex(ACPI_MTX_PARSER);
	return_VOID;
}