/* Execute compiled code */

/* XXX speed up searching for keywords by using a dictionary */

/* enable more aggressive intra-module optimizations, where available */
#define PY_LOCAL_AGGRESSIVE

#include "Python.h"

#include "frameobject.h"
#include "structmember.h"

#ifndef WITH_TSC

#define READ_TIMESTAMP(var)

#else

typedef unsigned long long uint64;
#if defined(__ppc__) /* <- Don't know if this is the correct symbol; this
                        section should work for GCC on any PowerPC
                        platform, irrespective of OS.
                        POWER?  Who knows :-) */

#define READ_TIMESTAMP(var) ppc_getcounter(&var)

static void
ppc_getcounter(uint64 *v)
{
    register unsigned long tbu, tb, tbu2;

  loop:
    asm volatile ("mftbu %0" : "=r" (tbu) );
    asm volatile ("mftb  %0" : "=r" (tb)  );
    asm volatile ("mftbu %0" : "=r" (tbu2));
    if (__builtin_expect(tbu != tbu2, 0)) goto loop;

    /* The slightly peculiar way of writing the next lines is
       compiled better by GCC than any other way I tried. */
    ((long*)(v))[0] = tbu;
    ((long*)(v))[1] = tb;
}
#else /* this is for linux/x86 (and probably any other GCC/x86 combo) */

#define READ_TIMESTAMP(val) \
    __asm__ __volatile__("rdtsc" : "=A" (val))

#endif
void dump_tsc(int opcode, int ticked, uint64 inst0, uint64 inst1,
              uint64 loop0, uint64 loop1, uint64 intr0, uint64 intr1)
{
    uint64 intr, inst, loop;
    PyThreadState *tstate = PyThreadState_Get();
    if (!tstate->interp->tscdump)
        return;
    intr = intr1 - intr0;
    inst = inst1 - inst0 - intr;
    loop = loop1 - loop0 - intr;
    fprintf(stderr, "opcode=%03d t=%d inst=%06lld loop=%06lld\n",
            opcode, ticked, inst, loop);
}

#endif
/* Turn this on if your compiler chokes on the big switch: */
/* #define CASE_TOO_BIG 1 */

#ifdef Py_DEBUG
/* For debugging the interpreter: */
#define LLTRACE  1      /* Low-level trace feature */
#define CHECKEXC 1      /* Double-check exception checking */
#endif
typedef PyObject *(*callproc)(PyObject *, PyObject *, PyObject *);
/* Forward declarations */
#ifdef WITH_TSC
static PyObject * call_function(PyObject ***, int, uint64*, uint64*);
#else
static PyObject * call_function(PyObject ***, int);
#endif
static PyObject * fast_function(PyObject *, PyObject ***, int, int, int);
static PyObject * do_call(PyObject *, PyObject ***, int, int);
static PyObject * ext_do_call(PyObject *, PyObject ***, int, int, int);
static PyObject * update_keyword_args(PyObject *, int, PyObject ***,
                                      PyObject *);
static PyObject * update_star_args(int, int, PyObject *, PyObject ***);
static PyObject * load_args(PyObject ***, int);
#define CALL_FLAG_VAR 1
#define CALL_FLAG_KW 2

#ifdef LLTRACE
static int lltrace;
static int prtrace(PyObject *, char *);
#endif
static int call_trace(Py_tracefunc, PyObject *, PyFrameObject *,
                      int, PyObject *);
static int call_trace_protected(Py_tracefunc, PyObject *,
                                PyFrameObject *, int, PyObject *);
static void call_exc_trace(Py_tracefunc, PyObject *, PyFrameObject *);
static int maybe_call_line_trace(Py_tracefunc, PyObject *,
                                 PyFrameObject *, int *, int *, int *);

static PyObject * cmp_outcome(int, PyObject *, PyObject *);
static PyObject * import_from(PyObject *, PyObject *);
static int import_all_from(PyObject *, PyObject *);
static void format_exc_check_arg(PyObject *, const char *, PyObject *);
static PyObject * unicode_concatenate(PyObject *, PyObject *,
                                      PyFrameObject *, unsigned char *);
#define NAME_ERROR_MSG \
    "name '%.200s' is not defined"
#define GLOBAL_NAME_ERROR_MSG \
    "global name '%.200s' is not defined"
#define UNBOUNDLOCAL_ERROR_MSG \
    "local variable '%.200s' referenced before assignment"
#define UNBOUNDFREE_ERROR_MSG \
    "free variable '%.200s' referenced before assignment" \
    " in enclosing scope"
/* Dynamic execution profile */
#ifdef DYNAMIC_EXECUTION_PROFILE
#ifdef DXPAIRS
static long dxpairs[257][256];
#define dxp dxpairs[256]
#else
static long dxp[256];
#endif
#endif
/* Function call profile */
#ifdef CALL_PROFILE
#define PCALL_NUM 11
static int pcall[PCALL_NUM];

#define PCALL_ALL 0
#define PCALL_FUNCTION 1
#define PCALL_FAST_FUNCTION 2
#define PCALL_FASTER_FUNCTION 3
#define PCALL_METHOD 4
#define PCALL_BOUND_METHOD 5
#define PCALL_CFUNCTION 6
#define PCALL_TYPE 7
#define PCALL_GENERATOR 8
#define PCALL_OTHER 9
#define PCALL_POP 10

/* Notes about the statistics

   FAST_FUNCTION means no argument tuple needs to be created.
   FASTER_FUNCTION means that the fast-path frame setup code is used.

   If there is a method call where the call can be optimized by changing
   the argument tuple and calling the function directly, it gets recorded
   twice.

   As a result, the relationship among the statistics appears to be
   PCALL_ALL == PCALL_FUNCTION + PCALL_METHOD - PCALL_BOUND_METHOD +
                PCALL_CFUNCTION + PCALL_TYPE + PCALL_GENERATOR + PCALL_OTHER
   PCALL_FUNCTION > PCALL_FAST_FUNCTION > PCALL_FASTER_FUNCTION
   PCALL_METHOD > PCALL_BOUND_METHOD
*/

#define PCALL(POS) pcall[POS]++
PyObject *
PyEval_GetCallStats(PyObject *self)
{
    return Py_BuildValue("iiiiiiiiiii",
                         pcall[0], pcall[1], pcall[2], pcall[3],
                         pcall[4], pcall[5], pcall[6], pcall[7],
                         pcall[8], pcall[9], pcall[10]);
}
#else
#define PCALL(O)

PyObject *
PyEval_GetCallStats(PyObject *self)
{
    Py_INCREF(Py_None);
    return Py_None;
}
#endif
#ifdef WITH_THREAD
#include "pythread.h"

static PyThread_type_lock interpreter_lock = 0; /* This is the GIL */
static PyThread_type_lock pending_lock = 0; /* for pending calls */
static long main_thread = 0;
int
PyEval_ThreadsInitialized(void)
{
    return interpreter_lock != 0;
}

void
PyEval_InitThreads(void)
{
    if (interpreter_lock)
        return;
    interpreter_lock = PyThread_allocate_lock();
    PyThread_acquire_lock(interpreter_lock, 1);
    main_thread = PyThread_get_thread_ident();
}

void
PyEval_AcquireLock(void)
{
    PyThread_acquire_lock(interpreter_lock, 1);
}

void
PyEval_ReleaseLock(void)
{
    PyThread_release_lock(interpreter_lock);
}
void
PyEval_AcquireThread(PyThreadState *tstate)
{
    if (tstate == NULL)
        Py_FatalError("PyEval_AcquireThread: NULL new thread state");
    /* Check someone has called PyEval_InitThreads() to create the lock */
    assert(interpreter_lock);
    PyThread_acquire_lock(interpreter_lock, 1);
    if (PyThreadState_Swap(tstate) != NULL)
        Py_FatalError(
            "PyEval_AcquireThread: non-NULL old thread state");
}

void
PyEval_ReleaseThread(PyThreadState *tstate)
{
    if (tstate == NULL)
        Py_FatalError("PyEval_ReleaseThread: NULL thread state");
    if (PyThreadState_Swap(NULL) != tstate)
        Py_FatalError("PyEval_ReleaseThread: wrong thread state");
    PyThread_release_lock(interpreter_lock);
}
/* This function is called from PyOS_AfterFork to ensure that newly
   created child processes don't hold locks referring to threads which
   are not running in the child process.  (This could also be done using
   the pthread_atfork mechanism, at least for the pthreads implementation.) */
void
PyEval_ReInitThreads(void)
{
    PyObject *threading, *result;
    PyThreadState *tstate;

    if (!interpreter_lock)
        return;
    /*XXX Can't use PyThread_free_lock here because it does too
      much error-checking.  Doing this cleanly would require
      adding a new function to each thread_*.h.  Instead, just
      create a new lock and waste a little bit of memory */
    interpreter_lock = PyThread_allocate_lock();
    pending_lock = PyThread_allocate_lock();
    PyThread_acquire_lock(interpreter_lock, 1);
    main_thread = PyThread_get_thread_ident();

    /* Update the threading module with the new state.
     */
    tstate = PyThreadState_GET();
    threading = PyMapping_GetItemString(tstate->interp->modules,
                                        "threading");
    if (threading == NULL) {
        /* threading not imported */
        PyErr_Clear();
        return;
    }
    result = PyObject_CallMethod(threading, "_after_fork", NULL);
    if (result == NULL)
        PyErr_WriteUnraisable(threading);
    else
        Py_DECREF(result);
    Py_DECREF(threading);
}
/* Functions save_thread and restore_thread are always defined so
   dynamically loaded modules needn't be compiled separately for use
   with and without threads: */

PyThreadState *
PyEval_SaveThread(void)
{
    PyThreadState *tstate = PyThreadState_Swap(NULL);
    if (tstate == NULL)
        Py_FatalError("PyEval_SaveThread: NULL tstate");
#ifdef WITH_THREAD
    if (interpreter_lock)
        PyThread_release_lock(interpreter_lock);
#endif
    return tstate;
}
void
PyEval_RestoreThread(PyThreadState *tstate)
{
    if (tstate == NULL)
        Py_FatalError("PyEval_RestoreThread: NULL tstate");
#ifdef WITH_THREAD
    if (interpreter_lock) {
        int err = errno;
        PyThread_acquire_lock(interpreter_lock, 1);
        errno = err;
    }
#endif
    PyThreadState_Swap(tstate);
}
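
/* Illustrative sketch (not part of ceval.c): extensions and embedding
   applications normally release the GIL around blocking C calls with
   this save/restore pair -- it is what Py_BEGIN_ALLOW_THREADS and
   Py_END_ALLOW_THREADS expand to.  `do_blocking_io` is a hypothetical
   stand-in for any C-level call that does not touch Python objects. */
#if 0
static void
call_with_gil_released(void)
{
    PyThreadState *_save = PyEval_SaveThread();  /* release the GIL */
    do_blocking_io();                            /* no Python API here */
    PyEval_RestoreThread(_save);                 /* reacquire the GIL */
}
#endif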
/* Mechanism whereby asynchronously executing callbacks (e.g. UNIX
   signal handlers or Mac I/O completion routines) can schedule calls
   to a function to be called synchronously.
   The synchronous function is called with one void* argument.
   It should return 0 for success or -1 for failure -- failure should
   be accompanied by an exception.

   If registry succeeds, the registry function returns 0; if it fails
   (e.g. due to too many pending calls) it returns -1 (without setting
   an exception condition).

   Note that because registry may occur from within signal handlers,
   or other asynchronous events, calling malloc() is unsafe!

   Any thread can schedule pending calls, but only the main thread
   will execute them.
   There is no facility to schedule calls to a particular thread, but
   that should be easy to change, should that ever be required.  In
   that case, the static variables here should go into the python
   threadstate.
*/
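
/* Illustrative sketch (not part of ceval.c): the typical client of this
   mechanism is a C signal handler that defers real work to the main
   thread.  `handle_event` and `sig_handler` are hypothetical names. */
#if 0
static int
handle_event(void *arg)             /* runs later, in the main thread */
{
    /* The Python C API is safe to use here; return 0 on success,
       -1 (with an exception set) on failure. */
    return 0;
}

static void
sig_handler(int signum)             /* async-signal context: no malloc! */
{
    /* May return -1 without setting an exception if the fixed-size
       queue of pending calls is full. */
    (void)Py_AddPendingCall(handle_event, NULL);
}
#endif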
#ifdef WITH_THREAD

/* The WITH_THREAD implementation is thread-safe.  It allows
   scheduling to be made from any thread, and even from an executing
   callback.
 */

#define NPENDINGCALLS 32
static struct {
    int (*func)(void *);
    void *arg;
} pendingcalls[NPENDINGCALLS];
static int pendingfirst = 0;
static int pendinglast = 0;
static volatile int pendingcalls_to_do = 1; /* trigger initialization of lock */
static char pendingbusy = 0;
int
Py_AddPendingCall(int (*func)(void *), void *arg)
{
    int i, j, result = 0;
    PyThread_type_lock lock = pending_lock;

    /* try a few times for the lock.  Since this mechanism is used
     * for signal handling (on the main thread), there is a (slim)
     * chance that a signal is delivered on the same thread while we
     * hold the lock during the Py_MakePendingCalls() function.
     * This avoids a deadlock in that case.
     * Note that signals can be delivered on any thread.  In particular,
     * on Windows, a SIGINT is delivered on a system-created worker
     * thread.
     * We also check for lock being NULL, in the unlikely case that
     * this function is called before any bytecode evaluation takes place.
     */
    if (lock != NULL) {
        for (i = 0; i < 100; i++) {
            if (PyThread_acquire_lock(lock, NOWAIT_LOCK))
                break;
        }
        if (i == 100)
            return -1;
    }

    i = pendinglast;
    j = (i + 1) % NPENDINGCALLS;
    if (j == pendingfirst) {
        result = -1; /* Queue full */
    }
    else {
        pendingcalls[i].func = func;
        pendingcalls[i].arg = arg;
        pendinglast = j;
    }
    /* signal main loop */
    pendingcalls_to_do = 1;
    if (lock != NULL)
        PyThread_release_lock(lock);
    return result;
}
int
Py_MakePendingCalls(void)
{
    int i;
    int r = 0;

    if (!pending_lock) {
        /* initial allocation of the lock */
        pending_lock = PyThread_allocate_lock();
        if (pending_lock == NULL)
            return -1;
    }

    /* only service pending calls on main thread */
    if (main_thread && PyThread_get_thread_ident() != main_thread)
        return 0;
    /* don't perform recursive pending calls */
    if (pendingbusy)
        return 0;
    pendingbusy = 1;
    /* perform a bounded number of calls, in case of recursion */
    for (i = 0; i < NPENDINGCALLS; i++) {
        int j;
        int (*func)(void *);
        void *arg = NULL;

        /* pop one item off the queue while holding the lock */
        PyThread_acquire_lock(pending_lock, WAIT_LOCK);
        j = pendingfirst;
        if (j == pendinglast) {
            func = NULL; /* Queue empty */
        }
        else {
            func = pendingcalls[j].func;
            arg = pendingcalls[j].arg;
            pendingfirst = (j + 1) % NPENDINGCALLS;
        }
        pendingcalls_to_do = pendingfirst != pendinglast;
        PyThread_release_lock(pending_lock);
        /* having released the lock, perform the callback */
        if (func == NULL)
            break;
        r = func(arg);
        if (r)
            break;
    }
    pendingbusy = 0;
    return r;
}
#else /* if ! defined WITH_THREAD */

/*
   WARNING!  ASYNCHRONOUSLY EXECUTING CODE!
   This code is used for signal handling in python that isn't built
   with WITH_THREAD.
   Don't use this implementation when Py_AddPendingCalls() can happen
   on a different thread!

   There are two possible race conditions:
   (1) nested asynchronous calls to Py_AddPendingCall()
   (2) AddPendingCall() calls made while pending calls are being processed.

   (1) is very unlikely because typically signal delivery
   is blocked during signal handling.  So it should be impossible.
   (2) is a real possibility.
   The current code is safe against (2), but not against (1).
   The safety against (2) is derived from the fact that only one
   thread is present, interrupted by signals, and that the critical
   section is protected with the "busy" variable.  On Windows, which
   delivers SIGINT on a system thread, this does not hold and therefore
   Windows really shouldn't use this version.
   The two threads could theoretically wiggle around the "busy" variable.
*/
#define NPENDINGCALLS 32
static struct {
    int (*func)(void *);
    void *arg;
} pendingcalls[NPENDINGCALLS];
static volatile int pendingfirst = 0;
static volatile int pendinglast = 0;
static volatile int pendingcalls_to_do = 0;
int
Py_AddPendingCall(int (*func)(void *), void *arg)
{
    static volatile int busy = 0;
    int i, j;
    /* XXX Begin critical section */
    if (busy)
        return -1;
    busy = 1;
    i = pendinglast;
    j = (i + 1) % NPENDINGCALLS;
    if (j == pendingfirst) {
        busy = 0;
        return -1; /* Queue full */
    }
    pendingcalls[i].func = func;
    pendingcalls[i].arg = arg;
    pendinglast = j;

    pendingcalls_to_do = 1; /* Signal main loop */
    busy = 0;
    /* XXX End critical section */
    return 0;
}
int
Py_MakePendingCalls(void)
{
    static int busy = 0;
    if (busy)
        return 0;
    busy = 1;
    pendingcalls_to_do = 0;
    for (;;) {
        int i;
        int (*func)(void *);
        void *arg;
        i = pendingfirst;
        if (i == pendinglast)
            break; /* Queue empty */
        func = pendingcalls[i].func;
        arg = pendingcalls[i].arg;
        pendingfirst = (i + 1) % NPENDINGCALLS;
        if (func(arg)) {
            busy = 0;
            pendingcalls_to_do = 1; /* We're not done yet */
            return -1;
        }
    }
    busy = 0;
    return 0;
}

#endif /* WITH_THREAD */
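
/* Illustrative sketch (not part of ceval.c): an embedding application
   that runs its own event loop on the main thread can drain the queue
   explicitly instead of waiting for the interpreter's periodic check.
   `app_poll_events` is a hypothetical name. */
#if 0
static void
app_poll_events(void)
{
    if (Py_MakePendingCalls() < 0)
        PyErr_Print();      /* a pending callback returned -1 */
}
#endif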
/* The interpreter's recursion limit */

#ifndef Py_DEFAULT_RECURSION_LIMIT
#define Py_DEFAULT_RECURSION_LIMIT 1000
#endif
static int recursion_limit = Py_DEFAULT_RECURSION_LIMIT;
int _Py_CheckRecursionLimit = Py_DEFAULT_RECURSION_LIMIT;
int
Py_GetRecursionLimit(void)
{
    return recursion_limit;
}

void
Py_SetRecursionLimit(int new_limit)
{
    recursion_limit = new_limit;
    _Py_CheckRecursionLimit = recursion_limit;
}
/* the macro Py_EnterRecursiveCall() only calls _Py_CheckRecursiveCall()
   if the recursion_depth reaches _Py_CheckRecursionLimit.
   If USE_STACKCHECK, the macro decrements _Py_CheckRecursionLimit
   to guarantee that _Py_CheckRecursiveCall() is regularly called.
   Without USE_STACKCHECK, there is no need for this. */
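
/* Illustrative sketch (not part of ceval.c): recursive C code, such as
   a routine walking nested containers, brackets each level with the
   Py_EnterRecursiveCall()/Py_LeaveRecursiveCall() pair so that runaway
   recursion raises RuntimeError instead of overflowing the C stack.
   `walk_object` is a hypothetical helper. */
#if 0
static int
walk_object(PyObject *obj)
{
    int r = 0;
    if (Py_EnterRecursiveCall(" while walking an object graph"))
        return -1;              /* RuntimeError already set */
    /* ... recurse into obj's children here ... */
    Py_LeaveRecursiveCall();
    return r;
}
#endif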
int
_Py_CheckRecursiveCall(char *where)
{
    PyThreadState *tstate = PyThreadState_GET();

#ifdef USE_STACKCHECK
    if (PyOS_CheckStack()) {
        --tstate->recursion_depth;
        PyErr_SetString(PyExc_MemoryError, "Stack overflow");
        return -1;
    }
#endif
    _Py_CheckRecursionLimit = recursion_limit;
    if (tstate->recursion_critical)
        /* Somebody asked that we don't check for recursion. */
        return 0;
    if (tstate->overflowed) {
        if (tstate->recursion_depth > recursion_limit + 50) {
            /* Overflowing while handling an overflow. Give up. */
            Py_FatalError("Cannot recover from stack overflow.");
        }
        return 0;
    }
    if (tstate->recursion_depth > recursion_limit) {
        --tstate->recursion_depth;
        tstate->overflowed = 1;
        PyErr_Format(PyExc_RuntimeError,
                     "maximum recursion depth exceeded%s",
                     where);
        return -1;
    }
    return 0;
}
/* Status code for main loop (reason for stack unwind) */
enum why_code {
    WHY_NOT =       0x0001, /* No error */
    WHY_EXCEPTION = 0x0002, /* Exception occurred */
    WHY_RERAISE =   0x0004, /* Exception re-raised by 'finally' */
    WHY_RETURN =    0x0008, /* 'return' statement */
    WHY_BREAK =     0x0010, /* 'break' statement */
    WHY_CONTINUE =  0x0020, /* 'continue' statement */
    WHY_YIELD =     0x0040, /* 'yield' operator */
    WHY_SILENCED =  0x0080  /* Exception silenced by 'with' */
};
static enum why_code do_raise(PyObject *, PyObject *);
static int unpack_iterable(PyObject *, int, int, PyObject **);
/* Records whether tracing is on for any thread.  Counts the number of
   threads for which tstate->c_tracefunc is non-NULL, so if the value
   is 0, we know we don't have to check this thread's c_tracefunc.
   This speeds up the if statement in PyEval_EvalFrameEx() after
   fast_next_opcode. */
static int _Py_TracingPossible = 0;
/* for manipulating the thread switch and periodic "stuff" - used to be
   per thread, now just a pair o' globals */
int _Py_CheckInterval = 100;
volatile int _Py_Ticker = 0; /* so that we hit a "tick" first thing */
PyObject *
PyEval_EvalCode(PyCodeObject *co, PyObject *globals, PyObject *locals)
{
    return PyEval_EvalCodeEx(co,
                             globals, locals,
                             (PyObject **)NULL, 0,
                             (PyObject **)NULL, 0,
                             (PyObject **)NULL, 0,
                             NULL, NULL);
}
/* Interpreter main loop */

PyObject *
PyEval_EvalFrame(PyFrameObject *f) {
    /* This is for backward compatibility with extension modules that
       used this API; core interpreter code should call
       PyEval_EvalFrameEx() */
    return PyEval_EvalFrameEx(f, 0);
}
PyObject *
PyEval_EvalFrameEx(PyFrameObject *f, int throwflag)
{
    register PyObject **stack_pointer;  /* Next free slot in value stack */
    register unsigned char *next_instr;
    register int opcode;        /* Current opcode */
    register int oparg;         /* Current opcode argument, if any */
    register enum why_code why; /* Reason for block stack unwind */
    register int err;           /* Error status -- nonzero if error */
    register PyObject *x;       /* Result object -- NULL if error */
    register PyObject *v;       /* Temporary objects popped off stack */
    register PyObject *w;
    register PyObject *u;
    register PyObject *t;
    register PyObject **fastlocals, **freevars;
    PyObject *retval = NULL;    /* Return value */
    PyThreadState *tstate = PyThreadState_GET();
    PyCodeObject *co;

    /* when tracing we set things up so that

           not (instr_lb <= current_bytecode_offset < instr_ub)

       is true when the line being executed has changed.  The
       initial values are such as to make this false the first
       time it is tested. */
    int instr_ub = -1, instr_lb = 0, instr_prev = -1;

    unsigned char *first_instr;
    PyObject *names;
    PyObject *consts;
#if defined(Py_DEBUG) || defined(LLTRACE)
    /* Make it easier to find out where we are with a debugger */
    char *filename;
#endif
/* Computed GOTOs, or
       the-optimization-commonly-but-improperly-known-as-"threaded code"
   using gcc's labels-as-values extension
   (http://gcc.gnu.org/onlinedocs/gcc/Labels-as-Values.html).

   The traditional bytecode evaluation loop uses a "switch" statement, which
   decent compilers will optimize as a single indirect branch instruction
   combined with a lookup table of jump addresses.  However, since the
   indirect jump instruction is shared by all opcodes, the CPU will have a
   hard time making the right prediction for where to jump next (actually,
   it will be always wrong except in the uncommon case of a sequence of
   several identical opcodes).

   "Threaded code", in contrast, uses an explicit jump table and an explicit
   indirect jump instruction at the end of each opcode.  Since the jump
   instruction is at a different address for each opcode, the CPU will make a
   separate prediction for each of these instructions, which is equivalent to
   predicting the second opcode of each opcode pair.  These predictions have
   a much better chance to turn out valid, especially in small bytecode loops.

   A mispredicted branch on a modern CPU flushes the whole pipeline and
   can cost several CPU cycles (depending on the pipeline depth),
   and potentially many more instructions (depending on the pipeline width).
   A correctly predicted branch, however, is nearly free.

   At the time of this writing, the "threaded code" version is up to 15-20%
   faster than the normal "switch" version, depending on the compiler and the
   architecture.

   We disable the optimization if DYNAMIC_EXECUTION_PROFILE is defined,
   because it would render the measurements invalid.

   NOTE: care must be taken that the compiler doesn't try to "optimize" the
   indirect jumps by sharing them between all opcodes. Such optimizations
   can be disabled on gcc by using the -fno-gcse flag (or possibly
   -fno-crossjumping).
*/

#if defined(USE_COMPUTED_GOTOS) && defined(DYNAMIC_EXECUTION_PROFILE)
#undef USE_COMPUTED_GOTOS
#endif
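
/* Illustrative sketch (not part of ceval.c): the shape of threaded
   dispatch on a toy three-opcode VM, using the same labels-as-values
   extension.  Each opcode body ends in its own indirect jump, so the
   CPU predicts each dispatch site separately.  All names here are
   hypothetical. */
#if 0
static int
run_threaded(const unsigned char *ip)
{
    static void *targets[] = { &&op_inc, &&op_dec, &&op_halt };
    int acc = 0;
    goto *targets[*ip++];                   /* initial dispatch */
  op_inc:  acc++; goto *targets[*ip++];     /* per-opcode dispatch */
  op_dec:  acc--; goto *targets[*ip++];
  op_halt: return acc;
}
#endif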
#ifdef USE_COMPUTED_GOTOS
/* Import the static jump table */
#include "opcode_targets.h"

/* This macro is used when several opcodes defer to the same implementation
   (e.g. SETUP_LOOP, SETUP_FINALLY) */
#define TARGET_WITH_IMPL(op, impl) \
    TARGET_##op: \
    opcode = op; \
    if (HAS_ARG(op)) \
        oparg = NEXTARG(); \
    case op: \
    goto impl;

#define TARGET(op) \
    TARGET_##op: \
    opcode = op; \
    if (HAS_ARG(op)) \
        oparg = NEXTARG(); \
    case op:

#define DISPATCH() \
    { \
        /* Avoid multiple loads from _Py_Ticker despite `volatile` */ \
        int _tick = _Py_Ticker - 1; \
        _Py_Ticker = _tick; \
        if (_tick >= 0) { \
            FAST_DISPATCH(); \
        } \
        continue; \
    }

#ifdef LLTRACE
#define FAST_DISPATCH() \
    { \
        if (!lltrace && !_Py_TracingPossible) { \
            f->f_lasti = INSTR_OFFSET(); \
            goto *opcode_targets[*next_instr++]; \
        } \
        goto fast_next_opcode; \
    }
#else
#define FAST_DISPATCH() \
    { \
        if (!_Py_TracingPossible) { \
            f->f_lasti = INSTR_OFFSET(); \
            goto *opcode_targets[*next_instr++]; \
        } \
        goto fast_next_opcode; \
    }
#endif

#else
#define TARGET(op) \
    case op:
#define TARGET_WITH_IMPL(op, impl) \
    /* silence compiler warnings about `impl` unused */ \
    if (0) goto impl; \
    case op:
#define DISPATCH() continue
#define FAST_DISPATCH() goto fast_next_opcode
#endif
/* Tuple access macros */

#ifndef Py_DEBUG
#define GETITEM(v, i) PyTuple_GET_ITEM((PyTupleObject *)(v), (i))
#else
#define GETITEM(v, i) PyTuple_GetItem((v), (i))
#endif
#ifdef WITH_TSC
/* Use Pentium timestamp counter to mark certain events:
   inst0 -- beginning of switch statement for opcode dispatch
   inst1 -- end of switch statement (may be skipped)
   loop0 -- the top of the mainloop
   loop1 -- place where control returns again to top of mainloop
            (may be skipped)
   intr1 -- beginning of long interruption
   intr2 -- end of long interruption

   Many opcodes call out to helper C functions.  In some cases, the
   time in those functions should be counted towards the time for the
   opcode, but not in all cases.  For example, a CALL_FUNCTION opcode
   calls another Python function; there's no point in charging all the
   bytecode executed by the called function to the caller.

   It's hard to make a useful judgement statically.  In the presence
   of operator overloading, it's impossible to tell if a call will
   execute new Python code or not.

   It's a case-by-case judgement.  I'll use intr1 for the following
   cases:

   IMPORT_STAR
   IMPORT_FROM
   CALL_FUNCTION (and friends)

 */
    uint64 inst0, inst1, loop0, loop1, intr0 = 0, intr1 = 0;
    int ticked = 0;

    READ_TIMESTAMP(inst0);
    READ_TIMESTAMP(inst1);
    READ_TIMESTAMP(loop0);
    READ_TIMESTAMP(loop1);

    /* shut up the compiler */
    opcode = 0;
#endif
/* Code access macros */

#define INSTR_OFFSET()  ((int)(next_instr - first_instr))
#define NEXTOP()        (*next_instr++)
#define NEXTARG()       (next_instr += 2, (next_instr[-1]<<8) + next_instr[-2])
#define PEEKARG()       ((next_instr[2]<<8) + next_instr[1])
#define JUMPTO(x)       (next_instr = first_instr + (x))
#define JUMPBY(x)       (next_instr += (x))
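
/* Illustrative sketch (not part of ceval.c): the wire format the macros
   above decode -- a one-byte opcode, followed for opcodes that take an
   argument by a two-byte little-endian operand (low byte first, as in
   NEXTARG()).  `decode_one` is a hypothetical name. */
#if 0
static void
decode_one(unsigned char **pc)
{
    unsigned char *p = *pc;
    int op = *p++;                      /* NEXTOP() */
    if (HAS_ARG(op)) {                  /* opcode takes an operand */
        int arg = (p[1] << 8) + p[0];   /* NEXTARG() */
        p += 2;
        (void)arg;
    }
    *pc = p;
}
#endif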
/* OpCode prediction macros
    Some opcodes tend to come in pairs thus making it possible to
    predict the second code when the first is run.  For example,
    COMPARE_OP is often followed by JUMP_IF_FALSE or JUMP_IF_TRUE.  And,
    those opcodes are often followed by a POP_TOP.

    Verifying the prediction costs a single high-speed test of a register
    variable against a constant.  If the pairing was good, then the
    processor's own internal branch prediction has a high likelihood of
    success, resulting in a nearly zero-overhead transition to the
    next opcode.  A successful prediction saves a trip through the eval-loop
    including its two unpredictable branches, the HAS_ARG test and the
    switch-case.  Combined with the processor's internal branch prediction,
    a successful PREDICT has the effect of making the two opcodes run as if
    they were a single new opcode with the bodies combined.

    If collecting opcode statistics, your choices are to either keep the
    predictions turned-on and interpret the results as if some opcodes
    had been combined or turn-off predictions so that the opcode frequency
    counter updates for both opcodes.

    Opcode prediction is disabled with threaded code, since the latter allows
    the CPU to record separate branch prediction information for each
    opcode.
*/

#if defined(DYNAMIC_EXECUTION_PROFILE) || defined(USE_COMPUTED_GOTOS)
#define PREDICT(op)             if (0) goto PRED_##op
#define PREDICTED(op)           PRED_##op:
#define PREDICTED_WITH_ARG(op)  PRED_##op:
#else
#define PREDICT(op)             if (*next_instr == op) goto PRED_##op
#define PREDICTED(op)           PRED_##op: next_instr++
#define PREDICTED_WITH_ARG(op)  PRED_##op: oparg = PEEKARG(); next_instr += 3
#endif
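
/* Illustrative sketch (not part of ceval.c): how an opcode body uses the
   prediction macros.  After COMPARE_OP pushes its result it peeks at the
   next opcode and, on a hit, jumps straight into the predicted body,
   skipping the dispatch overhead described above.  (The real opcode
   bodies appear later in this file.) */
#if 0
        TARGET(COMPARE_OP)
            /* ... compute and push the comparison result ... */
            PREDICT(POP_JUMP_IF_FALSE);     /* likely next opcode */
            DISPATCH();

        PREDICTED_WITH_ARG(POP_JUMP_IF_FALSE);
        TARGET(POP_JUMP_IF_FALSE)
            /* ... pop the value and jump when it is false ... */
            DISPATCH();
#endif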
/* Stack manipulation macros */

/* The stack can grow at most MAXINT deep, as co_nlocals and
   co_stacksize are ints. */
#define STACK_LEVEL()     ((int)(stack_pointer - f->f_valuestack))
#define EMPTY()           (STACK_LEVEL() == 0)
#define TOP()             (stack_pointer[-1])
#define SECOND()          (stack_pointer[-2])
#define THIRD()           (stack_pointer[-3])
#define FOURTH()          (stack_pointer[-4])
#define SET_TOP(v)        (stack_pointer[-1] = (v))
#define SET_SECOND(v)     (stack_pointer[-2] = (v))
#define SET_THIRD(v)      (stack_pointer[-3] = (v))
#define SET_FOURTH(v)     (stack_pointer[-4] = (v))
#define BASIC_STACKADJ(n) (stack_pointer += n)
#define BASIC_PUSH(v)     (*stack_pointer++ = (v))
#define BASIC_POP()       (*--stack_pointer)

#ifdef LLTRACE
#define PUSH(v)         { (void)(BASIC_PUSH(v), \
                          lltrace && prtrace(TOP(), "push")); \
                          assert(STACK_LEVEL() <= co->co_stacksize); }
#define POP()           ((void)(lltrace && prtrace(TOP(), "pop")), \
                         BASIC_POP())
#define STACKADJ(n)     { (void)(BASIC_STACKADJ(n), \
                          lltrace && prtrace(TOP(), "stackadj")); \
                          assert(STACK_LEVEL() <= co->co_stacksize); }
#define EXT_POP(STACK_POINTER) ((void)(lltrace && \
                                prtrace((STACK_POINTER)[-1], "ext_pop")), \
                                *--(STACK_POINTER))
#else
#define PUSH(v)                BASIC_PUSH(v)
#define POP()                  BASIC_POP()
#define STACKADJ(n)            BASIC_STACKADJ(n)
#define EXT_POP(STACK_POINTER) (*--(STACK_POINTER))
#endif
/* Local variable macros */

#define GETLOCAL(i)     (fastlocals[i])

/* The SETLOCAL() macro must not DECREF the local variable in-place and
   then store the new value; it must copy the old value to a temporary
   value, then store the new value, and then DECREF the temporary value.
   This is because it is possible that during the DECREF the frame is
   accessed by other code (e.g. a __del__ method or gc.collect()) and the
   variable would be pointing to already-freed memory. */
#define SETLOCAL(i, value)      do { PyObject *tmp = GETLOCAL(i); \
                                     GETLOCAL(i) = value; \
                                     Py_XDECREF(tmp); } while (0)
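
/* Illustrative sketch (not part of ceval.c): the unsafe pattern the
   comment above warns against, next to the safe SETLOCAL() pattern. */
#if 0
    /* WRONG: a __del__ run by the DECREF can see the frame while
       fastlocals[i] still points at the object being freed. */
    Py_XDECREF(fastlocals[i]);
    fastlocals[i] = value;

    /* RIGHT: publish the new value first, then drop the old reference. */
    SETLOCAL(i, value);
#endif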
#define UNWIND_BLOCK(b) \
    while (STACK_LEVEL() > (b)->b_level) { \
        PyObject *v = POP(); \
        Py_XDECREF(v); \
    }

#define UNWIND_EXCEPT_HANDLER(b) \
    { \
        PyObject *type, *value, *traceback; \
        assert(STACK_LEVEL() >= (b)->b_level + 3); \
        while (STACK_LEVEL() > (b)->b_level + 3) { \
            value = POP(); \
            Py_XDECREF(value); \
        } \
        type = tstate->exc_type; \
        value = tstate->exc_value; \
        traceback = tstate->exc_traceback; \
        tstate->exc_type = POP(); \
        tstate->exc_value = POP(); \
        tstate->exc_traceback = POP(); \
        Py_XDECREF(type); \
        Py_XDECREF(value); \
        Py_XDECREF(traceback); \
    }

#define SAVE_EXC_STATE() \
    { \
        PyObject *type, *value, *traceback; \
        Py_XINCREF(tstate->exc_type); \
        Py_XINCREF(tstate->exc_value); \
        Py_XINCREF(tstate->exc_traceback); \
        type = f->f_exc_type; \
        value = f->f_exc_value; \
        traceback = f->f_exc_traceback; \
        f->f_exc_type = tstate->exc_type; \
        f->f_exc_value = tstate->exc_value; \
        f->f_exc_traceback = tstate->exc_traceback; \
        Py_XDECREF(type); \
        Py_XDECREF(value); \
        Py_XDECREF(traceback); \
    }

#define SWAP_EXC_STATE() \
    { \
        PyObject *tmp; \
        tmp = tstate->exc_type; \
        tstate->exc_type = f->f_exc_type; \
        f->f_exc_type = tmp; \
        tmp = tstate->exc_value; \
        tstate->exc_value = f->f_exc_value; \
        f->f_exc_value = tmp; \
        tmp = tstate->exc_traceback; \
        tstate->exc_traceback = f->f_exc_traceback; \
        f->f_exc_traceback = tmp; \
    }
    /* push frame */
    if (Py_EnterRecursiveCall(""))
        return NULL;

    tstate->frame = f;

    if (tstate->use_tracing) {
        if (tstate->c_tracefunc != NULL) {
            /* tstate->c_tracefunc, if defined, is a
               function that will be called on *every* entry
               to a code block.  Its return value, if not
               None, is a function that will be called at
               the start of each executed line of code.
               (Actually, the function must return itself
               in order to continue tracing.)  The trace
               functions are called with three arguments:
               a pointer to the current frame, a string
               indicating why the function is called, and
               an argument which depends on the situation.
               The global trace function is also called
               whenever an exception is detected. */
            if (call_trace_protected(tstate->c_tracefunc,
                                     tstate->c_traceobj,
                                     f, PyTrace_CALL, Py_None)) {
                /* Trace function raised an error */
                goto exit_eval_frame;
            }
        }
        if (tstate->c_profilefunc != NULL) {
            /* Similar for c_profilefunc, except it needn't
               return itself and isn't called for "line" events */
            if (call_trace_protected(tstate->c_profilefunc,
                                     tstate->c_profileobj,
                                     f, PyTrace_CALL, Py_None)) {
                /* Profile function raised an error */
                goto exit_eval_frame;
            }
        }
    }
    co = f->f_code;
    names = co->co_names;
    consts = co->co_consts;
    fastlocals = f->f_localsplus;
    freevars = f->f_localsplus + co->co_nlocals;
    first_instr = (unsigned char*) PyBytes_AS_STRING(co->co_code);
    /* An explanation is in order for the next line.

       f->f_lasti now refers to the index of the last instruction
       executed.  You might think this was obvious from the name, but
       this wasn't always true before 2.3!  PyFrame_New now sets
       f->f_lasti to -1 (i.e. the index *before* the first instruction)
       and YIELD_VALUE doesn't fiddle with f_lasti any more.  So this
       does work.  Promise.

       When the PREDICT() macros are enabled, some opcode pairs follow in
       direct succession without updating f->f_lasti.  A successful
       prediction effectively links the two codes together as if they
       were a single new opcode; accordingly, f->f_lasti will point to
       the first code in the pair (for instance, GET_ITER followed by
       FOR_ITER is effectively a single opcode and f->f_lasti will point
       to the beginning of the combined pair.)
    */
    next_instr = first_instr + f->f_lasti + 1;
    stack_pointer = f->f_stacktop;
    assert(stack_pointer != NULL);
    f->f_stacktop = NULL;       /* remains NULL unless yield suspends frame */

    if (f->f_code->co_flags & CO_GENERATOR) {
        if (f->f_exc_type != NULL && f->f_exc_type != Py_None) {
            /* We were in an except handler when we left,
               restore the exception state which was put aside
               (see YIELD_VALUE). */
            SWAP_EXC_STATE();
        }
        else {
            SAVE_EXC_STATE();
        }
    }
#ifdef LLTRACE
    lltrace = PyDict_GetItemString(f->f_globals, "__lltrace__") != NULL;
#endif
#if defined(Py_DEBUG) || defined(LLTRACE)
    filename = _PyUnicode_AsString(co->co_filename);
#endif

    why = WHY_NOT;
    err = 0;
    x = Py_None;        /* Not a reference, just anything non-NULL */
    w = NULL;
    if (throwflag) { /* support for generator.throw() */
        why = WHY_EXCEPTION;
        goto on_error;
    }
    for (;;) {
#ifdef WITH_TSC
        if (inst1 == 0) {
            /* Almost surely, the opcode executed a break
               or a continue, preventing inst1 from being set
               on the way out of the loop.
            */
            READ_TIMESTAMP(inst1);
            loop1 = inst1;
        }
        dump_tsc(opcode, ticked, inst0, inst1, loop0, loop1,
                 intr0, intr1);
        ticked = 0;
        inst1 = 0;
        intr0 = 0;
        intr1 = 0;
        READ_TIMESTAMP(loop0);
#endif
>= f
->f_valuestack
); /* else underflow */
1140 assert(STACK_LEVEL() <= co
->co_stacksize
); /* else overflow */
        /* Do periodic things.  Doing this every time through
           the loop would add too much overhead, so we do it
           only every Nth instruction.  We also do it if
           ``pendingcalls_to_do'' is set, i.e. when an asynchronous
           event needs attention (e.g. a signal handler or
           async I/O handler); see Py_AddPendingCall() and
           Py_MakePendingCalls() above. */

        if (--_Py_Ticker < 0) {
            if (*next_instr == SETUP_FINALLY) {
                /* Make the last opcode before
                   a try: finally: block uninterruptable. */
                goto fast_next_opcode;
            }
            _Py_Ticker = _Py_CheckInterval;
            tstate->tick_counter++;
#ifdef WITH_TSC
            ticked = 1;
#endif
            if (pendingcalls_to_do) {
                if (Py_MakePendingCalls() < 0) {
                    why = WHY_EXCEPTION;
                    goto on_error;
                }
                if (pendingcalls_to_do)
                    /* MakePendingCalls() didn't succeed.
                       Force early re-execution of this
                       "periodic" code, possibly after
                       a thread switch */
                    _Py_Ticker = 0;
            }
#ifdef WITH_THREAD
            if (interpreter_lock) {
                /* Give another thread a chance */

                if (PyThreadState_Swap(NULL) != tstate)
                    Py_FatalError("ceval: tstate mix-up");
                PyThread_release_lock(interpreter_lock);

                /* Other threads may run now */

                PyThread_acquire_lock(interpreter_lock, 1);
                if (PyThreadState_Swap(tstate) != NULL)
                    Py_FatalError("ceval: orphan tstate");

                /* Check for thread interrupts */

                if (tstate->async_exc != NULL) {
                    x = tstate->async_exc;
                    tstate->async_exc = NULL;
                    PyErr_SetNone(x);
                    Py_DECREF(x);
                    why = WHY_EXCEPTION;
                    goto on_error;
                }
            }
#endif
        }
    fast_next_opcode:
        f->f_lasti = INSTR_OFFSET();
        /* line-by-line tracing support */

        if (_Py_TracingPossible &&
            tstate->c_tracefunc != NULL && !tstate->tracing) {
            /* see maybe_call_line_trace
               for expository comments */
            f->f_stacktop = stack_pointer;

            err = maybe_call_line_trace(tstate->c_tracefunc,
                                        tstate->c_traceobj,
                                        f, &instr_lb, &instr_ub,
                                        &instr_prev);
            /* Reload possibly changed frame fields */
            JUMPTO(f->f_lasti);
            if (f->f_stacktop != NULL) {
                stack_pointer = f->f_stacktop;
                f->f_stacktop = NULL;
            }
            if (err) {
                /* trace function raised an exception */
                goto on_error;
            }
        }
        /* Extract opcode and argument */

        opcode = NEXTOP();
        oparg = 0;   /* allows oparg to be stored in a register because
            it doesn't have to be remembered across a full loop */
        if (HAS_ARG(opcode))
            oparg = NEXTARG();
    dispatch_opcode:
#ifdef DYNAMIC_EXECUTION_PROFILE
#ifdef DXPAIRS
        dxpairs[lastopcode][opcode]++;
        lastopcode = opcode;
#endif
        dxp[opcode]++;
#endif
#ifdef LLTRACE
        /* Instruction tracing */

        if (lltrace) {
            if (HAS_ARG(opcode)) {
                printf("%d: %d, %d\n",
                       f->f_lasti, opcode, oparg);
            }
            else {
                printf("%d: %d\n",
                       f->f_lasti, opcode);
            }
        }
#endif
        /* Main switch on opcode */
        READ_TIMESTAMP(inst0);

        switch (opcode) {

        /* BEWARE!
           It is essential that any operation that fails sets either
           x to NULL, err to nonzero, or why to anything but WHY_NOT,
           and that no operation that succeeds does this! */

        /* case STOP_CODE: this is an error! */
        TARGET(LOAD_FAST)
            x = GETLOCAL(oparg);
            if (x != NULL) {
                Py_INCREF(x);
                PUSH(x);
                FAST_DISPATCH();
            }
            format_exc_check_arg(PyExc_UnboundLocalError,
                                 UNBOUNDLOCAL_ERROR_MSG,
                                 PyTuple_GetItem(co->co_varnames, oparg));
            break;

        TARGET(LOAD_CONST)
            x = GETITEM(consts, oparg);
            Py_INCREF(x);
            PUSH(x);
            FAST_DISPATCH();

        PREDICTED_WITH_ARG(STORE_FAST);
        TARGET(STORE_FAST)
            v = POP();
            SETLOCAL(oparg, v);
            FAST_DISPATCH();
            } else if (oparg == 3) {

            Py_FatalError("invalid argument to DUP_TOPX"
                          " (bytecode corruption?)");
            /* Never returns, so don't bother to set why. */
        TARGET(UNARY_POSITIVE)
            x = PyNumber_Positive(v);
            if (x != NULL) DISPATCH();
            break;

        TARGET(UNARY_NEGATIVE)
            x = PyNumber_Negative(v);
            if (x != NULL) DISPATCH();
            break;

        TARGET(UNARY_NOT)
            err = PyObject_IsTrue(v);
            Py_INCREF(Py_False);

        TARGET(UNARY_INVERT)
            x = PyNumber_Invert(v);
            if (x != NULL) DISPATCH();
            break;

        TARGET(BINARY_POWER)
            x = PyNumber_Power(v, w, Py_None);
            if (x != NULL) DISPATCH();
            break;

        TARGET(BINARY_MULTIPLY)
            x = PyNumber_Multiply(v, w);
            if (x != NULL) DISPATCH();
            break;

        TARGET(BINARY_TRUE_DIVIDE)
            x = PyNumber_TrueDivide(v, w);
            if (x != NULL) DISPATCH();
            break;

        TARGET(BINARY_FLOOR_DIVIDE)
            x = PyNumber_FloorDivide(v, w);
            if (x != NULL) DISPATCH();
            break;

        TARGET(BINARY_MODULO)
            if (PyUnicode_CheckExact(v))
                x = PyUnicode_Format(v, w);
            else
                x = PyNumber_Remainder(v, w);
            if (x != NULL) DISPATCH();
            break;
        TARGET(BINARY_ADD)
            if (PyUnicode_CheckExact(v) &&
                PyUnicode_CheckExact(w)) {
                x = unicode_concatenate(v, w, f, next_instr);
                /* unicode_concatenate consumed the ref to v */
                goto skip_decref_vx;
            }
            else {
                x = PyNumber_Add(v, w);
            }
            if (x != NULL) DISPATCH();
            break;

        TARGET(BINARY_SUBTRACT)
            x = PyNumber_Subtract(v, w);
            if (x != NULL) DISPATCH();
            break;

        TARGET(BINARY_SUBSCR)
            x = PyObject_GetItem(v, w);
            if (x != NULL) DISPATCH();
            break;

        TARGET(BINARY_LSHIFT)
            x = PyNumber_Lshift(v, w);
            if (x != NULL) DISPATCH();
            break;

        TARGET(BINARY_RSHIFT)
            x = PyNumber_Rshift(v, w);
            if (x != NULL) DISPATCH();
            break;

        TARGET(BINARY_AND)
            x = PyNumber_And(v, w);
            if (x != NULL) DISPATCH();
            break;

        TARGET(BINARY_XOR)
            x = PyNumber_Xor(v, w);
            if (x != NULL) DISPATCH();
            break;

        TARGET(BINARY_OR)
            x = PyNumber_Or(v, w);
            if (x != NULL) DISPATCH();
            break;
        TARGET(LIST_APPEND)
            v = stack_pointer[-oparg];
            err = PyList_Append(v, w);
            if (err == 0) {
                PREDICT(JUMP_ABSOLUTE);
                DISPATCH();
            }
            break;

        TARGET(SET_ADD)
            v = stack_pointer[-oparg];
            err = PySet_Add(v, w);
            if (err == 0) {
                PREDICT(JUMP_ABSOLUTE);
                DISPATCH();
            }
            break;
        TARGET(INPLACE_POWER)
            x = PyNumber_InPlacePower(v, w, Py_None);
            if (x != NULL) DISPATCH();
            break;

        TARGET(INPLACE_MULTIPLY)
            x = PyNumber_InPlaceMultiply(v, w);
            if (x != NULL) DISPATCH();
            break;

        TARGET(INPLACE_TRUE_DIVIDE)
            x = PyNumber_InPlaceTrueDivide(v, w);
            if (x != NULL) DISPATCH();
            break;

        TARGET(INPLACE_FLOOR_DIVIDE)
            x = PyNumber_InPlaceFloorDivide(v, w);
            if (x != NULL) DISPATCH();
            break;

        TARGET(INPLACE_MODULO)
            x = PyNumber_InPlaceRemainder(v, w);
            if (x != NULL) DISPATCH();
            break;

        TARGET(INPLACE_ADD)
            if (PyUnicode_CheckExact(v) &&
                PyUnicode_CheckExact(w)) {
                x = unicode_concatenate(v, w, f, next_instr);
                /* unicode_concatenate consumed the ref to v */
            }
            else {
                x = PyNumber_InPlaceAdd(v, w);
            }
            if (x != NULL) DISPATCH();
            break;

        TARGET(INPLACE_SUBTRACT)
            x = PyNumber_InPlaceSubtract(v, w);
            if (x != NULL) DISPATCH();
            break;

        TARGET(INPLACE_LSHIFT)
            x = PyNumber_InPlaceLshift(v, w);
            if (x != NULL) DISPATCH();
            break;

        TARGET(INPLACE_RSHIFT)
            x = PyNumber_InPlaceRshift(v, w);
            if (x != NULL) DISPATCH();
            break;

        TARGET(INPLACE_AND)
            x = PyNumber_InPlaceAnd(v, w);
            if (x != NULL) DISPATCH();
            break;

        TARGET(INPLACE_XOR)
            x = PyNumber_InPlaceXor(v, w);
            if (x != NULL) DISPATCH();
            break;

        TARGET(INPLACE_OR)
            x = PyNumber_InPlaceOr(v, w);
            if (x != NULL) DISPATCH();
            break;
        TARGET(STORE_SUBSCR)
            err = PyObject_SetItem(v, w, u);
            if (err == 0) DISPATCH();
            break;

        TARGET(DELETE_SUBSCR)
            err = PyObject_DelItem(v, w);
            if (err == 0) DISPATCH();
            break;

        TARGET(PRINT_EXPR)
            w = PySys_GetObject("displayhook");
            if (w == NULL) {
                PyErr_SetString(PyExc_RuntimeError,
                                "lost sys.displayhook");
                break;
            }
            x = PyTuple_Pack(1, v);
            w = PyEval_CallObject(w, x);
#ifdef CASE_TOO_BIG
        default: switch (opcode) {
#endif

        TARGET(RAISE_VARARGS)
            switch (oparg) {
            case 2:
                v = POP(); /* cause */
            case 1:
                w = POP(); /* exc */
            case 0: /* Fallthrough */
                why = do_raise(w, v);
                break;
            default:
                PyErr_SetString(PyExc_SystemError,
                                "bad RAISE_VARARGS oparg");
                why = WHY_EXCEPTION;
                break;
            }
            break;
        TARGET(STORE_LOCALS)

        TARGET(RETURN_VALUE)
            retval = POP();
            why = WHY_RETURN;
            goto fast_block_end;

        TARGET(YIELD_VALUE)
            retval = POP();
            f->f_stacktop = stack_pointer;
            why = WHY_YIELD;
            /* Put aside the current exception state and restore
               that of the calling frame. This only serves when
               "yield" is used inside an except handler. */
            SWAP_EXC_STATE();
            goto fast_yield;

        TARGET(POP_EXCEPT)
            {
                PyTryBlock *b = PyFrame_BlockPop(f);
                if (b->b_type != EXCEPT_HANDLER) {
                    PyErr_SetString(PyExc_SystemError,
                                    "popped block is not an except handler");
                    why = WHY_EXCEPTION;
                    break;
                }
                UNWIND_EXCEPT_HANDLER(b);
            }
            DISPATCH();

        TARGET(POP_BLOCK)
            {
                PyTryBlock *b = PyFrame_BlockPop(f);
                UNWIND_BLOCK(b);
            }
            DISPATCH();

        PREDICTED(END_FINALLY);
        TARGET(END_FINALLY)
            v = POP();
            if (PyLong_Check(v)) {
                why = (enum why_code) PyLong_AS_LONG(v);
                assert(why != WHY_YIELD);
                if (why == WHY_RETURN ||
                    why == WHY_CONTINUE)
                    retval = POP();
                if (why == WHY_SILENCED) {
                    /* An exception was silenced by 'with', we must
                       manually unwind the EXCEPT_HANDLER block which was
                       created when the exception was caught, otherwise
                       the stack will be in an inconsistent state. */
                    PyTryBlock *b = PyFrame_BlockPop(f);
                    if (b->b_type != EXCEPT_HANDLER) {
                        PyErr_SetString(PyExc_SystemError,
                                        "popped block is not an except handler");
                        why = WHY_EXCEPTION;
                        break;
                    }
                    UNWIND_EXCEPT_HANDLER(b);
                    why = WHY_NOT;
                }
            }
            else if (PyExceptionClass_Check(v)) {
                PyErr_Restore(v, w, u);
                why = WHY_RERAISE;
            }
            else if (v != Py_None) {
                PyErr_SetString(PyExc_SystemError,
                                "'finally' pops bad exception");
                why = WHY_EXCEPTION;
            }
            break;
        TARGET(LOAD_BUILD_CLASS)
            x = PyDict_GetItemString(f->f_builtins,
                                     "__build_class__");
            if (x == NULL) {
                PyErr_SetString(PyExc_ImportError,
                                "__build_class__ not found");
                break;
            }
        TARGET(STORE_NAME)
            w = GETITEM(names, oparg);
            v = POP();
            if ((x = f->f_locals) != NULL) {
                if (PyDict_CheckExact(x))
                    err = PyDict_SetItem(x, w, v);
                else
                    err = PyObject_SetItem(x, w, v);
                Py_DECREF(v);
                if (err == 0) DISPATCH();
                break;
            }
            PyErr_Format(PyExc_SystemError,
                         "no locals found when storing %R", w);
            break;

        TARGET(DELETE_NAME)
            w = GETITEM(names, oparg);
            if ((x = f->f_locals) != NULL) {
                if ((err = PyObject_DelItem(x, w)) != 0)
                    format_exc_check_arg(PyExc_NameError,
                                         NAME_ERROR_MSG,
                                         w);
                break;
            }
            PyErr_Format(PyExc_SystemError,
                         "no locals when deleting %R", w);
            break;
        PREDICTED_WITH_ARG(UNPACK_SEQUENCE);
        TARGET(UNPACK_SEQUENCE)
            v = POP();
            if (PyTuple_CheckExact(v) &&
                PyTuple_GET_SIZE(v) == oparg) {
                PyObject **items = \
                    ((PyTupleObject *)v)->ob_item;
            } else if (PyList_CheckExact(v) &&
                       PyList_GET_SIZE(v) == oparg) {
                PyObject **items = \
                    ((PyListObject *)v)->ob_item;
            } else if (unpack_iterable(v, oparg, -1,
                                       stack_pointer + oparg)) {
                stack_pointer += oparg;
            } else {
                /* unpack_iterable() raised an exception */
                why = WHY_EXCEPTION;
            }
            Py_DECREF(v);
            break;

        TARGET(UNPACK_EX)
        {
            int totalargs = 1 + (oparg & 0xFF) + (oparg >> 8);
            v = POP();

            if (unpack_iterable(v, oparg & 0xFF, oparg >> 8,
                                stack_pointer + totalargs)) {
                stack_pointer += totalargs;
            } else {
                why = WHY_EXCEPTION;
            }
            Py_DECREF(v);
            break;
        }
        TARGET(STORE_ATTR)
            w = GETITEM(names, oparg);
            err = PyObject_SetAttr(v, w, u); /* v.w = u */
            if (err == 0) DISPATCH();
            break;

        TARGET(DELETE_ATTR)
            w = GETITEM(names, oparg);
            err = PyObject_SetAttr(v, w,
                                   (PyObject *)NULL); /* del v.w */
            break;

        TARGET(STORE_GLOBAL)
            w = GETITEM(names, oparg);
            err = PyDict_SetItem(f->f_globals, w, v);
            if (err == 0) DISPATCH();
            break;

        TARGET(DELETE_GLOBAL)
            w = GETITEM(names, oparg);
            if ((err = PyDict_DelItem(f->f_globals, w)) != 0)
                format_exc_check_arg(
                    PyExc_NameError, GLOBAL_NAME_ERROR_MSG, w);
            break;
        TARGET(LOAD_NAME)
            w = GETITEM(names, oparg);
            if ((v = f->f_locals) == NULL) {
                PyErr_Format(PyExc_SystemError,
                             "no locals when loading %R", w);
                why = WHY_EXCEPTION;
                break;
            }
            if (PyDict_CheckExact(v)) {
                x = PyDict_GetItem(v, w);
            }
            else {
                x = PyObject_GetItem(v, w);
                if (x == NULL && PyErr_Occurred()) {
                    if (!PyErr_ExceptionMatches(
                            PyExc_KeyError))
                        break;
                    PyErr_Clear();
                }
            }
            if (x == NULL) {
                x = PyDict_GetItem(f->f_globals, w);
                if (x == NULL) {
                    x = PyDict_GetItem(f->f_builtins, w);
                    if (x == NULL) {
                        format_exc_check_arg(
                            PyExc_NameError,
                            NAME_ERROR_MSG, w);
                        break;
                    }
                }
            }
        TARGET(LOAD_GLOBAL)
            w = GETITEM(names, oparg);
            if (PyUnicode_CheckExact(w)) {
                /* Inline the PyDict_GetItem() calls.
                   WARNING: this is an extreme speed hack.
                   Do not try this at home. */
                long hash = ((PyUnicodeObject *)w)->hash;
                if (hash != -1) {
                    PyDictObject *d;
                    PyDictEntry *e;
                    d = (PyDictObject *)(f->f_globals);
                    e = d->ma_lookup(d, w, hash);
                    if (e == NULL) {
                        x = NULL;
                        goto load_global_error;
                    }
                    x = e->me_value;
                    if (x != NULL) {
                        Py_INCREF(x);
                        PUSH(x);
                        DISPATCH();
                    }
                    d = (PyDictObject *)(f->f_builtins);
                    e = d->ma_lookup(d, w, hash);
                    if (e == NULL) {
                        x = NULL;
                        goto load_global_error;
                    }
                    x = e->me_value;
                    if (x != NULL) {
                        Py_INCREF(x);
                        PUSH(x);
                        DISPATCH();
                    }
                    goto load_global_error;
                }
            }
            /* This is the un-inlined version of the code above */
            x = PyDict_GetItem(f->f_globals, w);
            if (x == NULL) {
                x = PyDict_GetItem(f->f_builtins, w);
                if (x == NULL) {
                  load_global_error:
                    format_exc_check_arg(
                        PyExc_NameError,
                        GLOBAL_NAME_ERROR_MSG, w);
                    break;
                }
            }
            Py_INCREF(x);
            PUSH(x);
            DISPATCH();
        TARGET(DELETE_FAST)
            x = GETLOCAL(oparg);
            if (x != NULL) {
                SETLOCAL(oparg, NULL);
                DISPATCH();
            }
            format_exc_check_arg(
                PyExc_UnboundLocalError,
                UNBOUNDLOCAL_ERROR_MSG,
                PyTuple_GetItem(co->co_varnames, oparg)
                );
            break;

        TARGET(LOAD_CLOSURE)
            x = freevars[oparg];
            Py_INCREF(x);
            PUSH(x);
            if (x != NULL) DISPATCH();
            break;

        TARGET(LOAD_DEREF)
            x = freevars[oparg];
            w = PyCell_Get(x);
            if (w != NULL) {
                PUSH(w);
                DISPATCH();
            }
            err = -1;
            /* Don't stomp existing exception */
            if (PyErr_Occurred())
                break;
            if (oparg < PyTuple_GET_SIZE(co->co_cellvars)) {
                v = PyTuple_GET_ITEM(co->co_cellvars,
                                     oparg);
                format_exc_check_arg(
                    PyExc_UnboundLocalError,
                    UNBOUNDLOCAL_ERROR_MSG,
                    v);
            }
            else {
                v = PyTuple_GET_ITEM(co->co_freevars, oparg -
                                     PyTuple_GET_SIZE(co->co_cellvars));
                format_exc_check_arg(PyExc_NameError,
                                     UNBOUNDFREE_ERROR_MSG, v);
            }
            break;

        TARGET(STORE_DEREF)
            w = POP();
            x = freevars[oparg];
            PyCell_Set(x, w);
            Py_DECREF(w);
            DISPATCH();
        TARGET(BUILD_TUPLE)
            x = PyTuple_New(oparg);
            if (x != NULL) {
                for (; --oparg >= 0;) {
                    w = POP();
                    PyTuple_SET_ITEM(x, oparg, w);
                }
                PUSH(x);
                DISPATCH();
            }
            break;

        TARGET(BUILD_LIST)
            x = PyList_New(oparg);
            if (x != NULL) {
                for (; --oparg >= 0;) {
                    w = POP();
                    PyList_SET_ITEM(x, oparg, w);
                }
                PUSH(x);
                DISPATCH();
            }
            break;

        TARGET(BUILD_SET)
            x = PySet_New(NULL);
            if (x != NULL) {
                for (; --oparg >= 0;) {
                    w = POP();
                    err = PySet_Add(x, w);
                    Py_DECREF(w);
                }
                PUSH(x);
                DISPATCH();
            }
            break;

        TARGET(BUILD_MAP)
            x = _PyDict_NewPresized((Py_ssize_t)oparg);
            PUSH(x);
            if (x != NULL) DISPATCH();
            break;

        TARGET(STORE_MAP)
            w = TOP();     /* key */
            u = SECOND();  /* value */
            v = THIRD();   /* dict */
            STACKADJ(-2);
            assert (PyDict_CheckExact(v));
            err = PyDict_SetItem(v, w, u);  /* v[w] = u */
            Py_DECREF(u);
            Py_DECREF(w);
            if (err == 0) DISPATCH();
            break;

        TARGET(MAP_ADD)
            w = TOP();     /* key */
            u = SECOND();  /* value */
            STACKADJ(-2);
            v = stack_pointer[-oparg];  /* dict */
            assert (PyDict_CheckExact(v));
            err = PyDict_SetItem(v, w, u);  /* v[w] = u */
            Py_DECREF(u);
            Py_DECREF(w);
            if (err == 0) {
                PREDICT(JUMP_ABSOLUTE);
                DISPATCH();
            }
            break;
        TARGET(LOAD_ATTR)
            w = GETITEM(names, oparg);
            v = TOP();
            x = PyObject_GetAttr(v, w);
            Py_DECREF(v);
            SET_TOP(x);
            if (x != NULL) DISPATCH();
            break;

        TARGET(COMPARE_OP)
            w = POP();
            v = TOP();
            x = cmp_outcome(oparg, v, w);
            Py_DECREF(v);
            Py_DECREF(w);
            SET_TOP(x);
            if (x == NULL) break;
            PREDICT(POP_JUMP_IF_FALSE);
            PREDICT(POP_JUMP_IF_TRUE);
            DISPATCH();
        TARGET(IMPORT_NAME)
            w = GETITEM(names, oparg);
            x = PyDict_GetItemString(f->f_builtins, "__import__");
            if (x == NULL) {
                PyErr_SetString(PyExc_ImportError,
                                "__import__ not found");
                break;
            }
            Py_INCREF(x);
            v = POP();
            u = TOP();
            if (PyLong_AsLong(u) != -1 || PyErr_Occurred())
                w = PyTuple_Pack(5,
                                 w,
                                 f->f_globals,
                                 f->f_locals == NULL ?
                                     Py_None : f->f_locals,
                                 v,
                                 u);
            else
                w = PyTuple_Pack(4,
                                 w,
                                 f->f_globals,
                                 f->f_locals == NULL ?
                                     Py_None : f->f_locals,
                                 v);
            READ_TIMESTAMP(intr0);
            v = x;
            x = PyEval_CallObject(v, w);
            Py_DECREF(v);
            READ_TIMESTAMP(intr1);
            Py_DECREF(w);
            SET_TOP(x);
            if (x != NULL) DISPATCH();
            break;

        TARGET(IMPORT_STAR)
            v = POP();
            PyFrame_FastToLocals(f);
            if ((x = f->f_locals) == NULL) {
                PyErr_SetString(PyExc_SystemError,
                                "no locals found during 'import *'");
                break;
            }
            READ_TIMESTAMP(intr0);
            err = import_all_from(x, v);
            READ_TIMESTAMP(intr1);
            PyFrame_LocalsToFast(f, 0);
            Py_DECREF(v);
            if (err == 0) DISPATCH();
            break;

        TARGET(IMPORT_FROM)
            w = GETITEM(names, oparg);
            v = TOP();
            READ_TIMESTAMP(intr0);
            x = import_from(v, w);
            READ_TIMESTAMP(intr1);
            PUSH(x);
            if (x != NULL) DISPATCH();
            break;
        TARGET(JUMP_FORWARD)
            JUMPBY(oparg);
            FAST_DISPATCH();

        PREDICTED_WITH_ARG(POP_JUMP_IF_FALSE);
        TARGET(POP_JUMP_IF_FALSE)
            w = POP();
            if (w == Py_False) {
                Py_DECREF(w);
                JUMPTO(oparg);
                FAST_DISPATCH();
            }
            err = PyObject_IsTrue(w);
            Py_DECREF(w);
            DISPATCH();

        PREDICTED_WITH_ARG(POP_JUMP_IF_TRUE);
        TARGET(POP_JUMP_IF_TRUE)
            w = POP();
            if (w == Py_False) {
                Py_DECREF(w);
                FAST_DISPATCH();
            }
            err = PyObject_IsTrue(w);
            Py_DECREF(w);
            DISPATCH();

        TARGET(JUMP_IF_FALSE_OR_POP)
            w = TOP();
            if (w == Py_False) {
                JUMPTO(oparg);
                FAST_DISPATCH();
            }
            err = PyObject_IsTrue(w);
            DISPATCH();

        TARGET(JUMP_IF_TRUE_OR_POP)
            w = TOP();
            if (w == Py_False) {
                STACKADJ(-1);
                Py_DECREF(w);
                FAST_DISPATCH();
            }
            err = PyObject_IsTrue(w);
            if (err > 0) {
                err = 0;
                JUMPTO(oparg);
            }
            else if (err == 0) {
                STACKADJ(-1);
                Py_DECREF(w);
            }
            else
                break;
            DISPATCH();

        PREDICTED_WITH_ARG(JUMP_ABSOLUTE);
        TARGET(JUMP_ABSOLUTE)
            JUMPTO(oparg);
#if FAST_LOOPS
            /* Enabling this path speeds-up all while and for-loops by bypassing
               the per-loop checks for signals.  By default, this should be turned-off
               because it prevents detection of a control-break in tight loops like
               "while 1: pass".  Compile with this option turned-on when you need
               the speed-up and do not need break checking inside tight loops (ones
               that contain only instructions ending with FAST_DISPATCH).
            */
            FAST_DISPATCH();
#else
            DISPATCH();
#endif
        TARGET(GET_ITER)
            /* before: [obj]; after [getiter(obj)] */
            v = TOP();
            x = PyObject_GetIter(v);
            Py_DECREF(v);
            if (x != NULL) {
                SET_TOP(x);
                PREDICT(FOR_ITER);
                DISPATCH();
            }
            STACKADJ(-1);
            break;

        PREDICTED_WITH_ARG(FOR_ITER);
        TARGET(FOR_ITER)
            /* before: [iter]; after: [iter, iter()] *or* [] */
            v = TOP();
            x = (*v->ob_type->tp_iternext)(v);
            if (x != NULL) {
                PUSH(x);
                PREDICT(STORE_FAST);
                PREDICT(UNPACK_SEQUENCE);
                DISPATCH();
            }
            if (PyErr_Occurred()) {
                if (!PyErr_ExceptionMatches(
                                PyExc_StopIteration))
                    break;
                PyErr_Clear();
            }
            /* iterator ended normally */
            x = v = POP();
            Py_DECREF(v);
            DISPATCH();

        TARGET(BREAK_LOOP)
            why = WHY_BREAK;
            goto fast_block_end;

        TARGET(CONTINUE_LOOP)
            retval = PyLong_FromLong(oparg);
            if (!retval) {
                x = NULL;
                break;
            }
            why = WHY_CONTINUE;
            goto fast_block_end;
        TARGET_WITH_IMPL(SETUP_LOOP, _setup_finally)
        TARGET_WITH_IMPL(SETUP_EXCEPT, _setup_finally)
        TARGET(SETUP_FINALLY)
        _setup_finally:
            /* NOTE: If you add any new block-setup opcodes that
               are not try/except/finally handlers, you may need
               to update the PyGen_NeedsFinalizing() function.
               */
            PyFrame_BlockSetup(f, opcode, INSTR_OFFSET() + oparg,
                               STACK_LEVEL());
            DISPATCH();
        TARGET(WITH_CLEANUP)
        {
            /* At the top of the stack are 1-3 values indicating
               how/why we entered the finally clause:
               - TOP = None
               - (TOP, SECOND) = (WHY_{RETURN,CONTINUE}), retval
               - TOP = WHY_*; no retval below it
               - (TOP, SECOND, THIRD) = exc_info()
               Below them is EXIT, the context.__exit__ bound method.
               In the last case, we must call
                 EXIT(TOP, SECOND, THIRD)
               otherwise we must call
                 EXIT(None, None, None)

               In all cases, we remove EXIT from the stack, leaving
               the rest in the same order.

               In addition, if the stack represents an exception,
               *and* the function call returns a 'true' value, we
               "zap" this information, to prevent END_FINALLY from
               re-raising the exception.  (But non-local gotos
               should still be resumed.)
            */

            PyObject *exit_func = POP();
            if (u == Py_None) {
                v = w = Py_None;
            }
            else if (PyLong_Check(u)) {
                u = v = w = Py_None;
            }
            /* XXX Not the fastest way to call it... */
            x = PyObject_CallFunctionObjArgs(exit_func, u, v, w,
                                             NULL);
            Py_DECREF(exit_func);
            if (x == NULL)
                break; /* Go to error exit */

            err = PyObject_IsTrue(x);
            if (err < 0)
                break; /* Go to error exit */
            if (err > 0) {
                /* There was an exception and a True return */
                SET_TOP(PyLong_FromLong((long) WHY_SILENCED));
            }
            PREDICT(END_FINALLY);
            break;
        }
)
2528 x
= call_function(&sp
, oparg
, &intr0
, &intr1
);
2530 x
= call_function(&sp
, oparg
);
2539 TARGET_WITH_IMPL(CALL_FUNCTION_VAR
, _call_function_var_kw
)
2540 TARGET_WITH_IMPL(CALL_FUNCTION_KW
, _call_function_var_kw
)
2541 TARGET(CALL_FUNCTION_VAR_KW
)
2542 _call_function_var_kw
:
2544 int na
= oparg
& 0xff;
2545 int nk
= (oparg
>>8) & 0xff;
2546 int flags
= (opcode
- CALL_FUNCTION
) & 3;
2547 int n
= na
+ 2 * nk
;
2548 PyObject
**pfunc
, *func
, **sp
;
2550 if (flags
& CALL_FLAG_VAR
)
2552 if (flags
& CALL_FLAG_KW
)
2554 pfunc
= stack_pointer
- n
- 1;
2557 if (PyMethod_Check(func
)
2558 && PyMethod_GET_SELF(func
) != NULL
) {
2559 PyObject
*self
= PyMethod_GET_SELF(func
);
2561 func
= PyMethod_GET_FUNCTION(func
);
2570 READ_TIMESTAMP(intr0
);
2571 x
= ext_do_call(func
, &sp
, flags
, na
, nk
);
2572 READ_TIMESTAMP(intr1
);
2576 while (stack_pointer
> pfunc
) {
2586 TARGET_WITH_IMPL(MAKE_CLOSURE
, _make_function
)
2587 TARGET(MAKE_FUNCTION
)
2590 int posdefaults
= oparg
& 0xff;
2591 int kwdefaults
= (oparg
>>8) & 0xff;
2592 int num_annotations
= (oparg
>> 16) & 0x7fff;
2594 v
= POP(); /* code object */
2595 x
= PyFunction_New(v
, f
->f_globals
);
2598 if (x
!= NULL
&& opcode
== MAKE_CLOSURE
) {
2600 if (PyFunction_SetClosure(x
, v
) != 0) {
2601 /* Can't happen unless bytecode is corrupt. */
2602 why
= WHY_EXCEPTION
;
2607 if (x
!= NULL
&& num_annotations
> 0) {
2609 u
= POP(); /* names of args with annotations */
2616 name_ix
= PyTuple_Size(u
);
2617 assert(num_annotations
== name_ix
+1);
2618 while (name_ix
> 0) {
2620 t
= PyTuple_GET_ITEM(u
, name_ix
);
2622 /* XXX(nnorwitz): check for errors */
2623 PyDict_SetItem(v
, t
, w
);
2627 if (PyFunction_SetAnnotations(x
, v
) != 0) {
2628 /* Can't happen unless
2629 PyFunction_SetAnnotations changes. */
2630 why
= WHY_EXCEPTION
;
2636 /* XXX Maybe this should be a separate opcode? */
2637 if (x
!= NULL
&& posdefaults
> 0) {
2638 v
= PyTuple_New(posdefaults
);
2644 while (--posdefaults
>= 0) {
2646 PyTuple_SET_ITEM(v
, posdefaults
, w
);
2648 if (PyFunction_SetDefaults(x
, v
) != 0) {
2649 /* Can't happen unless
2650 PyFunction_SetDefaults changes. */
2651 why
= WHY_EXCEPTION
;
2655 if (x
!= NULL
&& kwdefaults
> 0) {
2662 while (--kwdefaults
>= 0) {
2663 w
= POP(); /* default value */
2664 u
= POP(); /* kw only arg name */
2665 /* XXX(nnorwitz): check for errors */
2666 PyDict_SetItem(v
, u
, w
);
2670 if (PyFunction_SetKwDefaults(x
, v
) != 0) {
2671 /* Can't happen unless
2672 PyFunction_SetKwDefaults changes. */
2673 why
= WHY_EXCEPTION
;
2688 x
= PySlice_New(u
, v
, w
);
2693 if (x
!= NULL
) DISPATCH();
2696 TARGET(EXTENDED_ARG
)
2698 oparg
= oparg
<<16 | NEXTARG();
2699 goto dispatch_opcode
;
2701 #ifdef USE_COMPUTED_GOTOS
2706 "XXX lineno: %d, opcode: %d\n",
2707 PyCode_Addr2Line(f
->f_code
, f
->f_lasti
),
2709 PyErr_SetString(PyExc_SystemError
, "unknown opcode");
2710 why
= WHY_EXCEPTION
;
2721 READ_TIMESTAMP(inst1
);
2723 /* Quickly continue if no error occurred */
2725 if (why
== WHY_NOT
) {
2726 if (err
== 0 && x
!= NULL
) {
2728 /* This check is expensive! */
2729 if (PyErr_Occurred())
2731 "XXX undetected error\n");
2734 READ_TIMESTAMP(loop1
);
2735 continue; /* Normal, fast path */
2740 why
= WHY_EXCEPTION
;
        /* Double-check exception status */
#ifdef CHECKEXC
        if (why == WHY_EXCEPTION || why == WHY_RERAISE) {
            if (!PyErr_Occurred()) {
                PyErr_SetString(PyExc_SystemError,
                    "error return without exception set");
                why = WHY_EXCEPTION;
            }
        }
        else {
            /* This check is expensive! */
            if (PyErr_Occurred()) {
                char buf[129];
                sprintf(buf, "Stack unwind with exception "
                    "set and why=%d", why);
                Py_FatalError(buf);
            }
        }
#endif

        /* Log traceback info if this is a real exception */

        if (why == WHY_EXCEPTION) {
            PyTraceBack_Here(f);

            if (tstate->c_tracefunc != NULL)
                call_exc_trace(tstate->c_tracefunc,
                               tstate->c_traceobj, f);
        }

        /* For the rest, treat WHY_RERAISE as WHY_EXCEPTION */

        if (why == WHY_RERAISE)
            why = WHY_EXCEPTION;
        /* Unwind stacks if a (pseudo) exception occurred */

fast_block_end:
        while (why != WHY_NOT && f->f_iblock > 0) {
            /* Peek at the current block. */
            PyTryBlock *b = PyFrame_BlockPop(f);

            assert(why != WHY_YIELD);
            if (b->b_type == SETUP_LOOP && why == WHY_CONTINUE) {
                /* For a continue inside a try block,
                   don't pop the block for the loop. */
                PyFrame_BlockSetup(f, b->b_type, b->b_handler,
                                   b->b_level);
                why = WHY_NOT;
                JUMPTO(PyLong_AS_LONG(retval));
                Py_DECREF(retval);
                break;
            }
            if (b->b_type == EXCEPT_HANDLER) {
                UNWIND_EXCEPT_HANDLER(b);
                continue;
            }
            UNWIND_BLOCK(b);
            if (b->b_type == SETUP_LOOP && why == WHY_BREAK) {
                why = WHY_NOT;
                JUMPTO(b->b_handler);
                break;
            }
            if (why == WHY_EXCEPTION && (b->b_type == SETUP_EXCEPT
                || b->b_type == SETUP_FINALLY)) {
                PyObject *exc, *val, *tb;
                int handler = b->b_handler;
                /* Beware, this invalidates all b->b_* fields */
                PyFrame_BlockSetup(f, EXCEPT_HANDLER, -1, STACK_LEVEL());
                PUSH(tstate->exc_traceback);
                PUSH(tstate->exc_value);
                if (tstate->exc_type != NULL) {
                    PUSH(tstate->exc_type);
                }
                else {
                    Py_INCREF(Py_None);
                    PUSH(Py_None);
                }
                PyErr_Fetch(&exc, &val, &tb);
                /* Make the raw exception data
                   available to the handler,
                   so a program can emulate the
                   Python main loop. */
                PyErr_NormalizeException(
                    &exc, &val, &tb);
                PyException_SetTraceback(val, tb);
                Py_INCREF(exc);
                tstate->exc_type = exc;
                Py_INCREF(val);
                tstate->exc_value = val;
                tstate->exc_traceback = tb;
                if (tb == NULL)
                    tb = Py_None;
                Py_INCREF(tb);
                PUSH(tb);
                PUSH(val);
                PUSH(exc);
                why = WHY_NOT;
                JUMPTO(handler);
                break;
            }
            if (b->b_type == SETUP_FINALLY) {
                if (why & (WHY_RETURN | WHY_CONTINUE))
                    PUSH(retval);
                PUSH(PyLong_FromLong((long)why));
                why = WHY_NOT;
                JUMPTO(b->b_handler);
                break;
            }
        } /* unwind stack */
        /* End the loop if we still have an error (or return) */

        if (why != WHY_NOT)
            break;
        READ_TIMESTAMP(loop1);

    } /* main loop */

    assert(why != WHY_YIELD);
    /* Pop remaining stack entries. */
    while (!EMPTY()) {
        v = POP();
        Py_XDECREF(v);
    }

    if (why != WHY_RETURN)
        retval = NULL;

fast_yield:
    if (tstate->use_tracing) {
        if (tstate->c_tracefunc) {
            if (why == WHY_RETURN || why == WHY_YIELD) {
                if (call_trace(tstate->c_tracefunc,
                               tstate->c_traceobj, f,
                               PyTrace_RETURN, retval)) {
                    Py_XDECREF(retval);
                    retval = NULL;
                    why = WHY_EXCEPTION;
                }
            }
            else if (why == WHY_EXCEPTION) {
                call_trace_protected(tstate->c_tracefunc,
                                     tstate->c_traceobj, f,
                                     PyTrace_RETURN, NULL);
            }
        }
        if (tstate->c_profilefunc) {
            if (why == WHY_EXCEPTION)
                call_trace_protected(tstate->c_profilefunc,
                                     tstate->c_profileobj, f,
                                     PyTrace_RETURN, NULL);
            else if (call_trace(tstate->c_profilefunc,
                                tstate->c_profileobj, f,
                                PyTrace_RETURN, retval)) {
                Py_XDECREF(retval);
                retval = NULL;
                why = WHY_EXCEPTION;
            }
        }
    }

    /* pop frame */
exit_eval_frame:
    Py_LeaveRecursiveCall();
    tstate->frame = f->f_back;

    return retval;
}
/* This is gonna seem *real weird*, but if you put some other code between
   PyEval_EvalFrame() and PyEval_EvalCodeEx() you will need to adjust
   the test in the if statements in Misc/gdbinit (pystack and pystackv). */
PyObject *
PyEval_EvalCodeEx(PyCodeObject *co, PyObject *globals, PyObject *locals,
                  PyObject **args, int argcount, PyObject **kws, int kwcount,
                  PyObject **defs, int defcount, PyObject *kwdefs, PyObject *closure)
{
    register PyFrameObject *f;
    register PyObject *retval = NULL;
    register PyObject **fastlocals, **freevars;
    PyThreadState *tstate = PyThreadState_GET();
    PyObject *x, *u;

    if (globals == NULL) {
        PyErr_SetString(PyExc_SystemError,
                        "PyEval_EvalCodeEx: NULL globals");
        return NULL;
    }

    assert(tstate != NULL);
    assert(globals != NULL);
    f = PyFrame_New(tstate, co, globals, locals);
    if (f == NULL)
        return NULL;

    fastlocals = f->f_localsplus;
    freevars = f->f_localsplus + co->co_nlocals;
    if (co->co_argcount > 0 ||
        co->co_kwonlyargcount > 0 ||
        co->co_flags & (CO_VARARGS | CO_VARKEYWORDS)) {
        int i;
        int n = argcount;
        PyObject *kwdict = NULL;
        if (co->co_flags & CO_VARKEYWORDS) {
            kwdict = PyDict_New();
            if (kwdict == NULL)
                goto fail;
            i = co->co_argcount + co->co_kwonlyargcount;
            if (co->co_flags & CO_VARARGS)
                i++;
            SETLOCAL(i, kwdict);
        }
        if (argcount > co->co_argcount) {
            if (!(co->co_flags & CO_VARARGS)) {
                PyErr_Format(PyExc_TypeError,
                             "%U() takes %s %d "
                             "%spositional argument%s (%d given)",
                             co->co_name,
                             defcount ? "at most" : "exactly",
                             co->co_argcount,
                             kwcount ? "non-keyword " : "",
                             co->co_argcount == 1 ? "" : "s",
                             argcount);
                goto fail;
            }
            n = co->co_argcount;
        }
        for (i = 0; i < n; i++) {
            x = args[i];
            Py_INCREF(x);
            SETLOCAL(i, x);
        }
        if (co->co_flags & CO_VARARGS) {
            u = PyTuple_New(argcount - n);
            if (u == NULL)
                goto fail;
            SETLOCAL(co->co_argcount + co->co_kwonlyargcount, u);
            for (i = n; i < argcount; i++) {
                x = args[i];
                Py_INCREF(x);
                PyTuple_SET_ITEM(u, i-n, x);
            }
        }
2992 for (i
= 0; i
< kwcount
; i
++) {
2993 PyObject
**co_varnames
;
2994 PyObject
*keyword
= kws
[2*i
];
2995 PyObject
*value
= kws
[2*i
+ 1];
2997 if (keyword
== NULL
|| !PyUnicode_Check(keyword
)) {
2998 PyErr_Format(PyExc_TypeError
,
2999 "%U() keywords must be strings",
            /* Speed hack: do raw pointer compares. As names are
               normally interned this should almost always hit.
               (A standalone sketch of this interning trick follows
               the argument-processing code below.) */
            co_varnames = PySequence_Fast_ITEMS(co->co_varnames);
            for (j = 0;
                 j < co->co_argcount + co->co_kwonlyargcount;
                 j++) {
                PyObject *nm = co_varnames[j];
                if (nm == keyword)
                    goto kw_found;
            }
            /* Slow fallback, just in case */
            for (j = 0;
                 j < co->co_argcount + co->co_kwonlyargcount;
                 j++) {
                PyObject *nm = co_varnames[j];
                int cmp = PyObject_RichCompareBool(
                    keyword, nm, Py_EQ);
                if (cmp > 0)
                    goto kw_found;
                else if (cmp < 0)
                    goto fail;
            }
            /* Check errors from Compare */
            if (PyErr_Occurred())
                goto fail;
            if (j >= co->co_argcount + co->co_kwonlyargcount) {
                if (kwdict == NULL) {
                    PyErr_Format(PyExc_TypeError,
                                 "%U() got an unexpected "
                                 "keyword argument '%S'",
                                 co->co_name, keyword);
                    goto fail;
                }
                PyDict_SetItem(kwdict, keyword, value);
                continue;
            }
        kw_found:
            if (GETLOCAL(j) != NULL) {
                PyErr_Format(PyExc_TypeError,
                             "%U() got multiple "
                             "values for keyword "
                             "argument '%S'",
                             co->co_name, keyword);
                goto fail;
            }
            Py_INCREF(value);
            SETLOCAL(j, value);
        }
        if (co->co_kwonlyargcount > 0) {
            for (i = co->co_argcount;
                 i < co->co_argcount + co->co_kwonlyargcount;
                 i++) {
                PyObject *name, *def;
                if (GETLOCAL(i) != NULL)
                    continue;
                name = PyTuple_GET_ITEM(co->co_varnames, i);
                def = NULL;
                if (kwdefs != NULL)
                    def = PyDict_GetItem(kwdefs, name);
                if (def != NULL) {
                    Py_INCREF(def);
                    SETLOCAL(i, def);
                    continue;
                }
                PyErr_Format(PyExc_TypeError,
                             "%U() needs keyword-only argument %S",
                             co->co_name, name);
                goto fail;
            }
        }
        if (argcount < co->co_argcount) {
            int m = co->co_argcount - defcount;
            for (i = argcount; i < m; i++) {
                if (GETLOCAL(i) == NULL) {
                    PyErr_Format(PyExc_TypeError,
                                 "%U() takes %s %d "
                                 "%spositional argument%s "
                                 "(%d given)",
                                 co->co_name,
                                 ((co->co_flags & CO_VARARGS) ||
                                  defcount) ? "at least"
                                            : "exactly",
                                 m, kwcount ? "non-keyword " : "",
                                 m == 1 ? "" : "s", i);
                    goto fail;
                }
            }
            for (; i < defcount; i++) {
                if (GETLOCAL(m+i) == NULL) {
                    PyObject *def = defs[i];
                    Py_INCREF(def);
                    SETLOCAL(m+i, def);
                }
            }
        }
    }
    else {
        if (argcount > 0 || kwcount > 0) {
            PyErr_Format(PyExc_TypeError,
                         "%U() takes no arguments (%d given)",
                         co->co_name, argcount + kwcount);
            goto fail;
        }
    }
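
    /* Illustrative sketch (not part of ceval.c): why the "speed hack"
       above is safe to try first. Interned strings are unique per value,
       so two interned names that compare equal are the same pointer, and
       a raw pointer compare can stand in for PyObject_RichCompareBool()
       in the common case. */
#if 0
    {
        PyObject *a = PyUnicode_InternFromString("argname");
        PyObject *b = PyUnicode_InternFromString("argname");
        /* Interned: equal value implies identical pointer. */
        assert(a == NULL || a == b);
        Py_XDECREF(a);
        Py_XDECREF(b);
    }
#endif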
    /* Allocate and initialize storage for cell vars, and copy free
       vars into frame. This isn't too efficient right now. */
    if (PyTuple_GET_SIZE(co->co_cellvars)) {
        int i, j, nargs, found;
        Py_UNICODE *cellname, *argname;
        PyObject *c;

        nargs = co->co_argcount + co->co_kwonlyargcount;
        if (co->co_flags & CO_VARARGS)
            nargs++;
        if (co->co_flags & CO_VARKEYWORDS)
            nargs++;

        /* Initialize each cell var, taking into account
           cell vars that are initialized from arguments.

           Should arrange for the compiler to put cellvars
           that are arguments at the beginning of the cellvars
           list so that we can march over it more efficiently? */
        for (i = 0; i < PyTuple_GET_SIZE(co->co_cellvars); ++i) {
            cellname = PyUnicode_AS_UNICODE(
                PyTuple_GET_ITEM(co->co_cellvars, i));
            found = 0;
            for (j = 0; j < nargs; j++) {
                argname = PyUnicode_AS_UNICODE(
                    PyTuple_GET_ITEM(co->co_varnames, j));
                if (Py_UNICODE_strcmp(cellname, argname) == 0) {
                    c = PyCell_New(GETLOCAL(j));
                    if (c == NULL)
                        goto fail;
                    GETLOCAL(co->co_nlocals + i) = c;
                    found = 1;
                    break;
                }
            }
            if (found == 0) {
                c = PyCell_New(NULL);
                if (c == NULL)
                    goto fail;
                SETLOCAL(co->co_nlocals + i, c);
            }
        }
    }
    if (PyTuple_GET_SIZE(co->co_freevars)) {
        int i;
        for (i = 0; i < PyTuple_GET_SIZE(co->co_freevars); ++i) {
            PyObject *o = PyTuple_GET_ITEM(closure, i);
            Py_INCREF(o);
            freevars[PyTuple_GET_SIZE(co->co_cellvars) + i] = o;
        }
    }
    if (co->co_flags & CO_GENERATOR) {
        /* Don't need to keep the reference to f_back, it will be set
         * when the generator is resumed. */
        Py_XDECREF(f->f_back);
        f->f_back = NULL;

        PCALL(PCALL_GENERATOR);

        /* Create a new generator that owns the ready to run frame
         * and return that as the value. */
        return PyGen_New(f);
    }

    retval = PyEval_EvalFrameEx(f,0);
fail: /* Jump here from prelude on failure */

    /* decref'ing the frame can cause __del__ methods to get invoked,
       which can call back into Python. While we're done with the
       current Python frame (f), the associated C stack is still in use,
       so recursion_depth must be boosted for the duration. */
    assert(tstate != NULL);
    ++tstate->recursion_depth;
    Py_DECREF(f);
    --tstate->recursion_depth;
    return retval;
}
/* Logic for the raise statement (too complicated for inlining).
   This *consumes* a reference count to each of its arguments. */
static enum why_code
do_raise(PyObject *exc, PyObject *cause)
{
    PyObject *type = NULL, *value = NULL;

    if (exc == NULL) {
        /* Reraise */
        PyThreadState *tstate = PyThreadState_GET();
        PyObject *tb;
        type = tstate->exc_type;
        value = tstate->exc_value;
        tb = tstate->exc_traceback;
        if (type == Py_None) {
            PyErr_SetString(PyExc_RuntimeError,
                            "No active exception to reraise");
            return WHY_EXCEPTION;
        }
        Py_XINCREF(type);
        Py_XINCREF(value);
        Py_XINCREF(tb);
        PyErr_Restore(type, value, tb);
        return WHY_RERAISE;
    }
    /* We support the following forms of raise:
         raise
         raise <instance>
         raise <type> */

    if (PyExceptionClass_Check(exc)) {
        type = exc;
        value = PyObject_CallObject(exc, NULL);
        if (value == NULL)
            goto raise_error;
    }
    else if (PyExceptionInstance_Check(exc)) {
        value = exc;
        type = PyExceptionInstance_Class(exc);
        Py_INCREF(type);
    }
    else {
        /* Not something you can raise. You get an exception
           anyway, just not what you specified :-) */
        Py_DECREF(exc);
        PyErr_SetString(PyExc_TypeError,
                        "exceptions must derive from BaseException");
        goto raise_error;
    }
    if (cause) {
        PyObject *fixed_cause;
        if (PyExceptionClass_Check(cause)) {
            fixed_cause = PyObject_CallObject(cause, NULL);
            if (fixed_cause == NULL)
                goto raise_error;
            Py_DECREF(cause);
        }
        else if (PyExceptionInstance_Check(cause)) {
            fixed_cause = cause;
        }
        else {
            PyErr_SetString(PyExc_TypeError,
                            "exception causes must derive from "
                            "BaseException");
            goto raise_error;
        }
        PyException_SetCause(value, fixed_cause);
    }

    PyErr_SetObject(type, value);
    /* PyErr_SetObject incref's its arguments */
    Py_XDECREF(value);
    Py_XDECREF(type);
    return WHY_EXCEPTION;

raise_error:
    Py_XDECREF(value);
    Py_XDECREF(type);
    Py_XDECREF(cause);
    return WHY_EXCEPTION;
}
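
/* Hypothetical sketch (not part of ceval.c): the C-level moves behind the
   Python statement `raise ValueError("bad") from cause`, mirroring what
   do_raise() does above: instantiate the class, attach the cause (whose
   reference is stolen by PyException_SetCause), then set the error.
   `cause` is assumed to be an exception instance the caller owns. */
#if 0
static void
raise_from_demo(PyObject *cause)
{
    PyObject *value = PyObject_CallFunction(PyExc_ValueError, "s", "bad");
    if (value == NULL)
        return;                         /* instantiation itself failed */
    Py_INCREF(cause);                   /* SetCause steals this reference */
    PyException_SetCause(value, cause);
    PyErr_SetObject((PyObject *)Py_TYPE(value), value);
    Py_DECREF(value);
}
#endif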
/* Iterate v argcnt times and store the results on the stack (via decreasing
   sp).  Return 1 for success, 0 if error.

   If argcntafter == -1, do a simple unpack. If it is >= 0, do an unpack
   with a variable target. */
static int
unpack_iterable(PyObject *v, int argcnt, int argcntafter, PyObject **sp)
{
    int i = 0, j = 0;
    Py_ssize_t ll = 0;
    PyObject *it;  /* iter(v) */
    PyObject *w;
    PyObject *l = NULL; /* variable list */

    it = PyObject_GetIter(v);
    if (it == NULL)
        goto Error;

    for (; i < argcnt; i++) {
        w = PyIter_Next(it);
        if (w == NULL) {
            /* Iterator done, via error or exhaustion. */
            if (!PyErr_Occurred()) {
                PyErr_Format(PyExc_ValueError,
                             "need more than %d value%s to unpack",
                             i, i == 1 ? "" : "s");
            }
            goto Error;
        }
        *--sp = w;
    }
    if (argcntafter == -1) {
        /* We better have exhausted the iterator now. */
        w = PyIter_Next(it);
        if (w == NULL) {
            if (PyErr_Occurred())
                goto Error;
            Py_DECREF(it);
            return 1;
        }
        Py_DECREF(w);
        PyErr_SetString(PyExc_ValueError, "too many values to unpack");
        goto Error;
    }

    l = PySequence_List(it);
    if (l == NULL)
        goto Error;
    *--sp = l;
    i++;

    ll = PyList_GET_SIZE(l);
    if (ll < argcntafter) {
        PyErr_Format(PyExc_ValueError, "need more than %zd values to unpack",
                     argcnt + ll);
        goto Error;
    }

    /* Pop the "after-variable" args off the list. */
    for (j = argcntafter; j > 0; j--, i++) {
        *--sp = PyList_GET_ITEM(l, ll - j);
        Py_INCREF(*sp);
    }
    /* Resize the list. */
    Py_SIZE(l) = ll - argcntafter;
    Py_DECREF(it);
    return 1;

Error:
    for (; i > 0; i--, sp++)
        Py_DECREF(*sp);
    Py_XDECREF(it);
    Py_XDECREF(l);
    return 0;
}
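
/* Illustrative sketch (not part of ceval.c): the UNPACK_EX opcode drives
   unpack_iterable() with both counts. For the Python statement
       a, *b, c = seq
   the compiler requests argcnt == 1 (names before the starred target) and
   argcntafter == 1 (names after it). Values land via the decreasing sp:
   sp[-1] is `a`, sp[-2] the list bound to `b`, sp[-3] is `c`.
   `stack_top` below is an assumed scratch area, pointing one past the
   last of three free slots. */
#if 0
static int
unpack_demo(PyObject **stack_top)
{
    PyObject *seq = PyTuple_Pack(3, Py_None, Py_False, Py_True);
    int ok = seq != NULL && unpack_iterable(seq, 1, 1, stack_top);
    Py_XDECREF(seq);
    return ok;
}
#endif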
static int
prtrace(PyObject *v, char *str)
{
    printf("%s ", str);
    if (PyObject_Print(v, stdout, 0) != 0)
        PyErr_Clear(); /* Don't know what else to do */
    printf("\n");
    return 1;
}
static void
call_exc_trace(Py_tracefunc func, PyObject *self, PyFrameObject *f)
{
    PyObject *type, *value, *traceback, *arg;
    int err;
    PyErr_Fetch(&type, &value, &traceback);
    if (value == NULL) {
        value = Py_None;
        Py_INCREF(value);
    }
    arg = PyTuple_Pack(3, type, value, traceback);
    if (arg == NULL) {
        PyErr_Restore(type, value, traceback);
        return;
    }
    err = call_trace(func, self, f, PyTrace_EXCEPTION, arg);
    Py_DECREF(arg);
    if (err == 0)
        PyErr_Restore(type, value, traceback);
    else {
        Py_XDECREF(type);
        Py_XDECREF(value);
        Py_XDECREF(traceback);
    }
}
static int
call_trace_protected(Py_tracefunc func, PyObject *obj, PyFrameObject *frame,
                     int what, PyObject *arg)
{
    PyObject *type, *value, *traceback;
    int err;
    PyErr_Fetch(&type, &value, &traceback);
    err = call_trace(func, obj, frame, what, arg);
    if (err == 0)
    {
        PyErr_Restore(type, value, traceback);
        return 0;
    }
    else {
        Py_XDECREF(type);
        Py_XDECREF(value);
        Py_XDECREF(traceback);
        return -1;
    }
}
static int
call_trace(Py_tracefunc func, PyObject *obj, PyFrameObject *frame,
           int what, PyObject *arg)
{
    register PyThreadState *tstate = frame->f_tstate;
    int result;
    if (tstate->tracing)
        return 0;
    tstate->tracing++;
    tstate->use_tracing = 0;
    result = func(obj, frame, what, arg);
    tstate->use_tracing = ((tstate->c_tracefunc != NULL)
                           || (tstate->c_profilefunc != NULL));
    tstate->tracing--;
    return result;
}
PyObject *
_PyEval_CallTracing(PyObject *func, PyObject *args)
{
    PyFrameObject *frame = PyEval_GetFrame();
    PyThreadState *tstate = frame->f_tstate;
    int save_tracing = tstate->tracing;
    int save_use_tracing = tstate->use_tracing;
    PyObject *result;

    tstate->tracing = 0;
    tstate->use_tracing = ((tstate->c_tracefunc != NULL)
                           || (tstate->c_profilefunc != NULL));
    result = PyObject_Call(func, args, NULL);
    tstate->tracing = save_tracing;
    tstate->use_tracing = save_use_tracing;
    return result;
}
static int
maybe_call_line_trace(Py_tracefunc func, PyObject *obj,
                      PyFrameObject *frame, int *instr_lb, int *instr_ub,
                      int *instr_prev)
{
    int result = 0;

    /* If the last instruction executed isn't in the current
       instruction window, reset the window. If the last
       instruction happens to fall at the start of a line or if it
       represents a jump backwards, call the trace function. */
    if ((frame->f_lasti < *instr_lb || frame->f_lasti >= *instr_ub)) {
        int line;
        PyAddrPair bounds;
        line = PyCode_CheckLineNumber(frame->f_code, frame->f_lasti,
                                      &bounds);
        if (line >= 0) {
            frame->f_lineno = line;
            result = call_trace(func, obj, frame,
                                PyTrace_LINE, Py_None);
        }
        *instr_lb = bounds.ap_lower;
        *instr_ub = bounds.ap_upper;
    }
    else if (frame->f_lasti <= *instr_prev) {
        result = call_trace(func, obj, frame, PyTrace_LINE, Py_None);
    }
    *instr_prev = frame->f_lasti;
    return result;
}
void
PyEval_SetProfile(Py_tracefunc func, PyObject *arg)
{
    PyThreadState *tstate = PyThreadState_GET();
    PyObject *temp = tstate->c_profileobj;
    Py_XINCREF(arg);
    tstate->c_profilefunc = NULL;
    tstate->c_profileobj = NULL;
    /* Must make sure that tracing is not ignored if 'temp' is freed */
    tstate->use_tracing = tstate->c_tracefunc != NULL;
    Py_XDECREF(temp);
    tstate->c_profilefunc = func;
    tstate->c_profileobj = arg;
    /* Flag that tracing or profiling is turned on */
    tstate->use_tracing = (func != NULL) || (tstate->c_tracefunc != NULL);
}
void
PyEval_SetTrace(Py_tracefunc func, PyObject *arg)
{
    PyThreadState *tstate = PyThreadState_GET();
    PyObject *temp = tstate->c_traceobj;
    _Py_TracingPossible += (func != NULL) - (tstate->c_tracefunc != NULL);
    Py_XINCREF(arg);
    tstate->c_tracefunc = NULL;
    tstate->c_traceobj = NULL;
    /* Must make sure that profiling is not ignored if 'temp' is freed */
    tstate->use_tracing = tstate->c_profilefunc != NULL;
    Py_XDECREF(temp);
    tstate->c_tracefunc = func;
    tstate->c_traceobj = arg;
    /* Flag that tracing or profiling is turned on */
    tstate->use_tracing = ((func != NULL)
                           || (tstate->c_profilefunc != NULL));
}
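
/* Hypothetical sketch (not part of ceval.c): installing a C-level profiler
   via PyEval_SetProfile(). The callback receives PyTrace_CALL and
   PyTrace_RETURN for Python frames and, through the C_TRACE macro below,
   PyTrace_C_CALL / PyTrace_C_RETURN / PyTrace_C_EXCEPTION for C calls. */
#if 0
static int
count_events(PyObject *obj, PyFrameObject *frame, int what, PyObject *arg)
{
    if (what == PyTrace_CALL || what == PyTrace_C_CALL) {
        /* obj is whatever was passed as the second argument below. */
    }
    return 0;                 /* a profile function reports errors as -1 */
}

static void
install_profiler(void)
{
    PyEval_SetProfile(count_events, NULL);
    /* ... run some code ... */
    PyEval_SetProfile(NULL, NULL);        /* uninstall */
}
#endif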
PyObject *
PyEval_GetBuiltins(void)
{
    PyFrameObject *current_frame = PyEval_GetFrame();
    if (current_frame == NULL)
        return PyThreadState_GET()->interp->builtins;
    else
        return current_frame->f_builtins;
}
PyObject *
PyEval_GetLocals(void)
{
    PyFrameObject *current_frame = PyEval_GetFrame();
    if (current_frame == NULL)
        return NULL;
    PyFrame_FastToLocals(current_frame);
    return current_frame->f_locals;
}
PyObject *
PyEval_GetGlobals(void)
{
    PyFrameObject *current_frame = PyEval_GetFrame();
    if (current_frame == NULL)
        return NULL;
    else
        return current_frame->f_globals;
}
PyFrameObject *
PyEval_GetFrame(void)
{
    PyThreadState *tstate = PyThreadState_GET();
    return _PyThreadState_GetFrame(tstate);
}
int
PyEval_MergeCompilerFlags(PyCompilerFlags *cf)
{
    PyFrameObject *current_frame = PyEval_GetFrame();
    int result = cf->cf_flags != 0;

    if (current_frame != NULL) {
        const int codeflags = current_frame->f_code->co_flags;
        const int compilerflags = codeflags & PyCF_MASK;
        if (compilerflags) {
            result = 1;
            cf->cf_flags |= compilerflags;
        }
#if 0 /* future keyword */
        if (codeflags & CO_GENERATOR_ALLOWED) {
            result = 1;
            cf->cf_flags |= CO_GENERATOR_ALLOWED;
        }
#endif
    }
    return result;
}
/* External interface to call any callable object.
   The arg must be a tuple or NULL.  */

#undef PyEval_CallObject
/* for backward compatibility: export this interface */

PyObject *
PyEval_CallObject(PyObject *func, PyObject *arg)
{
    return PyEval_CallObjectWithKeywords(func, arg, (PyObject *)NULL);
}
#define PyEval_CallObject(func,arg) \
    PyEval_CallObjectWithKeywords(func, arg, (PyObject *)NULL)
PyObject *
PyEval_CallObjectWithKeywords(PyObject *func, PyObject *arg, PyObject *kw)
{
    PyObject *result;

    if (arg == NULL) {
        arg = PyTuple_New(0);
        if (arg == NULL)
            return NULL;
    }
    else if (!PyTuple_Check(arg)) {
        PyErr_SetString(PyExc_TypeError,
                        "argument list must be a tuple");
        return NULL;
    }
    else
        Py_INCREF(arg);

    if (kw != NULL && !PyDict_Check(kw)) {
        PyErr_SetString(PyExc_TypeError,
                        "keyword list must be a dictionary");
        Py_DECREF(arg);
        return NULL;
    }

    result = PyObject_Call(func, arg, kw);
    Py_DECREF(arg);
    return result;
}
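
/* Hypothetical usage sketch (not part of ceval.c): calling a Python
   callable from C through this interface. `callable` is assumed to take
   one positional argument and an optional `verbose` keyword. */
#if 0
static PyObject *
call_demo(PyObject *callable)
{
    PyObject *args = Py_BuildValue("(i)", 42);
    PyObject *kw = Py_BuildValue("{s:i}", "verbose", 1);
    PyObject *result = NULL;
    if (args != NULL && kw != NULL)
        result = PyEval_CallObjectWithKeywords(callable, args, kw);
    Py_XDECREF(args);
    Py_XDECREF(kw);
    return result;
}
#endif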
const char *
PyEval_GetFuncName(PyObject *func)
{
    if (PyMethod_Check(func))
        return PyEval_GetFuncName(PyMethod_GET_FUNCTION(func));
    else if (PyFunction_Check(func))
        return _PyUnicode_AsString(((PyFunctionObject*)func)->func_name);
    else if (PyCFunction_Check(func))
        return ((PyCFunctionObject*)func)->m_ml->ml_name;
    else
        return func->ob_type->tp_name;
}
const char *
PyEval_GetFuncDesc(PyObject *func)
{
    if (PyMethod_Check(func))
        return "()";
    else if (PyFunction_Check(func))
        return "()";
    else if (PyCFunction_Check(func))
        return "()";
    else
        return " object";
}
static void
err_args(PyObject *func, int flags, int nargs)
{
    if (flags & METH_NOARGS)
        PyErr_Format(PyExc_TypeError,
                     "%.200s() takes no arguments (%d given)",
                     ((PyCFunctionObject *)func)->m_ml->ml_name,
                     nargs);
    else
        PyErr_Format(PyExc_TypeError,
                     "%.200s() takes exactly one argument (%d given)",
                     ((PyCFunctionObject *)func)->m_ml->ml_name,
                     nargs);
}
#define C_TRACE(x, call) \
if (tstate->use_tracing && tstate->c_profilefunc) { \
    if (call_trace(tstate->c_profilefunc, tstate->c_profileobj, \
                   tstate->frame, PyTrace_C_CALL, func)) { \
        x = NULL; \
    } \
    else { \
        x = call; \
        if (tstate->c_profilefunc != NULL) { \
            if (x == NULL) { \
                call_trace_protected(tstate->c_profilefunc, \
                                     tstate->c_profileobj, \
                                     tstate->frame, PyTrace_C_EXCEPTION, \
                                     func); \
                /* XXX should pass (type, value, tb) */ \
            } \
            else if (call_trace(tstate->c_profilefunc, \
                                tstate->c_profileobj, \
                                tstate->frame, PyTrace_C_RETURN, \
                                func)) { \
                Py_DECREF(x); \
                x = NULL; \
            } \
        } \
    } \
} else { \
    x = call; \
}
static PyObject *
call_function(PyObject ***pp_stack, int oparg
#ifdef WITH_TSC
              , uint64* pintr0, uint64* pintr1
#endif
              )
{
    int na = oparg & 0xff;
    int nk = (oparg>>8) & 0xff;
    int n = na + 2 * nk;
    PyObject **pfunc = (*pp_stack) - n - 1;
    PyObject *func = *pfunc;
    PyObject *x, *w;
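
    /* Worked example (illustrative): for the call f(1, 2, x=3) the compiler
       emits CALL_FUNCTION with oparg == 0x0102, so na == 2 positional
       arguments, nk == 1 keyword pair, and n == 4 stack slots sit above
       the callable at *pfunc. */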
    /* Always dispatch PyCFunction first, because these are
       presumed to be the most frequent callable object. */
    if (PyCFunction_Check(func) && nk == 0) {
        int flags = PyCFunction_GET_FLAGS(func);
        PyThreadState *tstate = PyThreadState_GET();

        PCALL(PCALL_CFUNCTION);
        if (flags & (METH_NOARGS | METH_O)) {
            PyCFunction meth = PyCFunction_GET_FUNCTION(func);
            PyObject *self = PyCFunction_GET_SELF(func);
            if (flags & METH_NOARGS && na == 0) {
                C_TRACE(x, (*meth)(self,NULL));
            }
            else if (flags & METH_O && na == 1) {
                PyObject *arg = EXT_POP(*pp_stack);
                C_TRACE(x, (*meth)(self,arg));
                Py_DECREF(arg);
            }
            else {
                err_args(func, flags, na);
                x = NULL;
            }
        }
        else {
            PyObject *callargs;
            callargs = load_args(pp_stack, na);
            READ_TIMESTAMP(*pintr0);
            C_TRACE(x, PyCFunction_Call(func,callargs,NULL));
            READ_TIMESTAMP(*pintr1);
            Py_XDECREF(callargs);
        }
    }
    else {
        if (PyMethod_Check(func) && PyMethod_GET_SELF(func) != NULL) {
            /* optimize access to bound methods */
            PyObject *self = PyMethod_GET_SELF(func);
            PCALL(PCALL_METHOD);
            PCALL(PCALL_BOUND_METHOD);
            Py_INCREF(self);
            func = PyMethod_GET_FUNCTION(func);
            Py_INCREF(func);
            Py_DECREF(*pfunc);
            *pfunc = self;
            na++;
            n++;
        }
        else
            Py_INCREF(func);
        READ_TIMESTAMP(*pintr0);
        if (PyFunction_Check(func))
            x = fast_function(func, pp_stack, n, na, nk);
        else
            x = do_call(func, pp_stack, na, nk);
        READ_TIMESTAMP(*pintr1);
        Py_DECREF(func);
    }
    /* Clear the stack of the function object. Also removes
       the arguments in case they weren't consumed already
       (fast_function() and err_args() leave them on the stack). */
    while ((*pp_stack) > pfunc) {
        w = EXT_POP(*pp_stack);
        Py_DECREF(w);
        PCALL(PCALL_POP);
    }
    return x;
}
/* The fast_function() function optimizes calls for which no argument
   tuple is necessary; the objects are passed directly from the stack.
   For the simplest case -- a function that takes only positional
   arguments and is called with only positional arguments -- it
   inlines the most primitive frame setup code from
   PyEval_EvalCodeEx(), which vastly reduces the checks that must be
   done before evaluating the frame. */

static PyObject *
fast_function(PyObject *func, PyObject ***pp_stack, int n, int na, int nk)
{
    PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func);
    PyObject *globals = PyFunction_GET_GLOBALS(func);
    PyObject *argdefs = PyFunction_GET_DEFAULTS(func);
    PyObject *kwdefs = PyFunction_GET_KW_DEFAULTS(func);
    PyObject **d = NULL;
    int nd = 0;

    PCALL(PCALL_FUNCTION);
    PCALL(PCALL_FAST_FUNCTION);
    if (argdefs == NULL && co->co_argcount == n &&
        co->co_kwonlyargcount == 0 && nk == 0 &&
        co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) {
        PyFrameObject *f;
        PyObject *retval = NULL;
        PyThreadState *tstate = PyThreadState_GET();
        PyObject **fastlocals, **stack;
        int i;

        PCALL(PCALL_FASTER_FUNCTION);
        assert(globals != NULL);
        /* XXX Perhaps we should create a specialized
           PyFrame_New() that doesn't take locals, but does
           take builtins without sanity checking them. */
        assert(tstate != NULL);
        f = PyFrame_New(tstate, co, globals, NULL);
        if (f == NULL)
            return NULL;

        fastlocals = f->f_localsplus;
        stack = (*pp_stack) - n;

        for (i = 0; i < n; i++) {
            Py_INCREF(*stack);
            fastlocals[i] = *stack++;
        }
        retval = PyEval_EvalFrameEx(f,0);
        ++tstate->recursion_depth;
        Py_DECREF(f);
        --tstate->recursion_depth;
        return retval;
    }
    if (argdefs != NULL) {
        d = &PyTuple_GET_ITEM(argdefs, 0);
        nd = Py_SIZE(argdefs);
    }
    return PyEval_EvalCodeEx(co, globals,
                             (PyObject *)NULL, (*pp_stack)-n, na,
                             (*pp_stack)-2*nk, nk, d, nd, kwdefs,
                             PyFunction_GET_CLOSURE(func));
}
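
/* Illustration (assumed, not part of ceval.c): which calls hit the inlined
   path above. A plain function such as
       def add(a, b): return a + b
   has no defaults, no keyword-only arguments and no free variables, so its
   co_flags is exactly (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE) and a call
   like add(1, 2) skips PyEval_EvalCodeEx() entirely; giving a parameter a
   default value or closing over a variable falls back to the general path. */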
static PyObject *
update_keyword_args(PyObject *orig_kwdict, int nk, PyObject ***pp_stack,
                    PyObject *func)
{
    PyObject *kwdict = NULL;
    if (orig_kwdict == NULL)
        kwdict = PyDict_New();
    else {
        kwdict = PyDict_Copy(orig_kwdict);
        Py_DECREF(orig_kwdict);
    }
    if (kwdict == NULL)
        return NULL;
    while (--nk >= 0) {
        int err;
        PyObject *value = EXT_POP(*pp_stack);
        PyObject *key = EXT_POP(*pp_stack);
        if (PyDict_GetItem(kwdict, key) != NULL) {
            PyErr_Format(PyExc_TypeError,
                         "%.200s%s got multiple values "
                         "for keyword argument '%.200s'",
                         PyEval_GetFuncName(func),
                         PyEval_GetFuncDesc(func),
                         _PyUnicode_AsString(key));
            Py_DECREF(key);
            Py_DECREF(value);
            Py_DECREF(kwdict);
            return NULL;
        }
        err = PyDict_SetItem(kwdict, key, value);
        Py_DECREF(key);
        Py_DECREF(value);
        if (err) {
            Py_DECREF(kwdict);
            return NULL;
        }
    }
    return kwdict;
}
static PyObject *
update_star_args(int nstack, int nstar, PyObject *stararg,
                 PyObject ***pp_stack)
{
    PyObject *callargs, *w;
    int i;

    callargs = PyTuple_New(nstack + nstar);
    if (callargs == NULL) {
        return NULL;
    }
    if (nstar) {
        for (i = 0; i < nstar; i++) {
            PyObject *a = PyTuple_GET_ITEM(stararg, i);
            Py_INCREF(a);
            PyTuple_SET_ITEM(callargs, nstack + i, a);
        }
    }
    while (--nstack >= 0) {
        w = EXT_POP(*pp_stack);
        PyTuple_SET_ITEM(callargs, nstack, w);
    }
    return callargs;
}
static PyObject *
load_args(PyObject ***pp_stack, int na)
{
    PyObject *args = PyTuple_New(na);
    PyObject *w;

    if (args == NULL)
        return NULL;
    while (--na >= 0) {
        w = EXT_POP(*pp_stack);
        PyTuple_SET_ITEM(args, na, w);
    }
    return args;
}
static PyObject *
do_call(PyObject *func, PyObject ***pp_stack, int na, int nk)
{
    PyObject *callargs = NULL;
    PyObject *kwdict = NULL;
    PyObject *result = NULL;

    if (nk > 0) {
        kwdict = update_keyword_args(NULL, nk, pp_stack, func);
        if (kwdict == NULL)
            goto call_fail;
    }
    callargs = load_args(pp_stack, na);
    if (callargs == NULL)
        goto call_fail;
    /* At this point, we have to look at the type of func to
       update the call stats properly. Do it here so as to avoid
       exposing the call stats machinery outside ceval.c */
    if (PyFunction_Check(func))
        PCALL(PCALL_FUNCTION);
    else if (PyMethod_Check(func))
        PCALL(PCALL_METHOD);
    else if (PyType_Check(func))
        PCALL(PCALL_TYPE);
    else if (PyCFunction_Check(func))
        PCALL(PCALL_CFUNCTION);
    else
        PCALL(PCALL_OTHER);

    if (PyCFunction_Check(func)) {
        PyThreadState *tstate = PyThreadState_GET();
        C_TRACE(result, PyCFunction_Call(func, callargs, kwdict));
    }
    else
        result = PyObject_Call(func, callargs, kwdict);
call_fail:
    Py_XDECREF(callargs);
    Py_XDECREF(kwdict);
    return result;
}
static PyObject *
ext_do_call(PyObject *func, PyObject ***pp_stack, int flags, int na, int nk)
{
    int nstar = 0;
    PyObject *callargs = NULL;
    PyObject *stararg = NULL;
    PyObject *kwdict = NULL;
    PyObject *result = NULL;

    if (flags & CALL_FLAG_KW) {
        kwdict = EXT_POP(*pp_stack);
        if (!PyDict_Check(kwdict)) {
            PyObject *d;
            d = PyDict_New();
            if (d == NULL)
                goto ext_call_fail;
            if (PyDict_Update(d, kwdict) != 0) {
                Py_DECREF(d);
                /* PyDict_Update raises attribute
                 * error (percolated from an attempt
                 * to get 'keys' attribute) instead of
                 * a type error if its second argument
                 * is not a mapping. */
                if (PyErr_ExceptionMatches(PyExc_AttributeError)) {
                    PyErr_Format(PyExc_TypeError,
                                 "%.200s%.200s argument after ** "
                                 "must be a mapping, not %.200s",
                                 PyEval_GetFuncName(func),
                                 PyEval_GetFuncDesc(func),
                                 kwdict->ob_type->tp_name);
                }
                goto ext_call_fail;
            }
            Py_DECREF(kwdict);
            kwdict = d;
        }
    }
    if (flags & CALL_FLAG_VAR) {
        stararg = EXT_POP(*pp_stack);
        if (!PyTuple_Check(stararg)) {
            PyObject *t = NULL;
            t = PySequence_Tuple(stararg);
            if (t == NULL) {
                if (PyErr_ExceptionMatches(PyExc_TypeError)) {
                    PyErr_Format(PyExc_TypeError,
                                 "%.200s%.200s argument after * "
                                 "must be a sequence, not %.200s",
                                 PyEval_GetFuncName(func),
                                 PyEval_GetFuncDesc(func),
                                 stararg->ob_type->tp_name);
                }
                goto ext_call_fail;
            }
            Py_DECREF(stararg);
            stararg = t;
        }
        nstar = PyTuple_GET_SIZE(stararg);
    }
    if (nk > 0) {
        kwdict = update_keyword_args(kwdict, nk, pp_stack, func);
        if (kwdict == NULL)
            goto ext_call_fail;
    }
    callargs = update_star_args(na, nstar, stararg, pp_stack);
    if (callargs == NULL)
        goto ext_call_fail;

    /* At this point, we have to look at the type of func to
       update the call stats properly. Do it here so as to avoid
       exposing the call stats machinery outside ceval.c */
    if (PyFunction_Check(func))
        PCALL(PCALL_FUNCTION);
    else if (PyMethod_Check(func))
        PCALL(PCALL_METHOD);
    else if (PyType_Check(func))
        PCALL(PCALL_TYPE);
    else if (PyCFunction_Check(func))
        PCALL(PCALL_CFUNCTION);
    else
        PCALL(PCALL_OTHER);

    if (PyCFunction_Check(func)) {
        PyThreadState *tstate = PyThreadState_GET();
        C_TRACE(result, PyCFunction_Call(func, callargs, kwdict));
    }
    else
        result = PyObject_Call(func, callargs, kwdict);
ext_call_fail:
    Py_XDECREF(callargs);
    Py_XDECREF(kwdict);
    Py_XDECREF(stararg);
    return result;
}
/* Extract a slice index from a PyInt or PyLong or an object with the
   nb_index slot defined, and store in *pi.
   Silently reduce values larger than PY_SSIZE_T_MAX to PY_SSIZE_T_MAX,
   and silently boost values less than -PY_SSIZE_T_MAX-1 to -PY_SSIZE_T_MAX-1.
   Return 0 on error, 1 on success.
*/
/* Note:  If v is NULL, return success without storing into *pi.  This
   is because _PyEval_SliceIndex() is called by apply_slice(), which can be
   called by the SLICE opcode with v and/or w equal to NULL.
*/
int
_PyEval_SliceIndex(PyObject *v, Py_ssize_t *pi)
{
    if (v != NULL) {
        Py_ssize_t x;
        if (PyIndex_Check(v)) {
            x = PyNumber_AsSsize_t(v, NULL);
            if (x == -1 && PyErr_Occurred())
                return 0;
        }
        else {
            PyErr_SetString(PyExc_TypeError,
                            "slice indices must be integers or "
                            "None or have an __index__ method");
            return 0;
        }
        *pi = x;
    }
    return 1;
}
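
/* Illustrative sketch (not part of ceval.c): the silent clamping described
   above as seen by a caller. An index beyond PY_SSIZE_T_MAX is reduced
   rather than raising OverflowError, matching slice semantics such as
   `seq[0:10**100]`. */
#if 0
static void
slice_index_demo(void)
{
    Py_ssize_t i = 0;
    /* 10**20 comfortably exceeds PY_SSIZE_T_MAX on 64-bit builds. */
    PyObject *huge = PyLong_FromString("100000000000000000000", NULL, 10);
    if (huge != NULL && _PyEval_SliceIndex(huge, &i))
        assert(i == PY_SSIZE_T_MAX);      /* clamped, no error set */
    Py_XDECREF(huge);
}
#endif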
#define CANNOT_CATCH_MSG "catching classes that do not inherit from "\
                         "BaseException is not allowed"
static PyObject *
cmp_outcome(int op, register PyObject *v, register PyObject *w)
{
    int res = 0;
    switch (op) {
    case PyCmp_IS:
        res = (v == w);
        break;
    case PyCmp_IS_NOT:
        res = (v != w);
        break;
    case PyCmp_IN:
        res = PySequence_Contains(w, v);
        if (res < 0)
            return NULL;
        break;
    case PyCmp_NOT_IN:
        res = PySequence_Contains(w, v);
        if (res < 0)
            return NULL;
        res = !res;
        break;
    case PyCmp_EXC_MATCH:
        if (PyTuple_Check(w)) {
            Py_ssize_t i, length;
            length = PyTuple_Size(w);
            for (i = 0; i < length; i += 1) {
                PyObject *exc = PyTuple_GET_ITEM(w, i);
                if (!PyExceptionClass_Check(exc)) {
                    PyErr_SetString(PyExc_TypeError,
                                    CANNOT_CATCH_MSG);
                    return NULL;
                }
            }
        }
        else {
            if (!PyExceptionClass_Check(w)) {
                PyErr_SetString(PyExc_TypeError,
                                CANNOT_CATCH_MSG);
                return NULL;
            }
        }
        res = PyErr_GivenExceptionMatches(v, w);
        break;
    default:
        return PyObject_RichCompare(v, w, op);
    }
    v = res ? Py_True : Py_False;
    Py_INCREF(v);
    return v;
}
static PyObject *
import_from(PyObject *v, PyObject *name)
{
    PyObject *x;

    x = PyObject_GetAttr(v, name);
    if (x == NULL && PyErr_ExceptionMatches(PyExc_AttributeError)) {
        PyErr_Format(PyExc_ImportError, "cannot import name %S", name);
    }
    return x;
}
static int
import_all_from(PyObject *locals, PyObject *v)
{
    PyObject *all = PyObject_GetAttrString(v, "__all__");
    PyObject *dict, *name, *value;
    int skip_leading_underscores = 0;
    int pos, err;

    if (all == NULL) {
        if (!PyErr_ExceptionMatches(PyExc_AttributeError))
            return -1; /* Unexpected error */
        PyErr_Clear();
        dict = PyObject_GetAttrString(v, "__dict__");
        if (dict == NULL) {
            if (!PyErr_ExceptionMatches(PyExc_AttributeError))
                return -1;
            PyErr_SetString(PyExc_ImportError,
                            "from-import-* object has no __dict__ and no __all__");
            return -1;
        }
        all = PyMapping_Keys(dict);
        Py_DECREF(dict);
        if (all == NULL)
            return -1;
        skip_leading_underscores = 1;
    }

    for (pos = 0, err = 0; ; pos++) {
        name = PySequence_GetItem(all, pos);
        if (name == NULL) {
            if (!PyErr_ExceptionMatches(PyExc_IndexError))
                err = -1;
            else
                PyErr_Clear();
            break;
        }
        if (skip_leading_underscores &&
            PyUnicode_Check(name) &&
            PyUnicode_AS_UNICODE(name)[0] == '_') {
            Py_DECREF(name);
            continue;
        }
        value = PyObject_GetAttr(v, name);
        if (value == NULL)
            err = -1;
        else if (PyDict_CheckExact(locals))
            err = PyDict_SetItem(locals, name, value);
        else
            err = PyObject_SetItem(locals, name, value);
        Py_DECREF(name);
        Py_XDECREF(value);
        if (err != 0)
            break;
    }
    Py_DECREF(all);
    return err;
}
static void
format_exc_check_arg(PyObject *exc, const char *format_str, PyObject *obj)
{
    const char *obj_str;

    if (!obj)
        return;

    obj_str = _PyUnicode_AsString(obj);
    if (!obj_str)
        return;

    PyErr_Format(exc, format_str, obj_str);
}
static PyObject *
unicode_concatenate(PyObject *v, PyObject *w,
                    PyFrameObject *f, unsigned char *next_instr)
{
    /* This function implements 'variable += expr' when both arguments
       are (Unicode) strings. */
    Py_ssize_t v_len = PyUnicode_GET_SIZE(v);
    Py_ssize_t w_len = PyUnicode_GET_SIZE(w);
    Py_ssize_t new_len = v_len + w_len;
    if (new_len < 0) {
        PyErr_SetString(PyExc_OverflowError,
                        "strings are too large to concat");
        return NULL;
    }
    if (v->ob_refcnt == 2) {
        /* In the common case, there are 2 references to the value
         * stored in 'variable' when the += is performed: one on the
         * value stack (in 'v') and one still stored in the
         * 'variable'.  We try to delete the variable now to reduce
         * the refcnt to 1.
         */
        switch (*next_instr) {
        case STORE_FAST:
        {
            int oparg = PEEKARG();
            PyObject **fastlocals = f->f_localsplus;
            if (GETLOCAL(oparg) == v)
                SETLOCAL(oparg, NULL);
            break;
        }
        case STORE_DEREF:
        {
            PyObject **freevars = (f->f_localsplus +
                                   f->f_code->co_nlocals);
            PyObject *c = freevars[PEEKARG()];
            if (PyCell_GET(c) == v)
                PyCell_Set(c, NULL);
            break;
        }
        case STORE_NAME:
        {
            PyObject *names = f->f_code->co_names;
            PyObject *name = GETITEM(names, PEEKARG());
            PyObject *locals = f->f_locals;
            if (PyDict_CheckExact(locals) &&
                PyDict_GetItem(locals, name) == v) {
                if (PyDict_DelItem(locals, name) != 0) {
                    PyErr_Clear();
                }
            }
            break;
        }
        }
    }
    if (v->ob_refcnt == 1 && !PyUnicode_CHECK_INTERNED(v)) {
        /* Now we own the last reference to 'v', so we can resize it
         * in-place.
         */
        if (PyUnicode_Resize(&v, new_len) != 0) {
            /* XXX if PyUnicode_Resize() fails, 'v' has been
             * deallocated so it cannot be put back into
             * 'variable'.  The MemoryError is raised when there
             * is no value in 'variable', which might (very
             * remotely) be a cause of incompatibilities.
             */
            return NULL;
        }
        /* copy 'w' into the newly allocated area of 'v' */
        memcpy(PyUnicode_AS_UNICODE(v) + v_len,
               PyUnicode_AS_UNICODE(w), w_len*sizeof(Py_UNICODE));
        return v;
    }
    else {
        /* When in-place resizing is not an option. */
        w = PyUnicode_Concat(v, w);
        Py_DECREF(v);
        return w;
    }
}
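
/* Illustrative sketch (assumed, not part of ceval.c): the in-place branch
   above relies on holding the sole reference to a non-interned string;
   PyUnicode_Resize() has the same requirement when called directly. */
#if 0
static void
resize_demo(void)
{
    PyObject *u = PyUnicode_FromString("spam"); /* refcnt 1, not interned */
    if (u != NULL && PyUnicode_Resize(&u, 8) == 0) {
        /* u now has room for 8 code units; the original four are kept. */
    }
    Py_XDECREF(u);
}
#endif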
#ifdef DYNAMIC_EXECUTION_PROFILE

static PyObject *
getarray(long a[256])
{
    int i;
    PyObject *l = PyList_New(256);
    if (l == NULL) return NULL;
    for (i = 0; i < 256; i++) {
        PyObject *x = PyLong_FromLong(a[i]);
        if (x == NULL) {
            Py_DECREF(l);
            return NULL;
        }
        PyList_SetItem(l, i, x);
    }
    for (i = 0; i < 256; i++)
        a[i] = 0;
    return l;
}
PyObject *
_Py_GetDXProfile(PyObject *self, PyObject *args)
{
#ifndef DXPAIRS
    return getarray(dxp);
#else
    int i;
    PyObject *l = PyList_New(257);
    if (l == NULL) return NULL;
    for (i = 0; i < 257; i++) {
        PyObject *x = getarray(dxpairs[i]);
        if (x == NULL) {
            Py_DECREF(l);
            return NULL;
        }
        PyList_SetItem(l, i, x);
    }