/* Execute compiled code */

/* XXX TO DO:
   XXX speed up searching for keywords by using a dictionary
*/

/* enable more aggressive intra-module optimizations, where available */
#define PY_LOCAL_AGGRESSIVE

#include "Python.h"

#include "frameobject.h"
#include "opcode.h"
#include "structmember.h"
#ifndef WITH_TSC

#define READ_TIMESTAMP(var)

#else

typedef unsigned long long uint64;
#if defined(__ppc__) /* <- Don't know if this is the correct symbol; this
                           section should work for GCC on any PowerPC
                           platform, irrespective of OS.
                           POWER?  Who knows :-) */

#define READ_TIMESTAMP(var) ppc_getcounter(&var)

static void
ppc_getcounter(uint64 *v)
{
    register unsigned long tbu, tb, tbu2;

  loop:
    asm volatile ("mftbu %0" : "=r" (tbu) );
    asm volatile ("mftb  %0" : "=r" (tb)  );
    asm volatile ("mftbu %0" : "=r" (tbu2));
    if (__builtin_expect(tbu != tbu2, 0)) goto loop;

    /* The slightly peculiar way of writing the next lines is
       compiled better by GCC than any other way I tried. */
    ((long*)(v))[0] = tbu;
    ((long*)(v))[1] = tb;
}
#elif defined(__i386__)

/* this is for linux/x86 (and probably any other GCC/x86 combo) */

#define READ_TIMESTAMP(val) \
     __asm__ __volatile__("rdtsc" : "=A" (val))

#elif defined(__x86_64__)

/* for gcc/x86_64, the "A" constraint in DI mode means *either* rax *or* rdx;
   not edx:eax as it does for i386.  Since rdtsc puts its result in edx:eax
   even in 64-bit mode, we need to use "a" and "d" for the lower and upper
   32-bit pieces of the result. */

#define READ_TIMESTAMP(val) \
    __asm__ __volatile__("rdtsc" : \
                         "=a" (((int*)&(val))[0]), "=d" (((int*)&(val))[1]));

#else

#error "Don't know how to implement timestamp counter for this architecture"

#endif
void dump_tsc(int opcode, int ticked, uint64 inst0, uint64 inst1,
              uint64 loop0, uint64 loop1, uint64 intr0, uint64 intr1)
{
    uint64 intr, inst, loop;
    PyThreadState *tstate = PyThreadState_Get();
    if (!tstate->interp->tscdump)
        return;
    intr = intr1 - intr0;
    inst = inst1 - inst0 - intr;
    loop = loop1 - loop0 - intr;
    fprintf(stderr, "opcode=%03d t=%d inst=%06lld loop=%06lld\n",
            opcode, ticked, inst, loop);
}

#endif /* WITH_TSC */
/* Turn this on if your compiler chokes on the big switch: */
/* #define CASE_TOO_BIG 1 */

#ifdef Py_DEBUG
/* For debugging the interpreter: */
#define LLTRACE  1      /* Low-level trace feature */
#define CHECKEXC 1      /* Double-check exception checking */
#endif
typedef PyObject *(*callproc)(PyObject *, PyObject *, PyObject *);
/* Forward declarations */
#ifdef WITH_TSC
static PyObject * call_function(PyObject ***, int, uint64*, uint64*);
#else
static PyObject * call_function(PyObject ***, int);
#endif
static PyObject * fast_function(PyObject *, PyObject ***, int, int, int);
static PyObject * do_call(PyObject *, PyObject ***, int, int);
static PyObject * ext_do_call(PyObject *, PyObject ***, int, int, int);
static PyObject * update_keyword_args(PyObject *, int, PyObject ***,
                                      PyObject *);
static PyObject * update_star_args(int, int, PyObject *, PyObject ***);
static PyObject * load_args(PyObject ***, int);
#define CALL_FLAG_VAR 1
#define CALL_FLAG_KW 2
#ifdef LLTRACE
static int lltrace;
static int prtrace(PyObject *, char *);
#endif
static int call_trace(Py_tracefunc, PyObject *, PyFrameObject *,
                      int, PyObject *);
static int call_trace_protected(Py_tracefunc, PyObject *,
                                PyFrameObject *, int, PyObject *);
static void call_exc_trace(Py_tracefunc, PyObject *, PyFrameObject *);
static int maybe_call_line_trace(Py_tracefunc, PyObject *,
                                 PyFrameObject *, int *, int *, int *);

static PyObject * cmp_outcome(int, PyObject *, PyObject *);
static PyObject * import_from(PyObject *, PyObject *);
static int import_all_from(PyObject *, PyObject *);
static void format_exc_check_arg(PyObject *, const char *, PyObject *);
static PyObject * unicode_concatenate(PyObject *, PyObject *,
                                      PyFrameObject *, unsigned char *);
#define NAME_ERROR_MSG \
    "name '%.200s' is not defined"
#define GLOBAL_NAME_ERROR_MSG \
    "global name '%.200s' is not defined"
#define UNBOUNDLOCAL_ERROR_MSG \
    "local variable '%.200s' referenced before assignment"
#define UNBOUNDFREE_ERROR_MSG \
    "free variable '%.200s' referenced before assignment" \
    " in enclosing scope"
/* Dynamic execution profile */
#ifdef DYNAMIC_EXECUTION_PROFILE
#ifdef DXPAIRS
static long dxpairs[257][256];
#define dxp dxpairs[256]
#else
static long dxp[256];
#endif
#endif
/* Function call profile */
#ifdef CALL_PROFILE
#define PCALL_NUM 11
static int pcall[PCALL_NUM];

#define PCALL_ALL 0
#define PCALL_FUNCTION 1
#define PCALL_FAST_FUNCTION 2
#define PCALL_FASTER_FUNCTION 3
#define PCALL_METHOD 4
#define PCALL_BOUND_METHOD 5
#define PCALL_CFUNCTION 6
#define PCALL_TYPE 7
#define PCALL_GENERATOR 8
#define PCALL_OTHER 9
#define PCALL_POP 10

/* Notes about the statistics

   FAST_FUNCTION means no argument tuple needs to be created.
   FASTER_FUNCTION means that the fast-path frame setup code is used.

   If there is a method call where the call can be optimized by changing
   the argument tuple and calling the function directly, it gets recorded
   twice.

   As a result, the relationship among the statistics appears to be
   PCALL_ALL == PCALL_FUNCTION + PCALL_METHOD - PCALL_BOUND_METHOD +
                PCALL_CFUNCTION + PCALL_TYPE + PCALL_GENERATOR + PCALL_OTHER
   PCALL_FUNCTION > PCALL_FAST_FUNCTION > PCALL_FASTER_FUNCTION
   PCALL_METHOD > PCALL_BOUND_METHOD
*/

#define PCALL(POS) pcall[POS]++
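/* Illustrative sketch (not in the original): how an instrumented call path
   bumps these counters.  Every call increments PCALL_ALL plus one or more
   of the specific buckets, which is what yields the identity above. */
#if 0
    /* e.g. in the interpreter's call path, for a plain Python function: */
    PCALL(PCALL_ALL);
    PCALL(PCALL_FUNCTION);
    /* and, when no argument tuple had to be built: */
    PCALL(PCALL_FAST_FUNCTION);
#endif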
PyObject *
PyEval_GetCallStats(PyObject *self)
{
    return Py_BuildValue("iiiiiiiiiii",
                         pcall[0], pcall[1], pcall[2], pcall[3],
                         pcall[4], pcall[5], pcall[6], pcall[7],
                         pcall[8], pcall[9], pcall[10]);
}
#else
#define PCALL(O)

PyObject *
PyEval_GetCallStats(PyObject *self)
{
    Py_INCREF(Py_None);
    return Py_None;
}
#endif
223 #include "pythread.h"
225 static PyThread_type_lock interpreter_lock
= 0; /* This is the GIL */
226 static PyThread_type_lock pending_lock
= 0; /* for pending calls */
227 static long main_thread
= 0;
int
PyEval_ThreadsInitialized(void)
{
    return interpreter_lock != 0;
}

void
PyEval_InitThreads(void)
{
    if (interpreter_lock)
        return;
    interpreter_lock = PyThread_allocate_lock();
    PyThread_acquire_lock(interpreter_lock, 1);
    main_thread = PyThread_get_thread_ident();
}

void
PyEval_AcquireLock(void)
{
    PyThread_acquire_lock(interpreter_lock, 1);
}

void
PyEval_ReleaseLock(void)
{
    PyThread_release_lock(interpreter_lock);
}
void
PyEval_AcquireThread(PyThreadState *tstate)
{
    if (tstate == NULL)
        Py_FatalError("PyEval_AcquireThread: NULL new thread state");
    /* Check someone has called PyEval_InitThreads() to create the lock */
    assert(interpreter_lock);
    PyThread_acquire_lock(interpreter_lock, 1);
    if (PyThreadState_Swap(tstate) != NULL)
        Py_FatalError(
            "PyEval_AcquireThread: non-NULL old thread state");
}

void
PyEval_ReleaseThread(PyThreadState *tstate)
{
    if (tstate == NULL)
        Py_FatalError("PyEval_ReleaseThread: NULL thread state");
    if (PyThreadState_Swap(NULL) != tstate)
        Py_FatalError("PyEval_ReleaseThread: wrong thread state");
    PyThread_release_lock(interpreter_lock);
}
/* This function is called from PyOS_AfterFork to ensure that newly
   created child processes don't hold locks referring to threads which
   are not running in the child process.  (This could also be done using
   pthread_atfork mechanism, at least for the pthreads implementation.) */

void
PyEval_ReInitThreads(void)
{
    PyObject *threading, *result;
    PyThreadState *tstate;

    if (!interpreter_lock)
        return;
    /*XXX Can't use PyThread_free_lock here because it does too
      much error-checking.  Doing this cleanly would require
      adding a new function to each thread_*.h.  Instead, just
      create a new lock and waste a little bit of memory */
    interpreter_lock = PyThread_allocate_lock();
    pending_lock = PyThread_allocate_lock();
    PyThread_acquire_lock(interpreter_lock, 1);
    main_thread = PyThread_get_thread_ident();

    /* Update the threading module with the new state.
     */
    tstate = PyThreadState_GET();
    threading = PyMapping_GetItemString(tstate->interp->modules,
                                        "threading");
    if (threading == NULL) {
        /* threading not imported */
        PyErr_Clear();
        return;
    }
    result = PyObject_CallMethod(threading, "_after_fork", NULL);
    if (result == NULL)
        PyErr_WriteUnraisable(threading);
    else
        Py_DECREF(result);
    Py_DECREF(threading);
}
/* Functions save_thread and restore_thread are always defined so
   dynamically loaded modules needn't be compiled separately for use
   with and without threads: */

PyThreadState *
PyEval_SaveThread(void)
{
    PyThreadState *tstate = PyThreadState_Swap(NULL);
    if (tstate == NULL)
        Py_FatalError("PyEval_SaveThread: NULL tstate");
#ifdef WITH_THREAD
    if (interpreter_lock)
        PyThread_release_lock(interpreter_lock);
#endif
    return tstate;
}

void
PyEval_RestoreThread(PyThreadState *tstate)
{
    if (tstate == NULL)
        Py_FatalError("PyEval_RestoreThread: NULL tstate");
#ifdef WITH_THREAD
    if (interpreter_lock) {
        int err = errno;
        PyThread_acquire_lock(interpreter_lock, 1);
        errno = err;
    }
#endif
    PyThreadState_Swap(tstate);
}
/* Mechanism whereby asynchronously executing callbacks (e.g. UNIX
   signal handlers or Mac I/O completion routines) can schedule calls
   to a function to be called synchronously.
   The synchronous function is called with one void* argument.
   It should return 0 for success or -1 for failure -- failure should
   be accompanied by an exception.

   If registry succeeds, the registry function returns 0; if it fails
   (e.g. due to too many pending calls) it returns -1 (without setting
   an exception condition).

   Note that because registry may occur from within signal handlers,
   or other asynchronous events, calling malloc() is unsafe!

   Any thread can schedule pending calls, but only the main thread
   will execute them.
   There is no facility to schedule calls to a particular thread, but
   that should be easy to change, should that ever be required.  In
   that case, the static variables here should go into the python
   threadstate.
*/

#ifdef WITH_THREAD

/* The WITH_THREAD implementation is thread-safe.  It allows
   scheduling to be made from any thread, and even from an executing
   callback.
 */
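/* Illustrative sketch (not in the original): the typical client of this
   mechanism is a signal handler, which may only set flags and schedule a
   callback; the main loop later runs it synchronously via
   Py_MakePendingCalls().  `report_signal' is a hypothetical callback. */
#if 0
static int
report_signal(void *arg)
{
    /* runs later, in the main thread, with the interpreter in a
       consistent state; return 0 on success, -1 with an exception set */
    return 0;
}

static void
my_signal_handler(int signum)
{
    /* async-signal context: no malloc(), no Python API beyond this call */
    Py_AddPendingCall(report_signal, NULL);
}
#endif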
#define NPENDINGCALLS 32
static struct {
    int (*func)(void *);
    void *arg;
} pendingcalls[NPENDINGCALLS];
static int pendingfirst = 0;
static int pendinglast = 0;
static volatile int pendingcalls_to_do = 1; /* trigger initialization of lock */
static char pendingbusy = 0;
int
Py_AddPendingCall(int (*func)(void *), void *arg)
{
    int i, j, result=0;
    PyThread_type_lock lock = pending_lock;

    /* try a few times for the lock.  Since this mechanism is used
     * for signal handling (on the main thread), there is a (slim)
     * chance that a signal is delivered on the same thread while we
     * hold the lock during the Py_MakePendingCalls() function.
     * This avoids a deadlock in that case.
     * Note that signals can be delivered on any thread.  In particular,
     * on Windows, a SIGINT is delivered on a system-created worker
     * thread.
     * We also check for lock being NULL, in the unlikely case that
     * this function is called before any bytecode evaluation takes place.
     */
    if (lock != NULL) {
        for (i = 0; i<100; i++) {
            if (PyThread_acquire_lock(lock, NOWAIT_LOCK))
                break;
        }
        if (i == 100)
            return -1;
    }

    i = pendinglast;
    j = (i + 1) % NPENDINGCALLS;
    if (j == pendingfirst) {
        result = -1; /* Queue full */
    }
    else {
        pendingcalls[i].func = func;
        pendingcalls[i].arg = arg;
        pendinglast = j;
    }
    /* signal main loop */
    _Py_Ticker = 0;
    pendingcalls_to_do = 1;
    if (lock != NULL)
        PyThread_release_lock(lock);
    return result;
}
int
Py_MakePendingCalls(void)
{
    int i;
    int r = 0;

    if (!pending_lock) {
        /* initial allocation of the lock */
        pending_lock = PyThread_allocate_lock();
        if (pending_lock == NULL)
            return -1;
    }

    /* only service pending calls on main thread */
    if (main_thread && PyThread_get_thread_ident() != main_thread)
        return 0;
    /* don't perform recursive pending calls */
    if (pendingbusy)
        return 0;
    pendingbusy = 1;
    /* perform a bounded number of calls, in case of recursion */
    for (i=0; i<NPENDINGCALLS; i++) {
        int j;
        int (*func)(void *);
        void *arg = NULL;

        /* pop one item off the queue while holding the lock */
        PyThread_acquire_lock(pending_lock, WAIT_LOCK);
        j = pendingfirst;
        if (j == pendinglast) {
            func = NULL; /* Queue empty */
        }
        else {
            func = pendingcalls[j].func;
            arg = pendingcalls[j].arg;
            pendingfirst = (j + 1) % NPENDINGCALLS;
        }
        pendingcalls_to_do = pendingfirst != pendinglast;
        PyThread_release_lock(pending_lock);
        /* having released the lock, perform the callback */
        if (func == NULL)
            break;
        r = func(arg);
        if (r)
            break;
    }
    pendingbusy = 0;
    return r;
}
#else /* if ! defined WITH_THREAD */

/*
   WARNING!  ASYNCHRONOUSLY EXECUTING CODE!
   This code is used for signal handling in python that isn't built
   with WITH_THREAD.
   Don't use this implementation when Py_AddPendingCalls() can happen
   on a different thread!

   There are two possible race conditions:
   (1) nested asynchronous calls to Py_AddPendingCall()
   (2) AddPendingCall() calls made while pending calls are being processed.

   (1) is very unlikely because typically signal delivery
   is blocked during signal handling.  So it should be impossible.
   (2) is a real possibility.
   The current code is safe against (2), but not against (1).
   The safety against (2) is derived from the fact that only one
   thread is present, interrupted by signals, and that the critical
   section is protected with the "busy" variable.  On Windows, which
   delivers SIGINT on a system thread, this does not hold and therefore
   Windows really shouldn't use this version.
   The two threads could theoretically wiggle around the "busy" variable.
*/

#define NPENDINGCALLS 32
static struct {
    int (*func)(void *);
    void *arg;
} pendingcalls[NPENDINGCALLS];
static volatile int pendingfirst = 0;
static volatile int pendinglast = 0;
static volatile int pendingcalls_to_do = 0;
int
Py_AddPendingCall(int (*func)(void *), void *arg)
{
    static volatile int busy = 0;
    int i, j;
    /* XXX Begin critical section */
    if (busy)
        return -1;
    busy = 1;
    i = pendinglast;
    j = (i + 1) % NPENDINGCALLS;
    if (j == pendingfirst) {
        busy = 0;
        return -1; /* Queue full */
    }
    pendingcalls[i].func = func;
    pendingcalls[i].arg = arg;
    pendinglast = j;

    _Py_Ticker = 0;
    pendingcalls_to_do = 1; /* Signal main loop */
    busy = 0;
    /* XXX End critical section */
    return 0;
}
int
Py_MakePendingCalls(void)
{
    static int busy = 0;

    if (busy)
        return 0;
    busy = 1;
    pendingcalls_to_do = 0;
    /* perform a bounded number of calls, in case of recursion */
    for (;;) {
        int i;
        int (*func)(void *);
        void *arg;

        i = pendingfirst;
        if (i == pendinglast)
            break; /* Queue empty */
        func = pendingcalls[i].func;
        arg = pendingcalls[i].arg;
        pendingfirst = (i + 1) % NPENDINGCALLS;
        if (func(arg)) {
            busy = 0;
            pendingcalls_to_do = 1; /* We're not done yet */
            return -1;
        }
    }
    busy = 0;
    return 0;
}

#endif /* WITH_THREAD */
/* The interpreter's recursion limit */

#ifndef Py_DEFAULT_RECURSION_LIMIT
#define Py_DEFAULT_RECURSION_LIMIT 1000
#endif
static int recursion_limit = Py_DEFAULT_RECURSION_LIMIT;
int _Py_CheckRecursionLimit = Py_DEFAULT_RECURSION_LIMIT;

int
Py_GetRecursionLimit(void)
{
    return recursion_limit;
}

void
Py_SetRecursionLimit(int new_limit)
{
    recursion_limit = new_limit;
    _Py_CheckRecursionLimit = recursion_limit;
}

/* the macro Py_EnterRecursiveCall() only calls _Py_CheckRecursiveCall()
   if the recursion_depth reaches _Py_CheckRecursionLimit.
   If USE_STACKCHECK, the macro decrements _Py_CheckRecursionLimit
   to guarantee that _Py_CheckRecursiveCall() is regularly called.
   Without USE_STACKCHECK, there is no need for this. */
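/* Illustrative sketch (not in the original): how C code that can recurse
   into itself participates in this limit.  Py_EnterRecursiveCall() invokes
   _Py_CheckRecursiveCall() only once recursion_depth crosses
   _Py_CheckRecursionLimit.  `do_walk' is a hypothetical helper. */
#if 0
static PyObject *
walk(PyObject *node)
{
    PyObject *result;
    if (Py_EnterRecursiveCall(" in walk"))
        return NULL;            /* RuntimeError already set */
    result = do_walk(node);     /* hypothetical recursive work */
    Py_LeaveRecursiveCall();
    return result;
}
#endif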
int
_Py_CheckRecursiveCall(char *where)
{
    PyThreadState *tstate = PyThreadState_GET();

#ifdef USE_STACKCHECK
    if (PyOS_CheckStack()) {
        --tstate->recursion_depth;
        PyErr_SetString(PyExc_MemoryError, "Stack overflow");
        return -1;
    }
#endif
    _Py_CheckRecursionLimit = recursion_limit;
    if (tstate->recursion_critical)
        /* Somebody asked that we don't check for recursion. */
        return 0;
    if (tstate->overflowed) {
        if (tstate->recursion_depth > recursion_limit + 50) {
            /* Overflowing while handling an overflow. Give up. */
            Py_FatalError("Cannot recover from stack overflow.");
        }
        return 0;
    }
    if (tstate->recursion_depth > recursion_limit) {
        --tstate->recursion_depth;
        tstate->overflowed = 1;
        PyErr_Format(PyExc_RuntimeError,
                     "maximum recursion depth exceeded%s",
                     where);
        return -1;
    }
    return 0;
}
/* Status code for main loop (reason for stack unwind) */
enum why_code {
    WHY_NOT =       0x0001, /* No error */
    WHY_EXCEPTION = 0x0002, /* Exception occurred */
    WHY_RERAISE =   0x0004, /* Exception re-raised by 'finally' */
    WHY_RETURN =    0x0008, /* 'return' statement */
    WHY_BREAK =     0x0010, /* 'break' statement */
    WHY_CONTINUE =  0x0020, /* 'continue' statement */
    WHY_YIELD =     0x0040, /* 'yield' operator */
    WHY_SILENCED =  0x0080  /* Exception silenced by 'with' */
};

static enum why_code do_raise(PyObject *, PyObject *);
static int unpack_iterable(PyObject *, int, int, PyObject **);
/* Records whether tracing is on for any thread.  Counts the number of
   threads for which tstate->c_tracefunc is non-NULL, so if the value
   is 0, we know we don't have to check this thread's c_tracefunc.
   This speeds up the if statement in PyEval_EvalFrameEx() after
   fast_next_opcode. */
static int _Py_TracingPossible = 0;

/* for manipulating the thread switch and periodic "stuff" - used to be
   per thread, now just a pair o' globals */
int _Py_CheckInterval = 100;
volatile int _Py_Ticker = 0; /* so that we hit a "tick" first thing */
PyObject *
PyEval_EvalCode(PyCodeObject *co, PyObject *globals, PyObject *locals)
{
    return PyEval_EvalCodeEx(co,
                             globals, locals,
                             (PyObject **)NULL, 0,
                             (PyObject **)NULL, 0,
                             (PyObject **)NULL, 0,
                             NULL, NULL);
}
/* Interpreter main loop */

PyObject *
PyEval_EvalFrame(PyFrameObject *f) {
    /* This is for backward compatibility with extension modules that
       used this API; core interpreter code should call
       PyEval_EvalFrameEx() */
    return PyEval_EvalFrameEx(f, 0);
}

PyObject *
PyEval_EvalFrameEx(PyFrameObject *f, int throwflag)
{
#ifdef DXPAIRS
    int lastopcode = 0;
#endif
    register PyObject **stack_pointer;  /* Next free slot in value stack */
    register unsigned char *next_instr;
    register int opcode;        /* Current opcode */
    register int oparg;         /* Current opcode argument, if any */
    register enum why_code why; /* Reason for block stack unwind */
    register int err;           /* Error status -- nonzero if error */
    register PyObject *x;       /* Result object -- NULL if error */
    register PyObject *v;       /* Temporary objects popped off stack */
    register PyObject *w;
    register PyObject *u;
    register PyObject *t;
    register PyObject **fastlocals, **freevars;
    PyObject *retval = NULL;    /* Return value */
    PyThreadState *tstate = PyThreadState_GET();
    PyCodeObject *co;

    /* when tracing we set things up so that

           not (instr_lb <= current_bytecode_offset < instr_ub)

       is true when the line being executed has changed.  The
       initial values are such as to make this false the first
       time it is tested. */
    int instr_ub = -1, instr_lb = 0, instr_prev = -1;

    unsigned char *first_instr;
    PyObject *names;
    PyObject *consts;
#if defined(Py_DEBUG) || defined(LLTRACE)
    /* Make it easier to find out where we are with a debugger */
    char *filename;
#endif
/* Computed GOTOs, or
       the-optimization-commonly-but-improperly-known-as-"threaded code"
   using gcc's labels-as-values extension
   (http://gcc.gnu.org/onlinedocs/gcc/Labels-as-Values.html).

   The traditional bytecode evaluation loop uses a "switch" statement, which
   decent compilers will optimize as a single indirect branch instruction
   combined with a lookup table of jump addresses.  However, since the
   indirect jump instruction is shared by all opcodes, the CPU will have a
   hard time making the right prediction for where to jump next (actually,
   it will be always wrong except in the uncommon case of a sequence of
   several identical opcodes).

   "Threaded code", in contrast, uses an explicit jump table and an explicit
   indirect jump instruction at the end of each opcode.  Since the jump
   instruction is at a different address for each opcode, the CPU will make a
   separate prediction for each of these instructions, which is equivalent to
   predicting the second opcode of each opcode pair.  These predictions have
   a much better chance of turning out valid, especially in small bytecode loops.

   A mispredicted branch on a modern CPU flushes the whole pipeline and
   can cost several CPU cycles (depending on the pipeline depth),
   and potentially many more instructions (depending on the pipeline width).
   A correctly predicted branch, however, is nearly free.

   At the time of this writing, the "threaded code" version is up to 15-20%
   faster than the normal "switch" version, depending on the compiler and the
   CPU architecture.

   We disable the optimization if DYNAMIC_EXECUTION_PROFILE is defined,
   because it would render the measurements invalid.

   NOTE: care must be taken that the compiler doesn't try to "optimize" the
   indirect jumps by sharing them between all opcodes.  Such optimizations
   can be disabled on gcc by using the -fno-gcse flag (or possibly
   -fno-crossjumping).
*/
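/* Illustrative sketch (not in the original): the shape of labels-as-values
   dispatch, reduced to a toy two-opcode interpreter.  Each handler ends
   with its own indirect jump, giving the CPU one prediction site per
   opcode instead of a single shared one. */
#if 0
static int
toy_interp(const unsigned char *code)
{
    static void *targets[] = { &&op_nop, &&op_halt };
    const unsigned char *pc = code;
#define TOY_DISPATCH() goto *targets[*pc++]
    TOY_DISPATCH();
op_nop:
    TOY_DISPATCH();             /* separate jump => separate prediction */
op_halt:
    return 0;
#undef TOY_DISPATCH
}
#endif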
#if defined(USE_COMPUTED_GOTOS) && defined(DYNAMIC_EXECUTION_PROFILE)
#undef USE_COMPUTED_GOTOS
#endif

#ifdef USE_COMPUTED_GOTOS
/* Import the static jump table */
#include "opcode_targets.h"

/* This macro is used when several opcodes defer to the same implementation
   (e.g. SETUP_LOOP, SETUP_FINALLY) */
#define TARGET_WITH_IMPL(op, impl) \
    TARGET_##op: \
    opcode = op; \
    if (HAS_ARG(op)) \
        oparg = NEXTARG(); \
    case op: \
    goto impl; \

#define TARGET(op) \
    TARGET_##op: \
    opcode = op; \
    if (HAS_ARG(op)) \
        oparg = NEXTARG(); \
    case op:


#define DISPATCH() \
    { \
        /* Avoid multiple loads from _Py_Ticker despite `volatile` */ \
        int _tick = _Py_Ticker - 1; \
        _Py_Ticker = _tick; \
        if (_tick >= 0) { \
            FAST_DISPATCH(); \
        } \
        continue; \
    }

#ifdef LLTRACE
#define FAST_DISPATCH() \
    { \
        if (!lltrace && !_Py_TracingPossible) { \
            f->f_lasti = INSTR_OFFSET(); \
            goto *opcode_targets[*next_instr++]; \
        } \
        goto fast_next_opcode; \
    }
#else
#define FAST_DISPATCH() \
    { \
        if (!_Py_TracingPossible) { \
            f->f_lasti = INSTR_OFFSET(); \
            goto *opcode_targets[*next_instr++]; \
        } \
        goto fast_next_opcode; \
    }
#endif

#else
#define TARGET(op) \
    case op:
#define TARGET_WITH_IMPL(op, impl) \
    /* silence compiler warnings about `impl` unused */ \
    if (0) goto impl; \
    case op:
#define DISPATCH() continue
#define FAST_DISPATCH() goto fast_next_opcode
#endif
/* Tuple access macros */

#ifndef Py_DEBUG
#define GETITEM(v, i) PyTuple_GET_ITEM((PyTupleObject *)(v), (i))
#else
#define GETITEM(v, i) PyTuple_GetItem((v), (i))
#endif
#ifdef WITH_TSC
/* Use Pentium timestamp counter to mark certain events:
   inst0 -- beginning of switch statement for opcode dispatch
   inst1 -- end of switch statement (may be skipped)
   loop0 -- the top of the mainloop
   loop1 -- place where control returns again to top of mainloop
            (may be skipped)
   intr0 -- beginning of long interruption
   intr1 -- end of long interruption

   Many opcodes call out to helper C functions.  In some cases, the
   time in those functions should be counted towards the time for the
   opcode, but not in all cases.  For example, a CALL_FUNCTION opcode
   calls another Python function; there's no point in charging all the
   bytecode executed by the called function to the caller.

   It's hard to make a useful judgement statically.  In the presence
   of operator overloading, it's impossible to tell if a call will
   execute new Python code or not.

   It's a case-by-case judgement.  I'll use intr1 for the following
   cases:

   IMPORT_STAR
   IMPORT_FROM
   CALL_FUNCTION (and friends)

 */
    uint64 inst0, inst1, loop0, loop1, intr0 = 0, intr1 = 0;
    int ticked = 0;

    READ_TIMESTAMP(inst0);
    READ_TIMESTAMP(inst1);
    READ_TIMESTAMP(loop0);
    READ_TIMESTAMP(loop1);

    /* shut up the compiler */
    opcode = 0;
#endif
/* Code access macros */

#define INSTR_OFFSET()  ((int)(next_instr - first_instr))
#define NEXTOP()        (*next_instr++)
#define NEXTARG()       (next_instr += 2, (next_instr[-1]<<8) + next_instr[-2])
#define PEEKARG()       ((next_instr[2]<<8) + next_instr[1])
#define JUMPTO(x)       (next_instr = first_instr + (x))
#define JUMPBY(x)       (next_instr += (x))
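/* Worked example (annotation, not in the original): opcode arguments are
   16-bit little-endian.  After the opcode byte has been consumed, argument
   bytes 0x04 0x01 make NEXTARG() yield (0x01<<8) + 0x04 == 260.  PEEKARG()
   reads the argument of the still-unconsumed opcode at next_instr[0], which
   is why it looks at offsets 1 and 2 instead. */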
/* OpCode prediction macros
   Some opcodes tend to come in pairs, thus making it possible to
   predict the second code when the first is run.  For example,
   COMPARE_OP is often followed by JUMP_IF_FALSE or JUMP_IF_TRUE, and
   those opcodes are often followed by a POP_TOP.

   Verifying the prediction costs a single high-speed test of a register
   variable against a constant.  If the pairing was good, then the
   processor's own internal branch prediction has a high likelihood of
   success, resulting in a nearly zero-overhead transition to the
   next opcode.  A successful prediction saves a trip through the eval-loop
   including its two unpredictable branches, the HAS_ARG test and the
   switch-case.  Combined with the processor's internal branch prediction,
   a successful PREDICT has the effect of making the two opcodes run as if
   they were a single new opcode with the bodies combined.

   If collecting opcode statistics, your choices are to either keep the
   predictions turned-on and interpret the results as if some opcodes
   had been combined or turn-off predictions so that the opcode frequency
   counter updates for both opcodes.

   Opcode prediction is disabled with threaded code, since the latter allows
   the CPU to record separate branch prediction information for each
   opcode.
*/
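/* Illustrative sketch (not in the original): the pattern as it appears in
   opcode handlers.  COMPARE_OP announces its likely successor; the
   successor's handler carries the matching PREDICTED_WITH_ARG label. */
#if 0
        TARGET(COMPARE_OP)
            /* ... body ... */
            PREDICT(POP_JUMP_IF_FALSE);   /* jump straight to the PRED_ label */
            DISPATCH();

        PREDICTED_WITH_ARG(POP_JUMP_IF_FALSE);
        TARGET(POP_JUMP_IF_FALSE)
            /* ... body ... */
#endif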
#if defined(DYNAMIC_EXECUTION_PROFILE) || defined(USE_COMPUTED_GOTOS)
#define PREDICT(op)             if (0) goto PRED_##op
#define PREDICTED(op)           PRED_##op:
#define PREDICTED_WITH_ARG(op)  PRED_##op:
#else
#define PREDICT(op)             if (*next_instr == op) goto PRED_##op
#define PREDICTED(op)           PRED_##op: next_instr++
#define PREDICTED_WITH_ARG(op)  PRED_##op: oparg = PEEKARG(); next_instr += 3
#endif
/* Stack manipulation macros */

/* The stack can grow at most MAXINT deep, as co_nlocals and
   co_stacksize are ints. */
#define STACK_LEVEL()     ((int)(stack_pointer - f->f_valuestack))
#define EMPTY()           (STACK_LEVEL() == 0)
#define TOP()             (stack_pointer[-1])
#define SECOND()          (stack_pointer[-2])
#define THIRD()           (stack_pointer[-3])
#define FOURTH()          (stack_pointer[-4])
#define SET_TOP(v)        (stack_pointer[-1] = (v))
#define SET_SECOND(v)     (stack_pointer[-2] = (v))
#define SET_THIRD(v)      (stack_pointer[-3] = (v))
#define SET_FOURTH(v)     (stack_pointer[-4] = (v))
#define BASIC_STACKADJ(n) (stack_pointer += n)
#define BASIC_PUSH(v)     (*stack_pointer++ = (v))
#define BASIC_POP()       (*--stack_pointer)
#ifdef LLTRACE
#define PUSH(v)         { (void)(BASIC_PUSH(v), \
                          lltrace && prtrace(TOP(), "push")); \
                          assert(STACK_LEVEL() <= co->co_stacksize); }
#define POP()           ((void)(lltrace && prtrace(TOP(), "pop")), \
                         BASIC_POP())
#define STACKADJ(n)     { (void)(BASIC_STACKADJ(n), \
                          lltrace && prtrace(TOP(), "stackadj")); \
                          assert(STACK_LEVEL() <= co->co_stacksize); }
#define EXT_POP(STACK_POINTER) ((void)(lltrace && \
                                prtrace((STACK_POINTER)[-1], "ext_pop")), \
                                *--(STACK_POINTER))
#else
#define PUSH(v)                BASIC_PUSH(v)
#define POP()                  BASIC_POP()
#define STACKADJ(n)            BASIC_STACKADJ(n)
#define EXT_POP(STACK_POINTER) (*--(STACK_POINTER))
#endif
/* Local variable macros */

#define GETLOCAL(i)     (fastlocals[i])

/* The SETLOCAL() macro must not DECREF the local variable in-place and
   then store the new value; it must copy the old value to a temporary
   value, then store the new value, and then DECREF the temporary value.
   This is because it is possible that during the DECREF the frame is
   accessed by other code (e.g. a __del__ method or gc.collect()) and the
   variable would be pointing to already-freed memory. */
#define SETLOCAL(i, value)      do { PyObject *tmp = GETLOCAL(i); \
                                     GETLOCAL(i) = value; \
                                     Py_XDECREF(tmp); } while (0)
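/* Contrast sketch (not in the original): the naive macro below is the
   unsafe variant the comment above warns against -- the DECREF can run
   arbitrary code (__del__, gc) while the slot still points at the dying
   object. */
#if 0
#define SETLOCAL_UNSAFE(i, value) do { Py_XDECREF(GETLOCAL(i)); \
                                       GETLOCAL(i) = value; } while (0)
#endif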
#define UNWIND_BLOCK(b) \
    while (STACK_LEVEL() > (b)->b_level) { \
        PyObject *v = POP(); \
        Py_XDECREF(v); \
    }

#define UNWIND_EXCEPT_HANDLER(b) \
    { \
        PyObject *type, *value, *traceback; \
        assert(STACK_LEVEL() >= (b)->b_level + 3); \
        while (STACK_LEVEL() > (b)->b_level + 3) { \
            value = POP(); \
            Py_XDECREF(value); \
        } \
        type = tstate->exc_type; \
        value = tstate->exc_value; \
        traceback = tstate->exc_traceback; \
        tstate->exc_type = POP(); \
        tstate->exc_value = POP(); \
        tstate->exc_traceback = POP(); \
        Py_XDECREF(type); \
        Py_XDECREF(value); \
        Py_XDECREF(traceback); \
    }
#define SAVE_EXC_STATE() \
    { \
        PyObject *type, *value, *traceback; \
        Py_XINCREF(tstate->exc_type); \
        Py_XINCREF(tstate->exc_value); \
        Py_XINCREF(tstate->exc_traceback); \
        type = f->f_exc_type; \
        value = f->f_exc_value; \
        traceback = f->f_exc_traceback; \
        f->f_exc_type = tstate->exc_type; \
        f->f_exc_value = tstate->exc_value; \
        f->f_exc_traceback = tstate->exc_traceback; \
        Py_XDECREF(type); \
        Py_XDECREF(value); \
        Py_XDECREF(traceback); \
    }

#define SWAP_EXC_STATE() \
    { \
        PyObject *tmp; \
        tmp = tstate->exc_type; \
        tstate->exc_type = f->f_exc_type; \
        f->f_exc_type = tmp; \
        tmp = tstate->exc_value; \
        tstate->exc_value = f->f_exc_value; \
        f->f_exc_value = tmp; \
        tmp = tstate->exc_traceback; \
        tstate->exc_traceback = f->f_exc_traceback; \
        f->f_exc_traceback = tmp; \
    }
/* Start of code */

    if (Py_EnterRecursiveCall(""))
        return NULL;

    tstate->frame = f;

    if (tstate->use_tracing) {
        if (tstate->c_tracefunc != NULL) {
            /* tstate->c_tracefunc, if defined, is a
               function that will be called on *every* entry
               to a code block.  Its return value, if not
               None, is a function that will be called at
               the start of each executed line of code.
               (Actually, the function must return itself
               in order to continue tracing.)  The trace
               functions are called with three arguments:
               a pointer to the current frame, a string
               indicating why the function is called, and
               an argument which depends on the situation.
               The global trace function is also called
               whenever an exception is detected. */
            if (call_trace_protected(tstate->c_tracefunc,
                                     tstate->c_traceobj,
                                     f, PyTrace_CALL, Py_None)) {
                /* Trace function raised an error */
                goto exit_eval_frame;
            }
        }
        if (tstate->c_profilefunc != NULL) {
            /* Similar for c_profilefunc, except it needn't
               return itself and isn't called for "line" events */
            if (call_trace_protected(tstate->c_profilefunc,
                                     tstate->c_profileobj,
                                     f, PyTrace_CALL, Py_None)) {
                /* Profile function raised an error */
                goto exit_eval_frame;
            }
        }
    }

    co = f->f_code;
    names = co->co_names;
    consts = co->co_consts;
    fastlocals = f->f_localsplus;
    freevars = f->f_localsplus + co->co_nlocals;
    first_instr = (unsigned char*) PyBytes_AS_STRING(co->co_code);
    /* An explanation is in order for the next line.

       f->f_lasti now refers to the index of the last instruction
       executed.  You might think this was obvious from the name, but
       this wasn't always true before 2.3!  PyFrame_New now sets
       f->f_lasti to -1 (i.e. the index *before* the first instruction)
       and YIELD_VALUE doesn't fiddle with f_lasti any more.  So this
       does work.

       When the PREDICT() macros are enabled, some opcode pairs follow in
       direct succession without updating f->f_lasti.  A successful
       prediction effectively links the two codes together as if they
       were a single new opcode; accordingly, f->f_lasti will point to
       the first code in the pair (for instance, GET_ITER followed by
       FOR_ITER is effectively a single opcode and f->f_lasti will point
       to the beginning of the combined pair.)
    */
    next_instr = first_instr + f->f_lasti + 1;
    stack_pointer = f->f_stacktop;
    assert(stack_pointer != NULL);
    f->f_stacktop = NULL;       /* remains NULL unless yield suspends frame */

    if (co->co_flags & CO_GENERATOR && !throwflag) {
        if (f->f_exc_type != NULL && f->f_exc_type != Py_None) {
            /* We were in an except handler when we left,
               restore the exception state which was put aside
               (see YIELD_VALUE). */
            SWAP_EXC_STATE();
        }
        else {
            SAVE_EXC_STATE();
        }
    }
#ifdef LLTRACE
    lltrace = PyDict_GetItemString(f->f_globals, "__lltrace__") != NULL;
#endif
#if defined(Py_DEBUG) || defined(LLTRACE)
    filename = _PyUnicode_AsString(co->co_filename);
#endif

    why = WHY_NOT;
    err = 0;
    x = Py_None;        /* Not a reference, just anything non-NULL */
    w = NULL;
    if (throwflag) { /* support for generator.throw() */
        why = WHY_EXCEPTION;
        goto on_error;
    }

    for (;;) {
#ifdef WITH_TSC
        if (inst1 == 0) {
            /* Almost surely, the opcode executed a break
               or a continue, preventing inst1 from being set
               on the way out of the loop.
            */
            READ_TIMESTAMP(inst1);
            loop1 = inst1;
        }
        dump_tsc(opcode, ticked, inst0, inst1, loop0, loop1,
                 intr0, intr1);
        ticked = 0;
        inst1 = 0;
        intr0 = 0;
        intr1 = 0;
        READ_TIMESTAMP(loop0);
#endif
        assert(stack_pointer >= f->f_valuestack); /* else underflow */
        assert(STACK_LEVEL() <= co->co_stacksize);  /* else overflow */

        /* Do periodic things.  Doing this every time through
           the loop would add too much overhead, so we do it
           only every Nth instruction.  We also do it if
           ``pendingcalls_to_do'' is set, i.e. when an asynchronous
           event needs attention (e.g. a signal handler or
           async I/O handler); see Py_AddPendingCall() and
           Py_MakePendingCalls() above. */
        if (--_Py_Ticker < 0) {
            if (*next_instr == SETUP_FINALLY) {
                /* Make the last opcode before
                   a try: finally: block uninterruptible. */
                goto fast_next_opcode;
            }
            _Py_Ticker = _Py_CheckInterval;
            tstate->tick_counter++;
#ifdef WITH_TSC
            ticked = 1;
#endif
            if (pendingcalls_to_do) {
                if (Py_MakePendingCalls() < 0) {
                    why = WHY_EXCEPTION;
                    goto on_error;
                }
                if (pendingcalls_to_do)
                    /* MakePendingCalls() didn't succeed.
                       Force early re-execution of this
                       "periodic" code, possibly after
                       a thread switch */
                    _Py_Ticker = 0;
            }
#ifdef WITH_THREAD
            if (interpreter_lock) {
                /* Give another thread a chance */

                if (PyThreadState_Swap(NULL) != tstate)
                    Py_FatalError("ceval: tstate mix-up");
                PyThread_release_lock(interpreter_lock);

                /* Other threads may run now */

                PyThread_acquire_lock(interpreter_lock, 1);
                if (PyThreadState_Swap(tstate) != NULL)
                    Py_FatalError("ceval: orphan tstate");

                /* Check for thread interrupts */

                if (tstate->async_exc != NULL) {
                    x = tstate->async_exc;
                    tstate->async_exc = NULL;
                    _Py_Ticker = 0;
                    PyErr_SetNone(x);
                    Py_DECREF(x);
                    why = WHY_EXCEPTION;
                    goto on_error;
                }
            }
#endif
        }
    fast_next_opcode:
        f->f_lasti = INSTR_OFFSET();

        /* line-by-line tracing support */

        if (_Py_TracingPossible &&
            tstate->c_tracefunc != NULL && !tstate->tracing) {
            /* see maybe_call_line_trace
               for expository comments */
            f->f_stacktop = stack_pointer;

            err = maybe_call_line_trace(tstate->c_tracefunc,
                                        tstate->c_traceobj,
                                        f, &instr_lb, &instr_ub,
                                        &instr_prev);
            /* Reload possibly changed frame fields */
            JUMPTO(f->f_lasti);
            if (f->f_stacktop != NULL) {
                stack_pointer = f->f_stacktop;
                f->f_stacktop = NULL;
            }
            if (err) {
                /* trace function raised an exception */
                goto on_error;
            }
        }
        /* Extract opcode and argument */

        opcode = NEXTOP();
        oparg = 0;   /* allows oparg to be stored in a register because
                        it doesn't have to be remembered across a full loop */
        if (HAS_ARG(opcode))
            oparg = NEXTARG();
    dispatch_opcode:
#ifdef DYNAMIC_EXECUTION_PROFILE
#ifdef DXPAIRS
        dxpairs[lastopcode][opcode]++;
        lastopcode = opcode;
#endif
        dxp[opcode]++;
#endif
#ifdef LLTRACE
        /* Instruction tracing */

        if (lltrace) {
            if (HAS_ARG(opcode)) {
                printf("%d: %d, %d\n",
                       f->f_lasti, opcode, oparg);
            }
            else {
                printf("%d: %d\n",
                       f->f_lasti, opcode);
            }
        }
#endif
        /* Main switch on opcode */
        READ_TIMESTAMP(inst0);

        switch (opcode) {

        /* BEWARE!
           It is essential that any operation that fails sets either
           x to NULL, err to nonzero, or why to anything but WHY_NOT,
           and that no operation that succeeds does this! */

        /* case STOP_CODE: this is an error! */
        TARGET(LOAD_FAST)
            x = GETLOCAL(oparg);
            if (x != NULL) {
                Py_INCREF(x);
                PUSH(x);
                FAST_DISPATCH();
            }
            format_exc_check_arg(PyExc_UnboundLocalError,
                                 UNBOUNDLOCAL_ERROR_MSG,
                                 PyTuple_GetItem(co->co_varnames, oparg));
            break;

        TARGET(LOAD_CONST)
            x = GETITEM(consts, oparg);
            Py_INCREF(x);
            PUSH(x);
            FAST_DISPATCH();

        PREDICTED_WITH_ARG(STORE_FAST);
        TARGET(STORE_FAST)
            v = POP();
            SETLOCAL(oparg, v);
            FAST_DISPATCH();

        TARGET(DUP_TOPX)
            if (oparg == 2) {
                x = TOP();
                Py_INCREF(x);
                w = SECOND();
                Py_INCREF(w);
                STACKADJ(2);
                SET_TOP(x);
                SET_SECOND(w);
                FAST_DISPATCH();
            } else if (oparg == 3) {
                x = TOP();
                Py_INCREF(x);
                w = SECOND();
                Py_INCREF(w);
                v = THIRD();
                Py_INCREF(v);
                STACKADJ(3);
                SET_TOP(x);
                SET_SECOND(w);
                SET_THIRD(v);
                FAST_DISPATCH();
            }
            Py_FatalError("invalid argument to DUP_TOPX"
                          " (bytecode corruption?)");
            /* Never returns, so don't bother to set why. */
            break;
        TARGET(UNARY_POSITIVE)
            v = TOP();
            x = PyNumber_Positive(v);
            Py_DECREF(v);
            SET_TOP(x);
            if (x != NULL) DISPATCH();
            break;

        TARGET(UNARY_NEGATIVE)
            v = TOP();
            x = PyNumber_Negative(v);
            Py_DECREF(v);
            SET_TOP(x);
            if (x != NULL) DISPATCH();
            break;

        TARGET(UNARY_NOT)
            v = TOP();
            err = PyObject_IsTrue(v);
            Py_DECREF(v);
            if (err == 0) {
                Py_INCREF(Py_True);
                SET_TOP(Py_True);
                DISPATCH();
            }
            else if (err > 0) {
                Py_INCREF(Py_False);
                SET_TOP(Py_False);
                err = 0;
                DISPATCH();
            }
            STACKADJ(-1);
            break;

        TARGET(UNARY_INVERT)
            v = TOP();
            x = PyNumber_Invert(v);
            Py_DECREF(v);
            SET_TOP(x);
            if (x != NULL) DISPATCH();
            break;
        TARGET(BINARY_POWER)
            w = POP();
            v = TOP();
            x = PyNumber_Power(v, w, Py_None);
            Py_DECREF(v);
            Py_DECREF(w);
            SET_TOP(x);
            if (x != NULL) DISPATCH();
            break;

        TARGET(BINARY_MULTIPLY)
            w = POP();
            v = TOP();
            x = PyNumber_Multiply(v, w);
            Py_DECREF(v);
            Py_DECREF(w);
            SET_TOP(x);
            if (x != NULL) DISPATCH();
            break;

        TARGET(BINARY_TRUE_DIVIDE)
            w = POP();
            v = TOP();
            x = PyNumber_TrueDivide(v, w);
            Py_DECREF(v);
            Py_DECREF(w);
            SET_TOP(x);
            if (x != NULL) DISPATCH();
            break;

        TARGET(BINARY_FLOOR_DIVIDE)
            w = POP();
            v = TOP();
            x = PyNumber_FloorDivide(v, w);
            Py_DECREF(v);
            Py_DECREF(w);
            SET_TOP(x);
            if (x != NULL) DISPATCH();
            break;

        TARGET(BINARY_MODULO)
            w = POP();
            v = TOP();
            if (PyUnicode_CheckExact(v))
                x = PyUnicode_Format(v, w);
            else
                x = PyNumber_Remainder(v, w);
            Py_DECREF(v);
            Py_DECREF(w);
            SET_TOP(x);
            if (x != NULL) DISPATCH();
            break;
        TARGET(BINARY_ADD)
            w = POP();
            v = TOP();
            if (PyUnicode_CheckExact(v) &&
                PyUnicode_CheckExact(w)) {
                x = unicode_concatenate(v, w, f, next_instr);
                /* unicode_concatenate consumed the ref to v */
                goto skip_decref_vx;
            }
            else {
                x = PyNumber_Add(v, w);
            }
            Py_DECREF(v);
          skip_decref_vx:
            Py_DECREF(w);
            SET_TOP(x);
            if (x != NULL) DISPATCH();
            break;
        TARGET(BINARY_SUBTRACT)
            w = POP();
            v = TOP();
            x = PyNumber_Subtract(v, w);
            Py_DECREF(v);
            Py_DECREF(w);
            SET_TOP(x);
            if (x != NULL) DISPATCH();
            break;

        TARGET(BINARY_SUBSCR)
            w = POP();
            v = TOP();
            x = PyObject_GetItem(v, w);
            Py_DECREF(v);
            Py_DECREF(w);
            SET_TOP(x);
            if (x != NULL) DISPATCH();
            break;
        TARGET(BINARY_LSHIFT)
            w = POP();
            v = TOP();
            x = PyNumber_Lshift(v, w);
            Py_DECREF(v);
            Py_DECREF(w);
            SET_TOP(x);
            if (x != NULL) DISPATCH();
            break;

        TARGET(BINARY_RSHIFT)
            w = POP();
            v = TOP();
            x = PyNumber_Rshift(v, w);
            Py_DECREF(v);
            Py_DECREF(w);
            SET_TOP(x);
            if (x != NULL) DISPATCH();
            break;

        TARGET(BINARY_AND)
            w = POP();
            v = TOP();
            x = PyNumber_And(v, w);
            Py_DECREF(v);
            Py_DECREF(w);
            SET_TOP(x);
            if (x != NULL) DISPATCH();
            break;

        TARGET(BINARY_XOR)
            w = POP();
            v = TOP();
            x = PyNumber_Xor(v, w);
            Py_DECREF(v);
            Py_DECREF(w);
            SET_TOP(x);
            if (x != NULL) DISPATCH();
            break;

        TARGET(BINARY_OR)
            w = POP();
            v = TOP();
            x = PyNumber_Or(v, w);
            Py_DECREF(v);
            Py_DECREF(w);
            SET_TOP(x);
            if (x != NULL) DISPATCH();
            break;
        TARGET(LIST_APPEND)
            w = POP();
            v = stack_pointer[-oparg];
            err = PyList_Append(v, w);
            Py_DECREF(w);
            if (err == 0) {
                PREDICT(JUMP_ABSOLUTE);
                DISPATCH();
            }
            break;

        TARGET(SET_ADD)
            w = POP();
            v = stack_pointer[-oparg];
            err = PySet_Add(v, w);
            Py_DECREF(w);
            if (err == 0) {
                PREDICT(JUMP_ABSOLUTE);
                DISPATCH();
            }
            break;
        TARGET(INPLACE_POWER)
            w = POP();
            v = TOP();
            x = PyNumber_InPlacePower(v, w, Py_None);
            Py_DECREF(v);
            Py_DECREF(w);
            SET_TOP(x);
            if (x != NULL) DISPATCH();
            break;

        TARGET(INPLACE_MULTIPLY)
            w = POP();
            v = TOP();
            x = PyNumber_InPlaceMultiply(v, w);
            Py_DECREF(v);
            Py_DECREF(w);
            SET_TOP(x);
            if (x != NULL) DISPATCH();
            break;

        TARGET(INPLACE_TRUE_DIVIDE)
            w = POP();
            v = TOP();
            x = PyNumber_InPlaceTrueDivide(v, w);
            Py_DECREF(v);
            Py_DECREF(w);
            SET_TOP(x);
            if (x != NULL) DISPATCH();
            break;

        TARGET(INPLACE_FLOOR_DIVIDE)
            w = POP();
            v = TOP();
            x = PyNumber_InPlaceFloorDivide(v, w);
            Py_DECREF(v);
            Py_DECREF(w);
            SET_TOP(x);
            if (x != NULL) DISPATCH();
            break;

        TARGET(INPLACE_MODULO)
            w = POP();
            v = TOP();
            x = PyNumber_InPlaceRemainder(v, w);
            Py_DECREF(v);
            Py_DECREF(w);
            SET_TOP(x);
            if (x != NULL) DISPATCH();
            break;

        TARGET(INPLACE_ADD)
            w = POP();
            v = TOP();
            if (PyUnicode_CheckExact(v) &&
                PyUnicode_CheckExact(w)) {
                x = unicode_concatenate(v, w, f, next_instr);
                /* unicode_concatenate consumed the ref to v */
                goto skip_decref_v;
            }
            else {
                x = PyNumber_InPlaceAdd(v, w);
            }
            Py_DECREF(v);
          skip_decref_v:
            Py_DECREF(w);
            SET_TOP(x);
            if (x != NULL) DISPATCH();
            break;

        TARGET(INPLACE_SUBTRACT)
            w = POP();
            v = TOP();
            x = PyNumber_InPlaceSubtract(v, w);
            Py_DECREF(v);
            Py_DECREF(w);
            SET_TOP(x);
            if (x != NULL) DISPATCH();
            break;

        TARGET(INPLACE_LSHIFT)
            w = POP();
            v = TOP();
            x = PyNumber_InPlaceLshift(v, w);
            Py_DECREF(v);
            Py_DECREF(w);
            SET_TOP(x);
            if (x != NULL) DISPATCH();
            break;

        TARGET(INPLACE_RSHIFT)
            w = POP();
            v = TOP();
            x = PyNumber_InPlaceRshift(v, w);
            Py_DECREF(v);
            Py_DECREF(w);
            SET_TOP(x);
            if (x != NULL) DISPATCH();
            break;

        TARGET(INPLACE_AND)
            w = POP();
            v = TOP();
            x = PyNumber_InPlaceAnd(v, w);
            Py_DECREF(v);
            Py_DECREF(w);
            SET_TOP(x);
            if (x != NULL) DISPATCH();
            break;

        TARGET(INPLACE_XOR)
            w = POP();
            v = TOP();
            x = PyNumber_InPlaceXor(v, w);
            Py_DECREF(v);
            Py_DECREF(w);
            SET_TOP(x);
            if (x != NULL) DISPATCH();
            break;

        TARGET(INPLACE_OR)
            w = POP();
            v = TOP();
            x = PyNumber_InPlaceOr(v, w);
            Py_DECREF(v);
            Py_DECREF(w);
            SET_TOP(x);
            if (x != NULL) DISPATCH();
            break;
        TARGET(STORE_SUBSCR)
            w = TOP();
            v = SECOND();
            u = THIRD();
            STACKADJ(-3);
            /* v[w] = u */
            err = PyObject_SetItem(v, w, u);
            Py_DECREF(u);
            Py_DECREF(v);
            Py_DECREF(w);
            if (err == 0) DISPATCH();
            break;

        TARGET(DELETE_SUBSCR)
            w = TOP();
            v = SECOND();
            STACKADJ(-2);
            /* del v[w] */
            err = PyObject_DelItem(v, w);
            Py_DECREF(v);
            Py_DECREF(w);
            if (err == 0) DISPATCH();
            break;
        TARGET(PRINT_EXPR)
            v = POP();
            w = PySys_GetObject("displayhook");
            if (w == NULL) {
                PyErr_SetString(PyExc_RuntimeError,
                                "lost sys.displayhook");
                err = -1;
                x = NULL;
            }
            if (err == 0) {
                x = PyTuple_Pack(1, v);
                if (x == NULL)
                    err = -1;
            }
            if (err == 0) {
                w = PyEval_CallObject(w, x);
                Py_XDECREF(x);
                if (w == NULL)
                    err = -1;
                x = w;
            }
            Py_DECREF(v);
            break;
#ifdef CASE_TOO_BIG
        default: switch (opcode) {
#endif
        TARGET(RAISE_VARARGS)
            v = w = NULL;
            switch (oparg) {
            case 2:
                v = POP(); /* cause */
            case 1:
                w = POP(); /* exc */
            case 0: /* Fallthrough */
                why = do_raise(w, v);
                break;
            default:
                PyErr_SetString(PyExc_SystemError,
                                "bad RAISE_VARARGS oparg");
                why = WHY_EXCEPTION;
                break;
            }
            break;
        TARGET(STORE_LOCALS)
            x = POP();
            v = f->f_locals;
            Py_XDECREF(v);
            f->f_locals = x;
            DISPATCH();

        TARGET(RETURN_VALUE)
            retval = POP();
            why = WHY_RETURN;
            goto fast_block_end;
        TARGET(YIELD_VALUE)
            retval = POP();
            f->f_stacktop = stack_pointer;
            why = WHY_YIELD;
            /* Put aside the current exception state and restore
               that of the calling frame. This only serves when
               "yield" is used inside an except handler. */
            SWAP_EXC_STATE();
            goto fast_yield;
        TARGET(POP_EXCEPT)
            {
                PyTryBlock *b = PyFrame_BlockPop(f);
                if (b->b_type != EXCEPT_HANDLER) {
                    PyErr_SetString(PyExc_SystemError,
                        "popped block is not an except handler");
                    why = WHY_EXCEPTION;
                    break;
                }
                UNWIND_EXCEPT_HANDLER(b);
            }
            DISPATCH();

        TARGET(POP_BLOCK)
            {
                PyTryBlock *b = PyFrame_BlockPop(f);
                UNWIND_BLOCK(b);
            }
            DISPATCH();
        PREDICTED(END_FINALLY);
        TARGET(END_FINALLY)
            v = POP();
            if (PyLong_Check(v)) {
                why = (enum why_code) PyLong_AS_LONG(v);
                assert(why != WHY_YIELD);
                if (why == WHY_RETURN ||
                    why == WHY_CONTINUE)
                    retval = POP();
                if (why == WHY_SILENCED) {
                    /* An exception was silenced by 'with', we must
                       manually unwind the EXCEPT_HANDLER block which was
                       created when the exception was caught, otherwise
                       the stack will be in an inconsistent state. */
                    PyTryBlock *b = PyFrame_BlockPop(f);
                    if (b->b_type != EXCEPT_HANDLER) {
                        PyErr_SetString(PyExc_SystemError,
                            "popped block is not an except handler");
                        why = WHY_EXCEPTION;
                    }
                    else {
                        UNWIND_EXCEPT_HANDLER(b);
                        why = WHY_NOT;
                    }
                }
            }
            else if (PyExceptionClass_Check(v)) {
                w = POP();
                u = POP();
                PyErr_Restore(v, w, u);
                why = WHY_RERAISE;
                break;
            }
            else if (v != Py_None) {
                PyErr_SetString(PyExc_SystemError,
                    "'finally' pops bad exception");
                why = WHY_EXCEPTION;
            }
            Py_DECREF(v);
            break;
        TARGET(LOAD_BUILD_CLASS)
            x = PyDict_GetItemString(f->f_builtins,
                                     "__build_class__");
            if (x == NULL) {
                PyErr_SetString(PyExc_ImportError,
                                "__build_class__ not found");
                break;
            }
            Py_INCREF(x);
            PUSH(x);
            DISPATCH();
        TARGET(STORE_NAME)
            w = GETITEM(names, oparg);
            v = POP();
            if ((x = f->f_locals) != NULL) {
                if (PyDict_CheckExact(x))
                    err = PyDict_SetItem(x, w, v);
                else
                    err = PyObject_SetItem(x, w, v);
                Py_DECREF(v);
                if (err == 0) DISPATCH();
                break;
            }
            PyErr_Format(PyExc_SystemError,
                         "no locals found when storing %R", w);
            break;
        TARGET(DELETE_NAME)
            w = GETITEM(names, oparg);
            if ((x = f->f_locals) != NULL) {
                if ((err = PyObject_DelItem(x, w)) != 0)
                    format_exc_check_arg(PyExc_NameError,
                                         NAME_ERROR_MSG,
                                         w);
                break;
            }
            PyErr_Format(PyExc_SystemError,
                         "no locals when deleting %R", w);
            break;
        PREDICTED_WITH_ARG(UNPACK_SEQUENCE);
        TARGET(UNPACK_SEQUENCE)
            v = POP();
            if (PyTuple_CheckExact(v) &&
                PyTuple_GET_SIZE(v) == oparg) {
                PyObject **items = \
                    ((PyTupleObject *)v)->ob_item;
                while (oparg--) {
                    w = items[oparg];
                    Py_INCREF(w);
                    PUSH(w);
                }
                Py_DECREF(v);
                DISPATCH();
            } else if (PyList_CheckExact(v) &&
                       PyList_GET_SIZE(v) == oparg) {
                PyObject **items = \
                    ((PyListObject *)v)->ob_item;
                while (oparg--) {
                    w = items[oparg];
                    Py_INCREF(w);
                    PUSH(w);
                }
            } else if (unpack_iterable(v, oparg, -1,
                                       stack_pointer + oparg)) {
                STACKADJ(oparg);
            } else {
                /* unpack_iterable() raised an exception */
                why = WHY_EXCEPTION;
            }
            Py_DECREF(v);
            break;
        TARGET(UNPACK_EX)
        {
            int totalargs = 1 + (oparg & 0xFF) + (oparg >> 8);
            v = POP();

            if (unpack_iterable(v, oparg & 0xFF, oparg >> 8,
                                stack_pointer + totalargs)) {
                stack_pointer += totalargs;
            } else {
                why = WHY_EXCEPTION;
            }
            Py_DECREF(v);
            break;
        }
        TARGET(STORE_ATTR)
            w = GETITEM(names, oparg);
            v = TOP();
            u = SECOND();
            STACKADJ(-2);
            err = PyObject_SetAttr(v, w, u); /* v.w = u */
            Py_DECREF(v);
            Py_DECREF(u);
            if (err == 0) DISPATCH();
            break;

        TARGET(DELETE_ATTR)
            w = GETITEM(names, oparg);
            v = POP();
            err = PyObject_SetAttr(v, w, (PyObject *)NULL);
                                            /* del v.w */
            Py_DECREF(v);
            break;
        TARGET(STORE_GLOBAL)
            w = GETITEM(names, oparg);
            v = POP();
            err = PyDict_SetItem(f->f_globals, w, v);
            Py_DECREF(v);
            if (err == 0) DISPATCH();
            break;

        TARGET(DELETE_GLOBAL)
            w = GETITEM(names, oparg);
            if ((err = PyDict_DelItem(f->f_globals, w)) != 0)
                format_exc_check_arg(
                    PyExc_NameError, GLOBAL_NAME_ERROR_MSG, w);
            break;
        TARGET(LOAD_NAME)
            w = GETITEM(names, oparg);
            if ((v = f->f_locals) == NULL) {
                PyErr_Format(PyExc_SystemError,
                             "no locals when loading %R", w);
                why = WHY_EXCEPTION;
                break;
            }
            if (PyDict_CheckExact(v)) {
                x = PyDict_GetItem(v, w);
                Py_XINCREF(x);
            }
            else {
                x = PyObject_GetItem(v, w);
                if (x == NULL && PyErr_Occurred()) {
                    if (!PyErr_ExceptionMatches(
                                    PyExc_KeyError))
                        break;
                    PyErr_Clear();
                }
            }
            if (x == NULL) {
                x = PyDict_GetItem(f->f_globals, w);
                if (x == NULL) {
                    x = PyDict_GetItem(f->f_builtins, w);
                    if (x == NULL) {
                        format_exc_check_arg(
                                    PyExc_NameError,
                                    NAME_ERROR_MSG, w);
                        break;
                    }
                }
                Py_INCREF(x);
            }
            PUSH(x);
            DISPATCH();
        TARGET(LOAD_GLOBAL)
            w = GETITEM(names, oparg);
            if (PyUnicode_CheckExact(w)) {
                /* Inline the PyDict_GetItem() calls.
                   WARNING: this is an extreme speed hack.
                   Do not try this at home. */
                long hash = ((PyUnicodeObject *)w)->hash;
                if (hash != -1) {
                    PyDictObject *d;
                    PyDictEntry *e;
                    d = (PyDictObject *)(f->f_globals);
                    e = d->ma_lookup(d, w, hash);
                    if (e == NULL) {
                        x = NULL;
                        break;
                    }
                    x = e->me_value;
                    if (x != NULL) {
                        Py_INCREF(x);
                        PUSH(x);
                        DISPATCH();
                    }
                    d = (PyDictObject *)(f->f_builtins);
                    e = d->ma_lookup(d, w, hash);
                    if (e == NULL) {
                        x = NULL;
                        break;
                    }
                    x = e->me_value;
                    if (x != NULL) {
                        Py_INCREF(x);
                        PUSH(x);
                        DISPATCH();
                    }
                    goto load_global_error;
                }
            }
            /* This is the un-inlined version of the code above */
            x = PyDict_GetItem(f->f_globals, w);
            if (x == NULL) {
                x = PyDict_GetItem(f->f_builtins, w);
                if (x == NULL) {
                  load_global_error:
                    format_exc_check_arg(
                                PyExc_NameError,
                                GLOBAL_NAME_ERROR_MSG, w);
                    break;
                }
            }
            Py_INCREF(x);
            PUSH(x);
            DISPATCH();
        TARGET(DELETE_FAST)
            x = GETLOCAL(oparg);
            if (x != NULL) {
                SETLOCAL(oparg, NULL);
                DISPATCH();
            }
            format_exc_check_arg(
                PyExc_UnboundLocalError,
                UNBOUNDLOCAL_ERROR_MSG,
                PyTuple_GetItem(co->co_varnames, oparg)
                );
            break;

        TARGET(LOAD_CLOSURE)
            x = freevars[oparg];
            Py_INCREF(x);
            PUSH(x);
            if (x != NULL) DISPATCH();
            break;
        TARGET(LOAD_DEREF)
            x = freevars[oparg];
            w = PyCell_Get(x);
            if (w != NULL) {
                PUSH(w);
                DISPATCH();
            }
            err = -1;
            /* Don't stomp existing exception */
            if (PyErr_Occurred())
                break;
            if (oparg < PyTuple_GET_SIZE(co->co_cellvars)) {
                v = PyTuple_GET_ITEM(co->co_cellvars,
                                     oparg);
                format_exc_check_arg(
                       PyExc_UnboundLocalError,
                       UNBOUNDLOCAL_ERROR_MSG,
                       v);
            } else {
                v = PyTuple_GET_ITEM(co->co_freevars, oparg -
                    PyTuple_GET_SIZE(co->co_cellvars));
                format_exc_check_arg(PyExc_NameError,
                                     UNBOUNDFREE_ERROR_MSG, v);
            }
            break;

        TARGET(STORE_DEREF)
            w = POP();
            x = freevars[oparg];
            PyCell_Set(x, w);
            Py_DECREF(w);
            DISPATCH();
        TARGET(BUILD_TUPLE)
            x = PyTuple_New(oparg);
            if (x != NULL) {
                for (; --oparg >= 0;) {
                    w = POP();
                    PyTuple_SET_ITEM(x, oparg, w);
                }
                PUSH(x);
                DISPATCH();
            }
            break;

        TARGET(BUILD_LIST)
            x = PyList_New(oparg);
            if (x != NULL) {
                for (; --oparg >= 0;) {
                    w = POP();
                    PyList_SET_ITEM(x, oparg, w);
                }
                PUSH(x);
                DISPATCH();
            }
            break;

        TARGET(BUILD_SET)
            x = PySet_New(NULL);
            if (x != NULL) {
                for (; --oparg >= 0;) {
                    w = POP();
                    if (err == 0)
                        err = PySet_Add(x, w);
                    Py_DECREF(w);
                }
                if (err != 0) {
                    Py_DECREF(x);
                    break;
                }
                PUSH(x);
                DISPATCH();
            }
            break;
        TARGET(BUILD_MAP)
            x = _PyDict_NewPresized((Py_ssize_t)oparg);
            PUSH(x);
            if (x != NULL) DISPATCH();
            break;
        TARGET(STORE_MAP)
            w = TOP();     /* key */
            u = SECOND();  /* value */
            v = THIRD();   /* dict */
            STACKADJ(-2);
            assert (PyDict_CheckExact(v));
            err = PyDict_SetItem(v, w, u);  /* v[w] = u */
            Py_DECREF(u);
            Py_DECREF(w);
            if (err == 0) DISPATCH();
            break;

        TARGET(MAP_ADD)
            w = TOP();     /* key */
            u = SECOND();  /* value */
            STACKADJ(-2);
            v = stack_pointer[-oparg];  /* dict */
            assert (PyDict_CheckExact(v));
            err = PyDict_SetItem(v, w, u);  /* v[w] = u */
            Py_DECREF(u);
            Py_DECREF(w);
            if (err == 0) {
                PREDICT(JUMP_ABSOLUTE);
                DISPATCH();
            }
            break;
        TARGET(LOAD_ATTR)
            w = GETITEM(names, oparg);
            v = TOP();
            x = PyObject_GetAttr(v, w);
            Py_DECREF(v);
            SET_TOP(x);
            if (x != NULL) DISPATCH();
            break;

        TARGET(COMPARE_OP)
            w = POP();
            v = TOP();
            x = cmp_outcome(oparg, v, w);
            Py_DECREF(v);
            Py_DECREF(w);
            SET_TOP(x);
            if (x == NULL) break;
            PREDICT(POP_JUMP_IF_FALSE);
            PREDICT(POP_JUMP_IF_TRUE);
            DISPATCH();
        TARGET(IMPORT_NAME)
            w = GETITEM(names, oparg);
            x = PyDict_GetItemString(f->f_builtins, "__import__");
            if (x == NULL) {
                PyErr_SetString(PyExc_ImportError,
                                "__import__ not found");
                break;
            }
            Py_INCREF(x);
            v = POP();
            u = TOP();
            if (PyLong_AsLong(u) != -1 || PyErr_Occurred())
                w = PyTuple_Pack(5,
                            w,
                            f->f_globals,
                            f->f_locals == NULL ?
                                  Py_None : f->f_locals,
                            v,
                            u);
            else
                w = PyTuple_Pack(4,
                            w,
                            f->f_globals,
                            f->f_locals == NULL ?
                                  Py_None : f->f_locals,
                            v);
            Py_DECREF(v);
            Py_DECREF(u);
            if (w == NULL) {
                u = POP();
                Py_DECREF(x);
                x = NULL;
                break;
            }
            READ_TIMESTAMP(intr0);
            v = x;
            x = PyEval_CallObject(v, w);
            Py_DECREF(v);
            READ_TIMESTAMP(intr1);
            Py_DECREF(w);
            SET_TOP(x);
            if (x != NULL) DISPATCH();
            break;
        TARGET(IMPORT_STAR)
            v = POP();
            PyFrame_FastToLocals(f);
            if ((x = f->f_locals) == NULL) {
                PyErr_SetString(PyExc_SystemError,
                    "no locals found during 'import *'");
                break;
            }
            READ_TIMESTAMP(intr0);
            err = import_all_from(x, v);
            READ_TIMESTAMP(intr1);
            PyFrame_LocalsToFast(f, 0);
            Py_DECREF(v);
            if (err == 0) DISPATCH();
            break;
        TARGET(IMPORT_FROM)
            w = GETITEM(names, oparg);
            v = TOP();
            READ_TIMESTAMP(intr0);
            x = import_from(v, w);
            READ_TIMESTAMP(intr1);
            PUSH(x);
            if (x != NULL) DISPATCH();
            break;
        TARGET(JUMP_FORWARD)
            JUMPBY(oparg);
            FAST_DISPATCH();

        PREDICTED_WITH_ARG(POP_JUMP_IF_FALSE);
        TARGET(POP_JUMP_IF_FALSE)
            w = POP();
            if (w == Py_True) {
                Py_DECREF(w);
                FAST_DISPATCH();
            }
            if (w == Py_False) {
                Py_DECREF(w);
                JUMPTO(oparg);
                FAST_DISPATCH();
            }
            err = PyObject_IsTrue(w);
            Py_DECREF(w);
            if (err > 0)
                err = 0;
            else if (err == 0)
                JUMPTO(oparg);
            else
                break;
            DISPATCH();

        PREDICTED_WITH_ARG(POP_JUMP_IF_TRUE);
        TARGET(POP_JUMP_IF_TRUE)
            w = POP();
            if (w == Py_False) {
                Py_DECREF(w);
                FAST_DISPATCH();
            }
            if (w == Py_True) {
                Py_DECREF(w);
                JUMPTO(oparg);
                FAST_DISPATCH();
            }
            err = PyObject_IsTrue(w);
            Py_DECREF(w);
            if (err > 0) {
                err = 0;
                JUMPTO(oparg);
            }
            else if (err == 0)
                ;
            else
                break;
            DISPATCH();
        TARGET(JUMP_IF_FALSE_OR_POP)
            w = TOP();
            if (w == Py_True) {
                STACKADJ(-1);
                Py_DECREF(w);
                FAST_DISPATCH();
            }
            if (w == Py_False) {
                JUMPTO(oparg);
                FAST_DISPATCH();
            }
            err = PyObject_IsTrue(w);
            if (err > 0) {
                STACKADJ(-1);
                Py_DECREF(w);
                err = 0;
            }
            else if (err == 0)
                JUMPTO(oparg);
            else
                break;
            DISPATCH();

        TARGET(JUMP_IF_TRUE_OR_POP)
            w = TOP();
            if (w == Py_False) {
                STACKADJ(-1);
                Py_DECREF(w);
                FAST_DISPATCH();
            }
            if (w == Py_True) {
                JUMPTO(oparg);
                FAST_DISPATCH();
            }
            err = PyObject_IsTrue(w);
            if (err > 0) {
                err = 0;
                JUMPTO(oparg);
            }
            else if (err == 0) {
                STACKADJ(-1);
                Py_DECREF(w);
            }
            else
                break;
            DISPATCH();
        PREDICTED_WITH_ARG(JUMP_ABSOLUTE);
        TARGET(JUMP_ABSOLUTE)
            JUMPTO(oparg);
#if FAST_LOOPS
            /* Enabling this path speeds-up all while and for-loops by bypassing
               the per-loop checks for signals.  By default, this should be turned-off
               because it prevents detection of a control-break in tight loops like
               "while 1: pass".  Compile with this option turned-on when you need
               the speed-up and do not need break checking inside tight loops (ones
               that contain only instructions ending with FAST_DISPATCH).
            */
            FAST_DISPATCH();
#else
            DISPATCH();
#endif
        TARGET(GET_ITER)
            /* before: [obj]; after [getiter(obj)] */
            v = TOP();
            x = PyObject_GetIter(v);
            Py_DECREF(v);
            if (x != NULL) {
                SET_TOP(x);
                PREDICT(FOR_ITER);
                DISPATCH();
            }
            STACKADJ(-1);
            break;

        PREDICTED_WITH_ARG(FOR_ITER);
        TARGET(FOR_ITER)
            /* before: [iter]; after: [iter, iter()] *or* [] */
            v = TOP();
            x = (*v->ob_type->tp_iternext)(v);
            if (x != NULL) {
                PUSH(x);
                PREDICT(STORE_FAST);
                PREDICT(UNPACK_SEQUENCE);
                DISPATCH();
            }
            if (PyErr_Occurred()) {
                if (!PyErr_ExceptionMatches(
                                PyExc_StopIteration))
                    break;
                PyErr_Clear();
            }
            /* iterator ended normally */
            x = v = POP();
            Py_DECREF(v);
            JUMPBY(oparg);
            DISPATCH();

        TARGET(BREAK_LOOP)
            why = WHY_BREAK;
            goto fast_block_end;
        TARGET(CONTINUE_LOOP)
            retval = PyLong_FromLong(oparg);
            if (!retval) {
                x = NULL;
                break;
            }
            why = WHY_CONTINUE;
            goto fast_block_end;
        TARGET_WITH_IMPL(SETUP_LOOP, _setup_finally)
        TARGET_WITH_IMPL(SETUP_EXCEPT, _setup_finally)
        TARGET(SETUP_FINALLY)
        _setup_finally:
            /* NOTE: If you add any new block-setup opcodes that
               are not try/except/finally handlers, you may need
               to update the PyGen_NeedsFinalizing() function.
               */
            PyFrame_BlockSetup(f, opcode, INSTR_OFFSET() + oparg,
                               STACK_LEVEL());
            DISPATCH();
        TARGET(WITH_CLEANUP)
        {
            /* At the top of the stack are 1-3 values indicating
               how/why we entered the finally clause:
               - TOP = None
               - (TOP, SECOND) = (WHY_{RETURN,CONTINUE}), retval
               - TOP = WHY_*; no retval below it
               - (TOP, SECOND, THIRD) = exc_info()
               Below them is EXIT, the context.__exit__ bound method.
               In the last case, we must call
                 EXIT(TOP, SECOND, THIRD)
               otherwise we must call
                 EXIT(None, None, None)

               In all cases, we remove EXIT from the stack, leaving
               the rest in the same order.

               In addition, if the stack represents an exception,
               *and* the function call returns a 'true' value, we
               "zap" this information, to prevent END_FINALLY from
               re-raising the exception.  (But non-local gotos
               should still be resumed.)
            */

            PyObject *exit_func = POP();
            u = TOP();
            if (u == Py_None) {
                v = w = Py_None;
            }
            else if (PyLong_Check(u)) {
                u = v = w = Py_None;
            }
            else {
                v = SECOND();
                w = THIRD();
            }
            /* XXX Not the fastest way to call it... */
            x = PyObject_CallFunctionObjArgs(exit_func, u, v, w,
                                             NULL);
            Py_DECREF(exit_func);
            if (x == NULL)
                break; /* Go to error exit */

            if (u != Py_None)
                err = PyObject_IsTrue(x);
            else
                err = 0;
            Py_DECREF(x);

            if (err < 0)
                break; /* Go to error exit */
            else if (err > 0) {
                err = 0;
                /* There was an exception and a True return */
                STACKADJ(-2);
                SET_TOP(PyLong_FromLong((long) WHY_SILENCED));
                Py_DECREF(u);
                Py_DECREF(v);
                Py_DECREF(w);
            }
            PREDICT(END_FINALLY);
            DISPATCH();
        }
        TARGET(CALL_FUNCTION)
        {
            PyObject **sp;
            PCALL(PCALL_ALL);
            sp = stack_pointer;
#ifdef WITH_TSC
            x = call_function(&sp, oparg, &intr0, &intr1);
#else
            x = call_function(&sp, oparg);
#endif
            stack_pointer = sp;
            PUSH(x);
            if (x != NULL)
                DISPATCH();
            break;
        }
        TARGET_WITH_IMPL(CALL_FUNCTION_VAR, _call_function_var_kw)
        TARGET_WITH_IMPL(CALL_FUNCTION_KW, _call_function_var_kw)
        TARGET(CALL_FUNCTION_VAR_KW)
        _call_function_var_kw:
        {
            int na = oparg & 0xff;
            int nk = (oparg>>8) & 0xff;
            int flags = (opcode - CALL_FUNCTION) & 3;
            int n = na + 2 * nk;
            PyObject **pfunc, *func, **sp;
            PCALL(PCALL_ALL);
            if (flags & CALL_FLAG_VAR)
                n++;
            if (flags & CALL_FLAG_KW)
                n++;
            pfunc = stack_pointer - n - 1;
            func = *pfunc;

            if (PyMethod_Check(func)
                && PyMethod_GET_SELF(func) != NULL) {
                PyObject *self = PyMethod_GET_SELF(func);
                Py_INCREF(self);
                func = PyMethod_GET_FUNCTION(func);
                Py_INCREF(func);
                Py_DECREF(*pfunc);
                *pfunc = self;
                na++;
                n++;
            } else
                Py_INCREF(func);
            sp = stack_pointer;
            READ_TIMESTAMP(intr0);
            x = ext_do_call(func, &sp, flags, na, nk);
            READ_TIMESTAMP(intr1);
            stack_pointer = sp;
            Py_DECREF(func);

            while (stack_pointer > pfunc) {
                w = POP();
                Py_DECREF(w);
            }
            PUSH(x);
            if (x != NULL)
                DISPATCH();
            break;
        }
        TARGET_WITH_IMPL(MAKE_CLOSURE, _make_function)
        TARGET(MAKE_FUNCTION)
        _make_function:
        {
            int posdefaults = oparg & 0xff;
            int kwdefaults = (oparg>>8) & 0xff;
            int num_annotations = (oparg >> 16) & 0x7fff;

            v = POP(); /* code object */
            x = PyFunction_New(v, f->f_globals);
            Py_DECREF(v);

            if (x != NULL && opcode == MAKE_CLOSURE) {
                v = POP();
                if (PyFunction_SetClosure(x, v) != 0) {
                    /* Can't happen unless bytecode is corrupt. */
                    why = WHY_EXCEPTION;
                }
                Py_DECREF(v);
            }

            if (x != NULL && num_annotations > 0) {
                Py_ssize_t name_ix;
                u = POP(); /* names of args with annotations */
                v = PyDict_New();
                if (v == NULL) {
                    Py_DECREF(x);
                    x = NULL;
                    break;
                }
                name_ix = PyTuple_Size(u);
                assert(num_annotations == name_ix+1);
                while (name_ix > 0) {
                    --name_ix;
                    t = PyTuple_GET_ITEM(u, name_ix);
                    w = POP();
                    /* XXX(nnorwitz): check for errors */
                    PyDict_SetItem(v, t, w);
                    Py_DECREF(w);
                }

                if (PyFunction_SetAnnotations(x, v) != 0) {
                    /* Can't happen unless
                       PyFunction_SetAnnotations changes. */
                    why = WHY_EXCEPTION;
                }
                Py_DECREF(v);
                Py_DECREF(u);
            }

            /* XXX Maybe this should be a separate opcode? */
            if (x != NULL && posdefaults > 0) {
                v = PyTuple_New(posdefaults);
                if (v == NULL) {
                    Py_DECREF(x);
                    x = NULL;
                    break;
                }
                while (--posdefaults >= 0) {
                    w = POP();
                    PyTuple_SET_ITEM(v, posdefaults, w);
                }
                if (PyFunction_SetDefaults(x, v) != 0) {
                    /* Can't happen unless
                       PyFunction_SetDefaults changes. */
                    why = WHY_EXCEPTION;
                }
                Py_DECREF(v);
            }
            if (x != NULL && kwdefaults > 0) {
                v = PyDict_New();
                if (v == NULL) {
                    Py_DECREF(x);
                    x = NULL;
                    break;
                }
                while (--kwdefaults >= 0) {
                    w = POP(); /* default value */
                    u = POP(); /* kw only arg name */
                    /* XXX(nnorwitz): check for errors */
                    PyDict_SetItem(v, u, w);
                    Py_DECREF(w);
                    Py_DECREF(u);
                }
                if (PyFunction_SetKwDefaults(x, v) != 0) {
                    /* Can't happen unless
                       PyFunction_SetKwDefaults changes. */
                    why = WHY_EXCEPTION;
                }
                Py_DECREF(v);
            }
            PUSH(x);
            break;
        }
        TARGET(BUILD_SLICE)
            if (oparg == 3)
                w = POP();
            else
                w = NULL;
            v = POP();
            u = TOP();
            x = PySlice_New(u, v, w);
            Py_DECREF(u);
            Py_DECREF(v);
            Py_XDECREF(w);
            SET_TOP(x);
            if (x != NULL) DISPATCH();
            break;

        TARGET(EXTENDED_ARG)
            opcode = NEXTOP();
            oparg = oparg<<16 | NEXTARG();
            goto dispatch_opcode;
#ifdef USE_COMPUTED_GOTOS
        _unknown_opcode:
#endif
        default:
            fprintf(stderr,
                "XXX lineno: %d, opcode: %d\n",
                PyCode_Addr2Line(f->f_code, f->f_lasti),
                opcode);
            PyErr_SetString(PyExc_SystemError, "unknown opcode");
            why = WHY_EXCEPTION;
            break;

#ifdef CASE_TOO_BIG
        }
#endif

        } /* switch */

        on_error:

        READ_TIMESTAMP(inst1);
        /* Quickly continue if no error occurred */

        if (why == WHY_NOT) {
            if (err == 0 && x != NULL) {
#ifdef CHECKEXC
                /* This check is expensive! */
                if (PyErr_Occurred())
                    fprintf(stderr,
                        "XXX undetected error\n");
                else {
#endif
                    READ_TIMESTAMP(loop1);
                    continue; /* Normal, fast path */
#ifdef CHECKEXC
                }
#endif
            }
            why = WHY_EXCEPTION;
            x = Py_None;
            err = 0;
        }
        /* Double-check exception status */
#ifdef CHECKEXC
        if (why == WHY_EXCEPTION || why == WHY_RERAISE) {
            if (!PyErr_Occurred()) {
                PyErr_SetString(PyExc_SystemError,
                    "error return without exception set");
                why = WHY_EXCEPTION;
            }
        }
        else {
            /* This check is expensive! */
            if (PyErr_Occurred()) {
                char buf[128];
                sprintf(buf, "Stack unwind with exception "
                             "set and why=%d", why);
                Py_FatalError(buf);
            }
        }
#endif
        /* Log traceback info if this is a real exception */

        if (why == WHY_EXCEPTION) {
            PyTraceBack_Here(f);

            if (tstate->c_tracefunc != NULL)
                call_exc_trace(tstate->c_tracefunc,
                               tstate->c_traceobj, f);
        }

        /* For the rest, treat WHY_RERAISE as WHY_EXCEPTION */

        if (why == WHY_RERAISE)
            why = WHY_EXCEPTION;

        /* Unwind stacks if a (pseudo) exception occurred */

fast_block_end:
        while (why != WHY_NOT && f->f_iblock > 0) {
            PyTryBlock *b = PyFrame_BlockPop(f);

            assert(why != WHY_YIELD);
            if (b->b_type == SETUP_LOOP && why == WHY_CONTINUE) {
                /* For a continue inside a try block,
                   don't pop the block for the loop. */
                PyFrame_BlockSetup(f, b->b_type, b->b_handler,
                                   b->b_level);
                why = WHY_NOT;
                JUMPTO(PyLong_AS_LONG(retval));
                Py_DECREF(retval);
                break;
            }
            if (b->b_type == EXCEPT_HANDLER) {
                UNWIND_EXCEPT_HANDLER(b);
                continue;
            }
            UNWIND_BLOCK(b);
            if (b->b_type == SETUP_LOOP && why == WHY_BREAK) {
                why = WHY_NOT;
                JUMPTO(b->b_handler);
                break;
            }
            if (why == WHY_EXCEPTION && (b->b_type == SETUP_EXCEPT
                || b->b_type == SETUP_FINALLY)) {
                PyObject *exc, *val, *tb;
                int handler = b->b_handler;
                /* Beware, this invalidates all b->b_* fields */
                PyFrame_BlockSetup(f, EXCEPT_HANDLER, -1, STACK_LEVEL());
                PUSH(tstate->exc_traceback);
                PUSH(tstate->exc_value);
                if (tstate->exc_type != NULL) {
                    PUSH(tstate->exc_type);
                }
                else {
                    Py_INCREF(Py_None);
                    PUSH(Py_None);
                }
                PyErr_Fetch(&exc, &val, &tb);
                /* Make the raw exception data
                   available to the handler,
                   so a program can emulate the
                   Python main loop. */
                PyErr_NormalizeException(
                    &exc, &val, &tb);
                PyException_SetTraceback(val, tb);
                Py_INCREF(exc);
                tstate->exc_type = exc;
                Py_INCREF(val);
                tstate->exc_value = val;
                tstate->exc_traceback = tb;
                if (tb == NULL)
                    tb = Py_None;
                Py_INCREF(tb);
                PUSH(tb);
                PUSH(val);
                PUSH(exc);
                why = WHY_NOT;
                JUMPTO(handler);
                break;
            }
            if (b->b_type == SETUP_FINALLY) {
                if (why & (WHY_RETURN | WHY_CONTINUE))
                    PUSH(retval);
                PUSH(PyLong_FromLong((long)why));
                why = WHY_NOT;
                JUMPTO(b->b_handler);
                break;
            }
        } /* unwind stack */
        /* End the loop if we still have an error (or return) */

        if (why != WHY_NOT)
            break;
        READ_TIMESTAMP(loop1);

    } /* main loop */

    assert(why != WHY_YIELD);
    /* Pop remaining stack entries. */
    while (!EMPTY()) {
        v = POP();
        Py_XDECREF(v);
    }

    if (why != WHY_RETURN)
        retval = NULL;
fast_yield:
    if (tstate->use_tracing) {
        if (tstate->c_tracefunc) {
            if (why == WHY_RETURN || why == WHY_YIELD) {
                if (call_trace(tstate->c_tracefunc,
                               tstate->c_traceobj, f,
                               PyTrace_RETURN, retval)) {
                    Py_XDECREF(retval);
                    retval = NULL;
                    why = WHY_EXCEPTION;
                }
            }
            else if (why == WHY_EXCEPTION) {
                call_trace_protected(tstate->c_tracefunc,
                                     tstate->c_traceobj, f,
                                     PyTrace_RETURN, NULL);
            }
        }
        if (tstate->c_profilefunc) {
            if (why == WHY_EXCEPTION)
                call_trace_protected(tstate->c_profilefunc,
                                     tstate->c_profileobj, f,
                                     PyTrace_RETURN, NULL);
            else if (call_trace(tstate->c_profilefunc,
                                tstate->c_profileobj, f,
                                PyTrace_RETURN, retval)) {
                Py_XDECREF(retval);
                retval = NULL;
                why = WHY_EXCEPTION;
            }
        }
    }

    /* pop frame */
exit_eval_frame:
    Py_LeaveRecursiveCall();
    tstate->frame = f->f_back;

    return retval;
}
/* This is gonna seem *real weird*, but if you put some other code between
   PyEval_EvalFrame() and PyEval_EvalCodeEx() you will need to adjust
   the test in the if statements in Misc/gdbinit (pystack and pystackv). */

PyObject *
PyEval_EvalCodeEx(PyCodeObject *co, PyObject *globals, PyObject *locals,
                  PyObject **args, int argcount, PyObject **kws, int kwcount,
                  PyObject **defs, int defcount, PyObject *kwdefs, PyObject *closure)
{
    register PyFrameObject *f;
    register PyObject *retval = NULL;
    register PyObject **fastlocals, **freevars;
    PyThreadState *tstate = PyThreadState_GET();
    PyObject *x, *u;

    if (globals == NULL) {
        PyErr_SetString(PyExc_SystemError,
                        "PyEval_EvalCodeEx: NULL globals");
        return NULL;
    }

    assert(tstate != NULL);
    assert(globals != NULL);
    f = PyFrame_New(tstate, co, globals, locals);
    if (f == NULL)
        return NULL;

    fastlocals = f->f_localsplus;
    freevars = f->f_localsplus + co->co_nlocals;
    if (co->co_argcount > 0 ||
        co->co_kwonlyargcount > 0 ||
        co->co_flags & (CO_VARARGS | CO_VARKEYWORDS)) {
        int i;
        int n = argcount;
        PyObject *kwdict = NULL;
        if (co->co_flags & CO_VARKEYWORDS) {
            kwdict = PyDict_New();
            if (kwdict == NULL)
                goto fail;
            i = co->co_argcount + co->co_kwonlyargcount;
            if (co->co_flags & CO_VARARGS)
                i++;
            SETLOCAL(i, kwdict);
        }
        if (argcount > co->co_argcount) {
            if (!(co->co_flags & CO_VARARGS)) {
                PyErr_Format(PyExc_TypeError,
                    "%U() takes %s %d "
                    "%spositional argument%s (%d given)",
                    co->co_name,
                    defcount ? "at most" : "exactly",
                    co->co_argcount,
                    kwcount ? "non-keyword " : "",
                    co->co_argcount == 1 ? "" : "s",
                    argcount);
                goto fail;
            }
            n = co->co_argcount;
        }
        for (i = 0; i < n; i++) {
            x = args[i];
            Py_INCREF(x);
            SETLOCAL(i, x);
        }
        if (co->co_flags & CO_VARARGS) {
            u = PyTuple_New(argcount - n);
            if (u == NULL)
                goto fail;
            SETLOCAL(co->co_argcount + co->co_kwonlyargcount, u);
            for (i = n; i < argcount; i++) {
                x = args[i];
                Py_INCREF(x);
                PyTuple_SET_ITEM(u, i-n, x);
            }
        }
        for (i = 0; i < kwcount; i++) {
            PyObject **co_varnames;
            PyObject *keyword = kws[2*i];
            PyObject *value = kws[2*i + 1];
            int j;
            if (keyword == NULL || !PyUnicode_Check(keyword)) {
                PyErr_Format(PyExc_TypeError,
                             "%U() keywords must be strings",
                             co->co_name);
                goto fail;
            }
            /* Speed hack: do raw pointer compares. As names are
               normally interned this should almost always hit. */
            co_varnames = PySequence_Fast_ITEMS(co->co_varnames);
            for (j = 0;
                 j < co->co_argcount + co->co_kwonlyargcount;
                 j++) {
                PyObject *nm = co_varnames[j];
                if (nm == keyword)
                    goto kw_found;
            }
            /* Slow fallback, just in case */
            for (j = 0;
                 j < co->co_argcount + co->co_kwonlyargcount;
                 j++) {
                PyObject *nm = co_varnames[j];
                int cmp = PyObject_RichCompareBool(
                    keyword, nm, Py_EQ);
                if (cmp > 0)
                    goto kw_found;
                else if (cmp < 0)
                    goto fail;
            }
            /* Check errors from Compare */
            if (PyErr_Occurred())
                goto fail;
            if (j >= co->co_argcount + co->co_kwonlyargcount) {
                if (kwdict == NULL) {
                    PyErr_Format(PyExc_TypeError,
                                 "%U() got an unexpected "
                                 "keyword argument '%S'",
                                 co->co_name,
                                 keyword);
                    goto fail;
                }
                PyDict_SetItem(kwdict, keyword, value);
                continue;
            }
kw_found:
            if (GETLOCAL(j) != NULL) {
                PyErr_Format(PyExc_TypeError,
                             "%U() got multiple "
                             "values for keyword "
                             "argument '%S'",
                             co->co_name,
                             keyword);
                goto fail;
            }
            Py_INCREF(value);
            SETLOCAL(j, value);
        }
        if (co->co_kwonlyargcount > 0) {
            for (i = co->co_argcount;
                 i < co->co_argcount + co->co_kwonlyargcount;
                 i++) {
                PyObject *name, *def;
                if (GETLOCAL(i) != NULL)
                    continue;
                name = PyTuple_GET_ITEM(co->co_varnames, i);
                def = NULL;
                if (kwdefs != NULL)
                    def = PyDict_GetItem(kwdefs, name);
                if (def != NULL) {
                    Py_INCREF(def);
                    SETLOCAL(i, def);
                    continue;
                }
                PyErr_Format(PyExc_TypeError,
                    "%U() needs keyword-only argument %S",
                    co->co_name, name);
                goto fail;
            }
        }
        if (argcount < co->co_argcount) {
            int m = co->co_argcount - defcount;
            for (i = argcount; i < m; i++) {
                if (GETLOCAL(i) == NULL) {
                    PyErr_Format(PyExc_TypeError,
                        "%U() takes %s %d "
                        "%spositional argument%s "
                        "(%d given)",
                        co->co_name,
                        ((co->co_flags & CO_VARARGS) ||
                         defcount) ? "at least"
                                   : "exactly",
                        m, kwcount ? "non-keyword " : "",
                        m == 1 ? "" : "s", i);
                    goto fail;
                }
            }
            if (n > m)
                i = n - m;
            else
                i = 0;
            for (; i < defcount; i++) {
                if (GETLOCAL(m+i) == NULL) {
                    PyObject *def = defs[i];
                    Py_INCREF(def);
                    SETLOCAL(m+i, def);
                }
            }
        }
    }
    else if (argcount > 0 || kwcount > 0) {
        PyErr_Format(PyExc_TypeError,
                     "%U() takes no arguments (%d given)",
                     co->co_name,
                     argcount + kwcount);
        goto fail;
    }
3132 /* Allocate and initialize storage for cell vars, and copy free
3133 vars into frame. This isn't too efficient right now. */
3134 if (PyTuple_GET_SIZE(co
->co_cellvars
)) {
3135 int i
, j
, nargs
, found
;
3136 Py_UNICODE
*cellname
, *argname
;
3139 nargs
= co
->co_argcount
+ co
->co_kwonlyargcount
;
3140 if (co
->co_flags
& CO_VARARGS
)
3142 if (co
->co_flags
& CO_VARKEYWORDS
)
3145 /* Initialize each cell var, taking into account
3146 cell vars that are initialized from arguments.
3148 Should arrange for the compiler to put cellvars
3149 that are arguments at the beginning of the cellvars
3150 list so that we can march over it more efficiently?
3152 for (i
= 0; i
< PyTuple_GET_SIZE(co
->co_cellvars
); ++i
) {
3153 cellname
= PyUnicode_AS_UNICODE(
3154 PyTuple_GET_ITEM(co
->co_cellvars
, i
));
3156 for (j
= 0; j
< nargs
; j
++) {
3157 argname
= PyUnicode_AS_UNICODE(
3158 PyTuple_GET_ITEM(co
->co_varnames
, j
));
3159 if (Py_UNICODE_strcmp(cellname
, argname
) == 0) {
3160 c
= PyCell_New(GETLOCAL(j
));
3163 GETLOCAL(co
->co_nlocals
+ i
) = c
;
3169 c
= PyCell_New(NULL
);
3172 SETLOCAL(co
->co_nlocals
+ i
, c
);
3176 if (PyTuple_GET_SIZE(co
->co_freevars
)) {
3178 for (i
= 0; i
< PyTuple_GET_SIZE(co
->co_freevars
); ++i
) {
3179 PyObject
*o
= PyTuple_GET_ITEM(closure
, i
);
3181 freevars
[PyTuple_GET_SIZE(co
->co_cellvars
) + i
] = o
;
3185 if (co
->co_flags
& CO_GENERATOR
) {
3186 /* Don't need to keep the reference to f_back, it will be set
3187 * when the generator is resumed. */
3188 Py_XDECREF(f
->f_back
);
3191 PCALL(PCALL_GENERATOR
);
3193 /* Create a new generator that owns the ready to run frame
3194 * and return that as the value. */
3195 return PyGen_New(f
);
3198 retval
= PyEval_EvalFrameEx(f
,0);
3200 fail
: /* Jump here from prelude on failure */
3202 /* decref'ing the frame can cause __del__ methods to get invoked,
3203 which can call back into Python. While we're done with the
3204 current Python frame (f), the associated C stack is still in use,
3205 so recursion_depth must be boosted for the duration.
3207 assert(tstate
!= NULL
);
3208 ++tstate
->recursion_depth
;
3210 --tstate
->recursion_depth
;
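
/* Illustrative sketch (not part of ceval.c): how an embedder might drive
   PyEval_EvalCodeEx() directly for a code object that takes no arguments.
   The `co` and `globals` parameters are assumed to be supplied by the
   caller; every unused argument slot is passed as NULL/0. */
#if 0
static PyObject *
eval_code_no_args(PyCodeObject *co, PyObject *globals)
{
    return PyEval_EvalCodeEx(co, globals, (PyObject *)NULL,
                             NULL, 0,         /* args, argcount */
                             NULL, 0,         /* kws, kwcount */
                             NULL, 0,         /* defs, defcount */
                             NULL, NULL);     /* kwdefs, closure */
}
#endif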
/* Logic for the raise statement (too complicated for inlining).
   This *consumes* a reference count to each of its arguments. */
static enum why_code
do_raise(PyObject *exc, PyObject *cause)
{
    PyObject *type = NULL, *value = NULL;

    if (exc == NULL) {
        /* Reraise */
        PyThreadState *tstate = PyThreadState_GET();
        PyObject *tb;
        type = tstate->exc_type;
        value = tstate->exc_value;
        tb = tstate->exc_traceback;
        if (type == Py_None) {
            PyErr_SetString(PyExc_RuntimeError,
                            "No active exception to reraise");
            return WHY_EXCEPTION;
        }
        Py_XINCREF(type);
        Py_XINCREF(value);
        Py_XINCREF(tb);
        PyErr_Restore(type, value, tb);
        return WHY_RERAISE;
    }

    /* We support the following forms of raise:
       raise
       raise <instance>
       raise <type> */

    if (PyExceptionClass_Check(exc)) {
        type = exc;
        value = PyObject_CallObject(exc, NULL);
        if (value == NULL)
            goto raise_error;
    }
    else if (PyExceptionInstance_Check(exc)) {
        value = exc;
        type = PyExceptionInstance_Class(exc);
        Py_INCREF(type);
    }
    else {
        /* Not something you can raise.  You get an exception
           anyway, just not what you specified :-) */
        Py_DECREF(exc);
        PyErr_SetString(PyExc_TypeError,
                        "exceptions must derive from BaseException");
        goto raise_error;
    }

    if (cause) {
        PyObject *fixed_cause;
        if (PyExceptionClass_Check(cause)) {
            fixed_cause = PyObject_CallObject(cause, NULL);
            if (fixed_cause == NULL)
                goto raise_error;
            Py_DECREF(cause);
        }
        else if (PyExceptionInstance_Check(cause)) {
            fixed_cause = cause;
        }
        else {
            PyErr_SetString(PyExc_TypeError,
                            "exception causes must derive from "
                            "BaseException");
            goto raise_error;
        }
        PyException_SetCause(value, fixed_cause);
    }

    PyErr_SetObject(type, value);
    /* PyErr_SetObject incref's its arguments */
    Py_XDECREF(value);
    Py_XDECREF(type);
    return WHY_EXCEPTION;

raise_error:
    Py_XDECREF(value);
    Py_XDECREF(type);
    Py_XDECREF(cause);
    return WHY_EXCEPTION;
}
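
/* Illustrative note: the compiler routes the raise statement variants
   here roughly as
       raise            ->  do_raise(NULL, NULL)     (re-raise)
       raise E          ->  do_raise(E, NULL)
       raise E from C   ->  do_raise(E, C)
   A hedged sketch of the equivalent cause-chaining from C follows; the
   helper name is hypothetical and not part of this file. */
#if 0
static void
set_cause_example(PyObject *value)
{
    /* Mirror `raise ... from KeyError()`: build the cause instance and
       attach it.  PyException_SetCause() steals the reference. */
    PyObject *cause = PyObject_CallObject(PyExc_KeyError, NULL);
    if (cause != NULL)
        PyException_SetCause(value, cause);
}
#endif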
/* Iterate v argcnt times and store the results on the stack (via decreasing
   sp).  Return 1 for success, 0 if error.

   If argcntafter == -1, do a simple unpack. If it is >= 0, do an unpack
   with a variable target.
*/

static int
unpack_iterable(PyObject *v, int argcnt, int argcntafter, PyObject **sp)
{
    int i = 0, j = 0;
    Py_ssize_t ll = 0;
    PyObject *it;  /* iter(v) */
    PyObject *w;
    PyObject *l = NULL; /* variable list */

    assert(v != NULL);

    it = PyObject_GetIter(v);
    if (it == NULL)
        goto Error;

    for (; i < argcnt; i++) {
        w = PyIter_Next(it);
        if (w == NULL) {
            /* Iterator done, via error or exhaustion. */
            if (!PyErr_Occurred()) {
                PyErr_Format(PyExc_ValueError,
                             "need more than %d value%s to unpack",
                             i, i == 1 ? "" : "s");
            }
            goto Error;
        }
        *--sp = w;
    }

    if (argcntafter == -1) {
        /* We better have exhausted the iterator now. */
        w = PyIter_Next(it);
        if (w == NULL) {
            if (PyErr_Occurred())
                goto Error;
            Py_DECREF(it);
            return 1;
        }
        Py_DECREF(w);
        PyErr_SetString(PyExc_ValueError, "too many values to unpack");
        goto Error;
    }

    l = PySequence_List(it);
    if (l == NULL)
        goto Error;
    *--sp = l;
    i++;

    ll = PyList_GET_SIZE(l);
    if (ll < argcntafter) {
        PyErr_Format(PyExc_ValueError, "need more than %zd values to unpack",
                     argcnt + ll);
        goto Error;
    }

    /* Pop the "after-variable" args off the list. */
    for (j = argcntafter; j > 0; j--, i++) {
        *--sp = PyList_GET_ITEM(l, ll - j);
    }
    /* Resize the list. */
    Py_SIZE(l) = ll - argcntafter;
    Py_DECREF(it);
    return 1;

Error:
    for (; i > 0; i--, sp++)
        Py_DECREF(*sp);
    Py_XDECREF(it);
    Py_XDECREF(l);
    return 0;
}
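
/* Illustrative sketch (hypothetical helper, not part of this file):
   `a, *b, c = v` corresponds to unpack_iterable(v, 1, 1, sp) with sp
   pointing one past a 3-slot region that is filled downward, exactly
   as the eval loop does with its value stack. */
#if 0
static int
unpack_a_star_b_c(PyObject *v, PyObject *out[3])
{
    /* On success: out[2] = a, out[1] = b (a new list), out[0] = c. */
    return unpack_iterable(v, 1, 1, &out[3]);
}
#endif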
#ifdef LLTRACE
static int
prtrace(PyObject *v, char *str)
{
    printf("%s ", str);
    if (PyObject_Print(v, stdout, 0) != 0)
        PyErr_Clear(); /* Don't know what else to do */
    printf("\n");
    return 1;
}
#endif
static void
call_exc_trace(Py_tracefunc func, PyObject *self, PyFrameObject *f)
{
    PyObject *type, *value, *traceback, *arg;
    int err;
    PyErr_Fetch(&type, &value, &traceback);
    if (value == NULL) {
        value = Py_None;
        Py_INCREF(value);
    }
    arg = PyTuple_Pack(3, type, value, traceback);
    if (arg == NULL) {
        PyErr_Restore(type, value, traceback);
        return;
    }
    err = call_trace(func, self, f, PyTrace_EXCEPTION, arg);
    Py_DECREF(arg);
    if (err == 0)
        PyErr_Restore(type, value, traceback);
    else {
        Py_XDECREF(type);
        Py_XDECREF(value);
        Py_XDECREF(traceback);
    }
}
static int
call_trace_protected(Py_tracefunc func, PyObject *obj, PyFrameObject *frame,
                     int what, PyObject *arg)
{
    PyObject *type, *value, *traceback;
    int err;
    PyErr_Fetch(&type, &value, &traceback);
    err = call_trace(func, obj, frame, what, arg);
    if (err == 0) {
        PyErr_Restore(type, value, traceback);
        return 0;
    }
    else {
        Py_XDECREF(type);
        Py_XDECREF(value);
        Py_XDECREF(traceback);
        return -1;
    }
}
static int
call_trace(Py_tracefunc func, PyObject *obj, PyFrameObject *frame,
           int what, PyObject *arg)
{
    register PyThreadState *tstate = frame->f_tstate;
    int result;
    if (tstate->tracing)
        return 0;
    tstate->tracing++;
    tstate->use_tracing = 0;
    result = func(obj, frame, what, arg);
    tstate->use_tracing = ((tstate->c_tracefunc != NULL)
                           || (tstate->c_profilefunc != NULL));
    tstate->tracing--;
    return result;
}
PyObject *
_PyEval_CallTracing(PyObject *func, PyObject *args)
{
    PyFrameObject *frame = PyEval_GetFrame();
    PyThreadState *tstate = frame->f_tstate;
    int save_tracing = tstate->tracing;
    int save_use_tracing = tstate->use_tracing;
    PyObject *result;

    tstate->tracing = 0;
    tstate->use_tracing = ((tstate->c_tracefunc != NULL)
                           || (tstate->c_profilefunc != NULL));
    result = PyObject_Call(func, args, NULL);
    tstate->tracing = save_tracing;
    tstate->use_tracing = save_use_tracing;
    return result;
}
static int
maybe_call_line_trace(Py_tracefunc func, PyObject *obj,
                      PyFrameObject *frame, int *instr_lb, int *instr_ub,
                      int *instr_prev)
{
    int result = 0;

    /* If the last instruction executed isn't in the current
       instruction window, reset the window.  If the last
       instruction happens to fall at the start of a line or if it
       represents a jump backwards, call the trace function.
    */
    if ((frame->f_lasti < *instr_lb || frame->f_lasti >= *instr_ub)) {
        int line;
        PyAddrPair bounds;

        line = PyCode_CheckLineNumber(frame->f_code, frame->f_lasti,
                                      &bounds);
        if (line >= 0) {
            frame->f_lineno = line;
            result = call_trace(func, obj, frame,
                                PyTrace_LINE, Py_None);
        }
        *instr_lb = bounds.ap_lower;
        *instr_ub = bounds.ap_upper;
    }
    else if (frame->f_lasti <= *instr_prev) {
        result = call_trace(func, obj, frame, PyTrace_LINE, Py_None);
    }
    *instr_prev = frame->f_lasti;
    return result;
}
void
PyEval_SetProfile(Py_tracefunc func, PyObject *arg)
{
    PyThreadState *tstate = PyThreadState_GET();
    PyObject *temp = tstate->c_profileobj;
    Py_XINCREF(arg);
    tstate->c_profilefunc = NULL;
    tstate->c_profileobj = NULL;
    /* Must make sure that tracing is not ignored if 'temp' is freed */
    tstate->use_tracing = tstate->c_tracefunc != NULL;
    Py_XDECREF(temp);
    tstate->c_profilefunc = func;
    tstate->c_profileobj = arg;
    /* Flag that tracing or profiling is turned on */
    tstate->use_tracing = (func != NULL) || (tstate->c_tracefunc != NULL);
}
void
PyEval_SetTrace(Py_tracefunc func, PyObject *arg)
{
    PyThreadState *tstate = PyThreadState_GET();
    PyObject *temp = tstate->c_traceobj;
    _Py_TracingPossible += (func != NULL) - (tstate->c_tracefunc != NULL);
    Py_XINCREF(arg);
    tstate->c_tracefunc = NULL;
    tstate->c_traceobj = NULL;
    /* Must make sure that profiling is not ignored if 'temp' is freed */
    tstate->use_tracing = tstate->c_profilefunc != NULL;
    Py_XDECREF(temp);
    tstate->c_tracefunc = func;
    tstate->c_traceobj = arg;
    /* Flag that tracing or profiling is turned on */
    tstate->use_tracing = ((func != NULL)
                           || (tstate->c_profilefunc != NULL));
}
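
/* Illustrative sketch (hypothetical tracer, not part of this file):
   installing a minimal C-level trace function.  Returning non-zero from
   the tracer would propagate an exception into the traced code. */
#if 0
static int
line_tracer(PyObject *obj, PyFrameObject *frame, int what, PyObject *arg)
{
    if (what == PyTrace_LINE)
        fprintf(stderr, "line event, f_lasti=%d\n", frame->f_lasti);
    return 0;
}
/* During setup:  PyEval_SetTrace(line_tracer, NULL); */
#endif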
PyObject *
PyEval_GetBuiltins(void)
{
    PyFrameObject *current_frame = PyEval_GetFrame();
    if (current_frame == NULL)
        return PyThreadState_GET()->interp->builtins;
    else
        return current_frame->f_builtins;
}
PyObject *
PyEval_GetLocals(void)
{
    PyFrameObject *current_frame = PyEval_GetFrame();
    if (current_frame == NULL)
        return NULL;
    PyFrame_FastToLocals(current_frame);
    return current_frame->f_locals;
}
PyObject *
PyEval_GetGlobals(void)
{
    PyFrameObject *current_frame = PyEval_GetFrame();
    if (current_frame == NULL)
        return NULL;
    else
        return current_frame->f_globals;
}
PyFrameObject *
PyEval_GetFrame(void)
{
    PyThreadState *tstate = PyThreadState_GET();
    return _PyThreadState_GetFrame(tstate);
}
int
PyEval_MergeCompilerFlags(PyCompilerFlags *cf)
{
    PyFrameObject *current_frame = PyEval_GetFrame();
    int result = cf->cf_flags != 0;

    if (current_frame != NULL) {
        const int codeflags = current_frame->f_code->co_flags;
        const int compilerflags = codeflags & PyCF_MASK;
        if (compilerflags) {
            result = 1;
            cf->cf_flags |= compilerflags;
        }
#if 0 /* future keyword */
        if (codeflags & CO_GENERATOR_ALLOWED) {
            result = 1;
            cf->cf_flags |= CO_GENERATOR_ALLOWED;
        }
#endif
    }
    return result;
}
/* External interface to call any callable object.
   The arg must be a tuple or NULL. */

#undef PyEval_CallObject
/* for backward compatibility: export this interface */

PyObject *
PyEval_CallObject(PyObject *func, PyObject *arg)
{
    return PyEval_CallObjectWithKeywords(func, arg, (PyObject *)NULL);
}
#define PyEval_CallObject(func,arg) \
    PyEval_CallObjectWithKeywords(func, arg, (PyObject *)NULL)
PyObject *
PyEval_CallObjectWithKeywords(PyObject *func, PyObject *arg, PyObject *kw)
{
    PyObject *result;

    if (arg == NULL) {
        arg = PyTuple_New(0);
        if (arg == NULL)
            return NULL;
    }
    else if (!PyTuple_Check(arg)) {
        PyErr_SetString(PyExc_TypeError,
                        "argument list must be a tuple");
        return NULL;
    }
    else
        Py_INCREF(arg);

    if (kw != NULL && !PyDict_Check(kw)) {
        PyErr_SetString(PyExc_TypeError,
                        "keyword list must be a dictionary");
        Py_DECREF(arg);
        return NULL;
    }

    result = PyObject_Call(func, arg, kw);
    Py_DECREF(arg);
    return result;
}
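
/* Illustrative sketch (hypothetical helper, not part of this file): the
   classic embedding pattern for this interface. */
#if 0
static PyObject *
call_with_one_long(PyObject *func, long n)
{
    PyObject *args = Py_BuildValue("(l)", n);
    PyObject *result;
    if (args == NULL)
        return NULL;
    result = PyEval_CallObjectWithKeywords(func, args, (PyObject *)NULL);
    Py_DECREF(args);
    return result;
}
#endif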
const char *
PyEval_GetFuncName(PyObject *func)
{
    if (PyMethod_Check(func))
        return PyEval_GetFuncName(PyMethod_GET_FUNCTION(func));
    else if (PyFunction_Check(func))
        return _PyUnicode_AsString(((PyFunctionObject*)func)->func_name);
    else if (PyCFunction_Check(func))
        return ((PyCFunctionObject*)func)->m_ml->ml_name;
    else
        return func->ob_type->tp_name;
}
const char *
PyEval_GetFuncDesc(PyObject *func)
{
    if (PyMethod_Check(func))
        return "()";
    else if (PyFunction_Check(func))
        return "()";
    else if (PyCFunction_Check(func))
        return "()";
    else
        return " object";
}
static void
err_args(PyObject *func, int flags, int nargs)
{
    if (flags & METH_NOARGS)
        PyErr_Format(PyExc_TypeError,
                     "%.200s() takes no arguments (%d given)",
                     ((PyCFunctionObject *)func)->m_ml->ml_name,
                     nargs);
    else
        PyErr_Format(PyExc_TypeError,
                     "%.200s() takes exactly one argument (%d given)",
                     ((PyCFunctionObject *)func)->m_ml->ml_name,
                     nargs);
}
#define C_TRACE(x, call) \
if (tstate->use_tracing && tstate->c_profilefunc) { \
    if (call_trace(tstate->c_profilefunc, \
        tstate->c_profileobj, \
        tstate->frame, PyTrace_C_CALL, \
        func)) { \
        x = NULL; \
    } \
    else { \
        x = call; \
        if (tstate->c_profilefunc != NULL) { \
            if (x == NULL) { \
                call_trace_protected(tstate->c_profilefunc, \
                    tstate->c_profileobj, \
                    tstate->frame, PyTrace_C_EXCEPTION, \
                    func); \
                /* XXX should pass (type, value, tb) */ \
            } \
            else { \
                if (call_trace(tstate->c_profilefunc, \
                    tstate->c_profileobj, \
                    tstate->frame, PyTrace_C_RETURN, \
                    func)) { \
                    Py_DECREF(x); \
                    x = NULL; \
                } \
            } \
        } \
    } \
} else { \
    x = call; \
}
static PyObject *
call_function(PyObject ***pp_stack, int oparg
#ifdef WITH_TSC
                , uint64* pintr0, uint64* pintr1
#endif
                )
{
    int na = oparg & 0xff;
    int nk = (oparg>>8) & 0xff;
    int n = na + 2 * nk;
    PyObject **pfunc = (*pp_stack) - n - 1;
    PyObject *func = *pfunc;
    PyObject *x, *w;

    /* Always dispatch PyCFunction first, because these are
       presumed to be the most frequent callable object.
    */
    if (PyCFunction_Check(func) && nk == 0) {
        int flags = PyCFunction_GET_FLAGS(func);
        PyThreadState *tstate = PyThreadState_GET();

        PCALL(PCALL_CFUNCTION);
        if (flags & (METH_NOARGS | METH_O)) {
            PyCFunction meth = PyCFunction_GET_FUNCTION(func);
            PyObject *self = PyCFunction_GET_SELF(func);
            if (flags & METH_NOARGS && na == 0) {
                C_TRACE(x, (*meth)(self,NULL));
            }
            else if (flags & METH_O && na == 1) {
                PyObject *arg = EXT_POP(*pp_stack);
                C_TRACE(x, (*meth)(self,arg));
                Py_DECREF(arg);
            }
            else {
                err_args(func, flags, na);
                x = NULL;
            }
        }
        else {
            PyObject *callargs;
            callargs = load_args(pp_stack, na);
            READ_TIMESTAMP(*pintr0);
            C_TRACE(x, PyCFunction_Call(func,callargs,NULL));
            READ_TIMESTAMP(*pintr1);
            Py_XDECREF(callargs);
        }
    }
    else {
        if (PyMethod_Check(func) && PyMethod_GET_SELF(func) != NULL) {
            /* optimize access to bound methods */
            PyObject *self = PyMethod_GET_SELF(func);
            PCALL(PCALL_METHOD);
            PCALL(PCALL_BOUND_METHOD);
            Py_INCREF(self);
            func = PyMethod_GET_FUNCTION(func);
            Py_INCREF(func);
            Py_DECREF(*pfunc);
            *pfunc = self;
            na++;
            n++;
        }
        else
            Py_INCREF(func);
        READ_TIMESTAMP(*pintr0);
        if (PyFunction_Check(func))
            x = fast_function(func, pp_stack, n, na, nk);
        else
            x = do_call(func, pp_stack, na, nk);
        READ_TIMESTAMP(*pintr1);
        Py_DECREF(func);
    }

    /* Clear the stack of the function object.  Also removes
       the arguments in case they weren't consumed already
       (fast_function() and err_args() leave them on the stack).
     */
    while ((*pp_stack) > pfunc) {
        w = EXT_POP(*pp_stack);
        Py_DECREF(w);
        PCALL(PCALL_POP);
    }
    return x;
}
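
/* Illustrative note: CALL_FUNCTION's oparg packs the counts as
   (nk << 8) | na, so f(1, 2, x=3) arrives with oparg == 0x0102 and
   call_function() clears na + 2*nk + 1 == 5 stack slots (including
   the callable itself). */
#if 0
int oparg = (1 << 8) | 2;      /* f(1, 2, x=3): one kw pair, two positionals */
int na = oparg & 0xff;         /* 2 */
int nk = (oparg >> 8) & 0xff;  /* 1 */
int n  = na + 2 * nk;          /* 4 values sit above the callable */
#endif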
/* The fast_function() function optimizes calls for which no argument
   tuple is necessary; the objects are passed directly from the stack.
   For the simplest case -- a function that takes only positional
   arguments and is called with only positional arguments -- it
   inlines the most primitive frame setup code from
   PyEval_EvalCodeEx(), which vastly reduces the checks that must be
   done before evaluating the frame.
*/

static PyObject *
fast_function(PyObject *func, PyObject ***pp_stack, int n, int na, int nk)
{
    PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func);
    PyObject *globals = PyFunction_GET_GLOBALS(func);
    PyObject *argdefs = PyFunction_GET_DEFAULTS(func);
    PyObject *kwdefs = PyFunction_GET_KW_DEFAULTS(func);
    PyObject **d = NULL;
    int nd = 0;

    PCALL(PCALL_FUNCTION);
    PCALL(PCALL_FAST_FUNCTION);
    if (argdefs == NULL && co->co_argcount == n &&
        co->co_kwonlyargcount == 0 && nk==0 &&
        co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) {
        PyFrameObject *f;
        PyObject *retval = NULL;
        PyThreadState *tstate = PyThreadState_GET();
        PyObject **fastlocals, **stack;
        int i;

        PCALL(PCALL_FASTER_FUNCTION);
        assert(globals != NULL);
        /* XXX Perhaps we should create a specialized
           PyFrame_New() that doesn't take locals, but does
           take builtins without sanity checking them.
        */
        assert(tstate != NULL);
        f = PyFrame_New(tstate, co, globals, NULL);
        if (f == NULL)
            return NULL;

        fastlocals = f->f_localsplus;
        stack = (*pp_stack) - n;

        for (i = 0; i < n; i++) {
            Py_INCREF(*stack);
            fastlocals[i] = *stack++;
        }
        retval = PyEval_EvalFrameEx(f,0);
        ++tstate->recursion_depth;
        Py_DECREF(f);
        --tstate->recursion_depth;
        return retval;
    }
    if (argdefs != NULL) {
        d = &PyTuple_GET_ITEM(argdefs, 0);
        nd = Py_SIZE(argdefs);
    }
    return PyEval_EvalCodeEx(co, globals,
                             (PyObject *)NULL, (*pp_stack)-n, na,
                             (*pp_stack)-2*nk, nk, d, nd, kwdefs,
                             PyFunction_GET_CLOSURE(func));
}
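
/* Illustrative note: the fast path above fires for plain calls such as

       def add(a, b): return a + b
       add(1, 2)

   (no defaults, no keyword or kw-only arguments, no *args/**kwargs, no
   closure); anything richer, e.g. `def f(a, b=0)`, falls through to the
   general PyEval_EvalCodeEx() entry point. */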
static PyObject *
update_keyword_args(PyObject *orig_kwdict, int nk, PyObject ***pp_stack,
                    PyObject *func)
{
    PyObject *kwdict = NULL;
    if (orig_kwdict == NULL)
        kwdict = PyDict_New();
    else {
        kwdict = PyDict_Copy(orig_kwdict);
        Py_DECREF(orig_kwdict);
    }
    if (kwdict == NULL)
        return NULL;
    while (--nk >= 0) {
        int err;
        PyObject *value = EXT_POP(*pp_stack);
        PyObject *key = EXT_POP(*pp_stack);
        if (PyDict_GetItem(kwdict, key) != NULL) {
            PyErr_Format(PyExc_TypeError,
                         "%.200s%s got multiple values "
                         "for keyword argument '%U'",
                         PyEval_GetFuncName(func),
                         PyEval_GetFuncDesc(func),
                         key);
            Py_DECREF(key);
            Py_DECREF(value);
            Py_DECREF(kwdict);
            return NULL;
        }
        err = PyDict_SetItem(kwdict, key, value);
        Py_DECREF(key);
        Py_DECREF(value);
        if (err) {
            Py_DECREF(kwdict);
            return NULL;
        }
    }
    return kwdict;
}
static PyObject *
update_star_args(int nstack, int nstar, PyObject *stararg,
                 PyObject ***pp_stack)
{
    PyObject *callargs, *w;

    callargs = PyTuple_New(nstack + nstar);
    if (callargs == NULL) {
        return NULL;
    }
    if (nstar) {
        int i;
        for (i = 0; i < nstar; i++) {
            PyObject *a = PyTuple_GET_ITEM(stararg, i);
            Py_INCREF(a);
            PyTuple_SET_ITEM(callargs, nstack + i, a);
        }
    }
    while (--nstack >= 0) {
        w = EXT_POP(*pp_stack);
        PyTuple_SET_ITEM(callargs, nstack, w);
    }
    return callargs;
}
static PyObject *
load_args(PyObject ***pp_stack, int na)
{
    PyObject *args = PyTuple_New(na);
    PyObject *w;

    if (args == NULL)
        return NULL;
    while (--na >= 0) {
        w = EXT_POP(*pp_stack);
        PyTuple_SET_ITEM(args, na, w);
    }
    return args;
}
static PyObject *
do_call(PyObject *func, PyObject ***pp_stack, int na, int nk)
{
    PyObject *callargs = NULL;
    PyObject *kwdict = NULL;
    PyObject *result = NULL;

    if (nk > 0) {
        kwdict = update_keyword_args(NULL, nk, pp_stack, func);
        if (kwdict == NULL)
            goto call_fail;
    }
    callargs = load_args(pp_stack, na);
    if (callargs == NULL)
        goto call_fail;
#ifdef CALL_PROFILE
    /* At this point, we have to look at the type of func to
       update the call stats properly.  Do it here so as to avoid
       exposing the call stats machinery outside ceval.c
    */
    if (PyFunction_Check(func))
        PCALL(PCALL_FUNCTION);
    else if (PyMethod_Check(func))
        PCALL(PCALL_METHOD);
    else if (PyType_Check(func))
        PCALL(PCALL_TYPE);
    else if (PyCFunction_Check(func))
        PCALL(PCALL_CFUNCTION);
    else
        PCALL(PCALL_OTHER);
#endif
    if (PyCFunction_Check(func)) {
        PyThreadState *tstate = PyThreadState_GET();
        C_TRACE(result, PyCFunction_Call(func, callargs, kwdict));
    }
    else
        result = PyObject_Call(func, callargs, kwdict);
call_fail:
    Py_XDECREF(callargs);
    Py_XDECREF(kwdict);
    return result;
}
static PyObject *
ext_do_call(PyObject *func, PyObject ***pp_stack, int flags, int na, int nk)
{
    int nstar = 0;
    PyObject *callargs = NULL;
    PyObject *stararg = NULL;
    PyObject *kwdict = NULL;
    PyObject *result = NULL;

    if (flags & CALL_FLAG_KW) {
        kwdict = EXT_POP(*pp_stack);
        if (!PyDict_Check(kwdict)) {
            PyObject *d;
            d = PyDict_New();
            if (d == NULL)
                goto ext_call_fail;
            if (PyDict_Update(d, kwdict) != 0) {
                Py_DECREF(d);
                /* PyDict_Update raises attribute
                 * error (percolated from an attempt
                 * to get 'keys' attribute) instead of
                 * a type error if its second argument
                 * is not a mapping.
                 */
                if (PyErr_ExceptionMatches(PyExc_AttributeError)) {
                    PyErr_Format(PyExc_TypeError,
                                 "%.200s%.200s argument after ** "
                                 "must be a mapping, not %.200s",
                                 PyEval_GetFuncName(func),
                                 PyEval_GetFuncDesc(func),
                                 kwdict->ob_type->tp_name);
                }
                goto ext_call_fail;
            }
            Py_DECREF(kwdict);
            kwdict = d;
        }
    }
    if (flags & CALL_FLAG_VAR) {
        stararg = EXT_POP(*pp_stack);
        if (!PyTuple_Check(stararg)) {
            PyObject *t = NULL;
            t = PySequence_Tuple(stararg);
            if (t == NULL) {
                if (PyErr_ExceptionMatches(PyExc_TypeError)) {
                    PyErr_Format(PyExc_TypeError,
                                 "%.200s%.200s argument after * "
                                 "must be a sequence, not %.200s",
                                 PyEval_GetFuncName(func),
                                 PyEval_GetFuncDesc(func),
                                 stararg->ob_type->tp_name);
                }
                goto ext_call_fail;
            }
            Py_DECREF(stararg);
            stararg = t;
        }
        nstar = PyTuple_GET_SIZE(stararg);
    }
    if (nk > 0) {
        kwdict = update_keyword_args(kwdict, nk, pp_stack, func);
        if (kwdict == NULL)
            goto ext_call_fail;
    }
    callargs = update_star_args(na, nstar, stararg, pp_stack);
    if (callargs == NULL)
        goto ext_call_fail;
#ifdef CALL_PROFILE
    /* At this point, we have to look at the type of func to
       update the call stats properly.  Do it here so as to avoid
       exposing the call stats machinery outside ceval.c
    */
    if (PyFunction_Check(func))
        PCALL(PCALL_FUNCTION);
    else if (PyMethod_Check(func))
        PCALL(PCALL_METHOD);
    else if (PyType_Check(func))
        PCALL(PCALL_TYPE);
    else if (PyCFunction_Check(func))
        PCALL(PCALL_CFUNCTION);
    else
        PCALL(PCALL_OTHER);
#endif
    if (PyCFunction_Check(func)) {
        PyThreadState *tstate = PyThreadState_GET();
        C_TRACE(result, PyCFunction_Call(func, callargs, kwdict));
    }
    else
        result = PyObject_Call(func, callargs, kwdict);
ext_call_fail:
    Py_XDECREF(callargs);
    Py_XDECREF(kwdict);
    Py_XDECREF(stararg);
    return result;
}
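
/* Illustrative note: CALL_FUNCTION_VAR_KW sets both flag bits, so a call
   like f(a, *t, **d) reaches ext_do_call() with
   flags == CALL_FLAG_VAR | CALL_FLAG_KW; the ** mapping is popped first,
   then the * sequence, and only then the explicit keyword pairs and
   positional arguments still on the stack. */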
/* Extract a slice index from a PyInt or PyLong or an object with the
   nb_index slot defined, and store in *pi.
   Silently reduce values larger than PY_SSIZE_T_MAX to PY_SSIZE_T_MAX,
   and silently boost values less than -PY_SSIZE_T_MAX-1 to -PY_SSIZE_T_MAX-1.
   Return 0 on error, 1 on success.
*/
/* Note:  If v is NULL, return success without storing into *pi.  This
   is because _PyEval_SliceIndex() is called by apply_slice(), which can be
   called by the SLICE opcode with v and/or w equal to NULL.
*/
int
_PyEval_SliceIndex(PyObject *v, Py_ssize_t *pi)
{
    if (v != NULL) {
        Py_ssize_t x;
        if (PyIndex_Check(v)) {
            x = PyNumber_AsSsize_t(v, NULL);
            if (x == -1 && PyErr_Occurred())
                return 0;
        }
        else {
            PyErr_SetString(PyExc_TypeError,
                            "slice indices must be integers or "
                            "None or have an __index__ method");
            return 0;
        }
        *pi = x;
    }
    return 1;
}
#define CANNOT_CATCH_MSG "catching classes that do not inherit from "\
                         "BaseException is not allowed"

static PyObject *
cmp_outcome(int op, register PyObject *v, register PyObject *w)
{
    int res = 0;
    switch (op) {
    case PyCmp_IS:
        res = (v == w);
        break;
    case PyCmp_IS_NOT:
        res = (v != w);
        break;
    case PyCmp_IN:
        res = PySequence_Contains(w, v);
        if (res < 0)
            return NULL;
        break;
    case PyCmp_NOT_IN:
        res = PySequence_Contains(w, v);
        if (res < 0)
            return NULL;
        res = !res;
        break;
    case PyCmp_EXC_MATCH:
        if (PyTuple_Check(w)) {
            Py_ssize_t i, length;
            length = PyTuple_Size(w);
            for (i = 0; i < length; i += 1) {
                PyObject *exc = PyTuple_GET_ITEM(w, i);
                if (!PyExceptionClass_Check(exc)) {
                    PyErr_SetString(PyExc_TypeError,
                                    CANNOT_CATCH_MSG);
                    return NULL;
                }
            }
        }
        else {
            if (!PyExceptionClass_Check(w)) {
                PyErr_SetString(PyExc_TypeError,
                                CANNOT_CATCH_MSG);
                return NULL;
            }
        }
        res = PyErr_GivenExceptionMatches(v, w);
        break;
    default:
        return PyObject_RichCompare(v, w, op);
    }
    v = res ? Py_True : Py_False;
    Py_INCREF(v);
    return v;
}
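
/* Illustrative note: PyCmp_EXC_MATCH is the comparison behind
   `except E:` -- for `except (TypeError, ValueError):` the tuple branch
   above validates every member against PyExceptionClass_Check() before
   delegating to PyErr_GivenExceptionMatches(). */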
static PyObject *
import_from(PyObject *v, PyObject *name)
{
    PyObject *x;

    x = PyObject_GetAttr(v, name);
    if (x == NULL && PyErr_ExceptionMatches(PyExc_AttributeError)) {
        PyErr_Format(PyExc_ImportError, "cannot import name %S", name);
    }
    return x;
}
static int
import_all_from(PyObject *locals, PyObject *v)
{
    PyObject *all = PyObject_GetAttrString(v, "__all__");
    PyObject *dict, *name, *value;
    int skip_leading_underscores = 0;
    int pos, err;

    if (all == NULL) {
        if (!PyErr_ExceptionMatches(PyExc_AttributeError))
            return -1; /* Unexpected error */
        PyErr_Clear();
        dict = PyObject_GetAttrString(v, "__dict__");
        if (dict == NULL) {
            if (!PyErr_ExceptionMatches(PyExc_AttributeError))
                return -1;
            PyErr_SetString(PyExc_ImportError,
                "from-import-* object has no __dict__ and no __all__");
            return -1;
        }
        all = PyMapping_Keys(dict);
        Py_DECREF(dict);
        if (all == NULL)
            return -1;
        skip_leading_underscores = 1;
    }

    for (pos = 0, err = 0; ; pos++) {
        name = PySequence_GetItem(all, pos);
        if (name == NULL) {
            if (!PyErr_ExceptionMatches(PyExc_IndexError))
                err = -1;
            else
                PyErr_Clear();
            break;
        }
        if (skip_leading_underscores &&
            PyUnicode_Check(name) &&
            PyUnicode_AS_UNICODE(name)[0] == '_') {
            Py_DECREF(name);
            continue;
        }
        value = PyObject_GetAttr(v, name);
        if (value == NULL)
            err = -1;
        else if (PyDict_CheckExact(locals))
            err = PyDict_SetItem(locals, name, value);
        else
            err = PyObject_SetItem(locals, name, value);
        Py_DECREF(name);
        Py_XDECREF(value);
        if (err != 0)
            break;
    }
    Py_DECREF(all);
    return err;
}
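
/* Illustrative note: for `from m import *` the list of names is taken
   from m.__all__ verbatim when it exists; otherwise the keys of
   m.__dict__ are used and names starting with '_' are skipped
   (skip_leading_underscores above). */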
static void
format_exc_check_arg(PyObject *exc, const char *format_str, PyObject *obj)
{
    const char *obj_str;

    if (!obj)
        return;

    obj_str = _PyUnicode_AsString(obj);
    if (!obj_str)
        return;

    PyErr_Format(exc, format_str, obj_str);
}
static PyObject *
unicode_concatenate(PyObject *v, PyObject *w,
                    PyFrameObject *f, unsigned char *next_instr)
{
    /* This function implements 'variable += expr' when both arguments
       are (Unicode) strings. */
    Py_ssize_t v_len = PyUnicode_GET_SIZE(v);
    Py_ssize_t w_len = PyUnicode_GET_SIZE(w);
    Py_ssize_t new_len = v_len + w_len;
    if (new_len < 0) {
        PyErr_SetString(PyExc_OverflowError,
                        "strings are too large to concat");
        return NULL;
    }

    if (v->ob_refcnt == 2) {
        /* In the common case, there are 2 references to the value
         * stored in 'variable' when the += is performed: one on the
         * value stack (in 'v') and one still stored in the
         * 'variable'.  We try to delete the variable now to reduce
         * the refcnt to 1.
         */
        switch (*next_instr) {
        case STORE_FAST:
        {
            int oparg = PEEKARG();
            PyObject **fastlocals = f->f_localsplus;
            if (GETLOCAL(oparg) == v)
                SETLOCAL(oparg, NULL);
            break;
        }
        case STORE_DEREF:
        {
            PyObject **freevars = (f->f_localsplus +
                                   f->f_code->co_nlocals);
            PyObject *c = freevars[PEEKARG()];
            if (PyCell_GET(c) == v)
                PyCell_Set(c, NULL);
            break;
        }
        case STORE_NAME:
        {
            PyObject *names = f->f_code->co_names;
            PyObject *name = GETITEM(names, PEEKARG());
            PyObject *locals = f->f_locals;
            if (PyDict_CheckExact(locals) &&
                PyDict_GetItem(locals, name) == v) {
                if (PyDict_DelItem(locals, name) != 0) {
                    PyErr_Clear();
                }
            }
            break;
        }
        }
    }

    if (v->ob_refcnt == 1 && !PyUnicode_CHECK_INTERNED(v)) {
        /* Now we own the last reference to 'v', so we can resize it
         * in-place.
         */
        if (PyUnicode_Resize(&v, new_len) != 0) {
            /* XXX if PyUnicode_Resize() fails, 'v' has been
             * deallocated so it cannot be put back into
             * 'variable'.  The MemoryError is raised when there
             * is no value in 'variable', which might (very
             * remotely) be a cause of incompatibilities.
             */
            return NULL;
        }
        /* copy 'w' into the newly allocated area of 'v' */
        memcpy(PyUnicode_AS_UNICODE(v) + v_len,
               PyUnicode_AS_UNICODE(w), w_len*sizeof(Py_UNICODE));
        return v;
    }
    else {
        /* When in-place resizing is not an option. */
        w = PyUnicode_Concat(v, w);
        Py_DECREF(v);
        return w;
    }
}
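
/* Illustrative note: this optimization targets the common

       s = ''
       for piece in pieces:
           s += piece

   pattern.  When `s` is not referenced anywhere else, dropping the
   variable's own reference lets PyUnicode_Resize() extend the buffer in
   place instead of building a brand-new string on every iteration. */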
#ifdef DYNAMIC_EXECUTION_PROFILE

static PyObject *
getarray(long a[256])
{
    int i;
    PyObject *l = PyList_New(256);
    if (l == NULL) return NULL;
    for (i = 0; i < 256; i++) {
        PyObject *x = PyLong_FromLong(a[i]);
        if (x == NULL) {
            Py_DECREF(l);
            return NULL;
        }
        PyList_SetItem(l, i, x);
    }
    for (i = 0; i < 256; i++)
        a[i] = 0;
    return l;
}

PyObject *
_Py_GetDXProfile(PyObject *self, PyObject *args)
{
#ifndef DXPAIRS
    return getarray(dxp);
#else
    int i;
    PyObject *l = PyList_New(257);
    if (l == NULL) return NULL;
    for (i = 0; i < 257; i++) {
        PyObject *x = getarray(dxpairs[i]);
        if (x == NULL) {
            Py_DECREF(l);
            return NULL;
        }
        PyList_SetItem(l, i, x);