 * This file compiles an abstract syntax tree (AST) into Python bytecode.
 *
 * The primary entry point is PyAST_Compile(), which returns a
 * PyCodeObject.  The compiler makes several passes to build the code
 * object:
 *   1. Checks for future statements.  See future.c.
 *   2. Builds a symbol table.  See symtable.c.
 *   3. Generate code for basic blocks.  See compiler_mod() in this file.
 *   4. Assemble the basic blocks into final code.  See assemble() in
 *      this file.
 *   5. Optimize the byte code (peephole optimizations).  See peephole.c.
 *
 * Note that compiler_mod() suggests module, but the module ast type
 * (mod_ty) has cases for expressions and interactive statements.
 *
 * CAUTION: The VISIT_* macros abort the current function when they
 * encounter a problem.  So don't invoke them when there is memory
 * which needs to be released.  Code blocks are OK, as the compiler
 * structure takes care of releasing those.  Use the arena to manage
 * objects.
26 #include "Python-ast.h"
35 int Py_OptimizeFlag
= 0;
37 #define DEFAULT_BLOCK_SIZE 16
38 #define DEFAULT_BLOCKS 8
39 #define DEFAULT_CODE_SIZE 128
40 #define DEFAULT_LNOTAB_SIZE 16
45 unsigned i_hasarg
: 1;
46 unsigned char i_opcode
;
48 struct basicblock_
*i_target
; /* target block (if jump instruction) */
52 typedef struct basicblock_
{
53 /* Each basicblock in a compilation unit is linked via b_list in the
54 reverse order that the block are allocated. b_list points to the next
55 block, not to be confused with b_next, which is next by control flow. */
56 struct basicblock_
*b_list
;
57 /* number of instructions used */
59 /* length of instruction array (b_instr) */
61 /* pointer to an array of instructions, initially NULL */
62 struct instr
*b_instr
;
63 /* If b_next is non-NULL, it is a pointer to the next
64 block reached by normal control flow. */
65 struct basicblock_
*b_next
;
66 /* b_seen is used to perform a DFS of basicblocks. */
68 /* b_return is true if a RETURN_VALUE opcode is inserted. */
69 unsigned b_return
: 1;
70 /* depth of stack upon entry of block, computed by stackdepth() */
72 /* instruction offset for block, computed by assemble_jump_offsets() */
76 /* fblockinfo tracks the current frame block.
78 A frame block is used to handle loops, try/except, and try/finally.
79 It's called a frame block to distinguish it from a basic block in the
83 enum fblocktype
{ LOOP
, EXCEPT
, FINALLY_TRY
, FINALLY_END
};
86 enum fblocktype fb_type
;
90 /* The following items change on entry and exit of code blocks.
91 They must be saved and restored when returning to a block.
93 struct compiler_unit
{
94 PySTEntryObject
*u_ste
;
97 /* The following fields are dicts that map objects to
98 the index of them in co_XXX. The index is used as
99 the argument for opcodes that refer to those collections.
101 PyObject
*u_consts
; /* all constants */
102 PyObject
*u_names
; /* all names */
103 PyObject
*u_varnames
; /* local variables */
104 PyObject
*u_cellvars
; /* cell variables */
105 PyObject
*u_freevars
; /* free variables */
107 PyObject
*u_private
; /* for private name mangling */
109 int u_argcount
; /* number of arguments for block */
110 /* Pointer to the most recently allocated block. By following b_list
111 members, you can reach all early allocated blocks. */
112 basicblock
*u_blocks
;
113 basicblock
*u_curblock
; /* pointer to current block */
116 struct fblockinfo u_fblock
[CO_MAXBLOCKS
];
118 int u_firstlineno
; /* the first lineno of the block */
119 int u_lineno
; /* the lineno for the current stmt */
120 bool u_lineno_set
; /* boolean to indicate whether instr
121 has been generated with current lineno */
124 /* This struct captures the global state of a compilation.
126 The u pointer points to the current compilation unit, while units
127 for enclosing blocks are stored in c_stack. The u and c_stack are
128 managed by compiler_enter_scope() and compiler_exit_scope().
132 const char *c_filename
;
133 struct symtable
*c_st
;
134 PyFutureFeatures
*c_future
; /* pointer to module's __future__ */
135 PyCompilerFlags
*c_flags
;
137 int c_interactive
; /* true if in interactive mode */
140 struct compiler_unit
*u
; /* compiler state for current block */
141 PyObject
*c_stack
; /* Python list holding compiler_unit ptrs */
142 PyArena
*c_arena
; /* pointer to memory allocation arena */
145 static int compiler_enter_scope(struct compiler
*, identifier
, void *, int);
146 static void compiler_free(struct compiler
*);
147 static basicblock
*compiler_new_block(struct compiler
*);
148 static int compiler_next_instr(struct compiler
*, basicblock
*);
149 static int compiler_addop(struct compiler
*, int);
150 static int compiler_addop_o(struct compiler
*, int, PyObject
*, PyObject
*);
151 static int compiler_addop_i(struct compiler
*, int, int);
152 static int compiler_addop_j(struct compiler
*, int, basicblock
*, int);
153 static basicblock
*compiler_use_new_block(struct compiler
*);
154 static int compiler_error(struct compiler
*, const char *);
155 static int compiler_nameop(struct compiler
*, identifier
, expr_context_ty
);
157 static PyCodeObject
*compiler_mod(struct compiler
*, mod_ty
);
158 static int compiler_visit_stmt(struct compiler
*, stmt_ty
);
159 static int compiler_visit_keyword(struct compiler
*, keyword_ty
);
160 static int compiler_visit_expr(struct compiler
*, expr_ty
);
161 static int compiler_augassign(struct compiler
*, stmt_ty
);
162 static int compiler_visit_slice(struct compiler
*, slice_ty
,
165 static int compiler_push_fblock(struct compiler
*, enum fblocktype
,
167 static void compiler_pop_fblock(struct compiler
*, enum fblocktype
,
169 /* Returns true if there is a loop on the fblock stack. */
170 static int compiler_in_loop(struct compiler
*);
172 static int inplace_binop(struct compiler
*, operator_ty
);
173 static int expr_constant(expr_ty e
);
175 static int compiler_with(struct compiler
*, stmt_ty
);
177 static PyCodeObject
*assemble(struct compiler
*, int addNone
);
178 static PyObject
*__doc__
;
181 _Py_Mangle(PyObject
*privateobj
, PyObject
*ident
)
183 /* Name mangling: __private becomes _classname__private.
184 This is independent from how the name is used. */
185 const char *p
, *name
= PyString_AsString(ident
);
188 if (privateobj
== NULL
|| !PyString_Check(privateobj
) ||
189 name
== NULL
|| name
[0] != '_' || name
[1] != '_') {
193 p
= PyString_AsString(privateobj
);
195 /* Don't mangle __id__ or names with dots.
197 The only time a name with a dot can occur is when
198 we are compiling an import statement that has a
201 TODO(jhylton): Decide whether we want to support
202 mangling of the module name, e.g. __M.X.
204 if ((name
[nlen
-1] == '_' && name
[nlen
-2] == '_')
205 || strchr(name
, '.')) {
207 return ident
; /* Don't mangle __whatever__ */
209 /* Strip leading underscores from class name */
214 return ident
; /* Don't mangle if class is just underscores */
218 assert(1 <= PY_SSIZE_T_MAX
- nlen
);
219 assert(1 + nlen
<= PY_SSIZE_T_MAX
- plen
);
221 ident
= PyString_FromStringAndSize(NULL
, 1 + nlen
+ plen
);
224 /* ident = "_" + p[:plen] + name # i.e. 1+plen+nlen bytes */
225 buffer
= PyString_AS_STRING(ident
);
227 strncpy(buffer
+1, p
, plen
);
228 strcpy(buffer
+1+plen
, name
);
233 compiler_init(struct compiler
*c
)
235 memset(c
, 0, sizeof(struct compiler
));
237 c
->c_stack
= PyList_New(0);
245 PyAST_Compile(mod_ty mod
, const char *filename
, PyCompilerFlags
*flags
,
249 PyCodeObject
*co
= NULL
;
250 PyCompilerFlags local_flags
;
254 __doc__
= PyString_InternFromString("__doc__");
259 if (!compiler_init(&c
))
261 c
.c_filename
= filename
;
263 c
.c_future
= PyFuture_FromAST(mod
, filename
);
264 if (c
.c_future
== NULL
)
267 local_flags
.cf_flags
= 0;
268 flags
= &local_flags
;
270 merged
= c
.c_future
->ff_features
| flags
->cf_flags
;
271 c
.c_future
->ff_features
= merged
;
272 flags
->cf_flags
= merged
;
276 c
.c_st
= PySymtable_Build(mod
, filename
, c
.c_future
);
277 if (c
.c_st
== NULL
) {
278 if (!PyErr_Occurred())
279 PyErr_SetString(PyExc_SystemError
, "no symtable");
283 co
= compiler_mod(&c
, mod
);
287 assert(co
|| PyErr_Occurred());
292 PyNode_Compile(struct _node
*n
, const char *filename
)
294 PyCodeObject
*co
= NULL
;
296 PyArena
*arena
= PyArena_New();
299 mod
= PyAST_FromNode(n
, NULL
, filename
, arena
);
301 co
= PyAST_Compile(mod
, filename
, NULL
, arena
);
307 compiler_free(struct compiler
*c
)
310 PySymtable_Free(c
->c_st
);
312 PyObject_Free(c
->c_future
);
313 Py_DECREF(c
->c_stack
);
317 list2dict(PyObject
*list
)
321 PyObject
*dict
= PyDict_New();
322 if (!dict
) return NULL
;
324 n
= PyList_Size(list
);
325 for (i
= 0; i
< n
; i
++) {
326 v
= PyInt_FromLong(i
);
331 k
= PyList_GET_ITEM(list
, i
);
332 k
= PyTuple_Pack(2, k
, k
->ob_type
);
333 if (k
== NULL
|| PyDict_SetItem(dict
, k
, v
) < 0) {
345 /* Return new dict containing names from src that match scope(s).
347 src is a symbol table dictionary. If the scope of a name matches
348 either scope_type or flag is set, insert it into the new dict. The
349 values are integers, starting at offset and increasing by one for
354 dictbytype(PyObject
*src
, int scope_type
, int flag
, int offset
)
356 Py_ssize_t pos
= 0, i
= offset
, scope
;
357 PyObject
*k
, *v
, *dest
= PyDict_New();
363 while (PyDict_Next(src
, &pos
, &k
, &v
)) {
364 /* XXX this should probably be a macro in symtable.h */
365 assert(PyInt_Check(v
));
366 scope
= (PyInt_AS_LONG(v
) >> SCOPE_OFF
) & SCOPE_MASK
;
368 if (scope
== scope_type
|| PyInt_AS_LONG(v
) & flag
) {
369 PyObject
*tuple
, *item
= PyInt_FromLong(i
);
375 tuple
= PyTuple_Pack(2, k
, k
->ob_type
);
376 if (!tuple
|| PyDict_SetItem(dest
, tuple
, item
) < 0) {
390 compiler_unit_check(struct compiler_unit
*u
)
393 for (block
= u
->u_blocks
; block
!= NULL
; block
= block
->b_list
) {
394 assert((void *)block
!= (void *)0xcbcbcbcb);
395 assert((void *)block
!= (void *)0xfbfbfbfb);
396 assert((void *)block
!= (void *)0xdbdbdbdb);
397 if (block
->b_instr
!= NULL
) {
398 assert(block
->b_ialloc
> 0);
399 assert(block
->b_iused
> 0);
400 assert(block
->b_ialloc
>= block
->b_iused
);
403 assert (block
->b_iused
== 0);
404 assert (block
->b_ialloc
== 0);
410 compiler_unit_free(struct compiler_unit
*u
)
412 basicblock
*b
, *next
;
414 compiler_unit_check(u
);
418 PyObject_Free((void *)b
->b_instr
);
420 PyObject_Free((void *)b
);
425 Py_CLEAR(u
->u_consts
);
426 Py_CLEAR(u
->u_names
);
427 Py_CLEAR(u
->u_varnames
);
428 Py_CLEAR(u
->u_freevars
);
429 Py_CLEAR(u
->u_cellvars
);
430 Py_CLEAR(u
->u_private
);
435 compiler_enter_scope(struct compiler
*c
, identifier name
, void *key
,
438 struct compiler_unit
*u
;
440 u
= (struct compiler_unit
*)PyObject_Malloc(sizeof(
441 struct compiler_unit
));
446 memset(u
, 0, sizeof(struct compiler_unit
));
448 u
->u_ste
= PySymtable_Lookup(c
->c_st
, key
);
450 compiler_unit_free(u
);
455 u
->u_varnames
= list2dict(u
->u_ste
->ste_varnames
);
456 u
->u_cellvars
= dictbytype(u
->u_ste
->ste_symbols
, CELL
, 0, 0);
457 if (!u
->u_varnames
|| !u
->u_cellvars
) {
458 compiler_unit_free(u
);
462 u
->u_freevars
= dictbytype(u
->u_ste
->ste_symbols
, FREE
, DEF_FREE_CLASS
,
463 PyDict_Size(u
->u_cellvars
));
464 if (!u
->u_freevars
) {
465 compiler_unit_free(u
);
471 u
->u_firstlineno
= lineno
;
473 u
->u_lineno_set
= false;
474 u
->u_consts
= PyDict_New();
476 compiler_unit_free(u
);
479 u
->u_names
= PyDict_New();
481 compiler_unit_free(u
);
487 /* Push the old compiler_unit on the stack. */
489 PyObject
*wrapper
= PyCObject_FromVoidPtr(c
->u
, NULL
);
490 if (!wrapper
|| PyList_Append(c
->c_stack
, wrapper
) < 0) {
492 compiler_unit_free(u
);
496 u
->u_private
= c
->u
->u_private
;
497 Py_XINCREF(u
->u_private
);
502 if (compiler_use_new_block(c
) == NULL
)
509 compiler_exit_scope(struct compiler
*c
)
515 compiler_unit_free(c
->u
);
516 /* Restore c->u to the parent unit. */
517 n
= PyList_GET_SIZE(c
->c_stack
) - 1;
519 wrapper
= PyList_GET_ITEM(c
->c_stack
, n
);
520 c
->u
= (struct compiler_unit
*)PyCObject_AsVoidPtr(wrapper
);
522 /* we are deleting from a list so this really shouldn't fail */
523 if (PySequence_DelItem(c
->c_stack
, n
) < 0)
524 Py_FatalError("compiler_exit_scope()");
525 compiler_unit_check(c
->u
);
532 /* Allocate a new block and return a pointer to it.
533 Returns NULL on error.
537 compiler_new_block(struct compiler
*c
)
540 struct compiler_unit
*u
;
543 b
= (basicblock
*)PyObject_Malloc(sizeof(basicblock
));
548 memset((void *)b
, 0, sizeof(basicblock
));
549 /* Extend the singly linked list of blocks with new block. */
550 b
->b_list
= u
->u_blocks
;
556 compiler_use_new_block(struct compiler
*c
)
558 basicblock
*block
= compiler_new_block(c
);
561 c
->u
->u_curblock
= block
;
566 compiler_next_block(struct compiler
*c
)
568 basicblock
*block
= compiler_new_block(c
);
571 c
->u
->u_curblock
->b_next
= block
;
572 c
->u
->u_curblock
= block
;
577 compiler_use_next_block(struct compiler
*c
, basicblock
*block
)
579 assert(block
!= NULL
);
580 c
->u
->u_curblock
->b_next
= block
;
581 c
->u
->u_curblock
= block
;
585 /* Returns the offset of the next instruction in the current block's
586 b_instr array. Resizes the b_instr as necessary.
587 Returns -1 on failure.
591 compiler_next_instr(struct compiler
*c
, basicblock
*b
)
594 if (b
->b_instr
== NULL
) {
595 b
->b_instr
= (struct instr
*)PyObject_Malloc(
596 sizeof(struct instr
) * DEFAULT_BLOCK_SIZE
);
597 if (b
->b_instr
== NULL
) {
601 b
->b_ialloc
= DEFAULT_BLOCK_SIZE
;
602 memset((char *)b
->b_instr
, 0,
603 sizeof(struct instr
) * DEFAULT_BLOCK_SIZE
);
605 else if (b
->b_iused
== b
->b_ialloc
) {
607 size_t oldsize
, newsize
;
608 oldsize
= b
->b_ialloc
* sizeof(struct instr
);
609 newsize
= oldsize
<< 1;
611 if (oldsize
> (PY_SIZE_MAX
>> 1)) {
621 tmp
= (struct instr
*)PyObject_Realloc(
622 (void *)b
->b_instr
, newsize
);
628 memset((char *)b
->b_instr
+ oldsize
, 0, newsize
- oldsize
);
633 /* Set the i_lineno member of the instruction at offset off if the
634 line number for the current expression/statement has not
635 already been set. If it has been set, the call has no effect.
637 The line number is reset in the following cases:
638 - when entering a new scope
640 - on each expression that start a new line
641 - before the "except" clause
642 - before the "for" and "while" expressions
646 compiler_set_lineno(struct compiler
*c
, int off
)
649 if (c
->u
->u_lineno_set
)
651 c
->u
->u_lineno_set
= true;
652 b
= c
->u
->u_curblock
;
653 b
->b_instr
[off
].i_lineno
= c
->u
->u_lineno
;
657 opcode_stack_effect(int opcode
, int oparg
)
681 case BINARY_MULTIPLY
:
685 case BINARY_SUBTRACT
:
687 case BINARY_FLOOR_DIVIDE
:
688 case BINARY_TRUE_DIVIDE
:
690 case INPLACE_FLOOR_DIVIDE
:
691 case INPLACE_TRUE_DIVIDE
:
722 case INPLACE_SUBTRACT
:
723 case INPLACE_MULTIPLY
:
753 case PRINT_NEWLINE_TO
:
766 return -1; /* XXX Sometimes more */
781 return -3; /* or -1 or -2 if no exception occurred or
782 return/break/continue */
790 case UNPACK_SEQUENCE
:
793 return 1; /* or -1, at end of iterator */
824 case JUMP_IF_TRUE_OR_POP
: /* -1 if jump not taken */
825 case JUMP_IF_FALSE_OR_POP
: /* "" */
829 case POP_JUMP_IF_FALSE
:
830 case POP_JUMP_IF_TRUE
:
852 #define NARGS(o) (((o) % 256) + 2*((o) / 256))
854 return -NARGS(oparg
);
855 case CALL_FUNCTION_VAR
:
856 case CALL_FUNCTION_KW
:
857 return -NARGS(oparg
)-1;
858 case CALL_FUNCTION_VAR_KW
:
859 return -NARGS(oparg
)-2;
878 fprintf(stderr
, "opcode = %d\n", opcode
);
879 Py_FatalError("opcode_stack_effect()");
882 return 0; /* not reachable */
885 /* Add an opcode with no argument.
886 Returns 0 on failure, 1 on success.
890 compiler_addop(struct compiler
*c
, int opcode
)
895 off
= compiler_next_instr(c
, c
->u
->u_curblock
);
898 b
= c
->u
->u_curblock
;
899 i
= &b
->b_instr
[off
];
900 i
->i_opcode
= opcode
;
902 if (opcode
== RETURN_VALUE
)
904 compiler_set_lineno(c
, off
);
909 compiler_add_o(struct compiler
*c
, PyObject
*dict
, PyObject
*o
)
916 /* necessary to make sure types aren't coerced (e.g., int and long) */
917 /* _and_ to distinguish 0.0 from -0.0 e.g. on IEEE platforms */
918 if (PyFloat_Check(o
)) {
919 d
= PyFloat_AS_DOUBLE(o
);
920 p
= (unsigned char*) &d
;
921 /* all we need is to make the tuple different in either the 0.0
922 * or -0.0 case from all others, just to avoid the "coercion".
924 if (*p
==0 && p
[sizeof(double)-1]==0)
925 t
= PyTuple_Pack(3, o
, o
->ob_type
, Py_None
);
927 t
= PyTuple_Pack(2, o
, o
->ob_type
);
929 #ifndef WITHOUT_COMPLEX
930 else if (PyComplex_Check(o
)) {
932 int real_part_zero
, imag_part_zero
;
934 /* complex case is even messier: we need to make complex(x,
935 0.) different from complex(x, -0.) and complex(0., y)
936 different from complex(-0., y), for any x and y. In
937 particular, all four complex zeros should be
939 z
= PyComplex_AsCComplex(o
);
940 p
= (unsigned char*) &(z
.real
);
941 q
= (unsigned char*) &(z
.imag
);
942 /* all that matters here is that on IEEE platforms
943 real_part_zero will be true if z.real == 0., and false if
944 z.real == -0. In fact, real_part_zero will also be true
945 for some other rarely occurring nonzero floats, but this
946 doesn't matter. Similar comments apply to
948 real_part_zero
= *p
==0 && p
[sizeof(double)-1]==0;
949 imag_part_zero
= *q
==0 && q
[sizeof(double)-1]==0;
950 if (real_part_zero
&& imag_part_zero
) {
951 t
= PyTuple_Pack(4, o
, o
->ob_type
, Py_True
, Py_True
);
953 else if (real_part_zero
&& !imag_part_zero
) {
954 t
= PyTuple_Pack(4, o
, o
->ob_type
, Py_True
, Py_False
);
956 else if (!real_part_zero
&& imag_part_zero
) {
957 t
= PyTuple_Pack(4, o
, o
->ob_type
, Py_False
, Py_True
);
960 t
= PyTuple_Pack(2, o
, o
->ob_type
);
963 #endif /* WITHOUT_COMPLEX */
965 t
= PyTuple_Pack(2, o
, o
->ob_type
);
970 v
= PyDict_GetItem(dict
, t
);
972 arg
= PyDict_Size(dict
);
973 v
= PyInt_FromLong(arg
);
978 if (PyDict_SetItem(dict
, t
, v
) < 0) {
986 arg
= PyInt_AsLong(v
);
992 compiler_addop_o(struct compiler
*c
, int opcode
, PyObject
*dict
,
995 int arg
= compiler_add_o(c
, dict
, o
);
998 return compiler_addop_i(c
, opcode
, arg
);
1002 compiler_addop_name(struct compiler
*c
, int opcode
, PyObject
*dict
,
1006 PyObject
*mangled
= _Py_Mangle(c
->u
->u_private
, o
);
1009 arg
= compiler_add_o(c
, dict
, mangled
);
1013 return compiler_addop_i(c
, opcode
, arg
);
1016 /* Add an opcode with an integer argument.
1017 Returns 0 on failure, 1 on success.
1021 compiler_addop_i(struct compiler
*c
, int opcode
, int oparg
)
1025 off
= compiler_next_instr(c
, c
->u
->u_curblock
);
1028 i
= &c
->u
->u_curblock
->b_instr
[off
];
1029 i
->i_opcode
= opcode
;
1032 compiler_set_lineno(c
, off
);
1037 compiler_addop_j(struct compiler
*c
, int opcode
, basicblock
*b
, int absolute
)
1043 off
= compiler_next_instr(c
, c
->u
->u_curblock
);
1046 i
= &c
->u
->u_curblock
->b_instr
[off
];
1047 i
->i_opcode
= opcode
;
1054 compiler_set_lineno(c
, off
);
1058 /* The distinction between NEW_BLOCK and NEXT_BLOCK is subtle. (I'd
1059 like to find better names.) NEW_BLOCK() creates a new block and sets
1060 it as the current block. NEXT_BLOCK() also creates an implicit jump
1061 from the current block to the new block.
1064 /* The returns inside these macros make it impossible to decref objects
1065 created in the local function. Local objects should use the arena.
1069 #define NEW_BLOCK(C) { \
1070 if (compiler_use_new_block((C)) == NULL) \
1074 #define NEXT_BLOCK(C) { \
1075 if (compiler_next_block((C)) == NULL) \
1079 #define ADDOP(C, OP) { \
1080 if (!compiler_addop((C), (OP))) \
1084 #define ADDOP_IN_SCOPE(C, OP) { \
1085 if (!compiler_addop((C), (OP))) { \
1086 compiler_exit_scope(c); \
1091 #define ADDOP_O(C, OP, O, TYPE) { \
1092 if (!compiler_addop_o((C), (OP), (C)->u->u_ ## TYPE, (O))) \
1096 #define ADDOP_NAME(C, OP, O, TYPE) { \
1097 if (!compiler_addop_name((C), (OP), (C)->u->u_ ## TYPE, (O))) \
1101 #define ADDOP_I(C, OP, O) { \
1102 if (!compiler_addop_i((C), (OP), (O))) \
1106 #define ADDOP_JABS(C, OP, O) { \
1107 if (!compiler_addop_j((C), (OP), (O), 1)) \
1111 #define ADDOP_JREL(C, OP, O) { \
1112 if (!compiler_addop_j((C), (OP), (O), 0)) \
1116 /* VISIT and VISIT_SEQ takes an ASDL type as their second argument. They use
1117 the ASDL name to synthesize the name of the C type and the visit function.
1120 #define VISIT(C, TYPE, V) {\
1121 if (!compiler_visit_ ## TYPE((C), (V))) \
1125 #define VISIT_IN_SCOPE(C, TYPE, V) {\
1126 if (!compiler_visit_ ## TYPE((C), (V))) { \
1127 compiler_exit_scope(c); \
1132 #define VISIT_SLICE(C, V, CTX) {\
1133 if (!compiler_visit_slice((C), (V), (CTX))) \
1137 #define VISIT_SEQ(C, TYPE, SEQ) { \
1139 asdl_seq *seq = (SEQ); /* avoid variable capture */ \
1140 for (_i = 0; _i < asdl_seq_LEN(seq); _i++) { \
1141 TYPE ## _ty elt = (TYPE ## _ty)asdl_seq_GET(seq, _i); \
1142 if (!compiler_visit_ ## TYPE((C), elt)) \
1147 #define VISIT_SEQ_IN_SCOPE(C, TYPE, SEQ) { \
1149 asdl_seq *seq = (SEQ); /* avoid variable capture */ \
1150 for (_i = 0; _i < asdl_seq_LEN(seq); _i++) { \
1151 TYPE ## _ty elt = (TYPE ## _ty)asdl_seq_GET(seq, _i); \
1152 if (!compiler_visit_ ## TYPE((C), elt)) { \
1153 compiler_exit_scope(c); \
1160 compiler_isdocstring(stmt_ty s
)
1162 if (s
->kind
!= Expr_kind
)
1164 return s
->v
.Expr
.value
->kind
== Str_kind
;
1167 /* Compile a sequence of statements, checking for a docstring. */
1170 compiler_body(struct compiler
*c
, asdl_seq
*stmts
)
1175 if (!asdl_seq_LEN(stmts
))
1177 st
= (stmt_ty
)asdl_seq_GET(stmts
, 0);
1178 if (compiler_isdocstring(st
) && Py_OptimizeFlag
< 2) {
1179 /* don't generate docstrings if -OO */
1181 VISIT(c
, expr
, st
->v
.Expr
.value
);
1182 if (!compiler_nameop(c
, __doc__
, Store
))
1185 for (; i
< asdl_seq_LEN(stmts
); i
++)
1186 VISIT(c
, stmt
, (stmt_ty
)asdl_seq_GET(stmts
, i
));
1190 static PyCodeObject
*
1191 compiler_mod(struct compiler
*c
, mod_ty mod
)
1195 static PyObject
*module
;
1197 module
= PyString_InternFromString("<module>");
1201 /* Use 0 for firstlineno initially, will fixup in assemble(). */
1202 if (!compiler_enter_scope(c
, module
, mod
, 0))
1204 switch (mod
->kind
) {
1206 if (!compiler_body(c
, mod
->v
.Module
.body
)) {
1207 compiler_exit_scope(c
);
1211 case Interactive_kind
:
1212 c
->c_interactive
= 1;
1213 VISIT_SEQ_IN_SCOPE(c
, stmt
,
1214 mod
->v
.Interactive
.body
);
1216 case Expression_kind
:
1217 VISIT_IN_SCOPE(c
, expr
, mod
->v
.Expression
.body
);
1221 PyErr_SetString(PyExc_SystemError
,
1222 "suite should not be possible");
1225 PyErr_Format(PyExc_SystemError
,
1226 "module kind %d should not be possible",
1230 co
= assemble(c
, addNone
);
1231 compiler_exit_scope(c
);
1235 /* The test for LOCAL must come before the test for FREE in order to
1236 handle classes where name is both local and free. The local var is
1237 a method and the free var is a free var referenced within a method.
1241 get_ref_type(struct compiler
*c
, PyObject
*name
)
1243 int scope
= PyST_GetScope(c
->u
->u_ste
, name
);
1246 PyOS_snprintf(buf
, sizeof(buf
),
1247 "unknown scope for %.100s in %.100s(%s) in %s\n"
1248 "symbols: %s\nlocals: %s\nglobals: %s",
1249 PyString_AS_STRING(name
),
1250 PyString_AS_STRING(c
->u
->u_name
),
1251 PyObject_REPR(c
->u
->u_ste
->ste_id
),
1253 PyObject_REPR(c
->u
->u_ste
->ste_symbols
),
1254 PyObject_REPR(c
->u
->u_varnames
),
1255 PyObject_REPR(c
->u
->u_names
)
1264 compiler_lookup_arg(PyObject
*dict
, PyObject
*name
)
1267 k
= PyTuple_Pack(2, name
, name
->ob_type
);
1270 v
= PyDict_GetItem(dict
, k
);
1274 return PyInt_AS_LONG(v
);
1278 compiler_make_closure(struct compiler
*c
, PyCodeObject
*co
, int args
)
1280 int i
, free
= PyCode_GetNumFree(co
);
1282 ADDOP_O(c
, LOAD_CONST
, (PyObject
*)co
, consts
);
1283 ADDOP_I(c
, MAKE_FUNCTION
, args
);
1286 for (i
= 0; i
< free
; ++i
) {
1287 /* Bypass com_addop_varname because it will generate
1288 LOAD_DEREF but LOAD_CLOSURE is needed.
1290 PyObject
*name
= PyTuple_GET_ITEM(co
->co_freevars
, i
);
1293 /* Special case: If a class contains a method with a
1294 free variable that has the same name as a method,
1295 the name will be considered free *and* local in the
1296 class. It should be handled by the closure, as
1297 well as by the normal name loookup logic.
1299 reftype
= get_ref_type(c
, name
);
1300 if (reftype
== CELL
)
1301 arg
= compiler_lookup_arg(c
->u
->u_cellvars
, name
);
1302 else /* (reftype == FREE) */
1303 arg
= compiler_lookup_arg(c
->u
->u_freevars
, name
);
1305 printf("lookup %s in %s %d %d\n"
1306 "freevars of %s: %s\n",
1307 PyObject_REPR(name
),
1308 PyString_AS_STRING(c
->u
->u_name
),
1310 PyString_AS_STRING(co
->co_name
),
1311 PyObject_REPR(co
->co_freevars
));
1312 Py_FatalError("compiler_make_closure()");
1314 ADDOP_I(c
, LOAD_CLOSURE
, arg
);
1316 ADDOP_I(c
, BUILD_TUPLE
, free
);
1317 ADDOP_O(c
, LOAD_CONST
, (PyObject
*)co
, consts
);
1318 ADDOP_I(c
, MAKE_CLOSURE
, args
);
1323 compiler_decorators(struct compiler
*c
, asdl_seq
* decos
)
1330 for (i
= 0; i
< asdl_seq_LEN(decos
); i
++) {
1331 VISIT(c
, expr
, (expr_ty
)asdl_seq_GET(decos
, i
));
1337 compiler_arguments(struct compiler
*c
, arguments_ty args
)
1340 int n
= asdl_seq_LEN(args
->args
);
1341 /* Correctly handle nested argument lists */
1342 for (i
= 0; i
< n
; i
++) {
1343 expr_ty arg
= (expr_ty
)asdl_seq_GET(args
->args
, i
);
1344 if (arg
->kind
== Tuple_kind
) {
1345 PyObject
*id
= PyString_FromFormat(".%d", i
);
1349 if (!compiler_nameop(c
, id
, Load
)) {
1354 VISIT(c
, expr
, arg
);
1361 compiler_function(struct compiler
*c
, stmt_ty s
)
1364 PyObject
*first_const
= Py_None
;
1365 arguments_ty args
= s
->v
.FunctionDef
.args
;
1366 asdl_seq
* decos
= s
->v
.FunctionDef
.decorator_list
;
1368 int i
, n
, docstring
;
1370 assert(s
->kind
== FunctionDef_kind
);
1372 if (!compiler_decorators(c
, decos
))
1375 VISIT_SEQ(c
, expr
, args
->defaults
);
1376 if (!compiler_enter_scope(c
, s
->v
.FunctionDef
.name
, (void *)s
,
1380 st
= (stmt_ty
)asdl_seq_GET(s
->v
.FunctionDef
.body
, 0);
1381 docstring
= compiler_isdocstring(st
);
1382 if (docstring
&& Py_OptimizeFlag
< 2)
1383 first_const
= st
->v
.Expr
.value
->v
.Str
.s
;
1384 if (compiler_add_o(c
, c
->u
->u_consts
, first_const
) < 0) {
1385 compiler_exit_scope(c
);
1389 /* unpack nested arguments */
1390 compiler_arguments(c
, args
);
1392 c
->u
->u_argcount
= asdl_seq_LEN(args
->args
);
1393 n
= asdl_seq_LEN(s
->v
.FunctionDef
.body
);
1394 /* if there was a docstring, we need to skip the first statement */
1395 for (i
= docstring
; i
< n
; i
++) {
1396 st
= (stmt_ty
)asdl_seq_GET(s
->v
.FunctionDef
.body
, i
);
1397 VISIT_IN_SCOPE(c
, stmt
, st
);
1399 co
= assemble(c
, 1);
1400 compiler_exit_scope(c
);
1404 compiler_make_closure(c
, co
, asdl_seq_LEN(args
->defaults
));
1407 for (i
= 0; i
< asdl_seq_LEN(decos
); i
++) {
1408 ADDOP_I(c
, CALL_FUNCTION
, 1);
1411 return compiler_nameop(c
, s
->v
.FunctionDef
.name
, Store
);
1415 compiler_class(struct compiler
*c
, stmt_ty s
)
1420 asdl_seq
* decos
= s
->v
.ClassDef
.decorator_list
;
1422 if (!compiler_decorators(c
, decos
))
1425 /* push class name on stack, needed by BUILD_CLASS */
1426 ADDOP_O(c
, LOAD_CONST
, s
->v
.ClassDef
.name
, consts
);
1427 /* push the tuple of base classes on the stack */
1428 n
= asdl_seq_LEN(s
->v
.ClassDef
.bases
);
1430 VISIT_SEQ(c
, expr
, s
->v
.ClassDef
.bases
);
1431 ADDOP_I(c
, BUILD_TUPLE
, n
);
1432 if (!compiler_enter_scope(c
, s
->v
.ClassDef
.name
, (void *)s
,
1435 Py_XDECREF(c
->u
->u_private
);
1436 c
->u
->u_private
= s
->v
.ClassDef
.name
;
1437 Py_INCREF(c
->u
->u_private
);
1438 str
= PyString_InternFromString("__name__");
1439 if (!str
|| !compiler_nameop(c
, str
, Load
)) {
1441 compiler_exit_scope(c
);
1446 str
= PyString_InternFromString("__module__");
1447 if (!str
|| !compiler_nameop(c
, str
, Store
)) {
1449 compiler_exit_scope(c
);
1454 if (!compiler_body(c
, s
->v
.ClassDef
.body
)) {
1455 compiler_exit_scope(c
);
1459 ADDOP_IN_SCOPE(c
, LOAD_LOCALS
);
1460 ADDOP_IN_SCOPE(c
, RETURN_VALUE
);
1461 co
= assemble(c
, 1);
1462 compiler_exit_scope(c
);
1466 compiler_make_closure(c
, co
, 0);
1469 ADDOP_I(c
, CALL_FUNCTION
, 0);
1470 ADDOP(c
, BUILD_CLASS
);
1471 /* apply decorators */
1472 for (i
= 0; i
< asdl_seq_LEN(decos
); i
++) {
1473 ADDOP_I(c
, CALL_FUNCTION
, 1);
1475 if (!compiler_nameop(c
, s
->v
.ClassDef
.name
, Store
))
1481 compiler_ifexp(struct compiler
*c
, expr_ty e
)
1483 basicblock
*end
, *next
;
1485 assert(e
->kind
== IfExp_kind
);
1486 end
= compiler_new_block(c
);
1489 next
= compiler_new_block(c
);
1492 VISIT(c
, expr
, e
->v
.IfExp
.test
);
1493 ADDOP_JABS(c
, POP_JUMP_IF_FALSE
, next
);
1494 VISIT(c
, expr
, e
->v
.IfExp
.body
);
1495 ADDOP_JREL(c
, JUMP_FORWARD
, end
);
1496 compiler_use_next_block(c
, next
);
1497 VISIT(c
, expr
, e
->v
.IfExp
.orelse
);
1498 compiler_use_next_block(c
, end
);
1503 compiler_lambda(struct compiler
*c
, expr_ty e
)
1506 static identifier name
;
1507 arguments_ty args
= e
->v
.Lambda
.args
;
1508 assert(e
->kind
== Lambda_kind
);
1511 name
= PyString_InternFromString("<lambda>");
1517 VISIT_SEQ(c
, expr
, args
->defaults
);
1518 if (!compiler_enter_scope(c
, name
, (void *)e
, e
->lineno
))
1521 /* unpack nested arguments */
1522 compiler_arguments(c
, args
);
1524 c
->u
->u_argcount
= asdl_seq_LEN(args
->args
);
1525 VISIT_IN_SCOPE(c
, expr
, e
->v
.Lambda
.body
);
1526 if (c
->u
->u_ste
->ste_generator
) {
1527 ADDOP_IN_SCOPE(c
, POP_TOP
);
1530 ADDOP_IN_SCOPE(c
, RETURN_VALUE
);
1532 co
= assemble(c
, 1);
1533 compiler_exit_scope(c
);
1537 compiler_make_closure(c
, co
, asdl_seq_LEN(args
->defaults
));
1544 compiler_print(struct compiler
*c
, stmt_ty s
)
1549 assert(s
->kind
== Print_kind
);
1550 n
= asdl_seq_LEN(s
->v
.Print
.values
);
1552 if (s
->v
.Print
.dest
) {
1553 VISIT(c
, expr
, s
->v
.Print
.dest
);
1556 for (i
= 0; i
< n
; i
++) {
1557 expr_ty e
= (expr_ty
)asdl_seq_GET(s
->v
.Print
.values
, i
);
1562 ADDOP(c
, PRINT_ITEM_TO
);
1566 ADDOP(c
, PRINT_ITEM
);
1569 if (s
->v
.Print
.nl
) {
1571 ADDOP(c
, PRINT_NEWLINE_TO
)
1573 ADDOP(c
, PRINT_NEWLINE
)
1581 compiler_if(struct compiler
*c
, stmt_ty s
)
1583 basicblock
*end
, *next
;
1585 assert(s
->kind
== If_kind
);
1586 end
= compiler_new_block(c
);
1590 constant
= expr_constant(s
->v
.If
.test
);
1591 /* constant = 0: "if 0"
1592 * constant = 1: "if 1", "if 2", ...
1593 * constant = -1: rest */
1594 if (constant
== 0) {
1596 VISIT_SEQ(c
, stmt
, s
->v
.If
.orelse
);
1597 } else if (constant
== 1) {
1598 VISIT_SEQ(c
, stmt
, s
->v
.If
.body
);
1600 if (s
->v
.If
.orelse
) {
1601 next
= compiler_new_block(c
);
1607 VISIT(c
, expr
, s
->v
.If
.test
);
1608 ADDOP_JABS(c
, POP_JUMP_IF_FALSE
, next
);
1609 VISIT_SEQ(c
, stmt
, s
->v
.If
.body
);
1610 ADDOP_JREL(c
, JUMP_FORWARD
, end
);
1611 if (s
->v
.If
.orelse
) {
1612 compiler_use_next_block(c
, next
);
1613 VISIT_SEQ(c
, stmt
, s
->v
.If
.orelse
);
1616 compiler_use_next_block(c
, end
);
1621 compiler_for(struct compiler
*c
, stmt_ty s
)
1623 basicblock
*start
, *cleanup
, *end
;
1625 start
= compiler_new_block(c
);
1626 cleanup
= compiler_new_block(c
);
1627 end
= compiler_new_block(c
);
1628 if (start
== NULL
|| end
== NULL
|| cleanup
== NULL
)
1630 ADDOP_JREL(c
, SETUP_LOOP
, end
);
1631 if (!compiler_push_fblock(c
, LOOP
, start
))
1633 VISIT(c
, expr
, s
->v
.For
.iter
);
1635 compiler_use_next_block(c
, start
);
1636 ADDOP_JREL(c
, FOR_ITER
, cleanup
);
1637 VISIT(c
, expr
, s
->v
.For
.target
);
1638 VISIT_SEQ(c
, stmt
, s
->v
.For
.body
);
1639 ADDOP_JABS(c
, JUMP_ABSOLUTE
, start
);
1640 compiler_use_next_block(c
, cleanup
);
1641 ADDOP(c
, POP_BLOCK
);
1642 compiler_pop_fblock(c
, LOOP
, start
);
1643 VISIT_SEQ(c
, stmt
, s
->v
.For
.orelse
);
1644 compiler_use_next_block(c
, end
);
1649 compiler_while(struct compiler
*c
, stmt_ty s
)
1651 basicblock
*loop
, *orelse
, *end
, *anchor
= NULL
;
1652 int constant
= expr_constant(s
->v
.While
.test
);
1654 if (constant
== 0) {
1655 if (s
->v
.While
.orelse
)
1656 VISIT_SEQ(c
, stmt
, s
->v
.While
.orelse
);
1659 loop
= compiler_new_block(c
);
1660 end
= compiler_new_block(c
);
1661 if (constant
== -1) {
1662 anchor
= compiler_new_block(c
);
1666 if (loop
== NULL
|| end
== NULL
)
1668 if (s
->v
.While
.orelse
) {
1669 orelse
= compiler_new_block(c
);
1676 ADDOP_JREL(c
, SETUP_LOOP
, end
);
1677 compiler_use_next_block(c
, loop
);
1678 if (!compiler_push_fblock(c
, LOOP
, loop
))
1680 if (constant
== -1) {
1681 VISIT(c
, expr
, s
->v
.While
.test
);
1682 ADDOP_JABS(c
, POP_JUMP_IF_FALSE
, anchor
);
1684 VISIT_SEQ(c
, stmt
, s
->v
.While
.body
);
1685 ADDOP_JABS(c
, JUMP_ABSOLUTE
, loop
);
1687 /* XXX should the two POP instructions be in a separate block
1688 if there is no else clause ?
1691 if (constant
== -1) {
1692 compiler_use_next_block(c
, anchor
);
1693 ADDOP(c
, POP_BLOCK
);
1695 compiler_pop_fblock(c
, LOOP
, loop
);
1696 if (orelse
!= NULL
) /* what if orelse is just pass? */
1697 VISIT_SEQ(c
, stmt
, s
->v
.While
.orelse
);
1698 compiler_use_next_block(c
, end
);
1704 compiler_continue(struct compiler
*c
)
1706 static const char LOOP_ERROR_MSG
[] = "'continue' not properly in loop";
1707 static const char IN_FINALLY_ERROR_MSG
[] =
1708 "'continue' not supported inside 'finally' clause";
1711 if (!c
->u
->u_nfblocks
)
1712 return compiler_error(c
, LOOP_ERROR_MSG
);
1713 i
= c
->u
->u_nfblocks
- 1;
1714 switch (c
->u
->u_fblock
[i
].fb_type
) {
1716 ADDOP_JABS(c
, JUMP_ABSOLUTE
, c
->u
->u_fblock
[i
].fb_block
);
1720 while (--i
>= 0 && c
->u
->u_fblock
[i
].fb_type
!= LOOP
) {
1721 /* Prevent continue anywhere under a finally
1722 even if hidden in a sub-try or except. */
1723 if (c
->u
->u_fblock
[i
].fb_type
== FINALLY_END
)
1724 return compiler_error(c
, IN_FINALLY_ERROR_MSG
);
1727 return compiler_error(c
, LOOP_ERROR_MSG
);
1728 ADDOP_JABS(c
, CONTINUE_LOOP
, c
->u
->u_fblock
[i
].fb_block
);
1731 return compiler_error(c
, IN_FINALLY_ERROR_MSG
);
1737 /* Code generated for "try: <body> finally: <finalbody>" is as follows:
1743 L: <code for finalbody>
1746 The special instructions use the block stack. Each block
1747 stack entry contains the instruction that created it (here
1748 SETUP_FINALLY), the level of the value stack at the time the
1749 block stack entry was created, and a label (here L).
1752 Pushes the current value stack level and the label
1753 onto the block stack.
1755 Pops en entry from the block stack, and pops the value
1756 stack until its level is the same as indicated on the
1757 block stack. (The label is ignored.)
1759 Pops a variable number of entries from the *value* stack
1760 and re-raises the exception they specify. The number of
1761 entries popped depends on the (pseudo) exception type.
1763 The block stack is unwound when an exception is raised:
1764 when a SETUP_FINALLY entry is found, the exception is pushed
1765 onto the value stack (and the exception condition is cleared),
1766 and the interpreter jumps to the label gotten from the block
1771 compiler_try_finally(struct compiler
*c
, stmt_ty s
)
1773 basicblock
*body
, *end
;
1774 body
= compiler_new_block(c
);
1775 end
= compiler_new_block(c
);
1776 if (body
== NULL
|| end
== NULL
)
1779 ADDOP_JREL(c
, SETUP_FINALLY
, end
);
1780 compiler_use_next_block(c
, body
);
1781 if (!compiler_push_fblock(c
, FINALLY_TRY
, body
))
1783 VISIT_SEQ(c
, stmt
, s
->v
.TryFinally
.body
);
1784 ADDOP(c
, POP_BLOCK
);
1785 compiler_pop_fblock(c
, FINALLY_TRY
, body
);
1787 ADDOP_O(c
, LOAD_CONST
, Py_None
, consts
);
1788 compiler_use_next_block(c
, end
);
1789 if (!compiler_push_fblock(c
, FINALLY_END
, end
))
1791 VISIT_SEQ(c
, stmt
, s
->v
.TryFinally
.finalbody
);
1792 ADDOP(c
, END_FINALLY
);
1793 compiler_pop_fblock(c
, FINALLY_END
, end
);
1799 Code generated for "try: S except E1, V1: S1 except E2, V2: S2 ...":
1800 (The contents of the value stack is shown in [], with the top
1801 at the right; 'tb' is trace-back info, 'val' the exception's
1802 associated value, and 'exc' the exception.)
1804 Value stack Label Instruction Argument
1810 [tb, val, exc] L1: DUP )
1811 [tb, val, exc, exc] <evaluate E1> )
1812 [tb, val, exc, exc, E1] COMPARE_OP EXC_MATCH ) only if E1
1813 [tb, val, exc, 1-or-0] POP_JUMP_IF_FALSE L2 )
1815 [tb, val] <assign to V1> (or POP if no V1)
1820 [tb, val, exc] L2: DUP
1821 .............................etc.......................
1823 [tb, val, exc] Ln+1: END_FINALLY # re-raise exception
1825 [] L0: <next statement>
1827 Of course, parts are not generated if Vi or Ei is not present.
1830 compiler_try_except(struct compiler
*c
, stmt_ty s
)
1832 basicblock
*body
, *orelse
, *except
, *end
;
1835 body
= compiler_new_block(c
);
1836 except
= compiler_new_block(c
);
1837 orelse
= compiler_new_block(c
);
1838 end
= compiler_new_block(c
);
1839 if (body
== NULL
|| except
== NULL
|| orelse
== NULL
|| end
== NULL
)
1841 ADDOP_JREL(c
, SETUP_EXCEPT
, except
);
1842 compiler_use_next_block(c
, body
);
1843 if (!compiler_push_fblock(c
, EXCEPT
, body
))
1845 VISIT_SEQ(c
, stmt
, s
->v
.TryExcept
.body
);
1846 ADDOP(c
, POP_BLOCK
);
1847 compiler_pop_fblock(c
, EXCEPT
, body
);
1848 ADDOP_JREL(c
, JUMP_FORWARD
, orelse
);
1849 n
= asdl_seq_LEN(s
->v
.TryExcept
.handlers
);
1850 compiler_use_next_block(c
, except
);
1851 for (i
= 0; i
< n
; i
++) {
1852 excepthandler_ty handler
= (excepthandler_ty
)asdl_seq_GET(
1853 s
->v
.TryExcept
.handlers
, i
);
1854 if (!handler
->v
.ExceptHandler
.type
&& i
< n
-1)
1855 return compiler_error(c
, "default 'except:' must be last");
1856 c
->u
->u_lineno_set
= false;
1857 c
->u
->u_lineno
= handler
->lineno
;
1858 except
= compiler_new_block(c
);
1861 if (handler
->v
.ExceptHandler
.type
) {
1863 VISIT(c
, expr
, handler
->v
.ExceptHandler
.type
);
1864 ADDOP_I(c
, COMPARE_OP
, PyCmp_EXC_MATCH
);
1865 ADDOP_JABS(c
, POP_JUMP_IF_FALSE
, except
);
1868 if (handler
->v
.ExceptHandler
.name
) {
1869 VISIT(c
, expr
, handler
->v
.ExceptHandler
.name
);
1875 VISIT_SEQ(c
, stmt
, handler
->v
.ExceptHandler
.body
);
1876 ADDOP_JREL(c
, JUMP_FORWARD
, end
);
1877 compiler_use_next_block(c
, except
);
1879 ADDOP(c
, END_FINALLY
);
1880 compiler_use_next_block(c
, orelse
);
1881 VISIT_SEQ(c
, stmt
, s
->v
.TryExcept
.orelse
);
1882 compiler_use_next_block(c
, end
);
1887 compiler_import_as(struct compiler
*c
, identifier name
, identifier asname
)
1889 /* The IMPORT_NAME opcode was already generated. This function
1890 merely needs to bind the result to a name.
1892 If there is a dot in name, we need to split it and emit a
1893 LOAD_ATTR for each name.
1895 const char *src
= PyString_AS_STRING(name
);
1896 const char *dot
= strchr(src
, '.');
1898 /* Consume the base module name to get the first attribute */
1901 /* NB src is only defined when dot != NULL */
1903 dot
= strchr(src
, '.');
1904 attr
= PyString_FromStringAndSize(src
,
1905 dot
? dot
- src
: strlen(src
));
1908 ADDOP_O(c
, LOAD_ATTR
, attr
, names
);
1913 return compiler_nameop(c
, asname
, Store
);
1917 compiler_import(struct compiler
*c
, stmt_ty s
)
1919 /* The Import node stores a module name like a.b.c as a single
1920 string. This is convenient for all cases except
1922 where we need to parse that string to extract the individual
1924 XXX Perhaps change the representation to make this case simpler?
1926 int i
, n
= asdl_seq_LEN(s
->v
.Import
.names
);
1928 for (i
= 0; i
< n
; i
++) {
1929 alias_ty alias
= (alias_ty
)asdl_seq_GET(s
->v
.Import
.names
, i
);
1933 if (c
->c_flags
&& (c
->c_flags
->cf_flags
& CO_FUTURE_ABSOLUTE_IMPORT
))
1934 level
= PyInt_FromLong(0);
1936 level
= PyInt_FromLong(-1);
1941 ADDOP_O(c
, LOAD_CONST
, level
, consts
);
1943 ADDOP_O(c
, LOAD_CONST
, Py_None
, consts
);
1944 ADDOP_NAME(c
, IMPORT_NAME
, alias
->name
, names
);
1946 if (alias
->asname
) {
1947 r
= compiler_import_as(c
, alias
->name
, alias
->asname
);
1952 identifier tmp
= alias
->name
;
1953 const char *base
= PyString_AS_STRING(alias
->name
);
1954 char *dot
= strchr(base
, '.');
1956 tmp
= PyString_FromStringAndSize(base
,
1958 r
= compiler_nameop(c
, tmp
, Store
);
1970 compiler_from_import(struct compiler
*c
, stmt_ty s
)
1972 int i
, n
= asdl_seq_LEN(s
->v
.ImportFrom
.names
);
1974 PyObject
*names
= PyTuple_New(n
);
1976 static PyObject
*empty_string
;
1978 if (!empty_string
) {
1979 empty_string
= PyString_FromString("");
1987 if (s
->v
.ImportFrom
.level
== 0 && c
->c_flags
&&
1988 !(c
->c_flags
->cf_flags
& CO_FUTURE_ABSOLUTE_IMPORT
))
1989 level
= PyInt_FromLong(-1);
1991 level
= PyInt_FromLong(s
->v
.ImportFrom
.level
);
1998 /* build up the names */
1999 for (i
= 0; i
< n
; i
++) {
2000 alias_ty alias
= (alias_ty
)asdl_seq_GET(s
->v
.ImportFrom
.names
, i
);
2001 Py_INCREF(alias
->name
);
2002 PyTuple_SET_ITEM(names
, i
, alias
->name
);
2005 if (s
->lineno
> c
->c_future
->ff_lineno
&& s
->v
.ImportFrom
.module
&&
2006 !strcmp(PyString_AS_STRING(s
->v
.ImportFrom
.module
), "__future__")) {
2009 return compiler_error(c
, "from __future__ imports must occur "
2010 "at the beginning of the file");
2013 ADDOP_O(c
, LOAD_CONST
, level
, consts
);
2015 ADDOP_O(c
, LOAD_CONST
, names
, consts
);
2017 if (s
->v
.ImportFrom
.module
) {
2018 ADDOP_NAME(c
, IMPORT_NAME
, s
->v
.ImportFrom
.module
, names
);
2021 ADDOP_NAME(c
, IMPORT_NAME
, empty_string
, names
);
2023 for (i
= 0; i
< n
; i
++) {
2024 alias_ty alias
= (alias_ty
)asdl_seq_GET(s
->v
.ImportFrom
.names
, i
);
2025 identifier store_name
;
2027 if (i
== 0 && *PyString_AS_STRING(alias
->name
) == '*') {
2029 ADDOP(c
, IMPORT_STAR
);
2033 ADDOP_NAME(c
, IMPORT_FROM
, alias
->name
, names
);
2034 store_name
= alias
->name
;
2036 store_name
= alias
->asname
;
2038 if (!compiler_nameop(c
, store_name
, Store
)) {
2043 /* remove imported module */
2049 compiler_assert(struct compiler
*c
, stmt_ty s
)
2051 static PyObject
*assertion_error
= NULL
;
2054 if (Py_OptimizeFlag
)
2056 if (assertion_error
== NULL
) {
2057 assertion_error
= PyString_InternFromString("AssertionError");
2058 if (assertion_error
== NULL
)
2061 if (s
->v
.Assert
.test
->kind
== Tuple_kind
&&
2062 asdl_seq_LEN(s
->v
.Assert
.test
->v
.Tuple
.elts
) > 0) {
2064 "assertion is always true, perhaps remove parentheses?";
2065 if (PyErr_WarnExplicit(PyExc_SyntaxWarning
, msg
, c
->c_filename
,
2066 c
->u
->u_lineno
, NULL
, NULL
) == -1)
2069 VISIT(c
, expr
, s
->v
.Assert
.test
);
2070 end
= compiler_new_block(c
);
2073 ADDOP_JABS(c
, POP_JUMP_IF_TRUE
, end
);
2074 ADDOP_O(c
, LOAD_GLOBAL
, assertion_error
, names
);
2075 if (s
->v
.Assert
.msg
) {
2076 VISIT(c
, expr
, s
->v
.Assert
.msg
);
2077 ADDOP_I(c
, RAISE_VARARGS
, 2);
2080 ADDOP_I(c
, RAISE_VARARGS
, 1);
2082 compiler_use_next_block(c
, end
);
2087 compiler_visit_stmt(struct compiler
*c
, stmt_ty s
)
2091 /* Always assign a lineno to the next instruction for a stmt. */
2092 c
->u
->u_lineno
= s
->lineno
;
2093 c
->u
->u_lineno_set
= false;
2096 case FunctionDef_kind
:
2097 return compiler_function(c
, s
);
2099 return compiler_class(c
, s
);
2101 if (c
->u
->u_ste
->ste_type
!= FunctionBlock
)
2102 return compiler_error(c
, "'return' outside function");
2103 if (s
->v
.Return
.value
) {
2104 VISIT(c
, expr
, s
->v
.Return
.value
);
2107 ADDOP_O(c
, LOAD_CONST
, Py_None
, consts
);
2108 ADDOP(c
, RETURN_VALUE
);
2111 VISIT_SEQ(c
, expr
, s
->v
.Delete
.targets
)
2114 n
= asdl_seq_LEN(s
->v
.Assign
.targets
);
2115 VISIT(c
, expr
, s
->v
.Assign
.value
);
2116 for (i
= 0; i
< n
; i
++) {
2120 (expr_ty
)asdl_seq_GET(s
->v
.Assign
.targets
, i
));
2123 case AugAssign_kind
:
2124 return compiler_augassign(c
, s
);
2126 return compiler_print(c
, s
);
2128 return compiler_for(c
, s
);
2130 return compiler_while(c
, s
);
2132 return compiler_if(c
, s
);
2135 if (s
->v
.Raise
.type
) {
2136 VISIT(c
, expr
, s
->v
.Raise
.type
);
2138 if (s
->v
.Raise
.inst
) {
2139 VISIT(c
, expr
, s
->v
.Raise
.inst
);
2141 if (s
->v
.Raise
.tback
) {
2142 VISIT(c
, expr
, s
->v
.Raise
.tback
);
2147 ADDOP_I(c
, RAISE_VARARGS
, n
);
2149 case TryExcept_kind
:
2150 return compiler_try_except(c
, s
);
2151 case TryFinally_kind
:
2152 return compiler_try_finally(c
, s
);
2154 return compiler_assert(c
, s
);
2156 return compiler_import(c
, s
);
2157 case ImportFrom_kind
:
2158 return compiler_from_import(c
, s
);
2160 VISIT(c
, expr
, s
->v
.Exec
.body
);
2161 if (s
->v
.Exec
.globals
) {
2162 VISIT(c
, expr
, s
->v
.Exec
.globals
);
2163 if (s
->v
.Exec
.locals
) {
2164 VISIT(c
, expr
, s
->v
.Exec
.locals
);
2169 ADDOP_O(c
, LOAD_CONST
, Py_None
, consts
);
2172 ADDOP(c
, EXEC_STMT
);
2177 if (c
->c_interactive
&& c
->c_nestlevel
<= 1) {
2178 VISIT(c
, expr
, s
->v
.Expr
.value
);
2179 ADDOP(c
, PRINT_EXPR
);
2181 else if (s
->v
.Expr
.value
->kind
!= Str_kind
&&
2182 s
->v
.Expr
.value
->kind
!= Num_kind
) {
2183 VISIT(c
, expr
, s
->v
.Expr
.value
);
2190 if (!compiler_in_loop(c
))
2191 return compiler_error(c
, "'break' outside loop");
2192 ADDOP(c
, BREAK_LOOP
);
2195 return compiler_continue(c
);
2197 return compiler_with(c
, s
);
2203 unaryop(unaryop_ty op
)
2207 return UNARY_INVERT
;
2211 return UNARY_POSITIVE
;
2213 return UNARY_NEGATIVE
;
2215 PyErr_Format(PyExc_SystemError
,
2216 "unary op %d should not be possible", op
);
2222 binop(struct compiler
*c
, operator_ty op
)
2228 return BINARY_SUBTRACT
;
2230 return BINARY_MULTIPLY
;
2232 if (c
->c_flags
&& c
->c_flags
->cf_flags
& CO_FUTURE_DIVISION
)
2233 return BINARY_TRUE_DIVIDE
;
2235 return BINARY_DIVIDE
;
2237 return BINARY_MODULO
;
2239 return BINARY_POWER
;
2241 return BINARY_LSHIFT
;
2243 return BINARY_RSHIFT
;
2251 return BINARY_FLOOR_DIVIDE
;
2253 PyErr_Format(PyExc_SystemError
,
2254 "binary op %d should not be possible", op
);
2278 return PyCmp_IS_NOT
;
2282 return PyCmp_NOT_IN
;
2289 inplace_binop(struct compiler
*c
, operator_ty op
)
2295 return INPLACE_SUBTRACT
;
2297 return INPLACE_MULTIPLY
;
2299 if (c
->c_flags
&& c
->c_flags
->cf_flags
& CO_FUTURE_DIVISION
)
2300 return INPLACE_TRUE_DIVIDE
;
2302 return INPLACE_DIVIDE
;
2304 return INPLACE_MODULO
;
2306 return INPLACE_POWER
;
2308 return INPLACE_LSHIFT
;
2310 return INPLACE_RSHIFT
;
2318 return INPLACE_FLOOR_DIVIDE
;
2320 PyErr_Format(PyExc_SystemError
,
2321 "inplace binary op %d should not be possible", op
);
2327 compiler_nameop(struct compiler
*c
, identifier name
, expr_context_ty ctx
)
2330 enum { OP_FAST
, OP_GLOBAL
, OP_DEREF
, OP_NAME
} optype
;
2332 PyObject
*dict
= c
->u
->u_names
;
2334 /* XXX AugStore isn't used anywhere! */
2336 mangled
= _Py_Mangle(c
->u
->u_private
, name
);
2342 scope
= PyST_GetScope(c
->u
->u_ste
, mangled
);
2345 dict
= c
->u
->u_freevars
;
2349 dict
= c
->u
->u_cellvars
;
2353 if (c
->u
->u_ste
->ste_type
== FunctionBlock
)
2356 case GLOBAL_IMPLICIT
:
2357 if (c
->u
->u_ste
->ste_type
== FunctionBlock
&&
2358 !c
->u
->u_ste
->ste_unoptimized
)
2361 case GLOBAL_EXPLICIT
:
2365 /* scope can be 0 */
2369 /* XXX Leave assert here, but handle __doc__ and the like better */
2370 assert(scope
|| PyString_AS_STRING(name
)[0] == '_');
2375 case Load
: op
= LOAD_DEREF
; break;
2376 case Store
: op
= STORE_DEREF
; break;
2381 PyErr_Format(PyExc_SyntaxError
,
2382 "can not delete variable '%s' referenced "
2384 PyString_AS_STRING(name
));
2389 PyErr_SetString(PyExc_SystemError
,
2390 "param invalid for deref variable");
2396 case Load
: op
= LOAD_FAST
; break;
2397 case Store
: op
= STORE_FAST
; break;
2398 case Del
: op
= DELETE_FAST
; break;
2404 PyErr_SetString(PyExc_SystemError
,
2405 "param invalid for local variable");
2408 ADDOP_O(c
, op
, mangled
, varnames
);
2413 case Load
: op
= LOAD_GLOBAL
; break;
2414 case Store
: op
= STORE_GLOBAL
; break;
2415 case Del
: op
= DELETE_GLOBAL
; break;
2421 PyErr_SetString(PyExc_SystemError
,
2422 "param invalid for global variable");
2428 case Load
: op
= LOAD_NAME
; break;
2429 case Store
: op
= STORE_NAME
; break;
2430 case Del
: op
= DELETE_NAME
; break;
2436 PyErr_SetString(PyExc_SystemError
,
2437 "param invalid for name variable");
2444 arg
= compiler_add_o(c
, dict
, mangled
);
2448 return compiler_addop_i(c
, op
, arg
);
2452 compiler_boolop(struct compiler
*c
, expr_ty e
)
2458 assert(e
->kind
== BoolOp_kind
);
2459 if (e
->v
.BoolOp
.op
== And
)
2460 jumpi
= JUMP_IF_FALSE_OR_POP
;
2462 jumpi
= JUMP_IF_TRUE_OR_POP
;
2463 end
= compiler_new_block(c
);
2466 s
= e
->v
.BoolOp
.values
;
2467 n
= asdl_seq_LEN(s
) - 1;
2469 for (i
= 0; i
< n
; ++i
) {
2470 VISIT(c
, expr
, (expr_ty
)asdl_seq_GET(s
, i
));
2471 ADDOP_JABS(c
, jumpi
, end
);
2473 VISIT(c
, expr
, (expr_ty
)asdl_seq_GET(s
, n
));
2474 compiler_use_next_block(c
, end
);
2479 compiler_list(struct compiler
*c
, expr_ty e
)
2481 int n
= asdl_seq_LEN(e
->v
.List
.elts
);
2482 if (e
->v
.List
.ctx
== Store
) {
2483 ADDOP_I(c
, UNPACK_SEQUENCE
, n
);
2485 VISIT_SEQ(c
, expr
, e
->v
.List
.elts
);
2486 if (e
->v
.List
.ctx
== Load
) {
2487 ADDOP_I(c
, BUILD_LIST
, n
);
2493 compiler_tuple(struct compiler
*c
, expr_ty e
)
2495 int n
= asdl_seq_LEN(e
->v
.Tuple
.elts
);
2496 if (e
->v
.Tuple
.ctx
== Store
) {
2497 ADDOP_I(c
, UNPACK_SEQUENCE
, n
);
2499 VISIT_SEQ(c
, expr
, e
->v
.Tuple
.elts
);
2500 if (e
->v
.Tuple
.ctx
== Load
) {
2501 ADDOP_I(c
, BUILD_TUPLE
, n
);
2507 compiler_compare(struct compiler
*c
, expr_ty e
)
2510 basicblock
*cleanup
= NULL
;
2512 /* XXX the logic can be cleaned up for 1 or multiple comparisons */
2513 VISIT(c
, expr
, e
->v
.Compare
.left
);
2514 n
= asdl_seq_LEN(e
->v
.Compare
.ops
);
2517 cleanup
= compiler_new_block(c
);
2518 if (cleanup
== NULL
)
2521 (expr_ty
)asdl_seq_GET(e
->v
.Compare
.comparators
, 0));
2523 for (i
= 1; i
< n
; i
++) {
2525 ADDOP(c
, ROT_THREE
);
2526 ADDOP_I(c
, COMPARE_OP
,
2527 cmpop((cmpop_ty
)(asdl_seq_GET(
2528 e
->v
.Compare
.ops
, i
- 1))));
2529 ADDOP_JABS(c
, JUMP_IF_FALSE_OR_POP
, cleanup
);
2533 (expr_ty
)asdl_seq_GET(e
->v
.Compare
.comparators
, i
));
2535 VISIT(c
, expr
, (expr_ty
)asdl_seq_GET(e
->v
.Compare
.comparators
, n
- 1));
2536 ADDOP_I(c
, COMPARE_OP
,
2537 cmpop((cmpop_ty
)(asdl_seq_GET(e
->v
.Compare
.ops
, n
- 1))));
2539 basicblock
*end
= compiler_new_block(c
);
2542 ADDOP_JREL(c
, JUMP_FORWARD
, end
);
2543 compiler_use_next_block(c
, cleanup
);
2546 compiler_use_next_block(c
, end
);
2552 compiler_call(struct compiler
*c
, expr_ty e
)
2556 VISIT(c
, expr
, e
->v
.Call
.func
);
2557 n
= asdl_seq_LEN(e
->v
.Call
.args
);
2558 VISIT_SEQ(c
, expr
, e
->v
.Call
.args
);
2559 if (e
->v
.Call
.keywords
) {
2560 VISIT_SEQ(c
, keyword
, e
->v
.Call
.keywords
);
2561 n
|= asdl_seq_LEN(e
->v
.Call
.keywords
) << 8;
2563 if (e
->v
.Call
.starargs
) {
2564 VISIT(c
, expr
, e
->v
.Call
.starargs
);
2567 if (e
->v
.Call
.kwargs
) {
2568 VISIT(c
, expr
, e
->v
.Call
.kwargs
);
2573 ADDOP_I(c
, CALL_FUNCTION
, n
);
2576 ADDOP_I(c
, CALL_FUNCTION_VAR
, n
);
2579 ADDOP_I(c
, CALL_FUNCTION_KW
, n
);
2582 ADDOP_I(c
, CALL_FUNCTION_VAR_KW
, n
);
2589 compiler_listcomp_generator(struct compiler
*c
, asdl_seq
*generators
,
2590 int gen_index
, expr_ty elt
)
2592 /* generate code for the iterator, then each of the ifs,
2593 and then write to the element */
2596 basicblock
*start
, *anchor
, *skip
, *if_cleanup
;
2599 start
= compiler_new_block(c
);
2600 skip
= compiler_new_block(c
);
2601 if_cleanup
= compiler_new_block(c
);
2602 anchor
= compiler_new_block(c
);
2604 if (start
== NULL
|| skip
== NULL
|| if_cleanup
== NULL
||
2608 l
= (comprehension_ty
)asdl_seq_GET(generators
, gen_index
);
2609 VISIT(c
, expr
, l
->iter
);
2611 compiler_use_next_block(c
, start
);
2612 ADDOP_JREL(c
, FOR_ITER
, anchor
);
2614 VISIT(c
, expr
, l
->target
);
2616 /* XXX this needs to be cleaned up...a lot! */
2617 n
= asdl_seq_LEN(l
->ifs
);
2618 for (i
= 0; i
< n
; i
++) {
2619 expr_ty e
= (expr_ty
)asdl_seq_GET(l
->ifs
, i
);
2621 ADDOP_JABS(c
, POP_JUMP_IF_FALSE
, if_cleanup
);
2625 if (++gen_index
< asdl_seq_LEN(generators
))
2626 if (!compiler_listcomp_generator(c
, generators
, gen_index
, elt
))
2629 /* only append after the last for generator */
2630 if (gen_index
>= asdl_seq_LEN(generators
)) {
2631 VISIT(c
, expr
, elt
);
2632 ADDOP_I(c
, LIST_APPEND
, gen_index
+1);
2634 compiler_use_next_block(c
, skip
);
2636 compiler_use_next_block(c
, if_cleanup
);
2637 ADDOP_JABS(c
, JUMP_ABSOLUTE
, start
);
2638 compiler_use_next_block(c
, anchor
);
2644 compiler_listcomp(struct compiler
*c
, expr_ty e
)
2646 assert(e
->kind
== ListComp_kind
);
2647 ADDOP_I(c
, BUILD_LIST
, 0);
2648 return compiler_listcomp_generator(c
, e
->v
.ListComp
.generators
, 0,
2653 compiler_genexp_generator(struct compiler
*c
,
2654 asdl_seq
*generators
, int gen_index
,
2657 /* generate code for the iterator, then each of the ifs,
2658 and then write to the element */
2660 comprehension_ty ge
;
2661 basicblock
*start
, *anchor
, *skip
, *if_cleanup
, *end
;
2664 start
= compiler_new_block(c
);
2665 skip
= compiler_new_block(c
);
2666 if_cleanup
= compiler_new_block(c
);
2667 anchor
= compiler_new_block(c
);
2668 end
= compiler_new_block(c
);
2670 if (start
== NULL
|| skip
== NULL
|| if_cleanup
== NULL
||
2671 anchor
== NULL
|| end
== NULL
)
2674 ge
= (comprehension_ty
)asdl_seq_GET(generators
, gen_index
);
2675 ADDOP_JREL(c
, SETUP_LOOP
, end
);
2676 if (!compiler_push_fblock(c
, LOOP
, start
))
2679 if (gen_index
== 0) {
2680 /* Receive outermost iter as an implicit argument */
2681 c
->u
->u_argcount
= 1;
2682 ADDOP_I(c
, LOAD_FAST
, 0);
2685 /* Sub-iter - calculate on the fly */
2686 VISIT(c
, expr
, ge
->iter
);
2689 compiler_use_next_block(c
, start
);
2690 ADDOP_JREL(c
, FOR_ITER
, anchor
);
2692 VISIT(c
, expr
, ge
->target
);
2694 /* XXX this needs to be cleaned up...a lot! */
2695 n
= asdl_seq_LEN(ge
->ifs
);
2696 for (i
= 0; i
< n
; i
++) {
2697 expr_ty e
= (expr_ty
)asdl_seq_GET(ge
->ifs
, i
);
2699 ADDOP_JABS(c
, POP_JUMP_IF_FALSE
, if_cleanup
);
2703 if (++gen_index
< asdl_seq_LEN(generators
))
2704 if (!compiler_genexp_generator(c
, generators
, gen_index
, elt
))
2707 /* only append after the last 'for' generator */
2708 if (gen_index
>= asdl_seq_LEN(generators
)) {
2709 VISIT(c
, expr
, elt
);
2710 ADDOP(c
, YIELD_VALUE
);
2713 compiler_use_next_block(c
, skip
);
2715 compiler_use_next_block(c
, if_cleanup
);
2716 ADDOP_JABS(c
, JUMP_ABSOLUTE
, start
);
2717 compiler_use_next_block(c
, anchor
);
2718 ADDOP(c
, POP_BLOCK
);
2719 compiler_pop_fblock(c
, LOOP
, start
);
2720 compiler_use_next_block(c
, end
);
2726 compiler_genexp(struct compiler
*c
, expr_ty e
)
2728 static identifier name
;
2730 expr_ty outermost_iter
= ((comprehension_ty
)
2731 (asdl_seq_GET(e
->v
.GeneratorExp
.generators
,
2735 name
= PyString_FromString("<genexpr>");
2740 if (!compiler_enter_scope(c
, name
, (void *)e
, e
->lineno
))
2742 compiler_genexp_generator(c
, e
->v
.GeneratorExp
.generators
, 0,
2743 e
->v
.GeneratorExp
.elt
);
2744 co
= assemble(c
, 1);
2745 compiler_exit_scope(c
);
2749 compiler_make_closure(c
, co
, 0);
2752 VISIT(c
, expr
, outermost_iter
);
2754 ADDOP_I(c
, CALL_FUNCTION
, 1);
2760 compiler_visit_keyword(struct compiler
*c
, keyword_ty k
)
2762 ADDOP_O(c
, LOAD_CONST
, k
->arg
, consts
);
2763 VISIT(c
, expr
, k
->value
);
2767 /* Test whether expression is constant. For constants, report
2768 whether they are true or false.
2770 Return values: 1 for true, 0 for false, -1 for non-constant.
2774 expr_constant(expr_ty e
)
2778 return PyObject_IsTrue(e
->v
.Num
.n
);
2780 return PyObject_IsTrue(e
->v
.Str
.s
);
2782 /* __debug__ is not assignable, so we can optimize
2783 * it away in if and while statements */
2784 if (strcmp(PyString_AS_STRING(e
->v
.Name
.id
),
2786 return ! Py_OptimizeFlag
;
2794 Implements the with statement from PEP 343.
2796 The semantics outlined in that PEP are as follows:
2801 It is implemented roughly as:
2804 exit = context.__exit__ # not calling it
2805 value = context.__enter__()
2807 VAR = value # if VAR present in the syntax
2810 if an exception was raised:
2811 exc = copy of (exception, instance, traceback)
2813 exc = (None, None, None)
2817 compiler_with(struct compiler
*c
, stmt_ty s
)
2819 basicblock
*block
, *finally
;
2821 assert(s
->kind
== With_kind
);
2823 block
= compiler_new_block(c
);
2824 finally
= compiler_new_block(c
);
2825 if (!block
|| !finally
)
2829 VISIT(c
, expr
, s
->v
.With
.context_expr
);
2830 ADDOP_JREL(c
, SETUP_WITH
, finally
);
2832 /* SETUP_WITH pushes a finally block. */
2833 compiler_use_next_block(c
, block
);
2834 if (!compiler_push_fblock(c
, FINALLY_TRY
, block
)) {
2838 if (s
->v
.With
.optional_vars
) {
2839 VISIT(c
, expr
, s
->v
.With
.optional_vars
);
2842 /* Discard result from context.__enter__() */
2847 VISIT_SEQ(c
, stmt
, s
->v
.With
.body
);
2849 /* End of try block; start the finally block */
2850 ADDOP(c
, POP_BLOCK
);
2851 compiler_pop_fblock(c
, FINALLY_TRY
, block
);
2853 ADDOP_O(c
, LOAD_CONST
, Py_None
, consts
);
2854 compiler_use_next_block(c
, finally
);
2855 if (!compiler_push_fblock(c
, FINALLY_END
, finally
))
2858 /* Finally block starts; context.__exit__ is on the stack under
2859 the exception or return information. Just issue our magic
2861 ADDOP(c
, WITH_CLEANUP
);
2863 /* Finally block ends. */
2864 ADDOP(c
, END_FINALLY
);
2865 compiler_pop_fblock(c
, FINALLY_END
, finally
);
2870 compiler_visit_expr(struct compiler
*c
, expr_ty e
)
2874 /* If expr e has a different line number than the last expr/stmt,
2875 set a new line number for the next instruction.
2877 if (e
->lineno
> c
->u
->u_lineno
) {
2878 c
->u
->u_lineno
= e
->lineno
;
2879 c
->u
->u_lineno_set
= false;
2883 return compiler_boolop(c
, e
);
2885 VISIT(c
, expr
, e
->v
.BinOp
.left
);
2886 VISIT(c
, expr
, e
->v
.BinOp
.right
);
2887 ADDOP(c
, binop(c
, e
->v
.BinOp
.op
));
2890 VISIT(c
, expr
, e
->v
.UnaryOp
.operand
);
2891 ADDOP(c
, unaryop(e
->v
.UnaryOp
.op
));
2894 return compiler_lambda(c
, e
);
2896 return compiler_ifexp(c
, e
);
2898 n
= asdl_seq_LEN(e
->v
.Dict
.values
);
2899 ADDOP_I(c
, BUILD_MAP
, (n
>0xFFFF ? 0xFFFF : n
));
2900 for (i
= 0; i
< n
; i
++) {
2902 (expr_ty
)asdl_seq_GET(e
->v
.Dict
.values
, i
));
2904 (expr_ty
)asdl_seq_GET(e
->v
.Dict
.keys
, i
));
2905 ADDOP(c
, STORE_MAP
);
2909 return compiler_listcomp(c
, e
);
2910 case GeneratorExp_kind
:
2911 return compiler_genexp(c
, e
);
2913 if (c
->u
->u_ste
->ste_type
!= FunctionBlock
)
2914 return compiler_error(c
, "'yield' outside function");
2915 if (e
->v
.Yield
.value
) {
2916 VISIT(c
, expr
, e
->v
.Yield
.value
);
2919 ADDOP_O(c
, LOAD_CONST
, Py_None
, consts
);
2921 ADDOP(c
, YIELD_VALUE
);
2924 return compiler_compare(c
, e
);
2926 return compiler_call(c
, e
);
2928 VISIT(c
, expr
, e
->v
.Repr
.value
);
2929 ADDOP(c
, UNARY_CONVERT
);
2932 ADDOP_O(c
, LOAD_CONST
, e
->v
.Num
.n
, consts
);
2935 ADDOP_O(c
, LOAD_CONST
, e
->v
.Str
.s
, consts
);
2937 /* The following exprs can be assignment targets. */
2938 case Attribute_kind
:
2939 if (e
->v
.Attribute
.ctx
!= AugStore
)
2940 VISIT(c
, expr
, e
->v
.Attribute
.value
);
2941 switch (e
->v
.Attribute
.ctx
) {
2944 /* Fall through to load */
2946 ADDOP_NAME(c
, LOAD_ATTR
, e
->v
.Attribute
.attr
, names
);
2950 /* Fall through to save */
2952 ADDOP_NAME(c
, STORE_ATTR
, e
->v
.Attribute
.attr
, names
);
2955 ADDOP_NAME(c
, DELETE_ATTR
, e
->v
.Attribute
.attr
, names
);
2959 PyErr_SetString(PyExc_SystemError
,
2960 "param invalid in attribute expression");
2964 case Subscript_kind
:
2965 switch (e
->v
.Subscript
.ctx
) {
2967 VISIT(c
, expr
, e
->v
.Subscript
.value
);
2968 VISIT_SLICE(c
, e
->v
.Subscript
.slice
, AugLoad
);
2971 VISIT(c
, expr
, e
->v
.Subscript
.value
);
2972 VISIT_SLICE(c
, e
->v
.Subscript
.slice
, Load
);
2975 VISIT_SLICE(c
, e
->v
.Subscript
.slice
, AugStore
);
2978 VISIT(c
, expr
, e
->v
.Subscript
.value
);
2979 VISIT_SLICE(c
, e
->v
.Subscript
.slice
, Store
);
2982 VISIT(c
, expr
, e
->v
.Subscript
.value
);
2983 VISIT_SLICE(c
, e
->v
.Subscript
.slice
, Del
);
2987 PyErr_SetString(PyExc_SystemError
,
2988 "param invalid in subscript expression");
2993 return compiler_nameop(c
, e
->v
.Name
.id
, e
->v
.Name
.ctx
);
2994 /* child nodes of List and Tuple will have expr_context set */
2996 return compiler_list(c
, e
);
2998 return compiler_tuple(c
, e
);
3004 compiler_augassign(struct compiler
*c
, stmt_ty s
)
3006 expr_ty e
= s
->v
.AugAssign
.target
;
3009 assert(s
->kind
== AugAssign_kind
);
3012 case Attribute_kind
:
3013 auge
= Attribute(e
->v
.Attribute
.value
, e
->v
.Attribute
.attr
,
3014 AugLoad
, e
->lineno
, e
->col_offset
, c
->c_arena
);
3017 VISIT(c
, expr
, auge
);
3018 VISIT(c
, expr
, s
->v
.AugAssign
.value
);
3019 ADDOP(c
, inplace_binop(c
, s
->v
.AugAssign
.op
));
3020 auge
->v
.Attribute
.ctx
= AugStore
;
3021 VISIT(c
, expr
, auge
);
3023 case Subscript_kind
:
3024 auge
= Subscript(e
->v
.Subscript
.value
, e
->v
.Subscript
.slice
,
3025 AugLoad
, e
->lineno
, e
->col_offset
, c
->c_arena
);
3028 VISIT(c
, expr
, auge
);
3029 VISIT(c
, expr
, s
->v
.AugAssign
.value
);
3030 ADDOP(c
, inplace_binop(c
, s
->v
.AugAssign
.op
));
3031 auge
->v
.Subscript
.ctx
= AugStore
;
3032 VISIT(c
, expr
, auge
);
3035 if (!compiler_nameop(c
, e
->v
.Name
.id
, Load
))
3037 VISIT(c
, expr
, s
->v
.AugAssign
.value
);
3038 ADDOP(c
, inplace_binop(c
, s
->v
.AugAssign
.op
));
3039 return compiler_nameop(c
, e
->v
.Name
.id
, Store
);
3041 PyErr_Format(PyExc_SystemError
,
3042 "invalid node type (%d) for augmented assignment",
3050 compiler_push_fblock(struct compiler
*c
, enum fblocktype t
, basicblock
*b
)
3052 struct fblockinfo
*f
;
3053 if (c
->u
->u_nfblocks
>= CO_MAXBLOCKS
) {
3054 PyErr_SetString(PyExc_SystemError
,
3055 "too many statically nested blocks");
3058 f
= &c
->u
->u_fblock
[c
->u
->u_nfblocks
++];
3065 compiler_pop_fblock(struct compiler
*c
, enum fblocktype t
, basicblock
*b
)
3067 struct compiler_unit
*u
= c
->u
;
3068 assert(u
->u_nfblocks
> 0);
3070 assert(u
->u_fblock
[u
->u_nfblocks
].fb_type
== t
);
3071 assert(u
->u_fblock
[u
->u_nfblocks
].fb_block
== b
);
3075 compiler_in_loop(struct compiler
*c
) {
3077 struct compiler_unit
*u
= c
->u
;
3078 for (i
= 0; i
< u
->u_nfblocks
; ++i
) {
3079 if (u
->u_fblock
[i
].fb_type
== LOOP
)
3084 /* Raises a SyntaxError and returns 0.
3085 If something goes wrong, a different exception may be raised.
3089 compiler_error(struct compiler
*c
, const char *errstr
)
3092 PyObject
*u
= NULL
, *v
= NULL
;
3094 loc
= PyErr_ProgramText(c
->c_filename
, c
->u
->u_lineno
);
3099 u
= Py_BuildValue("(ziOO)", c
->c_filename
, c
->u
->u_lineno
,
3103 v
= Py_BuildValue("(zO)", errstr
, u
);
3106 PyErr_SetObject(PyExc_SyntaxError
, v
);
3115 compiler_handle_subscr(struct compiler
*c
, const char *kind
,
3116 expr_context_ty ctx
)
3120 /* XXX this code is duplicated */
3122 case AugLoad
: /* fall through to Load */
3123 case Load
: op
= BINARY_SUBSCR
; break;
3124 case AugStore
:/* fall through to Store */
3125 case Store
: op
= STORE_SUBSCR
; break;
3126 case Del
: op
= DELETE_SUBSCR
; break;
3128 PyErr_Format(PyExc_SystemError
,
3129 "invalid %s kind %d in subscript\n",
3133 if (ctx
== AugLoad
) {
3134 ADDOP_I(c
, DUP_TOPX
, 2);
3136 else if (ctx
== AugStore
) {
3137 ADDOP(c
, ROT_THREE
);
3144 compiler_slice(struct compiler
*c
, slice_ty s
, expr_context_ty ctx
)
3147 assert(s
->kind
== Slice_kind
);
3149 /* only handles the cases where BUILD_SLICE is emitted */
3150 if (s
->v
.Slice
.lower
) {
3151 VISIT(c
, expr
, s
->v
.Slice
.lower
);
3154 ADDOP_O(c
, LOAD_CONST
, Py_None
, consts
);
3157 if (s
->v
.Slice
.upper
) {
3158 VISIT(c
, expr
, s
->v
.Slice
.upper
);
3161 ADDOP_O(c
, LOAD_CONST
, Py_None
, consts
);
3164 if (s
->v
.Slice
.step
) {
3166 VISIT(c
, expr
, s
->v
.Slice
.step
);
3168 ADDOP_I(c
, BUILD_SLICE
, n
);
3173 compiler_simple_slice(struct compiler
*c
, slice_ty s
, expr_context_ty ctx
)
3175 int op
= 0, slice_offset
= 0, stack_count
= 0;
3177 assert(s
->v
.Slice
.step
== NULL
);
3178 if (s
->v
.Slice
.lower
) {
3181 if (ctx
!= AugStore
)
3182 VISIT(c
, expr
, s
->v
.Slice
.lower
);
3184 if (s
->v
.Slice
.upper
) {
3187 if (ctx
!= AugStore
)
3188 VISIT(c
, expr
, s
->v
.Slice
.upper
);
3191 if (ctx
== AugLoad
) {
3192 switch (stack_count
) {
3193 case 0: ADDOP(c
, DUP_TOP
); break;
3194 case 1: ADDOP_I(c
, DUP_TOPX
, 2); break;
3195 case 2: ADDOP_I(c
, DUP_TOPX
, 3); break;
3198 else if (ctx
== AugStore
) {
3199 switch (stack_count
) {
3200 case 0: ADDOP(c
, ROT_TWO
); break;
3201 case 1: ADDOP(c
, ROT_THREE
); break;
3202 case 2: ADDOP(c
, ROT_FOUR
); break;
3207 case AugLoad
: /* fall through to Load */
3208 case Load
: op
= SLICE
; break;
3209 case AugStore
:/* fall through to Store */
3210 case Store
: op
= STORE_SLICE
; break;
3211 case Del
: op
= DELETE_SLICE
; break;
3214 PyErr_SetString(PyExc_SystemError
,
3215 "param invalid in simple slice");
3219 ADDOP(c
, op
+ slice_offset
);
3224 compiler_visit_nested_slice(struct compiler
*c
, slice_ty s
,
3225 expr_context_ty ctx
)
3229 ADDOP_O(c
, LOAD_CONST
, Py_Ellipsis
, consts
);
3232 return compiler_slice(c
, s
, ctx
);
3234 VISIT(c
, expr
, s
->v
.Index
.value
);
3238 PyErr_SetString(PyExc_SystemError
,
3239 "extended slice invalid in nested slice");
3246 compiler_visit_slice(struct compiler
*c
, slice_ty s
, expr_context_ty ctx
)
3248 char * kindname
= NULL
;
3252 if (ctx
!= AugStore
) {
3253 VISIT(c
, expr
, s
->v
.Index
.value
);
3257 kindname
= "ellipsis";
3258 if (ctx
!= AugStore
) {
3259 ADDOP_O(c
, LOAD_CONST
, Py_Ellipsis
, consts
);
3264 if (!s
->v
.Slice
.step
)
3265 return compiler_simple_slice(c
, s
, ctx
);
3266 if (ctx
!= AugStore
) {
3267 if (!compiler_slice(c
, s
, ctx
))
3272 kindname
= "extended slice";
3273 if (ctx
!= AugStore
) {
3274 int i
, n
= asdl_seq_LEN(s
->v
.ExtSlice
.dims
);
3275 for (i
= 0; i
< n
; i
++) {
3276 slice_ty sub
= (slice_ty
)asdl_seq_GET(
3277 s
->v
.ExtSlice
.dims
, i
);
3278 if (!compiler_visit_nested_slice(c
, sub
, ctx
))
3281 ADDOP_I(c
, BUILD_TUPLE
, n
);
3285 PyErr_Format(PyExc_SystemError
,
3286 "invalid subscript kind %d", s
->kind
);
3289 return compiler_handle_subscr(c
, kindname
, ctx
);
3293 /* End of the compiler section, beginning of the assembler section */
3295 /* do depth-first search of basic block graph, starting with block.
3296 post records the block indices in post-order.
3298 XXX must handle implicit jumps from one block to next
3302 PyObject
*a_bytecode
; /* string containing bytecode */
3303 int a_offset
; /* offset into bytecode */
3304 int a_nblocks
; /* number of reachable blocks */
3305 basicblock
**a_postorder
; /* list of blocks in dfs postorder */
3306 PyObject
*a_lnotab
; /* string containing lnotab */
3307 int a_lnotab_off
; /* offset into lnotab */
3308 int a_lineno
; /* last lineno of emitted instruction */
3309 int a_lineno_off
; /* bytecode offset of last lineno */
3313 dfs(struct compiler
*c
, basicblock
*b
, struct assembler
*a
)
3316 struct instr
*instr
= NULL
;
3321 if (b
->b_next
!= NULL
)
3322 dfs(c
, b
->b_next
, a
);
3323 for (i
= 0; i
< b
->b_iused
; i
++) {
3324 instr
= &b
->b_instr
[i
];
3325 if (instr
->i_jrel
|| instr
->i_jabs
)
3326 dfs(c
, instr
->i_target
, a
);
3328 a
->a_postorder
[a
->a_nblocks
++] = b
;
3332 stackdepth_walk(struct compiler
*c
, basicblock
*b
, int depth
, int maxdepth
)
3334 int i
, target_depth
;
3335 struct instr
*instr
;
3336 if (b
->b_seen
|| b
->b_startdepth
>= depth
)
3339 b
->b_startdepth
= depth
;
3340 for (i
= 0; i
< b
->b_iused
; i
++) {
3341 instr
= &b
->b_instr
[i
];
3342 depth
+= opcode_stack_effect(instr
->i_opcode
, instr
->i_oparg
);
3343 if (depth
> maxdepth
)
3345 assert(depth
>= 0); /* invalid code or bug in stackdepth() */
3346 if (instr
->i_jrel
|| instr
->i_jabs
) {
3347 target_depth
= depth
;
3348 if (instr
->i_opcode
== FOR_ITER
) {
3349 target_depth
= depth
-2;
3350 } else if (instr
->i_opcode
== SETUP_FINALLY
||
3351 instr
->i_opcode
== SETUP_EXCEPT
) {
3352 target_depth
= depth
+3;
3353 if (target_depth
> maxdepth
)
3354 maxdepth
= target_depth
;
3356 maxdepth
= stackdepth_walk(c
, instr
->i_target
,
3357 target_depth
, maxdepth
);
3358 if (instr
->i_opcode
== JUMP_ABSOLUTE
||
3359 instr
->i_opcode
== JUMP_FORWARD
) {
3360 goto out
; /* remaining code is dead */
3365 maxdepth
= stackdepth_walk(c
, b
->b_next
, depth
, maxdepth
);
3371 /* Find the flow path that needs the largest stack. We assume that
3372 * cycles in the flow graph have no net effect on the stack depth.
3375 stackdepth(struct compiler
*c
)
3377 basicblock
*b
, *entryblock
;
3379 for (b
= c
->u
->u_blocks
; b
!= NULL
; b
= b
->b_list
) {
3381 b
->b_startdepth
= INT_MIN
;
3386 return stackdepth_walk(c
, entryblock
, 0, 0);
3390 assemble_init(struct assembler
*a
, int nblocks
, int firstlineno
)
3392 memset(a
, 0, sizeof(struct assembler
));
3393 a
->a_lineno
= firstlineno
;
3394 a
->a_bytecode
= PyString_FromStringAndSize(NULL
, DEFAULT_CODE_SIZE
);
3397 a
->a_lnotab
= PyString_FromStringAndSize(NULL
, DEFAULT_LNOTAB_SIZE
);
3400 if (nblocks
> PY_SIZE_MAX
/ sizeof(basicblock
*)) {
3404 a
->a_postorder
= (basicblock
**)PyObject_Malloc(
3405 sizeof(basicblock
*) * nblocks
);
3406 if (!a
->a_postorder
) {
3414 assemble_free(struct assembler
*a
)
3416 Py_XDECREF(a
->a_bytecode
);
3417 Py_XDECREF(a
->a_lnotab
);
3419 PyObject_Free(a
->a_postorder
);
3422 /* Return the size of a basic block in bytes. */
3425 instrsize(struct instr
*instr
)
3427 if (!instr
->i_hasarg
)
3428 return 1; /* 1 byte for the opcode*/
3429 if (instr
->i_oparg
> 0xffff)
3430 return 6; /* 1 (opcode) + 1 (EXTENDED_ARG opcode) + 2 (oparg) + 2(oparg extended) */
3431 return 3; /* 1 (opcode) + 2 (oparg) */
3435 blocksize(basicblock
*b
)
3440 for (i
= 0; i
< b
->b_iused
; i
++)
3441 size
+= instrsize(&b
->b_instr
[i
]);
3445 /* Appends a pair to the end of the line number table, a_lnotab, representing
3446 the instruction's bytecode offset and line number. See
3447 Objects/lnotab_notes.txt for the description of the line number table. */
3450 assemble_lnotab(struct assembler
*a
, struct instr
*i
)
3452 int d_bytecode
, d_lineno
;
3454 unsigned char *lnotab
;
3456 d_bytecode
= a
->a_offset
- a
->a_lineno_off
;
3457 d_lineno
= i
->i_lineno
- a
->a_lineno
;
3459 assert(d_bytecode
>= 0);
3460 assert(d_lineno
>= 0);
3462 if(d_bytecode
== 0 && d_lineno
== 0)
3465 if (d_bytecode
> 255) {
3466 int j
, nbytes
, ncodes
= d_bytecode
/ 255;
3467 nbytes
= a
->a_lnotab_off
+ 2 * ncodes
;
3468 len
= PyString_GET_SIZE(a
->a_lnotab
);
3469 if (nbytes
>= len
) {
3470 if ((len
<= INT_MAX
/ 2) && (len
* 2 < nbytes
))
3472 else if (len
<= INT_MAX
/ 2)
3478 if (_PyString_Resize(&a
->a_lnotab
, len
) < 0)
3481 lnotab
= (unsigned char *)
3482 PyString_AS_STRING(a
->a_lnotab
) + a
->a_lnotab_off
;
3483 for (j
= 0; j
< ncodes
; j
++) {
3487 d_bytecode
-= ncodes
* 255;
3488 a
->a_lnotab_off
+= ncodes
* 2;
3490 assert(d_bytecode
<= 255);
3491 if (d_lineno
> 255) {
3492 int j
, nbytes
, ncodes
= d_lineno
/ 255;
3493 nbytes
= a
->a_lnotab_off
+ 2 * ncodes
;
3494 len
= PyString_GET_SIZE(a
->a_lnotab
);
3495 if (nbytes
>= len
) {
3496 if ((len
<= INT_MAX
/ 2) && len
* 2 < nbytes
)
3498 else if (len
<= INT_MAX
/ 2)
3504 if (_PyString_Resize(&a
->a_lnotab
, len
) < 0)
3507 lnotab
= (unsigned char *)
3508 PyString_AS_STRING(a
->a_lnotab
) + a
->a_lnotab_off
;
3509 *lnotab
++ = d_bytecode
;
3512 for (j
= 1; j
< ncodes
; j
++) {
3516 d_lineno
-= ncodes
* 255;
3517 a
->a_lnotab_off
+= ncodes
* 2;
3520 len
= PyString_GET_SIZE(a
->a_lnotab
);
3521 if (a
->a_lnotab_off
+ 2 >= len
) {
3522 if (_PyString_Resize(&a
->a_lnotab
, len
* 2) < 0)
3525 lnotab
= (unsigned char *)
3526 PyString_AS_STRING(a
->a_lnotab
) + a
->a_lnotab_off
;
3528 a
->a_lnotab_off
+= 2;
3530 *lnotab
++ = d_bytecode
;
3531 *lnotab
++ = d_lineno
;
3533 else { /* First line of a block; def stmt, etc. */
3535 *lnotab
++ = d_lineno
;
3537 a
->a_lineno
= i
->i_lineno
;
3538 a
->a_lineno_off
= a
->a_offset
;
3543 Extend the bytecode with a new instruction.
3544 Update lnotab if necessary.
3548 assemble_emit(struct assembler
*a
, struct instr
*i
)
3550 int size
, arg
= 0, ext
= 0;
3551 Py_ssize_t len
= PyString_GET_SIZE(a
->a_bytecode
);
3554 size
= instrsize(i
);
3559 if (i
->i_lineno
&& !assemble_lnotab(a
, i
))
3561 if (a
->a_offset
+ size
>= len
) {
3562 if (len
> PY_SSIZE_T_MAX
/ 2)
3564 if (_PyString_Resize(&a
->a_bytecode
, len
* 2) < 0)
3567 code
= PyString_AS_STRING(a
->a_bytecode
) + a
->a_offset
;
3568 a
->a_offset
+= size
;
3570 assert(i
->i_hasarg
);
3571 *code
++ = (char)EXTENDED_ARG
;
3572 *code
++ = ext
& 0xff;
3576 *code
++ = i
->i_opcode
;
3578 assert(size
== 3 || size
== 6);
3579 *code
++ = arg
& 0xff;
3586 assemble_jump_offsets(struct assembler
*a
, struct compiler
*c
)
3589 int bsize
, totsize
, extended_arg_count
, last_extended_arg_count
= 0;
3592 /* Compute the size of each block and fixup jump args.
3593 Replace block pointer with position in bytecode. */
3596 for (i
= a
->a_nblocks
- 1; i
>= 0; i
--) {
3597 b
= a
->a_postorder
[i
];
3598 bsize
= blocksize(b
);
3599 b
->b_offset
= totsize
;
3602 extended_arg_count
= 0;
3603 for (b
= c
->u
->u_blocks
; b
!= NULL
; b
= b
->b_list
) {
3604 bsize
= b
->b_offset
;
3605 for (i
= 0; i
< b
->b_iused
; i
++) {
3606 struct instr
*instr
= &b
->b_instr
[i
];
3607 /* Relative jumps are computed relative to
3608 the instruction pointer after fetching
3609 the jump instruction.
3611 bsize
+= instrsize(instr
);
3613 instr
->i_oparg
= instr
->i_target
->b_offset
;
3614 else if (instr
->i_jrel
) {
3615 int delta
= instr
->i_target
->b_offset
- bsize
;
3616 instr
->i_oparg
= delta
;
3620 if (instr
->i_oparg
> 0xffff)
3621 extended_arg_count
++;
3625 /* XXX: This is an awful hack that could hurt performance, but
3626 on the bright side it should work until we come up
3627 with a better solution.
3629 In the meantime, should the goto be dropped in favor
3632 The issue is that in the first loop blocksize() is called
3633 which calls instrsize() which requires i_oparg be set
3634 appropriately. There is a bootstrap problem because
3635 i_oparg is calculated in the second loop above.
3637 So we loop until we stop seeing new EXTENDED_ARGs.
3638 The only EXTENDED_ARGs that could be popping up are
3639 ones in jump instructions. So this should converge
3642 if (last_extended_arg_count
!= extended_arg_count
) {
3643 last_extended_arg_count
= extended_arg_count
;
3649 dict_keys_inorder(PyObject
*dict
, int offset
)
3651 PyObject
*tuple
, *k
, *v
;
3652 Py_ssize_t i
, pos
= 0, size
= PyDict_Size(dict
);
3654 tuple
= PyTuple_New(size
);
3657 while (PyDict_Next(dict
, &pos
, &k
, &v
)) {
3658 i
= PyInt_AS_LONG(v
);
3659 /* The keys of the dictionary are tuples. (see compiler_add_o)
3660 The object we want is always first, though. */
3661 k
= PyTuple_GET_ITEM(k
, 0);
3663 assert((i
- offset
) < size
);
3664 assert((i
- offset
) >= 0);
3665 PyTuple_SET_ITEM(tuple
, i
- offset
, k
);
3671 compute_code_flags(struct compiler
*c
)
3673 PySTEntryObject
*ste
= c
->u
->u_ste
;
3675 if (ste
->ste_type
!= ModuleBlock
)
3676 flags
|= CO_NEWLOCALS
;
3677 if (ste
->ste_type
== FunctionBlock
) {
3678 if (!ste
->ste_unoptimized
)
3679 flags
|= CO_OPTIMIZED
;
3680 if (ste
->ste_nested
)
3682 if (ste
->ste_generator
)
3683 flags
|= CO_GENERATOR
;
3684 if (ste
->ste_varargs
)
3685 flags
|= CO_VARARGS
;
3686 if (ste
->ste_varkeywords
)
3687 flags
|= CO_VARKEYWORDS
;
3690 /* (Only) inherit compilerflags in PyCF_MASK */
3691 flags
|= (c
->c_flags
->cf_flags
& PyCF_MASK
);
3693 n
= PyDict_Size(c
->u
->u_freevars
);
3697 n
= PyDict_Size(c
->u
->u_cellvars
);
3708 static PyCodeObject
*
3709 makecode(struct compiler
*c
, struct assembler
*a
)
3712 PyCodeObject
*co
= NULL
;
3713 PyObject
*consts
= NULL
;
3714 PyObject
*names
= NULL
;
3715 PyObject
*varnames
= NULL
;
3716 PyObject
*filename
= NULL
;
3717 PyObject
*name
= NULL
;
3718 PyObject
*freevars
= NULL
;
3719 PyObject
*cellvars
= NULL
;
3720 PyObject
*bytecode
= NULL
;
3723 tmp
= dict_keys_inorder(c
->u
->u_consts
, 0);
3726 consts
= PySequence_List(tmp
); /* optimize_code requires a list */
3729 names
= dict_keys_inorder(c
->u
->u_names
, 0);
3730 varnames
= dict_keys_inorder(c
->u
->u_varnames
, 0);
3731 if (!consts
|| !names
|| !varnames
)
3734 cellvars
= dict_keys_inorder(c
->u
->u_cellvars
, 0);
3737 freevars
= dict_keys_inorder(c
->u
->u_freevars
, PyTuple_Size(cellvars
));
3740 filename
= PyString_FromString(c
->c_filename
);
3744 nlocals
= PyDict_Size(c
->u
->u_varnames
);
3745 flags
= compute_code_flags(c
);
3749 bytecode
= PyCode_Optimize(a
->a_bytecode
, consts
, names
, a
->a_lnotab
);
3753 tmp
= PyList_AsTuple(consts
); /* PyCode_New requires a tuple */
3759 co
= PyCode_New(c
->u
->u_argcount
, nlocals
, stackdepth(c
), flags
,
3760 bytecode
, consts
, names
, varnames
,
3762 filename
, c
->u
->u_name
,
3763 c
->u
->u_firstlineno
,
3768 Py_XDECREF(varnames
);
3769 Py_XDECREF(filename
);
3771 Py_XDECREF(freevars
);
3772 Py_XDECREF(cellvars
);
3773 Py_XDECREF(bytecode
);
3778 /* For debugging purposes only */
3781 dump_instr(const struct instr
*i
)
3783 const char *jrel
= i
->i_jrel
? "jrel " : "";
3784 const char *jabs
= i
->i_jabs
? "jabs " : "";
3789 sprintf(arg
, "arg: %d ", i
->i_oparg
);
3791 fprintf(stderr
, "line: %d, opcode: %d %s%s%s\n",
3792 i
->i_lineno
, i
->i_opcode
, arg
, jabs
, jrel
);
3796 dump_basicblock(const basicblock
*b
)
3798 const char *seen
= b
->b_seen
? "seen " : "";
3799 const char *b_return
= b
->b_return
? "return " : "";
3800 fprintf(stderr
, "used: %d, depth: %d, offset: %d %s%s\n",
3801 b
->b_iused
, b
->b_startdepth
, b
->b_offset
, seen
, b_return
);
3804 for (i
= 0; i
< b
->b_iused
; i
++) {
3805 fprintf(stderr
, " [%02d] ", i
);
3806 dump_instr(b
->b_instr
+ i
);
3812 static PyCodeObject
*
3813 assemble(struct compiler
*c
, int addNone
)
3815 basicblock
*b
, *entryblock
;
3818 PyCodeObject
*co
= NULL
;
3820 /* Make sure every block that falls off the end returns None.
3821 XXX NEXT_BLOCK() isn't quite right, because if the last
3822 block ends with a jump or return b_next shouldn't set.
3824 if (!c
->u
->u_curblock
->b_return
) {
3827 ADDOP_O(c
, LOAD_CONST
, Py_None
, consts
);
3828 ADDOP(c
, RETURN_VALUE
);
3833 for (b
= c
->u
->u_blocks
; b
!= NULL
; b
= b
->b_list
) {
3838 /* Set firstlineno if it wasn't explicitly set. */
3839 if (!c
->u
->u_firstlineno
) {
3840 if (entryblock
&& entryblock
->b_instr
)
3841 c
->u
->u_firstlineno
= entryblock
->b_instr
->i_lineno
;
3843 c
->u
->u_firstlineno
= 1;
3845 if (!assemble_init(&a
, nblocks
, c
->u
->u_firstlineno
))
3847 dfs(c
, entryblock
, &a
);
3849 /* Can't modify the bytecode after computing jump offsets. */
3850 assemble_jump_offsets(&a
, c
);
3852 /* Emit code in reverse postorder from dfs. */
3853 for (i
= a
.a_nblocks
- 1; i
>= 0; i
--) {
3854 b
= a
.a_postorder
[i
];
3855 for (j
= 0; j
< b
->b_iused
; j
++)
3856 if (!assemble_emit(&a
, &b
->b_instr
[j
]))
3860 if (_PyString_Resize(&a
.a_lnotab
, a
.a_lnotab_off
) < 0)
3862 if (_PyString_Resize(&a
.a_bytecode
, a
.a_offset
) < 0)
3865 co
= makecode(c
, &a
);