2 * This file compiles an abstract syntax tree (AST) into Python bytecode.
4 * The primary entry point is PyAST_Compile(), which returns a
5 * PyCodeObject. The compiler makes several passes to build the code
7 * 1. Checks for future statements. See future.c
8 * 2. Builds a symbol table. See symtable.c.
9 * 3. Generate code for basic blocks. See compiler_mod() in this file.
10 * 4. Assemble the basic blocks into final code. See assemble() in
12 * 5. Optimize the byte code (peephole optimizations). See peephole.c
14 * Note that compiler_mod() suggests module, but the module ast type
15 * (mod_ty) has cases for expressions and interactive statements.
17 * CAUTION: The VISIT_* macros abort the current function when they
18 * encounter a problem. So don't invoke them when there is memory
19 * which needs to be released. Code blocks are OK, as the compiler
20 * structure takes care of releasing those. Use the arena to manage
26 #include "Python-ast.h"
35 int Py_OptimizeFlag
= 0;
37 #define DEFAULT_BLOCK_SIZE 16
38 #define DEFAULT_BLOCKS 8
39 #define DEFAULT_CODE_SIZE 128
40 #define DEFAULT_LNOTAB_SIZE 16
45 unsigned i_hasarg
: 1;
46 unsigned char i_opcode
;
48 struct basicblock_
*i_target
; /* target block (if jump instruction) */
52 typedef struct basicblock_
{
53 /* Each basicblock in a compilation unit is linked via b_list in the
54 reverse order that the block are allocated. b_list points to the next
55 block, not to be confused with b_next, which is next by control flow. */
56 struct basicblock_
*b_list
;
57 /* number of instructions used */
59 /* length of instruction array (b_instr) */
61 /* pointer to an array of instructions, initially NULL */
62 struct instr
*b_instr
;
63 /* If b_next is non-NULL, it is a pointer to the next
64 block reached by normal control flow. */
65 struct basicblock_
*b_next
;
66 /* b_seen is used to perform a DFS of basicblocks. */
68 /* b_return is true if a RETURN_VALUE opcode is inserted. */
69 unsigned b_return
: 1;
70 /* depth of stack upon entry of block, computed by stackdepth() */
72 /* instruction offset for block, computed by assemble_jump_offsets() */
76 /* fblockinfo tracks the current frame block.
78 A frame block is used to handle loops, try/except, and try/finally.
79 It's called a frame block to distinguish it from a basic block in the
83 enum fblocktype
{ LOOP
, EXCEPT
, FINALLY_TRY
, FINALLY_END
};
86 enum fblocktype fb_type
;
90 /* The following items change on entry and exit of code blocks.
91 They must be saved and restored when returning to a block.
93 struct compiler_unit
{
94 PySTEntryObject
*u_ste
;
97 /* The following fields are dicts that map objects to
98 the index of them in co_XXX. The index is used as
99 the argument for opcodes that refer to those collections.
101 PyObject
*u_consts
; /* all constants */
102 PyObject
*u_names
; /* all names */
103 PyObject
*u_varnames
; /* local variables */
104 PyObject
*u_cellvars
; /* cell variables */
105 PyObject
*u_freevars
; /* free variables */
107 PyObject
*u_private
; /* for private name mangling */
109 int u_argcount
; /* number of arguments for block */
110 /* Pointer to the most recently allocated block. By following b_list
111 members, you can reach all early allocated blocks. */
112 basicblock
*u_blocks
;
113 basicblock
*u_curblock
; /* pointer to current block */
116 struct fblockinfo u_fblock
[CO_MAXBLOCKS
];
118 int u_firstlineno
; /* the first lineno of the block */
119 int u_lineno
; /* the lineno for the current stmt */
120 bool u_lineno_set
; /* boolean to indicate whether instr
121 has been generated with current lineno */
124 /* This struct captures the global state of a compilation.
126 The u pointer points to the current compilation unit, while units
127 for enclosing blocks are stored in c_stack. The u and c_stack are
128 managed by compiler_enter_scope() and compiler_exit_scope().
132 const char *c_filename
;
133 struct symtable
*c_st
;
134 PyFutureFeatures
*c_future
; /* pointer to module's __future__ */
135 PyCompilerFlags
*c_flags
;
137 int c_interactive
; /* true if in interactive mode */
140 struct compiler_unit
*u
; /* compiler state for current block */
141 PyObject
*c_stack
; /* Python list holding compiler_unit ptrs */
142 PyArena
*c_arena
; /* pointer to memory allocation arena */
145 static int compiler_enter_scope(struct compiler
*, identifier
, void *, int);
146 static void compiler_free(struct compiler
*);
147 static basicblock
*compiler_new_block(struct compiler
*);
148 static int compiler_next_instr(struct compiler
*, basicblock
*);
149 static int compiler_addop(struct compiler
*, int);
150 static int compiler_addop_o(struct compiler
*, int, PyObject
*, PyObject
*);
151 static int compiler_addop_i(struct compiler
*, int, int);
152 static int compiler_addop_j(struct compiler
*, int, basicblock
*, int);
153 static basicblock
*compiler_use_new_block(struct compiler
*);
154 static int compiler_error(struct compiler
*, const char *);
155 static int compiler_nameop(struct compiler
*, identifier
, expr_context_ty
);
157 static PyCodeObject
*compiler_mod(struct compiler
*, mod_ty
);
158 static int compiler_visit_stmt(struct compiler
*, stmt_ty
);
159 static int compiler_visit_keyword(struct compiler
*, keyword_ty
);
160 static int compiler_visit_expr(struct compiler
*, expr_ty
);
161 static int compiler_augassign(struct compiler
*, stmt_ty
);
162 static int compiler_visit_slice(struct compiler
*, slice_ty
,
165 static int compiler_push_fblock(struct compiler
*, enum fblocktype
,
167 static void compiler_pop_fblock(struct compiler
*, enum fblocktype
,
169 /* Returns true if there is a loop on the fblock stack. */
170 static int compiler_in_loop(struct compiler
*);
172 static int inplace_binop(struct compiler
*, operator_ty
);
173 static int expr_constant(expr_ty e
);
175 static int compiler_with(struct compiler
*, stmt_ty
);
177 static PyCodeObject
*assemble(struct compiler
*, int addNone
);
178 static PyObject
*__doc__
;
/*
 * Mangle a "__private" identifier into "_classname__private" using the
 * enclosing class name held in `privateobj`.  Returns `ident` unchanged
 * when no mangling applies: no class context, name does not start with
 * "__", name ends with "__", or name contains a dot.
 * NOTE(review): extraction dropped several original lines here (the
 * nlen/plen declarations, braces, early returns); code kept verbatim.
 */
181 _Py_Mangle(PyObject
*privateobj
, PyObject
*ident
)
183 /* Name mangling: __private becomes _classname__private.
184 This is independent from how the name is used. */
const char *p
, *name
= PyString_AsString(ident
);
/* No mangling without a class context or without a leading "__". */
188 if (privateobj
== NULL
|| !PyString_Check(privateobj
) ||
189 name
== NULL
|| name
[0] != '_' || name
[1] != '_') {
193 p
= PyString_AsString(privateobj
);
195 /* Don't mangle __id__ or names with dots.
197 The only time a name with a dot can occur is when
198 we are compiling an import statement that has a
201 TODO(jhylton): Decide whether we want to support
202 mangling of the module name, e.g. __M.X.
204 if ((name
[nlen
-1] == '_' && name
[nlen
-2] == '_')
205 || strchr(name
, '.')) {
207 return ident
; /* Don't mangle __whatever__ */
209 /* Strip leading underscores from class name */
214 return ident
; /* Don't mangle if class is just underscores */
/* Overflow guards before computing the 1 + nlen + plen result size. */
218 assert(1 <= PY_SSIZE_T_MAX
- nlen
);
219 assert(1 + nlen
<= PY_SSIZE_T_MAX
- plen
);
221 ident
= PyString_FromStringAndSize(NULL
, 1 + nlen
+ plen
);
224 /* ident = "_" + p[:plen] + name # i.e. 1+plen+nlen bytes */
225 buffer
= PyString_AS_STRING(ident
);
/* strncpy copies exactly plen bytes of the class name; the following
   strcpy of `name` supplies the NUL terminator for the whole buffer. */
227 strncpy(buffer
+1, p
, plen
);
228 strcpy(buffer
+1+plen
, name
);
/*
 * Zero-initialize a compiler and create its scope stack.
 * NOTE(review): fragment is missing the return type, braces and the
 * PyList_New error check/return — kept verbatim.
 */
233 compiler_init(struct compiler
*c
)
/* Start from a fully zeroed state so all pointer members are NULL. */
235 memset(c
, 0, sizeof(struct compiler
));
/* c_stack holds saved compiler_unit pointers for enclosing scopes. */
237 c
->c_stack
= PyList_New(0);
/*
 * Primary compilation entry point: compile an AST module `mod` into a
 * PyCodeObject.  Steps visible here: intern "__doc__", init the compiler,
 * run the future-statement pass, merge future features into the compiler
 * flags, build the symbol table, then generate code via compiler_mod().
 * Returns NULL with an exception set on failure.
 * NOTE(review): extraction dropped lines (the `struct compiler c`
 * declaration, `merged` declaration, goto/cleanup paths) — kept verbatim.
 */
245 PyAST_Compile(mod_ty mod
, const char *filename
, PyCompilerFlags
*flags
,
249 PyCodeObject
*co
= NULL
;
250 PyCompilerFlags local_flags
;
/* Cached interned string used when storing module docstrings. */
254 __doc__
= PyString_InternFromString("__doc__");
259 if (!compiler_init(&c
))
261 c
.c_filename
= filename
;
/* Pass 1: scan for __future__ statements (see future.c). */
263 c
.c_future
= PyFuture_FromAST(mod
, filename
);
264 if (c
.c_future
== NULL
)
/* If the caller passed no flags, use a zeroed local copy. */
267 local_flags
.cf_flags
= 0;
268 flags
= &local_flags
;
/* Merge future-features with caller flags; both sides see the union. */
270 merged
= c
.c_future
->ff_features
| flags
->cf_flags
;
271 c
.c_future
->ff_features
= merged
;
272 flags
->cf_flags
= merged
;
/* Pass 2: build the symbol table (see symtable.c). */
276 c
.c_st
= PySymtable_Build(mod
, filename
, c
.c_future
);
277 if (c
.c_st
== NULL
) {
278 if (!PyErr_Occurred())
279 PyErr_SetString(PyExc_SystemError
, "no symtable");
/* Passes 3-5: code generation, assembly and peephole optimization. */
283 co
= compiler_mod(&c
, mod
);
/* Invariant: either we produced code or an exception is pending. */
287 assert(co
|| PyErr_Occurred());
/*
 * Compatibility entry point: convert a parser concrete-syntax node to an
 * AST and compile it.  All AST memory lives in a fresh arena.
 * NOTE(review): fragment is missing the mod declaration, NULL checks and
 * the PyArena_Free cleanup — kept verbatim.
 */
292 PyNode_Compile(struct _node
*n
, const char *filename
)
294 PyCodeObject
*co
= NULL
;
296 PyArena
*arena
= PyArena_New();
/* CST -> AST; the arena owns the resulting tree. */
299 mod
= PyAST_FromNode(n
, NULL
, filename
, arena
);
/* Delegate to the AST-based compiler above. */
301 co
= PyAST_Compile(mod
, filename
, NULL
, arena
);
/*
 * Release the resources owned by a compiler: the symbol table, the
 * future-features struct, and the scope stack list.
 * NOTE(review): the guards (e.g. `if (c->c_st)`) present in the original
 * appear to have been dropped by the extraction — kept verbatim.
 */
307 compiler_free(struct compiler
*c
)
310 PySymtable_Free(c
->c_st
);
/* c_future was allocated with PyObject_Malloc by PyFuture_FromAST. */
312 PyObject_Free(c
->c_future
);
313 Py_DECREF(c
->c_stack
);
/*
 * Build a dict mapping each element of `list` to its index.
 * Keys are (element, type) tuples — the type is included so that equal
 * values of different types (e.g. int 1 and long 1L) stay distinct.
 * Returns a new reference, or NULL on error.
 * NOTE(review): declarations of i/n/v/k, refcount bookkeeping and the
 * error-path body are missing from this extraction — kept verbatim.
 */
317 list2dict(PyObject
*list
)
321 PyObject
*dict
= PyDict_New();
322 if (!dict
) return NULL
;
324 n
= PyList_Size(list
);
325 for (i
= 0; i
< n
; i
++) {
/* Value: the element's position in the list. */
326 v
= PyInt_FromLong(i
);
331 k
= PyList_GET_ITEM(list
, i
);
/* Key: (element, type) pair to avoid cross-type key collisions. */
332 k
= PyTuple_Pack(2, k
, k
->ob_type
);
333 if (k
== NULL
|| PyDict_SetItem(dict
, k
, v
) < 0) {
345 /* Return new dict containing names from src that match scope(s).
347 src is a symbol table dictionary. If the scope of a name matches
348 either scope_type or flag is set, insert it into the new dict. The
349 values are integers, starting at offset and increasing by one for
354 dictbytype(PyObject
*src
, int scope_type
, int flag
, int offset
)
356 Py_ssize_t pos
= 0, i
= offset
, scope
;
357 PyObject
*k
, *v
, *dest
= PyDict_New();
/* Iterate every (name, symbol-flags) pair in the symtable dict. */
363 while (PyDict_Next(src
, &pos
, &k
, &v
)) {
364 /* XXX this should probably be a macro in symtable.h */
365 assert(PyInt_Check(v
));
/* Extract the scope bits packed into the symbol flags word. */
366 scope
= (PyInt_AS_LONG(v
) >> SCOPE_OFF
) & SCOPE_MASK
;
368 if (scope
== scope_type
|| PyInt_AS_LONG(v
) & flag
) {
369 PyObject
*tuple
, *item
= PyInt_FromLong(i
);
/* Key is a (name, type) pair, matching the list2dict convention. */
375 tuple
= PyTuple_Pack(2, k
, k
->ob_type
);
376 if (!tuple
|| PyDict_SetItem(dest
, tuple
, item
) < 0) {
/*
 * Debug-only sanity pass over a compilation unit's block list.
 * Walks the b_list chain and asserts each block is neither a known
 * heap-poison pattern (0xcb/0xfb/0xdb fills used by debug allocators)
 * nor in an inconsistent allocated/used state.
 * NOTE(review): `basicblock *block` declaration and braces are missing
 * from this extraction — kept verbatim.
 */
390 compiler_unit_check(struct compiler_unit
*u
)
393 for (block
= u
->u_blocks
; block
!= NULL
; block
= block
->b_list
) {
/* Catch use of freed/uninitialized memory under debug allocators. */
394 assert((void *)block
!= (void *)0xcbcbcbcb);
395 assert((void *)block
!= (void *)0xfbfbfbfb);
396 assert((void *)block
!= (void *)0xdbdbdbdb);
397 if (block
->b_instr
!= NULL
) {
/* A block with an instruction array must have consistent counters. */
398 assert(block
->b_ialloc
> 0);
399 assert(block
->b_iused
> 0);
400 assert(block
->b_ialloc
>= block
->b_iused
);
/* A block without an instruction array must be completely empty. */
403 assert (block
->b_iused
== 0);
404 assert (block
->b_ialloc
== 0);
/*
 * Free a compilation unit: its basic blocks (and their instruction
 * arrays), then every owned PyObject member, via Py_CLEAR so the
 * pointers are NULLed as they are released.
 * NOTE(review): the loop over `u->u_blocks` using b/next and the final
 * PyObject_Free(u) are missing from this extraction — kept verbatim.
 */
410 compiler_unit_free(struct compiler_unit
*u
)
412 basicblock
*b
, *next
;
/* Validate block-list invariants before tearing it down. */
414 compiler_unit_check(u
);
418 PyObject_Free((void *)b
->b_instr
);
420 PyObject_Free((void *)b
);
425 Py_CLEAR(u
->u_consts
);
426 Py_CLEAR(u
->u_names
);
427 Py_CLEAR(u
->u_varnames
);
428 Py_CLEAR(u
->u_freevars
);
429 Py_CLEAR(u
->u_cellvars
);
430 Py_CLEAR(u
->u_private
);
435 compiler_enter_scope(struct compiler
*c
, identifier name
, void *key
,
438 struct compiler_unit
*u
;
440 u
= (struct compiler_unit
*)PyObject_Malloc(sizeof(
441 struct compiler_unit
));
446 memset(u
, 0, sizeof(struct compiler_unit
));
448 u
->u_ste
= PySymtable_Lookup(c
->c_st
, key
);
450 compiler_unit_free(u
);
455 u
->u_varnames
= list2dict(u
->u_ste
->ste_varnames
);
456 u
->u_cellvars
= dictbytype(u
->u_ste
->ste_symbols
, CELL
, 0, 0);
457 if (!u
->u_varnames
|| !u
->u_cellvars
) {
458 compiler_unit_free(u
);
462 u
->u_freevars
= dictbytype(u
->u_ste
->ste_symbols
, FREE
, DEF_FREE_CLASS
,
463 PyDict_Size(u
->u_cellvars
));
464 if (!u
->u_freevars
) {
465 compiler_unit_free(u
);
471 u
->u_firstlineno
= lineno
;
473 u
->u_lineno_set
= false;
474 u
->u_consts
= PyDict_New();
476 compiler_unit_free(u
);
479 u
->u_names
= PyDict_New();
481 compiler_unit_free(u
);
487 /* Push the old compiler_unit on the stack. */
489 PyObject
*wrapper
= PyCObject_FromVoidPtr(c
->u
, NULL
);
490 if (!wrapper
|| PyList_Append(c
->c_stack
, wrapper
) < 0) {
492 compiler_unit_free(u
);
496 u
->u_private
= c
->u
->u_private
;
497 Py_XINCREF(u
->u_private
);
502 if (compiler_use_new_block(c
) == NULL
)
/*
 * Leave the current scope: free the current compilation unit and pop the
 * parent unit (stashed as a CObject wrapper by compiler_enter_scope)
 * off c->c_stack, restoring it as c->u.
 * NOTE(review): declarations of n/wrapper and the empty-stack branch are
 * missing from this extraction — kept verbatim.
 */
509 compiler_exit_scope(struct compiler
*c
)
515 compiler_unit_free(c
->u
);
516 /* Restore c->u to the parent unit. */
517 n
= PyList_GET_SIZE(c
->c_stack
) - 1;
519 wrapper
= PyList_GET_ITEM(c
->c_stack
, n
);
520 c
->u
= (struct compiler_unit
*)PyCObject_AsVoidPtr(wrapper
);
522 /* we are deleting from a list so this really shouldn't fail */
523 if (PySequence_DelItem(c
->c_stack
, n
) < 0)
524 Py_FatalError("compiler_exit_scope()");
/* Re-validate the restored unit's block list. */
525 compiler_unit_check(c
->u
);
532 /* Allocate a new block and return a pointer to it.
533 Returns NULL on error.
537 compiler_new_block(struct compiler
*c
)
540 struct compiler_unit
*u
;
543 b
= (basicblock
*)PyObject_Malloc(sizeof(basicblock
));
548 memset((void *)b
, 0, sizeof(basicblock
));
549 /* Extend the singly linked list of blocks with new block. */
550 b
->b_list
= u
->u_blocks
;
/*
 * Allocate a new basic block and make it the current block.  Unlike
 * compiler_next_block(), no control-flow edge from the previous current
 * block is created.
 * NOTE(review): the NULL check on `block` and the return are missing
 * from this extraction — kept verbatim.
 */
556 compiler_use_new_block(struct compiler
*c
)
558 basicblock
*block
= compiler_new_block(c
);
561 c
->u
->u_curblock
= block
;
/*
 * Allocate a new basic block, link it as the fall-through successor
 * (b_next) of the current block, and make it current — i.e. an implicit
 * jump from the old current block into the new one.
 * NOTE(review): the NULL check and return are missing from this
 * extraction — kept verbatim.
 */
566 compiler_next_block(struct compiler
*c
)
568 basicblock
*block
= compiler_new_block(c
);
/* Normal control flow falls through from the old block to the new. */
571 c
->u
->u_curblock
->b_next
= block
;
572 c
->u
->u_curblock
= block
;
/*
 * Make an already-allocated block the fall-through successor of the
 * current block, then switch to it as the current block.  Used to place
 * jump targets (e.g. loop exits) created earlier by compiler_new_block.
 */
577 compiler_use_next_block(struct compiler
*c
, basicblock
*block
)
579 assert(block
!= NULL
);
580 c
->u
->u_curblock
->b_next
= block
;
581 c
->u
->u_curblock
= block
;
585 /* Returns the offset of the next instruction in the current block's
586 b_instr array. Resizes the b_instr as necessary.
587 Returns -1 on failure.
591 compiler_next_instr(struct compiler
*c
, basicblock
*b
)
594 if (b
->b_instr
== NULL
) {
595 b
->b_instr
= (struct instr
*)PyObject_Malloc(
596 sizeof(struct instr
) * DEFAULT_BLOCK_SIZE
);
597 if (b
->b_instr
== NULL
) {
601 b
->b_ialloc
= DEFAULT_BLOCK_SIZE
;
602 memset((char *)b
->b_instr
, 0,
603 sizeof(struct instr
) * DEFAULT_BLOCK_SIZE
);
605 else if (b
->b_iused
== b
->b_ialloc
) {
607 size_t oldsize
, newsize
;
608 oldsize
= b
->b_ialloc
* sizeof(struct instr
);
609 newsize
= oldsize
<< 1;
611 if (oldsize
> (PY_SIZE_MAX
>> 1)) {
621 tmp
= (struct instr
*)PyObject_Realloc(
622 (void *)b
->b_instr
, newsize
);
628 memset((char *)b
->b_instr
+ oldsize
, 0, newsize
- oldsize
);
633 /* Set the i_lineno member of the instruction at offset off if the
634 line number for the current expression/statement has not
635 already been set. If it has been set, the call has no effect.
637 The line number is reset in the following cases:
638 - when entering a new scope
640 - on each expression that start a new line
641 - before the "except" clause
642 - before the "for" and "while" expressions
/*
 * Stamp the current statement's line number onto the instruction at
 * offset `off` in the current block — but only once per statement:
 * u_lineno_set latches after the first instruction of each statement.
 * NOTE(review): the `basicblock *b` declaration and the early return
 * after the latch check are missing from this extraction — kept verbatim.
 */
646 compiler_set_lineno(struct compiler
*c
, int off
)
/* Already emitted a lineno for this statement; nothing to do. */
649 if (c
->u
->u_lineno_set
)
651 c
->u
->u_lineno_set
= true;
652 b
= c
->u
->u_curblock
;
653 b
->b_instr
[off
].i_lineno
= c
->u
->u_lineno
;
657 opcode_stack_effect(int opcode
, int oparg
)
681 case BINARY_MULTIPLY
:
685 case BINARY_SUBTRACT
:
687 case BINARY_FLOOR_DIVIDE
:
688 case BINARY_TRUE_DIVIDE
:
690 case INPLACE_FLOOR_DIVIDE
:
691 case INPLACE_TRUE_DIVIDE
:
722 case INPLACE_SUBTRACT
:
723 case INPLACE_MULTIPLY
:
753 case PRINT_NEWLINE_TO
:
766 return -1; /* XXX Sometimes more */
781 return -3; /* or -1 or -2 if no exception occurred or
782 return/break/continue */
790 case UNPACK_SEQUENCE
:
793 return 1; /* or -1, at end of iterator */
824 case JUMP_IF_TRUE_OR_POP
: /* -1 if jump not taken */
825 case JUMP_IF_FALSE_OR_POP
: /* "" */
829 case POP_JUMP_IF_FALSE
:
830 case POP_JUMP_IF_TRUE
:
852 #define NARGS(o) (((o) % 256) + 2*((o) / 256))
854 return -NARGS(oparg
);
855 case CALL_FUNCTION_VAR
:
856 case CALL_FUNCTION_KW
:
857 return -NARGS(oparg
)-1;
858 case CALL_FUNCTION_VAR_KW
:
859 return -NARGS(oparg
)-2;
878 fprintf(stderr
, "opcode = %d\n", opcode
);
879 Py_FatalError("opcode_stack_effect()");
882 return 0; /* not reachable */
885 /* Add an opcode with no argument.
886 Returns 0 on failure, 1 on success.
890 compiler_addop(struct compiler
*c
, int opcode
)
895 off
= compiler_next_instr(c
, c
->u
->u_curblock
);
898 b
= c
->u
->u_curblock
;
899 i
= &b
->b_instr
[off
];
900 i
->i_opcode
= opcode
;
902 if (opcode
== RETURN_VALUE
)
904 compiler_set_lineno(c
, off
);
909 compiler_add_o(struct compiler
*c
, PyObject
*dict
, PyObject
*o
)
915 /* necessary to make sure types aren't coerced (e.g., int and long) */
916 /* _and_ to distinguish 0.0 from -0.0 e.g. on IEEE platforms */
917 if (PyFloat_Check(o
)) {
918 d
= PyFloat_AS_DOUBLE(o
);
919 /* all we need is to make the tuple different in either the 0.0
920 * or -0.0 case from all others, just to avoid the "coercion".
922 if (d
== 0.0 && copysign(1.0, d
) < 0.0)
923 t
= PyTuple_Pack(3, o
, o
->ob_type
, Py_None
);
925 t
= PyTuple_Pack(2, o
, o
->ob_type
);
927 #ifndef WITHOUT_COMPLEX
928 else if (PyComplex_Check(o
)) {
930 int real_negzero
, imag_negzero
;
931 /* For the complex case we must make complex(x, 0.)
932 different from complex(x, -0.) and complex(0., y)
933 different from complex(-0., y), for any x and y.
934 All four complex zeros must be distinguished.*/
935 z
= PyComplex_AsCComplex(o
);
936 real_negzero
= z
.real
== 0.0 && copysign(1.0, z
.real
) < 0.0;
937 imag_negzero
= z
.imag
== 0.0 && copysign(1.0, z
.imag
) < 0.0;
938 if (real_negzero
&& imag_negzero
) {
939 t
= PyTuple_Pack(5, o
, o
->ob_type
,
940 Py_None
, Py_None
, Py_None
);
942 else if (imag_negzero
) {
943 t
= PyTuple_Pack(4, o
, o
->ob_type
, Py_None
, Py_None
);
945 else if (real_negzero
) {
946 t
= PyTuple_Pack(3, o
, o
->ob_type
, Py_None
);
949 t
= PyTuple_Pack(2, o
, o
->ob_type
);
952 #endif /* WITHOUT_COMPLEX */
954 t
= PyTuple_Pack(2, o
, o
->ob_type
);
959 v
= PyDict_GetItem(dict
, t
);
961 arg
= PyDict_Size(dict
);
962 v
= PyInt_FromLong(arg
);
967 if (PyDict_SetItem(dict
, t
, v
) < 0) {
975 arg
= PyInt_AsLong(v
);
981 compiler_addop_o(struct compiler
*c
, int opcode
, PyObject
*dict
,
984 int arg
= compiler_add_o(c
, dict
, o
);
987 return compiler_addop_i(c
, opcode
, arg
);
991 compiler_addop_name(struct compiler
*c
, int opcode
, PyObject
*dict
,
995 PyObject
*mangled
= _Py_Mangle(c
->u
->u_private
, o
);
998 arg
= compiler_add_o(c
, dict
, mangled
);
1002 return compiler_addop_i(c
, opcode
, arg
);
1005 /* Add an opcode with an integer argument.
1006 Returns 0 on failure, 1 on success.
1010 compiler_addop_i(struct compiler
*c
, int opcode
, int oparg
)
1014 off
= compiler_next_instr(c
, c
->u
->u_curblock
);
1017 i
= &c
->u
->u_curblock
->b_instr
[off
];
1018 i
->i_opcode
= opcode
;
1021 compiler_set_lineno(c
, off
);
1026 compiler_addop_j(struct compiler
*c
, int opcode
, basicblock
*b
, int absolute
)
1032 off
= compiler_next_instr(c
, c
->u
->u_curblock
);
1035 i
= &c
->u
->u_curblock
->b_instr
[off
];
1036 i
->i_opcode
= opcode
;
1043 compiler_set_lineno(c
, off
);
1047 /* The distinction between NEW_BLOCK and NEXT_BLOCK is subtle. (I'd
1048 like to find better names.) NEW_BLOCK() creates a new block and sets
1049 it as the current block. NEXT_BLOCK() also creates an implicit jump
1050 from the current block to the new block.
1053 /* The returns inside these macros make it impossible to decref objects
1054 created in the local function. Local objects should use the arena.
1058 #define NEW_BLOCK(C) { \
1059 if (compiler_use_new_block((C)) == NULL) \
1063 #define NEXT_BLOCK(C) { \
1064 if (compiler_next_block((C)) == NULL) \
1068 #define ADDOP(C, OP) { \
1069 if (!compiler_addop((C), (OP))) \
1073 #define ADDOP_IN_SCOPE(C, OP) { \
1074 if (!compiler_addop((C), (OP))) { \
1075 compiler_exit_scope(c); \
1080 #define ADDOP_O(C, OP, O, TYPE) { \
1081 if (!compiler_addop_o((C), (OP), (C)->u->u_ ## TYPE, (O))) \
1085 #define ADDOP_NAME(C, OP, O, TYPE) { \
1086 if (!compiler_addop_name((C), (OP), (C)->u->u_ ## TYPE, (O))) \
1090 #define ADDOP_I(C, OP, O) { \
1091 if (!compiler_addop_i((C), (OP), (O))) \
1095 #define ADDOP_JABS(C, OP, O) { \
1096 if (!compiler_addop_j((C), (OP), (O), 1)) \
1100 #define ADDOP_JREL(C, OP, O) { \
1101 if (!compiler_addop_j((C), (OP), (O), 0)) \
1105 /* VISIT and VISIT_SEQ takes an ASDL type as their second argument. They use
1106 the ASDL name to synthesize the name of the C type and the visit function.
1109 #define VISIT(C, TYPE, V) {\
1110 if (!compiler_visit_ ## TYPE((C), (V))) \
1114 #define VISIT_IN_SCOPE(C, TYPE, V) {\
1115 if (!compiler_visit_ ## TYPE((C), (V))) { \
1116 compiler_exit_scope(c); \
1121 #define VISIT_SLICE(C, V, CTX) {\
1122 if (!compiler_visit_slice((C), (V), (CTX))) \
1126 #define VISIT_SEQ(C, TYPE, SEQ) { \
1128 asdl_seq *seq = (SEQ); /* avoid variable capture */ \
1129 for (_i = 0; _i < asdl_seq_LEN(seq); _i++) { \
1130 TYPE ## _ty elt = (TYPE ## _ty)asdl_seq_GET(seq, _i); \
1131 if (!compiler_visit_ ## TYPE((C), elt)) \
1136 #define VISIT_SEQ_IN_SCOPE(C, TYPE, SEQ) { \
1138 asdl_seq *seq = (SEQ); /* avoid variable capture */ \
1139 for (_i = 0; _i < asdl_seq_LEN(seq); _i++) { \
1140 TYPE ## _ty elt = (TYPE ## _ty)asdl_seq_GET(seq, _i); \
1141 if (!compiler_visit_ ## TYPE((C), elt)) { \
1142 compiler_exit_scope(c); \
/*
 * Return true iff statement `s` is a docstring: a bare expression
 * statement whose value is a string literal.
 * NOTE(review): the `return 0;` for the non-Expr case is missing from
 * this extraction — kept verbatim.
 */
1149 compiler_isdocstring(stmt_ty s
)
1151 if (s
->kind
!= Expr_kind
)
1153 return s
->v
.Expr
.value
->kind
== Str_kind
;
1156 /* Compile a sequence of statements, checking for a docstring. */
1159 compiler_body(struct compiler
*c
, asdl_seq
*stmts
)
1164 if (!asdl_seq_LEN(stmts
))
1166 st
= (stmt_ty
)asdl_seq_GET(stmts
, 0);
1167 if (compiler_isdocstring(st
) && Py_OptimizeFlag
< 2) {
1168 /* don't generate docstrings if -OO */
1170 VISIT(c
, expr
, st
->v
.Expr
.value
);
1171 if (!compiler_nameop(c
, __doc__
, Store
))
1174 for (; i
< asdl_seq_LEN(stmts
); i
++)
1175 VISIT(c
, stmt
, (stmt_ty
)asdl_seq_GET(stmts
, i
));
1179 static PyCodeObject
*
1180 compiler_mod(struct compiler
*c
, mod_ty mod
)
1184 static PyObject
*module
;
1186 module
= PyString_InternFromString("<module>");
1190 /* Use 0 for firstlineno initially, will fixup in assemble(). */
1191 if (!compiler_enter_scope(c
, module
, mod
, 0))
1193 switch (mod
->kind
) {
1195 if (!compiler_body(c
, mod
->v
.Module
.body
)) {
1196 compiler_exit_scope(c
);
1200 case Interactive_kind
:
1201 c
->c_interactive
= 1;
1202 VISIT_SEQ_IN_SCOPE(c
, stmt
,
1203 mod
->v
.Interactive
.body
);
1205 case Expression_kind
:
1206 VISIT_IN_SCOPE(c
, expr
, mod
->v
.Expression
.body
);
1210 PyErr_SetString(PyExc_SystemError
,
1211 "suite should not be possible");
1214 PyErr_Format(PyExc_SystemError
,
1215 "module kind %d should not be possible",
1219 co
= assemble(c
, addNone
);
1220 compiler_exit_scope(c
);
1224 /* The test for LOCAL must come before the test for FREE in order to
1225 handle classes where name is both local and free. The local var is
1226 a method and the free var is a free var referenced within a method.
1230 get_ref_type(struct compiler
*c
, PyObject
*name
)
1232 int scope
= PyST_GetScope(c
->u
->u_ste
, name
);
1235 PyOS_snprintf(buf
, sizeof(buf
),
1236 "unknown scope for %.100s in %.100s(%s) in %s\n"
1237 "symbols: %s\nlocals: %s\nglobals: %s",
1238 PyString_AS_STRING(name
),
1239 PyString_AS_STRING(c
->u
->u_name
),
1240 PyObject_REPR(c
->u
->u_ste
->ste_id
),
1242 PyObject_REPR(c
->u
->u_ste
->ste_symbols
),
1243 PyObject_REPR(c
->u
->u_varnames
),
1244 PyObject_REPR(c
->u
->u_names
)
/*
 * Look up `name` in one of the u_cellvars/u_freevars dicts and return
 * its integer index.  The key is the same (name, type) tuple convention
 * used by list2dict/dictbytype.
 * NOTE(review): declarations of k/v, the NULL checks and the DECREF of k
 * are missing from this extraction — kept verbatim.
 */
1253 compiler_lookup_arg(PyObject
*dict
, PyObject
*name
)
1256 k
= PyTuple_Pack(2, name
, name
->ob_type
);
1259 v
= PyDict_GetItem(dict
, k
);
1263 return PyInt_AS_LONG(v
);
1267 compiler_make_closure(struct compiler
*c
, PyCodeObject
*co
, int args
)
1269 int i
, free
= PyCode_GetNumFree(co
);
1271 ADDOP_O(c
, LOAD_CONST
, (PyObject
*)co
, consts
);
1272 ADDOP_I(c
, MAKE_FUNCTION
, args
);
1275 for (i
= 0; i
< free
; ++i
) {
1276 /* Bypass com_addop_varname because it will generate
1277 LOAD_DEREF but LOAD_CLOSURE is needed.
1279 PyObject
*name
= PyTuple_GET_ITEM(co
->co_freevars
, i
);
1282 /* Special case: If a class contains a method with a
1283 free variable that has the same name as a method,
1284 the name will be considered free *and* local in the
1285 class. It should be handled by the closure, as
1286 well as by the normal name loookup logic.
1288 reftype
= get_ref_type(c
, name
);
1289 if (reftype
== CELL
)
1290 arg
= compiler_lookup_arg(c
->u
->u_cellvars
, name
);
1291 else /* (reftype == FREE) */
1292 arg
= compiler_lookup_arg(c
->u
->u_freevars
, name
);
1294 printf("lookup %s in %s %d %d\n"
1295 "freevars of %s: %s\n",
1296 PyObject_REPR(name
),
1297 PyString_AS_STRING(c
->u
->u_name
),
1299 PyString_AS_STRING(co
->co_name
),
1300 PyObject_REPR(co
->co_freevars
));
1301 Py_FatalError("compiler_make_closure()");
1303 ADDOP_I(c
, LOAD_CLOSURE
, arg
);
1305 ADDOP_I(c
, BUILD_TUPLE
, free
);
1306 ADDOP_O(c
, LOAD_CONST
, (PyObject
*)co
, consts
);
1307 ADDOP_I(c
, MAKE_CLOSURE
, args
);
1312 compiler_decorators(struct compiler
*c
, asdl_seq
* decos
)
1319 for (i
= 0; i
< asdl_seq_LEN(decos
); i
++) {
1320 VISIT(c
, expr
, (expr_ty
)asdl_seq_GET(decos
, i
));
1326 compiler_arguments(struct compiler
*c
, arguments_ty args
)
1329 int n
= asdl_seq_LEN(args
->args
);
1330 /* Correctly handle nested argument lists */
1331 for (i
= 0; i
< n
; i
++) {
1332 expr_ty arg
= (expr_ty
)asdl_seq_GET(args
->args
, i
);
1333 if (arg
->kind
== Tuple_kind
) {
1334 PyObject
*id
= PyString_FromFormat(".%d", i
);
1338 if (!compiler_nameop(c
, id
, Load
)) {
1343 VISIT(c
, expr
, arg
);
1350 compiler_function(struct compiler
*c
, stmt_ty s
)
1353 PyObject
*first_const
= Py_None
;
1354 arguments_ty args
= s
->v
.FunctionDef
.args
;
1355 asdl_seq
* decos
= s
->v
.FunctionDef
.decorator_list
;
1357 int i
, n
, docstring
;
1359 assert(s
->kind
== FunctionDef_kind
);
1361 if (!compiler_decorators(c
, decos
))
1364 VISIT_SEQ(c
, expr
, args
->defaults
);
1365 if (!compiler_enter_scope(c
, s
->v
.FunctionDef
.name
, (void *)s
,
1369 st
= (stmt_ty
)asdl_seq_GET(s
->v
.FunctionDef
.body
, 0);
1370 docstring
= compiler_isdocstring(st
);
1371 if (docstring
&& Py_OptimizeFlag
< 2)
1372 first_const
= st
->v
.Expr
.value
->v
.Str
.s
;
1373 if (compiler_add_o(c
, c
->u
->u_consts
, first_const
) < 0) {
1374 compiler_exit_scope(c
);
1378 /* unpack nested arguments */
1379 compiler_arguments(c
, args
);
1381 c
->u
->u_argcount
= asdl_seq_LEN(args
->args
);
1382 n
= asdl_seq_LEN(s
->v
.FunctionDef
.body
);
1383 /* if there was a docstring, we need to skip the first statement */
1384 for (i
= docstring
; i
< n
; i
++) {
1385 st
= (stmt_ty
)asdl_seq_GET(s
->v
.FunctionDef
.body
, i
);
1386 VISIT_IN_SCOPE(c
, stmt
, st
);
1388 co
= assemble(c
, 1);
1389 compiler_exit_scope(c
);
1393 compiler_make_closure(c
, co
, asdl_seq_LEN(args
->defaults
));
1396 for (i
= 0; i
< asdl_seq_LEN(decos
); i
++) {
1397 ADDOP_I(c
, CALL_FUNCTION
, 1);
1400 return compiler_nameop(c
, s
->v
.FunctionDef
.name
, Store
);
1404 compiler_class(struct compiler
*c
, stmt_ty s
)
1409 asdl_seq
* decos
= s
->v
.ClassDef
.decorator_list
;
1411 if (!compiler_decorators(c
, decos
))
1414 /* push class name on stack, needed by BUILD_CLASS */
1415 ADDOP_O(c
, LOAD_CONST
, s
->v
.ClassDef
.name
, consts
);
1416 /* push the tuple of base classes on the stack */
1417 n
= asdl_seq_LEN(s
->v
.ClassDef
.bases
);
1419 VISIT_SEQ(c
, expr
, s
->v
.ClassDef
.bases
);
1420 ADDOP_I(c
, BUILD_TUPLE
, n
);
1421 if (!compiler_enter_scope(c
, s
->v
.ClassDef
.name
, (void *)s
,
1424 Py_XDECREF(c
->u
->u_private
);
1425 c
->u
->u_private
= s
->v
.ClassDef
.name
;
1426 Py_INCREF(c
->u
->u_private
);
1427 str
= PyString_InternFromString("__name__");
1428 if (!str
|| !compiler_nameop(c
, str
, Load
)) {
1430 compiler_exit_scope(c
);
1435 str
= PyString_InternFromString("__module__");
1436 if (!str
|| !compiler_nameop(c
, str
, Store
)) {
1438 compiler_exit_scope(c
);
1443 if (!compiler_body(c
, s
->v
.ClassDef
.body
)) {
1444 compiler_exit_scope(c
);
1448 ADDOP_IN_SCOPE(c
, LOAD_LOCALS
);
1449 ADDOP_IN_SCOPE(c
, RETURN_VALUE
);
1450 co
= assemble(c
, 1);
1451 compiler_exit_scope(c
);
1455 compiler_make_closure(c
, co
, 0);
1458 ADDOP_I(c
, CALL_FUNCTION
, 0);
1459 ADDOP(c
, BUILD_CLASS
);
1460 /* apply decorators */
1461 for (i
= 0; i
< asdl_seq_LEN(decos
); i
++) {
1462 ADDOP_I(c
, CALL_FUNCTION
, 1);
1464 if (!compiler_nameop(c
, s
->v
.ClassDef
.name
, Store
))
1470 compiler_ifexp(struct compiler
*c
, expr_ty e
)
1472 basicblock
*end
, *next
;
1474 assert(e
->kind
== IfExp_kind
);
1475 end
= compiler_new_block(c
);
1478 next
= compiler_new_block(c
);
1481 VISIT(c
, expr
, e
->v
.IfExp
.test
);
1482 ADDOP_JABS(c
, POP_JUMP_IF_FALSE
, next
);
1483 VISIT(c
, expr
, e
->v
.IfExp
.body
);
1484 ADDOP_JREL(c
, JUMP_FORWARD
, end
);
1485 compiler_use_next_block(c
, next
);
1486 VISIT(c
, expr
, e
->v
.IfExp
.orelse
);
1487 compiler_use_next_block(c
, end
);
1492 compiler_lambda(struct compiler
*c
, expr_ty e
)
1495 static identifier name
;
1496 arguments_ty args
= e
->v
.Lambda
.args
;
1497 assert(e
->kind
== Lambda_kind
);
1500 name
= PyString_InternFromString("<lambda>");
1506 VISIT_SEQ(c
, expr
, args
->defaults
);
1507 if (!compiler_enter_scope(c
, name
, (void *)e
, e
->lineno
))
1510 /* unpack nested arguments */
1511 compiler_arguments(c
, args
);
1513 c
->u
->u_argcount
= asdl_seq_LEN(args
->args
);
1514 VISIT_IN_SCOPE(c
, expr
, e
->v
.Lambda
.body
);
1515 if (c
->u
->u_ste
->ste_generator
) {
1516 ADDOP_IN_SCOPE(c
, POP_TOP
);
1519 ADDOP_IN_SCOPE(c
, RETURN_VALUE
);
1521 co
= assemble(c
, 1);
1522 compiler_exit_scope(c
);
1526 compiler_make_closure(c
, co
, asdl_seq_LEN(args
->defaults
));
1533 compiler_print(struct compiler
*c
, stmt_ty s
)
1538 assert(s
->kind
== Print_kind
);
1539 n
= asdl_seq_LEN(s
->v
.Print
.values
);
1541 if (s
->v
.Print
.dest
) {
1542 VISIT(c
, expr
, s
->v
.Print
.dest
);
1545 for (i
= 0; i
< n
; i
++) {
1546 expr_ty e
= (expr_ty
)asdl_seq_GET(s
->v
.Print
.values
, i
);
1551 ADDOP(c
, PRINT_ITEM_TO
);
1555 ADDOP(c
, PRINT_ITEM
);
1558 if (s
->v
.Print
.nl
) {
1560 ADDOP(c
, PRINT_NEWLINE_TO
)
1562 ADDOP(c
, PRINT_NEWLINE
)
1570 compiler_if(struct compiler
*c
, stmt_ty s
)
1572 basicblock
*end
, *next
;
1574 assert(s
->kind
== If_kind
);
1575 end
= compiler_new_block(c
);
1579 constant
= expr_constant(s
->v
.If
.test
);
1580 /* constant = 0: "if 0"
1581 * constant = 1: "if 1", "if 2", ...
1582 * constant = -1: rest */
1583 if (constant
== 0) {
1585 VISIT_SEQ(c
, stmt
, s
->v
.If
.orelse
);
1586 } else if (constant
== 1) {
1587 VISIT_SEQ(c
, stmt
, s
->v
.If
.body
);
1589 if (s
->v
.If
.orelse
) {
1590 next
= compiler_new_block(c
);
1596 VISIT(c
, expr
, s
->v
.If
.test
);
1597 ADDOP_JABS(c
, POP_JUMP_IF_FALSE
, next
);
1598 VISIT_SEQ(c
, stmt
, s
->v
.If
.body
);
1599 ADDOP_JREL(c
, JUMP_FORWARD
, end
);
1600 if (s
->v
.If
.orelse
) {
1601 compiler_use_next_block(c
, next
);
1602 VISIT_SEQ(c
, stmt
, s
->v
.If
.orelse
);
1605 compiler_use_next_block(c
, end
);
1610 compiler_for(struct compiler
*c
, stmt_ty s
)
1612 basicblock
*start
, *cleanup
, *end
;
1614 start
= compiler_new_block(c
);
1615 cleanup
= compiler_new_block(c
);
1616 end
= compiler_new_block(c
);
1617 if (start
== NULL
|| end
== NULL
|| cleanup
== NULL
)
1619 ADDOP_JREL(c
, SETUP_LOOP
, end
);
1620 if (!compiler_push_fblock(c
, LOOP
, start
))
1622 VISIT(c
, expr
, s
->v
.For
.iter
);
1624 compiler_use_next_block(c
, start
);
1625 ADDOP_JREL(c
, FOR_ITER
, cleanup
);
1626 VISIT(c
, expr
, s
->v
.For
.target
);
1627 VISIT_SEQ(c
, stmt
, s
->v
.For
.body
);
1628 ADDOP_JABS(c
, JUMP_ABSOLUTE
, start
);
1629 compiler_use_next_block(c
, cleanup
);
1630 ADDOP(c
, POP_BLOCK
);
1631 compiler_pop_fblock(c
, LOOP
, start
);
1632 VISIT_SEQ(c
, stmt
, s
->v
.For
.orelse
);
1633 compiler_use_next_block(c
, end
);
1638 compiler_while(struct compiler
*c
, stmt_ty s
)
1640 basicblock
*loop
, *orelse
, *end
, *anchor
= NULL
;
1641 int constant
= expr_constant(s
->v
.While
.test
);
1643 if (constant
== 0) {
1644 if (s
->v
.While
.orelse
)
1645 VISIT_SEQ(c
, stmt
, s
->v
.While
.orelse
);
1648 loop
= compiler_new_block(c
);
1649 end
= compiler_new_block(c
);
1650 if (constant
== -1) {
1651 anchor
= compiler_new_block(c
);
1655 if (loop
== NULL
|| end
== NULL
)
1657 if (s
->v
.While
.orelse
) {
1658 orelse
= compiler_new_block(c
);
1665 ADDOP_JREL(c
, SETUP_LOOP
, end
);
1666 compiler_use_next_block(c
, loop
);
1667 if (!compiler_push_fblock(c
, LOOP
, loop
))
1669 if (constant
== -1) {
1670 VISIT(c
, expr
, s
->v
.While
.test
);
1671 ADDOP_JABS(c
, POP_JUMP_IF_FALSE
, anchor
);
1673 VISIT_SEQ(c
, stmt
, s
->v
.While
.body
);
1674 ADDOP_JABS(c
, JUMP_ABSOLUTE
, loop
);
1676 /* XXX should the two POP instructions be in a separate block
1677 if there is no else clause ?
1680 if (constant
== -1) {
1681 compiler_use_next_block(c
, anchor
);
1682 ADDOP(c
, POP_BLOCK
);
1684 compiler_pop_fblock(c
, LOOP
, loop
);
1685 if (orelse
!= NULL
) /* what if orelse is just pass? */
1686 VISIT_SEQ(c
, stmt
, s
->v
.While
.orelse
);
1687 compiler_use_next_block(c
, end
);
1693 compiler_continue(struct compiler
*c
)
1695 static const char LOOP_ERROR_MSG
[] = "'continue' not properly in loop";
1696 static const char IN_FINALLY_ERROR_MSG
[] =
1697 "'continue' not supported inside 'finally' clause";
1700 if (!c
->u
->u_nfblocks
)
1701 return compiler_error(c
, LOOP_ERROR_MSG
);
1702 i
= c
->u
->u_nfblocks
- 1;
1703 switch (c
->u
->u_fblock
[i
].fb_type
) {
1705 ADDOP_JABS(c
, JUMP_ABSOLUTE
, c
->u
->u_fblock
[i
].fb_block
);
1709 while (--i
>= 0 && c
->u
->u_fblock
[i
].fb_type
!= LOOP
) {
1710 /* Prevent continue anywhere under a finally
1711 even if hidden in a sub-try or except. */
1712 if (c
->u
->u_fblock
[i
].fb_type
== FINALLY_END
)
1713 return compiler_error(c
, IN_FINALLY_ERROR_MSG
);
1716 return compiler_error(c
, LOOP_ERROR_MSG
);
1717 ADDOP_JABS(c
, CONTINUE_LOOP
, c
->u
->u_fblock
[i
].fb_block
);
1720 return compiler_error(c
, IN_FINALLY_ERROR_MSG
);
1726 /* Code generated for "try: <body> finally: <finalbody>" is as follows:
1732 L: <code for finalbody>
1735 The special instructions use the block stack. Each block
1736 stack entry contains the instruction that created it (here
1737 SETUP_FINALLY), the level of the value stack at the time the
1738 block stack entry was created, and a label (here L).
1741 Pushes the current value stack level and the label
1742 onto the block stack.
1744 Pops an entry from the block stack, and pops the value
1745 stack until its level is the same as indicated on the
1746 block stack. (The label is ignored.)
1748 Pops a variable number of entries from the *value* stack
1749 and re-raises the exception they specify. The number of
1750 entries popped depends on the (pseudo) exception type.
1752 The block stack is unwound when an exception is raised:
1753 when a SETUP_FINALLY entry is found, the exception is pushed
1754 onto the value stack (and the exception condition is cleared),
1755 and the interpreter jumps to the label gotten from the block
1760 compiler_try_finally(struct compiler
*c
, stmt_ty s
)
1762 basicblock
*body
, *end
;
1763 body
= compiler_new_block(c
);
1764 end
= compiler_new_block(c
);
1765 if (body
== NULL
|| end
== NULL
)
1768 ADDOP_JREL(c
, SETUP_FINALLY
, end
);
1769 compiler_use_next_block(c
, body
);
1770 if (!compiler_push_fblock(c
, FINALLY_TRY
, body
))
1772 VISIT_SEQ(c
, stmt
, s
->v
.TryFinally
.body
);
1773 ADDOP(c
, POP_BLOCK
);
1774 compiler_pop_fblock(c
, FINALLY_TRY
, body
);
1776 ADDOP_O(c
, LOAD_CONST
, Py_None
, consts
);
1777 compiler_use_next_block(c
, end
);
1778 if (!compiler_push_fblock(c
, FINALLY_END
, end
))
1780 VISIT_SEQ(c
, stmt
, s
->v
.TryFinally
.finalbody
);
1781 ADDOP(c
, END_FINALLY
);
1782 compiler_pop_fblock(c
, FINALLY_END
, end
);
1788 Code generated for "try: S except E1, V1: S1 except E2, V2: S2 ...":
1789 (The contents of the value stack is shown in [], with the top
1790 at the right; 'tb' is trace-back info, 'val' the exception's
1791 associated value, and 'exc' the exception.)
1793 Value stack Label Instruction Argument
1799 [tb, val, exc] L1: DUP )
1800 [tb, val, exc, exc] <evaluate E1> )
1801 [tb, val, exc, exc, E1] COMPARE_OP EXC_MATCH ) only if E1
1802 [tb, val, exc, 1-or-0] POP_JUMP_IF_FALSE L2 )
1804 [tb, val] <assign to V1> (or POP if no V1)
1809 [tb, val, exc] L2: DUP
1810 .............................etc.......................
1812 [tb, val, exc] Ln+1: END_FINALLY # re-raise exception
1814 [] L0: <next statement>
1816 Of course, parts are not generated if Vi or Ei is not present.
1819 compiler_try_except(struct compiler
*c
, stmt_ty s
)
1821 basicblock
*body
, *orelse
, *except
, *end
;
1824 body
= compiler_new_block(c
);
1825 except
= compiler_new_block(c
);
1826 orelse
= compiler_new_block(c
);
1827 end
= compiler_new_block(c
);
1828 if (body
== NULL
|| except
== NULL
|| orelse
== NULL
|| end
== NULL
)
1830 ADDOP_JREL(c
, SETUP_EXCEPT
, except
);
1831 compiler_use_next_block(c
, body
);
1832 if (!compiler_push_fblock(c
, EXCEPT
, body
))
1834 VISIT_SEQ(c
, stmt
, s
->v
.TryExcept
.body
);
1835 ADDOP(c
, POP_BLOCK
);
1836 compiler_pop_fblock(c
, EXCEPT
, body
);
1837 ADDOP_JREL(c
, JUMP_FORWARD
, orelse
);
1838 n
= asdl_seq_LEN(s
->v
.TryExcept
.handlers
);
1839 compiler_use_next_block(c
, except
);
1840 for (i
= 0; i
< n
; i
++) {
1841 excepthandler_ty handler
= (excepthandler_ty
)asdl_seq_GET(
1842 s
->v
.TryExcept
.handlers
, i
);
1843 if (!handler
->v
.ExceptHandler
.type
&& i
< n
-1)
1844 return compiler_error(c
, "default 'except:' must be last");
1845 c
->u
->u_lineno_set
= false;
1846 c
->u
->u_lineno
= handler
->lineno
;
1847 except
= compiler_new_block(c
);
1850 if (handler
->v
.ExceptHandler
.type
) {
1852 VISIT(c
, expr
, handler
->v
.ExceptHandler
.type
);
1853 ADDOP_I(c
, COMPARE_OP
, PyCmp_EXC_MATCH
);
1854 ADDOP_JABS(c
, POP_JUMP_IF_FALSE
, except
);
1857 if (handler
->v
.ExceptHandler
.name
) {
1858 VISIT(c
, expr
, handler
->v
.ExceptHandler
.name
);
1864 VISIT_SEQ(c
, stmt
, handler
->v
.ExceptHandler
.body
);
1865 ADDOP_JREL(c
, JUMP_FORWARD
, end
);
1866 compiler_use_next_block(c
, except
);
1868 ADDOP(c
, END_FINALLY
);
1869 compiler_use_next_block(c
, orelse
);
1870 VISIT_SEQ(c
, stmt
, s
->v
.TryExcept
.orelse
);
1871 compiler_use_next_block(c
, end
);
1876 compiler_import_as(struct compiler
*c
, identifier name
, identifier asname
)
1878 /* The IMPORT_NAME opcode was already generated. This function
1879 merely needs to bind the result to a name.
1881 If there is a dot in name, we need to split it and emit a
1882 LOAD_ATTR for each name.
1884 const char *src
= PyString_AS_STRING(name
);
1885 const char *dot
= strchr(src
, '.');
1887 /* Consume the base module name to get the first attribute */
1890 /* NB src is only defined when dot != NULL */
1892 dot
= strchr(src
, '.');
1893 attr
= PyString_FromStringAndSize(src
,
1894 dot
? dot
- src
: strlen(src
));
1897 ADDOP_O(c
, LOAD_ATTR
, attr
, names
);
1902 return compiler_nameop(c
, asname
, Store
);
1906 compiler_import(struct compiler
*c
, stmt_ty s
)
1908 /* The Import node stores a module name like a.b.c as a single
1909 string. This is convenient for all cases except
1911 where we need to parse that string to extract the individual
1913 XXX Perhaps change the representation to make this case simpler?
1915 int i
, n
= asdl_seq_LEN(s
->v
.Import
.names
);
1917 for (i
= 0; i
< n
; i
++) {
1918 alias_ty alias
= (alias_ty
)asdl_seq_GET(s
->v
.Import
.names
, i
);
1922 if (c
->c_flags
&& (c
->c_flags
->cf_flags
& CO_FUTURE_ABSOLUTE_IMPORT
))
1923 level
= PyInt_FromLong(0);
1925 level
= PyInt_FromLong(-1);
1930 ADDOP_O(c
, LOAD_CONST
, level
, consts
);
1932 ADDOP_O(c
, LOAD_CONST
, Py_None
, consts
);
1933 ADDOP_NAME(c
, IMPORT_NAME
, alias
->name
, names
);
1935 if (alias
->asname
) {
1936 r
= compiler_import_as(c
, alias
->name
, alias
->asname
);
1941 identifier tmp
= alias
->name
;
1942 const char *base
= PyString_AS_STRING(alias
->name
);
1943 char *dot
= strchr(base
, '.');
1945 tmp
= PyString_FromStringAndSize(base
,
1947 r
= compiler_nameop(c
, tmp
, Store
);
1959 compiler_from_import(struct compiler
*c
, stmt_ty s
)
1961 int i
, n
= asdl_seq_LEN(s
->v
.ImportFrom
.names
);
1963 PyObject
*names
= PyTuple_New(n
);
1965 static PyObject
*empty_string
;
1967 if (!empty_string
) {
1968 empty_string
= PyString_FromString("");
1976 if (s
->v
.ImportFrom
.level
== 0 && c
->c_flags
&&
1977 !(c
->c_flags
->cf_flags
& CO_FUTURE_ABSOLUTE_IMPORT
))
1978 level
= PyInt_FromLong(-1);
1980 level
= PyInt_FromLong(s
->v
.ImportFrom
.level
);
1987 /* build up the names */
1988 for (i
= 0; i
< n
; i
++) {
1989 alias_ty alias
= (alias_ty
)asdl_seq_GET(s
->v
.ImportFrom
.names
, i
);
1990 Py_INCREF(alias
->name
);
1991 PyTuple_SET_ITEM(names
, i
, alias
->name
);
1994 if (s
->lineno
> c
->c_future
->ff_lineno
&& s
->v
.ImportFrom
.module
&&
1995 !strcmp(PyString_AS_STRING(s
->v
.ImportFrom
.module
), "__future__")) {
1998 return compiler_error(c
, "from __future__ imports must occur "
1999 "at the beginning of the file");
2002 ADDOP_O(c
, LOAD_CONST
, level
, consts
);
2004 ADDOP_O(c
, LOAD_CONST
, names
, consts
);
2006 if (s
->v
.ImportFrom
.module
) {
2007 ADDOP_NAME(c
, IMPORT_NAME
, s
->v
.ImportFrom
.module
, names
);
2010 ADDOP_NAME(c
, IMPORT_NAME
, empty_string
, names
);
2012 for (i
= 0; i
< n
; i
++) {
2013 alias_ty alias
= (alias_ty
)asdl_seq_GET(s
->v
.ImportFrom
.names
, i
);
2014 identifier store_name
;
2016 if (i
== 0 && *PyString_AS_STRING(alias
->name
) == '*') {
2018 ADDOP(c
, IMPORT_STAR
);
2022 ADDOP_NAME(c
, IMPORT_FROM
, alias
->name
, names
);
2023 store_name
= alias
->name
;
2025 store_name
= alias
->asname
;
2027 if (!compiler_nameop(c
, store_name
, Store
)) {
2032 /* remove imported module */
2038 compiler_assert(struct compiler
*c
, stmt_ty s
)
2040 static PyObject
*assertion_error
= NULL
;
2043 if (Py_OptimizeFlag
)
2045 if (assertion_error
== NULL
) {
2046 assertion_error
= PyString_InternFromString("AssertionError");
2047 if (assertion_error
== NULL
)
2050 if (s
->v
.Assert
.test
->kind
== Tuple_kind
&&
2051 asdl_seq_LEN(s
->v
.Assert
.test
->v
.Tuple
.elts
) > 0) {
2053 "assertion is always true, perhaps remove parentheses?";
2054 if (PyErr_WarnExplicit(PyExc_SyntaxWarning
, msg
, c
->c_filename
,
2055 c
->u
->u_lineno
, NULL
, NULL
) == -1)
2058 VISIT(c
, expr
, s
->v
.Assert
.test
);
2059 end
= compiler_new_block(c
);
2062 ADDOP_JABS(c
, POP_JUMP_IF_TRUE
, end
);
2063 ADDOP_O(c
, LOAD_GLOBAL
, assertion_error
, names
);
2064 if (s
->v
.Assert
.msg
) {
2065 VISIT(c
, expr
, s
->v
.Assert
.msg
);
2066 ADDOP_I(c
, RAISE_VARARGS
, 2);
2069 ADDOP_I(c
, RAISE_VARARGS
, 1);
2071 compiler_use_next_block(c
, end
);
2076 compiler_visit_stmt(struct compiler
*c
, stmt_ty s
)
2080 /* Always assign a lineno to the next instruction for a stmt. */
2081 c
->u
->u_lineno
= s
->lineno
;
2082 c
->u
->u_lineno_set
= false;
2085 case FunctionDef_kind
:
2086 return compiler_function(c
, s
);
2088 return compiler_class(c
, s
);
2090 if (c
->u
->u_ste
->ste_type
!= FunctionBlock
)
2091 return compiler_error(c
, "'return' outside function");
2092 if (s
->v
.Return
.value
) {
2093 VISIT(c
, expr
, s
->v
.Return
.value
);
2096 ADDOP_O(c
, LOAD_CONST
, Py_None
, consts
);
2097 ADDOP(c
, RETURN_VALUE
);
2100 VISIT_SEQ(c
, expr
, s
->v
.Delete
.targets
)
2103 n
= asdl_seq_LEN(s
->v
.Assign
.targets
);
2104 VISIT(c
, expr
, s
->v
.Assign
.value
);
2105 for (i
= 0; i
< n
; i
++) {
2109 (expr_ty
)asdl_seq_GET(s
->v
.Assign
.targets
, i
));
2112 case AugAssign_kind
:
2113 return compiler_augassign(c
, s
);
2115 return compiler_print(c
, s
);
2117 return compiler_for(c
, s
);
2119 return compiler_while(c
, s
);
2121 return compiler_if(c
, s
);
2124 if (s
->v
.Raise
.type
) {
2125 VISIT(c
, expr
, s
->v
.Raise
.type
);
2127 if (s
->v
.Raise
.inst
) {
2128 VISIT(c
, expr
, s
->v
.Raise
.inst
);
2130 if (s
->v
.Raise
.tback
) {
2131 VISIT(c
, expr
, s
->v
.Raise
.tback
);
2136 ADDOP_I(c
, RAISE_VARARGS
, n
);
2138 case TryExcept_kind
:
2139 return compiler_try_except(c
, s
);
2140 case TryFinally_kind
:
2141 return compiler_try_finally(c
, s
);
2143 return compiler_assert(c
, s
);
2145 return compiler_import(c
, s
);
2146 case ImportFrom_kind
:
2147 return compiler_from_import(c
, s
);
2149 VISIT(c
, expr
, s
->v
.Exec
.body
);
2150 if (s
->v
.Exec
.globals
) {
2151 VISIT(c
, expr
, s
->v
.Exec
.globals
);
2152 if (s
->v
.Exec
.locals
) {
2153 VISIT(c
, expr
, s
->v
.Exec
.locals
);
2158 ADDOP_O(c
, LOAD_CONST
, Py_None
, consts
);
2161 ADDOP(c
, EXEC_STMT
);
2166 if (c
->c_interactive
&& c
->c_nestlevel
<= 1) {
2167 VISIT(c
, expr
, s
->v
.Expr
.value
);
2168 ADDOP(c
, PRINT_EXPR
);
2170 else if (s
->v
.Expr
.value
->kind
!= Str_kind
&&
2171 s
->v
.Expr
.value
->kind
!= Num_kind
) {
2172 VISIT(c
, expr
, s
->v
.Expr
.value
);
2179 if (!compiler_in_loop(c
))
2180 return compiler_error(c
, "'break' outside loop");
2181 ADDOP(c
, BREAK_LOOP
);
2184 return compiler_continue(c
);
2186 return compiler_with(c
, s
);
2192 unaryop(unaryop_ty op
)
2196 return UNARY_INVERT
;
2200 return UNARY_POSITIVE
;
2202 return UNARY_NEGATIVE
;
2204 PyErr_Format(PyExc_SystemError
,
2205 "unary op %d should not be possible", op
);
2211 binop(struct compiler
*c
, operator_ty op
)
2217 return BINARY_SUBTRACT
;
2219 return BINARY_MULTIPLY
;
2221 if (c
->c_flags
&& c
->c_flags
->cf_flags
& CO_FUTURE_DIVISION
)
2222 return BINARY_TRUE_DIVIDE
;
2224 return BINARY_DIVIDE
;
2226 return BINARY_MODULO
;
2228 return BINARY_POWER
;
2230 return BINARY_LSHIFT
;
2232 return BINARY_RSHIFT
;
2240 return BINARY_FLOOR_DIVIDE
;
2242 PyErr_Format(PyExc_SystemError
,
2243 "binary op %d should not be possible", op
);
2267 return PyCmp_IS_NOT
;
2271 return PyCmp_NOT_IN
;
2278 inplace_binop(struct compiler
*c
, operator_ty op
)
2284 return INPLACE_SUBTRACT
;
2286 return INPLACE_MULTIPLY
;
2288 if (c
->c_flags
&& c
->c_flags
->cf_flags
& CO_FUTURE_DIVISION
)
2289 return INPLACE_TRUE_DIVIDE
;
2291 return INPLACE_DIVIDE
;
2293 return INPLACE_MODULO
;
2295 return INPLACE_POWER
;
2297 return INPLACE_LSHIFT
;
2299 return INPLACE_RSHIFT
;
2307 return INPLACE_FLOOR_DIVIDE
;
2309 PyErr_Format(PyExc_SystemError
,
2310 "inplace binary op %d should not be possible", op
);
2316 compiler_nameop(struct compiler
*c
, identifier name
, expr_context_ty ctx
)
2319 enum { OP_FAST
, OP_GLOBAL
, OP_DEREF
, OP_NAME
} optype
;
2321 PyObject
*dict
= c
->u
->u_names
;
2323 /* XXX AugStore isn't used anywhere! */
2325 mangled
= _Py_Mangle(c
->u
->u_private
, name
);
2331 scope
= PyST_GetScope(c
->u
->u_ste
, mangled
);
2334 dict
= c
->u
->u_freevars
;
2338 dict
= c
->u
->u_cellvars
;
2342 if (c
->u
->u_ste
->ste_type
== FunctionBlock
)
2345 case GLOBAL_IMPLICIT
:
2346 if (c
->u
->u_ste
->ste_type
== FunctionBlock
&&
2347 !c
->u
->u_ste
->ste_unoptimized
)
2350 case GLOBAL_EXPLICIT
:
2354 /* scope can be 0 */
2358 /* XXX Leave assert here, but handle __doc__ and the like better */
2359 assert(scope
|| PyString_AS_STRING(name
)[0] == '_');
2364 case Load
: op
= LOAD_DEREF
; break;
2365 case Store
: op
= STORE_DEREF
; break;
2370 PyErr_Format(PyExc_SyntaxError
,
2371 "can not delete variable '%s' referenced "
2373 PyString_AS_STRING(name
));
2378 PyErr_SetString(PyExc_SystemError
,
2379 "param invalid for deref variable");
2385 case Load
: op
= LOAD_FAST
; break;
2386 case Store
: op
= STORE_FAST
; break;
2387 case Del
: op
= DELETE_FAST
; break;
2393 PyErr_SetString(PyExc_SystemError
,
2394 "param invalid for local variable");
2397 ADDOP_O(c
, op
, mangled
, varnames
);
2402 case Load
: op
= LOAD_GLOBAL
; break;
2403 case Store
: op
= STORE_GLOBAL
; break;
2404 case Del
: op
= DELETE_GLOBAL
; break;
2410 PyErr_SetString(PyExc_SystemError
,
2411 "param invalid for global variable");
2417 case Load
: op
= LOAD_NAME
; break;
2418 case Store
: op
= STORE_NAME
; break;
2419 case Del
: op
= DELETE_NAME
; break;
2425 PyErr_SetString(PyExc_SystemError
,
2426 "param invalid for name variable");
2433 arg
= compiler_add_o(c
, dict
, mangled
);
2437 return compiler_addop_i(c
, op
, arg
);
2441 compiler_boolop(struct compiler
*c
, expr_ty e
)
2447 assert(e
->kind
== BoolOp_kind
);
2448 if (e
->v
.BoolOp
.op
== And
)
2449 jumpi
= JUMP_IF_FALSE_OR_POP
;
2451 jumpi
= JUMP_IF_TRUE_OR_POP
;
2452 end
= compiler_new_block(c
);
2455 s
= e
->v
.BoolOp
.values
;
2456 n
= asdl_seq_LEN(s
) - 1;
2458 for (i
= 0; i
< n
; ++i
) {
2459 VISIT(c
, expr
, (expr_ty
)asdl_seq_GET(s
, i
));
2460 ADDOP_JABS(c
, jumpi
, end
);
2462 VISIT(c
, expr
, (expr_ty
)asdl_seq_GET(s
, n
));
2463 compiler_use_next_block(c
, end
);
2468 compiler_list(struct compiler
*c
, expr_ty e
)
2470 int n
= asdl_seq_LEN(e
->v
.List
.elts
);
2471 if (e
->v
.List
.ctx
== Store
) {
2472 ADDOP_I(c
, UNPACK_SEQUENCE
, n
);
2474 VISIT_SEQ(c
, expr
, e
->v
.List
.elts
);
2475 if (e
->v
.List
.ctx
== Load
) {
2476 ADDOP_I(c
, BUILD_LIST
, n
);
2482 compiler_tuple(struct compiler
*c
, expr_ty e
)
2484 int n
= asdl_seq_LEN(e
->v
.Tuple
.elts
);
2485 if (e
->v
.Tuple
.ctx
== Store
) {
2486 ADDOP_I(c
, UNPACK_SEQUENCE
, n
);
2488 VISIT_SEQ(c
, expr
, e
->v
.Tuple
.elts
);
2489 if (e
->v
.Tuple
.ctx
== Load
) {
2490 ADDOP_I(c
, BUILD_TUPLE
, n
);
2496 compiler_compare(struct compiler
*c
, expr_ty e
)
2499 basicblock
*cleanup
= NULL
;
2501 /* XXX the logic can be cleaned up for 1 or multiple comparisons */
2502 VISIT(c
, expr
, e
->v
.Compare
.left
);
2503 n
= asdl_seq_LEN(e
->v
.Compare
.ops
);
2506 cleanup
= compiler_new_block(c
);
2507 if (cleanup
== NULL
)
2510 (expr_ty
)asdl_seq_GET(e
->v
.Compare
.comparators
, 0));
2512 for (i
= 1; i
< n
; i
++) {
2514 ADDOP(c
, ROT_THREE
);
2515 ADDOP_I(c
, COMPARE_OP
,
2516 cmpop((cmpop_ty
)(asdl_seq_GET(
2517 e
->v
.Compare
.ops
, i
- 1))));
2518 ADDOP_JABS(c
, JUMP_IF_FALSE_OR_POP
, cleanup
);
2522 (expr_ty
)asdl_seq_GET(e
->v
.Compare
.comparators
, i
));
2524 VISIT(c
, expr
, (expr_ty
)asdl_seq_GET(e
->v
.Compare
.comparators
, n
- 1));
2525 ADDOP_I(c
, COMPARE_OP
,
2526 cmpop((cmpop_ty
)(asdl_seq_GET(e
->v
.Compare
.ops
, n
- 1))));
2528 basicblock
*end
= compiler_new_block(c
);
2531 ADDOP_JREL(c
, JUMP_FORWARD
, end
);
2532 compiler_use_next_block(c
, cleanup
);
2535 compiler_use_next_block(c
, end
);
2541 compiler_call(struct compiler
*c
, expr_ty e
)
2545 VISIT(c
, expr
, e
->v
.Call
.func
);
2546 n
= asdl_seq_LEN(e
->v
.Call
.args
);
2547 VISIT_SEQ(c
, expr
, e
->v
.Call
.args
);
2548 if (e
->v
.Call
.keywords
) {
2549 VISIT_SEQ(c
, keyword
, e
->v
.Call
.keywords
);
2550 n
|= asdl_seq_LEN(e
->v
.Call
.keywords
) << 8;
2552 if (e
->v
.Call
.starargs
) {
2553 VISIT(c
, expr
, e
->v
.Call
.starargs
);
2556 if (e
->v
.Call
.kwargs
) {
2557 VISIT(c
, expr
, e
->v
.Call
.kwargs
);
2562 ADDOP_I(c
, CALL_FUNCTION
, n
);
2565 ADDOP_I(c
, CALL_FUNCTION_VAR
, n
);
2568 ADDOP_I(c
, CALL_FUNCTION_KW
, n
);
2571 ADDOP_I(c
, CALL_FUNCTION_VAR_KW
, n
);
2578 compiler_listcomp_generator(struct compiler
*c
, asdl_seq
*generators
,
2579 int gen_index
, expr_ty elt
)
2581 /* generate code for the iterator, then each of the ifs,
2582 and then write to the element */
2585 basicblock
*start
, *anchor
, *skip
, *if_cleanup
;
2588 start
= compiler_new_block(c
);
2589 skip
= compiler_new_block(c
);
2590 if_cleanup
= compiler_new_block(c
);
2591 anchor
= compiler_new_block(c
);
2593 if (start
== NULL
|| skip
== NULL
|| if_cleanup
== NULL
||
2597 l
= (comprehension_ty
)asdl_seq_GET(generators
, gen_index
);
2598 VISIT(c
, expr
, l
->iter
);
2600 compiler_use_next_block(c
, start
);
2601 ADDOP_JREL(c
, FOR_ITER
, anchor
);
2603 VISIT(c
, expr
, l
->target
);
2605 /* XXX this needs to be cleaned up...a lot! */
2606 n
= asdl_seq_LEN(l
->ifs
);
2607 for (i
= 0; i
< n
; i
++) {
2608 expr_ty e
= (expr_ty
)asdl_seq_GET(l
->ifs
, i
);
2610 ADDOP_JABS(c
, POP_JUMP_IF_FALSE
, if_cleanup
);
2614 if (++gen_index
< asdl_seq_LEN(generators
))
2615 if (!compiler_listcomp_generator(c
, generators
, gen_index
, elt
))
2618 /* only append after the last for generator */
2619 if (gen_index
>= asdl_seq_LEN(generators
)) {
2620 VISIT(c
, expr
, elt
);
2621 ADDOP_I(c
, LIST_APPEND
, gen_index
+1);
2623 compiler_use_next_block(c
, skip
);
2625 compiler_use_next_block(c
, if_cleanup
);
2626 ADDOP_JABS(c
, JUMP_ABSOLUTE
, start
);
2627 compiler_use_next_block(c
, anchor
);
2633 compiler_listcomp(struct compiler
*c
, expr_ty e
)
2635 assert(e
->kind
== ListComp_kind
);
2636 ADDOP_I(c
, BUILD_LIST
, 0);
2637 return compiler_listcomp_generator(c
, e
->v
.ListComp
.generators
, 0,
2642 compiler_genexp_generator(struct compiler
*c
,
2643 asdl_seq
*generators
, int gen_index
,
2646 /* generate code for the iterator, then each of the ifs,
2647 and then write to the element */
2649 comprehension_ty ge
;
2650 basicblock
*start
, *anchor
, *skip
, *if_cleanup
, *end
;
2653 start
= compiler_new_block(c
);
2654 skip
= compiler_new_block(c
);
2655 if_cleanup
= compiler_new_block(c
);
2656 anchor
= compiler_new_block(c
);
2657 end
= compiler_new_block(c
);
2659 if (start
== NULL
|| skip
== NULL
|| if_cleanup
== NULL
||
2660 anchor
== NULL
|| end
== NULL
)
2663 ge
= (comprehension_ty
)asdl_seq_GET(generators
, gen_index
);
2664 ADDOP_JREL(c
, SETUP_LOOP
, end
);
2665 if (!compiler_push_fblock(c
, LOOP
, start
))
2668 if (gen_index
== 0) {
2669 /* Receive outermost iter as an implicit argument */
2670 c
->u
->u_argcount
= 1;
2671 ADDOP_I(c
, LOAD_FAST
, 0);
2674 /* Sub-iter - calculate on the fly */
2675 VISIT(c
, expr
, ge
->iter
);
2678 compiler_use_next_block(c
, start
);
2679 ADDOP_JREL(c
, FOR_ITER
, anchor
);
2681 VISIT(c
, expr
, ge
->target
);
2683 /* XXX this needs to be cleaned up...a lot! */
2684 n
= asdl_seq_LEN(ge
->ifs
);
2685 for (i
= 0; i
< n
; i
++) {
2686 expr_ty e
= (expr_ty
)asdl_seq_GET(ge
->ifs
, i
);
2688 ADDOP_JABS(c
, POP_JUMP_IF_FALSE
, if_cleanup
);
2692 if (++gen_index
< asdl_seq_LEN(generators
))
2693 if (!compiler_genexp_generator(c
, generators
, gen_index
, elt
))
2696 /* only append after the last 'for' generator */
2697 if (gen_index
>= asdl_seq_LEN(generators
)) {
2698 VISIT(c
, expr
, elt
);
2699 ADDOP(c
, YIELD_VALUE
);
2702 compiler_use_next_block(c
, skip
);
2704 compiler_use_next_block(c
, if_cleanup
);
2705 ADDOP_JABS(c
, JUMP_ABSOLUTE
, start
);
2706 compiler_use_next_block(c
, anchor
);
2707 ADDOP(c
, POP_BLOCK
);
2708 compiler_pop_fblock(c
, LOOP
, start
);
2709 compiler_use_next_block(c
, end
);
2715 compiler_genexp(struct compiler
*c
, expr_ty e
)
2717 static identifier name
;
2719 expr_ty outermost_iter
= ((comprehension_ty
)
2720 (asdl_seq_GET(e
->v
.GeneratorExp
.generators
,
2724 name
= PyString_FromString("<genexpr>");
2729 if (!compiler_enter_scope(c
, name
, (void *)e
, e
->lineno
))
2731 compiler_genexp_generator(c
, e
->v
.GeneratorExp
.generators
, 0,
2732 e
->v
.GeneratorExp
.elt
);
2733 co
= assemble(c
, 1);
2734 compiler_exit_scope(c
);
2738 compiler_make_closure(c
, co
, 0);
2741 VISIT(c
, expr
, outermost_iter
);
2743 ADDOP_I(c
, CALL_FUNCTION
, 1);
2749 compiler_visit_keyword(struct compiler
*c
, keyword_ty k
)
2751 ADDOP_O(c
, LOAD_CONST
, k
->arg
, consts
);
2752 VISIT(c
, expr
, k
->value
);
2756 /* Test whether expression is constant. For constants, report
2757 whether they are true or false.
2759 Return values: 1 for true, 0 for false, -1 for non-constant.
2763 expr_constant(expr_ty e
)
2767 return PyObject_IsTrue(e
->v
.Num
.n
);
2769 return PyObject_IsTrue(e
->v
.Str
.s
);
2771 /* __debug__ is not assignable, so we can optimize
2772 * it away in if and while statements */
2773 if (strcmp(PyString_AS_STRING(e
->v
.Name
.id
),
2775 return ! Py_OptimizeFlag
;
2783 Implements the with statement from PEP 343.
2785 The semantics outlined in that PEP are as follows:
2790 It is implemented roughly as:
2793 exit = context.__exit__ # not calling it
2794 value = context.__enter__()
2796 VAR = value # if VAR present in the syntax
2799 if an exception was raised:
2800 exc = copy of (exception, instance, traceback)
2802 exc = (None, None, None)
2806 compiler_with(struct compiler
*c
, stmt_ty s
)
2808 basicblock
*block
, *finally
;
2810 assert(s
->kind
== With_kind
);
2812 block
= compiler_new_block(c
);
2813 finally
= compiler_new_block(c
);
2814 if (!block
|| !finally
)
2818 VISIT(c
, expr
, s
->v
.With
.context_expr
);
2819 ADDOP_JREL(c
, SETUP_WITH
, finally
);
2821 /* SETUP_WITH pushes a finally block. */
2822 compiler_use_next_block(c
, block
);
2823 if (!compiler_push_fblock(c
, FINALLY_TRY
, block
)) {
2827 if (s
->v
.With
.optional_vars
) {
2828 VISIT(c
, expr
, s
->v
.With
.optional_vars
);
2831 /* Discard result from context.__enter__() */
2836 VISIT_SEQ(c
, stmt
, s
->v
.With
.body
);
2838 /* End of try block; start the finally block */
2839 ADDOP(c
, POP_BLOCK
);
2840 compiler_pop_fblock(c
, FINALLY_TRY
, block
);
2842 ADDOP_O(c
, LOAD_CONST
, Py_None
, consts
);
2843 compiler_use_next_block(c
, finally
);
2844 if (!compiler_push_fblock(c
, FINALLY_END
, finally
))
2847 /* Finally block starts; context.__exit__ is on the stack under
2848 the exception or return information. Just issue our magic
2850 ADDOP(c
, WITH_CLEANUP
);
2852 /* Finally block ends. */
2853 ADDOP(c
, END_FINALLY
);
2854 compiler_pop_fblock(c
, FINALLY_END
, finally
);
2859 compiler_visit_expr(struct compiler
*c
, expr_ty e
)
2863 /* If expr e has a different line number than the last expr/stmt,
2864 set a new line number for the next instruction.
2866 if (e
->lineno
> c
->u
->u_lineno
) {
2867 c
->u
->u_lineno
= e
->lineno
;
2868 c
->u
->u_lineno_set
= false;
2872 return compiler_boolop(c
, e
);
2874 VISIT(c
, expr
, e
->v
.BinOp
.left
);
2875 VISIT(c
, expr
, e
->v
.BinOp
.right
);
2876 ADDOP(c
, binop(c
, e
->v
.BinOp
.op
));
2879 VISIT(c
, expr
, e
->v
.UnaryOp
.operand
);
2880 ADDOP(c
, unaryop(e
->v
.UnaryOp
.op
));
2883 return compiler_lambda(c
, e
);
2885 return compiler_ifexp(c
, e
);
2887 n
= asdl_seq_LEN(e
->v
.Dict
.values
);
2888 ADDOP_I(c
, BUILD_MAP
, (n
>0xFFFF ? 0xFFFF : n
));
2889 for (i
= 0; i
< n
; i
++) {
2891 (expr_ty
)asdl_seq_GET(e
->v
.Dict
.values
, i
));
2893 (expr_ty
)asdl_seq_GET(e
->v
.Dict
.keys
, i
));
2894 ADDOP(c
, STORE_MAP
);
2898 return compiler_listcomp(c
, e
);
2899 case GeneratorExp_kind
:
2900 return compiler_genexp(c
, e
);
2902 if (c
->u
->u_ste
->ste_type
!= FunctionBlock
)
2903 return compiler_error(c
, "'yield' outside function");
2904 if (e
->v
.Yield
.value
) {
2905 VISIT(c
, expr
, e
->v
.Yield
.value
);
2908 ADDOP_O(c
, LOAD_CONST
, Py_None
, consts
);
2910 ADDOP(c
, YIELD_VALUE
);
2913 return compiler_compare(c
, e
);
2915 return compiler_call(c
, e
);
2917 VISIT(c
, expr
, e
->v
.Repr
.value
);
2918 ADDOP(c
, UNARY_CONVERT
);
2921 ADDOP_O(c
, LOAD_CONST
, e
->v
.Num
.n
, consts
);
2924 ADDOP_O(c
, LOAD_CONST
, e
->v
.Str
.s
, consts
);
2926 /* The following exprs can be assignment targets. */
2927 case Attribute_kind
:
2928 if (e
->v
.Attribute
.ctx
!= AugStore
)
2929 VISIT(c
, expr
, e
->v
.Attribute
.value
);
2930 switch (e
->v
.Attribute
.ctx
) {
2933 /* Fall through to load */
2935 ADDOP_NAME(c
, LOAD_ATTR
, e
->v
.Attribute
.attr
, names
);
2939 /* Fall through to save */
2941 ADDOP_NAME(c
, STORE_ATTR
, e
->v
.Attribute
.attr
, names
);
2944 ADDOP_NAME(c
, DELETE_ATTR
, e
->v
.Attribute
.attr
, names
);
2948 PyErr_SetString(PyExc_SystemError
,
2949 "param invalid in attribute expression");
2953 case Subscript_kind
:
2954 switch (e
->v
.Subscript
.ctx
) {
2956 VISIT(c
, expr
, e
->v
.Subscript
.value
);
2957 VISIT_SLICE(c
, e
->v
.Subscript
.slice
, AugLoad
);
2960 VISIT(c
, expr
, e
->v
.Subscript
.value
);
2961 VISIT_SLICE(c
, e
->v
.Subscript
.slice
, Load
);
2964 VISIT_SLICE(c
, e
->v
.Subscript
.slice
, AugStore
);
2967 VISIT(c
, expr
, e
->v
.Subscript
.value
);
2968 VISIT_SLICE(c
, e
->v
.Subscript
.slice
, Store
);
2971 VISIT(c
, expr
, e
->v
.Subscript
.value
);
2972 VISIT_SLICE(c
, e
->v
.Subscript
.slice
, Del
);
2976 PyErr_SetString(PyExc_SystemError
,
2977 "param invalid in subscript expression");
2982 return compiler_nameop(c
, e
->v
.Name
.id
, e
->v
.Name
.ctx
);
2983 /* child nodes of List and Tuple will have expr_context set */
2985 return compiler_list(c
, e
);
2987 return compiler_tuple(c
, e
);
2993 compiler_augassign(struct compiler
*c
, stmt_ty s
)
2995 expr_ty e
= s
->v
.AugAssign
.target
;
2998 assert(s
->kind
== AugAssign_kind
);
3001 case Attribute_kind
:
3002 auge
= Attribute(e
->v
.Attribute
.value
, e
->v
.Attribute
.attr
,
3003 AugLoad
, e
->lineno
, e
->col_offset
, c
->c_arena
);
3006 VISIT(c
, expr
, auge
);
3007 VISIT(c
, expr
, s
->v
.AugAssign
.value
);
3008 ADDOP(c
, inplace_binop(c
, s
->v
.AugAssign
.op
));
3009 auge
->v
.Attribute
.ctx
= AugStore
;
3010 VISIT(c
, expr
, auge
);
3012 case Subscript_kind
:
3013 auge
= Subscript(e
->v
.Subscript
.value
, e
->v
.Subscript
.slice
,
3014 AugLoad
, e
->lineno
, e
->col_offset
, c
->c_arena
);
3017 VISIT(c
, expr
, auge
);
3018 VISIT(c
, expr
, s
->v
.AugAssign
.value
);
3019 ADDOP(c
, inplace_binop(c
, s
->v
.AugAssign
.op
));
3020 auge
->v
.Subscript
.ctx
= AugStore
;
3021 VISIT(c
, expr
, auge
);
3024 if (!compiler_nameop(c
, e
->v
.Name
.id
, Load
))
3026 VISIT(c
, expr
, s
->v
.AugAssign
.value
);
3027 ADDOP(c
, inplace_binop(c
, s
->v
.AugAssign
.op
));
3028 return compiler_nameop(c
, e
->v
.Name
.id
, Store
);
3030 PyErr_Format(PyExc_SystemError
,
3031 "invalid node type (%d) for augmented assignment",
3039 compiler_push_fblock(struct compiler
*c
, enum fblocktype t
, basicblock
*b
)
3041 struct fblockinfo
*f
;
3042 if (c
->u
->u_nfblocks
>= CO_MAXBLOCKS
) {
3043 PyErr_SetString(PyExc_SystemError
,
3044 "too many statically nested blocks");
3047 f
= &c
->u
->u_fblock
[c
->u
->u_nfblocks
++];
3054 compiler_pop_fblock(struct compiler
*c
, enum fblocktype t
, basicblock
*b
)
3056 struct compiler_unit
*u
= c
->u
;
3057 assert(u
->u_nfblocks
> 0);
3059 assert(u
->u_fblock
[u
->u_nfblocks
].fb_type
== t
);
3060 assert(u
->u_fblock
[u
->u_nfblocks
].fb_block
== b
);
3064 compiler_in_loop(struct compiler
*c
) {
3066 struct compiler_unit
*u
= c
->u
;
3067 for (i
= 0; i
< u
->u_nfblocks
; ++i
) {
3068 if (u
->u_fblock
[i
].fb_type
== LOOP
)
3073 /* Raises a SyntaxError and returns 0.
3074 If something goes wrong, a different exception may be raised.
3078 compiler_error(struct compiler
*c
, const char *errstr
)
3081 PyObject
*u
= NULL
, *v
= NULL
;
3083 loc
= PyErr_ProgramText(c
->c_filename
, c
->u
->u_lineno
);
3088 u
= Py_BuildValue("(ziOO)", c
->c_filename
, c
->u
->u_lineno
,
3092 v
= Py_BuildValue("(zO)", errstr
, u
);
3095 PyErr_SetObject(PyExc_SyntaxError
, v
);
3104 compiler_handle_subscr(struct compiler
*c
, const char *kind
,
3105 expr_context_ty ctx
)
3109 /* XXX this code is duplicated */
3111 case AugLoad
: /* fall through to Load */
3112 case Load
: op
= BINARY_SUBSCR
; break;
3113 case AugStore
:/* fall through to Store */
3114 case Store
: op
= STORE_SUBSCR
; break;
3115 case Del
: op
= DELETE_SUBSCR
; break;
3117 PyErr_Format(PyExc_SystemError
,
3118 "invalid %s kind %d in subscript\n",
3122 if (ctx
== AugLoad
) {
3123 ADDOP_I(c
, DUP_TOPX
, 2);
3125 else if (ctx
== AugStore
) {
3126 ADDOP(c
, ROT_THREE
);
3133 compiler_slice(struct compiler
*c
, slice_ty s
, expr_context_ty ctx
)
3136 assert(s
->kind
== Slice_kind
);
3138 /* only handles the cases where BUILD_SLICE is emitted */
3139 if (s
->v
.Slice
.lower
) {
3140 VISIT(c
, expr
, s
->v
.Slice
.lower
);
3143 ADDOP_O(c
, LOAD_CONST
, Py_None
, consts
);
3146 if (s
->v
.Slice
.upper
) {
3147 VISIT(c
, expr
, s
->v
.Slice
.upper
);
3150 ADDOP_O(c
, LOAD_CONST
, Py_None
, consts
);
3153 if (s
->v
.Slice
.step
) {
3155 VISIT(c
, expr
, s
->v
.Slice
.step
);
3157 ADDOP_I(c
, BUILD_SLICE
, n
);
3162 compiler_simple_slice(struct compiler
*c
, slice_ty s
, expr_context_ty ctx
)
3164 int op
= 0, slice_offset
= 0, stack_count
= 0;
3166 assert(s
->v
.Slice
.step
== NULL
);
3167 if (s
->v
.Slice
.lower
) {
3170 if (ctx
!= AugStore
)
3171 VISIT(c
, expr
, s
->v
.Slice
.lower
);
3173 if (s
->v
.Slice
.upper
) {
3176 if (ctx
!= AugStore
)
3177 VISIT(c
, expr
, s
->v
.Slice
.upper
);
3180 if (ctx
== AugLoad
) {
3181 switch (stack_count
) {
3182 case 0: ADDOP(c
, DUP_TOP
); break;
3183 case 1: ADDOP_I(c
, DUP_TOPX
, 2); break;
3184 case 2: ADDOP_I(c
, DUP_TOPX
, 3); break;
3187 else if (ctx
== AugStore
) {
3188 switch (stack_count
) {
3189 case 0: ADDOP(c
, ROT_TWO
); break;
3190 case 1: ADDOP(c
, ROT_THREE
); break;
3191 case 2: ADDOP(c
, ROT_FOUR
); break;
3196 case AugLoad
: /* fall through to Load */
3197 case Load
: op
= SLICE
; break;
3198 case AugStore
:/* fall through to Store */
3199 case Store
: op
= STORE_SLICE
; break;
3200 case Del
: op
= DELETE_SLICE
; break;
3203 PyErr_SetString(PyExc_SystemError
,
3204 "param invalid in simple slice");
3208 ADDOP(c
, op
+ slice_offset
);
3213 compiler_visit_nested_slice(struct compiler
*c
, slice_ty s
,
3214 expr_context_ty ctx
)
3218 ADDOP_O(c
, LOAD_CONST
, Py_Ellipsis
, consts
);
3221 return compiler_slice(c
, s
, ctx
);
3223 VISIT(c
, expr
, s
->v
.Index
.value
);
3227 PyErr_SetString(PyExc_SystemError
,
3228 "extended slice invalid in nested slice");
3235 compiler_visit_slice(struct compiler
*c
, slice_ty s
, expr_context_ty ctx
)
3237 char * kindname
= NULL
;
3241 if (ctx
!= AugStore
) {
3242 VISIT(c
, expr
, s
->v
.Index
.value
);
3246 kindname
= "ellipsis";
3247 if (ctx
!= AugStore
) {
3248 ADDOP_O(c
, LOAD_CONST
, Py_Ellipsis
, consts
);
3253 if (!s
->v
.Slice
.step
)
3254 return compiler_simple_slice(c
, s
, ctx
);
3255 if (ctx
!= AugStore
) {
3256 if (!compiler_slice(c
, s
, ctx
))
3261 kindname
= "extended slice";
3262 if (ctx
!= AugStore
) {
3263 int i
, n
= asdl_seq_LEN(s
->v
.ExtSlice
.dims
);
3264 for (i
= 0; i
< n
; i
++) {
3265 slice_ty sub
= (slice_ty
)asdl_seq_GET(
3266 s
->v
.ExtSlice
.dims
, i
);
3267 if (!compiler_visit_nested_slice(c
, sub
, ctx
))
3270 ADDOP_I(c
, BUILD_TUPLE
, n
);
3274 PyErr_Format(PyExc_SystemError
,
3275 "invalid subscript kind %d", s
->kind
);
3278 return compiler_handle_subscr(c
, kindname
, ctx
);
/* End of the compiler section, beginning of the assembler section */

/* do depth-first search of basic block graph, starting with block.
   post records the block indices in post-order.

   XXX must handle implicit jumps from one block to next
*/
3291 PyObject
*a_bytecode
; /* string containing bytecode */
3292 int a_offset
; /* offset into bytecode */
3293 int a_nblocks
; /* number of reachable blocks */
3294 basicblock
**a_postorder
; /* list of blocks in dfs postorder */
3295 PyObject
*a_lnotab
; /* string containing lnotab */
3296 int a_lnotab_off
; /* offset into lnotab */
3297 int a_lineno
; /* last lineno of emitted instruction */
3298 int a_lineno_off
; /* bytecode offset of last lineno */
3302 dfs(struct compiler
*c
, basicblock
*b
, struct assembler
*a
)
3305 struct instr
*instr
= NULL
;
3310 if (b
->b_next
!= NULL
)
3311 dfs(c
, b
->b_next
, a
);
3312 for (i
= 0; i
< b
->b_iused
; i
++) {
3313 instr
= &b
->b_instr
[i
];
3314 if (instr
->i_jrel
|| instr
->i_jabs
)
3315 dfs(c
, instr
->i_target
, a
);
3317 a
->a_postorder
[a
->a_nblocks
++] = b
;
3321 stackdepth_walk(struct compiler
*c
, basicblock
*b
, int depth
, int maxdepth
)
3323 int i
, target_depth
;
3324 struct instr
*instr
;
3325 if (b
->b_seen
|| b
->b_startdepth
>= depth
)
3328 b
->b_startdepth
= depth
;
3329 for (i
= 0; i
< b
->b_iused
; i
++) {
3330 instr
= &b
->b_instr
[i
];
3331 depth
+= opcode_stack_effect(instr
->i_opcode
, instr
->i_oparg
);
3332 if (depth
> maxdepth
)
3334 assert(depth
>= 0); /* invalid code or bug in stackdepth() */
3335 if (instr
->i_jrel
|| instr
->i_jabs
) {
3336 target_depth
= depth
;
3337 if (instr
->i_opcode
== FOR_ITER
) {
3338 target_depth
= depth
-2;
3339 } else if (instr
->i_opcode
== SETUP_FINALLY
||
3340 instr
->i_opcode
== SETUP_EXCEPT
) {
3341 target_depth
= depth
+3;
3342 if (target_depth
> maxdepth
)
3343 maxdepth
= target_depth
;
3345 maxdepth
= stackdepth_walk(c
, instr
->i_target
,
3346 target_depth
, maxdepth
);
3347 if (instr
->i_opcode
== JUMP_ABSOLUTE
||
3348 instr
->i_opcode
== JUMP_FORWARD
) {
3349 goto out
; /* remaining code is dead */
3354 maxdepth
= stackdepth_walk(c
, b
->b_next
, depth
, maxdepth
);
3360 /* Find the flow path that needs the largest stack. We assume that
3361 * cycles in the flow graph have no net effect on the stack depth.
3364 stackdepth(struct compiler
*c
)
3366 basicblock
*b
, *entryblock
;
3368 for (b
= c
->u
->u_blocks
; b
!= NULL
; b
= b
->b_list
) {
3370 b
->b_startdepth
= INT_MIN
;
3375 return stackdepth_walk(c
, entryblock
, 0, 0);
3379 assemble_init(struct assembler
*a
, int nblocks
, int firstlineno
)
3381 memset(a
, 0, sizeof(struct assembler
));
3382 a
->a_lineno
= firstlineno
;
3383 a
->a_bytecode
= PyString_FromStringAndSize(NULL
, DEFAULT_CODE_SIZE
);
3386 a
->a_lnotab
= PyString_FromStringAndSize(NULL
, DEFAULT_LNOTAB_SIZE
);
3389 if (nblocks
> PY_SIZE_MAX
/ sizeof(basicblock
*)) {
3393 a
->a_postorder
= (basicblock
**)PyObject_Malloc(
3394 sizeof(basicblock
*) * nblocks
);
3395 if (!a
->a_postorder
) {
3403 assemble_free(struct assembler
*a
)
3405 Py_XDECREF(a
->a_bytecode
);
3406 Py_XDECREF(a
->a_lnotab
);
3408 PyObject_Free(a
->a_postorder
);
3411 /* Return the size of a basic block in bytes. */
3414 instrsize(struct instr
*instr
)
3416 if (!instr
->i_hasarg
)
3417 return 1; /* 1 byte for the opcode*/
3418 if (instr
->i_oparg
> 0xffff)
3419 return 6; /* 1 (opcode) + 1 (EXTENDED_ARG opcode) + 2 (oparg) + 2(oparg extended) */
3420 return 3; /* 1 (opcode) + 2 (oparg) */
3424 blocksize(basicblock
*b
)
3429 for (i
= 0; i
< b
->b_iused
; i
++)
3430 size
+= instrsize(&b
->b_instr
[i
]);
3434 /* Appends a pair to the end of the line number table, a_lnotab, representing
3435 the instruction's bytecode offset and line number. See
3436 Objects/lnotab_notes.txt for the description of the line number table. */
3439 assemble_lnotab(struct assembler
*a
, struct instr
*i
)
3441 int d_bytecode
, d_lineno
;
3443 unsigned char *lnotab
;
3445 d_bytecode
= a
->a_offset
- a
->a_lineno_off
;
3446 d_lineno
= i
->i_lineno
- a
->a_lineno
;
3448 assert(d_bytecode
>= 0);
3449 assert(d_lineno
>= 0);
3451 if(d_bytecode
== 0 && d_lineno
== 0)
3454 if (d_bytecode
> 255) {
3455 int j
, nbytes
, ncodes
= d_bytecode
/ 255;
3456 nbytes
= a
->a_lnotab_off
+ 2 * ncodes
;
3457 len
= PyString_GET_SIZE(a
->a_lnotab
);
3458 if (nbytes
>= len
) {
3459 if ((len
<= INT_MAX
/ 2) && (len
* 2 < nbytes
))
3461 else if (len
<= INT_MAX
/ 2)
3467 if (_PyString_Resize(&a
->a_lnotab
, len
) < 0)
3470 lnotab
= (unsigned char *)
3471 PyString_AS_STRING(a
->a_lnotab
) + a
->a_lnotab_off
;
3472 for (j
= 0; j
< ncodes
; j
++) {
3476 d_bytecode
-= ncodes
* 255;
3477 a
->a_lnotab_off
+= ncodes
* 2;
3479 assert(d_bytecode
<= 255);
3480 if (d_lineno
> 255) {
3481 int j
, nbytes
, ncodes
= d_lineno
/ 255;
3482 nbytes
= a
->a_lnotab_off
+ 2 * ncodes
;
3483 len
= PyString_GET_SIZE(a
->a_lnotab
);
3484 if (nbytes
>= len
) {
3485 if ((len
<= INT_MAX
/ 2) && len
* 2 < nbytes
)
3487 else if (len
<= INT_MAX
/ 2)
3493 if (_PyString_Resize(&a
->a_lnotab
, len
) < 0)
3496 lnotab
= (unsigned char *)
3497 PyString_AS_STRING(a
->a_lnotab
) + a
->a_lnotab_off
;
3498 *lnotab
++ = d_bytecode
;
3501 for (j
= 1; j
< ncodes
; j
++) {
3505 d_lineno
-= ncodes
* 255;
3506 a
->a_lnotab_off
+= ncodes
* 2;
3509 len
= PyString_GET_SIZE(a
->a_lnotab
);
3510 if (a
->a_lnotab_off
+ 2 >= len
) {
3511 if (_PyString_Resize(&a
->a_lnotab
, len
* 2) < 0)
3514 lnotab
= (unsigned char *)
3515 PyString_AS_STRING(a
->a_lnotab
) + a
->a_lnotab_off
;
3517 a
->a_lnotab_off
+= 2;
3519 *lnotab
++ = d_bytecode
;
3520 *lnotab
++ = d_lineno
;
3522 else { /* First line of a block; def stmt, etc. */
3524 *lnotab
++ = d_lineno
;
3526 a
->a_lineno
= i
->i_lineno
;
3527 a
->a_lineno_off
= a
->a_offset
;
3532 Extend the bytecode with a new instruction.
3533 Update lnotab if necessary.
3537 assemble_emit(struct assembler
*a
, struct instr
*i
)
3539 int size
, arg
= 0, ext
= 0;
3540 Py_ssize_t len
= PyString_GET_SIZE(a
->a_bytecode
);
3543 size
= instrsize(i
);
3548 if (i
->i_lineno
&& !assemble_lnotab(a
, i
))
3550 if (a
->a_offset
+ size
>= len
) {
3551 if (len
> PY_SSIZE_T_MAX
/ 2)
3553 if (_PyString_Resize(&a
->a_bytecode
, len
* 2) < 0)
3556 code
= PyString_AS_STRING(a
->a_bytecode
) + a
->a_offset
;
3557 a
->a_offset
+= size
;
3559 assert(i
->i_hasarg
);
3560 *code
++ = (char)EXTENDED_ARG
;
3561 *code
++ = ext
& 0xff;
3565 *code
++ = i
->i_opcode
;
3567 assert(size
== 3 || size
== 6);
3568 *code
++ = arg
& 0xff;
3575 assemble_jump_offsets(struct assembler
*a
, struct compiler
*c
)
3578 int bsize
, totsize
, extended_arg_count
= 0, last_extended_arg_count
;
3581 /* Compute the size of each block and fixup jump args.
3582 Replace block pointer with position in bytecode. */
3585 for (i
= a
->a_nblocks
- 1; i
>= 0; i
--) {
3586 b
= a
->a_postorder
[i
];
3587 bsize
= blocksize(b
);
3588 b
->b_offset
= totsize
;
3591 last_extended_arg_count
= extended_arg_count
;
3592 extended_arg_count
= 0;
3593 for (b
= c
->u
->u_blocks
; b
!= NULL
; b
= b
->b_list
) {
3594 bsize
= b
->b_offset
;
3595 for (i
= 0; i
< b
->b_iused
; i
++) {
3596 struct instr
*instr
= &b
->b_instr
[i
];
3597 /* Relative jumps are computed relative to
3598 the instruction pointer after fetching
3599 the jump instruction.
3601 bsize
+= instrsize(instr
);
3603 instr
->i_oparg
= instr
->i_target
->b_offset
;
3604 else if (instr
->i_jrel
) {
3605 int delta
= instr
->i_target
->b_offset
- bsize
;
3606 instr
->i_oparg
= delta
;
3610 if (instr
->i_oparg
> 0xffff)
3611 extended_arg_count
++;
3615 /* XXX: This is an awful hack that could hurt performance, but
3616 on the bright side it should work until we come up
3617 with a better solution.
3619 The issue is that in the first loop blocksize() is called
3620 which calls instrsize() which requires i_oparg be set
3621 appropriately. There is a bootstrap problem because
3622 i_oparg is calculated in the second loop above.
3624 So we loop until we stop seeing new EXTENDED_ARGs.
3625 The only EXTENDED_ARGs that could be popping up are
3626 ones in jump instructions. So this should converge
3629 } while (last_extended_arg_count
!= extended_arg_count
);
3633 dict_keys_inorder(PyObject
*dict
, int offset
)
3635 PyObject
*tuple
, *k
, *v
;
3636 Py_ssize_t i
, pos
= 0, size
= PyDict_Size(dict
);
3638 tuple
= PyTuple_New(size
);
3641 while (PyDict_Next(dict
, &pos
, &k
, &v
)) {
3642 i
= PyInt_AS_LONG(v
);
3643 /* The keys of the dictionary are tuples. (see compiler_add_o)
3644 The object we want is always first, though. */
3645 k
= PyTuple_GET_ITEM(k
, 0);
3647 assert((i
- offset
) < size
);
3648 assert((i
- offset
) >= 0);
3649 PyTuple_SET_ITEM(tuple
, i
- offset
, k
);
3655 compute_code_flags(struct compiler
*c
)
3657 PySTEntryObject
*ste
= c
->u
->u_ste
;
3659 if (ste
->ste_type
!= ModuleBlock
)
3660 flags
|= CO_NEWLOCALS
;
3661 if (ste
->ste_type
== FunctionBlock
) {
3662 if (!ste
->ste_unoptimized
)
3663 flags
|= CO_OPTIMIZED
;
3664 if (ste
->ste_nested
)
3666 if (ste
->ste_generator
)
3667 flags
|= CO_GENERATOR
;
3668 if (ste
->ste_varargs
)
3669 flags
|= CO_VARARGS
;
3670 if (ste
->ste_varkeywords
)
3671 flags
|= CO_VARKEYWORDS
;
3674 /* (Only) inherit compilerflags in PyCF_MASK */
3675 flags
|= (c
->c_flags
->cf_flags
& PyCF_MASK
);
3677 n
= PyDict_Size(c
->u
->u_freevars
);
3681 n
= PyDict_Size(c
->u
->u_cellvars
);
3692 static PyCodeObject
*
3693 makecode(struct compiler
*c
, struct assembler
*a
)
3696 PyCodeObject
*co
= NULL
;
3697 PyObject
*consts
= NULL
;
3698 PyObject
*names
= NULL
;
3699 PyObject
*varnames
= NULL
;
3700 PyObject
*filename
= NULL
;
3701 PyObject
*name
= NULL
;
3702 PyObject
*freevars
= NULL
;
3703 PyObject
*cellvars
= NULL
;
3704 PyObject
*bytecode
= NULL
;
3707 tmp
= dict_keys_inorder(c
->u
->u_consts
, 0);
3710 consts
= PySequence_List(tmp
); /* optimize_code requires a list */
3713 names
= dict_keys_inorder(c
->u
->u_names
, 0);
3714 varnames
= dict_keys_inorder(c
->u
->u_varnames
, 0);
3715 if (!consts
|| !names
|| !varnames
)
3718 cellvars
= dict_keys_inorder(c
->u
->u_cellvars
, 0);
3721 freevars
= dict_keys_inorder(c
->u
->u_freevars
, PyTuple_Size(cellvars
));
3724 filename
= PyString_FromString(c
->c_filename
);
3728 nlocals
= PyDict_Size(c
->u
->u_varnames
);
3729 flags
= compute_code_flags(c
);
3733 bytecode
= PyCode_Optimize(a
->a_bytecode
, consts
, names
, a
->a_lnotab
);
3737 tmp
= PyList_AsTuple(consts
); /* PyCode_New requires a tuple */
3743 co
= PyCode_New(c
->u
->u_argcount
, nlocals
, stackdepth(c
), flags
,
3744 bytecode
, consts
, names
, varnames
,
3746 filename
, c
->u
->u_name
,
3747 c
->u
->u_firstlineno
,
3752 Py_XDECREF(varnames
);
3753 Py_XDECREF(filename
);
3755 Py_XDECREF(freevars
);
3756 Py_XDECREF(cellvars
);
3757 Py_XDECREF(bytecode
);
3762 /* For debugging purposes only */
3765 dump_instr(const struct instr
*i
)
3767 const char *jrel
= i
->i_jrel
? "jrel " : "";
3768 const char *jabs
= i
->i_jabs
? "jabs " : "";
3773 sprintf(arg
, "arg: %d ", i
->i_oparg
);
3775 fprintf(stderr
, "line: %d, opcode: %d %s%s%s\n",
3776 i
->i_lineno
, i
->i_opcode
, arg
, jabs
, jrel
);
3780 dump_basicblock(const basicblock
*b
)
3782 const char *seen
= b
->b_seen
? "seen " : "";
3783 const char *b_return
= b
->b_return
? "return " : "";
3784 fprintf(stderr
, "used: %d, depth: %d, offset: %d %s%s\n",
3785 b
->b_iused
, b
->b_startdepth
, b
->b_offset
, seen
, b_return
);
3788 for (i
= 0; i
< b
->b_iused
; i
++) {
3789 fprintf(stderr
, " [%02d] ", i
);
3790 dump_instr(b
->b_instr
+ i
);
3796 static PyCodeObject
*
3797 assemble(struct compiler
*c
, int addNone
)
3799 basicblock
*b
, *entryblock
;
3802 PyCodeObject
*co
= NULL
;
3804 /* Make sure every block that falls off the end returns None.
3805 XXX NEXT_BLOCK() isn't quite right, because if the last
3806 block ends with a jump or return b_next shouldn't set.
3808 if (!c
->u
->u_curblock
->b_return
) {
3811 ADDOP_O(c
, LOAD_CONST
, Py_None
, consts
);
3812 ADDOP(c
, RETURN_VALUE
);
3817 for (b
= c
->u
->u_blocks
; b
!= NULL
; b
= b
->b_list
) {
3822 /* Set firstlineno if it wasn't explicitly set. */
3823 if (!c
->u
->u_firstlineno
) {
3824 if (entryblock
&& entryblock
->b_instr
)
3825 c
->u
->u_firstlineno
= entryblock
->b_instr
->i_lineno
;
3827 c
->u
->u_firstlineno
= 1;
3829 if (!assemble_init(&a
, nblocks
, c
->u
->u_firstlineno
))
3831 dfs(c
, entryblock
, &a
);
3833 /* Can't modify the bytecode after computing jump offsets. */
3834 assemble_jump_offsets(&a
, c
);
3836 /* Emit code in reverse postorder from dfs. */
3837 for (i
= a
.a_nblocks
- 1; i
>= 0; i
--) {
3838 b
= a
.a_postorder
[i
];
3839 for (j
= 0; j
< b
->b_iused
; j
++)
3840 if (!assemble_emit(&a
, &b
->b_instr
[j
]))
3844 if (_PyString_Resize(&a
.a_lnotab
, a
.a_lnotab_off
) < 0)
3846 if (_PyString_Resize(&a
.a_bytecode
, a
.a_offset
) < 0)
3849 co
= makecode(c
, &a
);