/*
 * This file compiles an abstract syntax tree (AST) into Python bytecode.
 *
 * The primary entry point is PyAST_Compile(), which returns a
 * PyCodeObject.  The compiler makes several passes to build the code
 * object:
 *   1. Checks for future statements.  See future.c
 *   2. Builds a symbol table.  See symtable.c.
 *   3. Generates code for basic blocks.  See compiler_mod() in this file.
 *   4. Assembles the basic blocks into final code.  See assemble() in
 *      this file.
 *
 * Note that compiler_mod() suggests module, but the module ast type
 * (mod_ty) has cases for expressions and interactive statements.
 *
 * CAUTION: The VISIT_* macros abort the current function when they
 * encounter a problem.  So don't invoke them when there is memory
 * which needs to be released.  Code blocks are OK, as the compiler
 * structure takes care of releasing those.
 */
#include "Python-ast.h"

int Py_OptimizeFlag = 0;

/*
   opcode_stack_effect() function should be reviewed since stack depth bugs
   could be really hard to find later.

   Dead code is being generated (i.e. after unconditional jumps).
   XXX(nnorwitz): not sure this is still true
*/

#define DEFAULT_BLOCK_SIZE 16
#define DEFAULT_BLOCKS 8
#define DEFAULT_CODE_SIZE 128
#define DEFAULT_LNOTAB_SIZE 16
struct instr {
    unsigned i_jabs : 1;
    unsigned i_jrel : 1;
    unsigned i_hasarg : 1;
    unsigned char i_opcode;
    int i_oparg;
    struct basicblock_ *i_target; /* target block (if jump instruction) */
    int i_lineno;
};

typedef struct basicblock_ {
    /* Each basicblock in a compilation unit is linked via b_list in the
       reverse order that the blocks are allocated.  b_list points to the next
       block, not to be confused with b_next, which is next by control flow. */
    struct basicblock_ *b_list;
    /* number of instructions used */
    int b_iused;
    /* length of instruction array (b_instr) */
    int b_ialloc;
    /* pointer to an array of instructions, initially NULL */
    struct instr *b_instr;
    /* If b_next is non-NULL, it is a pointer to the next
       block reached by normal control flow. */
    struct basicblock_ *b_next;
    /* b_seen is used to perform a DFS of basicblocks. */
    unsigned b_seen : 1;
    /* b_return is true if a RETURN_VALUE opcode is inserted. */
    unsigned b_return : 1;
    /* depth of stack upon entry of block, computed by stackdepth() */
    int b_startdepth;
    /* instruction offset for block, computed by assemble_jump_offsets() */
    int b_offset;
} basicblock;
/* fblockinfo tracks the current frame block.

   A frame block is used to handle loops, try/except, and try/finally.
   It's called a frame block to distinguish it from a basic block in the
   compiler IR.
*/

enum fblocktype { LOOP, EXCEPT, FINALLY_TRY, FINALLY_END };

struct fblockinfo {
    enum fblocktype fb_type;
    basicblock *fb_block;
};

/* The following items change on entry and exit of code blocks.
   They must be saved and restored when returning to a block.
*/
struct compiler_unit {
    PySTEntryObject *u_ste;

    PyObject *u_name;
    /* The following fields are dicts that map objects to
       the index of them in co_XXX.  The index is used as
       the argument for opcodes that refer to those collections.
    */
    PyObject *u_consts;     /* all constants */
    PyObject *u_names;      /* all names */
    PyObject *u_varnames;   /* local variables */
    PyObject *u_cellvars;   /* cell variables */
    PyObject *u_freevars;   /* free variables */

    PyObject *u_private;    /* for private name mangling */

    int u_argcount;         /* number of arguments for block */
    /* Pointer to the most recently allocated block.  By following b_list
       members, you can reach all early allocated blocks. */
    basicblock *u_blocks;
    basicblock *u_curblock; /* pointer to current block */
    int u_tmpname;          /* temporary variables for list comps */

    int u_nfblocks;
    struct fblockinfo u_fblock[CO_MAXBLOCKS];

    int u_firstlineno;      /* the first lineno of the block */
    int u_lineno;           /* the lineno for the current stmt */
    bool u_lineno_set;      /* boolean to indicate whether instr
                               has been generated with current lineno */
};
/* This struct captures the global state of a compilation.

   The u pointer points to the current compilation unit, while units
   for enclosing blocks are stored in c_stack.  The u and c_stack are
   managed by compiler_enter_scope() and compiler_exit_scope().
*/

struct compiler {
    const char *c_filename;
    struct symtable *c_st;
    PyFutureFeatures *c_future; /* pointer to module's __future__ */
    PyCompilerFlags *c_flags;

    int c_interactive;          /* true if in interactive mode */
    int c_nestlevel;

    struct compiler_unit *u;    /* compiler state for current block */
    PyObject *c_stack;          /* Python list holding compiler_unit ptrs */
    char *c_encoding;           /* source encoding (a borrowed reference) */
    PyArena *c_arena;           /* pointer to memory allocation arena */
};

struct assembler {
    PyObject *a_bytecode;       /* string containing bytecode */
    int a_offset;               /* offset into bytecode */
    int a_nblocks;              /* number of reachable blocks */
    basicblock **a_postorder;   /* list of blocks in dfs postorder */
    PyObject *a_lnotab;         /* string containing lnotab */
    int a_lnotab_off;           /* offset into lnotab */
    int a_lineno;               /* last lineno of emitted instruction */
    int a_lineno_off;           /* bytecode offset of last lineno */
};
static int compiler_enter_scope(struct compiler *, identifier, void *, int);
static void compiler_free(struct compiler *);
static basicblock *compiler_new_block(struct compiler *);
static int compiler_next_instr(struct compiler *, basicblock *);
static int compiler_addop(struct compiler *, int);
static int compiler_addop_o(struct compiler *, int, PyObject *, PyObject *);
static int compiler_addop_i(struct compiler *, int, int);
static int compiler_addop_j(struct compiler *, int, basicblock *, int);
static basicblock *compiler_use_new_block(struct compiler *);
static int compiler_error(struct compiler *, const char *);
static int compiler_nameop(struct compiler *, identifier, expr_context_ty);

static PyCodeObject *compiler_mod(struct compiler *, mod_ty);
static int compiler_visit_stmt(struct compiler *, stmt_ty);
static int compiler_visit_keyword(struct compiler *, keyword_ty);
static int compiler_visit_expr(struct compiler *, expr_ty);
static int compiler_augassign(struct compiler *, stmt_ty);
static int compiler_visit_slice(struct compiler *, slice_ty,
                                expr_context_ty);

static int compiler_push_fblock(struct compiler *, enum fblocktype,
                                basicblock *);
static void compiler_pop_fblock(struct compiler *, enum fblocktype,
                                basicblock *);

static int inplace_binop(struct compiler *, operator_ty);
static int expr_constant(expr_ty e);

static int compiler_with(struct compiler *, stmt_ty);

static PyCodeObject *assemble(struct compiler *, int addNone);
static PyObject *__doc__;
PyObject *
_Py_Mangle(PyObject *privateobj, PyObject *ident)
{
    /* Name mangling: __private becomes _classname__private.
       This is independent from how the name is used. */
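    /* Illustration (not in the original source): inside "class Spam", the
       identifier "__egg" is stored as "_Spam__egg", while "__init__" (which
       also ends in two underscores) and names in a class called "_" or "__"
       are returned unchanged, as the checks below show. */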
    const char *p, *name = PyString_AsString(ident);

    if (privateobj == NULL || name == NULL || name[0] != '_' ||

    p = PyString_AsString(privateobj);

    if (name[nlen-1] == '_' && name[nlen-2] == '_') {
        return ident; /* Don't mangle __whatever__ */

    /* Strip leading underscores from class name */

        return ident; /* Don't mangle if class is just underscores */

    ident = PyString_FromStringAndSize(NULL, 1 + nlen + plen);

    /* ident = "_" + p[:plen] + name # i.e. 1+plen+nlen bytes */
    buffer = PyString_AS_STRING(ident);

    strncpy(buffer+1, p, plen);
    strcpy(buffer+1+plen, name);
compiler_init(struct compiler *c)
{
    memset(c, 0, sizeof(struct compiler));

    c->c_stack = PyList_New(0);

PyAST_Compile(mod_ty mod, const char *filename, PyCompilerFlags *flags,
              PyArena *arena)
{
    PyCodeObject *co = NULL;
    PyCompilerFlags local_flags;

    __doc__ = PyString_InternFromString("__doc__");

    if (!compiler_init(&c))

    c.c_filename = filename;

    c.c_future = PyFuture_FromAST(mod, filename);
    if (c.c_future == NULL)

    local_flags.cf_flags = 0;
    flags = &local_flags;

    merged = c.c_future->ff_features | flags->cf_flags;
    c.c_future->ff_features = merged;
    flags->cf_flags = merged;

    c.c_st = PySymtable_Build(mod, filename, c.c_future);
    if (c.c_st == NULL) {
        if (!PyErr_Occurred())
            PyErr_SetString(PyExc_SystemError, "no symtable");

    /* XXX initialize to NULL for now, need to handle */

    co = compiler_mod(&c, mod);

    assert(co || PyErr_Occurred());
PyNode_Compile(struct _node *n, const char *filename)
{
    PyCodeObject *co = NULL;

    PyArena *arena = PyArena_New();

    mod = PyAST_FromNode(n, NULL, filename, arena);

    co = PyAST_Compile(mod, filename, NULL, arena);

compiler_free(struct compiler *c)
{
    PySymtable_Free(c->c_st);

    PyObject_Free(c->c_future);
    Py_DECREF(c->c_stack);

list2dict(PyObject *list)
{
    PyObject *dict = PyDict_New();
    if (!dict) return NULL;

    n = PyList_Size(list);
    for (i = 0; i < n; i++) {
        v = PyInt_FromLong(i);

        k = PyList_GET_ITEM(list, i);
        k = PyTuple_Pack(2, k, k->ob_type);
        if (k == NULL || PyDict_SetItem(dict, k, v) < 0) {
/* Return new dict containing names from src that match scope(s).

   src is a symbol table dictionary.  If the scope of a name matches
   either scope_type or flag is set, insert it into the new dict.  The
   values are integers, starting at offset and increasing by one for
   each name.
*/
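/* Illustration (not in the original source): with scope_type == CELL,
   flag == 0 and offset == 0, a symbol table whose entries mark "x" and "y"
   as cell variables yields a dict mapping the keys (x, <type 'str'>) and
   (y, <type 'str'>) to 0 and 1, in whatever order PyDict_Next() visits
   them. */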
dictbytype(PyObject *src, int scope_type, int flag, int offset)
{
    Py_ssize_t pos = 0, i = offset, scope;
    PyObject *k, *v, *dest = PyDict_New();

    while (PyDict_Next(src, &pos, &k, &v)) {
        /* XXX this should probably be a macro in symtable.h */
        assert(PyInt_Check(v));
        scope = (PyInt_AS_LONG(v) >> SCOPE_OFF) & SCOPE_MASK;

        if (scope == scope_type || PyInt_AS_LONG(v) & flag) {
            PyObject *tuple, *item = PyInt_FromLong(i);

            tuple = PyTuple_Pack(2, k, k->ob_type);
            if (!tuple || PyDict_SetItem(dest, tuple, item) < 0) {
/* Begin: Peephole optimizations ----------------------------------------- */

#define GETARG(arr, i) ((int)((arr[i+2]<<8) + arr[i+1]))
#define UNCONDITIONAL_JUMP(op)  (op==JUMP_ABSOLUTE || op==JUMP_FORWARD)
#define ABSOLUTE_JUMP(op) (op==JUMP_ABSOLUTE || op==CONTINUE_LOOP)
#define GETJUMPTGT(arr, i) (GETARG(arr,i) + (ABSOLUTE_JUMP(arr[i]) ? 0 : i+3))
#define SETARG(arr, i, val) arr[i+2] = val>>8; arr[i+1] = val & 255
#define CODESIZE(op)  (HAS_ARG(op) ? 3 : 1)
#define ISBASICBLOCK(blocks, start, bytes) \
    (blocks[start]==blocks[start+bytes-1])
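/* Illustration (not in the original source): an instruction with an argument
   occupies three bytes here -- opcode, low byte, high byte -- so for
   codestr = {LOAD_CONST, 0x02, 0x01, ...}, GETARG(codestr, 0) is 0x0102.
   GETJUMPTGT() adds i+3 for relative jumps but not for absolute ones, and
   ISBASICBLOCK() holds when a byte range never crosses a jump target. */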
/* Replace LOAD_CONST c1. LOAD_CONST c2 ... LOAD_CONST cn BUILD_TUPLE n
   with    LOAD_CONST (c1, c2, ... cn).
   The consts table must still be in list form so that the
   new constant (c1, c2, ... cn) can be appended.
   Called with codestr pointing to the first LOAD_CONST.
   Bails out with no change if one or more of the LOAD_CONSTs is missing.
   Also works for BUILD_LIST when followed by an "in" or "not in" test.
*/
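/* Illustration (not in the original source): the body of "x in (1, 2, 3)"
   initially compiles to LOAD_CONST 1; LOAD_CONST 2; LOAD_CONST 3;
   BUILD_TUPLE 3.  After this pass the first three slots become NOPs and the
   BUILD_TUPLE slot holds a LOAD_CONST whose argument indexes the freshly
   appended (1, 2, 3) entry at the end of the consts list. */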
tuple_of_constants(unsigned char *codestr, int n, PyObject *consts)
{
    PyObject *newconst, *constant;
    Py_ssize_t i, arg, len_consts;

    assert(PyList_CheckExact(consts));
    assert(codestr[n*3] == BUILD_TUPLE || codestr[n*3] == BUILD_LIST);
    assert(GETARG(codestr, (n*3)) == n);
    for (i=0 ; i<n ; i++)
        assert(codestr[i*3] == LOAD_CONST);

    /* Build up new tuple of constants */
    newconst = PyTuple_New(n);
    if (newconst == NULL)

    len_consts = PyList_GET_SIZE(consts);
    for (i=0 ; i<n ; i++) {
        arg = GETARG(codestr, (i*3));
        assert(arg < len_consts);
        constant = PyList_GET_ITEM(consts, arg);

        PyTuple_SET_ITEM(newconst, i, constant);

    /* Append folded constant onto consts */
    if (PyList_Append(consts, newconst)) {

    /* Write NOPs over old LOAD_CONSTs and
       add a new LOAD_CONST newconst on top of the BUILD_TUPLE n */
    memset(codestr, NOP, n*3);
    codestr[n*3] = LOAD_CONST;
    SETARG(codestr, (n*3), len_consts);
/* Replace LOAD_CONST c1. LOAD_CONST c2 BINOP
   with    LOAD_CONST binop(c1,c2)
   The consts table must still be in list form so that the
   new constant can be appended.
   Called with codestr pointing to the first LOAD_CONST.
   Abandons the transformation if the folding fails (i.e. 1+'a').
   If the new constant is a sequence, only folds when the size
   is below a threshold value.  That keeps pyc files from
   becoming large in the presence of code like: (None,)*1000.
*/
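/* Illustration (not in the original source): "2 ** 10" starts out as
   LOAD_CONST 2; LOAD_CONST 10; BINARY_POWER and is rewritten to four NOPs
   followed by a LOAD_CONST whose argument indexes the freshly appended
   constant 1024. */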
fold_binops_on_constants(unsigned char *codestr, PyObject *consts)
{
    PyObject *newconst, *v, *w;
    Py_ssize_t len_consts, size;

    assert(PyList_CheckExact(consts));
    assert(codestr[0] == LOAD_CONST);
    assert(codestr[3] == LOAD_CONST);

    /* Create new constant */
    v = PyList_GET_ITEM(consts, GETARG(codestr, 0));
    w = PyList_GET_ITEM(consts, GETARG(codestr, 3));

        newconst = PyNumber_Power(v, w, Py_None);
    case BINARY_MULTIPLY:
        newconst = PyNumber_Multiply(v, w);
        /* Cannot fold this operation statically since
           the result can depend on the run-time presence
    case BINARY_TRUE_DIVIDE:
        newconst = PyNumber_TrueDivide(v, w);
    case BINARY_FLOOR_DIVIDE:
        newconst = PyNumber_FloorDivide(v, w);
        newconst = PyNumber_Remainder(v, w);
        newconst = PyNumber_Add(v, w);
    case BINARY_SUBTRACT:
        newconst = PyNumber_Subtract(v, w);
        newconst = PyObject_GetItem(v, w);
        newconst = PyNumber_Lshift(v, w);
        newconst = PyNumber_Rshift(v, w);
        newconst = PyNumber_And(v, w);
        newconst = PyNumber_Xor(v, w);
        newconst = PyNumber_Or(v, w);
        /* Called with an unknown opcode */
        PyErr_Format(PyExc_SystemError,
                     "unexpected binary operation %d on a constant",

    if (newconst == NULL) {

    size = PyObject_Size(newconst);
    else if (size > 20) {

    /* Append folded constant into consts table */
    len_consts = PyList_GET_SIZE(consts);
    if (PyList_Append(consts, newconst)) {

    /* Write NOP NOP NOP NOP LOAD_CONST newconst */
    memset(codestr, NOP, 4);
    codestr[4] = LOAD_CONST;
    SETARG(codestr, 4, len_consts);
fold_unaryops_on_constants(unsigned char *codestr, PyObject *consts)
{
    PyObject *newconst=NULL, *v;
    Py_ssize_t len_consts;

    assert(PyList_CheckExact(consts));
    assert(codestr[0] == LOAD_CONST);

    /* Create new constant */
    v = PyList_GET_ITEM(consts, GETARG(codestr, 0));

        /* Preserve the sign of -0.0 */
        if (PyObject_IsTrue(v) == 1)
            newconst = PyNumber_Negative(v);
        newconst = PyObject_Repr(v);
        newconst = PyNumber_Invert(v);
        /* Called with an unknown opcode */
        PyErr_Format(PyExc_SystemError,
                     "unexpected unary operation %d on a constant",

    if (newconst == NULL) {

    /* Append folded constant into consts table */
    len_consts = PyList_GET_SIZE(consts);
    if (PyList_Append(consts, newconst)) {

    /* Write NOP LOAD_CONST newconst */
    codestr[1] = LOAD_CONST;
    SETARG(codestr, 1, len_consts);
static unsigned int *
markblocks(unsigned char *code, int len)
{
    unsigned int *blocks = (unsigned int *)PyMem_Malloc(len*sizeof(int));
    int i, j, opcode, blockcnt = 0;

    if (blocks == NULL) {

    memset(blocks, 0, len*sizeof(int));

    /* Mark labels in the first pass */
    for (i=0 ; i<len ; i+=CODESIZE(opcode)) {

            j = GETJUMPTGT(code, i);

    /* Build block numbers in the second pass */
    for (i=0 ; i<len ; i++) {
        blockcnt += blocks[i]; /* increment blockcnt over labels */
        blocks[i] = blockcnt;
/* Perform basic peephole optimizations to components of a code object.
   The consts object should still be in list form to allow new constants
   to be appended.

   To keep the optimizer simple, it bails out (does nothing) for code
   containing extended arguments or that has a length over 32,700.  That
   allows us to avoid overflow and sign issues.  Likewise, it bails when
   the lineno table has complex encoding for gaps >= 255.

   Optimizations are restricted to simple transformations occurring within a
   single basic block.  All transformations keep the code size the same or
   smaller.  For those that reduce size, the gaps are initially filled with
   NOPs.  Later those NOPs are removed and the jump addresses retargeted in
   a single pass.  Line numbering is adjusted accordingly. */
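/* Illustration (not in the original source): if a transformation turns a
   7-byte pattern into 7 NOPs, the cleanup pass below copies the surviving
   bytes forward, records in addrmap[] how far each offset moved, and then
   rewrites every jump argument and lnotab delta through that map so targets
   and line numbers still line up. */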
optimize_code(PyObject *code, PyObject* consts, PyObject *names,
              PyObject *lineno_obj)
{
    Py_ssize_t i, j, codelen;
    int tgt, tgttgt, opcode;
    unsigned char *codestr = NULL;
    unsigned char *lineno;
    int new_line, cum_orig_line, last_line, tabsiz;
    int cumlc=0, lastlc=0;  /* Count runs of consecutive LOAD_CONSTs */
    unsigned int *blocks = NULL;

    /* Bail out if an exception is set */
    if (PyErr_Occurred())

    /* Bypass optimization when the lineno table is too complex */
    assert(PyString_Check(lineno_obj));
    lineno = (unsigned char*)PyString_AS_STRING(lineno_obj);
    tabsiz = PyString_GET_SIZE(lineno_obj);
    if (memchr(lineno, 255, tabsiz) != NULL)

    /* Avoid situations where jump retargeting could overflow */
    assert(PyString_Check(code));
    codelen = PyString_Size(code);

    /* Make a modifiable copy of the code string */
    codestr = (unsigned char *)PyMem_Malloc(codelen);

    codestr = (unsigned char *)memcpy(codestr,
                                      PyString_AS_STRING(code), codelen);

    /* Verify that RETURN_VALUE terminates the codestring.  This allows
       the various transformation patterns to look ahead several
       instructions without additional checks to make sure they are not
       looking beyond the end of the code string.
    */
    if (codestr[codelen-1] != RETURN_VALUE)

    /* Mapping to new jump targets after NOPs are removed */
    addrmap = (int *)PyMem_Malloc(codelen * sizeof(int));

    blocks = markblocks(codestr, codelen);

    assert(PyList_Check(consts));

    for (i=0 ; i<codelen ; i += CODESIZE(codestr[i])) {

        /* Replace UNARY_NOT JUMP_IF_FALSE POP_TOP
           with    JUMP_IF_TRUE POP_TOP */
            if (codestr[i+1] != JUMP_IF_FALSE ||
                codestr[i+4] != POP_TOP ||
                !ISBASICBLOCK(blocks,i,5))
            tgt = GETJUMPTGT(codestr, (i+1));
            if (codestr[tgt] != POP_TOP)
            j = GETARG(codestr, i+1) + 1;
            codestr[i] = JUMP_IF_TRUE;
            SETARG(codestr, i, j);
            codestr[i+3] = POP_TOP;

        /* not a is b -->  a is not b
           not a in b -->  a not in b
           not a is not b -->  a is b
           not a not in b -->  a in b */
            j = GETARG(codestr, i);
            if (j < 6 || j > 9 ||
                codestr[i+3] != UNARY_NOT ||
                !ISBASICBLOCK(blocks,i,4))
            SETARG(codestr, i, (j^1));

        /* Replace LOAD_GLOBAL/LOAD_NAME None
           with    LOAD_CONST None */
            j = GETARG(codestr, i);
            name = PyString_AsString(PyTuple_GET_ITEM(names, j));
            if (name == NULL || strcmp(name, "None") != 0)
            for (j=0 ; j < PyList_GET_SIZE(consts) ; j++) {
                if (PyList_GET_ITEM(consts, j) == Py_None) {
                    codestr[i] = LOAD_CONST;
                    SETARG(codestr, i, j);

        /* Skip over LOAD_CONST trueconst
           JUMP_IF_FALSE xx  POP_TOP */
            j = GETARG(codestr, i);
            if (codestr[i+3] != JUMP_IF_FALSE ||
                codestr[i+6] != POP_TOP ||
                !ISBASICBLOCK(blocks,i,7) ||
                !PyObject_IsTrue(PyList_GET_ITEM(consts, j)))
            memset(codestr+i, NOP, 7);

        /* Try to fold tuples of constants (includes a case for lists
           which are only used for "in" and "not in" tests).
           Skip over  BUILD_SEQN 1 UNPACK_SEQN 1.
           Replace    BUILD_SEQN 2 UNPACK_SEQN 2  with  ROT2.
           Replace    BUILD_SEQN 3 UNPACK_SEQN 3  with  ROT3 ROT2. */
            j = GETARG(codestr, i);
                ((opcode == BUILD_TUPLE &&
                  ISBASICBLOCK(blocks, h, 3*(j+1))) ||
                 (opcode == BUILD_LIST &&
                  codestr[i+3]==COMPARE_OP &&
                  ISBASICBLOCK(blocks, h, 3*(j+2)) &&
                  (GETARG(codestr,i+3)==6 ||
                   GETARG(codestr,i+3)==7))) &&
                tuple_of_constants(&codestr[h], j, consts)) {
                assert(codestr[i] == LOAD_CONST);
            if (codestr[i+3] != UNPACK_SEQUENCE ||
                !ISBASICBLOCK(blocks,i,6) ||
                j != GETARG(codestr, i+3))
                memset(codestr+i, NOP, 6);
                codestr[i] = ROT_TWO;
                memset(codestr+i+1, NOP, 5);
                codestr[i] = ROT_THREE;
                codestr[i+1] = ROT_TWO;
                memset(codestr+i+2, NOP, 4);

        /* Fold binary ops on constants.
           LOAD_CONST c1 LOAD_CONST c2 BINOP -->  LOAD_CONST binop(c1,c2) */
        case BINARY_MULTIPLY:
        case BINARY_TRUE_DIVIDE:
        case BINARY_FLOOR_DIVIDE:
        case BINARY_SUBTRACT:
                ISBASICBLOCK(blocks, i-6, 7) &&
                fold_binops_on_constants(&codestr[i-6], consts)) {
                assert(codestr[i] == LOAD_CONST);

        /* Fold unary ops on constants.
           LOAD_CONST c1  UNARY_OP -->  LOAD_CONST unary_op(c) */
                ISBASICBLOCK(blocks, i-3, 4) &&
                fold_unaryops_on_constants(&codestr[i-3], consts)) {
                assert(codestr[i] == LOAD_CONST);

        /* Simplify conditional jump to conditional jump where the
           result of the first test implies the success of a similar
           test or the failure of the opposite test.

           x:JUMP_IF_FALSE y   y:JUMP_IF_FALSE z  -->  x:JUMP_IF_FALSE z
           x:JUMP_IF_FALSE y   y:JUMP_IF_TRUE z   -->  x:JUMP_IF_FALSE y+3
           where y+3 is the instruction following the second test. */
            tgt = GETJUMPTGT(codestr, i);
            if (j == JUMP_IF_FALSE || j == JUMP_IF_TRUE) {
                tgttgt = GETJUMPTGT(codestr, tgt) - i - 3;
                SETARG(codestr, i, tgttgt);
                SETARG(codestr, i, tgt);
            /* Intentional fallthrough */

        /* Replace jumps to unconditional jumps */
            tgt = GETJUMPTGT(codestr, i);
            if (!UNCONDITIONAL_JUMP(codestr[tgt]))
            tgttgt = GETJUMPTGT(codestr, tgt);
            if (opcode == JUMP_FORWARD) /* JMP_ABS can go backwards */
                opcode = JUMP_ABSOLUTE;
            if (!ABSOLUTE_JUMP(opcode))
                tgttgt -= i + 3;        /* Calc relative jump addr */
            if (tgttgt < 0)             /* No backward relative jumps */
            SETARG(codestr, i, tgttgt);

        /* Replace RETURN LOAD_CONST None RETURN  with just RETURN */
            if (i+4 >= codelen ||
                codestr[i+4] != RETURN_VALUE ||
                !ISBASICBLOCK(blocks,i,5))
            memset(codestr+i+1, NOP, 4);

    /* Fixup linenotab */
    for (i=0, nops=0 ; i<codelen ; i += CODESIZE(codestr[i])) {
        addrmap[i] = i - nops;
        if (codestr[i] == NOP)

    for (i=0 ; i < tabsiz ; i+=2) {
        cum_orig_line += lineno[i];
        new_line = addrmap[cum_orig_line];
        assert(new_line - last_line < 255);
        lineno[i] = (unsigned char)(new_line - last_line);
        last_line = new_line;

    /* Remove NOPs and fixup jump targets */
    for (i=0, h=0 ; i<codelen ; ) {

            j = addrmap[GETARG(codestr, i)];
            SETARG(codestr, i, j);

            j = addrmap[GETARG(codestr, i) + i + 3] - addrmap[i] - 3;
            SETARG(codestr, i, j);

        adj = CODESIZE(opcode);
            codestr[h++] = codestr[i++];

    assert(h + nops == codelen);

    code = PyString_FromStringAndSize((char *)codestr, h);

/* End: Peephole optimizations ----------------------------------------- */
/* Leave this debugging code for just a little longer. */
compiler_display_symbols(PyObject *name, PyObject *symbols)
{
    PyObject *key, *value;

    fprintf(stderr, "block %s\n", PyString_AS_STRING(name));
    while (PyDict_Next(symbols, &pos, &key, &value)) {
        flags = PyInt_AsLong(value);
        fprintf(stderr, "var %s:", PyString_AS_STRING(key));
        if (flags & DEF_GLOBAL)
            fprintf(stderr, " declared_global");
        if (flags & DEF_LOCAL)
            fprintf(stderr, " local");
        if (flags & DEF_PARAM)
            fprintf(stderr, " param");
        if (flags & DEF_STAR)
            fprintf(stderr, " stararg");
        if (flags & DEF_DOUBLESTAR)
            fprintf(stderr, " starstar");
        if (flags & DEF_INTUPLE)
            fprintf(stderr, " tuple");
        if (flags & DEF_FREE)
            fprintf(stderr, " free");
        if (flags & DEF_FREE_GLOBAL)
            fprintf(stderr, " global");
        if (flags & DEF_FREE_CLASS)
            fprintf(stderr, " free/class");
        if (flags & DEF_IMPORT)
            fprintf(stderr, " import");
        fprintf(stderr, "\n");

    fprintf(stderr, "\n");
compiler_unit_check(struct compiler_unit *u)
{
    for (block = u->u_blocks; block != NULL; block = block->b_list) {
        assert(block != (void *)0xcbcbcbcb);
        assert(block != (void *)0xfbfbfbfb);
        assert(block != (void *)0xdbdbdbdb);
        if (block->b_instr != NULL) {
            assert(block->b_ialloc > 0);
            assert(block->b_iused > 0);
            assert(block->b_ialloc >= block->b_iused);

            assert(block->b_iused == 0);
            assert(block->b_ialloc == 0);

compiler_unit_free(struct compiler_unit *u)
{
    basicblock *b, *next;

    compiler_unit_check(u);

        PyObject_Free((void *)b->b_instr);

        PyObject_Free((void *)b);

    Py_CLEAR(u->u_name);
    Py_CLEAR(u->u_consts);
    Py_CLEAR(u->u_names);
    Py_CLEAR(u->u_varnames);
    Py_CLEAR(u->u_freevars);
    Py_CLEAR(u->u_cellvars);
    Py_CLEAR(u->u_private);
compiler_enter_scope(struct compiler *c, identifier name, void *key,
                     int lineno)
{
    struct compiler_unit *u;

    u = (struct compiler_unit *)PyObject_Malloc(sizeof(
        struct compiler_unit));

    memset(u, 0, sizeof(struct compiler_unit));

    u->u_ste = PySymtable_Lookup(c->c_st, key);

        compiler_unit_free(u);

    u->u_varnames = list2dict(u->u_ste->ste_varnames);
    u->u_cellvars = dictbytype(u->u_ste->ste_symbols, CELL, 0, 0);
    if (!u->u_varnames || !u->u_cellvars) {
        compiler_unit_free(u);

    u->u_freevars = dictbytype(u->u_ste->ste_symbols, FREE, DEF_FREE_CLASS,
                               PyDict_Size(u->u_cellvars));
    if (!u->u_freevars) {
        compiler_unit_free(u);

    u->u_firstlineno = lineno;

    u->u_lineno_set = false;
    u->u_consts = PyDict_New();

        compiler_unit_free(u);

    u->u_names = PyDict_New();

        compiler_unit_free(u);

    u->u_private = NULL;

    /* Push the old compiler_unit on the stack. */

        PyObject *wrapper = PyCObject_FromVoidPtr(c->u, NULL);
        if (!wrapper || PyList_Append(c->c_stack, wrapper) < 0) {
            Py_XDECREF(wrapper);
            compiler_unit_free(u);

        u->u_private = c->u->u_private;
        Py_XINCREF(u->u_private);

    if (compiler_use_new_block(c) == NULL)

compiler_exit_scope(struct compiler *c)
{
    compiler_unit_free(c->u);
    /* Restore c->u to the parent unit. */
    n = PyList_GET_SIZE(c->c_stack) - 1;

        wrapper = PyList_GET_ITEM(c->c_stack, n);
        c->u = (struct compiler_unit *)PyCObject_AsVoidPtr(wrapper);
        /* we are deleting from a list so this really shouldn't fail */
        if (PySequence_DelItem(c->c_stack, n) < 0)
            Py_FatalError("compiler_exit_scope()");
        compiler_unit_check(c->u);
/* Allocate a new "anonymous" local variable.
   Used by list comprehensions and with statements.
*/

compiler_new_tmpname(struct compiler *c)
{
    PyOS_snprintf(tmpname, sizeof(tmpname), "_[%d]", ++c->u->u_tmpname);
    return PyString_FromString(tmpname);
}

/* Allocate a new block and return a pointer to it.
   Returns NULL on error.
*/

compiler_new_block(struct compiler *c)
{
    struct compiler_unit *u;

    b = (basicblock *)PyObject_Malloc(sizeof(basicblock));

    memset((void *)b, 0, sizeof(basicblock));
    /* Extend the singly linked list of blocks with new block. */
    b->b_list = u->u_blocks;

compiler_use_new_block(struct compiler *c)
{
    basicblock *block = compiler_new_block(c);

    c->u->u_curblock = block;

compiler_next_block(struct compiler *c)
{
    basicblock *block = compiler_new_block(c);

    c->u->u_curblock->b_next = block;
    c->u->u_curblock = block;

compiler_use_next_block(struct compiler *c, basicblock *block)
{
    assert(block != NULL);
    c->u->u_curblock->b_next = block;
    c->u->u_curblock = block;
/* Returns the offset of the next instruction in the current block's
   b_instr array.  Resizes the b_instr as necessary.
   Returns -1 on failure.
*/

compiler_next_instr(struct compiler *c, basicblock *b)
{
    if (b->b_instr == NULL) {
        b->b_instr = (struct instr *)PyObject_Malloc(
            sizeof(struct instr) * DEFAULT_BLOCK_SIZE);
        if (b->b_instr == NULL) {

        b->b_ialloc = DEFAULT_BLOCK_SIZE;
        memset((char *)b->b_instr, 0,
               sizeof(struct instr) * DEFAULT_BLOCK_SIZE);
    }
    else if (b->b_iused == b->b_ialloc) {
        size_t oldsize, newsize;
        oldsize = b->b_ialloc * sizeof(struct instr);
        newsize = oldsize << 1;

        tmp = (struct instr *)PyObject_Realloc(
            (void *)b->b_instr, newsize);

        memset((char *)b->b_instr + oldsize, 0, newsize - oldsize);
    }
    return b->b_iused++;
}

/* Set the i_lineno member of the instruction at offset off if the
   line number for the current expression/statement (?) has not
   already been set.  If it has been set, the call has no effect.

   Every time a new node is b
*/

compiler_set_lineno(struct compiler *c, int off)
{
    if (c->u->u_lineno_set)

    c->u->u_lineno_set = true;
    b = c->u->u_curblock;
    b->b_instr[off].i_lineno = c->u->u_lineno;
opcode_stack_effect(int opcode, int oparg)
{
        case UNARY_POSITIVE:
        case UNARY_NEGATIVE:

        case BINARY_MULTIPLY:

        case BINARY_SUBTRACT:

        case BINARY_FLOOR_DIVIDE:
        case BINARY_TRUE_DIVIDE:
        case INPLACE_FLOOR_DIVIDE:
        case INPLACE_TRUE_DIVIDE:

        case DELETE_SLICE+0:
        case DELETE_SLICE+1:
        case DELETE_SLICE+2:
        case DELETE_SLICE+3:

        case INPLACE_SUBTRACT:
        case INPLACE_MULTIPLY:
        case INPLACE_DIVIDE:
        case INPLACE_MODULO:

        case PRINT_NEWLINE_TO:

        case INPLACE_LSHIFT:
        case INPLACE_RSHIFT:

            return -1; /* XXX Sometimes more */

            return -1; /* or -2 or -3 if exception occurred */

        case UNPACK_SEQUENCE:

            return 3; /* actually pushed by an exception */

#define NARGS(o) (((o) % 256) + 2*((o) / 256))
            return -NARGS(oparg);
        case CALL_FUNCTION_VAR:
        case CALL_FUNCTION_KW:
            return -NARGS(oparg)-1;
        case CALL_FUNCTION_VAR_KW:
            return -NARGS(oparg)-2;
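        /* Illustration (not in the original source): for CALL_FUNCTION the
           oparg packs the positional count in the low byte and the keyword
           count in the high byte, so oparg 0x0102 means 2 positional plus 1
           keyword pair and NARGS(0x0102) == 2 + 2*1 == 4 stack items consumed
           besides the callable itself. */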
    fprintf(stderr, "opcode = %d\n", opcode);
    Py_FatalError("opcode_stack_effect()");

    return 0; /* not reachable */
}
/* Add an opcode with no argument.
   Returns 0 on failure, 1 on success.
*/

compiler_addop(struct compiler *c, int opcode)
{
    off = compiler_next_instr(c, c->u->u_curblock);

    b = c->u->u_curblock;
    i = &b->b_instr[off];
    i->i_opcode = opcode;

    if (opcode == RETURN_VALUE)

    compiler_set_lineno(c, off);

compiler_add_o(struct compiler *c, PyObject *dict, PyObject *o)
{
    /* necessary to make sure types aren't coerced (e.g., int and long) */
    t = PyTuple_Pack(2, o, o->ob_type);

    v = PyDict_GetItem(dict, t);

        arg = PyDict_Size(dict);
        v = PyInt_FromLong(arg);

        if (PyDict_SetItem(dict, t, v) < 0) {

        arg = PyInt_AsLong(v);

compiler_addop_o(struct compiler *c, int opcode, PyObject *dict,
                 PyObject *o)
{
    int arg = compiler_add_o(c, dict, o);

    return compiler_addop_i(c, opcode, arg);
}

compiler_addop_name(struct compiler *c, int opcode, PyObject *dict,
                    PyObject *o)
{
    PyObject *mangled = _Py_Mangle(c->u->u_private, o);

    arg = compiler_add_o(c, dict, mangled);

    return compiler_addop_i(c, opcode, arg);
}

/* Add an opcode with an integer argument.
   Returns 0 on failure, 1 on success.
*/

compiler_addop_i(struct compiler *c, int opcode, int oparg)
{
    off = compiler_next_instr(c, c->u->u_curblock);

    i = &c->u->u_curblock->b_instr[off];
    i->i_opcode = opcode;

    compiler_set_lineno(c, off);

compiler_addop_j(struct compiler *c, int opcode, basicblock *b, int absolute)
{
    off = compiler_next_instr(c, c->u->u_curblock);

    i = &c->u->u_curblock->b_instr[off];
    i->i_opcode = opcode;

    compiler_set_lineno(c, off);
/* The distinction between NEW_BLOCK and NEXT_BLOCK is subtle.  (I'd
   like to find better names.)  NEW_BLOCK() creates a new block and sets
   it as the current block.  NEXT_BLOCK() also creates an implicit jump
   from the current block to the new block.
*/

/* XXX The returns inside these macros make it impossible to decref
   objects created in the local function.
*/

#define NEW_BLOCK(C) { \
    if (compiler_use_new_block((C)) == NULL) \

#define NEXT_BLOCK(C) { \
    if (compiler_next_block((C)) == NULL) \

#define ADDOP(C, OP) { \
    if (!compiler_addop((C), (OP))) \

#define ADDOP_IN_SCOPE(C, OP) { \
    if (!compiler_addop((C), (OP))) { \
        compiler_exit_scope(c); \

#define ADDOP_O(C, OP, O, TYPE) { \
    if (!compiler_addop_o((C), (OP), (C)->u->u_ ## TYPE, (O))) \

#define ADDOP_NAME(C, OP, O, TYPE) { \
    if (!compiler_addop_name((C), (OP), (C)->u->u_ ## TYPE, (O))) \

#define ADDOP_I(C, OP, O) { \
    if (!compiler_addop_i((C), (OP), (O))) \

#define ADDOP_JABS(C, OP, O) { \
    if (!compiler_addop_j((C), (OP), (O), 1)) \

#define ADDOP_JREL(C, OP, O) { \
    if (!compiler_addop_j((C), (OP), (O), 0)) \

/* VISIT and VISIT_SEQ take an ASDL type as their second argument.  They use
   the ASDL name to synthesize the name of the C type and the visit function.
*/

#define VISIT(C, TYPE, V) {\
    if (!compiler_visit_ ## TYPE((C), (V))) \

#define VISIT_IN_SCOPE(C, TYPE, V) {\
    if (!compiler_visit_ ## TYPE((C), (V))) { \
        compiler_exit_scope(c); \

#define VISIT_SLICE(C, V, CTX) {\
    if (!compiler_visit_slice((C), (V), (CTX))) \

#define VISIT_SEQ(C, TYPE, SEQ) { \
    asdl_seq *seq = (SEQ); /* avoid variable capture */ \
    for (_i = 0; _i < asdl_seq_LEN(seq); _i++) { \
        TYPE ## _ty elt = (TYPE ## _ty)asdl_seq_GET(seq, _i); \
        if (!compiler_visit_ ## TYPE((C), elt)) \

#define VISIT_SEQ_IN_SCOPE(C, TYPE, SEQ) { \
    asdl_seq *seq = (SEQ); /* avoid variable capture */ \
    for (_i = 0; _i < asdl_seq_LEN(seq); _i++) { \
        TYPE ## _ty elt = (TYPE ## _ty)asdl_seq_GET(seq, _i); \
        if (!compiler_visit_ ## TYPE((C), elt)) { \
            compiler_exit_scope(c); \
compiler_isdocstring(stmt_ty s)
{
    if (s->kind != Expr_kind)

    return s->v.Expr.value->kind == Str_kind;
}

/* Compile a sequence of statements, checking for a docstring. */
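/* Illustration (not in the original source): for a module whose first
   statement is the string literal "spam docstring", compiler_body() visits
   that expression (a LOAD_CONST of the string) and then stores the result
   under __doc__ instead of treating it as an ordinary expression statement;
   the remaining statements are visited normally. */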
compiler_body(struct compiler *c, asdl_seq *stmts)
{
    if (!asdl_seq_LEN(stmts))

    st = (stmt_ty)asdl_seq_GET(stmts, 0);
    if (compiler_isdocstring(st)) {

        VISIT(c, expr, st->v.Expr.value);
        if (!compiler_nameop(c, __doc__, Store))

    for (; i < asdl_seq_LEN(stmts); i++)
        VISIT(c, stmt, (stmt_ty)asdl_seq_GET(stmts, i));

static PyCodeObject *
compiler_mod(struct compiler *c, mod_ty mod)
{
    static PyObject *module;

    module = PyString_FromString("<module>");

    /* Use 0 for firstlineno initially, will fixup in assemble(). */
    if (!compiler_enter_scope(c, module, mod, 0))

    switch (mod->kind) {

        if (!compiler_body(c, mod->v.Module.body)) {
            compiler_exit_scope(c);

    case Interactive_kind:
        c->c_interactive = 1;
        VISIT_SEQ_IN_SCOPE(c, stmt,
                           mod->v.Interactive.body);

    case Expression_kind:
        VISIT_IN_SCOPE(c, expr, mod->v.Expression.body);

        PyErr_SetString(PyExc_SystemError,
                        "suite should not be possible");

        PyErr_Format(PyExc_SystemError,
                     "module kind %d should not be possible",

    co = assemble(c, addNone);
    compiler_exit_scope(c);
/* The test for LOCAL must come before the test for FREE in order to
   handle classes where name is both local and free.  The local var is
   a method and the free var is a free var referenced within a method.
*/

get_ref_type(struct compiler *c, PyObject *name)
{
    int scope = PyST_GetScope(c->u->u_ste, name);

        PyOS_snprintf(buf, sizeof(buf),
                      "unknown scope for %.100s in %.100s(%s) in %s\n"
                      "symbols: %s\nlocals: %s\nglobals: %s\n",
                      PyString_AS_STRING(name),
                      PyString_AS_STRING(c->u->u_name),
                      PyObject_REPR(c->u->u_ste->ste_id),
                      PyObject_REPR(c->u->u_ste->ste_symbols),
                      PyObject_REPR(c->u->u_varnames),
                      PyObject_REPR(c->u->u_names)

compiler_lookup_arg(PyObject *dict, PyObject *name)
{
    k = PyTuple_Pack(2, name, name->ob_type);

    v = PyDict_GetItem(dict, k);

    return PyInt_AS_LONG(v);
compiler_make_closure(struct compiler *c, PyCodeObject *co, int args)
{
    int i, free = PyCode_GetNumFree(co);

        ADDOP_O(c, LOAD_CONST, (PyObject*)co, consts);
        ADDOP_I(c, MAKE_FUNCTION, args);

    for (i = 0; i < free; ++i) {
        /* Bypass com_addop_varname because it will generate
           LOAD_DEREF but LOAD_CLOSURE is needed.
        */
        PyObject *name = PyTuple_GET_ITEM(co->co_freevars, i);

        /* Special case: If a class contains a method with a
           free variable that has the same name as a method,
           the name will be considered free *and* local in the
           class.  It should be handled by the closure, as
           well as by the normal name lookup logic.
        */
        reftype = get_ref_type(c, name);
        if (reftype == CELL)
            arg = compiler_lookup_arg(c->u->u_cellvars, name);
        else /* (reftype == FREE) */
            arg = compiler_lookup_arg(c->u->u_freevars, name);

            printf("lookup %s in %s %d %d\n"
                   "freevars of %s: %s\n",
                   PyObject_REPR(name),
                   PyString_AS_STRING(c->u->u_name),
                   PyString_AS_STRING(co->co_name),
                   PyObject_REPR(co->co_freevars));
            Py_FatalError("compiler_make_closure()");

        ADDOP_I(c, LOAD_CLOSURE, arg);

    ADDOP_I(c, BUILD_TUPLE, free);
    ADDOP_O(c, LOAD_CONST, (PyObject*)co, consts);
    ADDOP_I(c, MAKE_CLOSURE, args);

compiler_decorators(struct compiler *c, asdl_seq* decos)
{
    for (i = 0; i < asdl_seq_LEN(decos); i++) {
        VISIT(c, expr, (expr_ty)asdl_seq_GET(decos, i));

compiler_arguments(struct compiler *c, arguments_ty args)
{
    int n = asdl_seq_LEN(args->args);
    /* Correctly handle nested argument lists */
    for (i = 0; i < n; i++) {
        expr_ty arg = (expr_ty)asdl_seq_GET(args->args, i);
        if (arg->kind == Tuple_kind) {
            PyObject *id = PyString_FromFormat(".%d", i);

            if (!compiler_nameop(c, id, Load)) {

            VISIT(c, expr, arg);
compiler_function(struct compiler *c, stmt_ty s)
{
    PyObject *first_const = Py_None;
    arguments_ty args = s->v.FunctionDef.args;
    asdl_seq* decos = s->v.FunctionDef.decorators;
    int i, n, docstring;

    assert(s->kind == FunctionDef_kind);

    if (!compiler_decorators(c, decos))

    VISIT_SEQ(c, expr, args->defaults);
    if (!compiler_enter_scope(c, s->v.FunctionDef.name, (void *)s,
                              s->lineno))

    st = (stmt_ty)asdl_seq_GET(s->v.FunctionDef.body, 0);
    docstring = compiler_isdocstring(st);

        first_const = st->v.Expr.value->v.Str.s;
    if (compiler_add_o(c, c->u->u_consts, first_const) < 0) {
        compiler_exit_scope(c);

    /* unpack nested arguments */
    compiler_arguments(c, args);

    c->u->u_argcount = asdl_seq_LEN(args->args);
    n = asdl_seq_LEN(s->v.FunctionDef.body);
    /* if there was a docstring, we need to skip the first statement */
    for (i = docstring; i < n; i++) {
        st = (stmt_ty)asdl_seq_GET(s->v.FunctionDef.body, i);
        VISIT_IN_SCOPE(c, stmt, st);

    co = assemble(c, 1);
    compiler_exit_scope(c);

    compiler_make_closure(c, co, asdl_seq_LEN(args->defaults));

    for (i = 0; i < asdl_seq_LEN(decos); i++) {
        ADDOP_I(c, CALL_FUNCTION, 1);

    return compiler_nameop(c, s->v.FunctionDef.name, Store);
compiler_class(struct compiler *c, stmt_ty s)
{
    /* push class name on stack, needed by BUILD_CLASS */
    ADDOP_O(c, LOAD_CONST, s->v.ClassDef.name, consts);
    /* push the tuple of base classes on the stack */
    n = asdl_seq_LEN(s->v.ClassDef.bases);

    VISIT_SEQ(c, expr, s->v.ClassDef.bases);
    ADDOP_I(c, BUILD_TUPLE, n);
    if (!compiler_enter_scope(c, s->v.ClassDef.name, (void *)s,
                              s->lineno))

    c->u->u_private = s->v.ClassDef.name;
    Py_INCREF(c->u->u_private);
    str = PyString_InternFromString("__name__");
    if (!str || !compiler_nameop(c, str, Load)) {

        compiler_exit_scope(c);

    str = PyString_InternFromString("__module__");
    if (!str || !compiler_nameop(c, str, Store)) {

        compiler_exit_scope(c);

    if (!compiler_body(c, s->v.ClassDef.body)) {
        compiler_exit_scope(c);

    ADDOP_IN_SCOPE(c, LOAD_LOCALS);
    ADDOP_IN_SCOPE(c, RETURN_VALUE);
    co = assemble(c, 1);
    compiler_exit_scope(c);

    compiler_make_closure(c, co, 0);

    ADDOP_I(c, CALL_FUNCTION, 0);
    ADDOP(c, BUILD_CLASS);
    if (!compiler_nameop(c, s->v.ClassDef.name, Store))
compiler_ifexp(struct compiler *c, expr_ty e)
{
    basicblock *end, *next;

    assert(e->kind == IfExp_kind);
    end = compiler_new_block(c);

    next = compiler_new_block(c);

    VISIT(c, expr, e->v.IfExp.test);
    ADDOP_JREL(c, JUMP_IF_FALSE, next);

    VISIT(c, expr, e->v.IfExp.body);
    ADDOP_JREL(c, JUMP_FORWARD, end);
    compiler_use_next_block(c, next);

    VISIT(c, expr, e->v.IfExp.orelse);
    compiler_use_next_block(c, end);

compiler_lambda(struct compiler *c, expr_ty e)
{
    static identifier name;
    arguments_ty args = e->v.Lambda.args;
    assert(e->kind == Lambda_kind);

        name = PyString_InternFromString("<lambda>");

    VISIT_SEQ(c, expr, args->defaults);
    if (!compiler_enter_scope(c, name, (void *)e, e->lineno))

    /* unpack nested arguments */
    compiler_arguments(c, args);

    c->u->u_argcount = asdl_seq_LEN(args->args);
    VISIT_IN_SCOPE(c, expr, e->v.Lambda.body);
    ADDOP_IN_SCOPE(c, RETURN_VALUE);
    co = assemble(c, 1);
    compiler_exit_scope(c);

    compiler_make_closure(c, co, asdl_seq_LEN(args->defaults));
compiler_print(struct compiler *c, stmt_ty s)
{
    assert(s->kind == Print_kind);
    n = asdl_seq_LEN(s->v.Print.values);

    if (s->v.Print.dest) {
        VISIT(c, expr, s->v.Print.dest);

    for (i = 0; i < n; i++) {
        expr_ty e = (expr_ty)asdl_seq_GET(s->v.Print.values, i);

            ADDOP(c, PRINT_ITEM_TO);

            ADDOP(c, PRINT_ITEM);

    if (s->v.Print.nl) {

            ADDOP(c, PRINT_NEWLINE_TO)

            ADDOP(c, PRINT_NEWLINE)
compiler_if(struct compiler *c, stmt_ty s)
{
    basicblock *end, *next;

    assert(s->kind == If_kind);
    end = compiler_new_block(c);

    next = compiler_new_block(c);

    constant = expr_constant(s->v.If.test);
    /* constant = 0: "if 0"
     * constant = 1: "if 1", "if 2", ...
     * constant = -1: rest */
    if (constant == 0) {

        VISIT_SEQ(c, stmt, s->v.If.orelse);
    } else if (constant == 1) {
        VISIT_SEQ(c, stmt, s->v.If.body);

        VISIT(c, expr, s->v.If.test);
        ADDOP_JREL(c, JUMP_IF_FALSE, next);

        VISIT_SEQ(c, stmt, s->v.If.body);
        ADDOP_JREL(c, JUMP_FORWARD, end);
        compiler_use_next_block(c, next);

        VISIT_SEQ(c, stmt, s->v.If.orelse);

    compiler_use_next_block(c, end);
compiler_for(struct compiler *c, stmt_ty s)
{
    basicblock *start, *cleanup, *end;

    start = compiler_new_block(c);
    cleanup = compiler_new_block(c);
    end = compiler_new_block(c);
    if (start == NULL || end == NULL || cleanup == NULL)

    ADDOP_JREL(c, SETUP_LOOP, end);
    if (!compiler_push_fblock(c, LOOP, start))

    VISIT(c, expr, s->v.For.iter);

    compiler_use_next_block(c, start);
    /* XXX(nnorwitz): is there a better way to handle this?
       for loops are special, we want to be able to trace them
       each time around, so we need to set an extra line number. */
    c->u->u_lineno_set = false;
    ADDOP_JREL(c, FOR_ITER, cleanup);
    VISIT(c, expr, s->v.For.target);
    VISIT_SEQ(c, stmt, s->v.For.body);
    ADDOP_JABS(c, JUMP_ABSOLUTE, start);
    compiler_use_next_block(c, cleanup);
    ADDOP(c, POP_BLOCK);
    compiler_pop_fblock(c, LOOP, start);
    VISIT_SEQ(c, stmt, s->v.For.orelse);
    compiler_use_next_block(c, end);
compiler_while(struct compiler *c, stmt_ty s)
{
    basicblock *loop, *orelse, *end, *anchor = NULL;
    int constant = expr_constant(s->v.While.test);

    loop = compiler_new_block(c);
    end = compiler_new_block(c);
    if (constant == -1) {
        anchor = compiler_new_block(c);

    if (loop == NULL || end == NULL)

    if (s->v.While.orelse) {
        orelse = compiler_new_block(c);

    ADDOP_JREL(c, SETUP_LOOP, end);
    compiler_use_next_block(c, loop);
    if (!compiler_push_fblock(c, LOOP, loop))

    if (constant == -1) {
        VISIT(c, expr, s->v.While.test);
        ADDOP_JREL(c, JUMP_IF_FALSE, anchor);

    VISIT_SEQ(c, stmt, s->v.While.body);
    ADDOP_JABS(c, JUMP_ABSOLUTE, loop);

    /* XXX should the two POP instructions be in a separate block
       if there is no else clause ?
    */

    if (constant == -1) {
        compiler_use_next_block(c, anchor);

        ADDOP(c, POP_BLOCK);

    compiler_pop_fblock(c, LOOP, loop);
    if (orelse != NULL) /* what if orelse is just pass? */
        VISIT_SEQ(c, stmt, s->v.While.orelse);
    compiler_use_next_block(c, end);
compiler_continue(struct compiler *c)
{
    static const char LOOP_ERROR_MSG[] = "'continue' not properly in loop";

    if (!c->u->u_nfblocks)
        return compiler_error(c, LOOP_ERROR_MSG);
    i = c->u->u_nfblocks - 1;
    switch (c->u->u_fblock[i].fb_type) {

        ADDOP_JABS(c, JUMP_ABSOLUTE, c->u->u_fblock[i].fb_block);

        while (--i >= 0 && c->u->u_fblock[i].fb_type != LOOP)

            return compiler_error(c, LOOP_ERROR_MSG);
        ADDOP_JABS(c, CONTINUE_LOOP, c->u->u_fblock[i].fb_block);

        return compiler_error(c,
            "'continue' not supported inside 'finally' clause");
/* Code generated for "try: <body> finally: <finalbody>" is as follows:

       L:      <code for finalbody>

   The special instructions use the block stack.  Each block
   stack entry contains the instruction that created it (here
   SETUP_FINALLY), the level of the value stack at the time the
   block stack entry was created, and a label (here L).

   SETUP_FINALLY:
       Pushes the current value stack level and the label
       onto the block stack.
   POP_BLOCK:
       Pops an entry from the block stack, and pops the value
       stack until its level is the same as indicated on the
       block stack.  (The label is ignored.)
   END_FINALLY:
       Pops a variable number of entries from the *value* stack
       and re-raises the exception they specify.  The number of
       entries popped depends on the (pseudo) exception type.

   The block stack is unwound when an exception is raised:
   when a SETUP_FINALLY entry is found, the exception is pushed
   onto the value stack (and the exception condition is cleared),
   and the interpreter jumps to the label gotten from the block
   stack.
*/
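/* Illustration (not in the original source): the full emitted shape is
   roughly
           SETUP_FINALLY  L
           <code for body>
           POP_BLOCK
           LOAD_CONST     None
       L:  <code for finalbody>
           END_FINALLY
   so the finally suite runs both on normal fall-through (with None on the
   stack) and during unwinding (with the exception on the stack), matching
   compiler_try_finally() below. */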
compiler_try_finally(struct compiler *c, stmt_ty s)
{
    basicblock *body, *end;
    body = compiler_new_block(c);
    end = compiler_new_block(c);
    if (body == NULL || end == NULL)

    ADDOP_JREL(c, SETUP_FINALLY, end);
    compiler_use_next_block(c, body);
    if (!compiler_push_fblock(c, FINALLY_TRY, body))

    VISIT_SEQ(c, stmt, s->v.TryFinally.body);
    ADDOP(c, POP_BLOCK);
    compiler_pop_fblock(c, FINALLY_TRY, body);

    ADDOP_O(c, LOAD_CONST, Py_None, consts);
    compiler_use_next_block(c, end);
    if (!compiler_push_fblock(c, FINALLY_END, end))

    VISIT_SEQ(c, stmt, s->v.TryFinally.finalbody);
    ADDOP(c, END_FINALLY);
    compiler_pop_fblock(c, FINALLY_END, end);
/*
   Code generated for "try: S except E1, V1: S1 except E2, V2: S2 ...":
   (The contents of the value stack is shown in [], with the top
   at the right; 'tb' is trace-back info, 'val' the exception's
   associated value, and 'exc' the exception.)

   Value stack              Label   Instruction     Argument

   [tb, val, exc]           L1:     DUP                             )
   [tb, val, exc, exc]              <evaluate E1>                   )
   [tb, val, exc, exc, E1]          COMPARE_OP      EXC_MATCH       ) only if E1
   [tb, val, exc, 1-or-0]           JUMP_IF_FALSE   L2              )
   [tb, val, exc, 1]                POP                             )
   [tb, val]                        <assign to V1>  (or POP if no V1)

   [tb, val, exc, 0]        L2:     POP

   .............................etc.......................

   [tb, val, exc, 0]        Ln+1:   POP
   [tb, val, exc]                   END_FINALLY     # re-raise exception

   []                       L0:     <next statement>

   Of course, parts are not generated if Vi or Ei is not present.
*/
compiler_try_except(struct compiler *c, stmt_ty s)
{
    basicblock *body, *orelse, *except, *end;

    body = compiler_new_block(c);
    except = compiler_new_block(c);
    orelse = compiler_new_block(c);
    end = compiler_new_block(c);
    if (body == NULL || except == NULL || orelse == NULL || end == NULL)

    ADDOP_JREL(c, SETUP_EXCEPT, except);
    compiler_use_next_block(c, body);
    if (!compiler_push_fblock(c, EXCEPT, body))

    VISIT_SEQ(c, stmt, s->v.TryExcept.body);
    ADDOP(c, POP_BLOCK);
    compiler_pop_fblock(c, EXCEPT, body);
    ADDOP_JREL(c, JUMP_FORWARD, orelse);
    n = asdl_seq_LEN(s->v.TryExcept.handlers);
    compiler_use_next_block(c, except);
    for (i = 0; i < n; i++) {
        excepthandler_ty handler = (excepthandler_ty)asdl_seq_GET(
            s->v.TryExcept.handlers, i);
        if (!handler->type && i < n-1)
            return compiler_error(c, "default 'except:' must be last");
        c->u->u_lineno_set = false;
        c->u->u_lineno = handler->lineno;
        except = compiler_new_block(c);

        if (handler->type) {

            VISIT(c, expr, handler->type);
            ADDOP_I(c, COMPARE_OP, PyCmp_EXC_MATCH);
            ADDOP_JREL(c, JUMP_IF_FALSE, except);

        if (handler->name) {
            VISIT(c, expr, handler->name);

        VISIT_SEQ(c, stmt, handler->body);
        ADDOP_JREL(c, JUMP_FORWARD, end);
        compiler_use_next_block(c, except);

    ADDOP(c, END_FINALLY);
    compiler_use_next_block(c, orelse);
    VISIT_SEQ(c, stmt, s->v.TryExcept.orelse);
    compiler_use_next_block(c, end);
compiler_import_as(struct compiler *c, identifier name, identifier asname)
{
    /* The IMPORT_NAME opcode was already generated.  This function
       merely needs to bind the result to a name.

       If there is a dot in name, we need to split it and emit a
       LOAD_ATTR for each name.
    */
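    /* Illustration (not in the original source): for "import a.b.c as m" the
       import itself leaves module "a" on the stack; this helper then emits
       LOAD_ATTR b and LOAD_ATTR c before storing the result under "m". */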
    const char *src = PyString_AS_STRING(name);
    const char *dot = strchr(src, '.');

        /* Consume the base module name to get the first attribute */

            /* NB src is only defined when dot != NULL */
            dot = strchr(src, '.');
            attr = PyString_FromStringAndSize(src,
                dot ? dot - src : strlen(src));

            ADDOP_O(c, LOAD_ATTR, attr, names);

    return compiler_nameop(c, asname, Store);
compiler_import(struct compiler *c, stmt_ty s)
{
    /* The Import node stores a module name like a.b.c as a single
       string.  This is convenient for all cases except
       where we need to parse that string to extract the individual
       module names.
       XXX Perhaps change the representation to make this case simpler?
    */
    int i, n = asdl_seq_LEN(s->v.Import.names);

    for (i = 0; i < n; i++) {
        alias_ty alias = (alias_ty)asdl_seq_GET(s->v.Import.names, i);

        if (c->c_flags && (c->c_flags->cf_flags & CO_FUTURE_ABSOLUTE_IMPORT))
            level = PyInt_FromLong(0);

            level = PyInt_FromLong(-1);

        ADDOP_O(c, LOAD_CONST, level, consts);

        ADDOP_O(c, LOAD_CONST, Py_None, consts);
        ADDOP_NAME(c, IMPORT_NAME, alias->name, names);

        if (alias->asname) {
            r = compiler_import_as(c, alias->name, alias->asname);

            identifier tmp = alias->name;
            const char *base = PyString_AS_STRING(alias->name);
            char *dot = strchr(base, '.');

                tmp = PyString_FromStringAndSize(base,

            r = compiler_nameop(c, tmp, Store);
compiler_from_import(struct compiler *c, stmt_ty s)
{
    int i, n = asdl_seq_LEN(s->v.ImportFrom.names);

    PyObject *names = PyTuple_New(n);

    if (s->v.ImportFrom.level == 0 && c->c_flags &&
        !(c->c_flags->cf_flags & CO_FUTURE_ABSOLUTE_IMPORT))
        level = PyInt_FromLong(-1);

        level = PyInt_FromLong(s->v.ImportFrom.level);

    /* build up the names */
    for (i = 0; i < n; i++) {
        alias_ty alias = (alias_ty)asdl_seq_GET(s->v.ImportFrom.names, i);
        Py_INCREF(alias->name);
        PyTuple_SET_ITEM(names, i, alias->name);

    if (s->lineno > c->c_future->ff_lineno) {
        if (!strcmp(PyString_AS_STRING(s->v.ImportFrom.module),

                return compiler_error(c,
                    "from __future__ imports must occur "
                    "at the beginning of the file");

    ADDOP_O(c, LOAD_CONST, level, consts);

    ADDOP_O(c, LOAD_CONST, names, consts);

    ADDOP_NAME(c, IMPORT_NAME, s->v.ImportFrom.module, names);
    for (i = 0; i < n; i++) {
        alias_ty alias = (alias_ty)asdl_seq_GET(s->v.ImportFrom.names, i);
        identifier store_name;

        if (i == 0 && *PyString_AS_STRING(alias->name) == '*') {

            ADDOP(c, IMPORT_STAR);

        ADDOP_NAME(c, IMPORT_FROM, alias->name, names);
        store_name = alias->name;

            store_name = alias->asname;

        if (!compiler_nameop(c, store_name, Store)) {

    /* remove imported module */
compiler_assert(struct compiler *c, stmt_ty s)
{
    static PyObject *assertion_error = NULL;

    if (Py_OptimizeFlag)

    if (assertion_error == NULL) {
        assertion_error = PyString_FromString("AssertionError");
        if (assertion_error == NULL)

    VISIT(c, expr, s->v.Assert.test);
    end = compiler_new_block(c);

    ADDOP_JREL(c, JUMP_IF_TRUE, end);

    ADDOP_O(c, LOAD_GLOBAL, assertion_error, names);
    if (s->v.Assert.msg) {
        VISIT(c, expr, s->v.Assert.msg);
        ADDOP_I(c, RAISE_VARARGS, 2);

        ADDOP_I(c, RAISE_VARARGS, 1);

    compiler_use_next_block(c, end);
compiler_visit_stmt(struct compiler *c, stmt_ty s)
{
    /* Always assign a lineno to the next instruction for a stmt. */
    c->u->u_lineno = s->lineno;
    c->u->u_lineno_set = false;

    case FunctionDef_kind:
        return compiler_function(c, s);

        return compiler_class(c, s);

        if (c->u->u_ste->ste_type != FunctionBlock)
            return compiler_error(c, "'return' outside function");
        if (s->v.Return.value) {
            VISIT(c, expr, s->v.Return.value);

            ADDOP_O(c, LOAD_CONST, Py_None, consts);
        ADDOP(c, RETURN_VALUE);

        VISIT_SEQ(c, expr, s->v.Delete.targets)

        n = asdl_seq_LEN(s->v.Assign.targets);
        VISIT(c, expr, s->v.Assign.value);
        for (i = 0; i < n; i++) {

                  (expr_ty)asdl_seq_GET(s->v.Assign.targets, i));

    case AugAssign_kind:
        return compiler_augassign(c, s);

        return compiler_print(c, s);

        return compiler_for(c, s);

        return compiler_while(c, s);

        return compiler_if(c, s);

        if (s->v.Raise.type) {
            VISIT(c, expr, s->v.Raise.type);

            if (s->v.Raise.inst) {
                VISIT(c, expr, s->v.Raise.inst);

                if (s->v.Raise.tback) {
                    VISIT(c, expr, s->v.Raise.tback);

        ADDOP_I(c, RAISE_VARARGS, n);

    case TryExcept_kind:
        return compiler_try_except(c, s);
    case TryFinally_kind:
        return compiler_try_finally(c, s);

        return compiler_assert(c, s);

        return compiler_import(c, s);
    case ImportFrom_kind:
        return compiler_from_import(c, s);

        VISIT(c, expr, s->v.Exec.body);
        if (s->v.Exec.globals) {
            VISIT(c, expr, s->v.Exec.globals);
            if (s->v.Exec.locals) {
                VISIT(c, expr, s->v.Exec.locals);

            ADDOP_O(c, LOAD_CONST, Py_None, consts);

        ADDOP(c, EXEC_STMT);

        if (c->c_interactive && c->c_nestlevel <= 1) {
            VISIT(c, expr, s->v.Expr.value);
            ADDOP(c, PRINT_EXPR);

        else if (s->v.Expr.value->kind != Str_kind &&
                 s->v.Expr.value->kind != Num_kind) {
            VISIT(c, expr, s->v.Expr.value);

        if (!c->u->u_nfblocks)
            return compiler_error(c, "'break' outside loop");
        ADDOP(c, BREAK_LOOP);

        return compiler_continue(c);

        return compiler_with(c, s);
unaryop(unaryop_ty op)
{
        return UNARY_INVERT;

        return UNARY_POSITIVE;

        return UNARY_NEGATIVE;

binop(struct compiler *c, operator_ty op)
{
        return BINARY_SUBTRACT;

        return BINARY_MULTIPLY;

        if (c->c_flags && c->c_flags->cf_flags & CO_FUTURE_DIVISION)
            return BINARY_TRUE_DIVIDE;

            return BINARY_DIVIDE;

        return BINARY_MODULO;

        return BINARY_POWER;

        return BINARY_LSHIFT;

        return BINARY_RSHIFT;

        return BINARY_FLOOR_DIVIDE;

        return PyCmp_IS_NOT;

        return PyCmp_NOT_IN;

inplace_binop(struct compiler *c, operator_ty op)
{
        return INPLACE_SUBTRACT;

        return INPLACE_MULTIPLY;

        if (c->c_flags && c->c_flags->cf_flags & CO_FUTURE_DIVISION)
            return INPLACE_TRUE_DIVIDE;

            return INPLACE_DIVIDE;

        return INPLACE_MODULO;

        return INPLACE_POWER;

        return INPLACE_LSHIFT;

        return INPLACE_RSHIFT;

        return INPLACE_FLOOR_DIVIDE;

    PyErr_Format(PyExc_SystemError,
                 "inplace binary op %d should not be possible", op);
compiler_nameop(struct compiler *c, identifier name, expr_context_ty ctx)
    enum { OP_FAST, OP_GLOBAL, OP_DEREF, OP_NAME } optype;
    PyObject *dict = c->u->u_names;
    /* XXX AugStore isn't used anywhere! */

    /* First check for assignment to __debug__. Param? */
    if ((ctx == Store || ctx == AugStore || ctx == Del)
        && !strcmp(PyString_AS_STRING(name), "__debug__")) {
        return compiler_error(c, "can not assign to __debug__");

    mangled = _Py_Mangle(c->u->u_private, name);
    scope = PyST_GetScope(c->u->u_ste, mangled);
        dict = c->u->u_freevars;
        dict = c->u->u_cellvars;
        if (c->u->u_ste->ste_type == FunctionBlock)
    case GLOBAL_IMPLICIT:
        if (c->u->u_ste->ste_type == FunctionBlock &&
            !c->u->u_ste->ste_unoptimized)
    case GLOBAL_EXPLICIT:
        /* scope can be 0 */

    /* XXX Leave assert here, but handle __doc__ and the like better */
    assert(scope || PyString_AS_STRING(name)[0] == '_');

        case Load: op = LOAD_DEREF; break;
        case Store: op = STORE_DEREF; break;
            PyErr_Format(PyExc_SyntaxError,
                         "can not delete variable '%s' referenced "
                         "in nested scope",
                         PyString_AS_STRING(name));
            PyErr_SetString(PyExc_SystemError,
                            "param invalid for deref variable");
        case Load: op = LOAD_FAST; break;
        case Store: op = STORE_FAST; break;
        case Del: op = DELETE_FAST; break;
            PyErr_SetString(PyExc_SystemError,
                            "param invalid for local variable");
        ADDOP_O(c, op, mangled, varnames);
        case Load: op = LOAD_GLOBAL; break;
        case Store: op = STORE_GLOBAL; break;
        case Del: op = DELETE_GLOBAL; break;
            PyErr_SetString(PyExc_SystemError,
                            "param invalid for global variable");
        case Load: op = LOAD_NAME; break;
        case Store: op = STORE_NAME; break;
        case Del: op = DELETE_NAME; break;
            PyErr_SetString(PyExc_SystemError,
                            "param invalid for name variable");

    arg = compiler_add_o(c, dict, mangled);
    return compiler_addop_i(c, op, arg);
compiler_boolop(struct compiler *c, expr_ty e)
    assert(e->kind == BoolOp_kind);
    if (e->v.BoolOp.op == And)
        jumpi = JUMP_IF_FALSE;
    else
        jumpi = JUMP_IF_TRUE;
    end = compiler_new_block(c);
    s = e->v.BoolOp.values;
    n = asdl_seq_LEN(s) - 1;
    for (i = 0; i < n; ++i) {
        VISIT(c, expr, (expr_ty)asdl_seq_GET(s, i));
        ADDOP_JREL(c, jumpi, end);
    VISIT(c, expr, (expr_ty)asdl_seq_GET(s, n));
    compiler_use_next_block(c, end);
compiler_list(struct compiler *c, expr_ty e)
    int n = asdl_seq_LEN(e->v.List.elts);
    if (e->v.List.ctx == Store) {
        ADDOP_I(c, UNPACK_SEQUENCE, n);
    VISIT_SEQ(c, expr, e->v.List.elts);
    if (e->v.List.ctx == Load) {
        ADDOP_I(c, BUILD_LIST, n);

compiler_tuple(struct compiler *c, expr_ty e)
    int n = asdl_seq_LEN(e->v.Tuple.elts);
    if (e->v.Tuple.ctx == Store) {
        ADDOP_I(c, UNPACK_SEQUENCE, n);
    VISIT_SEQ(c, expr, e->v.Tuple.elts);
    if (e->v.Tuple.ctx == Load) {
        ADDOP_I(c, BUILD_TUPLE, n);
compiler_compare(struct compiler *c, expr_ty e)
    basicblock *cleanup = NULL;

    /* XXX the logic can be cleaned up for 1 or multiple comparisons */
    VISIT(c, expr, e->v.Compare.left);
    n = asdl_seq_LEN(e->v.Compare.ops);
        cleanup = compiler_new_block(c);
        if (cleanup == NULL)
        VISIT(c, expr,
              (expr_ty)asdl_seq_GET(e->v.Compare.comparators, 0));
    for (i = 1; i < n; i++) {
        ADDOP(c, ROT_THREE);
        ADDOP_I(c, COMPARE_OP,
                cmpop((cmpop_ty)(asdl_seq_GET(e->v.Compare.ops, i - 1))));
        ADDOP_JREL(c, JUMP_IF_FALSE, cleanup);
        VISIT(c, expr,
              (expr_ty)asdl_seq_GET(e->v.Compare.comparators, i));
    VISIT(c, expr, (expr_ty)asdl_seq_GET(e->v.Compare.comparators, n - 1));
    ADDOP_I(c, COMPARE_OP,
            cmpop((cmpop_ty)(asdl_seq_GET(e->v.Compare.ops, n - 1))));
        basicblock *end = compiler_new_block(c);
        ADDOP_JREL(c, JUMP_FORWARD, end);
        compiler_use_next_block(c, cleanup);
        compiler_use_next_block(c, end);
compiler_call(struct compiler *c, expr_ty e)
    VISIT(c, expr, e->v.Call.func);
    n = asdl_seq_LEN(e->v.Call.args);
    VISIT_SEQ(c, expr, e->v.Call.args);
    if (e->v.Call.keywords) {
        VISIT_SEQ(c, keyword, e->v.Call.keywords);
        n |= asdl_seq_LEN(e->v.Call.keywords) << 8;
    if (e->v.Call.starargs) {
        VISIT(c, expr, e->v.Call.starargs);
    if (e->v.Call.kwargs) {
        VISIT(c, expr, e->v.Call.kwargs);
        ADDOP_I(c, CALL_FUNCTION, n);
        ADDOP_I(c, CALL_FUNCTION_VAR, n);
        ADDOP_I(c, CALL_FUNCTION_KW, n);
        ADDOP_I(c, CALL_FUNCTION_VAR_KW, n);
compiler_listcomp_generator(struct compiler *c, PyObject *tmpname,
                            asdl_seq *generators, int gen_index,
    /* generate code for the iterator, then each of the ifs,
       and then write to the element */
    basicblock *start, *anchor, *skip, *if_cleanup;

    start = compiler_new_block(c);
    skip = compiler_new_block(c);
    if_cleanup = compiler_new_block(c);
    anchor = compiler_new_block(c);
    if (start == NULL || skip == NULL || if_cleanup == NULL ||

    l = (comprehension_ty)asdl_seq_GET(generators, gen_index);
    VISIT(c, expr, l->iter);
    compiler_use_next_block(c, start);
    ADDOP_JREL(c, FOR_ITER, anchor);
    VISIT(c, expr, l->target);

    /* XXX this needs to be cleaned up...a lot! */
    n = asdl_seq_LEN(l->ifs);
    for (i = 0; i < n; i++) {
        expr_ty e = (expr_ty)asdl_seq_GET(l->ifs, i);
        ADDOP_JREL(c, JUMP_IF_FALSE, if_cleanup);

    if (++gen_index < asdl_seq_LEN(generators))
        if (!compiler_listcomp_generator(c, tmpname,
                                         generators, gen_index, elt))

    /* only append after the last for generator */
    if (gen_index >= asdl_seq_LEN(generators)) {
        if (!compiler_nameop(c, tmpname, Load))
        VISIT(c, expr, elt);
        ADDOP(c, LIST_APPEND);
        compiler_use_next_block(c, skip);
    for (i = 0; i < n; i++) {
        ADDOP_I(c, JUMP_FORWARD, 1);
        compiler_use_next_block(c, if_cleanup);
    ADDOP_JABS(c, JUMP_ABSOLUTE, start);
    compiler_use_next_block(c, anchor);
    /* delete the append method added to locals */
    if (!compiler_nameop(c, tmpname, Del))

compiler_listcomp(struct compiler *c, expr_ty e)
    static identifier append;
    asdl_seq *generators = e->v.ListComp.generators;

    assert(e->kind == ListComp_kind);
        append = PyString_InternFromString("append");
    tmp = compiler_new_tmpname(c);
    ADDOP_I(c, BUILD_LIST, 0);
    if (compiler_nameop(c, tmp, Store))
        rc = compiler_listcomp_generator(c, tmp, generators, 0,
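/* A list comprehension such as [x*x for x in seq] therefore builds an
   empty list, stores it under a compiler-generated temporary (see
   compiler_new_tmpname), and runs an ordinary FOR_ITER loop that appends
   to it -- roughly:

       BUILD_LIST 0 ; DUP_TOP ; STORE_* <tmp>
       LOAD seq ; GET_ITER
   loop:    FOR_ITER anchor ; STORE_* x
            LOAD <tmp> ; LOAD x ; LOAD x ; BINARY_MULTIPLY ; LIST_APPEND
            JUMP_ABSOLUTE loop
   anchor:  DELETE_* <tmp>

   where <tmp> stands for the hidden temporary name; its exact spelling is
   an implementation detail. */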
compiler_genexp_generator(struct compiler *c,
                          asdl_seq *generators, int gen_index,
    /* generate code for the iterator, then each of the ifs,
       and then write to the element */
    comprehension_ty ge;
    basicblock *start, *anchor, *skip, *if_cleanup, *end;

    start = compiler_new_block(c);
    skip = compiler_new_block(c);
    if_cleanup = compiler_new_block(c);
    anchor = compiler_new_block(c);
    end = compiler_new_block(c);
    if (start == NULL || skip == NULL || if_cleanup == NULL ||
        anchor == NULL || end == NULL)

    ge = (comprehension_ty)asdl_seq_GET(generators, gen_index);
    ADDOP_JREL(c, SETUP_LOOP, end);
    if (!compiler_push_fblock(c, LOOP, start))

    if (gen_index == 0) {
        /* Receive outermost iter as an implicit argument */
        c->u->u_argcount = 1;
        ADDOP_I(c, LOAD_FAST, 0);
    else {
        /* Sub-iter - calculate on the fly */
        VISIT(c, expr, ge->iter);
    compiler_use_next_block(c, start);
    ADDOP_JREL(c, FOR_ITER, anchor);
    VISIT(c, expr, ge->target);

    /* XXX this needs to be cleaned up...a lot! */
    n = asdl_seq_LEN(ge->ifs);
    for (i = 0; i < n; i++) {
        expr_ty e = (expr_ty)asdl_seq_GET(ge->ifs, i);
        ADDOP_JREL(c, JUMP_IF_FALSE, if_cleanup);

    if (++gen_index < asdl_seq_LEN(generators))
        if (!compiler_genexp_generator(c, generators, gen_index, elt))

    /* only append after the last 'for' generator */
    if (gen_index >= asdl_seq_LEN(generators)) {
        VISIT(c, expr, elt);
        ADDOP(c, YIELD_VALUE);
        compiler_use_next_block(c, skip);
    for (i = 0; i < n; i++) {
        ADDOP_I(c, JUMP_FORWARD, 1);
        compiler_use_next_block(c, if_cleanup);
    ADDOP_JABS(c, JUMP_ABSOLUTE, start);
    compiler_use_next_block(c, anchor);
    ADDOP(c, POP_BLOCK);
    compiler_pop_fblock(c, LOOP, start);
    compiler_use_next_block(c, end);
compiler_genexp(struct compiler *c, expr_ty e)
    static identifier name;
    expr_ty outermost_iter = ((comprehension_ty)
                              (asdl_seq_GET(e->v.GeneratorExp.generators,
                                            0)))->iter;

        name = PyString_FromString("<genexpr>");
    if (!compiler_enter_scope(c, name, (void *)e, e->lineno))
    compiler_genexp_generator(c, e->v.GeneratorExp.generators, 0,
                              e->v.GeneratorExp.elt);
    co = assemble(c, 1);
    compiler_exit_scope(c);

    compiler_make_closure(c, co, 0);
    VISIT(c, expr, outermost_iter);
    ADDOP_I(c, CALL_FUNCTION, 1);
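/* So a generator expression like (x*x for x in seq) becomes a separate
   code object named "<genexpr>" whose body is the FOR_ITER/YIELD_VALUE
   loop built above.  At the point of use the compiler emits roughly

       LOAD_CONST <code for <genexpr>> ; MAKE_FUNCTION 0
       LOAD seq ; GET_ITER
       CALL_FUNCTION 1

   (MAKE_CLOSURE instead of MAKE_FUNCTION when free variables are
   involved): the outermost iterable is evaluated eagerly in the enclosing
   scope and passed to the generator as its single argument. */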
compiler_visit_keyword(struct compiler *c, keyword_ty k)
    ADDOP_O(c, LOAD_CONST, k->arg, consts);
    VISIT(c, expr, k->value);
/* Test whether expression is constant. For constants, report
   whether they are true or false.

   Return values: 1 for true, 0 for false, -1 for non-constant.
 */
expr_constant(expr_ty e)
        return PyObject_IsTrue(e->v.Num.n);
        return PyObject_IsTrue(e->v.Str.s);
        /* __debug__ is not assignable, so we can optimize
         * it away in if and while statements */
        if (strcmp(PyString_AS_STRING(e->v.Name.id),
                   "__debug__") == 0)
            return ! Py_OptimizeFlag;
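/* compiler_if and compiler_while use this to avoid emitting code for
   branches that can never run, e.g. "while 0:" or, under -O, the body of
   "if __debug__:", since __debug__ then reports false here. */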
/*
   Implements the with statement from PEP 343.

   The semantics outlined in that PEP are as follows:

   with EXPR as VAR:
       BLOCK

   It is implemented roughly as:

   context = EXPR
   exit = context.__exit__  # not calling it
   value = context.__enter__()
   try:
       VAR = value  # if VAR present in the syntax
       BLOCK
   finally:
       if an exception was raised:
           exc = copy of (exception, instance, traceback)
       else:
           exc = (None, None, None)
       exit(*exc)
 */
compiler_with(struct compiler *c, stmt_ty s)
    static identifier enter_attr, exit_attr;
    basicblock *block, *finally;
    identifier tmpexit, tmpvalue = NULL;

    assert(s->kind == With_kind);

        enter_attr = PyString_InternFromString("__enter__");
        exit_attr = PyString_InternFromString("__exit__");

    block = compiler_new_block(c);
    finally = compiler_new_block(c);
    if (!block || !finally)

    /* Create a temporary variable to hold context.__exit__ */
    tmpexit = compiler_new_tmpname(c);
    if (tmpexit == NULL)
    PyArena_AddPyObject(c->c_arena, tmpexit);

    if (s->v.With.optional_vars) {
        /* Create a temporary variable to hold context.__enter__().
           We need to do this rather than preserving it on the stack
           because SETUP_FINALLY remembers the stack level.
           We need to do the assignment *inside* the try/finally
           so that context.__exit__() is called when the assignment
           fails.  But we need to call context.__enter__() *before*
           the try/finally so that if it fails we won't call
           context.__exit__(). */
        tmpvalue = compiler_new_tmpname(c);
        if (tmpvalue == NULL)
        PyArena_AddPyObject(c->c_arena, tmpvalue);

    VISIT(c, expr, s->v.With.context_expr);

    /* Squirrel away context.__exit__ */
    ADDOP_O(c, LOAD_ATTR, exit_attr, names);
    if (!compiler_nameop(c, tmpexit, Store))

    /* Call context.__enter__() */
    ADDOP_O(c, LOAD_ATTR, enter_attr, names);
    ADDOP_I(c, CALL_FUNCTION, 0);

    if (s->v.With.optional_vars) {
        /* Store it in tmpvalue */
        if (!compiler_nameop(c, tmpvalue, Store))

    /* Discard result from context.__enter__() */

    /* Start the try block */
    ADDOP_JREL(c, SETUP_FINALLY, finally);

    compiler_use_next_block(c, block);
    if (!compiler_push_fblock(c, FINALLY_TRY, block)) {

    if (s->v.With.optional_vars) {
        /* Bind saved result of context.__enter__() to VAR */
        if (!compiler_nameop(c, tmpvalue, Load) ||
            !compiler_nameop(c, tmpvalue, Del))
        VISIT(c, expr, s->v.With.optional_vars);

    VISIT_SEQ(c, stmt, s->v.With.body);

    /* End of try block; start the finally block */
    ADDOP(c, POP_BLOCK);
    compiler_pop_fblock(c, FINALLY_TRY, block);

    ADDOP_O(c, LOAD_CONST, Py_None, consts);
    compiler_use_next_block(c, finally);
    if (!compiler_push_fblock(c, FINALLY_END, finally))

    /* Finally block starts; push tmpexit and issue our magic opcode. */
    if (!compiler_nameop(c, tmpexit, Load) ||
        !compiler_nameop(c, tmpexit, Del))
    ADDOP(c, WITH_CLEANUP);

    /* Finally block ends. */
    ADDOP(c, END_FINALLY);
    compiler_pop_fblock(c, FINALLY_END, finally);
compiler_visit_expr(struct compiler *c, expr_ty e)
    /* If expr e has a different line number than the last expr/stmt,
       set a new line number for the next instruction.
     */
    if (e->lineno > c->u->u_lineno) {
        c->u->u_lineno = e->lineno;
        c->u->u_lineno_set = false;

        return compiler_boolop(c, e);
        VISIT(c, expr, e->v.BinOp.left);
        VISIT(c, expr, e->v.BinOp.right);
        ADDOP(c, binop(c, e->v.BinOp.op));
        VISIT(c, expr, e->v.UnaryOp.operand);
        ADDOP(c, unaryop(e->v.UnaryOp.op));
        return compiler_lambda(c, e);
        return compiler_ifexp(c, e);
        /* XXX get rid of arg? */
        ADDOP_I(c, BUILD_MAP, 0);
        n = asdl_seq_LEN(e->v.Dict.values);
        /* We must arrange things just right for STORE_SUBSCR.
           It wants the stack to look like (value) (dict) (key) */
        for (i = 0; i < n; i++) {
            VISIT(c, expr,
                  (expr_ty)asdl_seq_GET(e->v.Dict.values, i));
            VISIT(c, expr,
                  (expr_ty)asdl_seq_GET(e->v.Dict.keys, i));
            ADDOP(c, STORE_SUBSCR);
        return compiler_listcomp(c, e);
    case GeneratorExp_kind:
        return compiler_genexp(c, e);
        if (c->u->u_ste->ste_type != FunctionBlock)
            return compiler_error(c, "'yield' outside function");
        for (i = 0; i < c->u->u_nfblocks; i++) {
            if (c->u->u_fblock[i].fb_type == FINALLY_TRY)
                return compiler_error(
                    c, "'yield' not allowed in a 'try' "
                    "block with a 'finally' clause");
        if (e->v.Yield.value) {
            VISIT(c, expr, e->v.Yield.value);
        ADDOP_O(c, LOAD_CONST, Py_None, consts);
        ADDOP(c, YIELD_VALUE);
        return compiler_compare(c, e);
        return compiler_call(c, e);
        VISIT(c, expr, e->v.Repr.value);
        ADDOP(c, UNARY_CONVERT);
        ADDOP_O(c, LOAD_CONST, e->v.Num.n, consts);
        ADDOP_O(c, LOAD_CONST, e->v.Str.s, consts);
    /* The following exprs can be assignment targets. */
    case Attribute_kind:
        if (e->v.Attribute.ctx != AugStore)
            VISIT(c, expr, e->v.Attribute.value);
        switch (e->v.Attribute.ctx) {
            /* Fall through to load */
            ADDOP_NAME(c, LOAD_ATTR, e->v.Attribute.attr, names);
            /* Fall through to save */
            ADDOP_NAME(c, STORE_ATTR, e->v.Attribute.attr, names);
            ADDOP_NAME(c, DELETE_ATTR, e->v.Attribute.attr, names);
            PyErr_SetString(PyExc_SystemError,
                            "param invalid in attribute expression");
    case Subscript_kind:
        switch (e->v.Subscript.ctx) {
            VISIT(c, expr, e->v.Subscript.value);
            VISIT_SLICE(c, e->v.Subscript.slice, AugLoad);
            VISIT(c, expr, e->v.Subscript.value);
            VISIT_SLICE(c, e->v.Subscript.slice, Load);
            VISIT_SLICE(c, e->v.Subscript.slice, AugStore);
            VISIT(c, expr, e->v.Subscript.value);
            VISIT_SLICE(c, e->v.Subscript.slice, Store);
            VISIT(c, expr, e->v.Subscript.value);
            VISIT_SLICE(c, e->v.Subscript.slice, Del);
            PyErr_SetString(PyExc_SystemError,
                            "param invalid in subscript expression");
        return compiler_nameop(c, e->v.Name.id, e->v.Name.ctx);
    /* child nodes of List and Tuple will have expr_context set */
        return compiler_list(c, e);
        return compiler_tuple(c, e);
compiler_augassign(struct compiler *c, stmt_ty s)
    expr_ty e = s->v.AugAssign.target;

    assert(s->kind == AugAssign_kind);

    case Attribute_kind:
        auge = Attribute(e->v.Attribute.value, e->v.Attribute.attr,
                         AugLoad, e->lineno, e->col_offset, c->c_arena);
        VISIT(c, expr, auge);
        VISIT(c, expr, s->v.AugAssign.value);
        ADDOP(c, inplace_binop(c, s->v.AugAssign.op));
        auge->v.Attribute.ctx = AugStore;
        VISIT(c, expr, auge);
    case Subscript_kind:
        auge = Subscript(e->v.Subscript.value, e->v.Subscript.slice,
                         AugLoad, e->lineno, e->col_offset, c->c_arena);
        VISIT(c, expr, auge);
        VISIT(c, expr, s->v.AugAssign.value);
        ADDOP(c, inplace_binop(c, s->v.AugAssign.op));
        auge->v.Subscript.ctx = AugStore;
        VISIT(c, expr, auge);
        if (!compiler_nameop(c, e->v.Name.id, Load))
        VISIT(c, expr, s->v.AugAssign.value);
        ADDOP(c, inplace_binop(c, s->v.AugAssign.op));
        return compiler_nameop(c, e->v.Name.id, Store);
    PyErr_Format(PyExc_SystemError,
                 "invalid node type (%d) for augmented assignment",
compiler_push_fblock(struct compiler *c, enum fblocktype t, basicblock *b)
    struct fblockinfo *f;
    if (c->u->u_nfblocks >= CO_MAXBLOCKS)
    f = &c->u->u_fblock[c->u->u_nfblocks++];
compiler_pop_fblock(struct compiler *c, enum fblocktype t, basicblock *b)
    struct compiler_unit *u = c->u;
    assert(u->u_nfblocks > 0);
    u->u_nfblocks--;
    assert(u->u_fblock[u->u_nfblocks].fb_type == t);
    assert(u->u_fblock[u->u_nfblocks].fb_block == b);
/* Raises a SyntaxError and returns 0.
   If something goes wrong, a different exception may be raised.
*/
compiler_error(struct compiler *c, const char *errstr)
    PyObject *u = NULL, *v = NULL;

    loc = PyErr_ProgramText(c->c_filename, c->u->u_lineno);
    u = Py_BuildValue("(ziOO)", c->c_filename, c->u->u_lineno,
    v = Py_BuildValue("(zO)", errstr, u);
    PyErr_SetObject(PyExc_SyntaxError, v);
compiler_handle_subscr(struct compiler *c, const char *kind,
                       expr_context_ty ctx)
    /* XXX this code is duplicated */
    case AugLoad: /* fall through to Load */
    case Load:    op = BINARY_SUBSCR; break;
    case AugStore:/* fall through to Store */
    case Store:   op = STORE_SUBSCR; break;
    case Del:     op = DELETE_SUBSCR; break;
        PyErr_Format(PyExc_SystemError,
                     "invalid %s kind %d in subscript\n",
    if (ctx == AugLoad) {
        ADDOP_I(c, DUP_TOPX, 2);
    else if (ctx == AugStore) {
        ADDOP(c, ROT_THREE);
compiler_slice(struct compiler *c, slice_ty s, expr_context_ty ctx)
    assert(s->kind == Slice_kind);

    /* only handles the cases where BUILD_SLICE is emitted */
    if (s->v.Slice.lower) {
        VISIT(c, expr, s->v.Slice.lower);
    else {
        ADDOP_O(c, LOAD_CONST, Py_None, consts);

    if (s->v.Slice.upper) {
        VISIT(c, expr, s->v.Slice.upper);
    else {
        ADDOP_O(c, LOAD_CONST, Py_None, consts);

    if (s->v.Slice.step) {
        VISIT(c, expr, s->v.Slice.step);
    ADDOP_I(c, BUILD_SLICE, n);
compiler_simple_slice(struct compiler *c, slice_ty s, expr_context_ty ctx)
    int op = 0, slice_offset = 0, stack_count = 0;

    assert(s->v.Slice.step == NULL);
    if (s->v.Slice.lower) {
        if (ctx != AugStore)
            VISIT(c, expr, s->v.Slice.lower);
    if (s->v.Slice.upper) {
        if (ctx != AugStore)
            VISIT(c, expr, s->v.Slice.upper);

    if (ctx == AugLoad) {
        switch (stack_count) {
        case 0: ADDOP(c, DUP_TOP); break;
        case 1: ADDOP_I(c, DUP_TOPX, 2); break;
        case 2: ADDOP_I(c, DUP_TOPX, 3); break;
    else if (ctx == AugStore) {
        switch (stack_count) {
        case 0: ADDOP(c, ROT_TWO); break;
        case 1: ADDOP(c, ROT_THREE); break;
        case 2: ADDOP(c, ROT_FOUR); break;

    case AugLoad: /* fall through to Load */
    case Load:    op = SLICE; break;
    case AugStore:/* fall through to Store */
    case Store:   op = STORE_SLICE; break;
    case Del:     op = DELETE_SLICE; break;
        PyErr_SetString(PyExc_SystemError,
                        "param invalid in simple slice");
    ADDOP(c, op + slice_offset);
compiler_visit_nested_slice(struct compiler *c, slice_ty s,
                            expr_context_ty ctx)
        ADDOP_O(c, LOAD_CONST, Py_Ellipsis, consts);
        return compiler_slice(c, s, ctx);
        VISIT(c, expr, s->v.Index.value);
        PyErr_SetString(PyExc_SystemError,
                        "extended slice invalid in nested slice");
compiler_visit_slice(struct compiler *c, slice_ty s, expr_context_ty ctx)
    char * kindname = NULL;

        if (ctx != AugStore) {
            VISIT(c, expr, s->v.Index.value);
        kindname = "ellipsis";
        if (ctx != AugStore) {
            ADDOP_O(c, LOAD_CONST, Py_Ellipsis, consts);
        if (!s->v.Slice.step)
            return compiler_simple_slice(c, s, ctx);
        if (ctx != AugStore) {
            if (!compiler_slice(c, s, ctx))
        kindname = "extended slice";
        if (ctx != AugStore) {
            int i, n = asdl_seq_LEN(s->v.ExtSlice.dims);
            for (i = 0; i < n; i++) {
                slice_ty sub = (slice_ty)asdl_seq_GET(
                    s->v.ExtSlice.dims, i);
                if (!compiler_visit_nested_slice(c, sub, ctx))
            ADDOP_I(c, BUILD_TUPLE, n);
        PyErr_Format(PyExc_SystemError,
                     "invalid subscript kind %d", s->kind);
    return compiler_handle_subscr(c, kindname, ctx);
/* do depth-first search of basic block graph, starting with block.
   post records the block indices in post-order.

   XXX must handle implicit jumps from one block to next
*/
dfs(struct compiler *c, basicblock *b, struct assembler *a)
    struct instr *instr = NULL;

    if (b->b_next != NULL)
        dfs(c, b->b_next, a);
    for (i = 0; i < b->b_iused; i++) {
        instr = &b->b_instr[i];
        if (instr->i_jrel || instr->i_jabs)
            dfs(c, instr->i_target, a);
    a->a_postorder[a->a_nblocks++] = b;
stackdepth_walk(struct compiler *c, basicblock *b, int depth, int maxdepth)
    struct instr *instr;
    if (b->b_seen || b->b_startdepth >= depth)
    b->b_startdepth = depth;
    for (i = 0; i < b->b_iused; i++) {
        instr = &b->b_instr[i];
        depth += opcode_stack_effect(instr->i_opcode, instr->i_oparg);
        if (depth > maxdepth)
        assert(depth >= 0); /* invalid code or bug in stackdepth() */
        if (instr->i_jrel || instr->i_jabs) {
            maxdepth = stackdepth_walk(c, instr->i_target,
            if (instr->i_opcode == JUMP_ABSOLUTE ||
                instr->i_opcode == JUMP_FORWARD) {
                goto out; /* remaining code is dead */
    maxdepth = stackdepth_walk(c, b->b_next, depth, maxdepth);
/* Find the flow path that needs the largest stack.  We assume that
 * cycles in the flow graph have no net effect on the stack depth.
 */
stackdepth(struct compiler *c)
    basicblock *b, *entryblock;

    for (b = c->u->u_blocks; b != NULL; b = b->b_list) {
        b->b_startdepth = INT_MIN;
    return stackdepth_walk(c, entryblock, 0, 0);
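/* Example: a block consisting of LOAD_CONST, LOAD_CONST, BINARY_ADD,
   RETURN_VALUE reaches depths 1, 2, 1, 0 during the walk, so the code
   object would get co_stacksize == 2.  The value only needs to be a safe
   upper bound over every path: overestimating wastes a little frame
   space, underestimating would let the value stack overflow. */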
assemble_init(struct assembler *a, int nblocks, int firstlineno)
    memset(a, 0, sizeof(struct assembler));
    a->a_lineno = firstlineno;
    a->a_bytecode = PyString_FromStringAndSize(NULL, DEFAULT_CODE_SIZE);
    a->a_lnotab = PyString_FromStringAndSize(NULL, DEFAULT_LNOTAB_SIZE);
    a->a_postorder = (basicblock **)PyObject_Malloc(
                         sizeof(basicblock *) * nblocks);
    if (!a->a_postorder) {

assemble_free(struct assembler *a)
    Py_XDECREF(a->a_bytecode);
    Py_XDECREF(a->a_lnotab);
    PyObject_Free(a->a_postorder);
/* Return the size of a basic block in bytes. */

instrsize(struct instr *instr)
    if (!instr->i_hasarg)
        return 1;
    if (instr->i_oparg > 0xffff)
        return 6;
    return 3;

blocksize(basicblock *b)
    for (i = 0; i < b->b_iused; i++)
        size += instrsize(&b->b_instr[i]);
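/* An instruction therefore takes 1 byte (no argument), 3 bytes (opcode
   plus 16-bit argument), or 6 bytes when the argument doesn't fit in 16
   bits and an EXTENDED_ARG prefix is required.  A jump whose target
   offset is, say, 0x12345 is counted as 6 bytes here and later emitted as
   EXTENDED_ARG 0x0001 followed by the jump opcode with argument 0x2345. */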
/* All about a_lnotab.

c_lnotab is an array of unsigned bytes disguised as a Python string.
It is used to map bytecode offsets to source code line #s (when needed
for tracebacks).

The array is conceptually a list of
    (bytecode offset increment, line number increment)
pairs.  The details are important and delicate, best illustrated by example:

    byte code offset    source code line number
        0                   1
        6                   2
       50                   7
      350                 307
      361                 308

The first trick is that these numbers aren't stored, only the increments
from one row to the next (this doesn't really work, but it's a start):

    0, 1,  6, 1,  44, 5,  300, 300,  11, 1

The second trick is that an unsigned byte can't hold negative values, or
values larger than 255, so (a) there's a deep assumption that byte code
offsets and their corresponding line #s both increase monotonically, and (b)
if at least one column jumps by more than 255 from one row to the next, more
than one pair is written to the table. In case #b, there's no way to know
from looking at the table later how many were written. That's the delicate
part. A user of c_lnotab desiring to find the source line number
corresponding to a bytecode address A should do something like this

    lineno = addr = 0
    for addr_incr, line_incr in c_lnotab:
        addr += addr_incr
        if addr > A:
            return lineno
        lineno += line_incr

In order for this to work, when the addr field increments by more than 255,
the line # increment in each pair generated must be 0 until the remaining addr
increment is < 256. So, in the example above, assemble_lnotab (it used
to be called com_set_lineno) should not (as was actually done until 2.2)
expand 300, 300 to 255, 255, 45, 45,
but to 255, 0, 45, 255, 0, 45.
*/
assemble_lnotab(struct assembler *a, struct instr *i)
    int d_bytecode, d_lineno;
    unsigned char *lnotab;

    d_bytecode = a->a_offset - a->a_lineno_off;
    d_lineno = i->i_lineno - a->a_lineno;

    assert(d_bytecode >= 0);
    assert(d_lineno >= 0);

    /* XXX(nnorwitz): is there a better way to handle this?
       for loops are special, we want to be able to trace them
       each time around, so we need to set an extra line number. */
    if (d_lineno == 0 && i->i_opcode != FOR_ITER)

    if (d_bytecode > 255) {
        int j, nbytes, ncodes = d_bytecode / 255;
        nbytes = a->a_lnotab_off + 2 * ncodes;
        len = PyString_GET_SIZE(a->a_lnotab);
        if (nbytes >= len) {
            if (len * 2 < nbytes)
            if (_PyString_Resize(&a->a_lnotab, len) < 0)
        lnotab = (unsigned char *)
                 PyString_AS_STRING(a->a_lnotab) + a->a_lnotab_off;
        for (j = 0; j < ncodes; j++) {
        d_bytecode -= ncodes * 255;
        a->a_lnotab_off += ncodes * 2;
    assert(d_bytecode <= 255);
    if (d_lineno > 255) {
        int j, nbytes, ncodes = d_lineno / 255;
        nbytes = a->a_lnotab_off + 2 * ncodes;
        len = PyString_GET_SIZE(a->a_lnotab);
        if (nbytes >= len) {
            if (len * 2 < nbytes)
            if (_PyString_Resize(&a->a_lnotab, len) < 0)
        lnotab = (unsigned char *)
                 PyString_AS_STRING(a->a_lnotab) + a->a_lnotab_off;
        *lnotab++ = d_bytecode;
        for (j = 1; j < ncodes; j++) {
        d_lineno -= ncodes * 255;
        a->a_lnotab_off += ncodes * 2;

    len = PyString_GET_SIZE(a->a_lnotab);
    if (a->a_lnotab_off + 2 >= len) {
        if (_PyString_Resize(&a->a_lnotab, len * 2) < 0)
    lnotab = (unsigned char *)
             PyString_AS_STRING(a->a_lnotab) + a->a_lnotab_off;

    a->a_lnotab_off += 2;
    if (d_bytecode) {
        *lnotab++ = d_bytecode;
        *lnotab++ = d_lineno;
    else { /* First line of a block; def stmt, etc. */
        *lnotab++ = d_lineno;
    a->a_lineno = i->i_lineno;
    a->a_lineno_off = a->a_offset;
/* Extend the bytecode with a new instruction.
   Update lnotab if necessary.
*/
assemble_emit(struct assembler *a, struct instr *i)
    int size, arg = 0, ext = 0;
    Py_ssize_t len = PyString_GET_SIZE(a->a_bytecode);

    size = instrsize(i);
    if (i->i_lineno && !assemble_lnotab(a, i))
    if (a->a_offset + size >= len) {
        if (_PyString_Resize(&a->a_bytecode, len * 2) < 0)
    code = PyString_AS_STRING(a->a_bytecode) + a->a_offset;
    a->a_offset += size;
        assert(i->i_hasarg);
        *code++ = (char)EXTENDED_ARG;
        *code++ = ext & 0xff;
    *code++ = i->i_opcode;
        assert(size == 3 || size == 6);
        *code++ = arg & 0xff;
assemble_jump_offsets(struct assembler *a, struct compiler *c)
    int bsize, totsize, extended_arg_count, last_extended_arg_count = 0;

    /* Compute the size of each block and fixup jump args.
       Replace block pointer with position in bytecode. */
    for (i = a->a_nblocks - 1; i >= 0; i--) {
        b = a->a_postorder[i];
        bsize = blocksize(b);
        b->b_offset = totsize;
    extended_arg_count = 0;
    for (b = c->u->u_blocks; b != NULL; b = b->b_list) {
        bsize = b->b_offset;
        for (i = 0; i < b->b_iused; i++) {
            struct instr *instr = &b->b_instr[i];
            /* Relative jumps are computed relative to
               the instruction pointer after fetching
               the jump instruction.
            */
            bsize += instrsize(instr);
            if (instr->i_jabs)
                instr->i_oparg = instr->i_target->b_offset;
            else if (instr->i_jrel) {
                int delta = instr->i_target->b_offset - bsize;
                instr->i_oparg = delta;
            if (instr->i_oparg > 0xffff)
                extended_arg_count++;

    /* XXX: This is an awful hack that could hurt performance, but
       on the bright side it should work until we come up
       with a better solution.

       In the meantime, should the goto be dropped in favor
       of a loop?

       The issue is that in the first loop blocksize() is called
       which calls instrsize() which requires i_oparg be set
       appropriately. There is a bootstrap problem because
       i_oparg is calculated in the second loop above.

       So we loop until we stop seeing new EXTENDED_ARGs.
       The only EXTENDED_ARGs that could be popping up are
       ones in jump instructions. So this should converge
       fairly quickly.
    */
    if (last_extended_arg_count != extended_arg_count) {
        last_extended_arg_count = extended_arg_count;
dict_keys_inorder(PyObject *dict, int offset)
    PyObject *tuple, *k, *v;
    Py_ssize_t i, pos = 0, size = PyDict_Size(dict);

    tuple = PyTuple_New(size);
    while (PyDict_Next(dict, &pos, &k, &v)) {
        i = PyInt_AS_LONG(v);
        k = PyTuple_GET_ITEM(k, 0);
        assert((i - offset) < size);
        assert((i - offset) >= 0);
        PyTuple_SET_ITEM(tuple, i - offset, k);
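/* The u_consts/u_names/u_varnames dicts built during compilation map each
   object to the index that the emitted opcodes use for it; this helper
   inverts that mapping into the tuple stored on the code object, so that
   e.g. a dict amounting to {'x': 0, 'y': 1} becomes ('x', 'y').  (Each
   key is essentially an (object, type) pair -- hence the
   PyTuple_GET_ITEM above -- so that constants like 1 and 1.0 stay
   distinct.)  The offset parameter is non-zero only for free variables,
   whose indices follow the cell variables. */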
compute_code_flags(struct compiler *c)
    PySTEntryObject *ste = c->u->u_ste;

    if (ste->ste_type != ModuleBlock)
        flags |= CO_NEWLOCALS;
    if (ste->ste_type == FunctionBlock) {
        if (!ste->ste_unoptimized)
            flags |= CO_OPTIMIZED;
        if (ste->ste_nested)
            flags |= CO_NESTED;
        if (ste->ste_generator)
            flags |= CO_GENERATOR;
    if (ste->ste_varargs)
        flags |= CO_VARARGS;
    if (ste->ste_varkeywords)
        flags |= CO_VARKEYWORDS;
    if (ste->ste_generator)
        flags |= CO_GENERATOR;

    /* (Only) inherit compilerflags in PyCF_MASK */
    flags |= (c->c_flags->cf_flags & PyCF_MASK);

    n = PyDict_Size(c->u->u_freevars);
    n = PyDict_Size(c->u->u_cellvars);
static PyCodeObject *
makecode(struct compiler *c, struct assembler *a)
    PyCodeObject *co = NULL;
    PyObject *consts = NULL;
    PyObject *names = NULL;
    PyObject *varnames = NULL;
    PyObject *filename = NULL;
    PyObject *name = NULL;
    PyObject *freevars = NULL;
    PyObject *cellvars = NULL;
    PyObject *bytecode = NULL;

    tmp = dict_keys_inorder(c->u->u_consts, 0);
    consts = PySequence_List(tmp); /* optimize_code requires a list */
    names = dict_keys_inorder(c->u->u_names, 0);
    varnames = dict_keys_inorder(c->u->u_varnames, 0);
    if (!consts || !names || !varnames)
    cellvars = dict_keys_inorder(c->u->u_cellvars, 0);
    freevars = dict_keys_inorder(c->u->u_freevars, PyTuple_Size(cellvars));
    filename = PyString_FromString(c->c_filename);

    nlocals = PyDict_Size(c->u->u_varnames);
    flags = compute_code_flags(c);
    bytecode = optimize_code(a->a_bytecode, consts, names, a->a_lnotab);

    tmp = PyList_AsTuple(consts); /* PyCode_New requires a tuple */
    co = PyCode_New(c->u->u_argcount, nlocals, stackdepth(c), flags,
                    bytecode, consts, names, varnames,
                    filename, c->u->u_name,
                    c->u->u_firstlineno,

    Py_XDECREF(varnames);
    Py_XDECREF(filename);
    Py_XDECREF(freevars);
    Py_XDECREF(cellvars);
    Py_XDECREF(bytecode);
/* For debugging purposes only */
dump_instr(const struct instr *i)
    const char *jrel = i->i_jrel ? "jrel " : "";
    const char *jabs = i->i_jabs ? "jabs " : "";

    sprintf(arg, "arg: %d ", i->i_oparg);
    fprintf(stderr, "line: %d, opcode: %d %s%s%s\n",
            i->i_lineno, i->i_opcode, arg, jabs, jrel);

dump_basicblock(const basicblock *b)
    const char *seen = b->b_seen ? "seen " : "";
    const char *b_return = b->b_return ? "return " : "";
    fprintf(stderr, "used: %d, depth: %d, offset: %d %s%s\n",
            b->b_iused, b->b_startdepth, b->b_offset, seen, b_return);
    for (i = 0; i < b->b_iused; i++) {
        fprintf(stderr, " [%02d] ", i);
        dump_instr(b->b_instr + i);
static PyCodeObject *
assemble(struct compiler *c, int addNone)
    basicblock *b, *entryblock;
    PyCodeObject *co = NULL;

    /* Make sure every block that falls off the end returns None.
       XXX NEXT_BLOCK() isn't quite right, because if the last
       block ends with a jump or return, b_next shouldn't be set.
    */
    if (!c->u->u_curblock->b_return) {
        ADDOP_O(c, LOAD_CONST, Py_None, consts);
        ADDOP(c, RETURN_VALUE);

    for (b = c->u->u_blocks; b != NULL; b = b->b_list) {

    /* Set firstlineno if it wasn't explicitly set. */
    if (!c->u->u_firstlineno) {
        if (entryblock && entryblock->b_instr)
            c->u->u_firstlineno = entryblock->b_instr->i_lineno;
        else
            c->u->u_firstlineno = 1;
    if (!assemble_init(&a, nblocks, c->u->u_firstlineno))
    dfs(c, entryblock, &a);

    /* Can't modify the bytecode after computing jump offsets. */
    assemble_jump_offsets(&a, c);

    /* Emit code in reverse postorder from dfs. */
    for (i = a.a_nblocks - 1; i >= 0; i--) {
        b = a.a_postorder[i];
        for (j = 0; j < b->b_iused; j++)
            if (!assemble_emit(&a, &b->b_instr[j]))

    if (_PyString_Resize(&a.a_lnotab, a.a_lnotab_off) < 0)
    if (_PyString_Resize(&a.a_bytecode, a.a_offset) < 0)

    co = makecode(c, &a);