Fix a bug introduced in r62627. See issue2760 and issue2632.
Modules/gcmodule.c

/*

  Reference Cycle Garbage Collection
  ==================================

  Neil Schemenauer <nas@arctrix.com>

  Based on a post on the python-dev list.  Ideas from Guido van Rossum,
  Eric Tiedemann, and various others.

  http://www.arctrix.com/nas/python/gc/
  http://www.python.org/pipermail/python-dev/2000-March/003869.html
  http://www.python.org/pipermail/python-dev/2000-March/004010.html
  http://www.python.org/pipermail/python-dev/2000-March/004022.html

  For a high-level view of the collection process, read the collect
  function.

*/

#include "Python.h"
#include "frameobject.h"        /* for PyFrame_ClearFreeList */

/* Get an object's GC head */
#define AS_GC(o) ((PyGC_Head *)(o)-1)

/* Get the object given the GC head */
#define FROM_GC(g) ((PyObject *)(((PyGC_Head *)g)+1))
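
/* Layout assumed by the two macros above (an illustrative sketch, not part
 * of the module's logic): every GC-tracked allocation is a PyGC_Head
 * immediately followed by the object itself, so AS_GC and FROM_GC are plain
 * pointer arithmetic across that boundary:
 *
 *     PyObject  *op = ...;          // any tracked container object
 *     PyGC_Head *g  = AS_GC(op);    // step back over the GC header
 *     assert(FROM_GC(g) == op);     // round-trips to the same object
 */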

/*** Global GC state ***/

struct gc_generation {
    PyGC_Head head;
    int threshold;              /* collection threshold */
    int count;                  /* count of allocations or collections of
                                   younger generations */
};

#define NUM_GENERATIONS 3
#define GEN_HEAD(n) (&generations[n].head)

/* linked lists of container objects */
static struct gc_generation generations[NUM_GENERATIONS] = {
    /* PyGC_Head,                       threshold,      count */
    {{{GEN_HEAD(0), GEN_HEAD(0), 0}},   700,            0},
    {{{GEN_HEAD(1), GEN_HEAD(1), 0}},   10,             0},
    {{{GEN_HEAD(2), GEN_HEAD(2), 0}},   10,             0},
};

PyGC_Head *_PyGC_generation0 = GEN_HEAD(0);

static int enabled = 1;         /* automatic collection enabled? */

/* true if we are currently running the collector */
static int collecting = 0;

/* list of uncollectable objects */
static PyObject *garbage = NULL;

/* Python string to use if unhandled exception occurs */
static PyObject *gc_str = NULL;

/* Python string used to look for __del__ attribute. */
static PyObject *delstr = NULL;

/* set for debugging information */
#define DEBUG_STATS             (1<<0) /* print collection statistics */
#define DEBUG_COLLECTABLE       (1<<1) /* print collectable objects */
#define DEBUG_UNCOLLECTABLE     (1<<2) /* print uncollectable objects */
#define DEBUG_INSTANCES         (1<<3) /* print instances */
#define DEBUG_OBJECTS           (1<<4) /* print other objects */
#define DEBUG_SAVEALL           (1<<5) /* save all garbage in gc.garbage */
#define DEBUG_LEAK              DEBUG_COLLECTABLE | \
                                DEBUG_UNCOLLECTABLE | \
                                DEBUG_INSTANCES | \
                                DEBUG_OBJECTS | \
                                DEBUG_SAVEALL
static int debug;
static PyObject *tmod = NULL;

/*--------------------------------------------------------------------------
gc_refs values.

Between collections, every gc'ed object has one of two gc_refs values:

GC_UNTRACKED
    The initial state; objects returned by PyObject_GC_Malloc are in this
    state.  The object doesn't live in any generation list, and its
    tp_traverse slot must not be called.

GC_REACHABLE
    The object lives in some generation list, and its tp_traverse is safe to
    call.  An object transitions to GC_REACHABLE when PyObject_GC_Track
    is called.

During a collection, gc_refs can temporarily take on other states:

>= 0
    At the start of a collection, update_refs() copies the true refcount
    to gc_refs, for each object in the generation being collected.
    subtract_refs() then adjusts gc_refs so that it equals the number of
    times an object is referenced directly from outside the generation
    being collected.
    gc_refs remains >= 0 throughout these steps.

GC_TENTATIVELY_UNREACHABLE
    move_unreachable() then moves objects not reachable (whether directly or
    indirectly) from outside the generation into an "unreachable" set.
    Objects that are found to be reachable have gc_refs set to GC_REACHABLE
    again.  Objects that are found to be unreachable have gc_refs set to
    GC_TENTATIVELY_UNREACHABLE.  It's "tentatively" because the pass doing
    this can't be sure until it ends, and GC_TENTATIVELY_UNREACHABLE may
    transition back to GC_REACHABLE.

    Only objects with GC_TENTATIVELY_UNREACHABLE still set are candidates
    for collection.  If it's decided not to collect such an object (e.g.,
    it has a __del__ method), its gc_refs is restored to GC_REACHABLE again.
----------------------------------------------------------------------------
*/

#define GC_UNTRACKED                    _PyGC_REFS_UNTRACKED
#define GC_REACHABLE                    _PyGC_REFS_REACHABLE
#define GC_TENTATIVELY_UNREACHABLE      _PyGC_REFS_TENTATIVELY_UNREACHABLE

#define IS_TRACKED(o) ((AS_GC(o))->gc.gc_refs != GC_UNTRACKED)
#define IS_REACHABLE(o) ((AS_GC(o))->gc.gc_refs == GC_REACHABLE)
#define IS_TENTATIVELY_UNREACHABLE(o) ( \
    (AS_GC(o))->gc.gc_refs == GC_TENTATIVELY_UNREACHABLE)

/*** list functions ***/

static void
gc_list_init(PyGC_Head *list)
{
    list->gc.gc_prev = list;
    list->gc.gc_next = list;
}

static int
gc_list_is_empty(PyGC_Head *list)
{
    return (list->gc.gc_next == list);
}

#if 0
/* This became unused after gc_list_move() was introduced. */
/* Append `node` to `list`. */
static void
gc_list_append(PyGC_Head *node, PyGC_Head *list)
{
    node->gc.gc_next = list;
    node->gc.gc_prev = list->gc.gc_prev;
    node->gc.gc_prev->gc.gc_next = node;
    list->gc.gc_prev = node;
}
#endif

/* Remove `node` from the gc list it's currently in. */
static void
gc_list_remove(PyGC_Head *node)
{
    node->gc.gc_prev->gc.gc_next = node->gc.gc_next;
    node->gc.gc_next->gc.gc_prev = node->gc.gc_prev;
    node->gc.gc_next = NULL; /* object is not currently tracked */
}

/* Move `node` from the gc list it's currently in (which is not explicitly
 * named here) to the end of `list`.  This is semantically the same as
 * gc_list_remove(node) followed by gc_list_append(node, list).
 */
static void
gc_list_move(PyGC_Head *node, PyGC_Head *list)
{
    PyGC_Head *new_prev;
    PyGC_Head *current_prev = node->gc.gc_prev;
    PyGC_Head *current_next = node->gc.gc_next;
    /* Unlink from current list. */
    current_prev->gc.gc_next = current_next;
    current_next->gc.gc_prev = current_prev;
    /* Relink at end of new list. */
    new_prev = node->gc.gc_prev = list->gc.gc_prev;
    new_prev->gc.gc_next = list->gc.gc_prev = node;
    node->gc.gc_next = list;
}

/* append list `from` onto list `to`; `from` becomes an empty list */
static void
gc_list_merge(PyGC_Head *from, PyGC_Head *to)
{
    PyGC_Head *tail;
    assert(from != to);
    if (!gc_list_is_empty(from)) {
        tail = to->gc.gc_prev;
        tail->gc.gc_next = from->gc.gc_next;
        tail->gc.gc_next->gc.gc_prev = tail;
        to->gc.gc_prev = from->gc.gc_prev;
        to->gc.gc_prev->gc.gc_next = to;
    }
    gc_list_init(from);
}

static Py_ssize_t
gc_list_size(PyGC_Head *list)
{
    PyGC_Head *gc;
    Py_ssize_t n = 0;
    for (gc = list->gc.gc_next; gc != list; gc = gc->gc.gc_next) {
        n++;
    }
    return n;
}

/* Append objects in a GC list to a Python list.
 * Return 0 if all OK, < 0 if error (out of memory for list).
 */
static int
append_objects(PyObject *py_list, PyGC_Head *gc_list)
{
    PyGC_Head *gc;
    for (gc = gc_list->gc.gc_next; gc != gc_list; gc = gc->gc.gc_next) {
        PyObject *op = FROM_GC(gc);
        if (op != py_list) {
            if (PyList_Append(py_list, op)) {
                return -1; /* exception */
            }
        }
    }
    return 0;
}

/*** end of list stuff ***/

/* Set all gc_refs = ob_refcnt.  After this, gc_refs is > 0 for all objects
 * in containers, and is GC_REACHABLE for all tracked gc objects not in
 * containers.
 */
static void
update_refs(PyGC_Head *containers)
{
    PyGC_Head *gc = containers->gc.gc_next;
    for (; gc != containers; gc = gc->gc.gc_next) {
        assert(gc->gc.gc_refs == GC_REACHABLE);
        gc->gc.gc_refs = Py_REFCNT(FROM_GC(gc));
        /* Python's cyclic gc should never see an incoming refcount
         * of 0:  if something decref'ed to 0, it should have been
         * deallocated immediately at that time.
         * Possible cause (if the assert triggers):  a tp_dealloc
         * routine left a gc-aware object tracked during its teardown
         * phase, and did something -- or allowed something to happen --
         * that called back into Python.  gc can trigger then, and may
         * see the still-tracked dying object.  Before this assert
         * was added, such mistakes went on to allow gc to try to
         * delete the object again.  In a debug build, that caused
         * a mysterious segfault, when _Py_ForgetReference tried
         * to remove the object from the doubly-linked list of all
         * objects a second time.  In a release build, an actual
         * double deallocation occurred, which leads to corruption
         * of the allocator's internal bookkeeping pointers.  That's
         * so serious that maybe this should be a release-build
         * check instead of an assert?
         */
        assert(gc->gc.gc_refs != 0);
    }
}

/* A traversal callback for subtract_refs. */
static int
visit_decref(PyObject *op, void *data)
{
    assert(op != NULL);
    if (PyObject_IS_GC(op)) {
        PyGC_Head *gc = AS_GC(op);
        /* We're only interested in gc_refs for objects in the
         * generation being collected, which can be recognized
         * because only they have positive gc_refs.
         */
        assert(gc->gc.gc_refs != 0); /* else refcount was too small */
        if (gc->gc.gc_refs > 0)
            gc->gc.gc_refs--;
    }
    return 0;
}

/* Subtract internal references from gc_refs.  After this, gc_refs is >= 0
 * for all objects in containers, and is GC_REACHABLE for all tracked gc
 * objects not in containers.  The ones with gc_refs > 0 are directly
 * reachable from outside containers, and so can't be collected.
 */
static void
subtract_refs(PyGC_Head *containers)
{
    traverseproc traverse;
    PyGC_Head *gc = containers->gc.gc_next;
    for (; gc != containers; gc = gc->gc.gc_next) {
        traverse = Py_TYPE(FROM_GC(gc))->tp_traverse;
        (void) traverse(FROM_GC(gc),
                        (visitproc)visit_decref,
                        NULL);
    }
}
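
/* A small worked example of update_refs + subtract_refs (illustrative,
 * assuming two containers A and B in a cycle A -> B -> A, plus one
 * reference to A from outside the generation):
 *
 *     update_refs:   A.gc_refs = 2 (cycle + external), B.gc_refs = 1
 *     subtract_refs: traversing A decrefs B to 0; traversing B decrefs
 *                    A to 1
 *
 * A ends with gc_refs > 0 (directly reachable from outside); B ends at 0
 * and is rescued only if move_unreachable later proves it reachable via A.
 */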

/* A traversal callback for move_unreachable. */
static int
visit_reachable(PyObject *op, PyGC_Head *reachable)
{
    if (PyObject_IS_GC(op)) {
        PyGC_Head *gc = AS_GC(op);
        const Py_ssize_t gc_refs = gc->gc.gc_refs;

        if (gc_refs == 0) {
            /* This is in move_unreachable's 'young' list, but
             * the traversal hasn't yet gotten to it.  All
             * we need to do is tell move_unreachable that it's
             * reachable.
             */
            gc->gc.gc_refs = 1;
        }
        else if (gc_refs == GC_TENTATIVELY_UNREACHABLE) {
            /* This had gc_refs = 0 when move_unreachable got
             * to it, but turns out it's reachable after all.
             * Move it back to move_unreachable's 'young' list,
             * and move_unreachable will eventually get to it
             * again.
             */
            gc_list_move(gc, reachable);
            gc->gc.gc_refs = 1;
        }
        /* Else there's nothing to do.
         * If gc_refs > 0, it must be in move_unreachable's 'young'
         * list, and move_unreachable will eventually get to it.
         * If gc_refs == GC_REACHABLE, it's either in some other
         * generation so we don't care about it, or move_unreachable
         * already dealt with it.
         * If gc_refs == GC_UNTRACKED, it must be ignored.
         */
        else {
            assert(gc_refs > 0
                   || gc_refs == GC_REACHABLE
                   || gc_refs == GC_UNTRACKED);
        }
    }
    return 0;
}

/* Move the unreachable objects from young to unreachable.  After this,
 * all objects in young have gc_refs = GC_REACHABLE, and all objects in
 * unreachable have gc_refs = GC_TENTATIVELY_UNREACHABLE.  All tracked
 * gc objects not in young or unreachable still have gc_refs = GC_REACHABLE.
 * All objects in young after this are directly or indirectly reachable
 * from outside the original young; and all objects in unreachable are
 * not.
 */
static void
move_unreachable(PyGC_Head *young, PyGC_Head *unreachable)
{
    PyGC_Head *gc = young->gc.gc_next;

    /* Invariants:  all objects "to the left" of us in young have gc_refs
     * = GC_REACHABLE, and are indeed reachable (directly or indirectly)
     * from outside the young list as it was at entry.  All other objects
     * from the original young "to the left" of us are in unreachable now,
     * and have gc_refs = GC_TENTATIVELY_UNREACHABLE.  All objects to the
     * left of us in 'young' now have been scanned, and no objects here
     * or to the right have been scanned yet.
     */

    while (gc != young) {
        PyGC_Head *next;

        if (gc->gc.gc_refs) {
            /* gc is definitely reachable from outside the
             * original 'young'.  Mark it as such, and traverse
             * its pointers to find any other objects that may
             * be directly reachable from it.  Note that the
             * call to tp_traverse may append objects to young,
             * so we have to wait until it returns to determine
             * the next object to visit.
             */
            PyObject *op = FROM_GC(gc);
            traverseproc traverse = Py_TYPE(op)->tp_traverse;
            assert(gc->gc.gc_refs > 0);
            gc->gc.gc_refs = GC_REACHABLE;
            (void) traverse(op,
                            (visitproc)visit_reachable,
                            (void *)young);
            next = gc->gc.gc_next;
        }
        else {
            /* This *may* be unreachable.  To make progress,
             * assume it is.  gc isn't directly reachable from
             * any object we've already traversed, but may be
             * reachable from an object we haven't gotten to yet.
             * visit_reachable will eventually move gc back into
             * young if that's so, and we'll see it again.
             */
            next = gc->gc.gc_next;
            gc_list_move(gc, unreachable);
            gc->gc.gc_refs = GC_TENTATIVELY_UNREACHABLE;
        }
        gc = next;
    }
}
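
/* Continuing the A/B sketch from subtract_refs: move_unreachable finds A
 * with gc_refs 1, marks it GC_REACHABLE, and traverses it; visit_reachable
 * then bumps B from 0 back to 1 (or moves B out of unreachable if the scan
 * already passed it), so neither member of the externally-referenced cycle
 * is collected.
 */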

/* Return true if object has a finalization method.
 * CAUTION:  An instance of an old-style class has to be checked for a
 * __del__ method, and earlier versions of this used to call
 * PyObject_HasAttr, which in turn could call the class's __getattr__ hook
 * (if any).  That could invoke arbitrary Python code, mutating the object
 * graph in arbitrary ways, and that was the source of some excruciatingly
 * subtle bugs.
 */
static int
has_finalizer(PyObject *op)
{
    if (PyInstance_Check(op)) {
        assert(delstr != NULL);
        return _PyInstance_Lookup(op, delstr) != NULL;
    }
    else if (PyType_HasFeature(op->ob_type, Py_TPFLAGS_HEAPTYPE))
        return op->ob_type->tp_del != NULL;
    else if (PyGen_CheckExact(op))
        return PyGen_NeedsFinalizing((PyGenObject *)op);
    else
        return 0;
}

/* Move the objects in unreachable with __del__ methods into `finalizers`.
 * Objects moved into `finalizers` have gc_refs set to GC_REACHABLE; the
 * objects remaining in unreachable are left at GC_TENTATIVELY_UNREACHABLE.
 */
static void
move_finalizers(PyGC_Head *unreachable, PyGC_Head *finalizers)
{
    PyGC_Head *gc;
    PyGC_Head *next;

    /* March over unreachable.  Move objects with finalizers into
     * `finalizers`.
     */
    for (gc = unreachable->gc.gc_next; gc != unreachable; gc = next) {
        PyObject *op = FROM_GC(gc);

        assert(IS_TENTATIVELY_UNREACHABLE(op));
        next = gc->gc.gc_next;

        if (has_finalizer(op)) {
            gc_list_move(gc, finalizers);
            gc->gc.gc_refs = GC_REACHABLE;
        }
    }
}

/* A traversal callback for move_finalizer_reachable. */
static int
visit_move(PyObject *op, PyGC_Head *tolist)
{
    if (PyObject_IS_GC(op)) {
        if (IS_TENTATIVELY_UNREACHABLE(op)) {
            PyGC_Head *gc = AS_GC(op);
            gc_list_move(gc, tolist);
            gc->gc.gc_refs = GC_REACHABLE;
        }
    }
    return 0;
}

/* Move objects that are reachable from finalizers, from the unreachable set
 * into the finalizers set.
 */
static void
move_finalizer_reachable(PyGC_Head *finalizers)
{
    traverseproc traverse;
    PyGC_Head *gc = finalizers->gc.gc_next;
    for (; gc != finalizers; gc = gc->gc.gc_next) {
        /* Note that the finalizers list may grow during this. */
        traverse = Py_TYPE(FROM_GC(gc))->tp_traverse;
        (void) traverse(FROM_GC(gc),
                        (visitproc)visit_move,
                        (void *)finalizers);
    }
}

/* Clear all weakrefs to unreachable objects, and if such a weakref has a
 * callback, invoke it if necessary.  Note that it's possible for such
 * weakrefs to be outside the unreachable set -- indeed, those are precisely
 * the weakrefs whose callbacks must be invoked.  See gc_weakref.txt for
 * overview & some details.  Some weakrefs with callbacks may be reclaimed
 * directly by this routine; the number reclaimed is the return value.  Other
 * weakrefs with callbacks may be moved into the `old` generation.  Objects
 * moved into `old` have gc_refs set to GC_REACHABLE; the objects remaining in
 * unreachable are left at GC_TENTATIVELY_UNREACHABLE.  When this returns,
 * no object in `unreachable` is weakly referenced anymore.
 */
static int
handle_weakrefs(PyGC_Head *unreachable, PyGC_Head *old)
{
    PyGC_Head *gc;
    PyObject *op;               /* generally FROM_GC(gc) */
    PyWeakReference *wr;        /* generally a cast of op */
    PyGC_Head wrcb_to_call;     /* weakrefs with callbacks to call */
    PyGC_Head *next;
    int num_freed = 0;

    gc_list_init(&wrcb_to_call);

    /* Clear all weakrefs to the objects in unreachable.  If such a weakref
     * also has a callback, move it into `wrcb_to_call` if the callback
     * needs to be invoked.  Note that we cannot invoke any callbacks until
     * all weakrefs to unreachable objects are cleared, lest the callback
     * resurrect an unreachable object via a still-active weakref.  We
     * make another pass over wrcb_to_call, invoking callbacks, after this
     * pass completes.
     */
    for (gc = unreachable->gc.gc_next; gc != unreachable; gc = next) {
        PyWeakReference **wrlist;

        op = FROM_GC(gc);
        assert(IS_TENTATIVELY_UNREACHABLE(op));
        next = gc->gc.gc_next;

        if (! PyType_SUPPORTS_WEAKREFS(Py_TYPE(op)))
            continue;

        /* It supports weakrefs.  Does it have any? */
        wrlist = (PyWeakReference **)
                        PyObject_GET_WEAKREFS_LISTPTR(op);

        /* `op` may have some weakrefs.  March over the list, clear
         * all the weakrefs, and move the weakrefs with callbacks
         * that must be called into wrcb_to_call.
         */
        for (wr = *wrlist; wr != NULL; wr = *wrlist) {
            PyGC_Head *wrasgc;  /* AS_GC(wr) */

            /* _PyWeakref_ClearRef clears the weakref but leaves
             * the callback pointer intact.  Obscure:  it also
             * changes *wrlist.
             */
            assert(wr->wr_object == op);
            _PyWeakref_ClearRef(wr);
            assert(wr->wr_object == Py_None);
            if (wr->wr_callback == NULL)
                continue;       /* no callback */

            /* Headache time.  `op` is going away, and is weakly referenced
             * by `wr`, which has a callback.  Should the callback be
             * invoked?  If wr is also trash, no:
             *
             * 1. There's no need to call it.  The object and the weakref
             *    are both going away, so it's legitimate to pretend the
             *    weakref is going away first.  The user has to ensure a
             *    weakref outlives its referent if they want a guarantee
             *    that the wr callback will get invoked.
             *
             * 2. It may be catastrophic to call it.  If the callback is
             *    also in cyclic trash (CT), then although the CT is
             *    unreachable from outside the current generation, CT may
             *    be reachable from the callback.  Then the callback could
             *    resurrect insane objects.
             *
             * Since the callback is never needed and may be unsafe in this
             * case, wr is simply left in the unreachable set.  Note that
             * because we already called _PyWeakref_ClearRef(wr), its
             * callback will never trigger.
             *
             * OTOH, if wr isn't part of CT, we should invoke the callback:
             * the weakref outlived the trash.  Note that since wr isn't CT
             * in this case, its callback can't be CT either -- wr acted as
             * an external root to this generation, and therefore its
             * callback did too.  So nothing in CT is reachable from the
             * callback either, so it's hard to imagine how calling it later
             * could create a problem for us.  wr is moved to wrcb_to_call
             * in this case.
             */
            if (IS_TENTATIVELY_UNREACHABLE(wr))
                continue;
            assert(IS_REACHABLE(wr));

            /* Create a new reference so that wr can't go away
             * before we can process it again.
             */
            Py_INCREF(wr);

            /* Move wr to wrcb_to_call, for the next pass. */
            wrasgc = AS_GC(wr);
            assert(wrasgc != next); /* wrasgc is reachable, but
                                       next isn't, so they can't
                                       be the same */
            gc_list_move(wrasgc, &wrcb_to_call);
        }
    }

    /* Invoke the callbacks we decided to honor.  It's safe to invoke them
     * because they can't reference unreachable objects.
     */
    while (! gc_list_is_empty(&wrcb_to_call)) {
        PyObject *temp;
        PyObject *callback;

        gc = wrcb_to_call.gc.gc_next;
        op = FROM_GC(gc);
        assert(IS_REACHABLE(op));
        assert(PyWeakref_Check(op));
        wr = (PyWeakReference *)op;
        callback = wr->wr_callback;
        assert(callback != NULL);

        /* copy-paste of weakrefobject.c's handle_callback() */
        temp = PyObject_CallFunctionObjArgs(callback, wr, NULL);
        if (temp == NULL)
            PyErr_WriteUnraisable(callback);
        else
            Py_DECREF(temp);

        /* Give up the reference we created in the first pass.  When
         * op's refcount hits 0 (which it may or may not do right now),
         * op's tp_dealloc will decref op->wr_callback too.  Note
         * that the refcount probably will hit 0 now, and because this
         * weakref was reachable to begin with, gc didn't already
         * add it to its count of freed objects.  Example:  a reachable
         * weak value dict maps some key to this reachable weakref.
         * The callback removes this key->weakref mapping from the
         * dict, leaving no other references to the weakref (excepting
         * ours).
         */
        Py_DECREF(op);
        if (wrcb_to_call.gc.gc_next == gc) {
            /* object is still alive -- move it */
            gc_list_move(gc, old);
        }
        else
            ++num_freed;
    }

    return num_freed;
}

static void
debug_instance(char *msg, PyInstanceObject *inst)
{
    char *cname;
    /* simple version of instance_repr */
    PyObject *classname = inst->in_class->cl_name;
    if (classname != NULL && PyString_Check(classname))
        cname = PyString_AsString(classname);
    else
        cname = "?";
    PySys_WriteStderr("gc: %.100s <%.100s instance at %p>\n",
                      msg, cname, inst);
}

static void
debug_cycle(char *msg, PyObject *op)
{
    if ((debug & DEBUG_INSTANCES) && PyInstance_Check(op)) {
        debug_instance(msg, (PyInstanceObject *)op);
    }
    else if (debug & DEBUG_OBJECTS) {
        PySys_WriteStderr("gc: %.100s <%.100s %p>\n",
                          msg, Py_TYPE(op)->tp_name, op);
    }
}

/* Handle uncollectable garbage (cycles with finalizers, and stuff reachable
 * only from such cycles).
 * If DEBUG_SAVEALL, all objects in finalizers are appended to the module
 * garbage list (a Python list), else only the objects in finalizers with
 * __del__ methods are appended to garbage.  All objects in finalizers are
 * merged into the old list regardless.
 * Returns 0 if all OK, <0 on error (out of memory to grow the garbage list).
 * The finalizers list is made empty on a successful return.
 */
static int
handle_finalizers(PyGC_Head *finalizers, PyGC_Head *old)
{
    PyGC_Head *gc = finalizers->gc.gc_next;

    if (garbage == NULL) {
        garbage = PyList_New(0);
        if (garbage == NULL)
            Py_FatalError("gc couldn't create gc.garbage list");
    }
    for (; gc != finalizers; gc = gc->gc.gc_next) {
        PyObject *op = FROM_GC(gc);

        if ((debug & DEBUG_SAVEALL) || has_finalizer(op)) {
            if (PyList_Append(garbage, op) < 0)
                return -1;
        }
    }

    gc_list_merge(finalizers, old);
    return 0;
}

/* Break reference cycles by clearing the containers involved.  This is
 * tricky business as the lists can be changing and we don't know which
 * objects may be freed.  It is possible I screwed something up here.
 */
static void
delete_garbage(PyGC_Head *collectable, PyGC_Head *old)
{
    inquiry clear;

    while (!gc_list_is_empty(collectable)) {
        PyGC_Head *gc = collectable->gc.gc_next;
        PyObject *op = FROM_GC(gc);

        assert(IS_TENTATIVELY_UNREACHABLE(op));
        if (debug & DEBUG_SAVEALL) {
            PyList_Append(garbage, op);
        }
        else {
            if ((clear = Py_TYPE(op)->tp_clear) != NULL) {
                Py_INCREF(op);
                clear(op);
                Py_DECREF(op);
            }
        }
        if (collectable->gc.gc_next == gc) {
            /* object is still alive, move it, it may die later */
            gc_list_move(gc, old);
            gc->gc.gc_refs = GC_REACHABLE;
        }
    }
}

/* Clear all free lists.
 * All free lists are cleared during the collection of the highest
 * generation.  Allocated items in the free list may keep a pymalloc arena
 * occupied.  Clearing the free lists may give back memory to the OS earlier.
 */
static void
clear_freelists(void)
{
    (void)PyMethod_ClearFreeList();
    (void)PyFrame_ClearFreeList();
    (void)PyCFunction_ClearFreeList();
    (void)PyTuple_ClearFreeList();
    (void)PyUnicode_ClearFreeList();
}

/* This is the main function.  Read this to understand how the
 * collection process works. */
static Py_ssize_t
collect(int generation)
{
    int i;
    Py_ssize_t m = 0;           /* # objects collected */
    Py_ssize_t n = 0;           /* # unreachable objects that couldn't be
                                   collected */
    PyGC_Head *young;           /* the generation we are examining */
    PyGC_Head *old;             /* next older generation */
    PyGC_Head unreachable;      /* non-problematic unreachable trash */
    PyGC_Head finalizers;       /* objects with, & reachable from, __del__ */
    PyGC_Head *gc;
    double t1 = 0.0;

    if (delstr == NULL) {
        delstr = PyString_InternFromString("__del__");
        if (delstr == NULL)
            Py_FatalError("gc couldn't allocate \"__del__\"");
    }

    if (debug & DEBUG_STATS) {
        if (tmod != NULL) {
            PyObject *f = PyObject_CallMethod(tmod, "time", NULL);
            if (f == NULL) {
                PyErr_Clear();
            }
            else {
                t1 = PyFloat_AsDouble(f);
                Py_DECREF(f);
            }
        }
        PySys_WriteStderr("gc: collecting generation %d...\n",
                          generation);
        PySys_WriteStderr("gc: objects in each generation:");
        for (i = 0; i < NUM_GENERATIONS; i++)
            PySys_WriteStderr(" %" PY_FORMAT_SIZE_T "d",
                              gc_list_size(GEN_HEAD(i)));
        PySys_WriteStderr("\n");
    }

    /* update collection and allocation counters */
    if (generation+1 < NUM_GENERATIONS)
        generations[generation+1].count += 1;
    for (i = 0; i <= generation; i++)
        generations[i].count = 0;

    /* merge younger generations with one we are currently collecting */
    for (i = 0; i < generation; i++) {
        gc_list_merge(GEN_HEAD(i), GEN_HEAD(generation));
    }

    /* handy references */
    young = GEN_HEAD(generation);
    if (generation < NUM_GENERATIONS-1)
        old = GEN_HEAD(generation+1);
    else
        old = young;

    /* Using ob_refcnt and gc_refs, calculate which objects in the
     * container set are reachable from outside the set (i.e., have a
     * refcount greater than 0 when all the references within the
     * set are taken into account).
     */
    update_refs(young);
    subtract_refs(young);

    /* Leave everything reachable from outside young in young, and move
     * everything else (in young) to unreachable.
     * NOTE:  This used to move the reachable objects into a reachable
     * set instead.  But most things usually turn out to be reachable,
     * so it's more efficient to move the unreachable things.
     */
    gc_list_init(&unreachable);
    move_unreachable(young, &unreachable);

    /* Move reachable objects to next generation. */
    if (young != old)
        gc_list_merge(young, old);

    /* All objects in unreachable are trash, but objects reachable from
     * finalizers can't safely be deleted.  Python programmers should take
     * care not to create such things.  For Python, finalizers means
     * instance objects with __del__ methods.  Weakrefs with callbacks
     * can also call arbitrary Python code but they will be dealt with by
     * handle_weakrefs().
     */
    gc_list_init(&finalizers);
    move_finalizers(&unreachable, &finalizers);
    /* finalizers contains the unreachable objects with a finalizer;
     * unreachable objects reachable *from* those are also uncollectable,
     * and we move those into the finalizers list too.
     */
    move_finalizer_reachable(&finalizers);

    /* Collect statistics on collectable objects found and print
     * debugging information.
     */
    for (gc = unreachable.gc.gc_next; gc != &unreachable;
         gc = gc->gc.gc_next) {
        m++;
        if (debug & DEBUG_COLLECTABLE) {
            debug_cycle("collectable", FROM_GC(gc));
        }
        if (tmod != NULL && (debug & DEBUG_STATS)) {
            PyObject *f = PyObject_CallMethod(tmod, "time", NULL);
            if (f == NULL) {
                PyErr_Clear();
            }
            else {
                t1 = PyFloat_AsDouble(f)-t1;
                Py_DECREF(f);
                PySys_WriteStderr("gc: %.4fs elapsed.\n", t1);
            }
        }
    }

    /* Clear weakrefs and invoke callbacks as necessary. */
    m += handle_weakrefs(&unreachable, old);

    /* Call tp_clear on objects in the unreachable set.  This will cause
     * the reference cycles to be broken.  It may also cause some objects
     * in finalizers to be freed.
     */
    delete_garbage(&unreachable, old);

    /* Collect statistics on uncollectable objects found and print
     * debugging information. */
    for (gc = finalizers.gc.gc_next;
         gc != &finalizers;
         gc = gc->gc.gc_next) {
        n++;
        if (debug & DEBUG_UNCOLLECTABLE)
            debug_cycle("uncollectable", FROM_GC(gc));
    }
    if (debug & DEBUG_STATS) {
        if (m == 0 && n == 0)
            PySys_WriteStderr("gc: done.\n");
        else
            PySys_WriteStderr(
                "gc: done, "
                "%" PY_FORMAT_SIZE_T "d unreachable, "
                "%" PY_FORMAT_SIZE_T "d uncollectable.\n",
                n+m, n);
    }

    /* Append instances in the uncollectable set to a Python
     * reachable list of garbage.  The programmer has to deal with
     * this if they insist on creating this type of structure.
     */
    (void)handle_finalizers(&finalizers, old);

    /* Clear free lists only during the collection of the highest
     * generation */
    if (generation == NUM_GENERATIONS-1) {
        clear_freelists();
    }

    if (PyErr_Occurred()) {
        if (gc_str == NULL)
            gc_str = PyString_FromString("garbage collection");
        PyErr_WriteUnraisable(gc_str);
        Py_FatalError("unexpected exception during garbage collection");
    }
    return n+m;
}

static Py_ssize_t
collect_generations(void)
{
    int i;
    Py_ssize_t n = 0;

    /* Find the oldest generation (highest numbered) where the count
     * exceeds the threshold.  Objects in that generation and
     * generations younger than it will be collected. */
    for (i = NUM_GENERATIONS-1; i >= 0; i--) {
        if (generations[i].count > generations[i].threshold) {
            n = collect(i);
            break;
        }
    }
    return n;
}
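
/* Rough arithmetic for the cascade above, assuming the default thresholds
 * (700, 10, 10): generation 0 is collected after ~700 net allocations,
 * each such pass increments generations[1].count, so generation 1 is
 * collected after ~10 generation-0 passes (~7,000 allocations), and
 * generation 2 after ~10 generation-1 passes (~70,000 allocations).
 */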

PyDoc_STRVAR(gc_enable__doc__,
"enable() -> None\n"
"\n"
"Enable automatic garbage collection.\n");

static PyObject *
gc_enable(PyObject *self, PyObject *noargs)
{
    enabled = 1;
    Py_INCREF(Py_None);
    return Py_None;
}

PyDoc_STRVAR(gc_disable__doc__,
"disable() -> None\n"
"\n"
"Disable automatic garbage collection.\n");

static PyObject *
gc_disable(PyObject *self, PyObject *noargs)
{
    enabled = 0;
    Py_INCREF(Py_None);
    return Py_None;
}

PyDoc_STRVAR(gc_isenabled__doc__,
"isenabled() -> status\n"
"\n"
"Returns true if automatic garbage collection is enabled.\n");

static PyObject *
gc_isenabled(PyObject *self, PyObject *noargs)
{
    return PyBool_FromLong((long)enabled);
}

PyDoc_STRVAR(gc_collect__doc__,
"collect([generation]) -> n\n"
"\n"
"With no arguments, run a full collection.  The optional argument\n"
"may be an integer specifying which generation to collect.  A ValueError\n"
"is raised if the generation number is invalid.\n\n"
"The number of unreachable objects is returned.\n");

static PyObject *
gc_collect(PyObject *self, PyObject *args, PyObject *kws)
{
    static char *keywords[] = {"generation", NULL};
    int genarg = NUM_GENERATIONS - 1;
    Py_ssize_t n;

    if (!PyArg_ParseTupleAndKeywords(args, kws, "|i", keywords, &genarg))
        return NULL;

    else if (genarg < 0 || genarg >= NUM_GENERATIONS) {
        PyErr_SetString(PyExc_ValueError, "invalid generation");
        return NULL;
    }

    if (collecting)
        n = 0; /* already collecting, don't do anything */
    else {
        collecting = 1;
        n = collect(genarg);
        collecting = 0;
    }

    return PyInt_FromSsize_t(n);
}

PyDoc_STRVAR(gc_set_debug__doc__,
"set_debug(flags) -> None\n"
"\n"
"Set the garbage collection debugging flags.  Debugging information is\n"
"written to sys.stderr.\n"
"\n"
"flags is an integer and can have the following bits turned on:\n"
"\n"
"  DEBUG_STATS - Print statistics during collection.\n"
"  DEBUG_COLLECTABLE - Print collectable objects found.\n"
"  DEBUG_UNCOLLECTABLE - Print unreachable but uncollectable objects found.\n"
"  DEBUG_INSTANCES - Print instance objects.\n"
"  DEBUG_OBJECTS - Print objects other than instances.\n"
"  DEBUG_SAVEALL - Save objects to gc.garbage rather than freeing them.\n"
"  DEBUG_LEAK - Debug leaking programs (everything but STATS).\n");

static PyObject *
gc_set_debug(PyObject *self, PyObject *args)
{
    if (!PyArg_ParseTuple(args, "i:set_debug", &debug))
        return NULL;

    Py_INCREF(Py_None);
    return Py_None;
}

PyDoc_STRVAR(gc_get_debug__doc__,
"get_debug() -> flags\n"
"\n"
"Get the garbage collection debugging flags.\n");

static PyObject *
gc_get_debug(PyObject *self, PyObject *noargs)
{
    return Py_BuildValue("i", debug);
}

PyDoc_STRVAR(gc_set_thresh__doc__,
"set_threshold(threshold0, [threshold1, threshold2]) -> None\n"
"\n"
"Sets the collection thresholds.  Setting threshold0 to zero disables\n"
"collection.\n");

static PyObject *
gc_set_thresh(PyObject *self, PyObject *args)
{
    int i;
    if (!PyArg_ParseTuple(args, "i|ii:set_threshold",
                          &generations[0].threshold,
                          &generations[1].threshold,
                          &generations[2].threshold))
        return NULL;
    for (i = 2; i < NUM_GENERATIONS; i++) {
        /* generations higher than 2 get the same threshold */
        generations[i].threshold = generations[2].threshold;
    }

    Py_INCREF(Py_None);
    return Py_None;
}

PyDoc_STRVAR(gc_get_thresh__doc__,
"get_threshold() -> (threshold0, threshold1, threshold2)\n"
"\n"
"Return the current collection thresholds.\n");

static PyObject *
gc_get_thresh(PyObject *self, PyObject *noargs)
{
    return Py_BuildValue("(iii)",
                         generations[0].threshold,
                         generations[1].threshold,
                         generations[2].threshold);
}

PyDoc_STRVAR(gc_get_count__doc__,
"get_count() -> (count0, count1, count2)\n"
"\n"
"Return the current collection counts.\n");

static PyObject *
gc_get_count(PyObject *self, PyObject *noargs)
{
    return Py_BuildValue("(iii)",
                         generations[0].count,
                         generations[1].count,
                         generations[2].count);
}

static int
referrersvisit(PyObject* obj, PyObject *objs)
{
    Py_ssize_t i;
    for (i = 0; i < PyTuple_GET_SIZE(objs); i++)
        if (PyTuple_GET_ITEM(objs, i) == obj)
            return 1;
    return 0;
}

static int
gc_referrers_for(PyObject *objs, PyGC_Head *list, PyObject *resultlist)
{
    PyGC_Head *gc;
    PyObject *obj;
    traverseproc traverse;
    for (gc = list->gc.gc_next; gc != list; gc = gc->gc.gc_next) {
        obj = FROM_GC(gc);
        traverse = Py_TYPE(obj)->tp_traverse;
        if (obj == objs || obj == resultlist)
            continue;
        if (traverse(obj, (visitproc)referrersvisit, objs)) {
            if (PyList_Append(resultlist, obj) < 0)
                return 0; /* error */
        }
    }
    return 1; /* no error */
}

PyDoc_STRVAR(gc_get_referrers__doc__,
"get_referrers(*objs) -> list\n\
Return the list of objects that directly refer to any of objs.");

static PyObject *
gc_get_referrers(PyObject *self, PyObject *args)
{
    int i;
    PyObject *result = PyList_New(0);
    if (!result) return NULL;

    for (i = 0; i < NUM_GENERATIONS; i++) {
        if (!(gc_referrers_for(args, GEN_HEAD(i), result))) {
            Py_DECREF(result);
            return NULL;
        }
    }
    return result;
}

/* Append obj to list; return true if error (out of memory), false if OK. */
static int
referentsvisit(PyObject *obj, PyObject *list)
{
    return PyList_Append(list, obj) < 0;
}

PyDoc_STRVAR(gc_get_referents__doc__,
"get_referents(*objs) -> list\n\
Return the list of objects that are directly referred to by objs.");

static PyObject *
gc_get_referents(PyObject *self, PyObject *args)
{
    Py_ssize_t i;
    PyObject *result = PyList_New(0);

    if (result == NULL)
        return NULL;

    for (i = 0; i < PyTuple_GET_SIZE(args); i++) {
        traverseproc traverse;
        PyObject *obj = PyTuple_GET_ITEM(args, i);

        if (! PyObject_IS_GC(obj))
            continue;
        traverse = Py_TYPE(obj)->tp_traverse;
        if (! traverse)
            continue;
        if (traverse(obj, (visitproc)referentsvisit, result)) {
            Py_DECREF(result);
            return NULL;
        }
    }
    return result;
}

PyDoc_STRVAR(gc_get_objects__doc__,
"get_objects() -> [...]\n"
"\n"
"Return a list of objects tracked by the collector (excluding the list\n"
"returned).\n");

static PyObject *
gc_get_objects(PyObject *self, PyObject *noargs)
{
    int i;
    PyObject* result;

    result = PyList_New(0);
    if (result == NULL)
        return NULL;
    for (i = 0; i < NUM_GENERATIONS; i++) {
        if (append_objects(result, GEN_HEAD(i))) {
            Py_DECREF(result);
            return NULL;
        }
    }
    return result;
}

PyDoc_STRVAR(gc__doc__,
"This module provides access to the garbage collector for reference cycles.\n"
"\n"
"enable() -- Enable automatic garbage collection.\n"
"disable() -- Disable automatic garbage collection.\n"
"isenabled() -- Returns true if automatic collection is enabled.\n"
"collect() -- Do a full collection right now.\n"
"get_count() -- Return the current collection counts.\n"
"set_debug() -- Set debugging flags.\n"
"get_debug() -- Get debugging flags.\n"
"set_threshold() -- Set the collection thresholds.\n"
"get_threshold() -- Return the current collection thresholds.\n"
"get_objects() -- Return a list of all objects tracked by the collector.\n"
"get_referrers() -- Return the list of objects that refer to an object.\n"
"get_referents() -- Return the list of objects that an object refers to.\n");

static PyMethodDef GcMethods[] = {
    {"enable",         gc_enable,        METH_NOARGS,  gc_enable__doc__},
    {"disable",        gc_disable,       METH_NOARGS,  gc_disable__doc__},
    {"isenabled",      gc_isenabled,     METH_NOARGS,  gc_isenabled__doc__},
    {"set_debug",      gc_set_debug,     METH_VARARGS, gc_set_debug__doc__},
    {"get_debug",      gc_get_debug,     METH_NOARGS,  gc_get_debug__doc__},
    {"get_count",      gc_get_count,     METH_NOARGS,  gc_get_count__doc__},
    {"set_threshold",  gc_set_thresh,    METH_VARARGS, gc_set_thresh__doc__},
    {"get_threshold",  gc_get_thresh,    METH_NOARGS,  gc_get_thresh__doc__},
    {"collect",        (PyCFunction)gc_collect,
        METH_VARARGS | METH_KEYWORDS,                  gc_collect__doc__},
    {"get_objects",    gc_get_objects,   METH_NOARGS,  gc_get_objects__doc__},
    {"get_referrers",  gc_get_referrers, METH_VARARGS,
        gc_get_referrers__doc__},
    {"get_referents",  gc_get_referents, METH_VARARGS,
        gc_get_referents__doc__},
    {NULL, NULL} /* Sentinel */
};

PyMODINIT_FUNC
initgc(void)
{
    PyObject *m;

    m = Py_InitModule4("gc",
                       GcMethods,
                       gc__doc__,
                       NULL,
                       PYTHON_API_VERSION);
    if (m == NULL)
        return;

    if (garbage == NULL) {
        garbage = PyList_New(0);
        if (garbage == NULL)
            return;
    }
    Py_INCREF(garbage);
    if (PyModule_AddObject(m, "garbage", garbage) < 0)
        return;

    /* Importing can't be done in collect() because collect()
     * can be called via PyGC_Collect() in Py_Finalize().
     * This wouldn't be a problem, except that <initialized> is
     * reset to 0 before calling collect which trips up
     * the import and triggers an assertion.
     */
    if (tmod == NULL) {
        tmod = PyImport_ImportModuleNoBlock("time");
        if (tmod == NULL)
            PyErr_Clear();
    }

#define ADD_INT(NAME) if (PyModule_AddIntConstant(m, #NAME, NAME) < 0) return
    ADD_INT(DEBUG_STATS);
    ADD_INT(DEBUG_COLLECTABLE);
    ADD_INT(DEBUG_UNCOLLECTABLE);
    ADD_INT(DEBUG_INSTANCES);
    ADD_INT(DEBUG_OBJECTS);
    ADD_INT(DEBUG_SAVEALL);
    ADD_INT(DEBUG_LEAK);
#undef ADD_INT
}

/* API to invoke gc.collect() from C */
Py_ssize_t
PyGC_Collect(void)
{
    Py_ssize_t n;

    if (collecting)
        n = 0; /* already collecting, don't do anything */
    else {
        collecting = 1;
        n = collect(NUM_GENERATIONS - 1);
        collecting = 0;
    }

    return n;
}
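
/* A minimal embedding sketch (illustrative, not part of this file): a C
 * program hosting the interpreter can force a full collection through this
 * entry point, e.g. before shutdown:
 *
 *     Py_Initialize();
 *     // ... run application code ...
 *     Py_ssize_t found = PyGC_Collect();  // collect all generations
 *     Py_Finalize();
 */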

/* for debugging */
void
_PyGC_Dump(PyGC_Head *g)
{
    _PyObject_Dump(FROM_GC(g));
}

/* extension modules might be compiled with GC support so these
   functions must always be available */

#undef PyObject_GC_Track
#undef PyObject_GC_UnTrack
#undef PyObject_GC_Del
#undef _PyObject_GC_Malloc

void
PyObject_GC_Track(void *op)
{
    _PyObject_GC_TRACK(op);
}

/* for binary compatibility with 2.2 */
void
_PyObject_GC_Track(PyObject *op)
{
    PyObject_GC_Track(op);
}

void
PyObject_GC_UnTrack(void *op)
{
    /* Obscure:  the Py_TRASHCAN mechanism requires that we be able to
     * call PyObject_GC_UnTrack twice on an object.
     */
    if (IS_TRACKED(op))
        _PyObject_GC_UNTRACK(op);
}

/* for binary compatibility with 2.2 */
void
_PyObject_GC_UnTrack(PyObject *op)
{
    PyObject_GC_UnTrack(op);
}

PyObject *
_PyObject_GC_Malloc(size_t basicsize)
{
    PyObject *op;
    PyGC_Head *g = (PyGC_Head *)PyObject_MALLOC(
        sizeof(PyGC_Head) + basicsize);
    if (g == NULL)
        return PyErr_NoMemory();
    g->gc.gc_refs = GC_UNTRACKED;
    generations[0].count++; /* number of allocated GC objects */
    if (generations[0].count > generations[0].threshold &&
        enabled &&
        generations[0].threshold &&
        !collecting &&
        !PyErr_Occurred()) {
        collecting = 1;
        collect_generations();
        collecting = 0;
    }
    op = FROM_GC(g);
    return op;
}

PyObject *
_PyObject_GC_New(PyTypeObject *tp)
{
    PyObject *op = _PyObject_GC_Malloc(_PyObject_SIZE(tp));
    if (op != NULL)
        op = PyObject_INIT(op, tp);
    return op;
}

PyVarObject *
_PyObject_GC_NewVar(PyTypeObject *tp, Py_ssize_t nitems)
{
    const size_t size = _PyObject_VAR_SIZE(tp, nitems);
    PyVarObject *op = (PyVarObject *) _PyObject_GC_Malloc(size);
    if (op != NULL)
        op = PyObject_INIT_VAR(op, tp, nitems);
    return op;
}
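
/* Typical allocation pattern for a GC container type, sketched with a
 * hypothetical MyObject/MyType (not defined in this file): allocate
 * untracked, initialize every field tp_traverse may visit, then start
 * tracking:
 *
 *     MyObject *op = (MyObject *)_PyObject_GC_New(&MyType);
 *     if (op == NULL)
 *         return NULL;
 *     op->payload = NULL;           // make traversable fields sane first
 *     _PyObject_GC_TRACK(op);       // gc_refs: GC_UNTRACKED -> GC_REACHABLE
 *     return (PyObject *)op;
 */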

PyVarObject *
_PyObject_GC_Resize(PyVarObject *op, Py_ssize_t nitems)
{
    const size_t basicsize = _PyObject_VAR_SIZE(Py_TYPE(op), nitems);
    PyGC_Head *g = AS_GC(op);
    g = (PyGC_Head *)PyObject_REALLOC(g, sizeof(PyGC_Head) + basicsize);
    if (g == NULL)
        return (PyVarObject *)PyErr_NoMemory();
    op = (PyVarObject *) FROM_GC(g);
    Py_SIZE(op) = nitems;
    return op;
}

void
PyObject_GC_Del(void *op)
{
    PyGC_Head *g = AS_GC(op);
    if (IS_TRACKED(op))
        gc_list_remove(g);
    if (generations[0].count > 0) {
        generations[0].count--;
    }
    PyObject_FREE(g);
}

/* for binary compatibility with 2.2 */
#undef _PyObject_GC_Del
void
_PyObject_GC_Del(PyObject *op)
{
    PyObject_GC_Del(op);
}