/* GC private-use list allocator.
 *
 * A private-use list is a list of private-use conses,
 * which are just like ordinary conses except that the cdrs
 * do not have a pointer lowtag and the last cdr is 0, not NIL.
 *
 * With gencgc we steal a GC card having generation = 0 so that it
 * won't look like a root page. Optionally, malloc / free can be used
 * for testing a fallback algorithm that works with cheneygc.
 * The fallback is susceptible to deadlock with multiple threads
 * and therefore not suitable for production use.
 * (if a foreign call entails malloc, and malloc potentially acquires a
 * mutex, and the Lisp thread making the foreign call is stopped for GC,
 * then GC can't proceed)
 *
 * Logically this code is common to both GCs, but it requires some
 * knowledge of gencgc's structures. You might think it should be split
 * into two wholly separate implementation files, but the single file
 * facilitates exercising the malloc/free-based implementation.
 */
// Set to 1 (or use #if PRIVATE_CONS_DEBUG blocks) for verbose allocator tracing.
#define PRIVATE_CONS_DEBUG 0
27 #ifdef LISP_FEATURE_CHENEYGC
/* Head of the chain of malloc()ed chunks. Element 0 of each chunk is not
 * handed out as a cons; its cdr links to the previously allocated chunk
 * so that release_pages_impl() can free them all. */
static struct cons* private_cons_chain;
/* Count of conses still unallocated in the newest chunk. */
static int private_cons_n_avail;

/* Size in bytes of each malloc()ed chunk of conses. */
#define CHUNKSIZE 4096
33 static struct cons* private_cons_impl()
35 if (!private_cons_n_avail) {
36 struct cons* new = malloc(CHUNKSIZE);
37 private_cons_n_avail = (CHUNKSIZE / sizeof (struct cons)) - 1;
38 /* Treating 'new' as an array of 'private_cons_n_avail' conses,
39 * we allocate backwards toward the 0th element.
40 * Element 0 itself holds the list of chunks to subsequently free. */
41 new->car = 0; // unused
42 new->cdr = (lispobj)private_cons_chain;
43 private_cons_chain = new;
44 if (PRIVATE_CONS_DEBUG)
45 fprintf(stderr, "%d conses @ %p\n", (1+private_cons_n_avail), new);
47 return private_cons_chain + private_cons_n_avail--;
50 static void release_pages_impl()
52 struct cons* list = private_cons_chain, *next;
53 for ( ; list ; list = next ) {
54 if (PRIVATE_CONS_DEBUG) fprintf(stderr, "Freeing %p\n", list);
55 next = (struct cons*)list->cdr;
58 private_cons_chain = NULL;
59 private_cons_n_avail = 0;
64 static page_index_t private_cons_page_chain = -1;
65 #define GC_PRIVATE_CONS_GENERATION 6
67 static struct cons* private_cons_impl()
69 page_index_t page = private_cons_page_chain;
70 page_bytes_t bytes_used;
73 if (page >= 0 && (bytes_used = page_bytes_used(page)) < GENCGC_PAGE_BYTES) {
74 cons = (struct cons*)(page_address(page) + bytes_used);
76 #ifdef LISP_FEATURE_MARK_REGION_GC
77 struct allocator_state alloc_start = get_alloc_start_page(PAGE_TYPE_UNBOXED);
78 page = try_allocate_large(GENCGC_PAGE_BYTES, PAGE_TYPE_UNBOXED, 0,
79 &alloc_start, page_table_pages, NULL);
80 set_alloc_start_page(PAGE_TYPE_UNBOXED, alloc_start);
81 /* TODO: Should figure out what will trigger "heap exhausted" errors.
82 * Probably not this though. */
83 if (page == -1) lose("Ran out of pages for GC-private conses");
85 page = get_alloc_start_page(PAGE_TYPE_UNBOXED);
86 page_index_t last_page __attribute__((unused)) =
87 gc_find_freeish_pages(&page, GENCGC_PAGE_BYTES,
88 SINGLE_OBJECT_FLAG | PAGE_TYPE_UNBOXED,
89 GC_PRIVATE_CONS_GENERATION);
90 // See question about last_page in gc_alloc_large
91 set_alloc_start_page(PAGE_TYPE_UNBOXED, page);
92 gc_assert(last_page == page);
94 page_table[page].gen = GC_PRIVATE_CONS_GENERATION;
95 set_page_type(page_table[page], PAGE_TYPE_UNBOXED);
96 // UNBOXED pages do not generally required zero-fill.
97 prepare_pages(1, page, page, PAGE_TYPE_UNBOXED, -1);
98 struct cons* page_header = (struct cons*)page_address(page);
99 if (PRIVATE_CONS_DEBUG)
100 fprintf(stderr, "GC-private page @ %p\n", page_header);
101 page_index_t tail = private_cons_page_chain;
102 page_header->car = 0; // unused
103 page_header->cdr = (lispobj)(tail >= 0 ? page_address(tail) : 0);
104 private_cons_page_chain = page;
105 bytes_used = 2*N_WORD_BYTES; // one cons (so far)
106 cons = page_header + 1;
108 set_page_bytes_used(page, bytes_used + 2*N_WORD_BYTES);
112 static void release_pages_impl()
114 struct cons *list, *next;
115 if (private_cons_page_chain >= 0) {
116 for (list = (struct cons*)page_address(private_cons_page_chain) ;
117 list ; list = next) {
118 page_index_t index = find_page_index(list);
119 next = (struct cons*)list->cdr; // read prior to decommitting (if we do that)
120 if (PRIVATE_CONS_DEBUG)
121 fprintf(stderr, "Freeing GC-private page @ %p (index %ld)\n",
123 set_page_need_to_zero(index, 1);
124 set_page_bytes_used(index, 0);
125 reset_page_flags(index);
127 private_cons_page_chain = -1;
/* Freelist of conses handed back via gc_private_free(); gc_private_cons()
 * reuses these before carving new ones. Shared by both implementations. */
static struct cons* private_cons_recycle_list;
135 uword_t gc_private_cons(uword_t car, uword_t cdr)
137 struct cons* cons = private_cons_recycle_list;
139 private_cons_recycle_list = (struct cons*)cons->cdr;
141 cons = private_cons_impl();
144 #if PRIVATE_CONS_DEBUG
145 if (cdr) fprintf(stderr, "private_cons(%p,%p)=%p\n", (void*)car, (void*)cdr, cons);
147 return (uword_t)cons;
150 /* Push all the conses in 'list' onto the recycle list. */
151 void gc_private_free(struct cons* list)
153 #if PRIVATE_CONS_DEBUG
154 { int n = 0; struct cons* tail = list; while(tail) ++n, tail = (void*)tail->cdr;
155 if (n>1) fprintf(stderr, "private_free(%p){%d}\n", list, n); }
157 struct cons* head = list;
159 list = (struct cons*)list->cdr;
160 list->cdr = (lispobj)private_cons_recycle_list;
161 private_cons_recycle_list = head;
164 /* Give back all the memory used by private cons cells
165 * to either the GC allocator or the malloc implementation. */
166 void gc_dispose_private_pages()
168 private_cons_recycle_list = 0;
169 release_pages_impl();