[emacs.git] / src / thread.c
#include <config.h>
#include "lisp.h"
#include "buffer.h"
#include "blockinput.h"
#include <pthread.h>

void mark_byte_stack P_ ((struct byte_stack *));
void mark_backtrace P_ ((struct backtrace *));
void mark_catchlist P_ ((struct catchtag *));
void mark_stack P_ ((char *, char *));
void flush_stack_call_func P_ ((void (*) (char *, void *), void *));
/* Get the next thread, treating the thread list as circular.  */
#define NEXT_THREAD(x) ((x)->next_thread ? (x)->next_thread : all_threads)

/* Condition variable used together with the global lock.  */
static pthread_cond_t buffer_cond;

static struct thread_state primary_thread;

static struct thread_state *all_threads = &primary_thread;

__thread struct thread_state *current_thread = &primary_thread;

static int inhibit_yield_counter = 0;

pthread_mutex_t global_lock;

/* Used internally by the scheduler; the thread that will run next.  */
static pthread_t next_thread;
/* Choose the next thread to be executed.  */
static void
thread_schedule ()
{
  struct thread_state *it, *begin = NEXT_THREAD (current_thread);

#define CHECK_THREAD(T,B)					\
  if (!other_threads_p ()					\
      || ((struct thread_state *)T)->nolock			\
      || EQ (((struct thread_state *)T)->desired_buffer,	\
	     ((struct thread_state *)T)->m_current_buffer)	\
      || EQ (B->owner, Qnil)					\
      /* We set the owner to Qt to mean it is being killed.  */ \
      || EQ (B->owner, Qt))					\
    {								\
      next_thread = ((struct thread_state *)T)->pthread_id;	\
      return;							\
    }

  /* Try to wake up the thread that is holding the desired buffer.  */
  if (current_thread->desired_buffer)
    {
      struct buffer *db = current_thread->desired_buffer;
      if (!EQ (db->owner, Qnil) && !EQ (db, current_buffer))
	CHECK_THREAD (XVECTOR (db->owner), db);
    }

  /* A simple round-robin.  We can't just check for it != current_thread
     because current_thread could already be unlinked from all_threads.
     Always advance IT, even when it has no desired buffer, so the loop
     terminates.  */
  it = begin;
  while (1)
    {
      struct buffer *new_buffer = it->desired_buffer;
      if (new_buffer)
	CHECK_THREAD (it, new_buffer);

      it = NEXT_THREAD (it);
      if (it == current_thread)
	break;
    }
}
/* Schedule a new thread and block the caller until it is scheduled
   again.  */
static inline void
reschedule_and_wait (char *end)
{
  current_thread->stack_top = end;
  if (!thread_inhibit_yield_p ())
    thread_schedule ();

  if (next_thread != current_thread->pthread_id)
    pthread_cond_broadcast (&buffer_cond);

  pthread_mutex_unlock (&global_lock);

  pthread_mutex_lock (&global_lock);

  while (current_thread->pthread_id != next_thread)
    pthread_cond_wait (&buffer_cond, &global_lock);
}
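/* Note on the handshake in reschedule_and_wait: the caller must
   already hold global_lock.  thread_schedule records its choice in
   next_thread, and the broadcast on buffer_cond wakes every waiting
   thread; each waiter re-checks next_thread against its own
   pthread_id, so only the chosen thread leaves its wait loop.  */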
static void
mark_one_thread (struct thread_state *thread)
{
  register struct specbinding *bind;
  struct handler *handler;
  Lisp_Object tem;

  for (bind = thread->m_specpdl; bind != thread->m_specpdl_ptr; bind++)
    {
      mark_object (bind->symbol);
      mark_object (bind->old_value);
    }

#if (GC_MARK_STACK == GC_MAKE_GCPROS_NOOPS \
     || GC_MARK_STACK == GC_MARK_STACK_CHECK_GCPROS)
  mark_stack (thread->stack_bottom, thread->stack_top);
#else
  {
    register struct gcpro *tail;
    register int i;
    for (tail = thread->m_gcprolist; tail; tail = tail->next)
      for (i = 0; i < tail->nvars; i++)
	mark_object (tail->var[i]);
  }
#endif

  if (thread->m_byte_stack_list)
    mark_byte_stack (thread->m_byte_stack_list);

  mark_catchlist (thread->m_catchlist);

  for (handler = thread->m_handlerlist; handler; handler = handler->next)
    {
      mark_object (handler->handler);
      mark_object (handler->var);
    }

  mark_backtrace (thread->m_backtrace_list);

  if (thread->m_current_buffer)
    {
      XSETBUFFER (tem, thread->m_current_buffer);
      mark_object (tem);
    }
}
static void
mark_threads_callback (char *end, void *ignore)
{
  struct thread_state *iter;

  current_thread->stack_top = end;
  for (iter = all_threads; iter; iter = iter->next_thread)
    {
      Lisp_Object thread_obj;
      XSETTHREAD (thread_obj, iter);
      mark_object (thread_obj);
      mark_one_thread (iter);
    }
}

void
mark_threads (void)
{
  flush_stack_call_func (mark_threads_callback, NULL);
}
void
unmark_threads (void)
{
  struct thread_state *iter;

  for (iter = all_threads; iter; iter = iter->next_thread)
    if (iter->m_byte_stack_list)
      unmark_byte_stack (iter->m_byte_stack_list);
}
void
thread_acquire_buffer (char *end, void *nb)
{
  struct buffer *new_buffer = nb;

  current_thread->desired_buffer = new_buffer;
  if (current_buffer)
    {
      current_buffer->owner = current_buffer->prev_owner;
      current_buffer->prev_owner = Qnil;
    }

  reschedule_and_wait (end);

  /* FIXME: if buffer is killed */
  new_buffer->prev_owner = new_buffer->owner;
  if (current_thread->nolock)
    new_buffer->owner = Qnil;
  else
    new_buffer->owner = get_current_thread ();
}
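/* Summary of the ownership protocol above: before blocking, the
   caller hands its current buffer back to that buffer's previous
   owner; once rescheduled, it saves the new buffer's old owner in
   prev_owner and takes ownership itself, unless it runs with nolock,
   in which case the buffer is left unowned.  */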
int
thread_inhibit_yield_p ()
{
  return inhibit_yield_counter || interrupt_input_blocked || abort_on_gc;
}
static void
thread_yield_callback (char *end, void *ignore)
{
  reschedule_and_wait (end);
}

void
thread_yield (void)
{
  /* Note: currently it is safe to check this here, but eventually it
     will require a lock to ensure non-racy operation.  */
  /* Only yield if there is another thread to yield to.  */
  if (all_threads->next_thread)
    flush_stack_call_func (thread_yield_callback, NULL);
}
DEFUN ("yield", Fyield, Syield, 0, 0, 0,
       doc: /* Yield to the next thread.  */)
     (void)
{
  thread_yield ();
  return other_threads_p () ? Qt : Qnil;
}
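/* Lisp-level note: long-running Lisp code can call `yield' to give
   other threads a chance to run; as the code above shows, it returns
   t when other threads exist and nil otherwise.  */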
static Lisp_Object
invoke_thread_function (void)
{
  Lisp_Object iter;

  int count = SPECPDL_INDEX ();

  /* Set up the specpdl.  */
  for (iter = current_thread->initial_specpdl;
       !EQ (iter, Qnil);
       iter = XCDR (iter))
    {
      /* We may bind a variable twice -- but it doesn't matter because
	 there is no way to undo these bindings without exiting the
	 thread.  */
      specbind (XCAR (XCAR (iter)), XCDR (XCAR (iter)));
    }
  current_thread->initial_specpdl = Qnil;

  Feval (current_thread->func);
  return unbind_to (count, Qnil);
}
static Lisp_Object
do_nothing (Lisp_Object whatever)
{
  return whatever;
}
static void *
run_thread (void *state)
{
  struct thread_state *self = state;
  struct thread_state **iter;
  struct gcpro gcpro1;
  Lisp_Object buffer;
  char stack_pos;

  self->stack_top = self->stack_bottom = &stack_pos;

  self->m_specpdl_size = 50;
  self->m_specpdl = xmalloc (self->m_specpdl_size
			     * sizeof (struct specbinding));
  self->m_specpdl_ptr = self->m_specpdl;
  self->pthread_id = pthread_self ();

  /* Thread-local assignment.  */
  current_thread = self;

  /* We need special handling to set the initial buffer.  Our parent
     thread is very likely to be using this same buffer, so we will
     typically wait for the parent thread to release it first.  */
  XSETBUFFER (buffer, self->m_current_buffer);
  GCPRO1 (buffer);
  self->desired_buffer = XBUFFER (buffer);
  self->m_current_buffer = 0;

  pthread_mutex_lock (&global_lock);

  set_buffer_internal (XBUFFER (buffer));

  /* It might be nice to do something with errors here.  */
  internal_condition_case (invoke_thread_function, Qt, do_nothing);

  /* Unlink this thread from the list of all threads.  */
  for (iter = &all_threads; *iter != self; iter = &(*iter)->next_thread)
    ;
  *iter = (*iter)->next_thread;

  if (!EQ (self->m_current_buffer->owner, Qt))
    self->m_current_buffer->owner = self->m_current_buffer->prev_owner;

  thread_schedule ();
  pthread_cond_broadcast (&buffer_cond);

  xfree (self->m_specpdl);

  pthread_mutex_unlock (&global_lock);

  return NULL;
}
DEFUN ("run-in-thread", Frun_in_thread, Srun_in_thread, 1, 2, 0,
       doc: /* Start a new thread and run FUNCTION in it.
When the function exits, the thread dies.  When NOLOCK is non-nil, the
thread does not try to get a lock on the current buffer.  */)
     (function, nolock)
     Lisp_Object function;
     Lisp_Object nolock;
{
  char stack_pos;
  pthread_t thr;
  struct thread_state *new_thread;
  struct specbinding *p;

  /* Can't start a thread in temacs.  */
  if (!initialized)
    abort ();

  new_thread = (struct thread_state *) allocate_pseudovector (VECSIZE (struct thread_state),
							      2, PVEC_THREAD);
  memset ((char *) new_thread + OFFSETOF (struct thread_state, m_gcprolist),
	  0, sizeof (struct thread_state) - OFFSETOF (struct thread_state,
						      m_gcprolist));

  new_thread->func = function;
  new_thread->nolock = !EQ (nolock, Qnil);
  new_thread->initial_specpdl = Qnil;
  new_thread->m_current_buffer = current_thread->m_current_buffer;
  new_thread->stack_bottom = &stack_pos;

  for (p = specpdl; p != specpdl_ptr; ++p)
    if (!p->func)
      {
	Lisp_Object sym = p->symbol;
	if (!SYMBOLP (sym))
	  sym = XCAR (sym);
	new_thread->initial_specpdl
	  = Fcons (Fcons (sym, find_symbol_value (sym)),
		   new_thread->initial_specpdl);
      }

  /* We'll need locking here.  */
  new_thread->next_thread = all_threads;
  all_threads = new_thread;

  if (pthread_create (&thr, NULL, run_thread, new_thread))
    {
      /* Restore the previous situation.  */
      all_threads = all_threads->next_thread;
      error ("Could not start a new thread");
    }

  return Qnil;
}
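/* Illustrative usage sketch (not part of the original source): since
   invoke_thread_function hands FUNCTION to Feval, the argument is
   evaluated as a Lisp form, so a call might look like

     (run-in-thread '(progn (do-background-work) (yield)))

   where `do-background-work' is a hypothetical placeholder.  */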
/* Get the current thread as a Lisp object.  */
Lisp_Object
get_current_thread (void)
{
  Lisp_Object result;
  XSETTHREAD (result, current_thread);
  return result;
}
/* Get the main thread as a Lisp object.  */
Lisp_Object
get_main_thread (void)
{
  Lisp_Object result;
  XSETTHREAD (result, &primary_thread);
  return result;
}
/* Return 1 if the current thread is a user thread, 0 otherwise.  */
int
user_thread_p (void)
{
  struct thread_state *it = all_threads;
  pthread_t self = pthread_self ();

  do
    {
      if (it->pthread_id == self)
	return 1;
    }
  while ((it = it->next_thread));

  return 0;
}
DEFUN ("inhibit-yield", Finhibit_yield, Sinhibit_yield, 1, 1, 0,
       doc: /* Inhibit or re-enable yielding to other threads.
When VAL is non-nil, increment the inhibit counter; otherwise decrement it.  */)
     (val)
     Lisp_Object val;
{
  if (!EQ (val, Qnil))
    inhibit_yield_counter++;
  else if (inhibit_yield_counter > 0)
    inhibit_yield_counter--;

  return Qnil;
}
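/* Usage sketch (illustrative, not from the original source): the
   counter above nests, so callers are expected to pair the calls
   around a critical section, e.g.

     (inhibit-yield t)
     ;; code that must not be interleaved with other threads
     (inhibit-yield nil)
*/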
int
other_threads_p (void)
{
  return all_threads->next_thread != NULL;
}
void
init_threads (void)
{
  pthread_mutex_init (&global_lock, NULL);
  pthread_cond_init (&buffer_cond, NULL);
  pthread_mutex_lock (&global_lock);

  primary_thread.pthread_id = pthread_self ();
  primary_thread.nolock = 0;
  next_thread = primary_thread.pthread_id;
}
void
syms_of_threads (void)
{
  defsubr (&Srun_in_thread);
  defsubr (&Syield);
  defsubr (&Sinhibit_yield);
}