make the minibuffer mutex recursive.
[emacs.git] / src / thread.c
#include <config.h>
#include <setjmp.h>
#include "lisp.h"
#include "buffer.h"
#include "blockinput.h"
#include <pthread.h>
#include "systime.h"
#include "sysselect.h"

void mark_byte_stack P_ ((struct byte_stack *));
void mark_backtrace P_ ((struct backtrace *));
void mark_catchlist P_ ((struct catchtag *));
void mark_stack P_ ((char *, char *));
void flush_stack_call_func P_ ((void (*) (char *, void *), void *));
/* Get the next thread, treating the thread list as circular.  */
#define NEXT_THREAD(x) ((x)->next_thread ? (x)->next_thread : all_threads)

/* Condition variable used together with the global lock.  */
static pthread_cond_t thread_cond;

static struct thread_state primary_thread;

static struct thread_state *all_threads = &primary_thread;

__thread struct thread_state *current_thread = &primary_thread;

pthread_mutex_t global_lock;

/* Used internally by the scheduler; the next thread that will be
   executed.  */
static pthread_t next_thread;

Lisp_Object minibuffer_mutex;
/* Choose the next thread to be executed.  */
static void
thread_schedule ()
{
  struct thread_state *it = current_thread;

  /* Walk the circular thread list, skipping blocked threads.  */
  do
    it = NEXT_THREAD (it);
  while (it->blocked && it != current_thread);

  next_thread = it->pthread_id;
}
/* Schedule a new thread and, if WAIT is non-zero, block the caller
   until it is scheduled again.  */
static inline void
reschedule (char *end, int wait)
{
  current_thread->stack_top = end;
  thread_schedule ();

  if (next_thread != current_thread->pthread_id)
    pthread_cond_broadcast (&thread_cond);

  if (!wait)
    return;

  pthread_mutex_unlock (&global_lock);

  pthread_mutex_lock (&global_lock);

  while (current_thread->pthread_id != next_thread)
    pthread_cond_wait (&thread_cond, &global_lock);
}
static void
mark_one_thread (struct thread_state *thread)
{
  register struct specbinding *bind;
  struct handler *handler;
  Lisp_Object tem;

  for (bind = thread->m_specpdl; bind != thread->m_specpdl_ptr; bind++)
    {
      mark_object (bind->symbol);
      mark_object (bind->old_value);
    }

#if (GC_MARK_STACK == GC_MAKE_GCPROS_NOOPS \
     || GC_MARK_STACK == GC_MARK_STACK_CHECK_GCPROS)
  mark_stack (thread->stack_bottom, thread->stack_top);
#else
  {
    register struct gcpro *tail;
    register int i;
    for (tail = thread->m_gcprolist; tail; tail = tail->next)
      for (i = 0; i < tail->nvars; i++)
        mark_object (tail->var[i]);
  }
#endif

  if (thread->m_byte_stack_list)
    mark_byte_stack (thread->m_byte_stack_list);

  mark_catchlist (thread->m_catchlist);

  for (handler = thread->m_handlerlist; handler; handler = handler->next)
    {
      mark_object (handler->handler);
      mark_object (handler->var);
    }

  mark_backtrace (thread->m_backtrace_list);

  if (thread->m_current_buffer)
    {
      XSETBUFFER (tem, thread->m_current_buffer);
      mark_object (tem);
    }

  mark_object (thread->m_last_thing_searched);

  if (thread->m_saved_last_thing_searched)
    mark_object (thread->m_saved_last_thing_searched);
}
static void
mark_threads_callback (char *end, void *ignore)
{
  struct thread_state *iter;

  current_thread->stack_top = end;
  for (iter = all_threads; iter; iter = iter->next_thread)
    {
      Lisp_Object thread_obj;
      XSETTHREAD (thread_obj, iter);
      mark_object (thread_obj);
      mark_one_thread (iter);
    }
}

void
mark_threads (void)
{
  flush_stack_call_func (mark_threads_callback, NULL);
}
void
unmark_threads (void)
{
  struct thread_state *iter;

  for (iter = all_threads; iter; iter = iter->next_thread)
    if (iter->m_byte_stack_list)
      unmark_byte_stack (iter->m_byte_stack_list);
}

static void
thread_yield_callback (char *end, void *ignore)
{
  reschedule (end, 1);
}
void
thread_yield (void)
{
  /* Note: currently it is safe to check this here, but eventually it
     will require a lock to ensure non-racy operation.  */
  /* Only yield if there is another thread to yield to.  */
  if (all_threads->next_thread)
    flush_stack_call_func (thread_yield_callback, NULL);
}
DEFUN ("yield", Fyield, Syield, 0, 0, 0,
       doc: /* Yield to the next thread.  */)
     (void)
{
  thread_yield ();
  return other_threads_p () ? Qt : Qnil;
}
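/* Usage sketch for `yield' (a hedged example, not taken from this
   file; `while' is ordinary Emacs Lisp).  Scheduling is cooperative,
   so Lisp code that wants to wait for other threads has to keep
   calling `yield' itself:

     (while (yield))   ; returns nil once the caller is the only
                       ; remaining thread, which ends the loop
*/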
static Lisp_Object
invoke_thread_function (void)
{
  Lisp_Object iter;

  int count = SPECPDL_INDEX ();

  /* Set up specpdl.  */
  for (iter = current_thread->initial_specpdl;
       !EQ (iter, Qnil);
       iter = XCDR (iter))
    {
      /* We may bind a variable twice -- but it doesn't matter because
         there is no way to undo these bindings without exiting the
         thread.  */
      specbind (XCAR (XCAR (iter)), XCDR (XCAR (iter)));
    }
  current_thread->initial_specpdl = Qnil;

  Ffuncall (1, &current_thread->func);
  return unbind_to (count, Qnil);
}
static Lisp_Object
do_nothing (Lisp_Object whatever)
{
  return whatever;
}
static void *
run_thread (void *state)
{
  struct thread_state *self = state;
  struct thread_state **iter;
  struct gcpro gcpro1;
  Lisp_Object buffer;
  char stack_pos;

  self->stack_top = self->stack_bottom = &stack_pos;

  self->m_specpdl_size = 50;
  self->m_specpdl = xmalloc (self->m_specpdl_size
                             * sizeof (struct specbinding));
  self->m_specpdl_ptr = self->m_specpdl;
  self->pthread_id = pthread_self ();

  /* Thread-local assignment.  */
  current_thread = self;

  /* We need special handling to set the initial buffer.  Our parent
     thread is very likely to be using this same buffer so we will
     typically wait for the parent thread to release it first.  */
  XSETBUFFER (buffer, self->m_current_buffer);
  GCPRO1 (buffer);
  self->m_current_buffer = 0;

  pthread_mutex_lock (&global_lock);

  set_buffer_internal (XBUFFER (buffer));

  /* It might be nice to do something with errors here.  */
  internal_condition_case (invoke_thread_function, Qt, do_nothing);

  blocal_unbind_thread (get_current_thread ());

  /* Unlink this thread from the list of all threads.  */
  for (iter = &all_threads; *iter != self; iter = &(*iter)->next_thread)
    ;
  *iter = (*iter)->next_thread;

  thread_schedule ();
  pthread_cond_broadcast (&thread_cond);

  xfree (self->m_specpdl);

  pthread_mutex_unlock (&global_lock);

  return NULL;
}
DEFUN ("run-in-thread", Frun_in_thread, Srun_in_thread, 1, 1, 0,
       doc: /* Start a new thread and run FUNCTION in it.
When the function exits, the thread dies.  */)
     (function)
     Lisp_Object function;
{
  char stack_pos;
  pthread_t thr;
  struct thread_state *new_thread;
  struct specbinding *p;

  /* Can't start a thread in temacs.  */
  if (!initialized)
    abort ();

  new_thread = ALLOCATE_PSEUDOVECTOR (struct thread_state, m_gcprolist,
                                      PVEC_THREAD);
  memset ((char *) new_thread + OFFSETOF (struct thread_state, m_gcprolist),
          0, sizeof (struct thread_state) - OFFSETOF (struct thread_state,
                                                      m_gcprolist));

  new_thread->func = function;
  new_thread->blocked = 0;
  new_thread->initial_specpdl = Qnil;
  new_thread->m_last_thing_searched = Qnil; /* copy from parent?  */
  new_thread->m_saved_last_thing_searched = Qnil;
  new_thread->m_current_buffer = current_thread->m_current_buffer;
  new_thread->stack_bottom = &stack_pos;

  /* Record the parent's dynamic bindings so the new thread starts with
     the same values.  */
  for (p = specpdl; p != specpdl_ptr; ++p)
    {
      if (!p->func)
        {
          Lisp_Object sym = p->symbol;
          if (!SYMBOLP (sym))
            sym = XCAR (sym);
          new_thread->initial_specpdl
            = Fcons (Fcons (sym, find_symbol_value (sym)),
                     new_thread->initial_specpdl);
        }
    }

  /* We'll need locking here.  */
  new_thread->next_thread = all_threads;
  all_threads = new_thread;

  if (pthread_create (&thr, NULL, run_thread, new_thread))
    {
      /* Restore the previous situation.  */
      all_threads = all_threads->next_thread;
      error ("Could not start a new thread");
    }

  return Qnil;
}
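/* Usage sketch for `run-in-thread' (hedged example; the lambda body
   is ordinary Emacs Lisp, not defined in this file).  The new thread
   inherits the caller's current buffer and dynamic bindings (via
   initial_specpdl) and dies when FUNCTION returns:

     (run-in-thread
      (lambda ()
        (dotimes (i 10)
          (yield))                      ; give other threads a turn
        (message "worker finished")))
*/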
/* Get the current thread as a lisp object.  */
Lisp_Object
get_current_thread (void)
{
  Lisp_Object result;
  XSETTHREAD (result, current_thread);
  return result;
}

/* Get the main thread as a lisp object.  */
Lisp_Object
get_main_thread (void)
{
  Lisp_Object result;
  XSETTHREAD (result, &primary_thread);
  return result;
}
/* Return 1 if the current thread is a user thread, 0 otherwise.  */
int
user_thread_p (void)
{
  struct thread_state *it = all_threads;
  pthread_t self = pthread_self ();

  do
    if (it->pthread_id == self)
      return 1;
  while ((it = it->next_thread));

  return 0;
}
DEFUN ("make-mutex", Fmake_mutex, Smake_mutex, 0, 1, 0,
       doc: /* Make a mutex.  If RECURSIVE is nil, the mutex is not recursive
and can be locked only once; otherwise the owning thread may lock it
repeatedly.  */)
     (recursive)
     Lisp_Object recursive;
{
  Lisp_Object ret;
  struct Lisp_Mutex *mutex = allocate_mutex ();
  mutex->owner = 0;
  mutex->rec_counter = 0;
  mutex->recursive = recursive;
  XSETMUTEX (ret, mutex);
  return ret;
}
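/* Sketch of the two kinds of mutex (hedged example, using only the
   primitives defined in this file):

     (setq m  (make-mutex))    ; non-recursive: a second `mutex-lock'
                               ; by the owning thread just spins
     (setq rm (make-mutex t))  ; recursive: the owner may lock it
                               ; again; each lock bumps rec_counter

   RECURSIVE is stored unchanged in mutex->recursive and only checked
   for nil-ness in `mutex-lock'.  */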
DEFUN ("mutex-lock", Fmutex_lock, Smutex_lock, 1, 1, 0,
       doc: /* Lock a mutex.  */)
     (val)
     Lisp_Object val;
{
  struct Lisp_Mutex *mutex = XMUTEX (val);
  while (1)
    {
      if (mutex->owner == 0
          || (!NILP (mutex->recursive) && mutex->owner == pthread_self ()))
        {
          mutex->owner = pthread_self ();
          mutex->rec_counter++;
          return Qt;
        }

      thread_yield ();
    }

  return Qt;
}
DEFUN ("mutex-unlock", Fmutex_unlock, Smutex_unlock, 1, 1, 0,
       doc: /* Unlock a mutex.  */)
     (val)
     Lisp_Object val;
{
  struct Lisp_Mutex *mutex = XMUTEX (val);

  if (mutex->owner != pthread_self () || mutex->rec_counter == 0)
    return Qnil;

  mutex->rec_counter--;

  if (mutex->rec_counter == 0)
    mutex->owner = 0;

  return Qt;
}
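/* Recursive locking sketch (hedged example; `rm' is assumed to come
   from (make-mutex t) as above).  Each lock must be balanced by an
   unlock before the mutex is released:

     (mutex-lock rm)     ; => t, rec_counter = 1
     (mutex-lock rm)     ; => t, same owner, rec_counter = 2
     (mutex-unlock rm)   ; => t, rec_counter = 1, still owned
     (mutex-unlock rm)   ; => t, rec_counter = 0, owner cleared
     (mutex-unlock rm)   ; => nil, caller does not own the mutex

   Note that `mutex-unlock' returns nil instead of signalling an error
   when the caller is not the owner or the mutex is already unlocked.  */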
int
thread_select (n, rfd, wfd, xfd, tmo)
     int n;
     SELECT_TYPE *rfd, *wfd, *xfd;
     EMACS_TIME *tmo;
{
  char end;
  int ret;

  /* Mark ourselves as blocked so the scheduler skips us, hand control
     to another thread, and do the real select without holding the
     global lock.  */
  current_thread->blocked = 1;

  reschedule (&end, 0);

  pthread_mutex_unlock (&global_lock);

  ret = select (n, rfd, wfd, xfd, tmo);
  current_thread->blocked = 0;

  pthread_mutex_lock (&global_lock);
  pthread_cond_broadcast (&thread_cond);

  while (current_thread->pthread_id != next_thread)
    pthread_cond_wait (&thread_cond, &global_lock);

  return ret;
}

int
other_threads_p (void)
{
  return all_threads->next_thread ? 1 : 0;
}
Lisp_Object
thread_notify_kill_buffer (register struct buffer *b)
{
  register Lisp_Object tem;
  struct thread_state *it = all_threads;

  for (; it; it = it->next_thread)
    {
      if (b == it->m_current_buffer)
        {
          register Lisp_Object buf;
          XSETBUFFER (buf, it->m_current_buffer);
          tem = Fother_buffer (buf, Qnil, Qnil);
          it->m_current_buffer = XBUFFER (tem);
          if (b == it->m_current_buffer)
            return Qnil;
        }
    }

  return Qt;
}
void
init_threads_once (void)
{
  primary_thread.size = PSEUDOVECSIZE (struct thread_state, m_gcprolist);
  primary_thread.next = NULL;
  primary_thread.func = Qnil;
  primary_thread.initial_specpdl = Qnil;
  XSETPVECTYPE (&primary_thread, PVEC_THREAD);

  /* The minibuffer mutex is recursive so that the thread that already
     owns it can lock it again without blocking.  */
  minibuffer_mutex = Fmake_mutex (Qt);
}
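/* Why the minibuffer mutex is recursive (hedged sketch): with a
   non-recursive mutex, a thread that already holds the lock and
   enters the minibuffer again would spin forever in `mutex-lock'.
   Because the mutex is created with (make-mutex t), the nested lock
   succeeds instead:

     (mutex-lock minibuffer-mutex)     ; outer use
     (mutex-lock minibuffer-mutex)     ; nested use, => t, no deadlock
     (mutex-unlock minibuffer-mutex)
     (mutex-unlock minibuffer-mutex)
*/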
void
init_threads (void)
{
  pthread_mutex_init (&global_lock, NULL);
  pthread_cond_init (&thread_cond, NULL);
  pthread_mutex_lock (&global_lock);

  primary_thread.pthread_id = pthread_self ();
  primary_thread.blocked = 0;
  primary_thread.m_last_thing_searched = Qnil;
  next_thread = primary_thread.pthread_id;
}
void
syms_of_threads (void)
{
  DEFVAR_LISP ("minibuffer-mutex", &minibuffer_mutex,
               doc: /* Mutex for the minibuffer.  */);

  defsubr (&Srun_in_thread);
  defsubr (&Syield);
  defsubr (&Smake_mutex);
  defsubr (&Smutex_lock);
  defsubr (&Smutex_unlock);
}