/* Use ALLOCATE_PSEUDOVECTOR.
   From emacs.git / src/thread.c
   blob 2a3199b70b9fb20c9cee1e0a75a6a2c4d8587068  */
2 #include <config.h>
3 #include "lisp.h"
4 #include "buffer.h"
5 #include "blockinput.h"
6 #include <pthread.h>
7 #include "systime.h"
8 #include "sysselect.h"
10 void mark_byte_stack P_ ((struct byte_stack *));
11 void mark_backtrace P_ ((struct backtrace *));
12 void mark_catchlist P_ ((struct catchtag *));
13 void mark_stack P_ ((char *, char *));
14 void flush_stack_call_func P_ ((void (*) (char *, void *), void *));
16 /* Get the next thread as in circular buffer. */
17 #define NEXT_THREAD(x)(x->next_thread ? x->next_thread : all_threads)
19 /* condition var .. w/ global lock */
21 static pthread_cond_t buffer_cond;
23 static struct thread_state primary_thread;
25 static struct thread_state *all_threads = &primary_thread;
27 __thread struct thread_state *current_thread = &primary_thread;
29 static int inhibit_yield_counter = 0;
31 pthread_mutex_t global_lock;
33 /* Used internally by the scheduler, it is the next that will be executed. */
34 static pthread_t next_thread;
36 /* Choose the next thread to be executed. */
37 static void
38 thread_schedule ()
40 struct thread_state *it, *begin = NEXT_THREAD (current_thread);
42 #define CHECK_THREAD(T,B) \
43 if ((!other_threads_p () \
44 || ((struct thread_state *)T)->nolock \
45 || EQ (((struct thread_state *)T)->desired_buffer, \
46 ((struct thread_state *)T)->m_current_buffer) \
47 || EQ (B->owner, Qnil) \
48 /* We set the owner to Qt to mean it is being killed. */ \
49 || EQ (B->owner, Qt)) \
50 && !((struct thread_state *)T)->blocked) \
51 { \
52 next_thread = ((struct thread_state *)T)->pthread_id; \
53 return; \
54 } \
56 /* Try to wake up the thread holding the desired buffer. */
57 if (current_thread->desired_buffer)
59 struct buffer *db = current_thread->desired_buffer;
60 if (!EQ (db->owner, Qnil) && !EQ (db, current_buffer))
61 CHECK_THREAD (XVECTOR (db->owner), db);
64 /* A simple round-robin. We can't just check for it != current_thread
65 because current_thread could be already unlinked from all_threads. */
66 it = begin;
67 while (1)
69 struct buffer *new_buffer = it->desired_buffer;
70 if (new_buffer)
71 CHECK_THREAD (it, new_buffer);
73 it = NEXT_THREAD (it);
74 if (it == current_thread)
75 break;
79 /* Schedule a new thread and block the caller until it is not scheduled
80 again. */
81 static inline void
82 reschedule (char *end, int wait)
84 current_thread->stack_top = end;
85 if (!thread_inhibit_yield_p ())
86 thread_schedule ();
88 if (next_thread != current_thread->pthread_id)
89 pthread_cond_broadcast (&buffer_cond);
91 if (!wait)
92 return;
94 pthread_mutex_unlock (&global_lock);
96 pthread_mutex_lock (&global_lock);
98 while (current_thread->pthread_id != next_thread)
99 pthread_cond_wait (&buffer_cond, &global_lock);
102 static void
103 mark_one_thread (struct thread_state *thread)
105 register struct specbinding *bind;
106 struct handler *handler;
107 Lisp_Object tem;
109 for (bind = thread->m_specpdl; bind != thread->m_specpdl_ptr; bind++)
111 mark_object (bind->symbol);
112 mark_object (bind->old_value);
115 #if (GC_MARK_STACK == GC_MAKE_GCPROS_NOOPS \
116 || GC_MARK_STACK == GC_MARK_STACK_CHECK_GCPROS)
117 mark_stack (thread->stack_bottom, thread->stack_top);
118 #else
120 register struct gcpro *tail;
121 for (tail = thread->m_gcprolist; tail; tail = tail->next)
122 for (i = 0; i < tail->nvars; i++)
123 mark_object (tail->var[i]);
125 #endif
127 if (thread->m_byte_stack_list)
128 mark_byte_stack (thread->m_byte_stack_list);
130 mark_catchlist (thread->m_catchlist);
132 for (handler = thread->m_handlerlist; handler; handler = handler->next)
134 mark_object (handler->handler);
135 mark_object (handler->var);
138 mark_backtrace (thread->m_backtrace_list);
140 if (thread->m_current_buffer)
142 XSETBUFFER (tem, thread->m_current_buffer);
143 mark_object (tem);
146 mark_object (thread->m_last_thing_searched);
148 if (thread->m_saved_last_thing_searched)
149 mark_object (thread->m_saved_last_thing_searched);
152 static void
153 mark_threads_callback (char *end, void *ignore)
155 struct thread_state *iter;
157 current_thread->stack_top = end;
158 for (iter = all_threads; iter; iter = iter->next_thread)
160 Lisp_Object thread_obj;
161 XSETTHREAD (thread_obj, iter);
162 mark_object (thread_obj);
163 mark_one_thread (iter);
167 void
168 mark_threads (void)
170 flush_stack_call_func (mark_threads_callback, NULL);
173 void
174 unmark_threads (void)
176 struct thread_state *iter;
178 for (iter = all_threads; iter; iter = iter->next_thread)
179 if (iter->m_byte_stack_list)
180 unmark_byte_stack (iter->m_byte_stack_list);
183 void
184 thread_acquire_buffer (char *end, void *nb)
186 struct buffer *new_buffer = nb;
187 current_thread->desired_buffer = new_buffer;
188 if (current_buffer)
190 current_buffer->owner = current_buffer->prev_owner;
191 current_buffer->prev_owner = Qnil;
194 reschedule (end, 1);
196 /* FIXME: if buffer is killed */
197 new_buffer->prev_owner = new_buffer->owner;
198 if (current_thread->nolock)
199 new_buffer->owner = Qnil;
200 else
201 new_buffer->owner = get_current_thread ();
203 current_buffer = new_buffer;
207 thread_inhibit_yield_p ()
209 return inhibit_yield_counter || interrupt_input_blocked || abort_on_gc;
/* flush_stack_call_func callback for thread_yield: give up the
   interpreter unless yielding is currently inhibited.  */
static void
thread_yield_callback (char *end, void *ignore)
{
  if (!thread_inhibit_yield_p ())
    reschedule (end, 1);
}
219 void
220 thread_yield (void)
222 /* Note: currently it is safe to check this here, but eventually it
223 will require a lock to ensure non-racy operation. */
224 /* Only yield if there is another thread to yield to. */
225 if (all_threads->next_thread)
226 flush_stack_call_func (thread_yield_callback, NULL);
229 DEFUN ("yield", Fyield, Syield, 0, 0, 0,
230 doc: /* Yield to the next thread. */)
231 (void)
233 thread_yield ();
234 return other_threads_p () ? Qt : Qnil;
237 static Lisp_Object
238 invoke_thread_function (void)
240 Lisp_Object iter;
242 int count = SPECPDL_INDEX ();
244 /* Set up specpdl. */
245 for (iter = current_thread->initial_specpdl;
246 !EQ (iter, Qnil);
247 iter = XCDR (iter))
249 /* We may bind a variable twice -- but it doesn't matter because
250 there is no way to undo these bindings without exiting the
251 thread. */
252 specbind (XCAR (XCAR (iter)), XCDR (XCAR (iter)));
254 current_thread->initial_specpdl = Qnil;
256 Feval (current_thread->func);
257 return unbind_to (count, Qnil);
260 static Lisp_Object
261 do_nothing (Lisp_Object whatever)
263 return whatever;
266 static void *
267 run_thread (void *state)
269 struct thread_state *self = state;
270 struct thread_state **iter;
271 struct gcpro gcpro1;
272 Lisp_Object buffer;
273 char stack_pos;
275 self->stack_top = self->stack_bottom = &stack_pos;
277 self->m_specpdl_size = 50;
278 self->m_specpdl = xmalloc (self->m_specpdl_size
279 * sizeof (struct specbinding));
280 self->m_specpdl_ptr = self->m_specpdl;
281 self->pthread_id = pthread_self ();
283 /* Thread-local assignment. */
284 current_thread = self;
286 /* We need special handling to set the initial buffer. Our parent
287 thread is very likely to be using this same buffer so we will
288 typically wait for the parent thread to release it first. */
289 XSETBUFFER (buffer, self->m_current_buffer);
290 GCPRO1 (buffer);
291 self->desired_buffer = (struct buffer *) buffer;
292 self->m_current_buffer = 0;
294 pthread_mutex_lock (&global_lock);
296 set_buffer_internal (XBUFFER (buffer));
298 /* It might be nice to do something with errors here. */
299 internal_condition_case (invoke_thread_function, Qt, do_nothing);
301 /* Unlink this thread from the list of all threads. */
302 for (iter = &all_threads; *iter != self; iter = &(*iter)->next_thread)
304 *iter = (*iter)->next_thread;
306 if (!EQ (self->m_current_buffer->owner, Qt))
307 self->m_current_buffer->owner = self->m_current_buffer->prev_owner;
309 thread_schedule ();
310 pthread_cond_broadcast (&buffer_cond);
312 xfree (self->m_specpdl);
314 pthread_mutex_unlock (&global_lock);
316 return NULL;
319 DEFUN ("run-in-thread", Frun_in_thread, Srun_in_thread, 1, 2, 0,
320 doc: /* Start a new thread and run FUNCTION in it.
321 When the function exits, the thread dies. When NOLOCK is no-nil the thread
322 does not try to get a lock on the current buffer. */)
323 (function, nolock)
324 Lisp_Object function;
325 Lisp_Object nolock;
327 char stack_pos;
328 pthread_t thr;
329 struct thread_state *new_thread;
330 struct specbinding *p;
332 /* Can't start a thread in temacs. */
333 if (!initialized)
334 abort ();
336 new_thread = ALLOCATE_PSEUDOVECTOR (struct thread_state, m_gcprolist,
337 PVEC_THREAD);
338 memset ((char *) new_thread + OFFSETOF (struct thread_state, m_gcprolist),
339 0, sizeof (struct thread_state) - OFFSETOF (struct thread_state,
340 m_gcprolist));
342 new_thread->func = function;
343 new_thread->blocked = 0;
344 new_thread->nolock = !EQ (nolock, Qnil);
345 new_thread->initial_specpdl = Qnil;
346 new_thread->m_last_thing_searched = Qnil; /* copy from parent? */
347 new_thread->m_saved_last_thing_searched = Qnil;
348 new_thread->m_current_buffer = current_thread->m_current_buffer;
349 new_thread->stack_bottom = &stack_pos;
351 for (p = specpdl; p != specpdl_ptr; ++p)
353 if (!p->func)
355 Lisp_Object sym = p->symbol;
356 if (!SYMBOLP (sym))
357 sym = XCAR (sym);
358 new_thread->initial_specpdl
359 = Fcons (Fcons (sym, find_symbol_value (sym)),
360 new_thread->initial_specpdl);
364 /* We'll need locking here. */
365 new_thread->next_thread = all_threads;
366 all_threads = new_thread;
368 if (pthread_create (&thr, NULL, run_thread, new_thread))
370 /* Restore the previous situation. */
371 all_threads = all_threads->next_thread;
372 error ("Could not start a new thread");
375 return Qnil;
378 /* Get the current thread as a lisp object. */
379 Lisp_Object
380 get_current_thread (void)
382 Lisp_Object result;
383 XSETTHREAD (result, current_thread);
384 return result;
387 /* Get the main thread as a lisp object. */
388 Lisp_Object
389 get_main_thread (void)
391 Lisp_Object result;
392 XSETTHREAD (result, &primary_thread);
393 return result;
396 /* Is the current an user thread. */
398 user_thread_p (void)
400 struct thread_state *it = all_threads;
401 pthread_t self = pthread_self ();
404 if (it->pthread_id == self)
405 return 1;
407 while (it = it->next_thread);
409 return 0;
412 DEFUN ("inhibit-yield", Finhibit_yield, Sinhibit_yield, 1, 1, 0,
413 doc: /* Inhibit the yield function. */)
414 (val)
415 Lisp_Object val;
417 if (!EQ (val, Qnil))
418 inhibit_yield_counter++;
419 else if (inhibit_yield_counter > 0)
420 inhibit_yield_counter--;
422 return Qnil;
426 thread_select (n, rfd, wfd, xfd, tmo)
427 int n;
428 SELECT_TYPE *rfd, *wfd, *xfd;
429 EMACS_TIME *tmo;
431 char end;
432 int ret;
433 current_thread->blocked = 1;
435 reschedule (&end, 0);
437 pthread_mutex_unlock (&global_lock);
439 ret = select (n, rfd, wfd, xfd, tmo);
440 current_thread->blocked = 0;
442 pthread_mutex_lock (&global_lock);
443 pthread_cond_broadcast (&buffer_cond);
445 while (current_thread->pthread_id != next_thread)
446 pthread_cond_wait (&buffer_cond, &global_lock);
448 return ret;
452 other_threads_p (void)
454 int avail = 0;
455 struct thread_state *it = all_threads;
456 for (; it && avail < 2; it = it->next_thread)
457 if (!it->blocked)
458 avail++;
460 return avail > 1;
463 void
464 init_threads (void)
466 pthread_mutex_init (&global_lock, NULL);
467 pthread_cond_init (&buffer_cond, NULL);
468 pthread_mutex_lock (&global_lock);
470 primary_thread.pthread_id = pthread_self ();
471 primary_thread.nolock = 0;
472 primary_thread.blocked = 0;
473 primary_thread.m_last_thing_searched = Qnil;
474 next_thread = primary_thread.pthread_id;
477 void
478 syms_of_threads (void)
480 defsubr (&Srun_in_thread);
481 defsubr (&Syield);
482 defsubr (&Sinhibit_yield);