/* thread.c --- cooperative thread support for Emacs.
   (Historical concurrency-branch code; one global lock serializes
   access to Emacs data, and threads hand off buffer ownership.)  */
2 #include <config.h>
3 #include "lisp.h"
4 #include "buffer.h"
5 #include "blockinput.h"
6 #include <pthread.h>
7 #include "systime.h"
8 #include "sysselect.h"
/* GC helpers defined elsewhere (alloc.c); P_ is the old-style
   prototype macro used throughout pre-C99 Emacs sources.  */
void mark_byte_stack P_ ((struct byte_stack *));
void mark_backtrace P_ ((struct backtrace *));
void mark_catchlist P_ ((struct catchtag *));
void mark_stack P_ ((char *, char *));
void flush_stack_call_func P_ ((void (*) (char *, void *), void *));
/* Get the next thread as in a circular buffer: follow the list link,
   wrapping around to the head of all_threads at the end of the list.
   The argument is parenthesized so expressions are safe as X.  */
#define NEXT_THREAD(x) ((x)->next_thread ? (x)->next_thread : all_threads)
/* Condition variable used together with the global lock: threads wait
   on it until the scheduler picks them to run.  */
static pthread_cond_t buffer_cond;

/* Thread state for the original (main) Emacs thread.  */
static struct thread_state primary_thread;

/* Singly linked list of all live threads; always contains at least
   the primary thread.  */
static struct thread_state *all_threads = &primary_thread;

/* Per-thread pointer to the state of the currently running thread.  */
__thread struct thread_state *current_thread = &primary_thread;

/* When positive, yielding is disabled (see Finhibit_yield).  */
static int inhibit_yield_counter = 0;

/* The one big lock serializing access to Emacs global state.  */
pthread_mutex_t global_lock;

/* Used internally by the scheduler, it is the next that will be executed.  */
static pthread_t next_thread;
36 /* Choose the next thread to be executed. */
37 static void
38 thread_schedule ()
40 struct thread_state *it, *begin = NEXT_THREAD (current_thread);
42 #define CHECK_THREAD(T,B) \
43 if ((!other_threads_p () \
44 || ((struct thread_state *)T)->nolock \
45 || EQ (((struct thread_state *)T)->desired_buffer, \
46 ((struct thread_state *)T)->m_current_buffer) \
47 || EQ (B->owner, Qnil) \
48 /* We set the owner to Qt to mean it is being killed. */ \
49 || EQ (B->owner, Qt)) \
50 && !((struct thread_state *)T)->blocked) \
51 { \
52 next_thread = ((struct thread_state *)T)->pthread_id; \
53 return; \
54 } \
56 /* Try to wake up the thread that is holding the desired buffer. */
57 if (current_thread->desired_buffer)
59 struct buffer *db = current_thread->desired_buffer;
60 if (!EQ (db->owner, Qnil) && !EQ (db, current_buffer))
61 CHECK_THREAD (XVECTOR (db->owner), db);
64 /* A simple round-robin. We can't just check for it != current_thread
65 because current_thread could be already unlinked from all_threads. */
66 it = begin;
67 while (1)
69 struct buffer *new_buffer = it->desired_buffer;
70 if (new_buffer)
71 CHECK_THREAD (it, new_buffer);
73 it = NEXT_THREAD (it);
74 if (it == current_thread)
75 break;
79 /* Schedule a new thread and block the caller until it is not scheduled
80 again. */
81 static inline void
82 reschedule (char *end, int wait)
84 current_thread->stack_top = end;
85 if (!thread_inhibit_yield_p ())
86 thread_schedule ();
88 if (next_thread != current_thread->pthread_id)
89 pthread_cond_broadcast (&buffer_cond);
91 if (!wait)
92 return;
94 pthread_mutex_unlock (&global_lock);
96 pthread_mutex_lock (&global_lock);
98 while (current_thread->pthread_id != next_thread)
99 pthread_cond_wait (&buffer_cond, &global_lock);
102 static void
103 mark_one_thread (struct thread_state *thread)
105 register struct specbinding *bind;
106 struct handler *handler;
107 Lisp_Object tem;
109 for (bind = thread->m_specpdl; bind != thread->m_specpdl_ptr; bind++)
111 mark_object (bind->symbol);
112 mark_object (bind->old_value);
115 #if (GC_MARK_STACK == GC_MAKE_GCPROS_NOOPS \
116 || GC_MARK_STACK == GC_MARK_STACK_CHECK_GCPROS)
117 mark_stack (thread->stack_bottom, thread->stack_top);
118 #else
120 register struct gcpro *tail;
121 for (tail = thread->m_gcprolist; tail; tail = tail->next)
122 for (i = 0; i < tail->nvars; i++)
123 mark_object (tail->var[i]);
125 #endif
127 if (thread->m_byte_stack_list)
128 mark_byte_stack (thread->m_byte_stack_list);
130 mark_catchlist (thread->m_catchlist);
132 for (handler = thread->m_handlerlist; handler; handler = handler->next)
134 mark_object (handler->handler);
135 mark_object (handler->var);
138 mark_backtrace (thread->m_backtrace_list);
140 if (thread->m_current_buffer)
142 XSETBUFFER (tem, thread->m_current_buffer);
143 mark_object (tem);
147 static void
148 mark_threads_callback (char *end, void *ignore)
150 struct thread_state *iter;
152 current_thread->stack_top = end;
153 for (iter = all_threads; iter; iter = iter->next_thread)
155 Lisp_Object thread_obj;
156 XSETTHREAD (thread_obj, iter);
157 mark_object (thread_obj);
158 mark_one_thread (iter);
162 void
163 mark_threads (void)
165 flush_stack_call_func (mark_threads_callback, NULL);
168 void
169 unmark_threads (void)
171 struct thread_state *iter;
173 for (iter = all_threads; iter; iter = iter->next_thread)
174 if (iter->m_byte_stack_list)
175 unmark_byte_stack (iter->m_byte_stack_list);
178 void
179 thread_acquire_buffer (char *end, void *nb)
181 struct buffer *new_buffer = nb;
182 current_thread->desired_buffer = new_buffer;
183 if (current_buffer)
185 current_buffer->owner = current_buffer->prev_owner;
186 current_buffer->prev_owner = Qnil;
189 reschedule (end, 1);
191 /* FIXME: if buffer is killed */
192 new_buffer->prev_owner = new_buffer->owner;
193 if (current_thread->nolock)
194 new_buffer->owner = Qnil;
195 else
196 new_buffer->owner = get_current_thread ();
198 current_buffer = new_buffer;
202 thread_inhibit_yield_p ()
204 return inhibit_yield_counter || interrupt_input_blocked || abort_on_gc;
207 static int
208 thread_bind_bufferlocal_p (struct thread_state *thread)
210 register struct specbinding *bind;
212 for (bind = thread->m_specpdl; bind != thread->m_specpdl_ptr; bind++)
214 if (BUFFER_OBJFWDP (bind->symbol) || BUFFER_LOCAL_VALUEP (bind->symbol))
215 return 1;
217 return 0;
220 static void
221 thread_yield_callback (char *end, void *ignore)
223 if (!thread_inhibit_yield_p ()
224 && !thread_bind_bufferlocal_p (current_thread))
225 thread_acquire_buffer (end, current_buffer);
226 else
227 reschedule (end, 1);
230 void
231 thread_yield (void)
233 /* Note: currently it is safe to check this here, but eventually it
234 will require a lock to ensure non-racy operation. */
235 /* Only yield if there is another thread to yield to. */
236 if (all_threads->next_thread)
237 flush_stack_call_func (thread_yield_callback, NULL);
240 DEFUN ("yield", Fyield, Syield, 0, 0, 0,
241 doc: /* Yield to the next thread. */)
242 (void)
244 thread_yield ();
245 return other_threads_p () ? Qt : Qnil;
248 static Lisp_Object
249 invoke_thread_function (void)
251 Lisp_Object iter;
253 int count = SPECPDL_INDEX ();
255 /* Set up specpdl. */
256 for (iter = current_thread->initial_specpdl;
257 !EQ (iter, Qnil);
258 iter = XCDR (iter))
260 /* We may bind a variable twice -- but it doesn't matter because
261 there is no way to undo these bindings without exiting the
262 thread. */
263 specbind (XCAR (XCAR (iter)), XCDR (XCAR (iter)));
265 current_thread->initial_specpdl = Qnil;
267 Feval (current_thread->func);
268 return unbind_to (count, Qnil);
271 static Lisp_Object
272 do_nothing (Lisp_Object whatever)
274 return whatever;
277 static void *
278 run_thread (void *state)
280 struct thread_state *self = state;
281 struct thread_state **iter;
282 struct gcpro gcpro1;
283 Lisp_Object buffer;
284 char stack_pos;
286 self->stack_top = self->stack_bottom = &stack_pos;
288 self->m_specpdl_size = 50;
289 self->m_specpdl = xmalloc (self->m_specpdl_size
290 * sizeof (struct specbinding));
291 self->m_specpdl_ptr = self->m_specpdl;
292 self->pthread_id = pthread_self ();
294 /* Thread-local assignment. */
295 current_thread = self;
297 /* We need special handling to set the initial buffer. Our parent
298 thread is very likely to be using this same buffer so we will
299 typically wait for the parent thread to release it first. */
300 XSETBUFFER (buffer, self->m_current_buffer);
301 GCPRO1 (buffer);
302 self->desired_buffer = (struct buffer *) buffer;
303 self->m_current_buffer = 0;
305 pthread_mutex_lock (&global_lock);
307 set_buffer_internal (XBUFFER (buffer));
309 /* It might be nice to do something with errors here. */
310 internal_condition_case (invoke_thread_function, Qt, do_nothing);
312 /* Unlink this thread from the list of all threads. */
313 for (iter = &all_threads; *iter != self; iter = &(*iter)->next_thread)
315 *iter = (*iter)->next_thread;
317 if (!EQ (self->m_current_buffer->owner, Qt))
318 self->m_current_buffer->owner = self->m_current_buffer->prev_owner;
320 thread_schedule ();
321 pthread_cond_broadcast (&buffer_cond);
323 xfree (self->m_specpdl);
325 pthread_mutex_unlock (&global_lock);
327 return NULL;
330 DEFUN ("run-in-thread", Frun_in_thread, Srun_in_thread, 1, 2, 0,
331 doc: /* Start a new thread and run FUNCTION in it.
332 When the function exits, the thread dies. When NOLOCK is no-nil the thread
333 does not try to get a lock on the current buffer. */)
334 (function, nolock)
335 Lisp_Object function;
336 Lisp_Object nolock;
338 char stack_pos;
339 pthread_t thr;
340 struct thread_state *new_thread;
341 struct specbinding *p;
343 /* Can't start a thread in temacs. */
344 if (!initialized)
345 abort ();
347 new_thread = (struct thread_state *) allocate_pseudovector (VECSIZE (struct thread_state),
348 2, PVEC_THREAD);
349 memset ((char *) new_thread + OFFSETOF (struct thread_state, m_gcprolist),
350 0, sizeof (struct thread_state) - OFFSETOF (struct thread_state,
351 m_gcprolist));
353 new_thread->func = function;
354 new_thread->blocked = 0;
355 new_thread->nolock = !EQ (nolock, Qnil);
356 new_thread->initial_specpdl = Qnil;
357 new_thread->m_current_buffer = current_thread->m_current_buffer;
358 new_thread->stack_bottom = &stack_pos;
360 for (p = specpdl; p != specpdl_ptr; ++p)
362 if (!p->func)
364 Lisp_Object sym = p->symbol;
365 if (!SYMBOLP (sym))
366 sym = XCAR (sym);
367 new_thread->initial_specpdl
368 = Fcons (Fcons (sym, find_symbol_value (sym)),
369 new_thread->initial_specpdl);
373 /* We'll need locking here. */
374 new_thread->next_thread = all_threads;
375 all_threads = new_thread;
377 if (pthread_create (&thr, NULL, run_thread, new_thread))
379 /* Restore the previous situation. */
380 all_threads = all_threads->next_thread;
381 error ("Could not start a new thread");
384 return Qnil;
387 /* Get the current thread as a lisp object. */
388 Lisp_Object
389 get_current_thread (void)
391 Lisp_Object result;
392 XSETTHREAD (result, current_thread);
393 return result;
396 /* Get the main thread as a lisp object. */
397 Lisp_Object
398 get_main_thread (void)
400 Lisp_Object result;
401 XSETTHREAD (result, &primary_thread);
402 return result;
405 /* Is the current an user thread. */
407 user_thread_p (void)
409 struct thread_state *it = all_threads;
410 pthread_t self = pthread_self ();
413 if (it->pthread_id == self)
414 return 1;
416 while (it = it->next_thread);
418 return 0;
421 DEFUN ("inhibit-yield", Finhibit_yield, Sinhibit_yield, 1, 1, 0,
422 doc: /* Inhibit the yield function. */)
423 (val)
424 Lisp_Object val;
426 if (!EQ (val, Qnil))
427 inhibit_yield_counter++;
428 else if (inhibit_yield_counter > 0)
429 inhibit_yield_counter--;
431 return Qnil;
435 thread_select (n, rfd, wfd, xfd, tmo)
436 int n;
437 SELECT_TYPE *rfd, *wfd, *xfd;
438 EMACS_TIME *tmo;
440 char end;
441 int ret;
442 current_thread->blocked = 1;
444 reschedule (&end, 0);
446 pthread_mutex_unlock (&global_lock);
448 ret = select (n, rfd, wfd, xfd, tmo);
449 current_thread->blocked = 0;
451 pthread_mutex_lock (&global_lock);
452 pthread_cond_broadcast (&buffer_cond);
454 while (current_thread->pthread_id != next_thread)
455 pthread_cond_wait (&buffer_cond, &global_lock);
457 return ret;
461 other_threads_p (void)
463 int avail = 0;
464 struct thread_state *it = all_threads;
465 for (; it && avail < 2; it = it->next_thread)
466 if (!it->blocked)
467 avail++;
469 return avail > 1;
472 void
473 init_threads (void)
475 pthread_mutex_init (&global_lock, NULL);
476 pthread_cond_init (&buffer_cond, NULL);
477 pthread_mutex_lock (&global_lock);
479 primary_thread.pthread_id = pthread_self ();
480 primary_thread.nolock = 0;
481 primary_thread.blocked = 0;
482 next_thread = primary_thread.pthread_id;
485 void
486 syms_of_threads (void)
488 defsubr (&Srun_in_thread);
489 defsubr (&Syield);
490 defsubr (&Sinhibit_yield);