Move all locking logic into thread.c.
[emacs.git] / src / thread.c
blob 92d0c1801127a1e244378078928fd2f9568fcc1d

#include <config.h>
#include "lisp.h"
#include "buffer.h"
#include "blockinput.h"
#include <pthread.h>

void mark_byte_stack P_ ((struct byte_stack *));
void mark_backtrace P_ ((struct backtrace *));
void mark_catchlist P_ ((struct catchtag *));
void mark_stack P_ ((char *, char *));
void flush_stack_call_func P_ ((void (*) (char *, void *), void *));

/* Condition variable, used together with the global lock, on which
   threads wait for a buffer to become available.  */
static pthread_cond_t buffer_cond;

static struct thread_state primary_thread;

static struct thread_state *all_threads = &primary_thread;

__thread struct thread_state *current_thread = &primary_thread;

static int inhibit_yield_counter = 0;

pthread_mutex_t global_lock;
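
/* A sketch of the locking model implied by the code in this file (a
   reading aid, not an authoritative specification): a thread must
   hold GLOBAL_LOCK while it runs Lisp code.  The lock is dropped
   around sched_yield in thread_yield_callback, and is released by
   pthread_cond_wait while a thread sleeps in thread_acquire_buffer;
   those are the points where control can pass to another thread.
   BUFFER_COND is broadcast whenever a buffer may have become free, so
   that waiting threads recheck whether they can acquire it.  */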

/* Mark the Lisp data reachable from a single thread, for GC.  */
static void
mark_one_thread (struct thread_state *thread)
{
  register struct specbinding *bind;
  struct handler *handler;
  Lisp_Object tem;

  for (bind = thread->m_specpdl; bind != thread->m_specpdl_ptr; bind++)
    {
      mark_object (bind->symbol);
      mark_object (bind->old_value);
    }

#if (GC_MARK_STACK == GC_MAKE_GCPROS_NOOPS \
     || GC_MARK_STACK == GC_MARK_STACK_CHECK_GCPROS)
  mark_stack (thread->stack_bottom, thread->stack_top);
#else
  {
    register struct gcpro *tail;
    register int i;
    for (tail = thread->m_gcprolist; tail; tail = tail->next)
      for (i = 0; i < tail->nvars; i++)
        mark_object (tail->var[i]);
  }
#endif

  if (thread->m_byte_stack_list)
    mark_byte_stack (thread->m_byte_stack_list);

  mark_catchlist (thread->m_catchlist);

  for (handler = thread->m_handlerlist; handler; handler = handler->next)
    {
      mark_object (handler->handler);
      mark_object (handler->var);
    }

  mark_backtrace (thread->m_backtrace_list);

  if (thread->m_current_buffer)
    {
      XSETBUFFER (tem, thread->m_current_buffer);
      mark_object (tem);
    }
}

/* GC callback: mark each thread's Lisp data.  END is the stack top
   of the current thread.  */
static void
mark_threads_callback (char *end, void *ignore)
{
  struct thread_state *iter;

  current_thread->stack_top = end;
  for (iter = all_threads; iter; iter = iter->next_thread)
    {
      Lisp_Object thread_obj;
      XSETTHREAD (thread_obj, iter);
      mark_object (thread_obj);
      mark_one_thread (iter);
    }
}

void
mark_threads (void)
{
  flush_stack_call_func (mark_threads_callback, NULL);
}

void
unmark_threads (void)
{
  struct thread_state *iter;

  for (iter = all_threads; iter; iter = iter->next_thread)
    if (iter->m_byte_stack_list)
      unmark_byte_stack (iter->m_byte_stack_list);
}
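
/* Acquire the buffer NB for the current thread, after releasing the
   buffer it held before.  A summary of the ownership protocol, as
   suggested by the code below (descriptive only): a buffer's `owner'
   field names the thread that holds it, is Qnil when the buffer is
   free, and is Qt while the buffer is being killed; `prev_owner'
   remembers the previous holder so ownership can be restored on
   release.  A thread started with NOLOCK never records itself as the
   owner.  */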

void
thread_acquire_buffer (char *end, void *nb)
{
  struct buffer *new_buffer = nb;

  if (current_buffer)
    {
      /* Release the buffer we held until now.  */
      current_buffer->owner = current_buffer->prev_owner;
      current_buffer->prev_owner = Qnil;
    }

  /* FIXME this check should be in the caller, for better
     single-threaded performance.  */
  if (other_threads_p () && !thread_inhibit_yield_p ())
    {
      /* Let other threads try to acquire a buffer.  */
      pthread_cond_broadcast (&buffer_cond);

      /* If our desired buffer is locked, wait for it.  */
      while (other_threads_p ()
             && !current_thread->nolock
             && !EQ (new_buffer->owner, Qnil)
             /* We set the owner to Qt to mean it is being killed.  */
             && !EQ (new_buffer->owner, Qt))
        pthread_cond_wait (&buffer_cond, &global_lock);
    }

  /* FIXME: if buffer is killed */
  new_buffer->prev_owner = new_buffer->owner;
  if (current_thread->nolock)
    new_buffer->owner = Qnil;
  else
    new_buffer->owner = get_current_thread ();
}

int
thread_inhibit_yield_p (void)
{
  return inhibit_yield_counter || interrupt_input_blocked || abort_on_gc;
}

static void
thread_yield_callback (char *end, void *ignore)
{
  if (thread_inhibit_yield_p ())
    return;

  current_thread->stack_top = end;
  pthread_mutex_unlock (&global_lock);
  sched_yield ();
  pthread_mutex_lock (&global_lock);
}

void
thread_yield (void)
{
  /* Note: currently it is safe to check this here, but eventually it
     will require a lock to ensure non-racy operation.  */
  /* Only yield if there is another thread to yield to.  */
  if (all_threads->next_thread)
    flush_stack_call_func (thread_yield_callback, NULL);
}

DEFUN ("yield", Fyield, Syield, 0, 0, 0,
       doc: /* Yield to the next thread.
Return t if there are other threads to run, otherwise nil.  */)
     (void)
{
  thread_yield ();
  return other_threads_p () ? Qt : Qnil;
}

/* Run the current thread's Lisp function, after installing the
   special bindings captured from the parent thread.  */
static Lisp_Object
invoke_thread_function (void)
{
  Lisp_Object iter;

  int count = SPECPDL_INDEX ();

  /* Set up specpdl.  */
  for (iter = current_thread->initial_specpdl;
       !EQ (iter, Qnil);
       iter = XCDR (iter))
    {
      /* We may bind a variable twice -- but it doesn't matter because
         there is no way to undo these bindings without exiting the
         thread.  */
      specbind (XCAR (XCAR (iter)), XCDR (XCAR (iter)));
    }
  current_thread->initial_specpdl = Qnil;

  Feval (current_thread->func);
  return unbind_to (count, Qnil);
}

static Lisp_Object
do_nothing (Lisp_Object whatever)
{
  return whatever;
}
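
/* Start routine of a new thread, passed to pthread_create.  A rough
   outline of what it does, as a reading aid: record the stack bounds,
   allocate a private specpdl, install itself as the current thread,
   acquire the initial buffer (usually waiting for the parent thread
   to release it), take the global lock, run the thread function
   inside a condition-case, then unlink itself from ALL_THREADS,
   release its buffer, and clean up.  */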

static void *
run_thread (void *state)
{
  struct thread_state *self = state;
  struct thread_state **iter;
  struct gcpro gcpro1;
  Lisp_Object buffer;
  char stack_pos;

  self->stack_top = self->stack_bottom = &stack_pos;

  self->m_specpdl_size = 50;
  self->m_specpdl = xmalloc (self->m_specpdl_size
                             * sizeof (struct specbinding));
  self->m_specpdl_ptr = self->m_specpdl;
  self->pthread_id = pthread_self ();

  /* Thread-local assignment.  */
  current_thread = self;

  /* We need special handling to set the initial buffer.  Our parent
     thread is very likely to be using this same buffer so we will
     typically wait for the parent thread to release it first.  */
  XSETBUFFER (buffer, self->m_current_buffer);
  GCPRO1 (buffer);
  self->m_current_buffer = 0;
  set_buffer_internal (XBUFFER (buffer));

  pthread_mutex_lock (&global_lock);

  /* It might be nice to do something with errors here.  */
  internal_condition_case (invoke_thread_function, Qt, do_nothing);

  /* Unlink this thread from the list of all threads.  */
  for (iter = &all_threads; *iter != self; iter = &(*iter)->next_thread)
    ;
  *iter = (*iter)->next_thread;

  /* Release our buffer, unless it is being killed, and wake any
     thread waiting for it.  */
  if (!EQ (self->m_current_buffer->owner, Qt))
    {
      self->m_current_buffer->owner = Qnil;
      pthread_cond_broadcast (&buffer_cond);
    }

  xfree (self->m_specpdl);

  pthread_mutex_unlock (&global_lock);

  return NULL;
}

DEFUN ("run-in-thread", Frun_in_thread, Srun_in_thread, 1, 2, 0,
       doc: /* Start a new thread and run FUNCTION in it.
When the function exits, the thread dies.  When NOLOCK is non-nil the thread
does not try to get a lock on the current buffer.  */)
     (function, nolock)
     Lisp_Object function;
     Lisp_Object nolock;
{
  char stack_pos;
  pthread_t thr;
  struct thread_state *new_thread;
  struct specbinding *p;

  /* Can't start a thread in temacs.  */
  if (!initialized)
    abort ();

  new_thread = (struct thread_state *) allocate_pseudovector (VECSIZE (struct thread_state),
                                                              2, PVEC_THREAD);
  memset ((char *) new_thread + OFFSETOF (struct thread_state, m_gcprolist),
          0, sizeof (struct thread_state) - OFFSETOF (struct thread_state,
                                                      m_gcprolist));

  new_thread->func = function;
  new_thread->nolock = !EQ (nolock, Qnil);
  new_thread->initial_specpdl = Qnil;
  new_thread->m_current_buffer = current_thread->m_current_buffer;
  new_thread->stack_bottom = &stack_pos;

  /* Capture the parent thread's special bindings so the new thread
     can re-establish them.  */
  for (p = specpdl; p != specpdl_ptr; ++p)
    {
      if (!p->func)
        {
          Lisp_Object sym = p->symbol;
          if (!SYMBOLP (sym))
            sym = XCAR (sym);
          new_thread->initial_specpdl
            = Fcons (Fcons (sym, find_symbol_value (sym)),
                     new_thread->initial_specpdl);
        }
    }

  /* We'll need locking here.  */
  new_thread->next_thread = all_threads;
  all_threads = new_thread;

  if (pthread_create (&thr, NULL, run_thread, new_thread))
    {
      /* Restore the previous situation.  */
      all_threads = all_threads->next_thread;
    }

  return Qnil;
}

/* Get the current thread as a lisp object.  */
Lisp_Object
get_current_thread (void)
{
  Lisp_Object result;
  XSETTHREAD (result, current_thread);
  return result;
}

/* Get the main thread as a lisp object.  */
Lisp_Object
get_main_thread (void)
{
  Lisp_Object result;
  XSETTHREAD (result, &primary_thread);
  return result;
}

/* Return 1 if the calling thread appears in the list of all threads.  */
int
user_thread_p (void)
{
  struct thread_state *it = all_threads;
  pthread_t self = pthread_self ();
  do
    {
      if (it->pthread_id == self)
        return 1;
    }
  while ((it = it->next_thread));
  return 0;
}

DEFUN ("inhibit-yield", Finhibit_yield, Sinhibit_yield, 1, 1, 0,
       doc: /* Inhibit or re-enable yielding to other threads.
With non-nil VAL, increment the inhibit counter; otherwise decrement it.
Yielding is inhibited while the counter is positive.  */)
     (val)
     Lisp_Object val;
{
  if (!EQ (val, Qnil))
    inhibit_yield_counter++;
  else if (inhibit_yield_counter > 0)
    inhibit_yield_counter--;

  return Qnil;
}

int
other_threads_p (void)
{
  return all_threads->next_thread != NULL;
}

void
init_threads (void)
{
  pthread_mutex_init (&global_lock, NULL);
  pthread_mutex_lock (&global_lock);
  primary_thread.pthread_id = pthread_self ();
  primary_thread.nolock = 0;

  pthread_cond_init (&buffer_cond, NULL);
}

void
syms_of_threads (void)
{
  defsubr (&Srun_in_thread);
  defsubr (&Syield);
  defsubr (&Sinhibit_yield);
}
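
/* Usage sketch (illustrative only, not part of this file's code).
   Since invoke_thread_function evaluates the thread's `func' with
   Feval, a caller would pass a form to evaluate, e.g.

     (run-in-thread '(do-some-work))  ; start a thread evaluating the form
     (yield)                          ; give other threads a chance to run
     (inhibit-yield t)                ; disable yielding
     (inhibit-yield nil)              ; re-enable it

   where `do-some-work' is a hypothetical user function.  */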