6 #include "blockinput.h"
/* GC marking entry points declared here for the thread code, which
   must mark each thread's byte stack, backtrace, catch list and
   control stack.  Presumably defined in alloc.c / bytecode.c /
   eval.c -- TODO confirm.  P_ is the old-style prototype macro.  */
void mark_byte_stack P_ ((struct byte_stack *));
void mark_backtrace P_ ((struct backtrace *));
void mark_catchlist P_ ((struct catchtag *));
void mark_stack P_ ((char *, char *));
/* Flush registers to the stack, then call the given function with the
   stack end and the opaque second argument.  */
void flush_stack_call_func P_ ((void (*) (char *, void *), void *));
/* Get the next thread as in a circular buffer: follow X's link, or
   wrap around to the head of the global thread list.  X is
   parenthesized so that expression arguments expand correctly.  */
#define NEXT_THREAD(x) ((x)->next_thread ? (x)->next_thread : all_threads)
/* Condition variable used together with the global lock; threads
   block on it until the scheduler hands them the CPU.  */
static pthread_cond_t thread_cond;

/* Thread state of the Emacs primary (startup) thread.  */
static struct thread_state primary_thread;

/* Head of the linked list of all threads (linked via next_thread).  */
static struct thread_state *all_threads = &primary_thread;

/* The thread state of the thread currently executing; declared
   __thread so each pthread sees its own value.  */
__thread struct thread_state *current_thread = &primary_thread;

/* Global lock serializing thread execution (see reschedule and
   thread_select).  */
pthread_mutex_t global_lock;

/* Used internally by the scheduler, it is the next that will be
   executed: the pthread id of the thread scheduled to run.  */
static pthread_t next_thread;

/* Lisp mutex guarding the minibuffer; created in init_threads_once.  */
Lisp_Object minibuffer_mutex;
38 /* Choose the next thread to be executed. */
42 struct thread_state
*it
= current_thread
;
45 it
= NEXT_THREAD (it
);
47 while (it
->blocked
&& it
!= current_thread
);
49 next_thread
= it
->pthread_id
;
52 /* Schedule a new thread and block the caller until it is not scheduled
55 reschedule (char *end
, int wait
)
57 current_thread
->stack_top
= end
;
60 if (next_thread
!= current_thread
->pthread_id
)
61 pthread_cond_broadcast (&thread_cond
);
66 pthread_mutex_unlock (&global_lock
);
68 pthread_mutex_lock (&global_lock
);
70 while (current_thread
->pthread_id
!= next_thread
)
71 pthread_cond_wait (&thread_cond
, &global_lock
);
75 mark_one_thread (struct thread_state
*thread
)
77 register struct specbinding
*bind
;
78 struct handler
*handler
;
81 for (bind
= thread
->m_specpdl
; bind
!= thread
->m_specpdl_ptr
; bind
++)
83 mark_object (bind
->symbol
);
84 mark_object (bind
->old_value
);
87 #if (GC_MARK_STACK == GC_MAKE_GCPROS_NOOPS \
88 || GC_MARK_STACK == GC_MARK_STACK_CHECK_GCPROS)
89 mark_stack (thread
->stack_bottom
, thread
->stack_top
);
92 register struct gcpro
*tail
;
93 for (tail
= thread
->m_gcprolist
; tail
; tail
= tail
->next
)
94 for (i
= 0; i
< tail
->nvars
; i
++)
95 mark_object (tail
->var
[i
]);
99 if (thread
->m_byte_stack_list
)
100 mark_byte_stack (thread
->m_byte_stack_list
);
102 mark_catchlist (thread
->m_catchlist
);
104 for (handler
= thread
->m_handlerlist
; handler
; handler
= handler
->next
)
106 mark_object (handler
->handler
);
107 mark_object (handler
->var
);
110 mark_backtrace (thread
->m_backtrace_list
);
112 if (thread
->m_current_buffer
)
114 XSETBUFFER (tem
, thread
->m_current_buffer
);
118 mark_object (thread
->m_last_thing_searched
);
120 if (thread
->m_saved_last_thing_searched
)
121 mark_object (thread
->m_saved_last_thing_searched
);
125 mark_threads_callback (char *end
, void *ignore
)
127 struct thread_state
*iter
;
129 current_thread
->stack_top
= end
;
130 for (iter
= all_threads
; iter
; iter
= iter
->next_thread
)
132 Lisp_Object thread_obj
;
133 XSETTHREAD (thread_obj
, iter
);
134 mark_object (thread_obj
);
135 mark_one_thread (iter
);
142 flush_stack_call_func (mark_threads_callback
, NULL
);
146 unmark_threads (void)
148 struct thread_state
*iter
;
150 for (iter
= all_threads
; iter
; iter
= iter
->next_thread
)
151 if (iter
->m_byte_stack_list
)
152 unmark_byte_stack (iter
->m_byte_stack_list
);
/* flush_stack_call_func callback for thread_yield.  IGNORE is unused.
   NOTE(review): the body of this function was lost in this copy; the
   single waiting reschedule call is reconstructed -- confirm against
   the original file.  */
static void
thread_yield_callback (char *end, void *ignore)
{
  reschedule (end, 1);
}
164 /* Note: currently it is safe to check this here, but eventually it
165 will require a lock to ensure non-racy operation. */
166 /* Only yield if there is another thread to yield to. */
167 if (all_threads
->next_thread
)
168 flush_stack_call_func (thread_yield_callback
, NULL
);
171 DEFUN ("yield", Fyield
, Syield
, 0, 0, 0,
172 doc
: /* Yield to the next thread. */)
176 return other_threads_p () ? Qt
: Qnil
;
180 invoke_thread_function (void)
184 int count
= SPECPDL_INDEX ();
186 /* Set up specpdl. */
187 for (iter
= current_thread
->initial_specpdl
;
191 /* We may bind a variable twice -- but it doesn't matter because
192 there is no way to undo these bindings without exiting the
194 specbind (XCAR (XCAR (iter
)), XCDR (XCAR (iter
)));
196 current_thread
->initial_specpdl
= Qnil
;
198 Ffuncall (1, ¤t_thread
->func
);
199 return unbind_to (count
, Qnil
);
203 do_nothing (Lisp_Object whatever
)
209 run_thread (void *state
)
211 struct thread_state
*self
= state
;
212 struct thread_state
**iter
;
217 self
->stack_top
= self
->stack_bottom
= &stack_pos
;
219 self
->m_specpdl_size
= 50;
220 self
->m_specpdl
= xmalloc (self
->m_specpdl_size
221 * sizeof (struct specbinding
));
222 self
->m_specpdl_ptr
= self
->m_specpdl
;
223 self
->pthread_id
= pthread_self ();
225 /* Thread-local assignment. */
226 current_thread
= self
;
228 /* We need special handling to set the initial buffer. Our parent
229 thread is very likely to be using this same buffer so we will
230 typically wait for the parent thread to release it first. */
231 XSETBUFFER (buffer
, self
->m_current_buffer
);
233 self
->m_current_buffer
= 0;
235 pthread_mutex_lock (&global_lock
);
237 set_buffer_internal (XBUFFER (buffer
));
239 /* It might be nice to do something with errors here. */
240 internal_condition_case (invoke_thread_function
, Qt
, do_nothing
);
242 blocal_unbind_thread (get_current_thread ());
244 /* Unlink this thread from the list of all threads. */
245 for (iter
= &all_threads
; *iter
!= self
; iter
= &(*iter
)->next_thread
)
247 *iter
= (*iter
)->next_thread
;
250 pthread_cond_broadcast (&thread_cond
);
252 xfree (self
->m_specpdl
);
254 pthread_mutex_unlock (&global_lock
);
259 DEFUN ("run-in-thread", Frun_in_thread
, Srun_in_thread
, 1, 1, 0,
260 doc
: /* Start a new thread and run FUNCTION in it.
261 When the function exits, the thread dies. */)
263 Lisp_Object function
;
267 struct thread_state
*new_thread
;
268 struct specbinding
*p
;
270 /* Can't start a thread in temacs. */
274 new_thread
= ALLOCATE_PSEUDOVECTOR (struct thread_state
, m_gcprolist
,
276 memset ((char *) new_thread
+ OFFSETOF (struct thread_state
, m_gcprolist
),
277 0, sizeof (struct thread_state
) - OFFSETOF (struct thread_state
,
280 new_thread
->func
= function
;
281 new_thread
->blocked
= 0;
282 new_thread
->initial_specpdl
= Qnil
;
283 new_thread
->m_last_thing_searched
= Qnil
; /* copy from parent? */
284 new_thread
->m_saved_last_thing_searched
= Qnil
;
285 new_thread
->m_current_buffer
= current_thread
->m_current_buffer
;
286 new_thread
->stack_bottom
= &stack_pos
;
288 for (p
= specpdl
; p
!= specpdl_ptr
; ++p
)
292 Lisp_Object sym
= p
->symbol
;
295 new_thread
->initial_specpdl
296 = Fcons (Fcons (sym
, find_symbol_value (sym
)),
297 new_thread
->initial_specpdl
);
301 /* We'll need locking here. */
302 new_thread
->next_thread
= all_threads
;
303 all_threads
= new_thread
;
305 if (pthread_create (&thr
, NULL
, run_thread
, new_thread
))
307 /* Restore the previous situation. */
308 all_threads
= all_threads
->next_thread
;
309 error ("Could not start a new thread");
315 /* Get the current thread as a lisp object. */
317 get_current_thread (void)
320 XSETTHREAD (result
, current_thread
);
324 /* Get the main thread as a lisp object. */
326 get_main_thread (void)
329 XSETTHREAD (result
, &primary_thread
);
333 /* Is the current an user thread. */
337 struct thread_state
*it
= all_threads
;
338 pthread_t self
= pthread_self ();
341 if (it
->pthread_id
== self
)
344 while (it
= it
->next_thread
);
349 DEFUN ("make-mutex", Fmake_mutex
, Smake_mutex
, 0, 1, 0,
350 doc
: /* Make a mutex. If RECURSIVE is nil the mutex is not recursive
351 and can be locked once. */)
353 Lisp_Object recursive
;
356 struct Lisp_Mutex
*mutex
= allocate_mutex ();
358 mutex
->rec_counter
= 0;
359 mutex
->recursive
= recursive
;
360 XSETMUTEX (ret
, mutex
);
364 DEFUN ("mutex-lock", Fmutex_lock
, Smutex_lock
, 1, 1, 0,
365 doc
: /* Lock a mutex. */)
369 struct Lisp_Mutex
*mutex
= XMUTEX (val
);
372 if (mutex
->owner
== 0
373 || (!NILP (mutex
->recursive
) && mutex
->owner
== pthread_self ()))
375 mutex
->owner
= pthread_self ();
376 mutex
->rec_counter
++;
386 DEFUN ("mutex-unlock", Fmutex_unlock
, Smutex_unlock
, 1, 1, 0,
387 doc
: /* Unlock a mutex. */)
391 struct Lisp_Mutex
*mutex
= XMUTEX (val
);
393 if (mutex
->owner
!= pthread_self () || mutex
->rec_counter
== 0)
396 mutex
->rec_counter
--;
398 if (mutex
->rec_counter
== 0)
405 thread_select (n
, rfd
, wfd
, xfd
, tmo
)
407 SELECT_TYPE
*rfd
, *wfd
, *xfd
;
412 current_thread
->blocked
= 1;
414 reschedule (&end
, 0);
416 pthread_mutex_unlock (&global_lock
);
418 ret
= select (n
, rfd
, wfd
, xfd
, tmo
);
419 current_thread
->blocked
= 0;
421 pthread_mutex_lock (&global_lock
);
422 pthread_cond_broadcast (&thread_cond
);
424 while (current_thread
->pthread_id
!= next_thread
)
425 pthread_cond_wait (&thread_cond
, &global_lock
);
431 other_threads_p (void)
433 return all_threads
->next
? 1 : 0;
437 thread_notify_kill_buffer (register struct buffer
*b
)
439 register Lisp_Object tem
;
440 struct thread_state
*it
= all_threads
;
441 for (; it
; it
= it
->next_thread
)
443 if (b
== it
->m_current_buffer
)
445 register Lisp_Object buf
;
446 XSETBUFFER (buf
, it
->m_current_buffer
);
447 tem
= Fother_buffer (buf
, Qnil
, Qnil
);
448 it
->m_current_buffer
= XBUFFER (tem
);
449 if (b
== it
->m_current_buffer
)
458 init_threads_once (void)
460 primary_thread
.size
= PSEUDOVECSIZE (struct thread_state
, m_gcprolist
);
461 primary_thread
.next
= NULL
;
462 primary_thread
.func
= Qnil
;
463 primary_thread
.initial_specpdl
= Qnil
;
464 XSETPVECTYPE (&primary_thread
, PVEC_THREAD
);
465 minibuffer_mutex
= Fmake_mutex (Qt
);
471 pthread_mutex_init (&global_lock
, NULL
);
472 pthread_cond_init (&thread_cond
, NULL
);
473 pthread_mutex_lock (&global_lock
);
475 primary_thread
.pthread_id
= pthread_self ();
476 primary_thread
.blocked
= 0;
477 primary_thread
.m_last_thing_searched
= Qnil
;
478 next_thread
= primary_thread
.pthread_id
;
482 syms_of_threads (void)
484 DEFVAR_LISP ("minibuffer-mutex", &minibuffer_mutex
,
485 doc
: /* Mutex for the minibuffer. */);
487 defsubr (&Srun_in_thread
);
489 defsubr (&Smake_mutex
);
490 defsubr (&Smutex_lock
);
491 defsubr (&Smutex_unlock
);