5 #include "blockinput.h"
10 void mark_byte_stack
P_ ((struct byte_stack
*));
11 void mark_backtrace
P_ ((struct backtrace
*));
12 void mark_catchlist
P_ ((struct catchtag
*));
13 void mark_stack
P_ ((char *, char *));
14 void flush_stack_call_func
P_ ((void (*) (char *, void *), void *));
/* Get the next thread as in circular buffer.  */
/* The argument is parenthesized so that any pointer-valued expression
   (e.g. `&thr') expands correctly; note X is still evaluated twice.  */
#define NEXT_THREAD(x) ((x)->next_thread ? (x)->next_thread : all_threads)
19 /* condition var .. w/ global lock */
21 static pthread_cond_t buffer_cond
;
23 static struct thread_state primary_thread
;
25 static struct thread_state
*all_threads
= &primary_thread
;
27 __thread
struct thread_state
*current_thread
= &primary_thread
;
29 static int inhibit_yield_counter
= 0;
31 pthread_mutex_t global_lock
;
33 /* Used internally by the scheduler, it is the next that will be executed. */
34 static pthread_t next_thread
;
/* NOTE(review): mangled extract -- the enclosing function header
   (original lines 37-39, presumably the static scheduler routine),
   its braces, the `it = begin;' assignment and the round-robin loop
   header are missing from this view; the leading numbers are line
   numbers left over from the extraction.  Confirm against the
   pristine file before editing.  */
36 /* Choose the next thread to be executed. */
40 struct thread_state
*it
, *begin
= NEXT_THREAD (current_thread
);
/* CHECK_THREAD (T, B): if thread T is not blocked and may run -- no
   other runnable thread exists, T holds no lock, T's desired buffer
   is already current, or buffer B is unowned or being killed --
   record T as the next thread to execute.  NOTE(review): the macro's
   closing lines (braces and, presumably, a `return;') are missing
   from this extract.  */
42 #define CHECK_THREAD(T,B) \
43 if ((!other_threads_p () \
44 || ((struct thread_state *)T)->nolock \
45 || EQ (((struct thread_state *)T)->desired_buffer, \
46 ((struct thread_state *)T)->m_current_buffer) \
47 || EQ (B->owner, Qnil) \
48 /* We set the owner to Qt to mean it is being killed. */ \
49 || EQ (B->owner, Qt)) \
50 && !((struct thread_state *)T)->blocked) \
52 next_thread = ((struct thread_state *)T)->pthread_id; \
56 /* Try to wake up the thread that is holding the desired buffer. */
57 if (current_thread
->desired_buffer
)
59 struct buffer
*db
= current_thread
->desired_buffer
;
60 if (!EQ (db
->owner
, Qnil
) && !EQ (db
, current_buffer
))
61 CHECK_THREAD (XVECTOR (db
->owner
), db
);
64 /* A simple round-robin. We can't just check for it != current_thread
65 because current_thread could be already unlinked from all_threads. */
69 struct buffer
*new_buffer
= it
->desired_buffer
;
71 CHECK_THREAD (it
, new_buffer
);
73 it
= NEXT_THREAD (it
);
74 if (it
== current_thread
)
79 /* Schedule a new thread and block the caller until it is not scheduled
   again (comment closed here by review; its original tail is missing).
   END is the caller's current stack top; WAIT presumably selects
   whether to block -- TODO confirm.  NOTE(review): mangled extract;
   the return type, braces and several original lines (80-83, 86-87,
   90-93, 95, 97, 100-102) -- among them the statement guarded by the
   thread_inhibit_yield_p test and an early exit controlled by WAIT --
   are missing.  Confirm against the pristine file.  */
82 reschedule (char *end
, int wait
)
/* Record the stack top so GC can scan this thread's stack.  */
84 current_thread
->stack_top
= end
;
85 if (!thread_inhibit_yield_p ())
/* Wake whichever thread has been chosen to run next, if not us.  */
88 if (next_thread
!= current_thread
->pthread_id
)
89 pthread_cond_broadcast (&buffer_cond
);
94 pthread_mutex_unlock (&global_lock
);
96 pthread_mutex_lock (&global_lock
);
/* Sleep until the scheduler picks this thread again.  */
98 while (current_thread
->pthread_id
!= next_thread
)
99 pthread_cond_wait (&buffer_cond
, &global_lock
);
/* Mark for garbage collection every Lisp object reachable from
   THREAD's per-thread state: its specpdl bindings, its C stack (or,
   alternatively, its gcpro chain), its byte-code stack, catch list,
   handler list, backtrace and current buffer.  NOTE(review): mangled
   extract; the return type, braces, the declarations of `i' and
   `tem', and the conditional-compilation lines around the gcpro loop
   are missing from this view.  Confirm against the pristine file.  */
103 mark_one_thread (struct thread_state
*thread
)
105 register struct specbinding
*bind
;
106 struct handler
*handler
;
/* Mark both the bound symbol and the saved value of each binding.  */
109 for (bind
= thread
->m_specpdl
; bind
!= thread
->m_specpdl_ptr
; bind
++)
111 mark_object (bind
->symbol
);
112 mark_object (bind
->old_value
);
/* With conservative stack marking, scan the whole C stack;
   otherwise walk the explicit gcpro chain below.  */
115 #if (GC_MARK_STACK == GC_MAKE_GCPROS_NOOPS \
116 || GC_MARK_STACK == GC_MARK_STACK_CHECK_GCPROS)
117 mark_stack (thread
->stack_bottom
, thread
->stack_top
);
120 register struct gcpro
*tail
;
121 for (tail
= thread
->m_gcprolist
; tail
; tail
= tail
->next
)
122 for (i
= 0; i
< tail
->nvars
; i
++)
123 mark_object (tail
->var
[i
]);
127 if (thread
->m_byte_stack_list
)
128 mark_byte_stack (thread
->m_byte_stack_list
);
130 mark_catchlist (thread
->m_catchlist
);
132 for (handler
= thread
->m_handlerlist
; handler
; handler
= handler
->next
)
134 mark_object (handler
->handler
);
135 mark_object (handler
->var
);
138 mark_backtrace (thread
->m_backtrace_list
);
/* Mark the thread's current buffer as a Lisp object.  */
140 if (thread
->m_current_buffer
)
142 XSETBUFFER (tem
, thread
->m_current_buffer
);
148 mark_threads_callback (char *end
, void *ignore
)
150 struct thread_state
*iter
;
152 current_thread
->stack_top
= end
;
153 for (iter
= all_threads
; iter
; iter
= iter
->next_thread
)
155 Lisp_Object thread_obj
;
156 XSETTHREAD (thread_obj
, iter
);
157 mark_object (thread_obj
);
158 mark_one_thread (iter
);
/* NOTE(review): the header of the enclosing function (original lines
   160-164) is missing from this extract; judging by the callback's
   name, this is the entry point that marks all threads for GC by
   flushing registers to the stack and invoking
   mark_threads_callback.  Confirm against the pristine file.  */
165 flush_stack_call_func (mark_threads_callback
, NULL
);
169 unmark_threads (void)
171 struct thread_state
*iter
;
173 for (iter
= all_threads
; iter
; iter
= iter
->next_thread
)
174 if (iter
->m_byte_stack_list
)
175 unmark_byte_stack (iter
->m_byte_stack_list
);
/* Release ownership of the current buffer and take ownership of NB
   (a `struct buffer *' passed as void *), making it current for this
   thread.  END is the caller's stack top.  NOTE(review): mangled
   extract; the return type, braces, and original lines 183-184,
   187-190, 195, 197 and 199-200 -- apparently including a guard
   around the release, the blocking step between releasing and taking
   ownership, and the `else' matching the nolock test -- are missing.
   Confirm against the pristine file.  */
179 thread_acquire_buffer (char *end
, void *nb
)
181 struct buffer
*new_buffer
= nb
;
182 current_thread
->desired_buffer
= new_buffer
;
/* Hand the old buffer back to its previous owner.  */
185 current_buffer
->owner
= current_buffer
->prev_owner
;
186 current_buffer
->prev_owner
= Qnil
;
191 /* FIXME: if buffer is killed */
/* Remember the previous owner so it can be restored on release.  */
192 new_buffer
->prev_owner
= new_buffer
->owner
;
/* A `nolock' thread leaves the buffer unowned instead of claiming it.  */
193 if (current_thread
->nolock
)
194 new_buffer
->owner
= Qnil
;
196 new_buffer
->owner
= get_current_thread ();
198 current_buffer
= new_buffer
;
202 thread_inhibit_yield_p ()
204 return inhibit_yield_counter
|| interrupt_input_blocked
|| abort_on_gc
;
208 thread_bind_bufferlocal_p (struct thread_state
*thread
)
210 register struct specbinding
*bind
;
212 for (bind
= thread
->m_specpdl
; bind
!= thread
->m_specpdl_ptr
; bind
++)
214 if (BUFFER_OBJFWDP (bind
->symbol
) || BUFFER_LOCAL_VALUEP (bind
->symbol
))
/* Callback run with the C stack flushed: give up the CPU to another
   thread.  If yielding is allowed and no buffer-local binding pins
   this thread, re-acquire the current buffer (which reschedules).
   IGNORE is unused.  NOTE(review): the function's return type, braces
   and the `else' branch (original lines 226-227) are missing from
   this extract -- confirm against the pristine file.  */
221 thread_yield_callback (char *end
, void *ignore
)
223 if (!thread_inhibit_yield_p ()
224 && !thread_bind_bufferlocal_p (current_thread
))
225 thread_acquire_buffer (end
, current_buffer
);
/* NOTE(review): the header of the enclosing function (original lines
   229-232, presumably the public thread-yield entry point) is missing
   from this extract.  Confirm against the pristine file.  */
233 /* Note: currently it is safe to check this here, but eventually it
234 will require a lock to ensure non-racy operation. */
235 /* Only yield if there is another thread to yield to. */
236 if (all_threads
->next_thread
)
237 flush_stack_call_func (thread_yield_callback
, NULL
);
/* NOTE(review): the DEFUN's argument list and body opening (original
   lines 242-244, presumably including the actual yield call) are
   missing from this extract.  Confirm against the pristine file.  */
240 DEFUN ("yield", Fyield
, Syield
, 0, 0, 0,
241 doc
: /* Yield to the next thread. */)
/* Return t when another thread exists to run to, else nil.  */
245 return other_threads_p () ? Qt
: Qnil
;
/* Run a new thread's Lisp payload: re-establish the thread's initial
   special bindings, evaluate the thread function, then unwind the
   bindings.  NOTE(review): mangled extract; the return type, braces,
   the declaration of `iter' and the for-loop's condition and step
   (original lines 257-259) are missing.  Confirm against the pristine
   file.  */
249 invoke_thread_function (void)
253 int count
= SPECPDL_INDEX ();
255 /* Set up specpdl. */
256 for (iter
= current_thread
->initial_specpdl
;
260 /* We may bind a variable twice -- but it doesn't matter because
261 there is no way to undo these bindings without exiting the
   thread (comment closed here by review; its original tail is
   missing).  */
263 specbind (XCAR (XCAR (iter
)), XCDR (XCAR (iter
)));
/* The initial bindings are consumed; drop them.  */
265 current_thread
->initial_specpdl
= Qnil
;
267 Feval (current_thread
->func
);
268 return unbind_to (count
, Qnil
);
/* NOTE(review): the body of this function (original lines 273-275) is
   missing from this extract; given its use below as the handler of
   internal_condition_case, it presumably just returns its argument.
   Confirm against the pristine file.  */
272 do_nothing (Lisp_Object whatever
)
/* pthread start routine for a new Lisp thread: initialize the
   thread's stack bounds, specpdl and pthread id, install it as the
   current thread, acquire its initial buffer, run the thread function
   inside a condition-case, then unlink the thread and release its
   resources.  NOTE(review): mangled extract; the return type, braces,
   the declarations of `stack_pos' and `buffer', the empty for-loop
   body (original line 314), original lines 319-320 (apparently a
   rescheduling step before the broadcast) and the trailing return are
   missing.  Confirm against the pristine file.  */
278 run_thread (void *state
)
280 struct thread_state
*self
= state
;
281 struct thread_state
**iter
;
/* Both stack bounds start at a local so GC scans a valid range.  */
286 self
->stack_top
= self
->stack_bottom
= &stack_pos
;
288 self
->m_specpdl_size
= 50;
289 self
->m_specpdl
= xmalloc (self
->m_specpdl_size
290 * sizeof (struct specbinding
));
291 self
->m_specpdl_ptr
= self
->m_specpdl
;
292 self
->pthread_id
= pthread_self ();
294 /* Thread-local assignment. */
295 current_thread
= self
;
297 /* We need special handling to set the initial buffer. Our parent
298 thread is very likely to be using this same buffer so we will
299 typically wait for the parent thread to release it first. */
300 XSETBUFFER (buffer
, self
->m_current_buffer
);
302 self
->desired_buffer
= (struct buffer
*) buffer
;
303 self
->m_current_buffer
= 0;
305 pthread_mutex_lock (&global_lock
);
307 set_buffer_internal (XBUFFER (buffer
));
309 /* It might be nice to do something with errors here. */
310 internal_condition_case (invoke_thread_function
, Qt
, do_nothing
);
312 /* Unlink this thread from the list of all threads. */
313 for (iter
= &all_threads
; *iter
!= self
; iter
= &(*iter
)->next_thread
)
315 *iter
= (*iter
)->next_thread
;
/* Give the buffer back unless it is being killed (owner == Qt).  */
317 if (!EQ (self
->m_current_buffer
->owner
, Qt
))
318 self
->m_current_buffer
->owner
= self
->m_current_buffer
->prev_owner
;
/* Wake the remaining threads so one of them can run.  */
321 pthread_cond_broadcast (&buffer_cond
);
323 xfree (self
->m_specpdl
);
325 pthread_mutex_unlock (&global_lock
);
/* Lisp primitive: allocate a new thread object, snapshot the
   creator's state into it, link it into all_threads, and start a
   pthread running it.  NOTE(review): mangled extract; the argument
   list, braces, the declarations of `stack_pos' and `thr', the temacs
   guard (original lines 344-346), the tails of the
   allocate_pseudovector and memset calls, the guard inside the
   specpdl-copy loop (original lines 361-366) and the closing lines
   are all missing.  Confirm against the pristine file.  Also
   NOTE(review): "no-nil" in the doc string looks like a typo for
   "non-nil".  */
330 DEFUN ("run-in-thread", Frun_in_thread
, Srun_in_thread
, 1, 2, 0,
331 doc
: /* Start a new thread and run FUNCTION in it.
332 When the function exits, the thread dies. When NOLOCK is no-nil the thread
333 does not try to get a lock on the current buffer. */)
335 Lisp_Object function
;
340 struct thread_state
*new_thread
;
341 struct specbinding
*p
;
343 /* Can't start a thread in temacs. */
347 new_thread
= (struct thread_state
*) allocate_pseudovector (VECSIZE (struct thread_state
),
/* Zero everything after the pseudovector header.  */
349 memset ((char *) new_thread
+ OFFSETOF (struct thread_state
, m_gcprolist
),
350 0, sizeof (struct thread_state
) - OFFSETOF (struct thread_state
,
353 new_thread
->func
= function
;
354 new_thread
->blocked
= 0;
355 new_thread
->nolock
= !EQ (nolock
, Qnil
);
356 new_thread
->initial_specpdl
= Qnil
;
/* The child starts out in the creator's current buffer.  */
357 new_thread
->m_current_buffer
= current_thread
->m_current_buffer
;
358 new_thread
->stack_bottom
= &stack_pos
;
/* Snapshot the creator's special bindings for the new thread.  */
360 for (p
= specpdl
; p
!= specpdl_ptr
; ++p
)
364 Lisp_Object sym
= p
->symbol
;
367 new_thread
->initial_specpdl
368 = Fcons (Fcons (sym
, find_symbol_value (sym
)),
369 new_thread
->initial_specpdl
);
373 /* We'll need locking here. */
/* Publish the thread on the global list before starting it.  */
374 new_thread
->next_thread
= all_threads
;
375 all_threads
= new_thread
;
377 if (pthread_create (&thr
, NULL
, run_thread
, new_thread
))
379 /* Restore the previous situation. */
380 all_threads
= all_threads
->next_thread
;
381 error ("Could not start a new thread");
387 /* Get the current thread as a lisp object. */
389 get_current_thread (void)
392 XSETTHREAD (result
, current_thread
);
396 /* Get the main thread as a lisp object. */
398 get_main_thread (void)
401 XSETTHREAD (result
, &primary_thread
);
/* NOTE(review): the header of the enclosing function (original lines
   406-408) and the loop body's return statements (original lines
   414-415 and 417-419) are missing from this extract; judging by the
   comment it tests whether the calling pthread is one of the Lisp
   threads on all_threads.  Confirm against the pristine file.  */
405 /* Is the current an user thread. */
409 struct thread_state
*it
= all_threads
;
410 pthread_t self
= pthread_self ();
413 if (it
->pthread_id
== self
)
416 while (it
= it
->next_thread
);
/* Lisp primitive maintaining the yield-inhibition counter.
   NOTE(review): the argument list and the condition selecting between
   the increment and the decrement (original lines 423-426), plus the
   closing lines, are missing from this extract -- presumably the
   counter is bumped for a non-nil argument and decremented otherwise.
   Confirm against the pristine file.  */
421 DEFUN ("inhibit-yield", Finhibit_yield
, Sinhibit_yield
, 1, 1, 0,
422 doc
: /* Inhibit the yield function. */)
427 inhibit_yield_counter
++;
428 else if (inhibit_yield_counter
> 0)
429 inhibit_yield_counter
--;
/* Thread-aware wrapper around select(): mark this thread blocked,
   hand the CPU to another thread, drop the global lock around the
   blocking select call, then re-acquire the lock and wait until the
   scheduler picks this thread again.  NOTE(review): mangled extract;
   the return type, the K&R declarations of N and TMO, the locals
   `end' and `ret', the braces and the final return are missing.
   Confirm against the pristine file.  */
435 thread_select (n
, rfd
, wfd
, xfd
, tmo
)
437 SELECT_TYPE
*rfd
, *wfd
, *xfd
;
442 current_thread
->blocked
= 1;
/* Let another thread run; pass wait=0 so we do not block here.  */
444 reschedule (&end
, 0);
446 pthread_mutex_unlock (&global_lock
);
448 ret
= select (n
, rfd
, wfd
, xfd
, tmo
);
449 current_thread
->blocked
= 0;
451 pthread_mutex_lock (&global_lock
);
452 pthread_cond_broadcast (&buffer_cond
);
/* Block until this thread is scheduled again.  */
454 while (current_thread
->pthread_id
!= next_thread
)
455 pthread_cond_wait (&buffer_cond
, &global_lock
);
/* NOTE(review): mangled extract -- the return type, braces, the
   declaration of `avail' and the loop body with the final return
   (original lines 466-469) are missing; the visible loop walks
   all_threads until two candidates are found, so this presumably
   reports whether a runnable thread besides the current one exists.
   Confirm against the pristine file.  */
461 other_threads_p (void)
464 struct thread_state
*it
= all_threads
;
465 for (; it
&& avail
< 2; it
= it
->next_thread
)
/* NOTE(review): the header of the enclosing function (original lines
   471-474, presumably the one-time thread-system initialization run
   at startup) is missing from this extract.  Confirm against the
   pristine file.  */
475 pthread_mutex_init (&global_lock
, NULL
);
476 pthread_cond_init (&buffer_cond
, NULL
);
/* The primary thread starts out holding the global lock.  */
477 pthread_mutex_lock (&global_lock
);
/* Register the calling pthread as the primary thread and make it the
   scheduled one.  */
479 primary_thread
.pthread_id
= pthread_self ();
480 primary_thread
.nolock
= 0;
481 primary_thread
.blocked
= 0;
482 next_thread
= primary_thread
.pthread_id
;
/* Register the Lisp primitives defined in this file.  NOTE(review):
   the return type, braces, and original line 489 (between the two
   visible defsubr calls -- presumably `defsubr (&Syield);') are
   missing from this extract.  Confirm against the pristine file.  */
486 syms_of_threads (void)
488 defsubr (&Srun_in_thread
);
490 defsubr (&Sinhibit_yield
);