/*
 * Server-side thread management
 *
 * Copyright (C) 1998 Alexandre Julliard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */

#include "config.h"
#include "wine/port.h"

#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <time.h>
#include <unistd.h>
#ifdef HAVE_POLL_H
#include <poll.h>
#endif

#include "ntstatus.h"
#define WIN32_NO_STATUS
#include "windef.h"
#include "winternl.h"

#include "file.h"
#include "handle.h"
#include "process.h"
#include "thread.h"
#include "request.h"
#include "user.h"
#include "security.h"
struct thread_wait
{
    struct thread_wait     *next;       /* next wait structure for this thread */
    struct thread          *thread;     /* owner thread */
    int                     count;      /* count of objects */
    int                     flags;
    void                   *cookie;     /* magic cookie to return to client */
    struct timeval          timeout;
    struct timeout_user    *user;
    struct wait_queue_entry queues[1];
};
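/* The queues[] member is a trailing array with one wait_queue_entry per
 * waited-on object; wait_on() below sizes the allocation accordingly as
 * sizeof(*wait) + (count-1) * sizeof(*entry). */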
/* asynchronous procedure calls */

struct thread_apc
{
    struct object       obj;      /* object header */
    struct list         entry;    /* queue linked list */
    struct object      *owner;    /* object that queued this apc */
    int                 executed; /* has it been executed by the client? */
    apc_call_t          call;     /* call arguments */
};
static void dump_thread_apc( struct object *obj, int verbose );
static int thread_apc_signaled( struct object *obj, struct thread *thread );

static const struct object_ops thread_apc_ops =
{
    sizeof(struct thread_apc),  /* size */
    dump_thread_apc,            /* dump */
    add_queue,                  /* add_queue */
    remove_queue,               /* remove_queue */
    thread_apc_signaled,        /* signaled */
    no_satisfied,               /* satisfied */
    no_signal,                  /* signal */
    no_get_fd,                  /* get_fd */
    no_map_access,              /* map_access */
    no_lookup_name,             /* lookup_name */
    no_close_handle,            /* close_handle */
    no_destroy                  /* destroy */
};
/* thread operations */

static void dump_thread( struct object *obj, int verbose );
static int thread_signaled( struct object *obj, struct thread *thread );
static unsigned int thread_map_access( struct object *obj, unsigned int access );
static void thread_poll_event( struct fd *fd, int event );
static void destroy_thread( struct object *obj );
static struct thread_apc *thread_dequeue_apc( struct thread *thread, int system_only );

static const struct object_ops thread_ops =
{
    sizeof(struct thread),      /* size */
    dump_thread,                /* dump */
    add_queue,                  /* add_queue */
    remove_queue,               /* remove_queue */
    thread_signaled,            /* signaled */
    no_satisfied,               /* satisfied */
    no_signal,                  /* signal */
    no_get_fd,                  /* get_fd */
    thread_map_access,          /* map_access */
    no_lookup_name,             /* lookup_name */
    no_close_handle,            /* close_handle */
    destroy_thread              /* destroy */
};
static const struct fd_ops thread_fd_ops =
{
    NULL,                       /* get_poll_events */
    thread_poll_event,          /* poll_event */
    no_flush,                   /* flush */
    no_get_file_info,           /* get_file_info */
    no_queue_async,             /* queue_async */
    no_cancel_async             /* cancel_async */
};

static struct list thread_list = LIST_INIT(thread_list);
/* initialize the structure for a newly allocated thread */
inline static void init_thread_structure( struct thread *thread )
{
    int i;

    thread->unix_pid        = -1;  /* not known yet */
    thread->unix_tid        = -1;  /* not known yet */
    thread->context         = NULL;
    thread->suspend_context = NULL;
    thread->teb             = NULL;
    thread->debug_ctx       = NULL;
    thread->debug_event     = NULL;
    thread->debug_break     = 0;
    thread->queue           = NULL;
    thread->wait            = NULL;
    thread->error           = 0;
    thread->req_data        = NULL;
    thread->req_toread      = 0;
    thread->reply_data      = NULL;
    thread->reply_towrite   = 0;
    thread->request_fd      = NULL;
    thread->reply_fd        = NULL;
    thread->wait_fd         = NULL;
    thread->state           = RUNNING;
    thread->exit_code       = 0;
    thread->priority        = 0;
    thread->affinity        = 1;
    thread->suspend         = 0;
    thread->desktop_users   = 0;
    thread->token           = NULL;

    thread->creation_time = current_time;
    thread->exit_time.tv_sec = thread->exit_time.tv_usec = 0;

    list_init( &thread->mutex_list );
    list_init( &thread->system_apc );
    list_init( &thread->user_apc );

    for (i = 0; i < MAX_INFLIGHT_FDS; i++)
        thread->inflight[i].server = thread->inflight[i].client = -1;
}
/* check if address looks valid for a client-side data structure (TEB etc.) */
static inline int is_valid_address( void *addr )
{
    return addr && !((unsigned long)addr % sizeof(int));
}
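/* For example, a TEB pointer aligned on a sizeof(int) boundary passes this
 * check, while a NULL or misaligned pointer is rejected. */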
/* create a new thread */
struct thread *create_thread( int fd, struct process *process )
{
    struct thread *thread;

    if (!(thread = alloc_object( &thread_ops ))) return NULL;

    init_thread_structure( thread );

    thread->process = (struct process *)grab_object( process );
    thread->desktop = process->desktop;
    if (!current) current = thread;

    list_add_head( &thread_list, &thread->entry );

    if (!(thread->id = alloc_ptid( thread )))
    {
        release_object( thread );
        return NULL;
    }
    if (!(thread->request_fd = create_anonymous_fd( &thread_fd_ops, fd, &thread->obj )))
    {
        release_object( thread );
        return NULL;
    }

    set_fd_events( thread->request_fd, POLLIN );  /* start listening to events */
    add_process_thread( thread->process, thread );
    return thread;
}
/* handle a client event */
static void thread_poll_event( struct fd *fd, int event )
{
    struct thread *thread = get_fd_user( fd );
    assert( thread->obj.ops == &thread_ops );

    if (event & (POLLERR | POLLHUP)) kill_thread( thread, 0 );
    else if (event & POLLIN) read_request( thread );
    else if (event & POLLOUT) write_reply( thread );
}
/* cleanup everything that is no longer needed by a dead thread */
/* used by destroy_thread and kill_thread */
static void cleanup_thread( struct thread *thread )
{
    int i;
    struct thread_apc *apc;

    while ((apc = thread_dequeue_apc( thread, 0 ))) release_object( apc );
    free( thread->req_data );
    free( thread->reply_data );
    if (thread->request_fd) release_object( thread->request_fd );
    if (thread->reply_fd) release_object( thread->reply_fd );
    if (thread->wait_fd) release_object( thread->wait_fd );
    free( thread->suspend_context );
    free_msg_queue( thread );
    cleanup_clipboard_thread(thread);
    destroy_thread_windows( thread );
    close_thread_desktop( thread );
    for (i = 0; i < MAX_INFLIGHT_FDS; i++)
    {
        if (thread->inflight[i].client != -1)
        {
            close( thread->inflight[i].server );
            thread->inflight[i].client = thread->inflight[i].server = -1;
        }
    }
    thread->req_data = NULL;
    thread->reply_data = NULL;
    thread->request_fd = NULL;
    thread->reply_fd = NULL;
    thread->wait_fd = NULL;
    thread->context = NULL;
    thread->suspend_context = NULL;
}
/* destroy a thread when its refcount is 0 */
static void destroy_thread( struct object *obj )
{
    struct thread *thread = (struct thread *)obj;
    assert( obj->ops == &thread_ops );

    assert( !thread->debug_ctx );  /* cannot still be debugging something */
    list_remove( &thread->entry );
    cleanup_thread( thread );
    release_object( thread->process );
    if (thread->id) free_ptid( thread->id );
    if (thread->token) release_object( thread->token );
}
/* dump a thread on stdout for debugging purposes */
static void dump_thread( struct object *obj, int verbose )
{
    struct thread *thread = (struct thread *)obj;
    assert( obj->ops == &thread_ops );

    fprintf( stderr, "Thread id=%04x unix pid=%d unix tid=%d teb=%p state=%d\n",
             thread->id, thread->unix_pid, thread->unix_tid, thread->teb, thread->state );
}
static int thread_signaled( struct object *obj, struct thread *thread )
{
    struct thread *mythread = (struct thread *)obj;
    return (mythread->state == TERMINATED);
}
static unsigned int thread_map_access( struct object *obj, unsigned int access )
{
    if (access & GENERIC_READ)    access |= STANDARD_RIGHTS_READ | SYNCHRONIZE;
    if (access & GENERIC_WRITE)   access |= STANDARD_RIGHTS_WRITE | SYNCHRONIZE;
    if (access & GENERIC_EXECUTE) access |= STANDARD_RIGHTS_EXECUTE;
    if (access & GENERIC_ALL)     access |= THREAD_ALL_ACCESS;
    return access & ~(GENERIC_READ | GENERIC_WRITE | GENERIC_EXECUTE | GENERIC_ALL);
}
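/* For example, a client requesting GENERIC_READ | GENERIC_EXECUTE ends up
 * with STANDARD_RIGHTS_READ | SYNCHRONIZE | STANDARD_RIGHTS_EXECUTE, and the
 * generic bits themselves are always stripped from the result. */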
static void dump_thread_apc( struct object *obj, int verbose )
{
    struct thread_apc *apc = (struct thread_apc *)obj;
    assert( obj->ops == &thread_apc_ops );

    fprintf( stderr, "APC owner=%p type=%u\n", apc->owner, apc->call.type );
}

static int thread_apc_signaled( struct object *obj, struct thread *thread )
{
    struct thread_apc *apc = (struct thread_apc *)obj;
    return apc->executed;
}
/* get a thread pointer from a thread id (and increment the refcount) */
struct thread *get_thread_from_id( thread_id_t id )
{
    struct object *obj = get_ptid_entry( id );

    if (obj && obj->ops == &thread_ops) return (struct thread *)grab_object( obj );
    set_error( STATUS_INVALID_CID );
    return NULL;
}

/* get a thread from a handle (and increment the refcount) */
struct thread *get_thread_from_handle( obj_handle_t handle, unsigned int access )
{
    return (struct thread *)get_handle_obj( current->process, handle,
                                            access, &thread_ops );
}
/* find a thread from a Unix tid */
struct thread *get_thread_from_tid( int tid )
{
    struct thread *thread;

    LIST_FOR_EACH_ENTRY( thread, &thread_list, struct thread, entry )
    {
        if (thread->unix_tid == tid) return thread;
    }
    return NULL;
}

/* find a thread from a Unix pid */
struct thread *get_thread_from_pid( int pid )
{
    struct thread *thread;

    LIST_FOR_EACH_ENTRY( thread, &thread_list, struct thread, entry )
    {
        if (thread->unix_pid == pid) return thread;
    }
    return NULL;
}
/* set all information about a thread */
static void set_thread_info( struct thread *thread,
                             const struct set_thread_info_request *req )
{
    if (req->mask & SET_THREAD_INFO_PRIORITY)
        thread->priority = req->priority;
    if (req->mask & SET_THREAD_INFO_AFFINITY)
    {
        if (req->affinity != 1) set_error( STATUS_INVALID_PARAMETER );
        else thread->affinity = req->affinity;
    }
    if (req->mask & SET_THREAD_INFO_TOKEN)
        security_set_thread_token( thread, req->token );
}
/* stop a thread (at the Unix level) */
void stop_thread( struct thread *thread )
{
    if (thread->context) return;  /* already inside a debug event, no need for a signal */
    /* can't stop a thread while initialisation is in progress */
    if (is_process_init_done(thread->process)) send_thread_signal( thread, SIGUSR1 );
}
/* suspend a thread */
static int suspend_thread( struct thread *thread )
{
    int old_count = thread->suspend;
    if (thread->suspend < MAXIMUM_SUSPEND_COUNT)
    {
        if (!(thread->process->suspend + thread->suspend++)) stop_thread( thread );
    }
    else set_error( STATUS_SUSPEND_COUNT_EXCEEDED );
    return old_count;
}

/* resume a thread */
static int resume_thread( struct thread *thread )
{
    int old_count = thread->suspend;
    if (thread->suspend > 0)
    {
        if (!(--thread->suspend + thread->process->suspend)) wake_thread( thread );
    }
    return old_count;
}
/* add a thread to an object wait queue; return 1 if OK, 0 on error */
int add_queue( struct object *obj, struct wait_queue_entry *entry )
{
    grab_object( obj );
    entry->obj = obj;
    list_add_tail( &obj->wait_queue, &entry->entry );
    return 1;
}

/* remove a thread from an object wait queue */
void remove_queue( struct object *obj, struct wait_queue_entry *entry )
{
    list_remove( &entry->entry );
    release_object( obj );
}
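/* add_queue() takes a reference on the object and remove_queue() releases
 * it, so an object stays alive as long as some thread is still waiting on
 * it. */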
static void end_wait( struct thread *thread )
{
    struct thread_wait *wait = thread->wait;
    struct wait_queue_entry *entry;
    int i;

    assert( wait );
    for (i = 0, entry = wait->queues; i < wait->count; i++, entry++)
        entry->obj->ops->remove_queue( entry->obj, entry );
    if (wait->user) remove_timeout_user( wait->user );
    thread->wait = wait->next;
    free( wait );
}
/* build the thread wait structure */
static int wait_on( int count, struct object *objects[], int flags, const abs_time_t *timeout )
{
    struct thread_wait *wait;
    struct wait_queue_entry *entry;
    int i;

    if (!(wait = mem_alloc( sizeof(*wait) + (count-1) * sizeof(*entry) ))) return 0;
    wait->next    = current->wait;
    wait->thread  = current;
    wait->count   = count;
    wait->flags   = flags;
    wait->user    = NULL;
    current->wait = wait;
    if (flags & SELECT_TIMEOUT)
    {
        wait->timeout.tv_sec  = timeout->sec;
        wait->timeout.tv_usec = timeout->usec;
    }

    for (i = 0, entry = wait->queues; i < count; i++, entry++)
    {
        struct object *obj = objects[i];
        entry->thread = current;
        if (!obj->ops->add_queue( obj, entry ))
        {
            wait->count = i;
            end_wait( current );
            return 0;
        }
    }
    return 1;
}
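/* check_wait() below computes the status to report to the waiting thread:
 * the zero-based index of the object that satisfied the wait (0 when waiting
 * on all objects), STATUS_ABANDONED_WAIT_0 + index when the satisfied()
 * callback reports an abandoned object, STATUS_USER_APC when an APC should
 * run, STATUS_TIMEOUT when the timeout expired, or -1 if the wait is not
 * satisfied yet. */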
/* check if the thread waiting condition is satisfied */
static int check_wait( struct thread *thread )
{
    int i, signaled;
    struct thread_wait *wait = thread->wait;
    struct wait_queue_entry *entry = wait->queues;

    /* Suspended threads may not acquire locks, but they can run system APCs */
    if (thread->process->suspend + thread->suspend > 0)
    {
        if ((wait->flags & SELECT_INTERRUPTIBLE) && !list_empty( &thread->system_apc ))
            return STATUS_USER_APC;
        return -1;
    }

    assert( wait );
    if (wait->flags & SELECT_ALL)
    {
        int not_ok = 0;
        /* Note: we must check them all anyway, as some objects may
         * want to do something when signaled, even if others are not */
        for (i = 0, entry = wait->queues; i < wait->count; i++, entry++)
            not_ok |= !entry->obj->ops->signaled( entry->obj, thread );
        if (not_ok) goto other_checks;
        /* Wait satisfied: tell it to all objects */
        signaled = 0;
        for (i = 0, entry = wait->queues; i < wait->count; i++, entry++)
            if (entry->obj->ops->satisfied( entry->obj, thread ))
                signaled = STATUS_ABANDONED_WAIT_0;
        return signaled;
    }
    else
    {
        for (i = 0, entry = wait->queues; i < wait->count; i++, entry++)
        {
            if (!entry->obj->ops->signaled( entry->obj, thread )) continue;
            /* Wait satisfied: tell it to the object */
            signaled = i;
            if (entry->obj->ops->satisfied( entry->obj, thread ))
                signaled = i + STATUS_ABANDONED_WAIT_0;
            return signaled;
        }
    }

 other_checks:
    if ((wait->flags & SELECT_INTERRUPTIBLE) && !list_empty(&thread->system_apc)) return STATUS_USER_APC;
    if ((wait->flags & SELECT_ALERTABLE) && !list_empty(&thread->user_apc)) return STATUS_USER_APC;
    if (wait->flags & SELECT_TIMEOUT)
    {
        if (!time_before( &current_time, &wait->timeout )) return STATUS_TIMEOUT;
    }
    return -1;
}
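/* Wakeup protocol: the server writes a struct wake_up_reply holding the
 * client's cookie and the wait status to the thread's wait_fd; the client
 * side is expected to block reading that fd until the reply arrives. */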
/* send the wakeup signal to a thread */
static int send_thread_wakeup( struct thread *thread, void *cookie, int signaled )
{
    struct wake_up_reply reply;
    int ret;

    reply.cookie   = cookie;
    reply.signaled = signaled;
    if ((ret = write( get_unix_fd( thread->wait_fd ), &reply, sizeof(reply) )) == sizeof(reply))
        return 0;
    if (ret >= 0)
        fatal_protocol_error( thread, "partial wakeup write %d\n", ret );
    else if (errno == EPIPE)
        kill_thread( thread, 0 );  /* normal death */
    else
        fatal_protocol_perror( thread, "write" );
    return -1;
}
/* attempt to wake up a thread */
/* return >0 if OK, 0 if the wait condition is still not satisfied */
int wake_thread( struct thread *thread )
{
    int signaled, count;
    void *cookie;

    for (count = 0; thread->wait; count++)
    {
        if ((signaled = check_wait( thread )) == -1) break;

        cookie = thread->wait->cookie;
        if (debug_level) fprintf( stderr, "%04x: *wakeup* signaled=%d cookie=%p\n",
                                  thread->id, signaled, cookie );
        end_wait( thread );
        if (send_thread_wakeup( thread, cookie, signaled ) == -1) /* error */
            break;
    }
    return count;
}
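/* thread_timeout() below is the callback that select_on() registers through
 * add_timeout_user() when SELECT_TIMEOUT is set; wait->user records the
 * registration so that end_wait() can cancel it if the wait finishes first. */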
/* thread wait timeout */
static void thread_timeout( void *ptr )
{
    struct thread_wait *wait = ptr;
    struct thread *thread = wait->thread;
    void *cookie = wait->cookie;

    wait->user = NULL;
    if (thread->wait != wait) return;  /* not the top-level wait, ignore it */
    if (thread->suspend + thread->process->suspend > 0) return;  /* suspended, ignore it */

    if (debug_level) fprintf( stderr, "%04x: *wakeup* signaled=%d cookie=%p\n",
                              thread->id, (int)STATUS_TIMEOUT, cookie );
    end_wait( thread );
    if (send_thread_wakeup( thread, cookie, STATUS_TIMEOUT ) == -1) return;
    /* check if other objects have become signaled in the meantime */
    wake_thread( thread );
}
/* try signaling an event flag, a semaphore or a mutex */
static int signal_object( obj_handle_t handle )
{
    struct object *obj;
    int ret = 0;

    obj = get_handle_obj( current->process, handle, 0, NULL );
    if (obj)
    {
        ret = obj->ops->signal( obj, get_handle_access( current->process, handle ));
        release_object( obj );
    }
    return ret;
}
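/* Flow of a select request: the handles are resolved to objects, wait_on()
 * builds the thread_wait structure, and the optional signal_obj handle is
 * signaled first (signal-and-wait style).  If the wait is not satisfied
 * immediately the request completes with STATUS_PENDING and the thread is
 * woken up later through its wait_fd, with the cookie identifying the wait. */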
/* select on a list of handles */
static void select_on( int count, void *cookie, const obj_handle_t *handles,
                       int flags, const abs_time_t *timeout, obj_handle_t signal_obj )
{
    int ret, i;
    struct object *objects[MAXIMUM_WAIT_OBJECTS];

    if ((count < 0) || (count > MAXIMUM_WAIT_OBJECTS))
    {
        set_error( STATUS_INVALID_PARAMETER );
        return;
    }
    for (i = 0; i < count; i++)
    {
        if (!(objects[i] = get_handle_obj( current->process, handles[i], SYNCHRONIZE, NULL )))
            break;
    }

    if (i < count) goto done;
    if (!wait_on( count, objects, flags, timeout )) goto done;

    /* signal the object */
    if (signal_obj)
    {
        if (!signal_object( signal_obj ))
        {
            end_wait( current );
            goto done;
        }
        /* check if we woke ourselves up */
        if (!current->wait) goto done;
    }

    if ((ret = check_wait( current )) != -1)
    {
        /* condition is already satisfied */
        end_wait( current );
        set_error( ret );
        goto done;
    }

    /* now we need to wait */
    if (flags & SELECT_TIMEOUT)
    {
        if (!(current->wait->user = add_timeout_user( &current->wait->timeout,
                                                      thread_timeout, current->wait )))
        {
            end_wait( current );
            goto done;
        }
    }
    current->wait->cookie = cookie;
    set_error( STATUS_PENDING );

 done:
    while (--i >= 0) release_object( objects[i] );
}
/* attempt to wake threads sleeping on the object wait queue */
void wake_up( struct object *obj, int max )
{
    struct list *ptr, *next;

    LIST_FOR_EACH_SAFE( ptr, next, &obj->wait_queue )
    {
        struct wait_queue_entry *entry = LIST_ENTRY( ptr, struct wait_queue_entry, entry );
        if (wake_thread( entry->thread ))
        {
            if (max && !--max) break;
        }
    }
}
/* return the apc queue to use for a given apc type */
static inline struct list *get_apc_queue( struct thread *thread, enum apc_type type )
{
    switch(type)
    {
    case APC_NONE:
    case APC_USER:
    case APC_TIMER:
        return &thread->user_apc;
    default:
        return &thread->system_apc;
    }
}
/* queue an async procedure call */
int thread_queue_apc( struct thread *thread, struct object *owner, const apc_call_t *call_data )
{
    struct thread_apc *apc;
    struct list *queue = get_apc_queue( thread, call_data->type );

    /* cancel a possible previous APC with the same owner */
    if (owner) thread_cancel_apc( thread, owner, call_data->type );
    if (thread->state == TERMINATED) return 0;

    if (!(apc = alloc_object( &thread_apc_ops ))) return 0;
    apc->call     = *call_data;
    apc->owner    = owner;
    apc->executed = 0;
    list_add_tail( queue, &apc->entry );
    if (!list_prev( queue, &apc->entry ))  /* first one */
        wake_thread( thread );

    return 1;
}
/* cancel the async procedure call owned by a specific object */
void thread_cancel_apc( struct thread *thread, struct object *owner, enum apc_type type )
{
    struct thread_apc *apc;
    struct list *queue = get_apc_queue( thread, type );

    LIST_FOR_EACH_ENTRY( apc, queue, struct thread_apc, entry )
    {
        if (apc->owner != owner) continue;
        list_remove( &apc->entry );
        release_object( apc );
        return;
    }
}
/* remove the head apc from the queue; the returned pointer must be freed by the caller */
static struct thread_apc *thread_dequeue_apc( struct thread *thread, int system_only )
{
    struct thread_apc *apc = NULL;
    struct list *ptr = list_head( &thread->system_apc );

    if (!ptr && !system_only) ptr = list_head( &thread->user_apc );
    if (ptr)
    {
        apc = LIST_ENTRY( ptr, struct thread_apc, entry );
        list_remove( ptr );
    }
    return apc;
}
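/* The inflight fd table below parks file descriptors passed by the client
 * over the request socket until the request that refers to them arrives;
 * thread_get_inflight_fd() retries receive_fd() in case the descriptor is
 * still sitting in the socket buffer. */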
/* add an fd to the inflight list */
/* return list index, or -1 on error */
int thread_add_inflight_fd( struct thread *thread, int client, int server )
{
    int i;

    if (server == -1) return -1;
    if (client == -1)
    {
        close( server );
        return -1;
    }

    /* first check if we already have an entry for this fd */
    for (i = 0; i < MAX_INFLIGHT_FDS; i++)
        if (thread->inflight[i].client == client)
        {
            close( thread->inflight[i].server );
            thread->inflight[i].server = server;
            return i;
        }

    /* now find a free spot to store it */
    for (i = 0; i < MAX_INFLIGHT_FDS; i++)
        if (thread->inflight[i].client == -1)
        {
            thread->inflight[i].client = client;
            thread->inflight[i].server = server;
            return i;
        }
    return -1;
}
/* get an inflight fd and purge it from the list */
/* the fd must be closed when no longer used */
int thread_get_inflight_fd( struct thread *thread, int client )
{
    int i, ret = -1;

    if (client == -1) return -1;

    do
    {
        for (i = 0; i < MAX_INFLIGHT_FDS; i++)
        {
            if (thread->inflight[i].client == client)
            {
                ret = thread->inflight[i].server;
                thread->inflight[i].server = thread->inflight[i].client = -1;
                return ret;
            }
        }
    } while (!receive_fd( thread->process ));  /* in case it is still in the socket buffer */
    return -1;
}
/* kill a thread on the spot */
void kill_thread( struct thread *thread, int violent_death )
{
    if (thread->state == TERMINATED) return;  /* already killed */
    thread->state = TERMINATED;
    thread->exit_time = current_time;
    if (current == thread) current = NULL;
    if (debug_level)
        fprintf( stderr,"%04x: *killed* exit_code=%d\n",
                 thread->id, thread->exit_code );
    if (thread->wait)
    {
        while (thread->wait) end_wait( thread );
        send_thread_wakeup( thread, NULL, STATUS_PENDING );
        /* if it is waiting on the socket, we don't need to send a SIGTERM */
        violent_death = 0;
    }
    kill_console_processes( thread, 0 );
    debug_exit_thread( thread );
    abandon_mutexes( thread );
    wake_up( &thread->obj, 0 );
    if (violent_death) send_thread_signal( thread, SIGTERM );
    cleanup_thread( thread );
    remove_process_thread( thread->process, thread );
    release_object( thread );
}
/* trigger a breakpoint event in a given thread */
void break_thread( struct thread *thread )
{
    struct debug_event_exception data;

    assert( thread->context );

    data.record.ExceptionCode    = STATUS_BREAKPOINT;
    data.record.ExceptionFlags   = EXCEPTION_CONTINUABLE;
    data.record.ExceptionRecord  = NULL;
    data.record.ExceptionAddress = get_context_ip( thread->context );
    data.record.NumberParameters = 0;
    data.first = 1;
    generate_debug_event( thread, EXCEPTION_DEBUG_EVENT, &data );
    thread->debug_break = 0;
}
/* take a snapshot of currently running threads */
struct thread_snapshot *thread_snap( int *count )
{
    struct thread_snapshot *snapshot, *ptr;
    struct thread *thread;
    int total = 0;

    LIST_FOR_EACH_ENTRY( thread, &thread_list, struct thread, entry )
        if (thread->state != TERMINATED) total++;
    if (!total || !(snapshot = mem_alloc( sizeof(*snapshot) * total ))) return NULL;
    ptr = snapshot;
    LIST_FOR_EACH_ENTRY( thread, &thread_list, struct thread, entry )
    {
        if (thread->state == TERMINATED) continue;
        ptr->thread   = thread;
        ptr->count    = thread->obj.refcount;
        ptr->priority = thread->priority;
        grab_object( thread );
        ptr++;
    }
    *count = total;
    return snapshot;
}
/* gets the current impersonation token */
struct token *thread_get_impersonation_token( struct thread *thread )
{
    if (thread->token)
        return thread->token;
    else
        return thread->process->token;
}
/* create a new thread */
DECL_HANDLER(new_thread)
{
    struct thread *thread;
    int request_fd = thread_get_inflight_fd( current, req->request_fd );

    if (request_fd == -1 || fcntl( request_fd, F_SETFL, O_NONBLOCK ) == -1)
    {
        if (request_fd != -1) close( request_fd );
        set_error( STATUS_INVALID_HANDLE );
        return;
    }

    if ((thread = create_thread( request_fd, current->process )))
    {
        if (req->suspend) thread->suspend++;
        reply->tid = get_thread_id( thread );
        if ((reply->handle = alloc_handle( current->process, thread, req->access, req->attributes )))
        {
            /* thread object will be released when the thread gets killed */
            return;
        }
        kill_thread( thread, 1 );
    }
}
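/* A client thread talks to the server through three descriptors: the request
 * fd set up by create_thread(), plus the reply fd and wait fd that the
 * init_thread request below transfers through the inflight fd mechanism. */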
/* initialize a new thread */
DECL_HANDLER(init_thread)
{
    struct process *process = current->process;
    int reply_fd = thread_get_inflight_fd( current, req->reply_fd );
    int wait_fd = thread_get_inflight_fd( current, req->wait_fd );

    if (current->reply_fd)  /* already initialised */
    {
        set_error( STATUS_INVALID_PARAMETER );
        goto error;
    }

    if (reply_fd == -1 || fcntl( reply_fd, F_SETFL, O_NONBLOCK ) == -1) goto error;

    current->reply_fd = create_anonymous_fd( &thread_fd_ops, reply_fd, &current->obj );
    reply_fd = -1;
    if (!current->reply_fd) goto error;

    if (wait_fd == -1)
    {
        set_error( STATUS_TOO_MANY_OPENED_FILES );  /* most likely reason */
        return;
    }
    if (!(current->wait_fd = create_anonymous_fd( &thread_fd_ops, wait_fd, &current->obj )))
        return;

    if (!is_valid_address(req->teb) || !is_valid_address(req->peb) || !is_valid_address(req->ldt_copy))
    {
        set_error( STATUS_INVALID_PARAMETER );
        return;
    }

    current->unix_pid = req->unix_pid;
    current->unix_tid = req->unix_tid;
    current->teb      = req->teb;

    if (!process->peb)  /* first thread, initialize the process too */
    {
        process->unix_pid = current->unix_pid;
        process->peb      = req->peb;
        process->ldt_copy = req->ldt_copy;
        reply->info_size = init_process( current );
    }
    else
    {
        if (process->unix_pid != current->unix_pid)
            process->unix_pid = -1;  /* can happen with linuxthreads */
        if (current->suspend + process->suspend > 0) stop_thread( current );
        generate_debug_event( current, CREATE_THREAD_DEBUG_EVENT, req->entry );
    }
    debug_level = max( debug_level, req->debug_level );

    reply->pid     = get_process_id( process );
    reply->tid     = get_thread_id( current );
    reply->version = SERVER_PROTOCOL_VERSION;
    reply->server_start.sec  = server_start_time.tv_sec;
    reply->server_start.usec = server_start_time.tv_usec;
    return;

 error:
    if (reply_fd != -1) close( reply_fd );
    if (wait_fd != -1) close( wait_fd );
}
/* terminate a thread */
DECL_HANDLER(terminate_thread)
{
    struct thread *thread;

    reply->self = 0;
    reply->last = 0;
    if ((thread = get_thread_from_handle( req->handle, THREAD_TERMINATE )))
    {
        thread->exit_code = req->exit_code;
        if (thread != current) kill_thread( thread, 1 );
        else
        {
            reply->self = 1;
            reply->last = (thread->process->running_threads == 1);
        }
        release_object( thread );
    }
}
/* open a handle to a thread */
DECL_HANDLER(open_thread)
{
    struct thread *thread = get_thread_from_id( req->tid );

    reply->handle = 0;
    if (thread)
    {
        reply->handle = alloc_handle( current->process, thread, req->access, req->attributes );
        release_object( thread );
    }
}
/* fetch information about a thread */
DECL_HANDLER(get_thread_info)
{
    struct thread *thread;
    obj_handle_t handle = req->handle;

    if (!handle) thread = get_thread_from_id( req->tid_in );
    else thread = get_thread_from_handle( req->handle, THREAD_QUERY_INFORMATION );

    if (thread)
    {
        reply->pid            = get_process_id( thread->process );
        reply->tid            = get_thread_id( thread );
        reply->teb            = thread->teb;
        reply->exit_code      = (thread->state == TERMINATED) ? thread->exit_code : STATUS_PENDING;
        reply->priority       = thread->priority;
        reply->affinity       = thread->affinity;
        reply->creation_time.sec  = thread->creation_time.tv_sec;
        reply->creation_time.usec = thread->creation_time.tv_usec;
        reply->exit_time.sec  = thread->exit_time.tv_sec;
        reply->exit_time.usec = thread->exit_time.tv_usec;
        reply->last           = thread->process->running_threads == 1;

        release_object( thread );
    }
}
/* set information about a thread */
DECL_HANDLER(set_thread_info)
{
    struct thread *thread;

    if ((thread = get_thread_from_handle( req->handle, THREAD_SET_INFORMATION )))
    {
        set_thread_info( thread, req );
        release_object( thread );
    }
}
/* suspend a thread */
DECL_HANDLER(suspend_thread)
{
    struct thread *thread;

    if ((thread = get_thread_from_handle( req->handle, THREAD_SUSPEND_RESUME )))
    {
        if (thread->state == TERMINATED) set_error( STATUS_ACCESS_DENIED );
        else reply->count = suspend_thread( thread );
        release_object( thread );
    }
}

/* resume a thread */
DECL_HANDLER(resume_thread)
{
    struct thread *thread;

    if ((thread = get_thread_from_handle( req->handle, THREAD_SUSPEND_RESUME )))
    {
        if (thread->state == TERMINATED) set_error( STATUS_ACCESS_DENIED );
        else reply->count = resume_thread( thread );
        release_object( thread );
    }
}
/* select on a handle list */
DECL_HANDLER(select)
{
    int count = get_req_data_size() / sizeof(obj_handle_t);
    select_on( count, req->cookie, get_req_data(), req->flags, &req->timeout, req->signal );
}
/* queue an APC for a thread */
DECL_HANDLER(queue_apc)
{
    struct thread *thread;
    if ((thread = get_thread_from_handle( req->handle, THREAD_SET_CONTEXT )))
    {
        switch( req->call.type )
        {
        case APC_NONE:
        case APC_USER:
            thread_queue_apc( thread, NULL, &req->call );
            break;
        default:
            set_error( STATUS_INVALID_PARAMETER );
            break;
        }
        release_object( thread );
    }
}
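/* APC completion protocol, as far as this handler shows: get_apc hands the
 * client a handle to the APC object along with the call data; the client is
 * expected to pass that handle back as req->prev once the call has run,
 * which marks the APC as executed and wakes up anything waiting on it. */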
/* get next APC to call */
DECL_HANDLER(get_apc)
{
    struct thread_apc *apc;
    int system_only = !req->alertable;

    if (req->prev)
    {
        if (!(apc = (struct thread_apc *)get_handle_obj( current->process, req->prev,
                                                         0, &thread_apc_ops ))) return;
        apc->executed = 1;
        wake_up( &apc->obj, 0 );
        close_handle( current->process, req->prev );
        release_object( apc );
    }

    if (current->suspend + current->process->suspend > 0) system_only = 1;

    for (;;)
    {
        if (!(apc = thread_dequeue_apc( current, system_only )))
        {
            set_error( STATUS_PENDING );
            return;
        }
        /* Optimization: ignore APC_NONE calls, they are only used to
         * wake up a thread, but since we got here the thread woke up already.
         */
        if (apc->call.type != APC_NONE) break;
        release_object( apc );
    }

    if ((reply->handle = alloc_handle( current->process, apc, SYNCHRONIZE, 0 )))
        reply->call = apc->call;
    release_object( apc );
}
/* retrieve the current context of a thread */
DECL_HANDLER(get_thread_context)
{
    struct thread *thread;
    CONTEXT *context;

    if (get_reply_max_size() < sizeof(CONTEXT))
    {
        set_error( STATUS_INVALID_PARAMETER );
        return;
    }
    if (!(thread = get_thread_from_handle( req->handle, THREAD_GET_CONTEXT ))) return;

    if (req->suspend)
    {
        if (thread != current || !thread->suspend_context)
        {
            /* not suspended, shouldn't happen */
            set_error( STATUS_INVALID_PARAMETER );
        }
        else
        {
            if (thread->context == thread->suspend_context) thread->context = NULL;
            set_reply_data_ptr( thread->suspend_context, sizeof(CONTEXT) );
            thread->suspend_context = NULL;
        }
    }
    else if (thread != current && !thread->context)
    {
        /* thread is not suspended, retry (if it's still running) */
        if (thread->state != RUNNING) set_error( STATUS_ACCESS_DENIED );
        else set_error( STATUS_PENDING );
    }
    else if ((context = set_reply_data_size( sizeof(CONTEXT) )))
    {
        unsigned int flags = get_context_system_regs( req->flags );

        memset( context, 0, sizeof(CONTEXT) );
        context->ContextFlags = get_context_cpu_flag();
        if (thread->context) copy_context( context, thread->context, req->flags & ~flags );
        if (flags) get_thread_context( thread, context, flags );
    }
    reply->self = (thread == current);
    release_object( thread );
}
/* set the current context of a thread */
DECL_HANDLER(set_thread_context)
{
    struct thread *thread;

    if (get_req_data_size() < sizeof(CONTEXT))
    {
        set_error( STATUS_INVALID_PARAMETER );
        return;
    }
    if (!(thread = get_thread_from_handle( req->handle, THREAD_SET_CONTEXT ))) return;

    if (req->suspend)
    {
        if (thread != current || thread->context)
        {
            /* nested suspend or exception, shouldn't happen */
            set_error( STATUS_INVALID_PARAMETER );
        }
        else if ((thread->suspend_context = mem_alloc( sizeof(CONTEXT) )))
        {
            memcpy( thread->suspend_context, get_req_data(), sizeof(CONTEXT) );
            thread->context = thread->suspend_context;
            if (thread->debug_break) break_thread( thread );
        }
    }
    else if (thread != current && !thread->context)
    {
        /* thread is not suspended, retry (if it's still running) */
        if (thread->state != RUNNING) set_error( STATUS_ACCESS_DENIED );
        else set_error( STATUS_PENDING );
    }
    else
    {
        const CONTEXT *context = get_req_data();
        unsigned int flags = get_context_system_regs( req->flags );

        if (flags) set_thread_context( thread, context, flags );
        if (thread->context && !get_error())
            copy_context( thread->context, context, req->flags & ~flags );
    }
    reply->self = (thread == current);
    release_object( thread );
}
/* fetch a selector entry for a thread */
DECL_HANDLER(get_selector_entry)
{
    struct thread *thread;
    if ((thread = get_thread_from_handle( req->handle, THREAD_QUERY_INFORMATION )))
    {
        get_selector_entry( thread, req->entry, &reply->base, &reply->limit, &reply->flags );
        release_object( thread );
    }
}