/*
 * Server-side thread management
 *
 * Copyright (C) 1998 Alexandre Julliard
 */

#include "config.h"

#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#ifdef HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif
#include <sys/types.h>
#include <unistd.h>

#include "handle.h"
#include "process.h"
#include "thread.h"
#include "request.h"
struct thread_wait
{
    struct thread_wait     *next;       /* next wait structure for this thread */
    struct thread          *thread;     /* owner thread */
    int                     count;      /* count of objects */
    int                     flags;      /* wait flags (SELECT_*) */
    void                   *cookie;     /* magic cookie to return to client */
    struct timeval          timeout;
    struct timeout_user    *user;
    struct wait_queue_entry queues[1];
};
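/*
 * Illustrative sketch (not part of the original file): wait structures are
 * stacked per thread through "next", and queues[1] is over-allocated so that
 * one wait_queue_entry per waited-on object follows the header, e.g.:
 *
 *     struct thread_wait *wait;
 *     wait = mem_alloc( sizeof(*wait) + (count - 1) * sizeof(wait->queues[0]) );
 *     wait->next = current->wait;   // push on top of any outer wait
 *     current->wait = wait;         // popped again by end_wait()
 */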
/* asynchronous procedure calls */

struct thread_apc
{
    struct thread_apc *next;     /* queue linked list */
    struct thread_apc *prev;
    struct object     *owner;    /* object that queued this apc */
    void              *func;     /* function to call in client */
    enum apc_type      type;     /* type of apc function */
    int                nb_args;  /* number of arguments */
    void              *args[1];  /* function arguments */
};
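/*
 * Illustrative sketch (not part of the original file): args[1] is the same
 * over-allocation trick, so an APC carrying nb_args pointers lives in a
 * single block that the caller of thread_dequeue_apc() can free() at once:
 *
 *     struct thread_apc *apc;
 *     apc = mem_alloc( sizeof(*apc) + (nb_args - 1) * sizeof(apc->args[0]) );
 */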
/* thread operations */

static void dump_thread( struct object *obj, int verbose );
static int thread_signaled( struct object *obj, struct thread *thread );
static void thread_poll_event( struct object *obj, int event );
static void destroy_thread( struct object *obj );
static struct thread_apc *thread_dequeue_apc( struct thread *thread, int system_only );
static const struct object_ops thread_ops =
{
    sizeof(struct thread),      /* size */
    dump_thread,                /* dump */
    add_queue,                  /* add_queue */
    remove_queue,               /* remove_queue */
    thread_signaled,            /* signaled */
    no_satisfied,               /* satisfied */
    NULL,                       /* get_poll_events */
    thread_poll_event,          /* poll_event */
    no_get_fd,                  /* get_fd */
    no_flush,                   /* flush */
    no_get_file_info,           /* get_file_info */
    destroy_thread              /* destroy */
};
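/*
 * Illustrative sketch (an assumption, not part of the original file): the
 * generic object layer calls back through this table, so the wait code never
 * needs to know that the object it handles is a thread:
 *
 *     if (obj->ops->signaled( obj, current ))   // dispatches to thread_signaled()
 *         obj->ops->satisfied( obj, current );  // dispatches to no_satisfied()
 */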
static struct thread *first_thread;
static struct thread *booting_thread;
/* initialize the structure for a newly allocated thread */
inline static void init_thread_structure( struct thread *thread )
{
    int i;

    thread->unix_pid        = 0;  /* not known yet */
    thread->context         = NULL;
    thread->debug_ctx       = NULL;
    thread->debug_event     = NULL;
    thread->system_apc.head = NULL;
    thread->system_apc.tail = NULL;
    thread->user_apc.head   = NULL;
    thread->user_apc.tail   = NULL;
    thread->request_fd      = NULL;
    thread->reply_fd        = -1;
    thread->wait_fd         = -1;
    thread->state           = RUNNING;
    thread->attached        = 0;
    thread->exit_code       = 0;
    thread->priority        = THREAD_PRIORITY_NORMAL;
    thread->affinity        = 1;
    thread->buffer          = (void *)-1;

    for (i = 0; i < MAX_INFLIGHT_FDS; i++)
        thread->inflight[i].server = thread->inflight[i].client = -1;
}
/* create a new thread */
struct thread *create_thread( int fd, struct process *process )
{
    struct thread *thread;

    if (!(thread = alloc_object( &thread_ops, fd ))) return NULL;

    init_thread_structure( thread );

    thread->process = (struct process *)grab_object( process );
    if (!current) current = thread;

    if (!booting_thread)  /* first thread ever */
    {
        booting_thread = thread;
        lock_master_socket(1);
    }

    if ((thread->next = first_thread) != NULL) thread->next->prev = thread;
    first_thread = thread;

    fcntl( fd, F_SETFL, O_NONBLOCK );
    set_select_events( &thread->obj, POLLIN );  /* start listening to events */
    add_process_thread( thread->process, thread );
    return thread;
}
/* handle a client event */
static void thread_poll_event( struct object *obj, int event )
{
    struct thread *thread = (struct thread *)obj;
    assert( obj->ops == &thread_ops );

    if (event & (POLLERR | POLLHUP)) kill_thread( thread, 0 );
    else if (event & POLLIN) read_request( thread );
}
/* cleanup everything that is no longer needed by a dead thread */
/* used by destroy_thread and kill_thread */
static void cleanup_thread( struct thread *thread )
{
    int i;
    struct thread_apc *apc;

    while ((apc = thread_dequeue_apc( thread, 0 ))) free( apc );
    if (thread->buffer != (void *)-1) munmap( thread->buffer, MAX_REQUEST_LENGTH );
    if (thread->reply_fd != -1) close( thread->reply_fd );
    if (thread->wait_fd != -1) close( thread->wait_fd );
    if (thread->request_fd) release_object( thread->request_fd );
    for (i = 0; i < MAX_INFLIGHT_FDS; i++)
    {
        if (thread->inflight[i].client != -1)
        {
            close( thread->inflight[i].server );
            thread->inflight[i].client = thread->inflight[i].server = -1;
        }
    }
    thread->buffer     = (void *)-1;
    thread->reply_fd   = -1;
    thread->wait_fd    = -1;
    thread->request_fd = NULL;
}
/* destroy a thread when its refcount is 0 */
static void destroy_thread( struct object *obj )
{
    struct thread_apc *apc;
    struct thread *thread = (struct thread *)obj;
    assert( obj->ops == &thread_ops );

    assert( !thread->debug_ctx );  /* cannot still be debugging something */
    release_object( thread->process );
    if (thread->next) thread->next->prev = thread->prev;
    if (thread->prev) thread->prev->next = thread->next;
    else first_thread = thread->next;
    while ((apc = thread_dequeue_apc( thread, 0 ))) free( apc );
    if (thread->info) release_object( thread->info );
    if (thread->queue) release_object( thread->queue );
    cleanup_thread( thread );
}
/* dump a thread on stdout for debugging purposes */
static void dump_thread( struct object *obj, int verbose )
{
    struct thread *thread = (struct thread *)obj;
    assert( obj->ops == &thread_ops );

    fprintf( stderr, "Thread pid=%d teb=%p state=%d\n",
             thread->unix_pid, thread->teb, thread->state );
}
static int thread_signaled( struct object *obj, struct thread *thread )
{
    struct thread *mythread = (struct thread *)obj;
    return (mythread->state == TERMINATED);
}
/* get a thread pointer from a thread id (and increment the refcount) */
struct thread *get_thread_from_id( void *id )
{
    struct thread *t = first_thread;
    while (t && (t != id)) t = t->next;
    if (t) grab_object( t );
    return t;
}
/* get a thread from a handle (and increment the refcount) */
struct thread *get_thread_from_handle( handle_t handle, unsigned int access )
{
    return (struct thread *)get_handle_obj( current->process, handle,
                                            access, &thread_ops );
}
/* find a thread from a Unix pid */
struct thread *get_thread_from_pid( int pid )
{
    struct thread *t = first_thread;
    while (t && (t->unix_pid != pid)) t = t->next;
    return t;
}
/* set all information about a thread */
static void set_thread_info( struct thread *thread,
                             struct set_thread_info_request *req )
{
    if (req->mask & SET_THREAD_INFO_PRIORITY)
        thread->priority = req->priority;
    if (req->mask & SET_THREAD_INFO_AFFINITY)
    {
        if (req->affinity != 1) set_error( STATUS_INVALID_PARAMETER );
        else thread->affinity = req->affinity;
    }
}
/* suspend a thread */
int suspend_thread( struct thread *thread, int check_limit )
{
    int old_count = thread->suspend;
    if (thread->suspend < MAXIMUM_SUSPEND_COUNT || !check_limit)
    {
        if (!(thread->process->suspend + thread->suspend++)) stop_thread( thread );
    }
    else set_error( STATUS_SUSPEND_COUNT_EXCEEDED );
    return old_count;
}
/* resume a thread */
int resume_thread( struct thread *thread )
{
    int old_count = thread->suspend;
    if (thread->suspend > 0)
    {
        if (!(--thread->suspend + thread->process->suspend)) continue_thread( thread );
    }
    return old_count;
}
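/*
 * Illustrative sketch (an assumption, not part of the original file): with no
 * process-wide suspend in effect, only the 0 <-> 1 transitions of the count
 * actually stop or restart the Unix process:
 *
 *     suspend_thread( t, 1 );   // returns 0, thread stops   (count 0 -> 1)
 *     suspend_thread( t, 1 );   // returns 1, stays stopped  (count 1 -> 2)
 *     resume_thread( t );       // returns 2, stays stopped  (count 2 -> 1)
 *     resume_thread( t );       // returns 1, thread runs    (count 1 -> 0)
 */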
/* suspend all threads but the current */
void suspend_all_threads( void )
{
    struct thread *thread;
    for ( thread = first_thread; thread; thread = thread->next )
        if ( thread != current )
            suspend_thread( thread, 0 );
}
/* resume all threads but the current */
void resume_all_threads( void )
{
    struct thread *thread;
    for ( thread = first_thread; thread; thread = thread->next )
        if ( thread != current )
            resume_thread( thread );
}
/* add a thread to an object wait queue; return 1 if OK, 0 on error */
int add_queue( struct object *obj, struct wait_queue_entry *entry )
{
    grab_object( obj );
    entry->obj  = obj;
    entry->prev = obj->tail;
    entry->next = NULL;
    if (obj->tail) obj->tail->next = entry;
    else obj->head = entry;
    obj->tail = entry;
    return 1;
}
/* remove a thread from an object wait queue */
void remove_queue( struct object *obj, struct wait_queue_entry *entry )
{
    if (entry->next) entry->next->prev = entry->prev;
    else obj->tail = entry->prev;
    if (entry->prev) entry->prev->next = entry->next;
    else obj->head = entry->next;
    release_object( obj );
}
/* finish waiting on the current wait structure and pop it off the stack */
static void end_wait( struct thread *thread )
{
    struct thread_wait *wait = thread->wait;
    struct wait_queue_entry *entry;
    int i;

    assert( wait );
    for (i = 0, entry = wait->queues; i < wait->count; i++, entry++)
        entry->obj->ops->remove_queue( entry->obj, entry );
    if (wait->user) remove_timeout_user( wait->user );
    thread->wait = wait->next;
    free( wait );
}
/* build the thread wait structure */
static int wait_on( int count, struct object *objects[], int flags, int sec, int usec )
{
    struct thread_wait *wait;
    struct wait_queue_entry *entry;
    int i;

    if (!(wait = mem_alloc( sizeof(*wait) + (count-1) * sizeof(*entry) ))) return 0;
    wait->next    = current->wait;
    wait->thread  = current;
    wait->count   = count;
    wait->flags   = flags;
    wait->user    = NULL;
    current->wait = wait;
    if (flags & SELECT_TIMEOUT)
    {
        wait->timeout.tv_sec  = sec;
        wait->timeout.tv_usec = usec;
    }

    for (i = 0, entry = wait->queues; i < count; i++, entry++)
    {
        struct object *obj = objects[i];
        entry->thread = current;
        if (!obj->ops->add_queue( obj, entry ))
        {
            wait->count = i;
            end_wait( current );
            return 0;
        }
    }
    return 1;
}
/* check if the thread waiting condition is satisfied */
static int check_wait( struct thread *thread )
{
    int i, signaled;
    struct thread_wait *wait = thread->wait;
    struct wait_queue_entry *entry = wait->queues;

    assert( wait );
    if (wait->flags & SELECT_ALL)
    {
        int not_ok = 0;
        /* Note: we must check them all anyway, as some objects may
         * want to do something when signaled, even if others are not */
        for (i = 0, entry = wait->queues; i < wait->count; i++, entry++)
            not_ok |= !entry->obj->ops->signaled( entry->obj, thread );
        if (not_ok) goto other_checks;
        /* Wait satisfied: tell it to all objects */
        signaled = 0;
        for (i = 0, entry = wait->queues; i < wait->count; i++, entry++)
            if (entry->obj->ops->satisfied( entry->obj, thread ))
                signaled = STATUS_ABANDONED_WAIT_0;
        return signaled;
    }
    else
    {
        for (i = 0, entry = wait->queues; i < wait->count; i++, entry++)
        {
            if (!entry->obj->ops->signaled( entry->obj, thread )) continue;
            /* Wait satisfied: tell it to the object */
            signaled = i;
            if (entry->obj->ops->satisfied( entry->obj, thread ))
                signaled = i + STATUS_ABANDONED_WAIT_0;
            return signaled;
        }
    }

 other_checks:
    if ((wait->flags & SELECT_INTERRUPTIBLE) && thread->system_apc.head) return STATUS_USER_APC;
    if ((wait->flags & SELECT_ALERTABLE) && thread->user_apc.head) return STATUS_USER_APC;
    if (wait->flags & SELECT_TIMEOUT)
    {
        struct timeval now;
        gettimeofday( &now, NULL );
        if (!time_before( &now, &wait->timeout )) return STATUS_TIMEOUT;
    }
    return -1;
}
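/*
 * Illustrative sketch (an assumption, not part of the original file): the
 * value returned here becomes the wakeup status sent to the client, so a
 * wait-any on two objects resolves roughly as:
 *
 *     ret = check_wait( thread );
 *     // ret == 0 or 1                       -> index of the signaled object
 *     // ret == i + STATUS_ABANDONED_WAIT_0  -> object i was abandoned
 *     // ret == STATUS_USER_APC              -> a queued APC must run first
 *     // ret == STATUS_TIMEOUT               -> the timeout has expired
 *     // ret == -1                           -> not satisfied, keep sleeping
 */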
/* send the wakeup signal to a thread */
static int send_thread_wakeup( struct thread *thread, void *cookie, int signaled )
{
    struct wake_up_reply reply;
    int ret;

    reply.cookie   = cookie;
    reply.signaled = signaled;
    if ((ret = write( thread->wait_fd, &reply, sizeof(reply) )) == sizeof(reply)) return 0;
    if (ret >= 0)
        fatal_protocol_error( thread, "partial wakeup write %d\n", ret );
    else if (errno == EPIPE)
        kill_thread( thread, 0 );  /* normal death */
    else
        fatal_protocol_perror( thread, "write" );
    return -1;
}
/* attempt to wake up a thread */
/* return >0 if OK, 0 if the wait condition is still not satisfied */
static int wake_thread( struct thread *thread )
{
    int signaled, count;
    void *cookie;

    for (count = 0; thread->wait; count++)
    {
        if ((signaled = check_wait( thread )) == -1) break;

        cookie = thread->wait->cookie;
        if (debug_level) fprintf( stderr, "%08x: *wakeup* signaled=%d cookie=%p\n",
                                  (unsigned int)thread, signaled, cookie );
        end_wait( thread );
        send_thread_wakeup( thread, cookie, signaled );
    }
    return count;
}
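/*
 * Illustrative sketch (an assumption, not part of the original file): on the
 * client side of wait_fd, waking up amounts to reading one wake_up_reply and
 * matching its cookie against the wait that was submitted:
 *
 *     struct wake_up_reply reply;
 *     if (read( wait_fd, &reply, sizeof(reply) ) == sizeof(reply) &&
 *         reply.cookie == my_cookie)
 *         return reply.signaled;
 */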
/* thread wait timeout */
static void thread_timeout( void *ptr )
{
    struct thread_wait *wait = ptr;
    struct thread *thread = wait->thread;
    void *cookie = wait->cookie;

    wait->user = NULL;
    if (thread->wait != wait) return;  /* not the top-level wait, ignore it */

    if (debug_level) fprintf( stderr, "%08x: *wakeup* signaled=%d cookie=%p\n",
                              (unsigned int)thread, STATUS_TIMEOUT, cookie );
    end_wait( thread );
    send_thread_wakeup( thread, cookie, STATUS_TIMEOUT );
    /* check if other objects have become signaled in the meantime */
    wake_thread( thread );
}
/* select on a list of handles */
static void select_on( int count, void *cookie, handle_t *handles, int flags, int sec, int usec )
{
    int ret, i;
    struct object *objects[MAXIMUM_WAIT_OBJECTS];

    if ((count < 0) || (count > MAXIMUM_WAIT_OBJECTS))
    {
        set_error( STATUS_INVALID_PARAMETER );
        return;
    }
    for (i = 0; i < count; i++)
    {
        if (!(objects[i] = get_handle_obj( current->process, handles[i], SYNCHRONIZE, NULL )))
            break;
    }
    if (i < count) goto done;
    if (!wait_on( count, objects, flags, sec, usec )) goto done;

    if ((ret = check_wait( current )) != -1)
    {
        /* condition is already satisfied */
        end_wait( current );
        set_error( ret );
        goto done;
    }

    /* now we need to wait */
    if (flags & SELECT_TIMEOUT)
    {
        if (!(current->wait->user = add_timeout_user( &current->wait->timeout,
                                                      thread_timeout, current->wait )))
        {
            end_wait( current );
            goto done;
        }
    }
    current->wait->cookie = cookie;
    set_error( STATUS_PENDING );

 done:
    while (--i >= 0) release_object( objects[i] );
}
/* attempt to wake threads sleeping on the object wait queue */
void wake_up( struct object *obj, int max )
{
    struct wait_queue_entry *entry = obj->head;

    while (entry)
    {
        struct thread *thread = entry->thread;
        entry = entry->next;
        if (wake_thread( thread ))
        {
            if (max && !--max) break;
        }
    }
}
/* queue an async procedure call */
int thread_queue_apc( struct thread *thread, struct object *owner, void *func,
                      enum apc_type type, int system, int nb_args, ... )
{
    int i;
    va_list args;
    struct thread_apc *apc;
    struct apc_queue *queue = system ? &thread->system_apc : &thread->user_apc;

    /* cancel a possible previous APC with the same owner */
    if (owner) thread_cancel_apc( thread, owner, system );

    if (!(apc = mem_alloc( sizeof(*apc) + (nb_args-1)*sizeof(apc->args[0]) ))) return 0;
    apc->prev    = queue->tail;
    apc->next    = NULL;
    apc->owner   = owner;
    apc->func    = func;
    apc->type    = type;
    apc->nb_args = nb_args;
    va_start( args, nb_args );
    for (i = 0; i < nb_args; i++) apc->args[i] = va_arg( args, void * );
    va_end( args );
    queue->tail = apc;
    if (!apc->prev)  /* first one */
    {
        queue->head = apc;
        wake_thread( thread );
    }
    else apc->prev->next = apc;
    return 1;
}
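/*
 * Illustrative sketch (an assumption, not part of the original file): a
 * server-side object, say a hypothetical timer, could push a one-argument
 * APC into a thread's system queue; queueing again with the same owner
 * replaces any APC still pending for it:
 *
 *     // system=1 -> goes to thread->system_apc and wakes interruptible waits
 *     thread_queue_apc( t, &timer->obj, timer->callback, APC_TIMER, 1, 1, timer->arg );
 */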
/* cancel the async procedure call owned by a specific object */
void thread_cancel_apc( struct thread *thread, struct object *owner, int system )
{
    struct thread_apc *apc;
    struct apc_queue *queue = system ? &thread->system_apc : &thread->user_apc;
    for (apc = queue->head; apc; apc = apc->next)
    {
        if (apc->owner != owner) continue;
        if (apc->next) apc->next->prev = apc->prev;
        else queue->tail = apc->prev;
        if (apc->prev) apc->prev->next = apc->next;
        else queue->head = apc->next;
        free( apc );
        return;
    }
}
/* remove the head apc from the queue; the returned pointer must be freed by the caller */
static struct thread_apc *thread_dequeue_apc( struct thread *thread, int system_only )
{
    struct thread_apc *apc;
    struct apc_queue *queue = &thread->system_apc;

    if (!queue->head && !system_only) queue = &thread->user_apc;
    if ((apc = queue->head))
    {
        if (apc->next) apc->next->prev = NULL;
        else queue->tail = NULL;
        queue->head = apc->next;
    }
    return apc;
}
/* add an fd to the inflight list */
/* return list index, or -1 on error */
int thread_add_inflight_fd( struct thread *thread, int client, int server )
{
    int i;

    if (server == -1) return -1;

    /* first check if we already have an entry for this fd */
    for (i = 0; i < MAX_INFLIGHT_FDS; i++)
        if (thread->inflight[i].client == client)
        {
            close( thread->inflight[i].server );
            thread->inflight[i].server = server;
            return i;
        }

    /* now find a free spot to store it */
    for (i = 0; i < MAX_INFLIGHT_FDS; i++)
        if (thread->inflight[i].client == -1)
        {
            thread->inflight[i].client = client;
            thread->inflight[i].server = server;
            return i;
        }
    return -1;
}
/* get an inflight fd and purge it from the list */
/* the fd must be closed when no longer used */
int thread_get_inflight_fd( struct thread *thread, int client )
{
    int i, ret;

    if (client == -1) return -1;

    do
    {
        for (i = 0; i < MAX_INFLIGHT_FDS; i++)
        {
            if (thread->inflight[i].client == client)
            {
                ret = thread->inflight[i].server;
                thread->inflight[i].server = thread->inflight[i].client = -1;
                return ret;
            }
        }
    } while (!receive_fd( thread->process ));  /* in case it is still in the socket buffer */
    return -1;
}
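/*
 * Illustrative sketch (an assumption, not part of the original file): a
 * request handler turns the fd number sent by the client into a usable
 * server-side fd through the inflight list, which is refilled by receive_fd()
 * as descriptors arrive on the socket:
 *
 *     int fd = thread_get_inflight_fd( current, req->fd );  // client's fd number
 *     if (fd != -1)
 *     {
 *         // ... use fd ...
 *         close( fd );
 *     }
 */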
/* retrieve an LDT selector entry */
static void get_selector_entry( struct thread *thread, int entry,
                                unsigned int *base, unsigned int *limit,
                                unsigned char *flags )
{
    if (!thread->process->ldt_copy)
    {
        set_error( STATUS_ACCESS_DENIED );
        return;
    }
    if (entry >= 8192)
    {
        set_error( STATUS_INVALID_PARAMETER );  /* FIXME */
        return;
    }
    if (suspend_for_ptrace( thread ))
    {
        unsigned char flags_buf[4];
        int *addr = (int *)thread->process->ldt_copy + entry;
        if (read_thread_int( thread, addr, base ) == -1) goto done;
        if (read_thread_int( thread, addr + 8192, limit ) == -1) goto done;
        addr = (int *)thread->process->ldt_copy + 2*8192 + (entry >> 2);
        if (read_thread_int( thread, addr, (int *)flags_buf ) == -1) goto done;
        *flags = flags_buf[entry & 3];
    done:
        resume_thread( thread );
    }
}
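/*
 * Illustrative sketch (an assumption, not part of the original file): the
 * reads above assume ldt_copy in the client address space is laid out as
 * three parallel arrays, with the flag bytes packed four to an int:
 *
 *     int bases[8192];              // ldt_copy + entry
 *     int limits[8192];             // ldt_copy + 8192 ints + entry
 *     unsigned char flags[8192];    // ldt_copy + 2*8192 ints, one byte per entry
 */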
/* kill a thread on the spot */
void kill_thread( struct thread *thread, int violent_death )
{
    if (thread->state == TERMINATED) return;  /* already killed */
    thread->state = TERMINATED;
    if (current == thread) current = NULL;
    if (debug_level)
        fprintf( stderr,"%08x: *killed* exit_code=%d\n",
                 (unsigned int)thread, thread->exit_code );
    if (thread->wait)
    {
        while (thread->wait) end_wait( thread );
        send_thread_wakeup( thread, NULL, STATUS_PENDING );
        /* if it is waiting on the socket, we don't need to send a SIGTERM */
        violent_death = 0;
    }
    debug_exit_thread( thread );
    abandon_mutexes( thread );
    remove_process_thread( thread->process, thread );
    wake_up( &thread->obj, 0 );
    detach_thread( thread, violent_death ? SIGTERM : 0 );
    remove_select_user( &thread->obj );
    cleanup_thread( thread );
    release_object( thread );
}
/* take a snapshot of currently running threads */
struct thread_snapshot *thread_snap( int *count )
{
    struct thread_snapshot *snapshot, *ptr;
    struct thread *thread;
    int total = 0;

    for (thread = first_thread; thread; thread = thread->next)
        if (thread->state != TERMINATED) total++;
    if (!total || !(snapshot = mem_alloc( sizeof(*snapshot) * total ))) return NULL;
    ptr = snapshot;
    for (thread = first_thread; thread; thread = thread->next)
    {
        if (thread->state == TERMINATED) continue;
        ptr->thread   = thread;
        ptr->count    = thread->obj.refcount;
        ptr->priority = thread->priority;
        grab_object( thread );
        ptr++;
    }
    *count = total;
    return snapshot;
}
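/*
 * Illustrative sketch (an assumption, not part of the original file): callers
 * own the returned array and the references grabbed above, so consuming a
 * snapshot looks like:
 *
 *     int i, count;
 *     struct thread_snapshot *snap = thread_snap( &count );
 *     if (snap)
 *     {
 *         for (i = 0; i < count; i++)
 *         {
 *             // ... report snap[i].thread, snap[i].priority ...
 *             release_object( snap[i].thread );
 *         }
 *         free( snap );
 *     }
 */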
/* signal that we are finished booting on the client side */
DECL_HANDLER(boot_done)
{
    debug_level = max( debug_level, req->debug_level );
    if (current == booting_thread)
    {
        booting_thread = (struct thread *)~0UL;  /* make sure it doesn't match other threads */
        lock_master_socket(0);  /* allow other clients now */
    }
}
/* create a new thread */
DECL_HANDLER(new_thread)
{
    struct thread *thread;
    int request_fd = thread_get_inflight_fd( current, req->request_fd );

    if (request_fd == -1)
    {
        set_error( STATUS_INVALID_HANDLE );
        return;
    }
    if ((thread = create_thread( request_fd, current->process )))
    {
        if (req->suspend) thread->suspend++;
        if ((req->handle = alloc_handle( current->process, thread,
                                         THREAD_ALL_ACCESS, req->inherit )))
        {
            /* thread object will be released when the thread gets killed */
            return;
        }
        kill_thread( thread, 1 );
    }
}
/* initialize a new thread */
DECL_HANDLER(init_thread)
{
    int reply_fd = thread_get_inflight_fd( current, req->reply_fd );
    int wait_fd = thread_get_inflight_fd( current, req->wait_fd );

    if (current->unix_pid)
    {
        fatal_protocol_error( current, "init_thread: already running\n" );
        goto error;
    }
    if (reply_fd == -1)
    {
        fatal_protocol_error( current, "bad reply fd\n" );
        goto error;
    }
    if (wait_fd == -1)
    {
        fatal_protocol_error( current, "bad wait fd\n" );
        goto error;
    }

    current->unix_pid = req->unix_pid;
    current->teb      = req->teb;
    current->reply_fd = reply_fd;
    current->wait_fd  = wait_fd;

    if (current->suspend + current->process->suspend > 0) stop_thread( current );
    if (current->process->running_threads > 1)
        generate_debug_event( current, CREATE_THREAD_DEBUG_EVENT, req->entry );

    req->pid     = get_process_id( current->process );
    req->tid     = get_thread_id( current );
    req->boot    = (current == booting_thread);
    req->version = SERVER_PROTOCOL_VERSION;
    return;

 error:
    if (reply_fd != -1) close( reply_fd );
    if (wait_fd != -1) close( wait_fd );
}
/* set the shared buffer for a thread */
DECL_HANDLER(set_thread_buffer)
{
    unsigned int size = MAX_REQUEST_LENGTH;
    unsigned int offset = 0;
    int fd = thread_get_inflight_fd( current, req->fd );

    req->offset = offset;
    if (fd != -1)
    {
        if (ftruncate( fd, size ) == -1) file_set_error();
        else
        {
            void *buffer = mmap( 0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, offset );
            if (buffer == (void *)-1) file_set_error();
            else
            {
                if (current->buffer != (void *)-1) munmap( current->buffer, size );
                current->buffer = buffer;
            }
        }
        close( fd );
    }
    else set_error( STATUS_INVALID_HANDLE );
}
/* terminate a thread */
DECL_HANDLER(terminate_thread)
{
    struct thread *thread;

    if ((thread = get_thread_from_handle( req->handle, THREAD_TERMINATE )))
    {
        thread->exit_code = req->exit_code;
        if (thread != current) kill_thread( thread, 1 );
        else req->last = (thread->process->running_threads == 1);
        release_object( thread );
    }
}
/* fetch information about a thread */
DECL_HANDLER(get_thread_info)
{
    struct thread *thread;
    handle_t handle = req->handle;

    if (!handle) thread = get_thread_from_id( req->tid_in );
    else thread = get_thread_from_handle( req->handle, THREAD_QUERY_INFORMATION );
    if (thread)
    {
        req->tid       = get_thread_id( thread );
        req->teb       = thread->teb;
        req->exit_code = (thread->state == TERMINATED) ? thread->exit_code : STILL_ACTIVE;
        req->priority  = thread->priority;
        release_object( thread );
    }
}
/* set information about a thread */
DECL_HANDLER(set_thread_info)
{
    struct thread *thread;

    if ((thread = get_thread_from_handle( req->handle, THREAD_SET_INFORMATION )))
    {
        set_thread_info( thread, req );
        release_object( thread );
    }
}
/* suspend a thread */
DECL_HANDLER(suspend_thread)
{
    struct thread *thread;

    if ((thread = get_thread_from_handle( req->handle, THREAD_SUSPEND_RESUME )))
    {
        req->count = suspend_thread( thread, 1 );
        release_object( thread );
    }
}
/* resume a thread */
DECL_HANDLER(resume_thread)
{
    struct thread *thread;

    if ((thread = get_thread_from_handle( req->handle, THREAD_SUSPEND_RESUME )))
    {
        req->count = resume_thread( thread );
        release_object( thread );
    }
}
/* select on a handle list */
DECL_HANDLER(select)
{
    int count = get_req_data_size(req) / sizeof(int);
    select_on( count, req->cookie, get_req_data(req), req->flags, req->sec, req->usec );
}
/* queue an APC for a thread */
DECL_HANDLER(queue_apc)
{
    struct thread *thread;
    if ((thread = get_thread_from_handle( req->handle, THREAD_SET_CONTEXT )))
    {
        thread_queue_apc( thread, NULL, req->func, APC_USER, !req->user, 1, req->param );
        release_object( thread );
    }
}
/* get next APC to call */
DECL_HANDLER(get_apc)
{
    struct thread_apc *apc;
    size_t size;

    for (;;)
    {
        if (!(apc = thread_dequeue_apc( current, !req->alertable )))
        {
            /* no APC pending */
            req->type = APC_NONE;
            set_req_data_size( req, 0 );
            return;
        }
        /* Optimization: ignore APCs that have a NULL func; they are only used
         * to wake up a thread, but since we got here the thread woke up already.
         */
        if (apc->func) break;
        free( apc );
    }
    size = apc->nb_args * sizeof(apc->args[0]);
    if (size > get_req_data_size(req)) size = get_req_data_size(req);
    req->func = apc->func;
    req->type = apc->type;
    memcpy( get_req_data(req), apc->args, size );
    set_req_data_size( req, size );
    free( apc );
}
/* fetch a selector entry for a thread */
DECL_HANDLER(get_selector_entry)
{
    struct thread *thread;
    if ((thread = get_thread_from_handle( req->handle, THREAD_QUERY_INFORMATION )))
    {
        get_selector_entry( thread, req->entry, &req->base, &req->limit, &req->flags );
        release_object( thread );
    }
}