1 /*
2 * handles.c: Generic and internal operations on handles
4 * Author:
5 * Dick Porter (dick@ximian.com)
7 * (C) 2002-2011 Novell, Inc.
8 * Copyright 2011 Xamarin Inc
9 */
11 #include <config.h>
12 #include <glib.h>
13 #include <pthread.h>
14 #include <errno.h>
15 #include <unistd.h>
16 #include <string.h>
17 #include <sys/types.h>
18 #ifdef HAVE_SYS_SOCKET_H
19 # include <sys/socket.h>
20 #endif
21 #ifdef HAVE_SYS_UN_H
22 # include <sys/un.h>
23 #endif
24 #ifdef HAVE_SYS_MMAN_H
25 # include <sys/mman.h>
26 #endif
27 #ifdef HAVE_DIRENT_H
28 # include <dirent.h>
29 #endif
30 #include <sys/stat.h>
32 #include <mono/io-layer/wapi.h>
33 #include <mono/io-layer/wapi-private.h>
34 #include <mono/io-layer/handles-private.h>
35 #include <mono/io-layer/mono-mutex.h>
36 #include <mono/io-layer/misc-private.h>
37 #include <mono/io-layer/shared.h>
38 #include <mono/io-layer/collection.h>
39 #include <mono/io-layer/process-private.h>
40 #include <mono/io-layer/critical-section-private.h>
42 #undef DEBUG_REFS
44 #if 0
45 #define DEBUG(...) g_message(__VA_ARGS__)
46 #else
47 #define DEBUG(...)
48 #endif
50 static void (*_wapi_handle_ops_get_close_func (WapiHandleType type))(gpointer, gpointer);
52 static WapiHandleCapability handle_caps[WAPI_HANDLE_COUNT]={0};
53 static struct _WapiHandleOps *handle_ops[WAPI_HANDLE_COUNT]={
54 NULL,
55 &_wapi_file_ops,
56 &_wapi_console_ops,
57 &_wapi_thread_ops,
58 &_wapi_sem_ops,
59 &_wapi_mutex_ops,
60 &_wapi_event_ops,
61 #ifndef DISABLE_SOCKETS
62 &_wapi_socket_ops,
63 #endif
64 &_wapi_find_ops,
65 &_wapi_process_ops,
66 &_wapi_pipe_ops,
67 &_wapi_namedmutex_ops,
68 &_wapi_namedsem_ops,
69 &_wapi_namedevent_ops,
72 static void _wapi_shared_details (gpointer handle_info);
74 static void (*handle_details[WAPI_HANDLE_COUNT])(gpointer) = {
75 NULL,
76 _wapi_file_details,
77 _wapi_console_details,
78 _wapi_shared_details, /* thread */
79 _wapi_sem_details,
80 _wapi_mutex_details,
81 _wapi_event_details,
82 NULL, /* Nothing useful to see in a socket handle */
83 NULL, /* Nothing useful to see in a find handle */
84 _wapi_shared_details, /* process */
85 _wapi_pipe_details,
86 _wapi_shared_details, /* namedmutex */
87 _wapi_shared_details, /* namedsem */
88 _wapi_shared_details, /* namedevent */
91 const char *_wapi_handle_typename[] = {
92 "Unused",
93 "File",
94 "Console",
95 "Thread",
96 "Sem",
97 "Mutex",
98 "Event",
99 "Socket",
100 "Find",
101 "Process",
102 "Pipe",
103 "N.Mutex",
104 "N.Sem",
105 "N.Event",
106 "Error!!"
110 * We can hold _WAPI_PRIVATE_MAX_SLOTS * _WAPI_HANDLE_INITIAL_COUNT handles.
111 * If 4M handles are not enough... Oh, well... we will crash.
113 #define SLOT_INDEX(x) (x / _WAPI_HANDLE_INITIAL_COUNT)
114 #define SLOT_OFFSET(x) (x % _WAPI_HANDLE_INITIAL_COUNT)
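/* Illustrative note (not in the original source): a handle index is split
 * into a slot and an offset within that slot. Assuming the usual values of
 * _WAPI_HANDLE_INITIAL_COUNT == 256 and _WAPI_PRIVATE_MAX_SLOTS == 16384
 * (which gives the 4M figure above):
 *
 *   guint32 idx  = GPOINTER_TO_UINT (handle);
 *   guint32 slot = SLOT_INDEX (idx);     // e.g. 1000 / 256 == 3
 *   guint32 off  = SLOT_OFFSET (idx);    // e.g. 1000 % 256 == 232
 *   struct _WapiHandleUnshared *data = &_wapi_private_handles [slot][off];
 */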
116 struct _WapiHandleUnshared *_wapi_private_handles [_WAPI_PRIVATE_MAX_SLOTS];
117 static guint32 _wapi_private_handle_count = 0;
118 static guint32 _wapi_private_handle_slot_count = 0;
120 struct _WapiHandleSharedLayout *_wapi_shared_layout = NULL;
123 * If SHM is enabled, this will point to shared memory, otherwise it will be NULL.
125 struct _WapiFileShareLayout *_wapi_fileshare_layout = NULL;
128 * If SHM is disabled, this will point to a hash of _WapiFileShare structures, otherwise
129 * it will be NULL. We use this instead of _wapi_fileshare_layout to avoid allocating a
130 * 4MB array.
132 static GHashTable *file_share_hash;
133 static CRITICAL_SECTION file_share_hash_mutex;
135 #define file_share_hash_lock() EnterCriticalSection (&file_share_hash_mutex)
136 #define file_share_hash_unlock() LeaveCriticalSection (&file_share_hash_mutex)
138 guint32 _wapi_fd_reserve;
141  * This is an internal handle used to coordinate waits on multiple handles.
142 * Threads which wait for multiple handles wait on this one handle, and when a handle
143 * is signalled, this handle is signalled too.
145 static gpointer _wapi_global_signal_handle;
147 /* Point to the mutex/cond inside _wapi_global_signal_handle */
148 mono_mutex_t *_wapi_global_signal_mutex;
149 pthread_cond_t *_wapi_global_signal_cond;
151 int _wapi_sem_id;
152 gboolean _wapi_has_shut_down = FALSE;
154 /* Use this instead of getpid(), to cope with linuxthreads. It's a
155 * function rather than a variable lookup because we need to get at
156 * this before share_init() might have been called.
158 static pid_t _wapi_pid;
159 static mono_once_t pid_init_once = MONO_ONCE_INIT;
161 static void _wapi_handle_unref_full (gpointer handle, gboolean ignore_private_busy_handles);
163 static void pid_init (void)
165 _wapi_pid = getpid ();
168 pid_t _wapi_getpid (void)
170 mono_once (&pid_init_once, pid_init);
172 return(_wapi_pid);
176 static mono_mutex_t scan_mutex = MONO_MUTEX_INITIALIZER;
178 static void handle_cleanup (void)
180 int i, j, k;
182 /* Every shared handle we were using ought really to be closed
183 * by now, but to make sure just blow them all away. The
184 * exiting finalizer thread in particular races us to the
185 * program exit and doesn't always win, so it can be left
186 * cluttering up the shared file. Anything else left over is
187 * really a bug.
189 for(i = SLOT_INDEX (0); _wapi_private_handles[i] != NULL; i++) {
190 for(j = SLOT_OFFSET (0); j < _WAPI_HANDLE_INITIAL_COUNT; j++) {
191 struct _WapiHandleUnshared *handle_data = &_wapi_private_handles[i][j];
192 int type = handle_data->type;
193 gpointer handle = GINT_TO_POINTER (i*_WAPI_HANDLE_INITIAL_COUNT+j);
195 if (_WAPI_SHARED_HANDLE (type)) {
196 if (type == WAPI_HANDLE_THREAD) {
197 /* Special-case thread handles
198 * because they need extra
199 * cleanup. This also avoids
200 * a race condition between
201 * the application exit and
202 * the finalizer thread - if
203 * it finishes up between now
204 * and actual app termination
205 * it will find all its handle
206 * details have been blown
207 * away, so this sets those
208 * anyway.
210 				g_assert (0); /* This condition is freaking impossible */
211 _wapi_thread_set_termination_details (handle, 0);
215 for(k = handle_data->ref; k > 0; k--) {
216 DEBUG ("%s: unreffing %s handle %p", __func__, _wapi_handle_typename[type], handle);
218 _wapi_handle_unref_full (handle, TRUE);
223 _wapi_shm_semaphores_remove ();
225 _wapi_shm_detach (WAPI_SHM_DATA);
226 _wapi_shm_detach (WAPI_SHM_FILESHARE);
228 if (file_share_hash) {
229 g_hash_table_destroy (file_share_hash);
230 DeleteCriticalSection (&file_share_hash_mutex);
233 for (i = 0; i < _WAPI_PRIVATE_MAX_SLOTS; ++i)
234 g_free (_wapi_private_handles [i]);
238 * wapi_init:
240 * Initialize the io-layer.
242 void
243 wapi_init (void)
245 g_assert ((sizeof (handle_ops) / sizeof (handle_ops[0]))
246 == WAPI_HANDLE_COUNT);
248 _wapi_fd_reserve = getdtablesize();
250 /* This is needed by the code in _wapi_handle_new_internal */
251 _wapi_fd_reserve = (_wapi_fd_reserve + (_WAPI_HANDLE_INITIAL_COUNT - 1)) & ~(_WAPI_HANDLE_INITIAL_COUNT - 1);
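	/* Illustrative note (added): the expression above rounds _wapi_fd_reserve up
	 * to the next multiple of _WAPI_HANDLE_INITIAL_COUNT, so the fd-mapped
	 * entries always occupy whole slots. With an initial count of 256, a
	 * descriptor limit of 1024 stays at 1024, while a limit of 1000 becomes 1024.
	 */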
253 do {
255 * The entries in _wapi_private_handles reserved for fds are allocated lazily to
256 * save memory.
259 _wapi_private_handles [idx++] = g_new0 (struct _WapiHandleUnshared,
260 _WAPI_HANDLE_INITIAL_COUNT);
263 _wapi_private_handle_count += _WAPI_HANDLE_INITIAL_COUNT;
264 _wapi_private_handle_slot_count ++;
265 } while(_wapi_fd_reserve > _wapi_private_handle_count);
267 _wapi_shm_semaphores_init ();
269 _wapi_shared_layout = _wapi_shm_attach (WAPI_SHM_DATA);
270 g_assert (_wapi_shared_layout != NULL);
272 if (_wapi_shm_enabled ()) {
273 /* This allocates a 4mb array, so do it only if SHM is enabled */
274 _wapi_fileshare_layout = _wapi_shm_attach (WAPI_SHM_FILESHARE);
275 g_assert (_wapi_fileshare_layout != NULL);
278 #if !defined (DISABLE_SHARED_HANDLES)
279 if (_wapi_shm_enabled ())
280 _wapi_collection_init ();
281 #endif
283 _wapi_global_signal_handle = _wapi_handle_new (WAPI_HANDLE_EVENT, NULL);
285 _wapi_global_signal_cond = &_WAPI_PRIVATE_HANDLES (GPOINTER_TO_UINT (_wapi_global_signal_handle)).signal_cond;
286 _wapi_global_signal_mutex = &_WAPI_PRIVATE_HANDLES (GPOINTER_TO_UINT (_wapi_global_signal_handle)).signal_mutex;
288 /* Using g_atexit here instead of an explicit function call in
289 * a cleanup routine lets us cope when a third-party library
290 * calls exit (eg if an X client loses the connection to its
291 * server.)
293 g_atexit (handle_cleanup);
296 void
297 wapi_cleanup (void)
299 g_assert (_wapi_has_shut_down == FALSE);
301 _wapi_has_shut_down = TRUE;
303 _wapi_critical_section_cleanup ();
304 _wapi_error_cleanup ();
305 _wapi_thread_cleanup ();
308 static void _wapi_handle_init_shared (struct _WapiHandleShared *handle,
309 WapiHandleType type,
310 gpointer handle_specific)
312 g_assert (_wapi_has_shut_down == FALSE);
314 handle->type = type;
315 handle->timestamp = (guint32)(time (NULL) & 0xFFFFFFFF);
316 handle->signalled = FALSE;
317 handle->handle_refs = 1;
319 if (handle_specific != NULL) {
320 memcpy (&handle->u, handle_specific, sizeof (handle->u));
324 static void _wapi_handle_init (struct _WapiHandleUnshared *handle,
325 WapiHandleType type, gpointer handle_specific)
327 int thr_ret;
329 g_assert (_wapi_has_shut_down == FALSE);
331 handle->type = type;
332 handle->signalled = FALSE;
333 handle->ref = 1;
335 if (!_WAPI_SHARED_HANDLE(type)) {
336 thr_ret = pthread_cond_init (&handle->signal_cond, NULL);
337 g_assert (thr_ret == 0);
339 thr_ret = mono_mutex_init (&handle->signal_mutex, NULL);
340 g_assert (thr_ret == 0);
342 if (handle_specific != NULL) {
343 memcpy (&handle->u, handle_specific,
344 sizeof (handle->u));
349 static guint32 _wapi_handle_new_shared (WapiHandleType type,
350 gpointer handle_specific)
352 guint32 offset;
353 static guint32 last = 1;
354 int thr_ret;
356 g_assert (_wapi_has_shut_down == FALSE);
358 /* Leave the first slot empty as a guard */
359 again:
360 /* FIXME: expandable array */
361 for(offset = last; offset <_WAPI_HANDLE_INITIAL_COUNT; offset++) {
362 struct _WapiHandleShared *handle = &_wapi_shared_layout->handles[offset];
364 if(handle->type == WAPI_HANDLE_UNUSED) {
365 thr_ret = _wapi_handle_lock_shared_handles ();
366 g_assert (thr_ret == 0);
368 if (InterlockedCompareExchange ((gint32 *)&handle->type, type, WAPI_HANDLE_UNUSED) == WAPI_HANDLE_UNUSED) {
369 last = offset + 1;
371 _wapi_handle_init_shared (handle, type,
372 handle_specific);
374 _wapi_handle_unlock_shared_handles ();
376 return(offset);
377 } else {
378 /* Someone else beat us to it, just
379 * continue looking
383 _wapi_handle_unlock_shared_handles ();
387 if(last > 1) {
388 /* Try again from the beginning */
389 last = 1;
390 goto again;
393 /* Will need to expand the array. The caller will sort it out */
395 return(0);
399 * _wapi_handle_new_internal:
400 * @type: Init handle to this type
402 * Search for a free handle and initialize it. Return the handle on
403 * success and 0 on failure. This is only called from
404 * _wapi_handle_new, and scan_mutex must be held.
406 static guint32 _wapi_handle_new_internal (WapiHandleType type,
407 gpointer handle_specific)
409 guint32 i, k, count;
410 static guint32 last = 0;
411 gboolean retry = FALSE;
413 g_assert (_wapi_has_shut_down == FALSE);
415 /* A linear scan should be fast enough. Start from the last
416 * allocation, assuming that handles are allocated more often
417 * than they're freed. Leave the space reserved for file
418 * descriptors
421 if (last < _wapi_fd_reserve) {
422 last = _wapi_fd_reserve;
423 } else {
424 retry = TRUE;
427 again:
428 count = last;
429 for(i = SLOT_INDEX (count); i < _wapi_private_handle_slot_count; i++) {
430 if (_wapi_private_handles [i]) {
431 for (k = SLOT_OFFSET (count); k < _WAPI_HANDLE_INITIAL_COUNT; k++) {
432 struct _WapiHandleUnshared *handle = &_wapi_private_handles [i][k];
434 if(handle->type == WAPI_HANDLE_UNUSED) {
435 last = count + 1;
437 _wapi_handle_init (handle, type, handle_specific);
438 return (count);
440 count++;
445 if(retry && last > _wapi_fd_reserve) {
446 /* Try again from the beginning */
447 last = _wapi_fd_reserve;
448 goto again;
451 /* Will need to expand the array. The caller will sort it out */
453 return(0);
456 gpointer
457 _wapi_handle_new (WapiHandleType type, gpointer handle_specific)
459 guint32 handle_idx = 0;
460 gpointer handle;
461 int thr_ret;
463 g_assert (_wapi_has_shut_down == FALSE);
465 DEBUG ("%s: Creating new handle of type %s", __func__,
466 _wapi_handle_typename[type]);
468 g_assert(!_WAPI_FD_HANDLE(type));
470 pthread_cleanup_push ((void(*)(void *))mono_mutex_unlock_in_cleanup,
471 (void *)&scan_mutex);
472 thr_ret = mono_mutex_lock (&scan_mutex);
473 g_assert (thr_ret == 0);
475 while ((handle_idx = _wapi_handle_new_internal (type, handle_specific)) == 0) {
476 /* Try and expand the array, and have another go */
477 int idx = SLOT_INDEX (_wapi_private_handle_count);
478 if (idx >= _WAPI_PRIVATE_MAX_SLOTS) {
479 break;
482 _wapi_private_handles [idx] = g_new0 (struct _WapiHandleUnshared,
483 _WAPI_HANDLE_INITIAL_COUNT);
485 _wapi_private_handle_count += _WAPI_HANDLE_INITIAL_COUNT;
486 _wapi_private_handle_slot_count ++;
489 thr_ret = mono_mutex_unlock (&scan_mutex);
490 g_assert (thr_ret == 0);
491 pthread_cleanup_pop (0);
493 if (handle_idx == 0) {
494 /* We ran out of slots */
495 handle = _WAPI_HANDLE_INVALID;
496 goto done;
499 /* Make sure we left the space for fd mappings */
500 g_assert (handle_idx >= _wapi_fd_reserve);
502 handle = GUINT_TO_POINTER (handle_idx);
504 DEBUG ("%s: Allocated new handle %p", __func__, handle);
506 if (_WAPI_SHARED_HANDLE(type)) {
507 /* Add the shared section too */
508 guint32 ref;
510 ref = _wapi_handle_new_shared (type, handle_specific);
511 if (ref == 0) {
512 _wapi_handle_collect ();
513 ref = _wapi_handle_new_shared (type, handle_specific);
514 if (ref == 0) {
515 /* FIXME: grow the arrays */
516 handle = _WAPI_HANDLE_INVALID;
517 goto done;
521 _WAPI_PRIVATE_HANDLES(handle_idx).u.shared.offset = ref;
522 DEBUG ("%s: New shared handle at offset 0x%x", __func__,
523 ref);
526 done:
527 return(handle);
530 gpointer _wapi_handle_new_from_offset (WapiHandleType type, guint32 offset,
531 gboolean timestamp)
533 guint32 handle_idx = 0;
534 gpointer handle = INVALID_HANDLE_VALUE;
535 int thr_ret, i, k;
536 struct _WapiHandleShared *shared;
538 g_assert (_wapi_has_shut_down == FALSE);
540 DEBUG ("%s: Creating new handle of type %s to offset %d", __func__,
541 _wapi_handle_typename[type], offset);
543 g_assert(!_WAPI_FD_HANDLE(type));
544 g_assert(_WAPI_SHARED_HANDLE(type));
545 g_assert(offset != 0);
547 shared = &_wapi_shared_layout->handles[offset];
548 if (timestamp) {
549 guint32 now = (guint32)(time (NULL) & 0xFFFFFFFF);
550 /* Bump up the timestamp for this offset */
551 InterlockedExchange ((gint32 *)&shared->timestamp, now);
554 pthread_cleanup_push ((void(*)(void *))mono_mutex_unlock_in_cleanup,
555 (void *)&scan_mutex);
556 thr_ret = mono_mutex_lock (&scan_mutex);
557 g_assert (thr_ret == 0);
559 for (i = SLOT_INDEX (0); i < _wapi_private_handle_slot_count; i++) {
560 if (_wapi_private_handles [i]) {
561 for (k = SLOT_OFFSET (0); k < _WAPI_HANDLE_INITIAL_COUNT; k++) {
562 struct _WapiHandleUnshared *handle_data = &_wapi_private_handles [i][k];
564 if (handle_data->type == type &&
565 handle_data->u.shared.offset == offset) {
566 handle = GUINT_TO_POINTER (i * _WAPI_HANDLE_INITIAL_COUNT + k);
567 goto first_pass_done;
573 first_pass_done:
574 thr_ret = mono_mutex_unlock (&scan_mutex);
575 g_assert (thr_ret == 0);
576 pthread_cleanup_pop (0);
578 if (handle != INVALID_HANDLE_VALUE) {
579 _wapi_handle_ref (handle);
581 DEBUG ("%s: Returning old handle %p referencing 0x%x",
582 __func__, handle, offset);
583 return (handle);
586 /* Prevent entries expiring under us as we search */
587 thr_ret = _wapi_handle_lock_shared_handles ();
588 g_assert (thr_ret == 0);
590 if (shared->type == WAPI_HANDLE_UNUSED) {
591 /* Someone deleted this handle while we were working */
592 DEBUG ("%s: Handle at 0x%x unused", __func__, offset);
593 goto done;
596 if (shared->type != type) {
597 DEBUG ("%s: Wrong type at %d 0x%x! Found %s wanted %s",
598 __func__, offset, offset,
599 _wapi_handle_typename[shared->type],
600 _wapi_handle_typename[type]);
601 goto done;
604 pthread_cleanup_push ((void(*)(void *))mono_mutex_unlock_in_cleanup,
605 (void *)&scan_mutex);
606 thr_ret = mono_mutex_lock (&scan_mutex);
607 g_assert (thr_ret == 0);
609 while ((handle_idx = _wapi_handle_new_internal (type, NULL)) == 0) {
610 /* Try and expand the array, and have another go */
611 int idx = SLOT_INDEX (_wapi_private_handle_count);
612 _wapi_private_handles [idx] = g_new0 (struct _WapiHandleUnshared,
613 _WAPI_HANDLE_INITIAL_COUNT);
615 _wapi_private_handle_count += _WAPI_HANDLE_INITIAL_COUNT;
616 _wapi_private_handle_slot_count ++;
619 thr_ret = mono_mutex_unlock (&scan_mutex);
620 g_assert (thr_ret == 0);
621 pthread_cleanup_pop (0);
623 /* Make sure we left the space for fd mappings */
624 g_assert (handle_idx >= _wapi_fd_reserve);
626 handle = GUINT_TO_POINTER (handle_idx);
628 _WAPI_PRIVATE_HANDLES(handle_idx).u.shared.offset = offset;
629 InterlockedIncrement ((gint32 *)&shared->handle_refs);
631 DEBUG ("%s: Allocated new handle %p referencing 0x%x (shared refs %d)", __func__, handle, offset, shared->handle_refs);
633 done:
634 _wapi_handle_unlock_shared_handles ();
636 return(handle);
639 static void
640 init_handles_slot (int idx)
642 int thr_ret;
644 pthread_cleanup_push ((void(*)(void *))mono_mutex_unlock_in_cleanup,
645 (void *)&scan_mutex);
646 thr_ret = mono_mutex_lock (&scan_mutex);
647 g_assert (thr_ret == 0);
649 if (_wapi_private_handles [idx] == NULL) {
650 _wapi_private_handles [idx] = g_new0 (struct _WapiHandleUnshared,
651 _WAPI_HANDLE_INITIAL_COUNT);
652 g_assert (_wapi_private_handles [idx]);
655 thr_ret = mono_mutex_unlock (&scan_mutex);
656 g_assert (thr_ret == 0);
657 pthread_cleanup_pop (0);
660 gpointer _wapi_handle_new_fd (WapiHandleType type, int fd,
661 gpointer handle_specific)
663 struct _WapiHandleUnshared *handle;
664 int thr_ret;
666 g_assert (_wapi_has_shut_down == FALSE);
668 DEBUG ("%s: Creating new handle of type %s", __func__,
669 _wapi_handle_typename[type]);
671 g_assert(_WAPI_FD_HANDLE(type));
672 g_assert(!_WAPI_SHARED_HANDLE(type));
674 if (fd >= _wapi_fd_reserve) {
675 DEBUG ("%s: fd %d is too big", __func__, fd);
677 return(GUINT_TO_POINTER (_WAPI_HANDLE_INVALID));
680 /* Initialize the array entries on demand */
681 if (_wapi_private_handles [SLOT_INDEX (fd)] == NULL)
682 init_handles_slot (SLOT_INDEX (fd));
684 handle = &_WAPI_PRIVATE_HANDLES(fd);
686 if (handle->type != WAPI_HANDLE_UNUSED) {
687 DEBUG ("%s: fd %d is already in use!", __func__, fd);
688 /* FIXME: clean up this handle? We can't do anything
689 		 * with the fd, because that's the new one
693 DEBUG ("%s: Assigning new fd handle %d", __func__, fd);
695 /* Prevent file share entries racing with us, when the file
696 * handle is only half initialised
698 thr_ret = _wapi_shm_sem_lock (_WAPI_SHARED_SEM_FILESHARE);
699 g_assert(thr_ret == 0);
701 _wapi_handle_init (handle, type, handle_specific);
703 thr_ret = _wapi_shm_sem_unlock (_WAPI_SHARED_SEM_FILESHARE);
705 return(GUINT_TO_POINTER(fd));
708 gboolean _wapi_lookup_handle (gpointer handle, WapiHandleType type,
709 gpointer *handle_specific)
711 struct _WapiHandleUnshared *handle_data;
712 guint32 handle_idx = GPOINTER_TO_UINT(handle);
714 if (!_WAPI_PRIVATE_VALID_SLOT (handle_idx)) {
715 return(FALSE);
718 /* Initialize the array entries on demand */
719 if (_wapi_private_handles [SLOT_INDEX (handle_idx)] == NULL)
720 init_handles_slot (SLOT_INDEX (handle_idx));
722 handle_data = &_WAPI_PRIVATE_HANDLES(handle_idx);
724 if (handle_data->type != type) {
725 return(FALSE);
728 if (handle_specific == NULL) {
729 return(FALSE);
732 if (_WAPI_SHARED_HANDLE(type)) {
733 struct _WapiHandle_shared_ref *ref;
734 struct _WapiHandleShared *shared_handle_data;
736 ref = &handle_data->u.shared;
737 shared_handle_data = &_wapi_shared_layout->handles[ref->offset];
739 if (shared_handle_data->type != type) {
740 /* The handle must have been deleted on us
742 return (FALSE);
745 *handle_specific = &shared_handle_data->u;
746 } else {
747 *handle_specific = &handle_data->u;
750 return(TRUE);
753 void
754 _wapi_handle_foreach (WapiHandleType type,
755 gboolean (*on_each)(gpointer test, gpointer user),
756 gpointer user_data)
758 struct _WapiHandleUnshared *handle_data = NULL;
759 gpointer ret = NULL;
760 guint32 i, k;
761 int thr_ret;
763 pthread_cleanup_push ((void(*)(void *))mono_mutex_unlock_in_cleanup,
764 (void *)&scan_mutex);
765 thr_ret = mono_mutex_lock (&scan_mutex);
766 g_assert (thr_ret == 0);
768 for (i = SLOT_INDEX (0); i < _wapi_private_handle_slot_count; i++) {
769 if (_wapi_private_handles [i]) {
770 for (k = SLOT_OFFSET (0); k < _WAPI_HANDLE_INITIAL_COUNT; k++) {
771 handle_data = &_wapi_private_handles [i][k];
773 if (handle_data->type == type) {
774 ret = GUINT_TO_POINTER (i * _WAPI_HANDLE_INITIAL_COUNT + k);
775 if (on_each (ret, user_data) == TRUE)
776 break;
782 thr_ret = mono_mutex_unlock (&scan_mutex);
783 g_assert (thr_ret == 0);
784 pthread_cleanup_pop (0);
787 /* This might list some shared handles twice if they are already
788 * opened by this process, and the check function returns FALSE the
789 * first time. Shared handles that are created during the search are
790 * unreffed if the check function returns FALSE, so callers must not
791 * rely on the handle persisting (unless the check function returns
792 * TRUE)
793 * The caller owns the returned handle.
795 gpointer _wapi_search_handle (WapiHandleType type,
796 gboolean (*check)(gpointer test, gpointer user),
797 gpointer user_data,
798 gpointer *handle_specific,
799 gboolean search_shared)
801 struct _WapiHandleUnshared *handle_data = NULL;
802 struct _WapiHandleShared *shared = NULL;
803 gpointer ret = NULL;
804 guint32 i, k;
805 gboolean found = FALSE;
806 int thr_ret;
808 pthread_cleanup_push ((void(*)(void *))mono_mutex_unlock_in_cleanup,
809 (void *)&scan_mutex);
810 thr_ret = mono_mutex_lock (&scan_mutex);
811 g_assert (thr_ret == 0);
813 for (i = SLOT_INDEX (0); !found && i < _wapi_private_handle_slot_count; i++) {
814 if (_wapi_private_handles [i]) {
815 for (k = SLOT_OFFSET (0); k < _WAPI_HANDLE_INITIAL_COUNT; k++) {
816 handle_data = &_wapi_private_handles [i][k];
818 if (handle_data->type == type) {
819 ret = GUINT_TO_POINTER (i * _WAPI_HANDLE_INITIAL_COUNT + k);
820 if (check (ret, user_data) == TRUE) {
821 _wapi_handle_ref (ret);
822 found = TRUE;
824 if (_WAPI_SHARED_HANDLE (type)) {
825 shared = &_wapi_shared_layout->handles[i];
828 break;
835 thr_ret = mono_mutex_unlock (&scan_mutex);
836 g_assert (thr_ret == 0);
837 pthread_cleanup_pop (0);
839 if (!found && search_shared && _WAPI_SHARED_HANDLE (type)) {
840 /* Not found yet, so search the shared memory too */
841 DEBUG ("%s: Looking at other shared handles...", __func__);
843 for (i = 0; i < _WAPI_HANDLE_INITIAL_COUNT; i++) {
844 shared = &_wapi_shared_layout->handles[i];
846 if (shared->type == type) {
847 /* Tell new_from_offset to not
848 * timestamp this handle, because
849 * otherwise it will ping every handle
850 * in the list and they will never
851 * expire
853 ret = _wapi_handle_new_from_offset (type, i,
854 FALSE);
855 if (ret == INVALID_HANDLE_VALUE) {
856 /* This handle was deleted
857 * while we were looking at it
859 continue;
862 DEBUG ("%s: Opened tmp handle %p (type %s) from offset %d", __func__, ret, _wapi_handle_typename[type], i);
864 /* It's possible that the shared part
865 * of this handle has now been blown
866 * away (after new_from_offset
867 			 * successfully opened it), if its
868 * timestamp is too old. The check
869 * function needs to be aware of this,
870 * and cope if the handle has
871 * vanished.
873 if (check (ret, user_data) == TRUE) {
874 /* Timestamp this handle, but make
875 * sure it still exists first
877 thr_ret = _wapi_handle_lock_shared_handles ();
878 g_assert (thr_ret == 0);
880 if (shared->type == type) {
881 guint32 now = (guint32)(time (NULL) & 0xFFFFFFFF);
882 InterlockedExchange ((gint32 *)&shared->timestamp, now);
884 found = TRUE;
885 handle_data = &_WAPI_PRIVATE_HANDLES(GPOINTER_TO_UINT(ret));
887 _wapi_handle_unlock_shared_handles ();
888 break;
889 } else {
890 /* It's been deleted,
891 * so just keep
892 * looking
894 _wapi_handle_unlock_shared_handles ();
898 /* This isn't the handle we're looking
899 * for, so drop the reference we took
900 * in _wapi_handle_new_from_offset ()
902 _wapi_handle_unref (ret);
907 if (!found) {
908 ret = NULL;
909 goto done;
912 if(handle_specific != NULL) {
913 if (_WAPI_SHARED_HANDLE(type)) {
914 g_assert(shared->type == type);
916 *handle_specific = &shared->u;
917 } else {
918 *handle_specific = &handle_data->u;
922 done:
923 return(ret);
926 /* Returns the offset of the metadata array, or -1 on error, or 0 for
927 * not found (0 is not a valid offset)
929 gint32 _wapi_search_handle_namespace (WapiHandleType type,
930 gchar *utf8_name)
932 struct _WapiHandleShared *shared_handle_data;
933 guint32 i;
934 gint32 ret = 0;
935 int thr_ret;
937 g_assert(_WAPI_SHARED_HANDLE(type));
939 DEBUG ("%s: Lookup for handle named [%s] type %s", __func__,
940 utf8_name, _wapi_handle_typename[type]);
942 /* Do a handle collection before starting to look, so that any
943 * stale cruft gets removed
945 _wapi_handle_collect ();
947 thr_ret = _wapi_handle_lock_shared_handles ();
948 g_assert (thr_ret == 0);
950 for(i = 1; i < _WAPI_HANDLE_INITIAL_COUNT; i++) {
951 WapiSharedNamespace *sharedns;
953 shared_handle_data = &_wapi_shared_layout->handles[i];
955 /* Check mutex, event, semaphore, timer, job and
956 * file-mapping object names. So far only mutex,
957 * semaphore and event are implemented.
959 if (!_WAPI_SHARED_NAMESPACE (shared_handle_data->type)) {
960 continue;
963 DEBUG ("%s: found a shared namespace handle at 0x%x (type %s)", __func__, i, _wapi_handle_typename[shared_handle_data->type]);
965 sharedns=(WapiSharedNamespace *)&shared_handle_data->u;
967 DEBUG ("%s: name is [%s]", __func__, sharedns->name);
969 if (strcmp (sharedns->name, utf8_name) == 0) {
970 if (shared_handle_data->type != type) {
971 			/* It's the wrong type, so fail now */
972 DEBUG ("%s: handle 0x%x matches name but is wrong type: %s", __func__, i, _wapi_handle_typename[shared_handle_data->type]);
973 ret = -1;
974 goto done;
975 } else {
976 DEBUG ("%s: handle 0x%x matches name and type", __func__, i);
977 ret = i;
978 goto done;
983 done:
984 _wapi_handle_unlock_shared_handles ();
986 return(ret);
989 void _wapi_handle_ref (gpointer handle)
991 guint32 idx = GPOINTER_TO_UINT(handle);
992 struct _WapiHandleUnshared *handle_data;
994 if (!_WAPI_PRIVATE_VALID_SLOT (idx)) {
995 return;
998 if (_wapi_handle_type (handle) == WAPI_HANDLE_UNUSED) {
999 g_warning ("%s: Attempting to ref unused handle %p", __func__,
1000 handle);
1001 return;
1004 handle_data = &_WAPI_PRIVATE_HANDLES(idx);
1006 InterlockedIncrement ((gint32 *)&handle_data->ref);
1008 /* It's possible for processes to exit before getting around
1009 * to updating timestamps in the collection thread, so if a
1010 * shared handle is reffed do the timestamp here as well just
1011 * to make sure.
1013 if (_WAPI_SHARED_HANDLE(handle_data->type)) {
1014 struct _WapiHandleShared *shared_data = &_wapi_shared_layout->handles[handle_data->u.shared.offset];
1015 guint32 now = (guint32)(time (NULL) & 0xFFFFFFFF);
1016 InterlockedExchange ((gint32 *)&shared_data->timestamp, now);
1019 #ifdef DEBUG_REFS
1020 g_message ("%s: %s handle %p ref now %d", __func__,
1021 _wapi_handle_typename[_WAPI_PRIVATE_HANDLES (idx).type],
1022 handle,
1023 _WAPI_PRIVATE_HANDLES(idx).ref);
1024 #endif
1027 /* The handle must not be locked on entry to this function */
1028 static void _wapi_handle_unref_full (gpointer handle, gboolean ignore_private_busy_handles)
1030 guint32 idx = GPOINTER_TO_UINT(handle);
1031 gboolean destroy = FALSE, early_exit = FALSE;
1032 int thr_ret;
1034 if (!_WAPI_PRIVATE_VALID_SLOT (idx)) {
1035 return;
1038 if (_wapi_handle_type (handle) == WAPI_HANDLE_UNUSED) {
1039 g_warning ("%s: Attempting to unref unused handle %p",
1040 __func__, handle);
1041 return;
1044 /* Possible race condition here if another thread refs the
1045 * handle between here and setting the type to UNUSED. I
1046 * could lock a mutex, but I'm not sure that allowing a handle
1047 * reference to reach 0 isn't an application bug anyway.
1049 destroy = (InterlockedDecrement ((gint32 *)&_WAPI_PRIVATE_HANDLES(idx).ref) ==0);
1051 #ifdef DEBUG_REFS
1052 g_message ("%s: %s handle %p ref now %d (destroy %s)", __func__,
1053 _wapi_handle_typename[_WAPI_PRIVATE_HANDLES (idx).type],
1054 handle,
1055 _WAPI_PRIVATE_HANDLES(idx).ref, destroy?"TRUE":"FALSE");
1056 #endif
1058 if(destroy==TRUE) {
1059 /* Need to copy the handle info, reset the slot in the
1060 * array, and _only then_ call the close function to
1061 * avoid race conditions (eg file descriptors being
1062 * closed, and another file being opened getting the
1063 * same fd racing the memset())
1065 struct _WapiHandleUnshared handle_data;
1066 struct _WapiHandleShared shared_handle_data;
1067 WapiHandleType type = _WAPI_PRIVATE_HANDLES(idx).type;
1068 void (*close_func)(gpointer, gpointer) = _wapi_handle_ops_get_close_func (type);
1069 gboolean is_shared = _WAPI_SHARED_HANDLE(type);
1071 if (is_shared) {
1072 /* If this is a shared handle we need to take
1073 * the shared lock outside of the scan_mutex
1074 * lock to avoid deadlocks
1076 thr_ret = _wapi_handle_lock_shared_handles ();
1077 g_assert (thr_ret == 0);
1080 pthread_cleanup_push ((void(*)(void *))mono_mutex_unlock_in_cleanup, (void *)&scan_mutex);
1081 thr_ret = mono_mutex_lock (&scan_mutex);
1083 DEBUG ("%s: Destroying handle %p", __func__, handle);
1085 memcpy (&handle_data, &_WAPI_PRIVATE_HANDLES(idx),
1086 sizeof (struct _WapiHandleUnshared));
1088 memset (&_WAPI_PRIVATE_HANDLES(idx).u, '\0',
1089 sizeof(_WAPI_PRIVATE_HANDLES(idx).u));
1091 _WAPI_PRIVATE_HANDLES(idx).type = WAPI_HANDLE_UNUSED;
1093 if (!is_shared) {
1094 /* Destroy the mutex and cond var. We hope nobody
1095 * tried to grab them between the handle unlock and
1096 * now, but pthreads doesn't have a
1097 * "unlock_and_destroy" atomic function.
1099 thr_ret = mono_mutex_destroy (&_WAPI_PRIVATE_HANDLES(idx).signal_mutex);
1100 			/* WARNING: gross hack to make cleanup not crash when exiting without the whole runtime teardown. */
1101 if (thr_ret == EBUSY && ignore_private_busy_handles) {
1102 early_exit = TRUE;
1103 } else {
1104 if (thr_ret != 0)
1105 g_error ("Error destroying handle %p mutex due to %d\n", handle, thr_ret);
1107 thr_ret = pthread_cond_destroy (&_WAPI_PRIVATE_HANDLES(idx).signal_cond);
1108 if (thr_ret == EBUSY && ignore_private_busy_handles)
1109 early_exit = TRUE;
1110 else if (thr_ret != 0)
1111 g_error ("Error destroying handle %p cond var due to %d\n", handle, thr_ret);
1113 } else {
1114 struct _WapiHandleShared *shared = &_wapi_shared_layout->handles[handle_data.u.shared.offset];
1116 memcpy (&shared_handle_data, shared,
1117 sizeof (struct _WapiHandleShared));
1119 /* It's possible that this handle is already
1120 * pointing at a deleted shared section
1122 #ifdef DEBUG_REFS
1123 g_message ("%s: %s handle %p shared refs before dec %d", __func__, _wapi_handle_typename[type], handle, shared->handle_refs);
1124 #endif
1126 if (shared->handle_refs > 0) {
1127 shared->handle_refs--;
1128 if (shared->handle_refs == 0) {
1129 memset (shared, '\0', sizeof (struct _WapiHandleShared));
1134 thr_ret = mono_mutex_unlock (&scan_mutex);
1135 g_assert (thr_ret == 0);
1136 pthread_cleanup_pop (0);
1138 if (early_exit)
1139 return;
1140 if (is_shared) {
1141 _wapi_handle_unlock_shared_handles ();
1144 if (close_func != NULL) {
1145 if (is_shared) {
1146 close_func (handle, &shared_handle_data.u);
1147 } else {
1148 close_func (handle, &handle_data.u);
1154 void _wapi_handle_unref (gpointer handle)
1156 _wapi_handle_unref_full (handle, FALSE);
1159 void _wapi_handle_register_capabilities (WapiHandleType type,
1160 WapiHandleCapability caps)
1162 handle_caps[type] = caps;
1165 gboolean _wapi_handle_test_capabilities (gpointer handle,
1166 WapiHandleCapability caps)
1168 guint32 idx = GPOINTER_TO_UINT(handle);
1169 WapiHandleType type;
1171 if (!_WAPI_PRIVATE_VALID_SLOT (idx)) {
1172 return(FALSE);
1175 type = _WAPI_PRIVATE_HANDLES(idx).type;
1177 DEBUG ("%s: testing 0x%x against 0x%x (%d)", __func__,
1178 handle_caps[type], caps, handle_caps[type] & caps);
1180 return((handle_caps[type] & caps) != 0);
1183 static void (*_wapi_handle_ops_get_close_func (WapiHandleType type))(gpointer, gpointer)
1185 if (handle_ops[type] != NULL &&
1186 handle_ops[type]->close != NULL) {
1187 return (handle_ops[type]->close);
1190 return (NULL);
1193 void _wapi_handle_ops_close (gpointer handle, gpointer data)
1195 guint32 idx = GPOINTER_TO_UINT(handle);
1196 WapiHandleType type;
1198 if (!_WAPI_PRIVATE_VALID_SLOT (idx)) {
1199 return;
1202 type = _WAPI_PRIVATE_HANDLES(idx).type;
1204 if (handle_ops[type] != NULL &&
1205 handle_ops[type]->close != NULL) {
1206 handle_ops[type]->close (handle, data);
1210 void _wapi_handle_ops_signal (gpointer handle)
1212 guint32 idx = GPOINTER_TO_UINT(handle);
1213 WapiHandleType type;
1215 if (!_WAPI_PRIVATE_VALID_SLOT (idx)) {
1216 return;
1219 type = _WAPI_PRIVATE_HANDLES(idx).type;
1221 if (handle_ops[type] != NULL && handle_ops[type]->signal != NULL) {
1222 handle_ops[type]->signal (handle);
1226 gboolean _wapi_handle_ops_own (gpointer handle)
1228 guint32 idx = GPOINTER_TO_UINT(handle);
1229 WapiHandleType type;
1231 if (!_WAPI_PRIVATE_VALID_SLOT (idx)) {
1232 return(FALSE);
1235 type = _WAPI_PRIVATE_HANDLES(idx).type;
1237 if (handle_ops[type] != NULL && handle_ops[type]->own_handle != NULL) {
1238 return(handle_ops[type]->own_handle (handle));
1239 } else {
1240 return(FALSE);
1244 gboolean _wapi_handle_ops_isowned (gpointer handle)
1246 guint32 idx = GPOINTER_TO_UINT(handle);
1247 WapiHandleType type;
1249 if (!_WAPI_PRIVATE_VALID_SLOT (idx)) {
1250 return(FALSE);
1253 type = _WAPI_PRIVATE_HANDLES(idx).type;
1255 if (handle_ops[type] != NULL && handle_ops[type]->is_owned != NULL) {
1256 return(handle_ops[type]->is_owned (handle));
1257 } else {
1258 return(FALSE);
1262 guint32 _wapi_handle_ops_special_wait (gpointer handle, guint32 timeout, gboolean alertable)
1264 guint32 idx = GPOINTER_TO_UINT(handle);
1265 WapiHandleType type;
1267 if (!_WAPI_PRIVATE_VALID_SLOT (idx)) {
1268 return(WAIT_FAILED);
1271 type = _WAPI_PRIVATE_HANDLES(idx).type;
1273 if (handle_ops[type] != NULL &&
1274 handle_ops[type]->special_wait != NULL) {
1275 return(handle_ops[type]->special_wait (handle, timeout, alertable));
1276 } else {
1277 return(WAIT_FAILED);
1281 void _wapi_handle_ops_prewait (gpointer handle)
1283 guint32 idx = GPOINTER_TO_UINT (handle);
1284 WapiHandleType type;
1286 if (!_WAPI_PRIVATE_VALID_SLOT (idx)) {
1287 return;
1290 type = _WAPI_PRIVATE_HANDLES (idx).type;
1292 if (handle_ops[type] != NULL &&
1293 handle_ops[type]->prewait != NULL) {
1294 handle_ops[type]->prewait (handle);
1300 * CloseHandle:
1301 * @handle: The handle to release
1303 * Closes and invalidates @handle, releasing any resources it
1304 * consumes. When the last handle to a temporary or non-persistent
1305 * object is closed, that object can be deleted. Closing the same
1306 * handle twice is an error.
1308 * Return value: %TRUE on success, %FALSE otherwise.
1310 gboolean CloseHandle(gpointer handle)
1312 if (handle == NULL) {
1313 /* Problem: because we map file descriptors to the
1314 * same-numbered handle we can't tell the difference
1315 * between a bogus handle and the handle to stdin.
1316 * Assume that it's the console handle if that handle
1317 * exists...
1319 if (_WAPI_PRIVATE_HANDLES (0).type != WAPI_HANDLE_CONSOLE) {
1320 SetLastError (ERROR_INVALID_PARAMETER);
1321 return(FALSE);
1324 if (handle == _WAPI_HANDLE_INVALID){
1325 SetLastError (ERROR_INVALID_PARAMETER);
1326 return(FALSE);
1329 _wapi_handle_unref (handle);
1331 return(TRUE);
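/* Illustrative sketch (not part of the original file): typical lifecycle of a
 * private handle, assuming an event handle as created in wapi_init () above:
 *
 *   gpointer h = _wapi_handle_new (WAPI_HANDLE_EVENT, NULL);
 *   if (h != _WAPI_HANDLE_INVALID) {
 *           _wapi_handle_ref (h);      // take an extra reference
 *           _wapi_handle_unref (h);    // drop that extra reference again
 *           CloseHandle (h);           // drops the creation reference; may destroy
 *   }
 */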
1334 /* Lots more to implement here, but this is all we need at the moment */
1335 gboolean DuplicateHandle (gpointer srcprocess, gpointer src,
1336 gpointer targetprocess, gpointer *target,
1337 guint32 access G_GNUC_UNUSED, gboolean inherit G_GNUC_UNUSED, guint32 options G_GNUC_UNUSED)
1339 if (srcprocess != _WAPI_PROCESS_CURRENT ||
1340 targetprocess != _WAPI_PROCESS_CURRENT) {
1341 		/* Duplicating another process's handles is not supported */
1342 SetLastError (ERROR_INVALID_HANDLE);
1343 return(FALSE);
1346 if (src == _WAPI_PROCESS_CURRENT) {
1347 *target = _wapi_process_duplicate ();
1348 } else if (src == _WAPI_THREAD_CURRENT) {
1349 *target = _wapi_thread_duplicate ();
1350 } else {
1351 _wapi_handle_ref (src);
1352 *target = src;
1355 return(TRUE);
1358 gboolean _wapi_handle_count_signalled_handles (guint32 numhandles,
1359 gpointer *handles,
1360 gboolean waitall,
1361 guint32 *retcount,
1362 guint32 *lowest)
1364 guint32 count, i, iter=0;
1365 gboolean ret;
1366 int thr_ret;
1367 WapiHandleType type;
1369 /* Lock all the handles, with backoff */
1370 again:
1371 thr_ret = _wapi_handle_lock_shared_handles ();
1372 g_assert (thr_ret == 0);
1374 for(i=0; i<numhandles; i++) {
1375 gpointer handle = handles[i];
1376 guint32 idx = GPOINTER_TO_UINT(handle);
1378 DEBUG ("%s: attempting to lock %p", __func__, handle);
1380 type = _WAPI_PRIVATE_HANDLES(idx).type;
1382 thr_ret = _wapi_handle_trylock_handle (handle);
1384 if (thr_ret != 0) {
1385 /* Bummer */
1387 DEBUG ("%s: attempt failed for %p: %s", __func__,
1388 handle, strerror (thr_ret));
1390 thr_ret = _wapi_handle_unlock_shared_handles ();
1391 g_assert (thr_ret == 0);
1393 while (i--) {
1394 handle = handles[i];
1395 idx = GPOINTER_TO_UINT(handle);
1397 thr_ret = _wapi_handle_unlock_handle (handle);
1398 g_assert (thr_ret == 0);
1401 /* If iter ever reaches 100 the nanosleep will
1402 * return EINVAL immediately, but we have a
1403 * design flaw if that happens.
1405 iter++;
1406 if(iter==100) {
1407 g_warning ("%s: iteration overflow!",
1408 __func__);
1409 iter=1;
1412 DEBUG ("%s: Backing off for %d ms", __func__,
1413 iter*10);
1414 _wapi_handle_spin (10 * iter);
1416 goto again;
1420 DEBUG ("%s: Locked all handles", __func__);
1422 count=0;
1423 *lowest=numhandles;
1425 for(i=0; i<numhandles; i++) {
1426 gpointer handle = handles[i];
1427 guint32 idx = GPOINTER_TO_UINT(handle);
1429 type = _WAPI_PRIVATE_HANDLES(idx).type;
1431 DEBUG ("%s: Checking handle %p", __func__, handle);
1433 if(((_wapi_handle_test_capabilities (handle, WAPI_HANDLE_CAP_OWN)==TRUE) &&
1434 (_wapi_handle_ops_isowned (handle) == TRUE)) ||
1435 (_WAPI_SHARED_HANDLE(type) &&
1436 WAPI_SHARED_HANDLE_DATA(handle).signalled == TRUE) ||
1437 (!_WAPI_SHARED_HANDLE(type) &&
1438 _WAPI_PRIVATE_HANDLES(idx).signalled == TRUE)) {
1439 count++;
1441 DEBUG ("%s: Handle %p signalled", __func__,
1442 handle);
1443 if(*lowest>i) {
1444 *lowest=i;
1449 DEBUG ("%s: %d event handles signalled", __func__, count);
1451 if ((waitall == TRUE && count == numhandles) ||
1452 (waitall == FALSE && count > 0)) {
1453 ret=TRUE;
1454 } else {
1455 ret=FALSE;
1458 DEBUG ("%s: Returning %d", __func__, ret);
1460 *retcount=count;
1462 return(ret);
1465 void _wapi_handle_unlock_handles (guint32 numhandles, gpointer *handles)
1467 guint32 i;
1468 int thr_ret;
1470 thr_ret = _wapi_handle_unlock_shared_handles ();
1471 g_assert (thr_ret == 0);
1473 for(i=0; i<numhandles; i++) {
1474 gpointer handle = handles[i];
1476 DEBUG ("%s: unlocking handle %p", __func__, handle);
1478 thr_ret = _wapi_handle_unlock_handle (handle);
1479 g_assert (thr_ret == 0);
1483 static int timedwait_signal_poll_cond (pthread_cond_t *cond, mono_mutex_t *mutex, struct timespec *timeout, gboolean alertable)
1485 struct timespec fake_timeout;
1486 int ret;
1488 if (!alertable) {
1489 if (timeout)
1490 ret=mono_cond_timedwait (cond, mutex, timeout);
1491 else
1492 ret=mono_cond_wait (cond, mutex);
1493 } else {
1494 _wapi_calc_timeout (&fake_timeout, 100);
1496 if (timeout != NULL && ((fake_timeout.tv_sec > timeout->tv_sec) ||
1497 (fake_timeout.tv_sec == timeout->tv_sec &&
1498 fake_timeout.tv_nsec > timeout->tv_nsec))) {
1499 			/* Real timeout is less than 100ms away */
1500 ret=mono_cond_timedwait (cond, mutex, timeout);
1501 } else {
1502 ret=mono_cond_timedwait (cond, mutex, &fake_timeout);
1504 			/* Mask the fake timeout; this will cause
1505 			 * another poll if the cond was not really signalled
1507 if (ret==ETIMEDOUT) {
1508 ret=0;
1513 return(ret);
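/* Note (added): in the alertable case above the wait is capped at 100ms so the
 * caller regularly gets a chance to notice interruption; ETIMEDOUT from the
 * capped wait is masked to 0, so this function only reports ETIMEDOUT when the
 * caller-supplied timeout itself expires.
 */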
1516 int _wapi_handle_wait_signal (gboolean poll)
1518 return _wapi_handle_timedwait_signal_handle (_wapi_global_signal_handle, NULL, TRUE, poll);
1521 int _wapi_handle_timedwait_signal (struct timespec *timeout, gboolean poll)
1523 return _wapi_handle_timedwait_signal_handle (_wapi_global_signal_handle, timeout, TRUE, poll);
1526 int _wapi_handle_wait_signal_handle (gpointer handle, gboolean alertable)
1528 DEBUG ("%s: waiting for %p", __func__, handle);
1530 return _wapi_handle_timedwait_signal_handle (handle, NULL, alertable, FALSE);
1533 int _wapi_handle_timedwait_signal_handle (gpointer handle,
1534 struct timespec *timeout, gboolean alertable, gboolean poll)
1536 DEBUG ("%s: waiting for %p (type %s)", __func__, handle,
1537 _wapi_handle_typename[_wapi_handle_type (handle)]);
1539 if (_WAPI_SHARED_HANDLE (_wapi_handle_type (handle))) {
1540 if (WAPI_SHARED_HANDLE_DATA(handle).signalled == TRUE) {
1541 return (0);
1543 if (timeout != NULL) {
1544 struct timespec fake_timeout;
1545 _wapi_calc_timeout (&fake_timeout, 100);
1547 if ((fake_timeout.tv_sec > timeout->tv_sec) ||
1548 (fake_timeout.tv_sec == timeout->tv_sec &&
1549 fake_timeout.tv_nsec > timeout->tv_nsec)) {
1550 /* FIXME: Real timeout is less than
1551 			 * 100ms away, but is it really worth
1552 * calculating to the exact ms?
1554 _wapi_handle_spin (100);
1556 if (WAPI_SHARED_HANDLE_DATA(handle).signalled == TRUE) {
1557 return (0);
1558 } else {
1559 return (ETIMEDOUT);
1563 _wapi_handle_spin (100);
1564 return (0);
1566 } else {
1567 guint32 idx = GPOINTER_TO_UINT(handle);
1568 int res;
1569 pthread_cond_t *cond;
1570 mono_mutex_t *mutex;
1572 if (alertable && !wapi_thread_set_wait_handle (handle))
1573 return 0;
1575 cond = &_WAPI_PRIVATE_HANDLES (idx).signal_cond;
1576 mutex = &_WAPI_PRIVATE_HANDLES (idx).signal_mutex;
1578 if (poll) {
1579 /* This is needed when waiting for process handles */
1580 res = timedwait_signal_poll_cond (cond, mutex, timeout, alertable);
1581 } else {
1582 if (timeout)
1583 res = mono_cond_timedwait (cond, mutex, timeout);
1584 else
1585 res = mono_cond_wait (cond, mutex);
1588 if (alertable)
1589 wapi_thread_clear_wait_handle (handle);
1591 return res;
1595 void
1596 _wapi_free_share_info (_WapiFileShare *share_info)
1598 if (!_wapi_shm_enabled ()) {
1599 file_share_hash_lock ();
1600 g_hash_table_remove (file_share_hash, share_info);
1601 file_share_hash_unlock ();
1602 /* The hashtable dtor frees share_info */
1603 } else {
1604 memset (share_info, '\0', sizeof(struct _WapiFileShare));
1608 static gint
1609 wapi_share_info_equal (gconstpointer ka, gconstpointer kb)
1611 const _WapiFileShare *s1 = ka;
1612 const _WapiFileShare *s2 = kb;
1614 return (s1->device == s2->device && s1->inode == s2->inode) ? 1 : 0;
1617 static guint
1618 wapi_share_info_hash (gconstpointer data)
1620 const _WapiFileShare *s = data;
1622 return s->inode;
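/* Note (added): the hash only uses the inode while equality also compares the
 * device, so two files with the same inode number on different devices land in
 * the same bucket but are still treated as distinct keys by the GHashTable.
 */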
1625 gboolean _wapi_handle_get_or_set_share (dev_t device, ino_t inode,
1626 guint32 new_sharemode,
1627 guint32 new_access,
1628 guint32 *old_sharemode,
1629 guint32 *old_access,
1630 struct _WapiFileShare **share_info)
1632 struct _WapiFileShare *file_share;
1633 guint32 now = (guint32)(time(NULL) & 0xFFFFFFFF);
1634 int thr_ret, i, first_unused = -1;
1635 gboolean exists = FALSE;
1637 /* Prevents entries from expiring under us as we search
1639 thr_ret = _wapi_handle_lock_shared_handles ();
1640 g_assert (thr_ret == 0);
1642 /* Prevent new entries racing with us */
1643 thr_ret = _wapi_shm_sem_lock (_WAPI_SHARED_SEM_FILESHARE);
1644 g_assert (thr_ret == 0);
1646 if (!_wapi_shm_enabled ()) {
1647 _WapiFileShare tmp;
1650 * Instead of allocating a 4MB array, we use a hash table to keep track of this
1651 * info. This is needed even if SHM is disabled, to track sharing inside
1652 * the current process.
1654 if (!file_share_hash) {
1655 file_share_hash = g_hash_table_new_full (wapi_share_info_hash, wapi_share_info_equal, NULL, g_free);
1656 InitializeCriticalSection (&file_share_hash_mutex);
1659 tmp.device = device;
1660 tmp.inode = inode;
1662 file_share_hash_lock ();
1664 file_share = g_hash_table_lookup (file_share_hash, &tmp);
1665 if (file_share) {
1666 *old_sharemode = file_share->sharemode;
1667 *old_access = file_share->access;
1668 *share_info = file_share;
1670 InterlockedIncrement ((gint32 *)&file_share->handle_refs);
1671 exists = TRUE;
1672 } else {
1673 file_share = g_new0 (_WapiFileShare, 1);
1675 file_share->device = device;
1676 file_share->inode = inode;
1677 file_share->opened_by_pid = _wapi_getpid ();
1678 file_share->sharemode = new_sharemode;
1679 file_share->access = new_access;
1680 file_share->handle_refs = 1;
1681 *share_info = file_share;
1683 g_hash_table_insert (file_share_hash, file_share, file_share);
1686 file_share_hash_unlock ();
1687 } else {
1688 /* If a linear scan gets too slow we'll have to fit a hash
1689 * table onto the shared mem backing store
1691 *share_info = NULL;
1692 for (i = 0; i <= _wapi_fileshare_layout->hwm; i++) {
1693 file_share = &_wapi_fileshare_layout->share_info[i];
1695 /* Make a note of an unused slot, in case we need to
1696 * store share info
1698 if (first_unused == -1 && file_share->handle_refs == 0) {
1699 first_unused = i;
1700 continue;
1703 if (file_share->handle_refs == 0) {
1704 continue;
1707 if (file_share->device == device &&
1708 file_share->inode == inode) {
1709 *old_sharemode = file_share->sharemode;
1710 *old_access = file_share->access;
1711 *share_info = file_share;
1713 /* Increment the reference count while we
1714 * still have sole access to the shared area.
1715 * This makes the increment atomic wrt
1716 * collections
1718 InterlockedIncrement ((gint32 *)&file_share->handle_refs);
1720 exists = TRUE;
1721 break;
1725 if (!exists) {
1726 if (i == _WAPI_FILESHARE_SIZE && first_unused == -1) {
1727 /* No more space */
1728 } else {
1729 if (first_unused == -1) {
1730 file_share = &_wapi_fileshare_layout->share_info[++i];
1731 _wapi_fileshare_layout->hwm = i;
1732 } else {
1733 file_share = &_wapi_fileshare_layout->share_info[first_unused];
1736 file_share->device = device;
1737 file_share->inode = inode;
1738 file_share->opened_by_pid = _wapi_getpid ();
1739 file_share->sharemode = new_sharemode;
1740 file_share->access = new_access;
1741 file_share->handle_refs = 1;
1742 *share_info = file_share;
1746 if (*share_info != NULL) {
1747 InterlockedExchange ((gint32 *)&(*share_info)->timestamp, now);
1751 thr_ret = _wapi_shm_sem_unlock (_WAPI_SHARED_SEM_FILESHARE);
1753 _wapi_handle_unlock_shared_handles ();
1755 return(exists);
1758 /* If we don't have the info in /proc, check if the process that
1759 * opened this share info is still there (it's not a perfect method,
1760 * due to pid reuse)
1762 static void _wapi_handle_check_share_by_pid (struct _WapiFileShare *share_info)
1764 if (kill (share_info->opened_by_pid, 0) == -1 &&
1765 (errno == ESRCH ||
1766 errno == EPERM)) {
1767 /* It's gone completely (or there's a new process
1768 * owned by someone else) so mark this share info as
1769 * dead
1771 DEBUG ("%s: Didn't find it, destroying entry", __func__);
1773 _wapi_free_share_info (share_info);
1777 #ifdef __linux__
1778 /* Scan /proc/<pids>/fd/ for open file descriptors to the file in
1779 * question. If there are none, reset the share info.
1781 * This implementation is Linux-specific; legacy systems will have to
1782 * implement their own ways of finding out if a particular file is
1783 * open by a process.
1785 void _wapi_handle_check_share (struct _WapiFileShare *share_info, int fd)
1787 gboolean found = FALSE, proc_fds = FALSE;
1788 pid_t self = _wapi_getpid ();
1789 int pid;
1790 int thr_ret, i;
1792 /* Prevents entries from expiring under us if we remove this
1793 * one
1795 thr_ret = _wapi_handle_lock_shared_handles ();
1796 g_assert (thr_ret == 0);
1798 /* Prevent new entries racing with us */
1799 thr_ret = _wapi_shm_sem_lock (_WAPI_SHARED_SEM_FILESHARE);
1800 g_assert (thr_ret == 0);
1802 /* If there is no /proc, there's nothing more we can do here */
1803 if (access ("/proc", F_OK) == -1) {
1804 _wapi_handle_check_share_by_pid (share_info);
1805 goto done;
1808 /* If there's another handle that thinks it owns this fd, then even
1809 * if the fd has been closed behind our back consider it still owned.
1810 * See bugs 75764 and 75891
1812 for (i = 0; i < _wapi_fd_reserve; i++) {
1813 if (_wapi_private_handles [SLOT_INDEX (i)]) {
1814 struct _WapiHandleUnshared *handle = &_WAPI_PRIVATE_HANDLES(i);
1816 if (i != fd &&
1817 handle->type == WAPI_HANDLE_FILE) {
1818 struct _WapiHandle_file *file_handle = &handle->u.file;
1820 if (file_handle->share_info == share_info) {
1821 DEBUG ("%s: handle 0x%x has this file open!",
1822 __func__, i);
1824 goto done;
1830 for (i = 0; i < _WAPI_HANDLE_INITIAL_COUNT; i++) {
1831 struct _WapiHandleShared *shared;
1832 struct _WapiHandle_process *process_handle;
1834 shared = &_wapi_shared_layout->handles[i];
1836 if (shared->type == WAPI_HANDLE_PROCESS) {
1837 DIR *fd_dir;
1838 struct dirent *fd_entry;
1839 char subdir[_POSIX_PATH_MAX];
1841 process_handle = &shared->u.process;
1842 pid = process_handle->id;
1844 /* Look in /proc/<pid>/fd/ but ignore
1845 * /proc/<our pid>/fd/<fd>, as we have the
1846 * file open too
1848 g_snprintf (subdir, _POSIX_PATH_MAX, "/proc/%d/fd",
1849 pid);
1851 fd_dir = opendir (subdir);
1852 if (fd_dir == NULL) {
1853 continue;
1856 DEBUG ("%s: Looking in %s", __func__, subdir);
1858 proc_fds = TRUE;
1860 while ((fd_entry = readdir (fd_dir)) != NULL) {
1861 char path[_POSIX_PATH_MAX];
1862 struct stat link_stat;
1864 if (!strcmp (fd_entry->d_name, ".") ||
1865 !strcmp (fd_entry->d_name, "..") ||
1866 (pid == self &&
1867 fd == atoi (fd_entry->d_name))) {
1868 continue;
1871 g_snprintf (path, _POSIX_PATH_MAX,
1872 "/proc/%d/fd/%s", pid,
1873 fd_entry->d_name);
1875 stat (path, &link_stat);
1876 if (link_stat.st_dev == share_info->device &&
1877 link_stat.st_ino == share_info->inode) {
1878 DEBUG ("%s: Found it at %s",
1879 __func__, path);
1881 found = TRUE;
1885 closedir (fd_dir);
1889 if (proc_fds == FALSE) {
1890 _wapi_handle_check_share_by_pid (share_info);
1891 } else if (found == FALSE) {
1892 /* Blank out this entry, as it is stale */
1893 DEBUG ("%s: Didn't find it, destroying entry", __func__);
1895 _wapi_free_share_info (share_info);
1898 done:
1899 thr_ret = _wapi_shm_sem_unlock (_WAPI_SHARED_SEM_FILESHARE);
1901 _wapi_handle_unlock_shared_handles ();
1903 #else
1905 // Other implementations (non-Linux)
1907 void _wapi_handle_check_share (struct _WapiFileShare *share_info, int fd)
1909 int thr_ret;
1911 /* Prevents entries from expiring under us if we remove this
1912 * one */
1913 thr_ret = _wapi_handle_lock_shared_handles ();
1914 g_assert (thr_ret == 0);
1916 /* Prevent new entries racing with us */
1917 thr_ret = _wapi_shm_sem_lock (_WAPI_SHARED_SEM_FILESHARE);
1918 g_assert (thr_ret == 0);
1920 _wapi_handle_check_share_by_pid (share_info);
1922 thr_ret = _wapi_shm_sem_unlock (_WAPI_SHARED_SEM_FILESHARE);
1923 _wapi_handle_unlock_shared_handles ();
1925 #endif
1927 void _wapi_handle_dump (void)
1929 struct _WapiHandleUnshared *handle_data;
1930 guint32 i, k;
1931 int thr_ret;
1933 pthread_cleanup_push ((void(*)(void *))mono_mutex_unlock_in_cleanup,
1934 (void *)&scan_mutex);
1935 thr_ret = mono_mutex_lock (&scan_mutex);
1936 g_assert (thr_ret == 0);
1938 for(i = SLOT_INDEX (0); i < _wapi_private_handle_slot_count; i++) {
1939 if (_wapi_private_handles [i]) {
1940 for (k = SLOT_OFFSET (0); k < _WAPI_HANDLE_INITIAL_COUNT; k++) {
1941 handle_data = &_wapi_private_handles [i][k];
1943 if (handle_data->type == WAPI_HANDLE_UNUSED) {
1944 continue;
1947 g_print ("%3x [%7s] %s %d ",
1948 i * _WAPI_HANDLE_INITIAL_COUNT + k,
1949 _wapi_handle_typename[handle_data->type],
1950 handle_data->signalled?"Sg":"Un",
1951 handle_data->ref);
1952 handle_details[handle_data->type](&handle_data->u);
1953 g_print ("\n");
1958 thr_ret = mono_mutex_unlock (&scan_mutex);
1959 g_assert (thr_ret == 0);
1960 pthread_cleanup_pop (0);
1963 static void _wapi_shared_details (gpointer handle_info)
1965 struct _WapiHandle_shared_ref *shared = (struct _WapiHandle_shared_ref *)handle_info;
1967 g_print ("offset: 0x%x", shared->offset);
1970 void _wapi_handle_update_refs (void)
1972 guint32 i, k;
1973 int thr_ret;
1974 guint32 now = (guint32)(time (NULL) & 0xFFFFFFFF);
1976 thr_ret = _wapi_handle_lock_shared_handles ();
1977 g_assert (thr_ret == 0);
1979 /* Prevent file share entries racing with us */
1980 thr_ret = _wapi_shm_sem_lock (_WAPI_SHARED_SEM_FILESHARE);
1981 g_assert(thr_ret == 0);
1983 pthread_cleanup_push ((void(*)(void *))mono_mutex_unlock_in_cleanup,
1984 (void *)&scan_mutex);
1985 thr_ret = mono_mutex_lock (&scan_mutex);
1987 for(i = SLOT_INDEX (0); i < _wapi_private_handle_slot_count; i++) {
1988 if (_wapi_private_handles [i]) {
1989 for (k = SLOT_OFFSET (0); k < _WAPI_HANDLE_INITIAL_COUNT; k++) {
1990 struct _WapiHandleUnshared *handle = &_wapi_private_handles [i][k];
1992 if (_WAPI_SHARED_HANDLE(handle->type)) {
1993 struct _WapiHandleShared *shared_data;
1995 DEBUG ("%s: (%d) handle 0x%x is SHARED (%s)", __func__, _wapi_getpid (), i * _WAPI_HANDLE_INITIAL_COUNT + k, _wapi_handle_typename[handle->type]);
1997 shared_data = &_wapi_shared_layout->handles[handle->u.shared.offset];
1999 DEBUG ("%s: (%d) Updating timestamp of handle 0x%x", __func__, _wapi_getpid (), handle->u.shared.offset);
2001 InterlockedExchange ((gint32 *)&shared_data->timestamp, now);
2002 } else if (handle->type == WAPI_HANDLE_FILE) {
2003 struct _WapiHandle_file *file_handle = &handle->u.file;
2005 DEBUG ("%s: (%d) handle 0x%x is FILE", __func__, _wapi_getpid (), i * _WAPI_HANDLE_INITIAL_COUNT + k);
2007 g_assert (file_handle->share_info != NULL);
2009 DEBUG ("%s: (%d) Inc refs on fileshare 0x%x", __func__, _wapi_getpid (), (file_handle->share_info - &_wapi_fileshare_layout->share_info[0]) / sizeof(struct _WapiFileShare));
2011 InterlockedExchange ((gint32 *)&file_handle->share_info->timestamp, now);
2017 thr_ret = mono_mutex_unlock (&scan_mutex);
2018 g_assert (thr_ret == 0);
2019 pthread_cleanup_pop (0);
2021 thr_ret = _wapi_shm_sem_unlock (_WAPI_SHARED_SEM_FILESHARE);
2023 _wapi_handle_unlock_shared_handles ();