/*
 * Server-side file descriptor management
 *
 * Copyright (C) 2000, 2003 Alexandre Julliard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */

#include "config.h"
#include "wine/port.h"

#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <signal.h>
#include <stdarg.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#ifdef HAVE_POLL_H
#include <poll.h>
#endif
#ifdef HAVE_SYS_POLL_H
#include <sys/poll.h>
#endif
#ifdef HAVE_LINUX_MAJOR_H
#include <linux/major.h>
#endif
#ifdef HAVE_SYS_STATVFS_H
#include <sys/statvfs.h>
#endif
#ifdef HAVE_SYS_VFS_H
/* Solaris defines its system list in sys/list.h.
 * We need to work around that here.
 */
#define list SYSLIST
#define list_next SYSLIST_NEXT
#define list_prev SYSLIST_PREV
#define list_head SYSLIST_HEAD
#define list_tail SYSLIST_TAIL
#define list_move_tail SYSLIST_MOVE_TAIL
#define list_remove SYSLIST_REMOVE
#include <sys/vfs.h>
#undef list
#undef list_next
#undef list_prev
#undef list_head
#undef list_tail
#undef list_move_tail
#undef list_remove
#endif
#ifdef HAVE_SYS_PARAM_H
#include <sys/param.h>
#endif
#ifdef HAVE_SYS_MOUNT_H
#include <sys/mount.h>
#endif
#ifdef HAVE_SYS_STATFS_H
#include <sys/statfs.h>
#endif
#ifdef HAVE_SYS_SYSCTL_H
#include <sys/sysctl.h>
#endif
#ifdef HAVE_SYS_EVENT_H
#include <sys/event.h>
#undef LIST_INIT
#undef LIST_ENTRY
#endif
#ifdef HAVE_STDINT_H
#include <stdint.h>
#endif
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/types.h>
#include <unistd.h>

#include "ntstatus.h"
#define WIN32_NO_STATUS
#include "object.h"
#include "file.h"
#include "handle.h"
#include "process.h"
#include "request.h"

#include "winternl.h"
#include "winioctl.h"

#if defined(HAVE_SYS_EPOLL_H) && defined(HAVE_EPOLL_CREATE)
# include <sys/epoll.h>
# define USE_EPOLL
#elif defined(linux) && defined(__i386__) && defined(HAVE_STDINT_H)
# define USE_EPOLL
# define EPOLLIN POLLIN
# define EPOLLOUT POLLOUT
# define EPOLLERR POLLERR
# define EPOLLHUP POLLHUP
# define EPOLL_CTL_ADD 1
# define EPOLL_CTL_DEL 2
# define EPOLL_CTL_MOD 3

typedef union epoll_data
{
    void *ptr;
    int fd;
    uint32_t u32;
    uint64_t u64;
} epoll_data_t;

struct epoll_event
{
    uint32_t events;
    epoll_data_t data;
};

#define SYSCALL_RET(ret) do { \
        if (ret < 0) { errno = -ret; ret = -1; } \
        return ret; \
    } while(0)

static inline int epoll_create( int size )
{
    int ret;
    __asm__( "pushl %%ebx; movl %2,%%ebx; int $0x80; popl %%ebx"
             : "=a" (ret) : "0" (254 /*NR_epoll_create*/), "r" (size) );
    SYSCALL_RET(ret);
}

static inline int epoll_ctl( int epfd, int op, int fd, const struct epoll_event *event )
{
    int ret;
    __asm__( "pushl %%ebx; movl %2,%%ebx; int $0x80; popl %%ebx"
             : "=a" (ret)
             : "0" (255 /*NR_epoll_ctl*/), "r" (epfd), "c" (op), "d" (fd), "S" (event), "m" (*event) );
    SYSCALL_RET(ret);
}

static inline int epoll_wait( int epfd, struct epoll_event *events, int maxevents, int timeout )
{
    int ret;
    __asm__( "pushl %%ebx; movl %2,%%ebx; int $0x80; popl %%ebx"
             : "=a" (ret)
             : "0" (256 /*NR_epoll_wait*/), "r" (epfd), "c" (events), "d" (maxevents), "S" (timeout)
             : "memory" );
    SYSCALL_RET(ret);
}
#undef SYSCALL_RET

#endif /* linux && __i386__ && HAVE_STDINT_H */
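
/* Note: the i386 fallback above issues the epoll syscalls directly via
 * int $0x80, so that epoll can still be used on systems whose libc predates
 * the epoll wrappers; a raw Linux syscall returns -errno on failure, which
 * SYSCALL_RET converts back to the usual -1 / errno convention. */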

/* Because of the stupid Posix locking semantics, we need to keep
 * track of all file descriptors referencing a given file, and not
 * close a single one until all the locks are gone (sigh).
 */
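
/* Illustration of the POSIX behavior being worked around (sketch):
 *
 *   fd1 = open( "file", O_RDWR );
 *   fd2 = open( "file", O_RDWR );
 *   fcntl( fd1, F_SETLK, ... );   // lock acquired through fd1
 *   close( fd2 );                 // drops ALL of this process's locks on
 *                                 // "file", including the one set via fd1
 *
 * This is why closed fds are kept on the inode's closed list until the
 * last lock on the inode goes away.
 */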

/* file descriptor object */

/* closed_fd is used to keep track of the unix fd belonging to a closed fd object */
struct closed_fd
{
    struct list entry;       /* entry in inode closed list */
    int         unix_fd;     /* the unix file descriptor */
    char        unlink[1];   /* name to unlink on close (if any) */
};

struct fd
{
    struct object        obj;         /* object header */
    const struct fd_ops *fd_ops;      /* file descriptor operations */
    struct inode        *inode;       /* inode that this fd belongs to */
    struct list          inode_entry; /* entry in inode fd list */
    struct closed_fd    *closed;      /* structure to store the unix fd at destroy time */
    struct object       *user;        /* object using this file descriptor */
    struct list          locks;       /* list of locks on this fd */
    unsigned int         access;      /* file access (FILE_READ_DATA etc.) */
    unsigned int         options;     /* file options (FILE_DELETE_ON_CLOSE, FILE_SYNCHRONOUS...) */
    unsigned int         sharing;     /* file sharing mode */
    char                *unix_name;   /* unix file name */
    int                  unix_fd;     /* unix file descriptor */
    unsigned int         no_fd_status;/* status to return when unix_fd is -1 */
    unsigned int         signaled :1; /* is the fd signaled? */
    unsigned int         fs_locks :1; /* can we use filesystem locks for this fd? */
    int                  poll_index;  /* index of fd in poll array */
    struct async_queue  *read_q;      /* async readers of this fd */
    struct async_queue  *write_q;     /* async writers of this fd */
    struct async_queue  *wait_q;      /* other async waiters of this fd */
    struct completion   *completion;  /* completion object attached to this fd */
    apc_param_t          comp_key;    /* completion key to set in completion events */
};

static void fd_dump( struct object *obj, int verbose );
static void fd_destroy( struct object *obj );

static const struct object_ops fd_ops =
{
    sizeof(struct fd),        /* size */
    fd_dump,                  /* dump */
    no_get_type,              /* get_type */
    no_add_queue,             /* add_queue */
    NULL,                     /* remove_queue */
    NULL,                     /* signaled */
    NULL,                     /* satisfied */
    no_signal,                /* signal */
    no_get_fd,                /* get_fd */
    no_map_access,            /* map_access */
    default_get_sd,           /* get_sd */
    default_set_sd,           /* set_sd */
    no_lookup_name,           /* lookup_name */
    no_open_file,             /* open_file */
    no_close_handle,          /* close_handle */
    fd_destroy                /* destroy */
};

/* device object */

#define DEVICE_HASH_SIZE 7
#define INODE_HASH_SIZE 17

struct device
{
    struct object obj;        /* object header */
    struct list   entry;      /* entry in device hash list */
    dev_t         dev;        /* device number */
    int           removable;  /* removable device? (or -1 if unknown) */
    struct list   inode_hash[INODE_HASH_SIZE];  /* inodes hash table */
};

static void device_dump( struct object *obj, int verbose );
static void device_destroy( struct object *obj );

static const struct object_ops device_ops =
{
    sizeof(struct device),    /* size */
    device_dump,              /* dump */
    no_get_type,              /* get_type */
    no_add_queue,             /* add_queue */
    NULL,                     /* remove_queue */
    NULL,                     /* signaled */
    NULL,                     /* satisfied */
    no_signal,                /* signal */
    no_get_fd,                /* get_fd */
    no_map_access,            /* map_access */
    default_get_sd,           /* get_sd */
    default_set_sd,           /* set_sd */
    no_lookup_name,           /* lookup_name */
    no_open_file,             /* open_file */
    no_close_handle,          /* close_handle */
    device_destroy            /* destroy */
};

/* inode object */

struct inode
{
    struct object  obj;       /* object header */
    struct list    entry;     /* inode hash list entry */
    struct device *device;    /* device containing this inode */
    ino_t          ino;       /* inode number */
    struct list    open;      /* list of open file descriptors */
    struct list    locks;     /* list of file locks */
    struct list    closed;    /* list of file descriptors to close at destroy time */
};

static void inode_dump( struct object *obj, int verbose );
static void inode_destroy( struct object *obj );

static const struct object_ops inode_ops =
{
    sizeof(struct inode),     /* size */
    inode_dump,               /* dump */
    no_get_type,              /* get_type */
    no_add_queue,             /* add_queue */
    NULL,                     /* remove_queue */
    NULL,                     /* signaled */
    NULL,                     /* satisfied */
    no_signal,                /* signal */
    no_get_fd,                /* get_fd */
    no_map_access,            /* map_access */
    default_get_sd,           /* get_sd */
    default_set_sd,           /* set_sd */
    no_lookup_name,           /* lookup_name */
    no_open_file,             /* open_file */
    no_close_handle,          /* close_handle */
    inode_destroy             /* destroy */
};

/* file lock object */

struct file_lock
{
    struct object   obj;         /* object header */
    struct fd      *fd;          /* fd owning this lock */
    struct list     fd_entry;    /* entry in list of locks on a given fd */
    struct list     inode_entry; /* entry in inode list of locks */
    int             shared;      /* shared lock? */
    file_pos_t      start;       /* locked region is interval [start;end) */
    file_pos_t      end;
    struct process *process;     /* process owning this lock */
    struct list     proc_entry;  /* entry in list of locks owned by the process */
};

static void file_lock_dump( struct object *obj, int verbose );
static int file_lock_signaled( struct object *obj, struct thread *thread );

static const struct object_ops file_lock_ops =
{
    sizeof(struct file_lock),   /* size */
    file_lock_dump,             /* dump */
    no_get_type,                /* get_type */
    add_queue,                  /* add_queue */
    remove_queue,               /* remove_queue */
    file_lock_signaled,         /* signaled */
    no_satisfied,               /* satisfied */
    no_signal,                  /* signal */
    no_get_fd,                  /* get_fd */
    no_map_access,              /* map_access */
    default_get_sd,             /* get_sd */
    default_set_sd,             /* set_sd */
    no_lookup_name,             /* lookup_name */
    no_open_file,               /* open_file */
    no_close_handle,            /* close_handle */
    no_destroy                  /* destroy */
};

#define OFF_T_MAX       (~((file_pos_t)1 << (8*sizeof(off_t)-1)))
#define FILE_POS_T_MAX  (~(file_pos_t)0)

static file_pos_t max_unix_offset = OFF_T_MAX;
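
/* OFF_T_MAX clears only the sign bit of off_t within a file_pos_t, which
 * (with a 64-bit off_t) is the largest offset representable as a positive
 * off_t.  Byte ranges past max_unix_offset are never mirrored as Unix
 * fcntl() locks and are tracked by the server alone (see set_unix_lock). */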

#define DUMP_LONG_LONG(val) do { \
    if (sizeof(val) > sizeof(unsigned long) && (val) > ~0UL) \
        fprintf( stderr, "%lx%08lx", (unsigned long)((unsigned long long)(val) >> 32), (unsigned long)(val) ); \
    else \
        fprintf( stderr, "%lx", (unsigned long)(val) ); \
} while (0)


/****************************************************************/
/* timeouts support */

struct timeout_user
{
    struct list      entry;      /* entry in sorted timeout list */
    timeout_t        when;       /* timeout expiry (absolute time) */
    timeout_callback callback;   /* callback function */
    void            *private;    /* callback private data */
};

static struct list timeout_list = LIST_INIT(timeout_list);   /* sorted timeouts list */
timeout_t current_time;

static inline void set_current_time(void)
{
    static const timeout_t ticks_1601_to_1970 = (timeout_t)86400 * (369 * 365 + 89) * TICKS_PER_SEC;
    struct timeval now;
    gettimeofday( &now, NULL );
    current_time = (timeout_t)now.tv_sec * TICKS_PER_SEC + now.tv_usec * 10 + ticks_1601_to_1970;
}
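
/* The epoch offset above: 369 years separate 1601 (the NT epoch) from 1970
 * (the Unix epoch), containing 89 leap days (92 years divisible by 4, minus
 * the non-leap centuries 1700, 1800 and 1900).  With TICKS_PER_SEC being
 * 10000000, times are kept in 100-nanosecond ticks, so tv_usec is scaled
 * by 10. */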

/* add a timeout user */
struct timeout_user *add_timeout_user( timeout_t when, timeout_callback func, void *private )
{
    struct timeout_user *user;
    struct list *ptr;

    if (!(user = mem_alloc( sizeof(*user) ))) return NULL;
    user->when     = (when > 0) ? when : current_time - when;
    user->callback = func;
    user->private  = private;

    /* Now insert it in the linked list */

    LIST_FOR_EACH( ptr, &timeout_list )
    {
        struct timeout_user *timeout = LIST_ENTRY( ptr, struct timeout_user, entry );
        if (timeout->when >= user->when) break;
    }
    list_add_before( ptr, &user->entry );
    return user;
}

/* remove a timeout user */
void remove_timeout_user( struct timeout_user *user )
{
    list_remove( &user->entry );
    free( user );
}

/* return a text description of a timeout for debugging purposes */
const char *get_timeout_str( timeout_t timeout )
{
    static char buffer[64];
    long secs, nsecs;

    if (!timeout) return "0";
    if (timeout == TIMEOUT_INFINITE) return "infinite";

    if (timeout < 0)  /* relative */
    {
        secs = -timeout / TICKS_PER_SEC;
        nsecs = -timeout % TICKS_PER_SEC;
        sprintf( buffer, "+%ld.%07ld", secs, nsecs );
    }
    else  /* absolute */
    {
        secs = (timeout - current_time) / TICKS_PER_SEC;
        nsecs = (timeout - current_time) % TICKS_PER_SEC;
        if (nsecs < 0)
        {
            nsecs += TICKS_PER_SEC;
            secs--;
        }
        if (secs >= 0)
            sprintf( buffer, "%x%08x (+%ld.%07ld)",
                     (unsigned int)(timeout >> 32), (unsigned int)timeout, secs, nsecs );
        else
            sprintf( buffer, "%x%08x (-%ld.%07ld)",
                     (unsigned int)(timeout >> 32), (unsigned int)timeout,
                     -(secs + 1), TICKS_PER_SEC - nsecs );
    }
    return buffer;
}


/****************************************************************/
/* poll support */

static struct fd **poll_users;              /* users array */
static struct pollfd *pollfd;               /* poll fd array */
static int nb_users;                        /* count of array entries actually in use */
static int active_users;                    /* current number of active users */
static int allocated_users;                 /* count of allocated entries in the array */
static struct fd **freelist;                /* list of free entries in the array */

static int get_next_timeout(void);

static inline void fd_poll_event( struct fd *fd, int event )
{
    fd->fd_ops->poll_event( fd, event );
}

#ifdef USE_EPOLL

static int epoll_fd = -1;

static inline void init_epoll(void)
{
    epoll_fd = epoll_create( 128 );
}

/* set the events that epoll waits for on this fd; helper for set_fd_events */
static inline void set_fd_epoll_events( struct fd *fd, int user, int events )
{
    struct epoll_event ev;
    int ctl;

    if (epoll_fd == -1) return;

    if (events == -1)  /* stop waiting on this fd completely */
    {
        if (pollfd[user].fd == -1) return;  /* already removed */
        ctl = EPOLL_CTL_DEL;
    }
    else if (pollfd[user].fd == -1)
    {
        if (pollfd[user].events) return;  /* stopped waiting on it, don't restart */
        ctl = EPOLL_CTL_ADD;
    }
    else
    {
        if (pollfd[user].events == events) return;  /* nothing to do */
        ctl = EPOLL_CTL_MOD;
    }

    ev.events = events;
    memset(&ev.data, 0, sizeof(ev.data));
    ev.data.u32 = user;

    if (epoll_ctl( epoll_fd, ctl, fd->unix_fd, &ev ) == -1)
    {
        if (errno == ENOMEM)  /* not enough memory, give up on epoll */
        {
            close( epoll_fd );
            epoll_fd = -1;
        }
        else perror( "epoll_ctl" );  /* should not happen */
    }
}

static inline void remove_epoll_user( struct fd *fd, int user )
{
    if (epoll_fd == -1) return;

    if (pollfd[user].fd != -1)
    {
        struct epoll_event dummy;
        epoll_ctl( epoll_fd, EPOLL_CTL_DEL, fd->unix_fd, &dummy );
    }
}

static inline void main_loop_epoll(void)
{
    int i, ret, timeout;
    struct epoll_event events[128];

    assert( POLLIN == EPOLLIN );
    assert( POLLOUT == EPOLLOUT );
    assert( POLLERR == EPOLLERR );
    assert( POLLHUP == EPOLLHUP );

    if (epoll_fd == -1) return;

    while (active_users)
    {
        timeout = get_next_timeout();

        if (!active_users) break;  /* last user removed by a timeout */
        if (epoll_fd == -1) break;  /* an error occurred with epoll */

        ret = epoll_wait( epoll_fd, events, sizeof(events)/sizeof(events[0]), timeout );
        set_current_time();

        /* put the events into the pollfd array first, like poll does */
        for (i = 0; i < ret; i++)
        {
            int user = events[i].data.u32;
            pollfd[user].revents = events[i].events;
        }

        /* read events from the pollfd array, as set_fd_events may modify them */
        for (i = 0; i < ret; i++)
        {
            int user = events[i].data.u32;
            if (pollfd[user].revents) fd_poll_event( poll_users[user], pollfd[user].revents );
        }
    }
}

#elif defined(HAVE_KQUEUE)

static int kqueue_fd = -1;

static inline void init_epoll(void)
{
#ifdef __APPLE__ /* kqueue support is broken in Mac OS < 10.5 */
    int mib[2];
    char release[32];
    size_t len = sizeof(release);

    mib[0] = CTL_KERN;
    mib[1] = KERN_OSRELEASE;
    if (sysctl( mib, 2, release, &len, NULL, 0 ) == -1) return;
    if (atoi(release) < 9) return;
#endif
    kqueue_fd = kqueue();
}

static inline void set_fd_epoll_events( struct fd *fd, int user, int events )
{
    struct kevent ev[2];

    if (kqueue_fd == -1) return;

    EV_SET( &ev[0], fd->unix_fd, EVFILT_READ, 0, NOTE_LOWAT, 1, (void *)(long)user );
    EV_SET( &ev[1], fd->unix_fd, EVFILT_WRITE, 0, NOTE_LOWAT, 1, (void *)(long)user );

    if (events == -1)  /* stop waiting on this fd completely */
    {
        if (pollfd[user].fd == -1) return;  /* already removed */
        ev[0].flags |= EV_DELETE;
        ev[1].flags |= EV_DELETE;
    }
    else if (pollfd[user].fd == -1)
    {
        if (pollfd[user].events) return;  /* stopped waiting on it, don't restart */
        ev[0].flags |= EV_ADD | ((events & POLLIN) ? EV_ENABLE : EV_DISABLE);
        ev[1].flags |= EV_ADD | ((events & POLLOUT) ? EV_ENABLE : EV_DISABLE);
    }
    else
    {
        if (pollfd[user].events == events) return;  /* nothing to do */
        ev[0].flags |= (events & POLLIN) ? EV_ENABLE : EV_DISABLE;
        ev[1].flags |= (events & POLLOUT) ? EV_ENABLE : EV_DISABLE;
    }

    if (kevent( kqueue_fd, ev, 2, NULL, 0, NULL ) == -1)
    {
        if (errno == ENOMEM)  /* not enough memory, give up on kqueue */
        {
            close( kqueue_fd );
            kqueue_fd = -1;
        }
        else perror( "kevent" );  /* should not happen */
    }
}

static inline void remove_epoll_user( struct fd *fd, int user )
{
    if (kqueue_fd == -1) return;

    if (pollfd[user].fd != -1)
    {
        struct kevent ev[2];

        EV_SET( &ev[0], fd->unix_fd, EVFILT_READ, EV_DELETE, 0, 0, 0 );
        EV_SET( &ev[1], fd->unix_fd, EVFILT_WRITE, EV_DELETE, 0, 0, 0 );
        kevent( kqueue_fd, ev, 2, NULL, 0, NULL );
    }
}

static inline void main_loop_epoll(void)
{
    int i, ret, timeout;
    struct kevent events[128];

    if (kqueue_fd == -1) return;

    while (active_users)
    {
        timeout = get_next_timeout();

        if (!active_users) break;  /* last user removed by a timeout */
        if (kqueue_fd == -1) break;  /* an error occurred with kqueue */

        if (timeout != -1)
        {
            struct timespec ts;

            ts.tv_sec = timeout / 1000;
            ts.tv_nsec = (timeout % 1000) * 1000000;
            ret = kevent( kqueue_fd, NULL, 0, events, sizeof(events)/sizeof(events[0]), &ts );
        }
        else ret = kevent( kqueue_fd, NULL, 0, events, sizeof(events)/sizeof(events[0]), NULL );

        set_current_time();

        /* put the events into the pollfd array first, like poll does */
        for (i = 0; i < ret; i++)
        {
            long user = (long)events[i].udata;
            pollfd[user].revents = 0;
        }
        for (i = 0; i < ret; i++)
        {
            long user = (long)events[i].udata;
            if (events[i].filter == EVFILT_READ) pollfd[user].revents |= POLLIN;
            else if (events[i].filter == EVFILT_WRITE) pollfd[user].revents |= POLLOUT;
            if (events[i].flags & EV_EOF) pollfd[user].revents |= POLLHUP;
            if (events[i].flags & EV_ERROR) pollfd[user].revents |= POLLERR;
        }

        /* read events from the pollfd array, as set_fd_events may modify them */
        for (i = 0; i < ret; i++)
        {
            long user = (long)events[i].udata;
            if (pollfd[user].revents) fd_poll_event( poll_users[user], pollfd[user].revents );
            pollfd[user].revents = 0;
        }
    }
}

#else /* HAVE_KQUEUE */

static inline void init_epoll(void) { }
static inline void set_fd_epoll_events( struct fd *fd, int user, int events ) { }
static inline void remove_epoll_user( struct fd *fd, int user ) { }
static inline void main_loop_epoll(void) { }

#endif /* USE_EPOLL */


/* add a user in the poll array and return its index, or -1 on failure */
static int add_poll_user( struct fd *fd )
{
    int ret;
    if (freelist)
    {
        ret = freelist - poll_users;
        freelist = (struct fd **)poll_users[ret];
    }
    else
    {
        if (nb_users == allocated_users)
        {
            struct fd **newusers;
            struct pollfd *newpoll;
            int new_count = allocated_users ? (allocated_users + allocated_users / 2) : 16;
            if (!(newusers = realloc( poll_users, new_count * sizeof(*poll_users) ))) return -1;
            if (!(newpoll = realloc( pollfd, new_count * sizeof(*pollfd) )))
            {
                if (allocated_users)
                    poll_users = newusers;
                else
                    free( newusers );
                return -1;
            }
            poll_users = newusers;
            pollfd = newpoll;
            if (!allocated_users) init_epoll();
            allocated_users = new_count;
        }
        ret = nb_users++;
    }
    pollfd[ret].fd = -1;
    pollfd[ret].events = 0;
    pollfd[ret].revents = 0;
    poll_users[ret] = fd;
    active_users++;
    return ret;
}

/* remove a user from the poll list */
static void remove_poll_user( struct fd *fd, int user )
{
    assert( user >= 0 );
    assert( poll_users[user] == fd );

    remove_epoll_user( fd, user );
    pollfd[user].fd = -1;
    pollfd[user].events = 0;
    pollfd[user].revents = 0;
    poll_users[user] = (struct fd *)freelist;
    freelist = &poll_users[user];
    active_users--;
}
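
/* Note: free slots in poll_users form an intrusive singly-linked list; each
 * free slot stores (cast to a struct fd pointer) the address of the next
 * free slot, with freelist pointing at the head, so allocating and releasing
 * a poll index are both O(1). */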

/* process pending timeouts and return the time until the next timeout, in milliseconds */
static int get_next_timeout(void)
{
    if (!list_empty( &timeout_list ))
    {
        struct list expired_list, *ptr;

        /* first remove all expired timers from the list */

        list_init( &expired_list );
        while ((ptr = list_head( &timeout_list )) != NULL)
        {
            struct timeout_user *timeout = LIST_ENTRY( ptr, struct timeout_user, entry );

            if (timeout->when <= current_time)
            {
                list_remove( &timeout->entry );
                list_add_tail( &expired_list, &timeout->entry );
            }
            else break;
        }

        /* now call the callback for all the removed timers */

        while ((ptr = list_head( &expired_list )) != NULL)
        {
            struct timeout_user *timeout = LIST_ENTRY( ptr, struct timeout_user, entry );
            list_remove( &timeout->entry );
            timeout->callback( timeout->private );
            free( timeout );
        }

        if ((ptr = list_head( &timeout_list )) != NULL)
        {
            struct timeout_user *timeout = LIST_ENTRY( ptr, struct timeout_user, entry );
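            /* convert the remaining 100ns ticks to milliseconds, rounding up
             * so we don't wake up and spin before the timeout is actually due */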
            int diff = (timeout->when - current_time + 9999) / 10000;
            if (diff < 0) diff = 0;
            return diff;
        }
    }
    return -1;  /* no pending timeouts */
}

/* server main poll() loop */
void main_loop(void)
{
    int i, ret, timeout;

    set_current_time();
    server_start_time = current_time;

    main_loop_epoll();
    /* fall through to normal poll loop */

    while (active_users)
    {
        timeout = get_next_timeout();

        if (!active_users) break;  /* last user removed by a timeout */

        ret = poll( pollfd, nb_users, timeout );
        set_current_time();

        if (ret > 0)
        {
            for (i = 0; i < nb_users; i++)
            {
                if (pollfd[i].revents)
                {
                    fd_poll_event( poll_users[i], pollfd[i].revents );
                    if (!--ret) break;
                }
            }
        }
    }
}


/****************************************************************/
/* device functions */

static struct list device_hash[DEVICE_HASH_SIZE];

static int is_device_removable( dev_t dev, int unix_fd )
{
#if defined(linux) && defined(HAVE_FSTATFS)
    struct statfs stfs;

    /* check for floppy disk */
    if (major(dev) == FLOPPY_MAJOR) return 1;

    if (fstatfs( unix_fd, &stfs ) == -1) return 0;
    return (stfs.f_type == 0x9660 ||    /* iso9660 */
            stfs.f_type == 0x9fa1 ||    /* supermount */
            stfs.f_type == 0x15013346); /* udf */
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__APPLE__)
    struct statfs stfs;

    if (fstatfs( unix_fd, &stfs ) == -1) return 0;
    return (!strcmp("cd9660", stfs.f_fstypename) || !strcmp("udf", stfs.f_fstypename));
#elif defined(__NetBSD__)
    struct statvfs stfs;

    if (fstatvfs( unix_fd, &stfs ) == -1) return 0;
    return (!strcmp("cd9660", stfs.f_fstypename) || !strcmp("udf", stfs.f_fstypename));
#elif defined(sun)
# include <sys/dkio.h>
# include <sys/vtoc.h>
    struct dk_cinfo dkinf;
    if (ioctl( unix_fd, DKIOCINFO, &dkinf ) == -1) return 0;
    return (dkinf.dki_ctype == DKC_CDROM ||
            dkinf.dki_ctype == DKC_NCRFLOPPY ||
            dkinf.dki_ctype == DKC_SMSFLOPPY ||
            dkinf.dki_ctype == DKC_INTEL82072 ||
            dkinf.dki_ctype == DKC_INTEL82077);
#else
    return 0;
#endif
}

/* retrieve the device object for a given fd, creating it if needed */
static struct device *get_device( dev_t dev, int unix_fd )
{
    struct device *device;
    unsigned int i, hash = dev % DEVICE_HASH_SIZE;

    if (device_hash[hash].next)
    {
        LIST_FOR_EACH_ENTRY( device, &device_hash[hash], struct device, entry )
            if (device->dev == dev) return (struct device *)grab_object( device );
    }
    else list_init( &device_hash[hash] );

    /* not found, create it */

    if (unix_fd == -1) return NULL;
    if ((device = alloc_object( &device_ops )))
    {
        device->dev = dev;
        device->removable = is_device_removable( dev, unix_fd );
        for (i = 0; i < INODE_HASH_SIZE; i++) list_init( &device->inode_hash[i] );
        list_add_head( &device_hash[hash], &device->entry );
    }
    return device;
}

static void device_dump( struct object *obj, int verbose )
{
    struct device *device = (struct device *)obj;
    fprintf( stderr, "Device dev=" );
    DUMP_LONG_LONG( device->dev );
    fprintf( stderr, "\n" );
}

static void device_destroy( struct object *obj )
{
    struct device *device = (struct device *)obj;
    unsigned int i;

    for (i = 0; i < INODE_HASH_SIZE; i++)
        assert( list_empty(&device->inode_hash[i]) );

    list_remove( &device->entry );  /* remove it from the hash table */
}


/****************************************************************/
/* inode functions */

/* close all pending file descriptors in the closed list */
static void inode_close_pending( struct inode *inode, int keep_unlinks )
{
    struct list *ptr = list_head( &inode->closed );

    while (ptr)
    {
        struct closed_fd *fd = LIST_ENTRY( ptr, struct closed_fd, entry );
        struct list *next = list_next( &inode->closed, ptr );

        if (fd->unix_fd != -1)
        {
            close( fd->unix_fd );
            fd->unix_fd = -1;
        }
        if (!keep_unlinks || !fd->unlink[0])  /* get rid of it unless there's an unlink pending on that file */
        {
            list_remove( ptr );
            free( fd );
        }
        ptr = next;
    }
}

static void inode_dump( struct object *obj, int verbose )
{
    struct inode *inode = (struct inode *)obj;
    fprintf( stderr, "Inode device=%p ino=", inode->device );
    DUMP_LONG_LONG( inode->ino );
    fprintf( stderr, "\n" );
}

static void inode_destroy( struct object *obj )
{
    struct inode *inode = (struct inode *)obj;
    struct list *ptr;

    assert( list_empty(&inode->open) );
    assert( list_empty(&inode->locks) );

    list_remove( &inode->entry );

    while ((ptr = list_head( &inode->closed )))
    {
        struct closed_fd *fd = LIST_ENTRY( ptr, struct closed_fd, entry );
        list_remove( ptr );
        if (fd->unix_fd != -1) close( fd->unix_fd );
        if (fd->unlink[0])
        {
            /* make sure it is still the same file */
            struct stat st;
            if (!stat( fd->unlink, &st ) && st.st_dev == inode->device->dev && st.st_ino == inode->ino)
            {
                if (S_ISDIR(st.st_mode)) rmdir( fd->unlink );
                else unlink( fd->unlink );
            }
        }
        free( fd );
    }
    release_object( inode->device );
}

/* retrieve the inode object for a given fd, creating it if needed */
static struct inode *get_inode( dev_t dev, ino_t ino, int unix_fd )
{
    struct device *device;
    struct inode *inode;
    unsigned int hash = ino % INODE_HASH_SIZE;

    if (!(device = get_device( dev, unix_fd ))) return NULL;

    LIST_FOR_EACH_ENTRY( inode, &device->inode_hash[hash], struct inode, entry )
    {
        if (inode->ino == ino)
        {
            release_object( device );
            return (struct inode *)grab_object( inode );
        }
    }

    /* not found, create it */
    if ((inode = alloc_object( &inode_ops )))
    {
        inode->device = device;
        inode->ino    = ino;
        list_init( &inode->open );
        list_init( &inode->locks );
        list_init( &inode->closed );
        list_add_head( &device->inode_hash[hash], &inode->entry );
    }
    else release_object( device );

    return inode;
}

/* add fd to the inode list of file descriptors to close */
static void inode_add_closed_fd( struct inode *inode, struct closed_fd *fd )
{
    if (!list_empty( &inode->locks ))
    {
        list_add_head( &inode->closed, &fd->entry );
    }
    else if (fd->unlink[0])  /* close the fd but keep the structure around for unlink */
    {
        if (fd->unix_fd != -1) close( fd->unix_fd );
        fd->unix_fd = -1;
        list_add_head( &inode->closed, &fd->entry );
    }
    else  /* no locks on this inode and no unlink, get rid of the fd */
    {
        if (fd->unix_fd != -1) close( fd->unix_fd );
        free( fd );
    }
}


/****************************************************************/
/* file lock functions */

static void file_lock_dump( struct object *obj, int verbose )
{
    struct file_lock *lock = (struct file_lock *)obj;
    fprintf( stderr, "Lock %s fd=%p proc=%p start=",
             lock->shared ? "shared" : "excl", lock->fd, lock->process );
    DUMP_LONG_LONG( lock->start );
    fprintf( stderr, " end=" );
    DUMP_LONG_LONG( lock->end );
    fprintf( stderr, "\n" );
}

static int file_lock_signaled( struct object *obj, struct thread *thread )
{
    struct file_lock *lock = (struct file_lock *)obj;
    /* lock is signaled if it has lost its owner */
    return !lock->process;
}

/* set (or remove) a Unix lock if possible for the given range */
static int set_unix_lock( struct fd *fd, file_pos_t start, file_pos_t end, int type )
{
    struct flock fl;

    if (!fd->fs_locks) return 1;  /* no fs locks possible for this fd */
    for (;;)
    {
        if (start == end) return 1;  /* can't set zero-byte lock */
        if (start > max_unix_offset) return 1;  /* ignore it */
        fl.l_type   = type;
        fl.l_whence = SEEK_SET;
        fl.l_start  = start;
        if (!end || end > max_unix_offset) fl.l_len = 0;
        else fl.l_len = end - start;
        if (fcntl( fd->unix_fd, F_SETLK, &fl ) != -1) return 1;

        switch(errno)
        {
        case EACCES:
            /* check whether locks work at all on this file system */
            if (fcntl( fd->unix_fd, F_GETLK, &fl ) != -1)
            {
                set_error( STATUS_FILE_LOCK_CONFLICT );
                return 0;
            }
            /* fall through */
        case EIO:
        case ENOLCK:
            /* no locking on this fs, just ignore it */
            fd->fs_locks = 0;
            return 1;
        case EAGAIN:
            set_error( STATUS_FILE_LOCK_CONFLICT );
            return 0;
        case EBADF:
            /* this can happen if we try to set a write lock on a read-only file */
            /* we just ignore that error */
            if (fl.l_type == F_WRLCK) return 1;
            set_error( STATUS_ACCESS_DENIED );
            return 0;
#ifdef EOVERFLOW
        case EOVERFLOW:
#endif
        case EINVAL:
            /* this can happen if off_t is 64-bit but the kernel only supports 32-bit */
            /* in that case we shrink the limit and retry */
            if (max_unix_offset > INT_MAX)
            {
                max_unix_offset = INT_MAX;
                break;  /* retry */
            }
            /* fall through */
        default:
            file_set_error();
            return 0;
        }
    }
}

/* check if interval [start;end) overlaps the lock */
static inline int lock_overlaps( struct file_lock *lock, file_pos_t start, file_pos_t end )
{
    if (lock->end && start >= lock->end) return 0;
    if (end && lock->start >= end) return 0;
    return 1;
}
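
/* Note: throughout the lock code an end offset of 0 means the range extends
 * to infinity, which is why both comparisons above are guarded by a non-zero
 * test on the end value before being applied. */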

/* remove Unix locks for all bytes in the specified area that are no longer locked */
static void remove_unix_locks( struct fd *fd, file_pos_t start, file_pos_t end )
{
    struct hole
    {
        struct hole *next;
        struct hole *prev;
        file_pos_t   start;
        file_pos_t   end;
    } *first, *cur, *next, *buffer;

    struct list *ptr;
    int count = 0;

    if (!fd->inode) return;
    if (!fd->fs_locks) return;
    if (start == end || start > max_unix_offset) return;
    if (!end || end > max_unix_offset) end = max_unix_offset + 1;

    /* count the number of locks overlapping the specified area */

    LIST_FOR_EACH( ptr, &fd->inode->locks )
    {
        struct file_lock *lock = LIST_ENTRY( ptr, struct file_lock, inode_entry );
        if (lock->start == lock->end) continue;
        if (lock_overlaps( lock, start, end )) count++;
    }

    if (!count)  /* no locks at all, we can unlock everything */
    {
        set_unix_lock( fd, start, end, F_UNLCK );
        return;
    }

    /* allocate space for the list of holes */
    /* max. number of holes is number of locks + 1 */

    if (!(buffer = malloc( sizeof(*buffer) * (count+1) ))) return;
    first = buffer;
    first->next  = NULL;
    first->prev  = NULL;
    first->start = start;
    first->end   = end;
    next = first + 1;

    /* build a sorted list of unlocked holes in the specified area */

    LIST_FOR_EACH( ptr, &fd->inode->locks )
    {
        struct file_lock *lock = LIST_ENTRY( ptr, struct file_lock, inode_entry );
        if (lock->start == lock->end) continue;
        if (!lock_overlaps( lock, start, end )) continue;

        /* go through all the holes touched by this lock */
        for (cur = first; cur; cur = cur->next)
        {
            if (cur->end <= lock->start) continue;  /* hole is before start of lock */
            if (lock->end && cur->start >= lock->end) break;  /* hole is after end of lock */

            /* now we know that lock is overlapping hole */

            if (cur->start >= lock->start)  /* lock starts before hole, shrink from start */
            {
                cur->start = lock->end;
                if (cur->start && cur->start < cur->end) break;  /* done with this lock */
                /* now hole is empty, remove it */
                if (cur->next) cur->next->prev = cur->prev;
                if (cur->prev) cur->prev->next = cur->next;
                else if (!(first = cur->next)) goto done;  /* no more holes at all */
            }
            else if (!lock->end || cur->end <= lock->end)  /* lock larger than hole, shrink from end */
            {
                cur->end = lock->start;
                assert( cur->start < cur->end );
            }
            else  /* lock is in the middle of hole, split hole in two */
            {
                next->prev = cur;
                next->next = cur->next;
                cur->next = next;
                next->start = lock->end;
                next->end = cur->end;
                cur->end = lock->start;
                assert( next->start < next->end );
                assert( cur->end < next->start );
                next++;
                break;  /* done with this lock */
            }
        }
    }

    /* clear Unix locks for all the holes */

    for (cur = first; cur; cur = cur->next)
        set_unix_lock( fd, cur->start, cur->end, F_UNLCK );

 done:
    free( buffer );
}

/* create a new lock on a fd */
static struct file_lock *add_lock( struct fd *fd, int shared, file_pos_t start, file_pos_t end )
{
    struct file_lock *lock;

    if (!(lock = alloc_object( &file_lock_ops ))) return NULL;
    lock->shared  = shared;
    lock->start   = start;
    lock->end     = end;
    lock->fd      = fd;
    lock->process = current->process;

    /* now try to set a Unix lock */
    if (!set_unix_lock( lock->fd, lock->start, lock->end, lock->shared ? F_RDLCK : F_WRLCK ))
    {
        release_object( lock );
        return NULL;
    }
    list_add_head( &fd->locks, &lock->fd_entry );
    list_add_head( &fd->inode->locks, &lock->inode_entry );
    list_add_head( &lock->process->locks, &lock->proc_entry );
    return lock;
}

/* remove an existing lock */
static void remove_lock( struct file_lock *lock, int remove_unix )
{
    struct inode *inode = lock->fd->inode;

    list_remove( &lock->fd_entry );
    list_remove( &lock->inode_entry );
    list_remove( &lock->proc_entry );
    if (remove_unix) remove_unix_locks( lock->fd, lock->start, lock->end );
    if (list_empty( &inode->locks )) inode_close_pending( inode, 1 );
    lock->process = NULL;
    wake_up( &lock->obj, 0 );
    release_object( lock );
}

/* remove all locks owned by a given process */
void remove_process_locks( struct process *process )
{
    struct list *ptr;

    while ((ptr = list_head( &process->locks )))
    {
        struct file_lock *lock = LIST_ENTRY( ptr, struct file_lock, proc_entry );
        remove_lock( lock, 1 );  /* this removes it from the list */
    }
}

/* remove all locks on a given fd */
static void remove_fd_locks( struct fd *fd )
{
    file_pos_t start = FILE_POS_T_MAX, end = 0;
    struct list *ptr;

    while ((ptr = list_head( &fd->locks )))
    {
        struct file_lock *lock = LIST_ENTRY( ptr, struct file_lock, fd_entry );
        if (lock->start < start) start = lock->start;
        if (!lock->end || lock->end > end) end = lock->end - 1;
        remove_lock( lock, 0 );
    }
    if (start < end) remove_unix_locks( fd, start, end + 1 );
}

/* add a lock on an fd */
/* returns handle to wait on */
obj_handle_t lock_fd( struct fd *fd, file_pos_t start, file_pos_t count, int shared, int wait )
{
    struct list *ptr;
    file_pos_t end = start + count;

    if (!fd->inode)  /* not a regular file */
    {
        set_error( STATUS_INVALID_DEVICE_REQUEST );
        return 0;
    }

    /* don't allow wrapping locks */
    if (end && end < start)
    {
        set_error( STATUS_INVALID_PARAMETER );
        return 0;
    }

    /* check if another lock on that file overlaps the area */
    LIST_FOR_EACH( ptr, &fd->inode->locks )
    {
        struct file_lock *lock = LIST_ENTRY( ptr, struct file_lock, inode_entry );
        if (!lock_overlaps( lock, start, end )) continue;
        if (lock->shared && shared) continue;
        /* found one */
        if (!wait)
        {
            set_error( STATUS_FILE_LOCK_CONFLICT );
            return 0;
        }
        set_error( STATUS_PENDING );
        return alloc_handle( current->process, lock, SYNCHRONIZE, 0 );
    }

    /* not found, add it */
    if (add_lock( fd, shared, start, end )) return 0;
    if (get_error() == STATUS_FILE_LOCK_CONFLICT)
    {
        /* Unix lock conflict -> tell client to wait and retry */
        if (wait) set_error( STATUS_PENDING );
    }
    return 0;
}

/* remove a lock on an fd */
void unlock_fd( struct fd *fd, file_pos_t start, file_pos_t count )
{
    struct list *ptr;
    file_pos_t end = start + count;

    /* find an existing lock with the exact same parameters */
    LIST_FOR_EACH( ptr, &fd->locks )
    {
        struct file_lock *lock = LIST_ENTRY( ptr, struct file_lock, fd_entry );
        if ((lock->start == start) && (lock->end == end))
        {
            remove_lock( lock, 1 );
            return;
        }
    }
    set_error( STATUS_FILE_LOCK_CONFLICT );
}


/****************************************************************/
/* file descriptor functions */

static void fd_dump( struct object *obj, int verbose )
{
    struct fd *fd = (struct fd *)obj;
    fprintf( stderr, "Fd unix_fd=%d user=%p options=%08x", fd->unix_fd, fd->user, fd->options );
    if (fd->inode) fprintf( stderr, " inode=%p unlink='%s'", fd->inode, fd->closed->unlink );
    fprintf( stderr, "\n" );
}

static void fd_destroy( struct object *obj )
{
    struct fd *fd = (struct fd *)obj;

    free_async_queue( fd->read_q );
    free_async_queue( fd->write_q );
    free_async_queue( fd->wait_q );

    if (fd->completion) release_object( fd->completion );
    remove_fd_locks( fd );
    free( fd->unix_name );
    list_remove( &fd->inode_entry );
    if (fd->poll_index != -1) remove_poll_user( fd, fd->poll_index );
    if (fd->inode)
    {
        inode_add_closed_fd( fd->inode, fd->closed );
        release_object( fd->inode );
    }
    else  /* no inode, close it right away */
    {
        if (fd->unix_fd != -1) close( fd->unix_fd );
    }
}

/* set the events that select waits for on this fd */
void set_fd_events( struct fd *fd, int events )
{
    int user = fd->poll_index;
    assert( poll_users[user] == fd );

    set_fd_epoll_events( fd, user, events );

    if (events == -1)  /* stop waiting on this fd completely */
    {
        pollfd[user].fd = -1;
        pollfd[user].events = POLLERR;
        pollfd[user].revents = 0;
    }
    else if (pollfd[user].fd != -1 || !pollfd[user].events)
    {
        pollfd[user].fd = fd->unix_fd;
        pollfd[user].events = events;
    }
}

/* prepare an fd for unmounting its corresponding device */
static inline void unmount_fd( struct fd *fd )
{
    assert( fd->inode );

    async_wake_up( fd->read_q, STATUS_VOLUME_DISMOUNTED );
    async_wake_up( fd->write_q, STATUS_VOLUME_DISMOUNTED );

    if (fd->poll_index != -1) set_fd_events( fd, -1 );

    if (fd->unix_fd != -1) close( fd->unix_fd );

    fd->unix_fd = -1;
    fd->no_fd_status = STATUS_VOLUME_DISMOUNTED;
    fd->closed->unix_fd = -1;
    fd->closed->unlink[0] = 0;

    /* stop using Unix locks on this fd (existing locks have been removed by close) */
    fd->fs_locks = 0;
}

/* allocate an fd object, without setting the unix fd yet */
static struct fd *alloc_fd_object(void)
{
    struct fd *fd = alloc_object( &fd_ops );

    if (!fd) return NULL;

    fd->fd_ops     = NULL;
    fd->user       = NULL;
    fd->inode      = NULL;
    fd->closed     = NULL;
    fd->access     = 0;
    fd->options    = 0;
    fd->sharing    = 0;
    fd->unix_fd    = -1;
    fd->unix_name  = NULL;
    fd->signaled   = 1;
    fd->fs_locks   = 1;
    fd->poll_index = -1;
    fd->read_q     = NULL;
    fd->write_q    = NULL;
    fd->wait_q     = NULL;
    fd->completion = NULL;
    list_init( &fd->inode_entry );
    list_init( &fd->locks );

    if ((fd->poll_index = add_poll_user( fd )) == -1)
    {
        release_object( fd );
        return NULL;
    }
    return fd;
}

/* allocate a pseudo fd object, for objects that need to behave like files but don't have a unix fd */
struct fd *alloc_pseudo_fd( const struct fd_ops *fd_user_ops, struct object *user, unsigned int options )
{
    struct fd *fd = alloc_object( &fd_ops );

    if (!fd) return NULL;

    fd->fd_ops     = fd_user_ops;
    fd->user       = user;
    fd->inode      = NULL;
    fd->closed     = NULL;
    fd->access     = 0;
    fd->options    = options;
    fd->sharing    = 0;
    fd->unix_name  = NULL;
    fd->unix_fd    = -1;
    fd->signaled   = 0;
    fd->fs_locks   = 0;
    fd->poll_index = -1;
    fd->read_q     = NULL;
    fd->write_q    = NULL;
    fd->wait_q     = NULL;
    fd->completion = NULL;
    fd->no_fd_status = STATUS_BAD_DEVICE_TYPE;
    list_init( &fd->inode_entry );
    list_init( &fd->locks );
    return fd;
}

/* set the status to return when the fd has no associated unix fd */
void set_no_fd_status( struct fd *fd, unsigned int status )
{
    fd->no_fd_status = status;
}

/* check if the desired access is possible without violating */
/* the sharing mode of other opens of the same file */
static int check_sharing( struct fd *fd, unsigned int access, unsigned int sharing )
{
    unsigned int existing_sharing = FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE;
    unsigned int existing_access = 0;
    struct list *ptr;

    /* if access mode is 0, sharing mode is ignored */
    if (!access) sharing = existing_sharing;
    fd->access = access;
    fd->sharing = sharing;

    LIST_FOR_EACH( ptr, &fd->inode->open )
    {
        struct fd *fd_ptr = LIST_ENTRY( ptr, struct fd, inode_entry );
        if (fd_ptr != fd)
        {
            existing_sharing &= fd_ptr->sharing;
            existing_access  |= fd_ptr->access;
        }
    }

    if ((access & FILE_UNIX_READ_ACCESS) && !(existing_sharing & FILE_SHARE_READ)) return 0;
    if ((access & FILE_UNIX_WRITE_ACCESS) && !(existing_sharing & FILE_SHARE_WRITE)) return 0;
    if ((access & DELETE) && !(existing_sharing & FILE_SHARE_DELETE)) return 0;
    if ((existing_access & FILE_UNIX_READ_ACCESS) && !(sharing & FILE_SHARE_READ)) return 0;
    if ((existing_access & FILE_UNIX_WRITE_ACCESS) && !(sharing & FILE_SHARE_WRITE)) return 0;
    if ((existing_access & DELETE) && !(sharing & FILE_SHARE_DELETE)) return 0;
    return 1;
}
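
/* Example of the check above: if an existing open has access=FILE_READ_DATA
 * and sharing=FILE_SHARE_READ, then a second open asking for write access
 * fails (the existing open does not share write), and a second open that
 * does not set FILE_SHARE_READ also fails (the existing open reads the
 * file). */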

/* sets the user of an fd that previously had no user */
void set_fd_user( struct fd *fd, const struct fd_ops *user_ops, struct object *user )
{
    assert( fd->fd_ops == NULL );
    fd->fd_ops = user_ops;
    fd->user   = user;
}

/* open() wrapper that returns a struct fd with no fd user set */
struct fd *open_fd( const char *name, int flags, mode_t *mode, unsigned int access,
                    unsigned int sharing, unsigned int options )
{
    struct stat st;
    struct closed_fd *closed_fd;
    struct fd *fd;
    const char *unlink_name = "";
    int rw_mode;

    if ((options & FILE_DELETE_ON_CLOSE) && !(access & DELETE))
    {
        set_error( STATUS_INVALID_PARAMETER );
        return NULL;
    }

    if (!(fd = alloc_fd_object())) return NULL;

    fd->options = options;
    if (options & FILE_DELETE_ON_CLOSE) unlink_name = name;
    if (!(closed_fd = mem_alloc( sizeof(*closed_fd) + strlen(unlink_name) )))
    {
        release_object( fd );
        return NULL;
    }

    /* create the directory if needed */
    if ((options & FILE_DIRECTORY_FILE) && (flags & O_CREAT))
    {
        if (mkdir( name, 0777 ) == -1)
        {
            if (errno != EEXIST || (flags & O_EXCL))
            {
                file_set_error();
                goto error;
            }
        }
        flags &= ~(O_CREAT | O_EXCL | O_TRUNC);
    }

    if ((access & FILE_UNIX_WRITE_ACCESS) && !(options & FILE_DIRECTORY_FILE))
    {
        if (access & FILE_UNIX_READ_ACCESS) rw_mode = O_RDWR;
        else rw_mode = O_WRONLY;
    }
    else rw_mode = O_RDONLY;

    if (!(fd->unix_name = mem_alloc( strlen(name) + 1 ))) goto error;
    strcpy( fd->unix_name, name );

    if ((fd->unix_fd = open( name, rw_mode | (flags & ~O_TRUNC), *mode )) == -1)
    {
        /* if we tried to open a directory for write access, retry read-only */
        if (errno != EISDIR ||
            !(access & FILE_UNIX_WRITE_ACCESS) ||
            (fd->unix_fd = open( name, O_RDONLY | (flags & ~(O_TRUNC | O_CREAT | O_EXCL)), *mode )) == -1)
        {
            file_set_error();
            goto error;
        }
    }

    closed_fd->unix_fd = fd->unix_fd;
    closed_fd->unlink[0] = 0;
    fstat( fd->unix_fd, &st );
    *mode = st.st_mode;

    /* only bother with an inode for normal files and directories */
    if (S_ISREG(st.st_mode) || S_ISDIR(st.st_mode))
    {
        struct inode *inode = get_inode( st.st_dev, st.st_ino, fd->unix_fd );

        if (!inode)
        {
            /* we can close the fd because there are no others open on the same file,
             * otherwise we wouldn't have failed to allocate a new inode
             */
            goto error;
        }
        fd->inode = inode;
        fd->closed = closed_fd;
        list_add_head( &inode->open, &fd->inode_entry );

        /* check directory options */
        if ((options & FILE_DIRECTORY_FILE) && !S_ISDIR(st.st_mode))
        {
            release_object( fd );
            set_error( STATUS_NOT_A_DIRECTORY );
            return NULL;
        }
        if ((options & FILE_NON_DIRECTORY_FILE) && S_ISDIR(st.st_mode))
        {
            release_object( fd );
            set_error( STATUS_FILE_IS_A_DIRECTORY );
            return NULL;
        }
        if (!check_sharing( fd, access, sharing ))
        {
            release_object( fd );
            set_error( STATUS_SHARING_VIOLATION );
            return NULL;
        }
        strcpy( closed_fd->unlink, unlink_name );
        if (flags & O_TRUNC) ftruncate( fd->unix_fd, 0 );
    }
    else  /* special file */
    {
        if (options & FILE_DIRECTORY_FILE)
        {
            set_error( STATUS_NOT_A_DIRECTORY );
            goto error;
        }
        if (unlink_name[0])  /* we can't unlink special files */
        {
            set_error( STATUS_INVALID_PARAMETER );
            goto error;
        }
        free( closed_fd );
    }
    return fd;

error:
    release_object( fd );
    free( closed_fd );
    return NULL;
}

/* create an fd for an anonymous file */
/* if the function fails the unix fd is closed */
struct fd *create_anonymous_fd( const struct fd_ops *fd_user_ops, int unix_fd, struct object *user,
                                unsigned int options )
{
    struct fd *fd = alloc_fd_object();

    if (fd)
    {
        set_fd_user( fd, fd_user_ops, user );
        fd->unix_fd = unix_fd;
        fd->options = options;
        return fd;
    }
    close( unix_fd );
    return NULL;
}

/* retrieve the object that is using an fd */
void *get_fd_user( struct fd *fd )
{
    return fd->user;
}

/* retrieve the opening options for the fd */
unsigned int get_fd_options( struct fd *fd )
{
    return fd->options;
}

/* retrieve the unix fd for an object */
int get_unix_fd( struct fd *fd )
{
    if (fd->unix_fd == -1) set_error( fd->no_fd_status );
    return fd->unix_fd;
}

/* check if two file descriptors point to the same file */
int is_same_file_fd( struct fd *fd1, struct fd *fd2 )
{
    return fd1->inode == fd2->inode;
}

/* check if fd is on a removable device */
int is_fd_removable( struct fd *fd )
{
    return (fd->inode && fd->inode->device->removable);
}

/* set or clear the fd signaled state */
void set_fd_signaled( struct fd *fd, int signaled )
{
    fd->signaled = signaled;
    if (signaled) wake_up( fd->user, 0 );
}

/* check if the fd is signaled */
int is_fd_signaled( struct fd *fd )
{
    return fd->signaled;
}

/* handler for close_handle that refuses to close fd-associated handles in other processes */
int fd_close_handle( struct object *obj, struct process *process, obj_handle_t handle )
{
    return (!current || current->process == process);
}

/* check if events are pending and if yes return which one(s) */
int check_fd_events( struct fd *fd, int events )
{
    struct pollfd pfd;

    if (fd->unix_fd == -1) return POLLERR;
    if (fd->inode) return events;  /* regular files are always signaled */

    pfd.fd     = fd->unix_fd;
    pfd.events = events;
    if (poll( &pfd, 1, 0 ) <= 0) return 0;
    return pfd.revents;
}

/* default signaled() routine for objects that poll() on an fd */
int default_fd_signaled( struct object *obj, struct thread *thread )
{
    struct fd *fd = get_obj_fd( obj );
    int ret = fd->signaled;
    release_object( fd );
    return ret;
}

/* default map_access() routine for objects that behave like an fd */
unsigned int default_fd_map_access( struct object *obj, unsigned int access )
{
    if (access & GENERIC_READ)    access |= FILE_GENERIC_READ;
    if (access & GENERIC_WRITE)   access |= FILE_GENERIC_WRITE;
    if (access & GENERIC_EXECUTE) access |= FILE_GENERIC_EXECUTE;
    if (access & GENERIC_ALL)     access |= FILE_ALL_ACCESS;
    return access & ~(GENERIC_READ | GENERIC_WRITE | GENERIC_EXECUTE | GENERIC_ALL);
}

int default_fd_get_poll_events( struct fd *fd )
{
    int events = 0;

    if (async_waiting( fd->read_q )) events |= POLLIN;
    if (async_waiting( fd->write_q )) events |= POLLOUT;
    return events;
}

/* default handler for poll() events */
void default_poll_event( struct fd *fd, int event )
{
    if (event & (POLLIN | POLLERR | POLLHUP)) async_wake_up( fd->read_q, STATUS_ALERTED );
    if (event & (POLLOUT | POLLERR | POLLHUP)) async_wake_up( fd->write_q, STATUS_ALERTED );

    /* if an error occurred, stop polling this fd to avoid busy-looping */
    if (event & (POLLERR | POLLHUP)) set_fd_events( fd, -1 );
    else if (!fd->inode) set_fd_events( fd, fd->fd_ops->get_poll_events( fd ) );
}

struct async *fd_queue_async( struct fd *fd, const async_data_t *data, int type )
{
    struct async_queue *queue;
    struct async *async;

    switch (type)
    {
    case ASYNC_TYPE_READ:
        if (!fd->read_q && !(fd->read_q = create_async_queue( fd ))) return NULL;
        queue = fd->read_q;
        break;
    case ASYNC_TYPE_WRITE:
        if (!fd->write_q && !(fd->write_q = create_async_queue( fd ))) return NULL;
        queue = fd->write_q;
        break;
    case ASYNC_TYPE_WAIT:
        if (!fd->wait_q && !(fd->wait_q = create_async_queue( fd ))) return NULL;
        queue = fd->wait_q;
        break;
    default:
        queue = NULL;
        assert(0);
    }

    if ((async = create_async( current, queue, data )) && type != ASYNC_TYPE_WAIT)
    {
        if (!fd->inode)
            set_fd_events( fd, fd->fd_ops->get_poll_events( fd ) );
        else  /* regular files are always ready for read and write */
            async_wake_up( queue, STATUS_ALERTED );
    }
    return async;
}

void fd_async_wake_up( struct fd *fd, int type, unsigned int status )
{
    switch (type)
    {
    case ASYNC_TYPE_READ:
        async_wake_up( fd->read_q, status );
        break;
    case ASYNC_TYPE_WRITE:
        async_wake_up( fd->write_q, status );
        break;
    case ASYNC_TYPE_WAIT:
        async_wake_up( fd->wait_q, status );
        break;
    default:
        assert(0);
    }
}

void fd_reselect_async( struct fd *fd, struct async_queue *queue )
{
    fd->fd_ops->reselect_async( fd, queue );
}

void default_fd_queue_async( struct fd *fd, const async_data_t *data, int type, int count )
{
    struct async *async;

    if ((async = fd_queue_async( fd, data, type )))
    {
        release_object( async );
        set_error( STATUS_PENDING );
    }
}

/* default reselect_async() fd routine */
void default_fd_reselect_async( struct fd *fd, struct async_queue *queue )
{
    if (queue != fd->wait_q)
    {
        int poll_events = fd->fd_ops->get_poll_events( fd );
        int events = check_fd_events( fd, poll_events );
        if (events) fd->fd_ops->poll_event( fd, events );
        else set_fd_events( fd, poll_events );
    }
}

/* default cancel_async() fd routine */
void default_fd_cancel_async( struct fd *fd, struct process *process, struct thread *thread, client_ptr_t iosb )
{
    int n = 0;

    n += async_wake_up_by( fd->read_q, process, thread, iosb, STATUS_CANCELLED );
    n += async_wake_up_by( fd->write_q, process, thread, iosb, STATUS_CANCELLED );
    n += async_wake_up_by( fd->wait_q, process, thread, iosb, STATUS_CANCELLED );
    if (!n && iosb)
        set_error( STATUS_NOT_FOUND );
}

/* default flush() routine */
void no_flush( struct fd *fd, struct event **event )
{
    set_error( STATUS_OBJECT_TYPE_MISMATCH );
}

static inline int is_valid_mounted_device( struct stat *st )
{
#if defined(linux) || defined(__sun__)
    return S_ISBLK( st->st_mode );
#else
    /* disks are char devices on *BSD */
    return S_ISCHR( st->st_mode );
#endif
}

/* close all Unix file descriptors on a device to allow unmounting it */
static void unmount_device( struct fd *device_fd )
{
    unsigned int i;
    struct stat st;
    struct device *device;
    struct inode *inode;
    struct fd *fd;
    int unix_fd = get_unix_fd( device_fd );

    if (unix_fd == -1) return;

    if (fstat( unix_fd, &st ) == -1 || !is_valid_mounted_device( &st ))
    {
        set_error( STATUS_INVALID_PARAMETER );
        return;
    }

    if (!(device = get_device( st.st_rdev, -1 ))) return;

    for (i = 0; i < INODE_HASH_SIZE; i++)
    {
        LIST_FOR_EACH_ENTRY( inode, &device->inode_hash[i], struct inode, entry )
        {
            LIST_FOR_EACH_ENTRY( fd, &inode->open, struct fd, inode_entry )
            {
                unmount_fd( fd );
            }
            inode_close_pending( inode, 0 );
        }
    }
    /* remove it from the hash table */
    list_remove( &device->entry );
    list_init( &device->entry );
    release_object( device );
}

/* default ioctl() routine */
obj_handle_t default_fd_ioctl( struct fd *fd, ioctl_code_t code, const async_data_t *async,
                               int blocking, const void *data, data_size_t size )
{
    switch(code)
    {
    case FSCTL_DISMOUNT_VOLUME:
        unmount_device( fd );
        return 0;
    default:
        set_error( STATUS_NOT_SUPPORTED );
        return 0;
    }
}

/* same as get_handle_obj but retrieve the struct fd associated to the object */
static struct fd *get_handle_fd_obj( struct process *process, obj_handle_t handle,
                                     unsigned int access )
{
    struct fd *fd = NULL;
    struct object *obj;

    if ((obj = get_handle_obj( process, handle, access, NULL )))
    {
        fd = get_obj_fd( obj );
        release_object( obj );
    }
    return fd;
}

struct completion *fd_get_completion( struct fd *fd, apc_param_t *p_key )
{
    *p_key = fd->comp_key;
    return fd->completion ? (struct completion *)grab_object( fd->completion ) : NULL;
}

void fd_copy_completion( struct fd *src, struct fd *dst )
{
    assert( !dst->completion );
    dst->completion = fd_get_completion( src, &dst->comp_key );
}

/* flush a file buffers */
DECL_HANDLER(flush_file)
{
    struct fd *fd = get_handle_fd_obj( current->process, req->handle, 0 );
    struct event * event = NULL;

    if (fd)
    {
        fd->fd_ops->flush( fd, &event );
        if ( event )
        {
            reply->event = alloc_handle( current->process, event, SYNCHRONIZE, 0 );
        }
        release_object( fd );
    }
}

/* open a file object */
DECL_HANDLER(open_file_object)
{
    struct unicode_str name;
    struct directory *root = NULL;
    struct object *obj, *result;

    get_req_unicode_str( &name );
    if (req->rootdir && !(root = get_directory_obj( current->process, req->rootdir, 0 )))
        return;

    if ((obj = open_object_dir( root, &name, req->attributes, NULL )))
    {
        if ((result = obj->ops->open_file( obj, req->access, req->sharing, req->options )))
        {
            reply->handle = alloc_handle( current->process, result, req->access, req->attributes );
            release_object( result );
        }
        release_object( obj );
    }

    if (root) release_object( root );
}

/* get the Unix name from a file handle */
DECL_HANDLER(get_handle_unix_name)
{
    struct fd *fd;

    if ((fd = get_handle_fd_obj( current->process, req->handle, 0 )))
    {
        if (fd->unix_name)
        {
            data_size_t name_len = strlen( fd->unix_name );
            reply->name_len = name_len;
            if (name_len <= get_reply_max_size()) set_reply_data( fd->unix_name, name_len );
            else set_error( STATUS_BUFFER_OVERFLOW );
        }
        release_object( fd );
    }
}

/* get a Unix fd to access a file */
DECL_HANDLER(get_handle_fd)
{
    struct fd *fd;

    if ((fd = get_handle_fd_obj( current->process, req->handle, 0 )))
    {
        int unix_fd = get_unix_fd( fd );
        if (unix_fd != -1)
        {
            reply->type = fd->fd_ops->get_fd_type( fd );
            reply->removable = is_fd_removable(fd);
            reply->options = fd->options;
            reply->access = get_handle_access( current->process, req->handle );
            send_client_fd( current->process, unix_fd, req->handle );
        }
        release_object( fd );
    }
}

/* perform an ioctl on a file */
DECL_HANDLER(ioctl)
{
    unsigned int access = (req->code >> 14) & (FILE_READ_DATA|FILE_WRITE_DATA);
    struct fd *fd = get_handle_fd_obj( current->process, req->async.handle, access );

    if (fd)
    {
        reply->wait = fd->fd_ops->ioctl( fd, req->code, &req->async, req->blocking,
                                         get_req_data(), get_req_data_size() );
        reply->options = fd->options;
        release_object( fd );
    }
}

/* create / reschedule an async I/O */
DECL_HANDLER(register_async)
{
    unsigned int access;
    struct fd *fd;

    switch(req->type)
    {
    case ASYNC_TYPE_READ:
        access = FILE_READ_DATA;
        break;
    case ASYNC_TYPE_WRITE:
        access = FILE_WRITE_DATA;
        break;
    default:
        set_error( STATUS_INVALID_PARAMETER );
        return;
    }

    if ((fd = get_handle_fd_obj( current->process, req->async.handle, access )))
    {
        if (get_unix_fd( fd ) != -1) fd->fd_ops->queue_async( fd, &req->async, req->type, req->count );
        release_object( fd );
    }
}

/* cancels all async I/O */
DECL_HANDLER(cancel_async)
{
    struct fd *fd = get_handle_fd_obj( current->process, req->handle, 0 );
    struct thread *thread = req->only_thread ? current : NULL;

    if (fd)
    {
        if (get_unix_fd( fd ) != -1) fd->fd_ops->cancel_async( fd, current->process, thread, req->iosb );
        release_object( fd );
    }
}

/* attach completion object to a fd */
DECL_HANDLER(set_completion_info)
{
    struct fd *fd = get_handle_fd_obj( current->process, req->handle, 0 );

    if (fd)
    {
        if (!(fd->options & (FILE_SYNCHRONOUS_IO_ALERT | FILE_SYNCHRONOUS_IO_NONALERT)) && !fd->completion)
        {
            fd->completion = get_completion_obj( current->process, req->chandle, IO_COMPLETION_MODIFY_STATE );
            fd->comp_key = req->ckey;
        }
        else set_error( STATUS_INVALID_PARAMETER );
        release_object( fd );
    }
}

/* push new completion msg into a completion queue attached to the fd */
DECL_HANDLER(add_fd_completion)
{
    struct fd *fd = get_handle_fd_obj( current->process, req->handle, 0 );
    if (fd)
    {
        if (fd->completion)
            add_completion( fd->completion, fd->comp_key, req->cvalue, req->status, req->information );
        release_object( fd );
    }
}