server/fd.c
1 /*
2 * Server-side file descriptor management
4 * Copyright (C) 2000, 2003 Alexandre Julliard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
22 #include "config.h"
23 #include "wine/port.h"
25 #include <assert.h>
26 #include <errno.h>
27 #include <fcntl.h>
28 #include <limits.h>
29 #include <signal.h>
30 #include <stdarg.h>
31 #include <stdio.h>
32 #include <string.h>
33 #include <stdlib.h>
34 #ifdef HAVE_POLL_H
35 #include <poll.h>
36 #endif
37 #ifdef HAVE_SYS_POLL_H
38 #include <sys/poll.h>
39 #endif
40 #ifdef HAVE_LINUX_MAJOR_H
41 #include <linux/major.h>
42 #endif
43 #ifdef HAVE_SYS_STATVFS_H
44 #include <sys/statvfs.h>
45 #endif
46 #ifdef HAVE_SYS_VFS_H
47 /*
48 * Solaris defines its system list in sys/list.h.
49 * This needs to be worked around here.
50 */
51 #define list SYSLIST
52 #define list_next SYSLIST_NEXT
53 #define list_prev SYSLIST_PREV
54 #define list_head SYSLIST_HEAD
55 #define list_tail SYSLIST_TAIL
56 #define list_move_tail SYSLIST_MOVE_TAIL
57 #define list_remove SYSLIST_REMOVE
58 #include <sys/vfs.h>
59 #undef list
60 #undef list_next
61 #undef list_prev
62 #undef list_head
63 #undef list_tail
64 #undef list_move_tail
65 #undef list_remove
66 #endif
67 #ifdef HAVE_SYS_PARAM_H
68 #include <sys/param.h>
69 #endif
70 #ifdef HAVE_SYS_MOUNT_H
71 #include <sys/mount.h>
72 #endif
73 #ifdef HAVE_SYS_STATFS_H
74 #include <sys/statfs.h>
75 #endif
76 #ifdef HAVE_SYS_SYSCTL_H
77 #include <sys/sysctl.h>
78 #endif
79 #ifdef HAVE_SYS_EVENT_H
80 #include <sys/event.h>
81 #undef LIST_INIT
82 #undef LIST_ENTRY
83 #endif
84 #ifdef HAVE_STDINT_H
85 #include <stdint.h>
86 #endif
87 #include <sys/stat.h>
88 #include <sys/time.h>
89 #include <sys/types.h>
90 #include <unistd.h>
92 #include "ntstatus.h"
93 #define WIN32_NO_STATUS
94 #include "object.h"
95 #include "file.h"
96 #include "handle.h"
97 #include "process.h"
98 #include "request.h"
100 #include "winternl.h"
101 #include "winioctl.h"
103 #if defined(HAVE_SYS_EPOLL_H) && defined(HAVE_EPOLL_CREATE)
104 # include <sys/epoll.h>
105 # define USE_EPOLL
106 #elif defined(linux) && defined(__i386__) && defined(HAVE_STDINT_H)
107 # define USE_EPOLL
108 # define EPOLLIN POLLIN
109 # define EPOLLOUT POLLOUT
110 # define EPOLLERR POLLERR
111 # define EPOLLHUP POLLHUP
112 # define EPOLL_CTL_ADD 1
113 # define EPOLL_CTL_DEL 2
114 # define EPOLL_CTL_MOD 3
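/* No <sys/epoll.h> is available in this configuration, so the epoll structures are
 * declared by hand and the epoll syscalls are invoked directly below (i386 syscall
 * numbers 254-256, via int $0x80); SYSCALL_RET converts the kernel's negative-errno
 * return value into the usual libc convention of returning -1 and setting errno. */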
116 typedef union epoll_data
118 void *ptr;
119 int fd;
120 uint32_t u32;
121 uint64_t u64;
122 } epoll_data_t;
124 struct epoll_event
126 uint32_t events;
127 epoll_data_t data;
130 #define SYSCALL_RET(ret) do { \
131 if (ret < 0) { errno = -ret; ret = -1; } \
132 return ret; \
133 } while(0)
135 static inline int epoll_create( int size )
137 int ret;
138 __asm__( "pushl %%ebx; movl %2,%%ebx; int $0x80; popl %%ebx"
139 : "=a" (ret) : "0" (254 /*NR_epoll_create*/), "r" (size) );
140 SYSCALL_RET(ret);
143 static inline int epoll_ctl( int epfd, int op, int fd, const struct epoll_event *event )
145 int ret;
146 __asm__( "pushl %%ebx; movl %2,%%ebx; int $0x80; popl %%ebx"
147 : "=a" (ret)
148 : "0" (255 /*NR_epoll_ctl*/), "r" (epfd), "c" (op), "d" (fd), "S" (event), "m" (*event) );
149 SYSCALL_RET(ret);
152 static inline int epoll_wait( int epfd, struct epoll_event *events, int maxevents, int timeout )
154 int ret;
155 __asm__( "pushl %%ebx; movl %2,%%ebx; int $0x80; popl %%ebx"
156 : "=a" (ret)
157 : "0" (256 /*NR_epoll_wait*/), "r" (epfd), "c" (events), "d" (maxevents), "S" (timeout)
158 : "memory" );
159 SYSCALL_RET(ret);
161 #undef SYSCALL_RET
163 #endif /* linux && __i386__ && HAVE_STDINT_H */
165 #if defined(HAVE_PORT_H) && defined(HAVE_PORT_CREATE)
166 # include <port.h>
167 # define USE_EVENT_PORTS
168 #endif /* HAVE_PORT_H && HAVE_PORT_CREATE */
170 /* Because of the stupid Posix locking semantics, we need to keep
171 * track of all file descriptors referencing a given file, and not
172 * close a single one until all the locks are gone (sigh).
173 */
175 /* file descriptor object */
177 /* closed_fd is used to keep track of the unix fd belonging to a closed fd object */
178 struct closed_fd
180 struct list entry; /* entry in inode closed list */
181 int unix_fd; /* the unix file descriptor */
182 char unlink[1]; /* name to unlink on close (if any) */
185 struct fd
187 struct object obj; /* object header */
188 const struct fd_ops *fd_ops; /* file descriptor operations */
189 struct inode *inode; /* inode that this fd belongs to */
190 struct list inode_entry; /* entry in inode fd list */
191 struct closed_fd *closed; /* structure to store the unix fd at destroy time */
192 struct object *user; /* object using this file descriptor */
193 struct list locks; /* list of locks on this fd */
194 unsigned int access; /* file access (FILE_READ_DATA etc.) */
195 unsigned int options; /* file options (FILE_DELETE_ON_CLOSE, FILE_SYNCHRONOUS...) */
196 unsigned int sharing; /* file sharing mode */
197 char *unix_name; /* unix file name */
198 int unix_fd; /* unix file descriptor */
199 unsigned int no_fd_status;/* status to return when unix_fd is -1 */
200 unsigned int cacheable :1;/* can the fd be cached on the client side? */
201 unsigned int signaled :1; /* is the fd signaled? */
202 unsigned int fs_locks :1; /* can we use filesystem locks for this fd? */
203 int poll_index; /* index of fd in poll array */
204 struct async_queue *read_q; /* async readers of this fd */
205 struct async_queue *write_q; /* async writers of this fd */
206 struct async_queue *wait_q; /* other async waiters of this fd */
207 struct completion *completion; /* completion object attached to this fd */
208 apc_param_t comp_key; /* completion key to set in completion events */
211 static void fd_dump( struct object *obj, int verbose );
212 static void fd_destroy( struct object *obj );
214 static const struct object_ops fd_ops =
216 sizeof(struct fd), /* size */
217 fd_dump, /* dump */
218 no_get_type, /* get_type */
219 no_add_queue, /* add_queue */
220 NULL, /* remove_queue */
221 NULL, /* signaled */
222 NULL, /* satisfied */
223 no_signal, /* signal */
224 no_get_fd, /* get_fd */
225 no_map_access, /* map_access */
226 default_get_sd, /* get_sd */
227 default_set_sd, /* set_sd */
228 no_lookup_name, /* lookup_name */
229 no_open_file, /* open_file */
230 no_close_handle, /* close_handle */
231 fd_destroy /* destroy */
234 /* device object */
236 #define DEVICE_HASH_SIZE 7
237 #define INODE_HASH_SIZE 17
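/* devices are kept in a small hash table keyed by dev_t, and each device keeps its
 * own hash of inodes keyed by ino_t (see get_device() and get_inode() below) */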
239 struct device
241 struct object obj; /* object header */
242 struct list entry; /* entry in device hash list */
243 dev_t dev; /* device number */
244 int removable; /* removable device? (or -1 if unknown) */
245 struct list inode_hash[INODE_HASH_SIZE]; /* inodes hash table */
248 static void device_dump( struct object *obj, int verbose );
249 static void device_destroy( struct object *obj );
251 static const struct object_ops device_ops =
253 sizeof(struct device), /* size */
254 device_dump, /* dump */
255 no_get_type, /* get_type */
256 no_add_queue, /* add_queue */
257 NULL, /* remove_queue */
258 NULL, /* signaled */
259 NULL, /* satisfied */
260 no_signal, /* signal */
261 no_get_fd, /* get_fd */
262 no_map_access, /* map_access */
263 default_get_sd, /* get_sd */
264 default_set_sd, /* set_sd */
265 no_lookup_name, /* lookup_name */
266 no_open_file, /* open_file */
267 no_close_handle, /* close_handle */
268 device_destroy /* destroy */
271 /* inode object */
273 struct inode
275 struct object obj; /* object header */
276 struct list entry; /* inode hash list entry */
277 struct device *device; /* device containing this inode */
278 ino_t ino; /* inode number */
279 struct list open; /* list of open file descriptors */
280 struct list locks; /* list of file locks */
281 struct list closed; /* list of file descriptors to close at destroy time */
284 static void inode_dump( struct object *obj, int verbose );
285 static void inode_destroy( struct object *obj );
287 static const struct object_ops inode_ops =
289 sizeof(struct inode), /* size */
290 inode_dump, /* dump */
291 no_get_type, /* get_type */
292 no_add_queue, /* add_queue */
293 NULL, /* remove_queue */
294 NULL, /* signaled */
295 NULL, /* satisfied */
296 no_signal, /* signal */
297 no_get_fd, /* get_fd */
298 no_map_access, /* map_access */
299 default_get_sd, /* get_sd */
300 default_set_sd, /* set_sd */
301 no_lookup_name, /* lookup_name */
302 no_open_file, /* open_file */
303 no_close_handle, /* close_handle */
304 inode_destroy /* destroy */
307 /* file lock object */
309 struct file_lock
311 struct object obj; /* object header */
312 struct fd *fd; /* fd owning this lock */
313 struct list fd_entry; /* entry in list of locks on a given fd */
314 struct list inode_entry; /* entry in inode list of locks */
315 int shared; /* shared lock? */
316 file_pos_t start; /* locked region is interval [start;end) */
317 file_pos_t end;
318 struct process *process; /* process owning this lock */
319 struct list proc_entry; /* entry in list of locks owned by the process */
322 static void file_lock_dump( struct object *obj, int verbose );
323 static int file_lock_signaled( struct object *obj, struct thread *thread );
325 static const struct object_ops file_lock_ops =
327 sizeof(struct file_lock), /* size */
328 file_lock_dump, /* dump */
329 no_get_type, /* get_type */
330 add_queue, /* add_queue */
331 remove_queue, /* remove_queue */
332 file_lock_signaled, /* signaled */
333 no_satisfied, /* satisfied */
334 no_signal, /* signal */
335 no_get_fd, /* get_fd */
336 no_map_access, /* map_access */
337 default_get_sd, /* get_sd */
338 default_set_sd, /* set_sd */
339 no_lookup_name, /* lookup_name */
340 no_open_file, /* open_file */
341 no_close_handle, /* close_handle */
342 no_destroy /* destroy */
346 #define OFF_T_MAX (~((file_pos_t)1 << (8*sizeof(off_t)-1)))
347 #define FILE_POS_T_MAX (~(file_pos_t)0)
349 static file_pos_t max_unix_offset = OFF_T_MAX;
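/* print a 64-bit value with plain %lx even on platforms where unsigned long is 32 bits */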
351 #define DUMP_LONG_LONG(val) do { \
352 if (sizeof(val) > sizeof(unsigned long) && (val) > ~0UL) \
353 fprintf( stderr, "%lx%08lx", (unsigned long)((unsigned long long)(val) >> 32), (unsigned long)(val) ); \
354 else \
355 fprintf( stderr, "%lx", (unsigned long)(val) ); \
356 } while (0)
360 /****************************************************************/
361 /* timeouts support */
363 struct timeout_user
365 struct list entry; /* entry in sorted timeout list */
366 timeout_t when; /* timeout expiry (absolute time) */
367 timeout_callback callback; /* callback function */
368 void *private; /* callback private data */
371 static struct list timeout_list = LIST_INIT(timeout_list); /* sorted timeouts list */
372 timeout_t current_time;
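/* current_time is kept in NT format: 100-nanosecond ticks since Jan 1, 1601;
 * gettimeofday() returns seconds/microseconds since 1970, hence the epoch offset
 * below (369 years plus 89 leap days) and the factor of 10 applied to tv_usec */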
374 static inline void set_current_time(void)
376 static const timeout_t ticks_1601_to_1970 = (timeout_t)86400 * (369 * 365 + 89) * TICKS_PER_SEC;
377 struct timeval now;
378 gettimeofday( &now, NULL );
379 current_time = (timeout_t)now.tv_sec * TICKS_PER_SEC + now.tv_usec * 10 + ticks_1601_to_1970;
382 /* add a timeout user */
383 struct timeout_user *add_timeout_user( timeout_t when, timeout_callback func, void *private )
385 struct timeout_user *user;
386 struct list *ptr;
388 if (!(user = mem_alloc( sizeof(*user) ))) return NULL;
389 user->when = (when > 0) ? when : current_time - when;
390 user->callback = func;
391 user->private = private;
393 /* Now insert it in the linked list */
395 LIST_FOR_EACH( ptr, &timeout_list )
397 struct timeout_user *timeout = LIST_ENTRY( ptr, struct timeout_user, entry );
398 if (timeout->when >= user->when) break;
400 list_add_before( ptr, &user->entry );
401 return user;
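/* Illustrative use (not from this file): a caller such as a timer object would do
 *     struct timeout_user *user = add_timeout_user( -TICKS_PER_SEC, callback, obj );
 * a negative 'when' is relative to the current time, a positive one is absolute */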
404 /* remove a timeout user */
405 void remove_timeout_user( struct timeout_user *user )
407 list_remove( &user->entry );
408 free( user );
411 /* return a text description of a timeout for debugging purposes */
412 const char *get_timeout_str( timeout_t timeout )
414 static char buffer[64];
415 long secs, nsecs;
417 if (!timeout) return "0";
418 if (timeout == TIMEOUT_INFINITE) return "infinite";
420 if (timeout < 0) /* relative */
422 secs = -timeout / TICKS_PER_SEC;
423 nsecs = -timeout % TICKS_PER_SEC;
424 sprintf( buffer, "+%ld.%07ld", secs, nsecs );
426 else /* absolute */
428 secs = (timeout - current_time) / TICKS_PER_SEC;
429 nsecs = (timeout - current_time) % TICKS_PER_SEC;
430 if (nsecs < 0)
432 nsecs += TICKS_PER_SEC;
433 secs--;
435 if (secs >= 0)
436 sprintf( buffer, "%x%08x (+%ld.%07ld)",
437 (unsigned int)(timeout >> 32), (unsigned int)timeout, secs, nsecs );
438 else
439 sprintf( buffer, "%x%08x (-%ld.%07ld)",
440 (unsigned int)(timeout >> 32), (unsigned int)timeout,
441 -(secs + 1), TICKS_PER_SEC - nsecs );
443 return buffer;
447 /****************************************************************/
448 /* poll support */
450 static struct fd **poll_users; /* users array */
451 static struct pollfd *pollfd; /* poll fd array */
452 static int nb_users; /* count of array entries actually in use */
453 static int active_users; /* current number of active users */
454 static int allocated_users; /* count of allocated entries in the array */
455 static struct fd **freelist; /* list of free entries in the array */
457 static int get_next_timeout(void);
459 static inline void fd_poll_event( struct fd *fd, int event )
461 fd->fd_ops->poll_event( fd, event );
464 #ifdef USE_EPOLL
466 static int epoll_fd = -1;
468 static inline void init_epoll(void)
470 epoll_fd = epoll_create( 128 );
473 /* set the events that epoll waits for on this fd; helper for set_fd_events */
474 static inline void set_fd_epoll_events( struct fd *fd, int user, int events )
476 struct epoll_event ev;
477 int ctl;
479 if (epoll_fd == -1) return;
481 if (events == -1) /* stop waiting on this fd completely */
483 if (pollfd[user].fd == -1) return; /* already removed */
484 ctl = EPOLL_CTL_DEL;
486 else if (pollfd[user].fd == -1)
488 if (pollfd[user].events) return; /* stopped waiting on it, don't restart */
489 ctl = EPOLL_CTL_ADD;
491 else
493 if (pollfd[user].events == events) return; /* nothing to do */
494 ctl = EPOLL_CTL_MOD;
497 ev.events = events;
498 memset(&ev.data, 0, sizeof(ev.data));
499 ev.data.u32 = user;
501 if (epoll_ctl( epoll_fd, ctl, fd->unix_fd, &ev ) == -1)
503 if (errno == ENOMEM) /* not enough memory, give up on epoll */
505 close( epoll_fd );
506 epoll_fd = -1;
508 else perror( "epoll_ctl" ); /* should not happen */
512 static inline void remove_epoll_user( struct fd *fd, int user )
514 if (epoll_fd == -1) return;
516 if (pollfd[user].fd != -1)
518 struct epoll_event dummy;
519 epoll_ctl( epoll_fd, EPOLL_CTL_DEL, fd->unix_fd, &dummy );
523 static inline void main_loop_epoll(void)
525 int i, ret, timeout;
526 struct epoll_event events[128];
528 assert( POLLIN == EPOLLIN );
529 assert( POLLOUT == EPOLLOUT );
530 assert( POLLERR == EPOLLERR );
531 assert( POLLHUP == EPOLLHUP );
533 if (epoll_fd == -1) return;
535 while (active_users)
537 timeout = get_next_timeout();
539 if (!active_users) break; /* last user removed by a timeout */
540 if (epoll_fd == -1) break; /* an error occurred with epoll */
542 ret = epoll_wait( epoll_fd, events, sizeof(events)/sizeof(events[0]), timeout );
543 set_current_time();
545 /* put the events into the pollfd array first, like poll does */
546 for (i = 0; i < ret; i++)
548 int user = events[i].data.u32;
549 pollfd[user].revents = events[i].events;
552 /* read events from the pollfd array, as set_fd_events may modify them */
553 for (i = 0; i < ret; i++)
555 int user = events[i].data.u32;
556 if (pollfd[user].revents) fd_poll_event( poll_users[user], pollfd[user].revents );
561 #elif defined(HAVE_KQUEUE)
563 static int kqueue_fd = -1;
565 static inline void init_epoll(void)
567 #ifdef __APPLE__ /* kqueue support is broken in Mac OS < 10.5 */
568 int mib[2];
569 char release[32];
570 size_t len = sizeof(release);
572 mib[0] = CTL_KERN;
573 mib[1] = KERN_OSRELEASE;
574 if (sysctl( mib, 2, release, &len, NULL, 0 ) == -1) return;
575 if (atoi(release) < 9) return;
576 #endif
577 kqueue_fd = kqueue();
580 static inline void set_fd_epoll_events( struct fd *fd, int user, int events )
582 struct kevent ev[2];
584 if (kqueue_fd == -1) return;
586 EV_SET( &ev[0], fd->unix_fd, EVFILT_READ, 0, NOTE_LOWAT, 1, (void *)user );
587 EV_SET( &ev[1], fd->unix_fd, EVFILT_WRITE, 0, NOTE_LOWAT, 1, (void *)user );
589 if (events == -1) /* stop waiting on this fd completely */
591 if (pollfd[user].fd == -1) return; /* already removed */
592 ev[0].flags |= EV_DELETE;
593 ev[1].flags |= EV_DELETE;
595 else if (pollfd[user].fd == -1)
597 if (pollfd[user].events) return; /* stopped waiting on it, don't restart */
598 ev[0].flags |= EV_ADD | ((events & POLLIN) ? EV_ENABLE : EV_DISABLE);
599 ev[1].flags |= EV_ADD | ((events & POLLOUT) ? EV_ENABLE : EV_DISABLE);
601 else
603 if (pollfd[user].events == events) return; /* nothing to do */
604 ev[0].flags |= (events & POLLIN) ? EV_ENABLE : EV_DISABLE;
605 ev[1].flags |= (events & POLLOUT) ? EV_ENABLE : EV_DISABLE;
608 if (kevent( kqueue_fd, ev, 2, NULL, 0, NULL ) == -1)
610 if (errno == ENOMEM) /* not enough memory, give up on kqueue */
612 close( kqueue_fd );
613 kqueue_fd = -1;
615 else perror( "kevent" ); /* should not happen */
619 static inline void remove_epoll_user( struct fd *fd, int user )
621 if (kqueue_fd == -1) return;
623 if (pollfd[user].fd != -1)
625 struct kevent ev[2];
627 EV_SET( &ev[0], fd->unix_fd, EVFILT_READ, EV_DELETE, 0, 0, 0 );
628 EV_SET( &ev[1], fd->unix_fd, EVFILT_WRITE, EV_DELETE, 0, 0, 0 );
629 kevent( kqueue_fd, ev, 2, NULL, 0, NULL );
633 static inline void main_loop_epoll(void)
635 int i, ret, timeout;
636 struct kevent events[128];
638 if (kqueue_fd == -1) return;
640 while (active_users)
642 timeout = get_next_timeout();
644 if (!active_users) break; /* last user removed by a timeout */
645 if (kqueue_fd == -1) break; /* an error occurred with kqueue */
647 if (timeout != -1)
649 struct timespec ts;
651 ts.tv_sec = timeout / 1000;
652 ts.tv_nsec = (timeout % 1000) * 1000000;
653 ret = kevent( kqueue_fd, NULL, 0, events, sizeof(events)/sizeof(events[0]), &ts );
655 else ret = kevent( kqueue_fd, NULL, 0, events, sizeof(events)/sizeof(events[0]), NULL );
657 set_current_time();
659 /* put the events into the pollfd array first, like poll does */
660 for (i = 0; i < ret; i++)
662 long user = (long)events[i].udata;
663 pollfd[user].revents = 0;
665 for (i = 0; i < ret; i++)
667 long user = (long)events[i].udata;
668 if (events[i].filter == EVFILT_READ) pollfd[user].revents |= POLLIN;
669 else if (events[i].filter == EVFILT_WRITE) pollfd[user].revents |= POLLOUT;
670 if (events[i].flags & EV_EOF) pollfd[user].revents |= POLLHUP;
671 if (events[i].flags & EV_ERROR) pollfd[user].revents |= POLLERR;
674 /* read events from the pollfd array, as set_fd_events may modify them */
675 for (i = 0; i < ret; i++)
677 long user = (long)events[i].udata;
678 if (pollfd[user].revents) fd_poll_event( poll_users[user], pollfd[user].revents );
679 pollfd[user].revents = 0;
684 #elif defined(USE_EVENT_PORTS)
686 static int port_fd = -1;
688 static inline void init_epoll(void)
690 port_fd = port_create();
693 static inline void set_fd_epoll_events( struct fd *fd, int user, int events )
695 int ret;
697 if (port_fd == -1) return;
699 if (events == -1) /* stop waiting on this fd completely */
701 if (pollfd[user].fd == -1) return; /* already removed */
702 port_dissociate( port_fd, PORT_SOURCE_FD, fd->unix_fd );
704 else if (pollfd[user].fd == -1)
706 if (pollfd[user].events) return; /* stopped waiting on it, don't restart */
707 ret = port_associate( port_fd, PORT_SOURCE_FD, fd->unix_fd, events, (void *)user );
709 else
711 if (pollfd[user].events == events) return; /* nothing to do */
712 ret = port_associate( port_fd, PORT_SOURCE_FD, fd->unix_fd, events, (void *)user );
715 if (ret == -1)
717 if (errno == ENOMEM) /* not enough memory, give up on port_associate */
719 close( port_fd );
720 port_fd = -1;
722 else perror( "port_associate" ); /* should not happen */
726 static inline void remove_epoll_user( struct fd *fd, int user )
728 if (port_fd == -1) return;
730 if (pollfd[user].fd != -1)
732 port_dissociate( port_fd, PORT_SOURCE_FD, fd->unix_fd );
736 static inline void main_loop_epoll(void)
738 int i, nget, ret, timeout;
739 port_event_t events[128];
741 if (port_fd == -1) return;
743 while (active_users)
745 timeout = get_next_timeout();
746 nget = 1;
748 if (!active_users) break; /* last user removed by a timeout */
749 if (port_fd == -1) break; /* an error occurred with event completion */
751 if (timeout != -1)
753 struct timespec ts;
755 ts.tv_sec = timeout / 1000;
756 ts.tv_nsec = (timeout % 1000) * 1000000;
757 ret = port_getn( port_fd, events, sizeof(events)/sizeof(events[0]), &nget, &ts );
759 else ret = port_getn( port_fd, events, sizeof(events)/sizeof(events[0]), &nget, NULL );
761 if (ret == -1) break; /* an error occurred with event completion */
763 set_current_time();
765 /* put the events into the pollfd array first, like poll does */
766 for (i = 0; i < nget; i++)
768 long user = (long)events[i].portev_user;
769 pollfd[user].revents = events[i].portev_events;
772 /* read events from the pollfd array, as set_fd_events may modify them */
773 for (i = 0; i < nget; i++)
775 long user = (long)events[i].portev_user;
776 if (pollfd[user].revents) fd_poll_event( poll_users[user], pollfd[user].revents );
777 /* if we are still interested, reassociate the fd */
778 if (pollfd[user].fd != -1) {
779 port_associate( port_fd, PORT_SOURCE_FD, pollfd[user].fd, pollfd[user].events, (void *)user );
785 #else /* HAVE_KQUEUE */
787 static inline void init_epoll(void) { }
788 static inline void set_fd_epoll_events( struct fd *fd, int user, int events ) { }
789 static inline void remove_epoll_user( struct fd *fd, int user ) { }
790 static inline void main_loop_epoll(void) { }
792 #endif /* USE_EPOLL */
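/* the four inline functions above (init_epoll, set_fd_epoll_events, remove_epoll_user,
 * main_loop_epoll) present the same interface over epoll, kqueue or Solaris event
 * ports; when none of these is available they are empty stubs and the plain poll()
 * loop in main_loop() does all the work */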
795 /* add a user in the poll array and return its index, or -1 on failure */
796 static int add_poll_user( struct fd *fd )
798 int ret;
799 if (freelist)
801 ret = freelist - poll_users;
802 freelist = (struct fd **)poll_users[ret];
804 else
806 if (nb_users == allocated_users)
808 struct fd **newusers;
809 struct pollfd *newpoll;
810 int new_count = allocated_users ? (allocated_users + allocated_users / 2) : 16;
811 if (!(newusers = realloc( poll_users, new_count * sizeof(*poll_users) ))) return -1;
812 if (!(newpoll = realloc( pollfd, new_count * sizeof(*pollfd) )))
814 if (allocated_users)
815 poll_users = newusers;
816 else
817 free( newusers );
818 return -1;
820 poll_users = newusers;
821 pollfd = newpoll;
822 if (!allocated_users) init_epoll();
823 allocated_users = new_count;
825 ret = nb_users++;
827 pollfd[ret].fd = -1;
828 pollfd[ret].events = 0;
829 pollfd[ret].revents = 0;
830 poll_users[ret] = fd;
831 active_users++;
832 return ret;
835 /* remove a user from the poll list */
836 static void remove_poll_user( struct fd *fd, int user )
838 assert( user >= 0 );
839 assert( poll_users[user] == fd );
841 remove_epoll_user( fd, user );
842 pollfd[user].fd = -1;
843 pollfd[user].events = 0;
844 pollfd[user].revents = 0;
845 poll_users[user] = (struct fd *)freelist;
846 freelist = &poll_users[user];
847 active_users--;
850 /* process pending timeouts and return the time until the next timeout, in milliseconds */
851 static int get_next_timeout(void)
853 if (!list_empty( &timeout_list ))
855 struct list expired_list, *ptr;
857 /* first remove all expired timers from the list */
859 list_init( &expired_list );
860 while ((ptr = list_head( &timeout_list )) != NULL)
862 struct timeout_user *timeout = LIST_ENTRY( ptr, struct timeout_user, entry );
864 if (timeout->when <= current_time)
866 list_remove( &timeout->entry );
867 list_add_tail( &expired_list, &timeout->entry );
869 else break;
872 /* now call the callback for all the removed timers */
874 while ((ptr = list_head( &expired_list )) != NULL)
876 struct timeout_user *timeout = LIST_ENTRY( ptr, struct timeout_user, entry );
877 list_remove( &timeout->entry );
878 timeout->callback( timeout->private );
879 free( timeout );
882 if ((ptr = list_head( &timeout_list )) != NULL)
884 struct timeout_user *timeout = LIST_ENTRY( ptr, struct timeout_user, entry );
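/* convert the remaining 100-nanosecond ticks to milliseconds, rounding up */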
885 int diff = (timeout->when - current_time + 9999) / 10000;
886 if (diff < 0) diff = 0;
887 return diff;
890 return -1; /* no pending timeouts */
893 /* server main poll() loop */
894 void main_loop(void)
896 int i, ret, timeout;
898 set_current_time();
899 server_start_time = current_time;
901 main_loop_epoll();
902 /* fall through to normal poll loop */
904 while (active_users)
906 timeout = get_next_timeout();
908 if (!active_users) break; /* last user removed by a timeout */
910 ret = poll( pollfd, nb_users, timeout );
911 set_current_time();
913 if (ret > 0)
915 for (i = 0; i < nb_users; i++)
917 if (pollfd[i].revents)
919 fd_poll_event( poll_users[i], pollfd[i].revents );
920 if (!--ret) break;
928 /****************************************************************/
929 /* device functions */
931 static struct list device_hash[DEVICE_HASH_SIZE];
933 static int is_device_removable( dev_t dev, int unix_fd )
935 #if defined(linux) && defined(HAVE_FSTATFS)
936 struct statfs stfs;
938 /* check for floppy disk */
939 if (major(dev) == FLOPPY_MAJOR) return 1;
941 if (fstatfs( unix_fd, &stfs ) == -1) return 0;
942 return (stfs.f_type == 0x9660 || /* iso9660 */
943 stfs.f_type == 0x9fa1 || /* supermount */
944 stfs.f_type == 0x15013346); /* udf */
945 #elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__APPLE__)
946 struct statfs stfs;
948 if (fstatfs( unix_fd, &stfs ) == -1) return 0;
949 return (!strcmp("cd9660", stfs.f_fstypename) || !strcmp("udf", stfs.f_fstypename));
950 #elif defined(__NetBSD__)
951 struct statvfs stfs;
953 if (fstatvfs( unix_fd, &stfs ) == -1) return 0;
954 return (!strcmp("cd9660", stfs.f_fstypename) || !strcmp("udf", stfs.f_fstypename));
955 #elif defined(sun)
956 # include <sys/dkio.h>
957 # include <sys/vtoc.h>
958 struct dk_cinfo dkinf;
959 if (ioctl( unix_fd, DKIOCINFO, &dkinf ) == -1) return 0;
960 return (dkinf.dki_ctype == DKC_CDROM ||
961 dkinf.dki_ctype == DKC_NCRFLOPPY ||
962 dkinf.dki_ctype == DKC_SMSFLOPPY ||
963 dkinf.dki_ctype == DKC_INTEL82072 ||
964 dkinf.dki_ctype == DKC_INTEL82077);
965 #else
966 return 0;
967 #endif
970 /* retrieve the device object for a given fd, creating it if needed */
971 static struct device *get_device( dev_t dev, int unix_fd )
973 struct device *device;
974 unsigned int i, hash = dev % DEVICE_HASH_SIZE;
976 if (device_hash[hash].next)
978 LIST_FOR_EACH_ENTRY( device, &device_hash[hash], struct device, entry )
979 if (device->dev == dev) return (struct device *)grab_object( device );
981 else list_init( &device_hash[hash] );
983 /* not found, create it */
985 if (unix_fd == -1) return NULL;
986 if ((device = alloc_object( &device_ops )))
988 device->dev = dev;
989 device->removable = is_device_removable( dev, unix_fd );
990 for (i = 0; i < INODE_HASH_SIZE; i++) list_init( &device->inode_hash[i] );
991 list_add_head( &device_hash[hash], &device->entry );
993 return device;
996 static void device_dump( struct object *obj, int verbose )
998 struct device *device = (struct device *)obj;
999 fprintf( stderr, "Device dev=" );
1000 DUMP_LONG_LONG( device->dev );
1001 fprintf( stderr, "\n" );
1004 static void device_destroy( struct object *obj )
1006 struct device *device = (struct device *)obj;
1007 unsigned int i;
1009 for (i = 0; i < INODE_HASH_SIZE; i++)
1010 assert( list_empty(&device->inode_hash[i]) );
1012 list_remove( &device->entry ); /* remove it from the hash table */
1016 /****************************************************************/
1017 /* inode functions */
1019 /* close all pending file descriptors in the closed list */
1020 static void inode_close_pending( struct inode *inode, int keep_unlinks )
1022 struct list *ptr = list_head( &inode->closed );
1024 while (ptr)
1026 struct closed_fd *fd = LIST_ENTRY( ptr, struct closed_fd, entry );
1027 struct list *next = list_next( &inode->closed, ptr );
1029 if (fd->unix_fd != -1)
1031 close( fd->unix_fd );
1032 fd->unix_fd = -1;
1034 if (!keep_unlinks || !fd->unlink[0]) /* get rid of it unless there's an unlink pending on that file */
1036 list_remove( ptr );
1037 free( fd );
1039 ptr = next;
1043 static void inode_dump( struct object *obj, int verbose )
1045 struct inode *inode = (struct inode *)obj;
1046 fprintf( stderr, "Inode device=%p ino=", inode->device );
1047 DUMP_LONG_LONG( inode->ino );
1048 fprintf( stderr, "\n" );
1051 static void inode_destroy( struct object *obj )
1053 struct inode *inode = (struct inode *)obj;
1054 struct list *ptr;
1056 assert( list_empty(&inode->open) );
1057 assert( list_empty(&inode->locks) );
1059 list_remove( &inode->entry );
1061 while ((ptr = list_head( &inode->closed )))
1063 struct closed_fd *fd = LIST_ENTRY( ptr, struct closed_fd, entry );
1064 list_remove( ptr );
1065 if (fd->unix_fd != -1) close( fd->unix_fd );
1066 if (fd->unlink[0])
1068 /* make sure it is still the same file */
1069 struct stat st;
1070 if (!stat( fd->unlink, &st ) && st.st_dev == inode->device->dev && st.st_ino == inode->ino)
1072 if (S_ISDIR(st.st_mode)) rmdir( fd->unlink );
1073 else unlink( fd->unlink );
1076 free( fd );
1078 release_object( inode->device );
1081 /* retrieve the inode object for a given fd, creating it if needed */
1082 static struct inode *get_inode( dev_t dev, ino_t ino, int unix_fd )
1084 struct device *device;
1085 struct inode *inode;
1086 unsigned int hash = ino % INODE_HASH_SIZE;
1088 if (!(device = get_device( dev, unix_fd ))) return NULL;
1090 LIST_FOR_EACH_ENTRY( inode, &device->inode_hash[hash], struct inode, entry )
1092 if (inode->ino == ino)
1094 release_object( device );
1095 return (struct inode *)grab_object( inode );
1099 /* not found, create it */
1100 if ((inode = alloc_object( &inode_ops )))
1102 inode->device = device;
1103 inode->ino = ino;
1104 list_init( &inode->open );
1105 list_init( &inode->locks );
1106 list_init( &inode->closed );
1107 list_add_head( &device->inode_hash[hash], &inode->entry );
1109 else release_object( device );
1111 return inode;
1114 /* add fd to the inode list of file descriptors to close */
1115 static void inode_add_closed_fd( struct inode *inode, struct closed_fd *fd )
1117 if (!list_empty( &inode->locks ))
1119 list_add_head( &inode->closed, &fd->entry );
1121 else if (fd->unlink[0]) /* close the fd but keep the structure around for unlink */
1123 if (fd->unix_fd != -1) close( fd->unix_fd );
1124 fd->unix_fd = -1;
1125 list_add_head( &inode->closed, &fd->entry );
1127 else /* no locks on this inode and no unlink, get rid of the fd */
1129 if (fd->unix_fd != -1) close( fd->unix_fd );
1130 free( fd );
1135 /****************************************************************/
1136 /* file lock functions */
1138 static void file_lock_dump( struct object *obj, int verbose )
1140 struct file_lock *lock = (struct file_lock *)obj;
1141 fprintf( stderr, "Lock %s fd=%p proc=%p start=",
1142 lock->shared ? "shared" : "excl", lock->fd, lock->process );
1143 DUMP_LONG_LONG( lock->start );
1144 fprintf( stderr, " end=" );
1145 DUMP_LONG_LONG( lock->end );
1146 fprintf( stderr, "\n" );
1149 static int file_lock_signaled( struct object *obj, struct thread *thread )
1151 struct file_lock *lock = (struct file_lock *)obj;
1152 /* lock is signaled if it has lost its owner */
1153 return !lock->process;
1156 /* set (or remove) a Unix lock if possible for the given range */
1157 static int set_unix_lock( struct fd *fd, file_pos_t start, file_pos_t end, int type )
1159 struct flock fl;
1161 if (!fd->fs_locks) return 1; /* no fs locks possible for this fd */
1162 for (;;)
1164 if (start == end) return 1; /* can't set zero-byte lock */
1165 if (start > max_unix_offset) return 1; /* ignore it */
1166 fl.l_type = type;
1167 fl.l_whence = SEEK_SET;
1168 fl.l_start = start;
1169 if (!end || end > max_unix_offset) fl.l_len = 0;
1170 else fl.l_len = end - start;
1171 if (fcntl( fd->unix_fd, F_SETLK, &fl ) != -1) return 1;
1173 switch(errno)
1175 case EACCES:
1176 /* check whether locks work at all on this file system */
1177 if (fcntl( fd->unix_fd, F_GETLK, &fl ) != -1)
1179 set_error( STATUS_FILE_LOCK_CONFLICT );
1180 return 0;
1182 /* fall through */
1183 case EIO:
1184 case ENOLCK:
1185 /* no locking on this fs, just ignore it */
1186 fd->fs_locks = 0;
1187 return 1;
1188 case EAGAIN:
1189 set_error( STATUS_FILE_LOCK_CONFLICT );
1190 return 0;
1191 case EBADF:
1192 /* this can happen if we try to set a write lock on a read-only file */
1193 /* we just ignore that error */
1194 if (fl.l_type == F_WRLCK) return 1;
1195 set_error( STATUS_ACCESS_DENIED );
1196 return 0;
1197 #ifdef EOVERFLOW
1198 case EOVERFLOW:
1199 #endif
1200 case EINVAL:
1201 /* this can happen if off_t is 64-bit but the kernel only supports 32-bit */
1202 /* in that case we shrink the limit and retry */
1203 if (max_unix_offset > INT_MAX)
1205 max_unix_offset = INT_MAX;
1206 break; /* retry */
1208 /* fall through */
1209 default:
1210 file_set_error();
1211 return 0;
1216 /* check if interval [start;end) overlaps the lock */
1217 static inline int lock_overlaps( struct file_lock *lock, file_pos_t start, file_pos_t end )
1219 if (lock->end && start >= lock->end) return 0;
1220 if (end && lock->start >= end) return 0;
1221 return 1;
1224 /* remove Unix locks for all bytes in the specified area that are no longer locked */
1225 static void remove_unix_locks( struct fd *fd, file_pos_t start, file_pos_t end )
1227 struct hole
1229 struct hole *next;
1230 struct hole *prev;
1231 file_pos_t start;
1232 file_pos_t end;
1233 } *first, *cur, *next, *buffer;
1235 struct list *ptr;
1236 int count = 0;
1238 if (!fd->inode) return;
1239 if (!fd->fs_locks) return;
1240 if (start == end || start > max_unix_offset) return;
1241 if (!end || end > max_unix_offset) end = max_unix_offset + 1;
1243 /* count the number of locks overlapping the specified area */
1245 LIST_FOR_EACH( ptr, &fd->inode->locks )
1247 struct file_lock *lock = LIST_ENTRY( ptr, struct file_lock, inode_entry );
1248 if (lock->start == lock->end) continue;
1249 if (lock_overlaps( lock, start, end )) count++;
1252 if (!count) /* no locks at all, we can unlock everything */
1254 set_unix_lock( fd, start, end, F_UNLCK );
1255 return;
1258 /* allocate space for the list of holes */
1259 /* max. number of holes is number of locks + 1 */
1261 if (!(buffer = malloc( sizeof(*buffer) * (count+1) ))) return;
1262 first = buffer;
1263 first->next = NULL;
1264 first->prev = NULL;
1265 first->start = start;
1266 first->end = end;
1267 next = first + 1;
1269 /* build a sorted list of unlocked holes in the specified area */
1271 LIST_FOR_EACH( ptr, &fd->inode->locks )
1273 struct file_lock *lock = LIST_ENTRY( ptr, struct file_lock, inode_entry );
1274 if (lock->start == lock->end) continue;
1275 if (!lock_overlaps( lock, start, end )) continue;
1277 /* go through all the holes touched by this lock */
1278 for (cur = first; cur; cur = cur->next)
1280 if (cur->end <= lock->start) continue; /* hole is before start of lock */
1281 if (lock->end && cur->start >= lock->end) break; /* hole is after end of lock */
1283 /* now we know that lock is overlapping hole */
1285 if (cur->start >= lock->start) /* lock starts before hole, shrink from start */
1287 cur->start = lock->end;
1288 if (cur->start && cur->start < cur->end) break; /* done with this lock */
1289 /* now hole is empty, remove it */
1290 if (cur->next) cur->next->prev = cur->prev;
1291 if (cur->prev) cur->prev->next = cur->next;
1292 else if (!(first = cur->next)) goto done; /* no more holes at all */
1294 else if (!lock->end || cur->end <= lock->end) /* lock larger than hole, shrink from end */
1296 cur->end = lock->start;
1297 assert( cur->start < cur->end );
1299 else /* lock is in the middle of hole, split hole in two */
1301 next->prev = cur;
1302 next->next = cur->next;
1303 cur->next = next;
1304 next->start = lock->end;
1305 next->end = cur->end;
1306 cur->end = lock->start;
1307 assert( next->start < next->end );
1308 assert( cur->end < next->start );
1309 next++;
1310 break; /* done with this lock */
1315 /* clear Unix locks for all the holes */
1317 for (cur = first; cur; cur = cur->next)
1318 set_unix_lock( fd, cur->start, cur->end, F_UNLCK );
1320 done:
1321 free( buffer );
1324 /* create a new lock on a fd */
1325 static struct file_lock *add_lock( struct fd *fd, int shared, file_pos_t start, file_pos_t end )
1327 struct file_lock *lock;
1329 if (!(lock = alloc_object( &file_lock_ops ))) return NULL;
1330 lock->shared = shared;
1331 lock->start = start;
1332 lock->end = end;
1333 lock->fd = fd;
1334 lock->process = current->process;
1336 /* now try to set a Unix lock */
1337 if (!set_unix_lock( lock->fd, lock->start, lock->end, lock->shared ? F_RDLCK : F_WRLCK ))
1339 release_object( lock );
1340 return NULL;
1342 list_add_head( &fd->locks, &lock->fd_entry );
1343 list_add_head( &fd->inode->locks, &lock->inode_entry );
1344 list_add_head( &lock->process->locks, &lock->proc_entry );
1345 return lock;
1348 /* remove an existing lock */
1349 static void remove_lock( struct file_lock *lock, int remove_unix )
1351 struct inode *inode = lock->fd->inode;
1353 list_remove( &lock->fd_entry );
1354 list_remove( &lock->inode_entry );
1355 list_remove( &lock->proc_entry );
1356 if (remove_unix) remove_unix_locks( lock->fd, lock->start, lock->end );
1357 if (list_empty( &inode->locks )) inode_close_pending( inode, 1 );
1358 lock->process = NULL;
1359 wake_up( &lock->obj, 0 );
1360 release_object( lock );
1363 /* remove all locks owned by a given process */
1364 void remove_process_locks( struct process *process )
1366 struct list *ptr;
1368 while ((ptr = list_head( &process->locks )))
1370 struct file_lock *lock = LIST_ENTRY( ptr, struct file_lock, proc_entry );
1371 remove_lock( lock, 1 ); /* this removes it from the list */
1375 /* remove all locks on a given fd */
1376 static void remove_fd_locks( struct fd *fd )
1378 file_pos_t start = FILE_POS_T_MAX, end = 0;
1379 struct list *ptr;
1381 while ((ptr = list_head( &fd->locks )))
1383 struct file_lock *lock = LIST_ENTRY( ptr, struct file_lock, fd_entry );
1384 if (lock->start < start) start = lock->start;
1385 if (!lock->end || lock->end > end) end = lock->end - 1;
1386 remove_lock( lock, 0 );
1388 if (start < end) remove_unix_locks( fd, start, end + 1 );
1391 /* add a lock on an fd */
1392 /* returns handle to wait on */
1393 obj_handle_t lock_fd( struct fd *fd, file_pos_t start, file_pos_t count, int shared, int wait )
1395 struct list *ptr;
1396 file_pos_t end = start + count;
1398 if (!fd->inode) /* not a regular file */
1400 set_error( STATUS_INVALID_DEVICE_REQUEST );
1401 return 0;
1404 /* don't allow wrapping locks */
1405 if (end && end < start)
1407 set_error( STATUS_INVALID_PARAMETER );
1408 return 0;
1411 /* check if another lock on that file overlaps the area */
1412 LIST_FOR_EACH( ptr, &fd->inode->locks )
1414 struct file_lock *lock = LIST_ENTRY( ptr, struct file_lock, inode_entry );
1415 if (!lock_overlaps( lock, start, end )) continue;
1416 if (lock->shared && shared) continue;
1417 /* found one */
1418 if (!wait)
1420 set_error( STATUS_FILE_LOCK_CONFLICT );
1421 return 0;
1423 set_error( STATUS_PENDING );
1424 return alloc_handle( current->process, lock, SYNCHRONIZE, 0 );
1427 /* not found, add it */
1428 if (add_lock( fd, shared, start, end )) return 0;
1429 if (get_error() == STATUS_FILE_LOCK_CONFLICT)
1431 /* Unix lock conflict -> tell client to wait and retry */
1432 if (wait) set_error( STATUS_PENDING );
1434 return 0;
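/* Illustrative call (not from this file): a handler asking for a shared lock on the
 * first 4096 bytes and willing to wait would use
 *     obj_handle_t handle = lock_fd( fd, 0, 4096, 1, 1 );   with shared = 1, wait = 1
 * success returns 0 with no error; a conflict sets STATUS_FILE_LOCK_CONFLICT when
 * wait is 0, or STATUS_PENDING when wait is 1 (with a handle to wait on if the
 * conflict comes from another server-side lock) */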
1437 /* remove a lock on an fd */
1438 void unlock_fd( struct fd *fd, file_pos_t start, file_pos_t count )
1440 struct list *ptr;
1441 file_pos_t end = start + count;
1443 /* find an existing lock with the exact same parameters */
1444 LIST_FOR_EACH( ptr, &fd->locks )
1446 struct file_lock *lock = LIST_ENTRY( ptr, struct file_lock, fd_entry );
1447 if ((lock->start == start) && (lock->end == end))
1449 remove_lock( lock, 1 );
1450 return;
1453 set_error( STATUS_FILE_LOCK_CONFLICT );
1457 /****************************************************************/
1458 /* file descriptor functions */
1460 static void fd_dump( struct object *obj, int verbose )
1462 struct fd *fd = (struct fd *)obj;
1463 fprintf( stderr, "Fd unix_fd=%d user=%p options=%08x", fd->unix_fd, fd->user, fd->options );
1464 if (fd->inode) fprintf( stderr, " inode=%p unlink='%s'", fd->inode, fd->closed->unlink );
1465 fprintf( stderr, "\n" );
1468 static void fd_destroy( struct object *obj )
1470 struct fd *fd = (struct fd *)obj;
1472 free_async_queue( fd->read_q );
1473 free_async_queue( fd->write_q );
1474 free_async_queue( fd->wait_q );
1476 if (fd->completion) release_object( fd->completion );
1477 remove_fd_locks( fd );
1478 free( fd->unix_name );
1479 list_remove( &fd->inode_entry );
1480 if (fd->poll_index != -1) remove_poll_user( fd, fd->poll_index );
1481 if (fd->inode)
1483 inode_add_closed_fd( fd->inode, fd->closed );
1484 release_object( fd->inode );
1486 else /* no inode, close it right away */
1488 if (fd->unix_fd != -1) close( fd->unix_fd );
1492 /* set the events that select waits for on this fd */
1493 void set_fd_events( struct fd *fd, int events )
1495 int user = fd->poll_index;
1496 assert( poll_users[user] == fd );
1498 set_fd_epoll_events( fd, user, events );
1500 if (events == -1) /* stop waiting on this fd completely */
1502 pollfd[user].fd = -1;
1503 pollfd[user].events = POLLERR;
1504 pollfd[user].revents = 0;
1506 else if (pollfd[user].fd != -1 || !pollfd[user].events)
1508 pollfd[user].fd = fd->unix_fd;
1509 pollfd[user].events = events;
1513 /* prepare an fd for unmounting its corresponding device */
1514 static inline void unmount_fd( struct fd *fd )
1516 assert( fd->inode );
1518 async_wake_up( fd->read_q, STATUS_VOLUME_DISMOUNTED );
1519 async_wake_up( fd->write_q, STATUS_VOLUME_DISMOUNTED );
1521 if (fd->poll_index != -1) set_fd_events( fd, -1 );
1523 if (fd->unix_fd != -1) close( fd->unix_fd );
1525 fd->unix_fd = -1;
1526 fd->no_fd_status = STATUS_VOLUME_DISMOUNTED;
1527 fd->closed->unix_fd = -1;
1528 fd->closed->unlink[0] = 0;
1530 /* stop using Unix locks on this fd (existing locks have been removed by close) */
1531 fd->fs_locks = 0;
1534 /* allocate an fd object, without setting the unix fd yet */
1535 static struct fd *alloc_fd_object(void)
1537 struct fd *fd = alloc_object( &fd_ops );
1539 if (!fd) return NULL;
1541 fd->fd_ops = NULL;
1542 fd->user = NULL;
1543 fd->inode = NULL;
1544 fd->closed = NULL;
1545 fd->access = 0;
1546 fd->options = 0;
1547 fd->sharing = 0;
1548 fd->unix_fd = -1;
1549 fd->unix_name = NULL;
1550 fd->cacheable = 0;
1551 fd->signaled = 1;
1552 fd->fs_locks = 1;
1553 fd->poll_index = -1;
1554 fd->read_q = NULL;
1555 fd->write_q = NULL;
1556 fd->wait_q = NULL;
1557 fd->completion = NULL;
1558 list_init( &fd->inode_entry );
1559 list_init( &fd->locks );
1561 if ((fd->poll_index = add_poll_user( fd )) == -1)
1563 release_object( fd );
1564 return NULL;
1566 return fd;
1569 /* allocate a pseudo fd object, for objects that need to behave like files but don't have a unix fd */
1570 struct fd *alloc_pseudo_fd( const struct fd_ops *fd_user_ops, struct object *user, unsigned int options )
1572 struct fd *fd = alloc_object( &fd_ops );
1574 if (!fd) return NULL;
1576 fd->fd_ops = fd_user_ops;
1577 fd->user = user;
1578 fd->inode = NULL;
1579 fd->closed = NULL;
1580 fd->access = 0;
1581 fd->options = options;
1582 fd->sharing = 0;
1583 fd->unix_name = NULL;
1584 fd->unix_fd = -1;
1585 fd->cacheable = 0;
1586 fd->signaled = 0;
1587 fd->fs_locks = 0;
1588 fd->poll_index = -1;
1589 fd->read_q = NULL;
1590 fd->write_q = NULL;
1591 fd->wait_q = NULL;
1592 fd->completion = NULL;
1593 fd->no_fd_status = STATUS_BAD_DEVICE_TYPE;
1594 list_init( &fd->inode_entry );
1595 list_init( &fd->locks );
1596 return fd;
1599 /* duplicate an fd object for a different user */
1600 struct fd *dup_fd_object( struct fd *orig, unsigned int access, unsigned int sharing, unsigned int options )
1602 struct fd *fd = alloc_fd_object();
1604 if (!fd) return NULL;
1606 fd->access = access;
1607 fd->options = options;
1608 fd->sharing = sharing;
1609 fd->cacheable = orig->cacheable;
1611 if (orig->unix_name)
1613 if (!(fd->unix_name = mem_alloc( strlen(orig->unix_name) + 1 ))) goto failed;
1614 strcpy( fd->unix_name, orig->unix_name );
1617 if (orig->inode)
1619 struct closed_fd *closed = mem_alloc( sizeof(*closed) );
1620 if (!closed) goto failed;
1621 if ((fd->unix_fd = dup( orig->unix_fd )) == -1)
1623 file_set_error();
1624 free( closed );
1625 goto failed;
1627 closed->unix_fd = fd->unix_fd;
1628 closed->unlink[0] = 0;
1629 fd->closed = closed;
1630 fd->inode = (struct inode *)grab_object( orig->inode );
1631 list_add_head( &fd->inode->open, &fd->inode_entry );
1633 else if ((fd->unix_fd = dup( orig->unix_fd )) == -1)
1635 file_set_error();
1636 goto failed;
1638 return fd;
1640 failed:
1641 release_object( fd );
1642 return NULL;
1645 /* set the status to return when the fd has no associated unix fd */
1646 void set_no_fd_status( struct fd *fd, unsigned int status )
1648 fd->no_fd_status = status;
1651 /* check if the desired access is possible without violating */
1652 /* the sharing mode of other opens of the same file */
1653 static unsigned int check_sharing( struct fd *fd, unsigned int access, unsigned int sharing,
1654 unsigned int open_flags, unsigned int options )
1656 unsigned int existing_sharing = FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE;
1657 unsigned int existing_access = 0;
1658 struct list *ptr;
1660 fd->access = access;
1661 fd->sharing = sharing;
1663 LIST_FOR_EACH( ptr, &fd->inode->open )
1665 struct fd *fd_ptr = LIST_ENTRY( ptr, struct fd, inode_entry );
1666 if (fd_ptr != fd)
1668 /* if access mode is 0, sharing mode is ignored */
1669 if (fd_ptr->access) existing_sharing &= fd_ptr->sharing;
1670 existing_access |= fd_ptr->access;
1674 if (((access & FILE_UNIX_READ_ACCESS) && !(existing_sharing & FILE_SHARE_READ)) ||
1675 ((access & FILE_UNIX_WRITE_ACCESS) && !(existing_sharing & FILE_SHARE_WRITE)) ||
1676 ((access & DELETE) && !(existing_sharing & FILE_SHARE_DELETE)))
1677 return STATUS_SHARING_VIOLATION;
1678 if (((existing_access & FILE_MAPPING_WRITE) && !(sharing & FILE_SHARE_WRITE)) ||
1679 ((existing_access & FILE_MAPPING_IMAGE) && (access & FILE_SHARE_WRITE)))
1680 return STATUS_SHARING_VIOLATION;
1681 if ((existing_access & FILE_MAPPING_IMAGE) && (options & FILE_DELETE_ON_CLOSE))
1682 return STATUS_CANNOT_DELETE;
1683 if ((existing_access & FILE_MAPPING_ACCESS) && (open_flags & O_TRUNC))
1684 return STATUS_USER_MAPPED_FILE;
1685 if (!access) return 0; /* if access mode is 0, sharing mode is ignored (except for mappings) */
1686 if (((existing_access & FILE_UNIX_READ_ACCESS) && !(sharing & FILE_SHARE_READ)) ||
1687 ((existing_access & FILE_UNIX_WRITE_ACCESS) && !(sharing & FILE_SHARE_WRITE)) ||
1688 ((existing_access & DELETE) && !(sharing & FILE_SHARE_DELETE)))
1689 return STATUS_SHARING_VIOLATION;
1690 return 0;
1693 /* sets the user of an fd that previously had no user */
1694 void set_fd_user( struct fd *fd, const struct fd_ops *user_ops, struct object *user )
1696 assert( fd->fd_ops == NULL );
1697 fd->fd_ops = user_ops;
1698 fd->user = user;
1701 static char *dup_fd_name( struct fd *root, const char *name )
1703 char *ret;
1705 if (!root) return strdup( name );
1706 if (!root->unix_name) return NULL;
1708 /* skip . prefix */
1709 if (name[0] == '.' && (!name[1] || name[1] == '/')) name++;
1711 if ((ret = malloc( strlen(root->unix_name) + strlen(name) + 2 )))
1713 strcpy( ret, root->unix_name );
1714 if (name[0] && name[0] != '/') strcat( ret, "/" );
1715 strcat( ret, name );
1717 return ret;
1720 /* open() wrapper that returns a struct fd with no fd user set */
1721 struct fd *open_fd( struct fd *root, const char *name, int flags, mode_t *mode, unsigned int access,
1722 unsigned int sharing, unsigned int options )
1724 struct stat st;
1725 struct closed_fd *closed_fd;
1726 struct fd *fd;
1727 const char *unlink_name = "";
1728 int root_fd = -1;
1729 int rw_mode;
1731 if (((options & FILE_DELETE_ON_CLOSE) && !(access & DELETE)) ||
1732 ((options & FILE_DIRECTORY_FILE) && (flags & O_TRUNC)))
1734 set_error( STATUS_INVALID_PARAMETER );
1735 return NULL;
1738 if (!(fd = alloc_fd_object())) return NULL;
1740 fd->options = options;
1741 if (options & FILE_DELETE_ON_CLOSE) unlink_name = name;
1742 if (!(closed_fd = mem_alloc( sizeof(*closed_fd) + strlen(unlink_name) )))
1744 release_object( fd );
1745 return NULL;
1748 if (root)
1750 if ((root_fd = get_unix_fd( root )) == -1) goto error;
1751 if (fchdir( root_fd ) == -1)
1753 file_set_error();
1754 root_fd = -1;
1755 goto error;
1759 /* create the directory if needed */
1760 if ((options & FILE_DIRECTORY_FILE) && (flags & O_CREAT))
1762 if (mkdir( name, 0777 ) == -1)
1764 if (errno != EEXIST || (flags & O_EXCL))
1766 file_set_error();
1767 goto error;
1770 flags &= ~(O_CREAT | O_EXCL | O_TRUNC);
1773 if ((access & FILE_UNIX_WRITE_ACCESS) && !(options & FILE_DIRECTORY_FILE))
1775 if (access & FILE_UNIX_READ_ACCESS) rw_mode = O_RDWR;
1776 else rw_mode = O_WRONLY;
1778 else rw_mode = O_RDONLY;
1780 fd->unix_name = dup_fd_name( root, name );
1782 if ((fd->unix_fd = open( name, rw_mode | (flags & ~O_TRUNC), *mode )) == -1)
1784 /* if we tried to open a directory for write access, retry read-only */
1785 if (errno == EISDIR)
1787 if ((access & FILE_UNIX_WRITE_ACCESS) || (flags & O_CREAT))
1788 fd->unix_fd = open( name, O_RDONLY | (flags & ~(O_TRUNC | O_CREAT | O_EXCL)), *mode );
1791 if (fd->unix_fd == -1)
1793 file_set_error();
1794 goto error;
1798 closed_fd->unix_fd = fd->unix_fd;
1799 closed_fd->unlink[0] = 0;
1800 fstat( fd->unix_fd, &st );
1801 *mode = st.st_mode;
1803 /* only bother with an inode for normal files and directories */
1804 if (S_ISREG(st.st_mode) || S_ISDIR(st.st_mode))
1806 unsigned int err;
1807 struct inode *inode = get_inode( st.st_dev, st.st_ino, fd->unix_fd );
1809 if (!inode)
1811 /* we can close the fd because there are no others open on the same file,
1812 * otherwise we wouldn't have failed to allocate a new inode
1813 */
1814 goto error;
1816 fd->inode = inode;
1817 fd->closed = closed_fd;
1818 fd->cacheable = !inode->device->removable;
1819 list_add_head( &inode->open, &fd->inode_entry );
1821 /* check directory options */
1822 if ((options & FILE_DIRECTORY_FILE) && !S_ISDIR(st.st_mode))
1824 release_object( fd );
1825 set_error( STATUS_NOT_A_DIRECTORY );
1826 return NULL;
1828 if ((options & FILE_NON_DIRECTORY_FILE) && S_ISDIR(st.st_mode))
1830 release_object( fd );
1831 set_error( STATUS_FILE_IS_A_DIRECTORY );
1832 return NULL;
1834 if ((err = check_sharing( fd, access, sharing, flags, options )))
1836 release_object( fd );
1837 set_error( err );
1838 return NULL;
1840 strcpy( closed_fd->unlink, unlink_name );
1841 if (flags & O_TRUNC)
1843 if (S_ISDIR(st.st_mode))
1845 release_object( fd );
1846 set_error( STATUS_OBJECT_NAME_COLLISION );
1847 return NULL;
1849 ftruncate( fd->unix_fd, 0 );
1852 else /* special file */
1854 if (options & FILE_DIRECTORY_FILE)
1856 set_error( STATUS_NOT_A_DIRECTORY );
1857 goto error;
1859 if (unlink_name[0]) /* we can't unlink special files */
1861 set_error( STATUS_INVALID_PARAMETER );
1862 goto error;
1864 free( closed_fd );
1865 fd->cacheable = 1;
1867 return fd;
1869 error:
1870 release_object( fd );
1871 free( closed_fd );
1872 if (root_fd != -1) fchdir( server_dir_fd ); /* go back to the server dir */
1873 return NULL;
1876 /* create an fd for an anonymous file */
1877 /* if the function fails the unix fd is closed */
1878 struct fd *create_anonymous_fd( const struct fd_ops *fd_user_ops, int unix_fd, struct object *user,
1879 unsigned int options )
1881 struct fd *fd = alloc_fd_object();
1883 if (fd)
1885 set_fd_user( fd, fd_user_ops, user );
1886 fd->unix_fd = unix_fd;
1887 fd->options = options;
1888 return fd;
1890 close( unix_fd );
1891 return NULL;
1894 /* retrieve the object that is using an fd */
1895 void *get_fd_user( struct fd *fd )
1897 return fd->user;
1900 /* retrieve the opening options for the fd */
1901 unsigned int get_fd_options( struct fd *fd )
1903 return fd->options;
1906 /* retrieve the unix fd for an object */
1907 int get_unix_fd( struct fd *fd )
1909 if (fd->unix_fd == -1) set_error( fd->no_fd_status );
1910 return fd->unix_fd;
1913 /* check if two file descriptors point to the same file */
1914 int is_same_file_fd( struct fd *fd1, struct fd *fd2 )
1916 return fd1->inode == fd2->inode;
1919 /* allow the fd to be cached (can't be reset once set) */
1920 void allow_fd_caching( struct fd *fd )
1922 fd->cacheable = 1;
1925 /* check if fd is on a removable device */
1926 int is_fd_removable( struct fd *fd )
1928 return (fd->inode && fd->inode->device->removable);
1931 /* set or clear the fd signaled state */
1932 void set_fd_signaled( struct fd *fd, int signaled )
1934 fd->signaled = signaled;
1935 if (signaled) wake_up( fd->user, 0 );
1938 /* set or clear the fd signaled state */
1939 int is_fd_signaled( struct fd *fd )
1941 return fd->signaled;
1944 /* handler for close_handle that refuses to close fd-associated handles in other processes */
1945 int fd_close_handle( struct object *obj, struct process *process, obj_handle_t handle )
1947 return (!current || current->process == process);
1950 /* check if events are pending and if yes return which one(s) */
1951 int check_fd_events( struct fd *fd, int events )
1953 struct pollfd pfd;
1955 if (fd->unix_fd == -1) return POLLERR;
1956 if (fd->inode) return events; /* regular files are always signaled */
1958 pfd.fd = fd->unix_fd;
1959 pfd.events = events;
1960 if (poll( &pfd, 1, 0 ) <= 0) return 0;
1961 return pfd.revents;
1964 /* default signaled() routine for objects that poll() on an fd */
1965 int default_fd_signaled( struct object *obj, struct thread *thread )
1967 struct fd *fd = get_obj_fd( obj );
1968 int ret = fd->signaled;
1969 release_object( fd );
1970 return ret;
1973 /* default map_access() routine for objects that behave like an fd */
1974 unsigned int default_fd_map_access( struct object *obj, unsigned int access )
1976 if (access & GENERIC_READ) access |= FILE_GENERIC_READ;
1977 if (access & GENERIC_WRITE) access |= FILE_GENERIC_WRITE;
1978 if (access & GENERIC_EXECUTE) access |= FILE_GENERIC_EXECUTE;
1979 if (access & GENERIC_ALL) access |= FILE_ALL_ACCESS;
1980 return access & ~(GENERIC_READ | GENERIC_WRITE | GENERIC_EXECUTE | GENERIC_ALL);
1983 int default_fd_get_poll_events( struct fd *fd )
1985 int events = 0;
1987 if (async_waiting( fd->read_q )) events |= POLLIN;
1988 if (async_waiting( fd->write_q )) events |= POLLOUT;
1989 return events;
1992 /* default handler for poll() events */
1993 void default_poll_event( struct fd *fd, int event )
1995 if (event & (POLLIN | POLLERR | POLLHUP)) async_wake_up( fd->read_q, STATUS_ALERTED );
1996 if (event & (POLLOUT | POLLERR | POLLHUP)) async_wake_up( fd->write_q, STATUS_ALERTED );
1998 /* if an error occurred, stop polling this fd to avoid busy-looping */
1999 if (event & (POLLERR | POLLHUP)) set_fd_events( fd, -1 );
2000 else if (!fd->inode) set_fd_events( fd, fd->fd_ops->get_poll_events( fd ) );
struct async *fd_queue_async( struct fd *fd, const async_data_t *data, int type )
{
    struct async_queue *queue;
    struct async *async;

    switch (type)
    {
    case ASYNC_TYPE_READ:
        if (!fd->read_q && !(fd->read_q = create_async_queue( fd ))) return NULL;
        queue = fd->read_q;
        break;
    case ASYNC_TYPE_WRITE:
        if (!fd->write_q && !(fd->write_q = create_async_queue( fd ))) return NULL;
        queue = fd->write_q;
        break;
    case ASYNC_TYPE_WAIT:
        if (!fd->wait_q && !(fd->wait_q = create_async_queue( fd ))) return NULL;
        queue = fd->wait_q;
        break;
    default:
        queue = NULL;
        assert(0);
    }

    if ((async = create_async( current, queue, data )) && type != ASYNC_TYPE_WAIT)
    {
        if (!fd->inode)
            set_fd_events( fd, fd->fd_ops->get_poll_events( fd ) );
        else  /* regular files are always ready for read and write */
            async_wake_up( queue, STATUS_ALERTED );
    }
    return async;
}

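/* Note that only fds without an inode (sockets, pipes, character devices) are
 * added to the main poll loop here; inode-backed regular files never block, so
 * their asyncs are woken immediately with STATUS_ALERTED.  ASYNC_TYPE_WAIT
 * asyncs are merely queued and are typically completed later through
 * fd_async_wake_up(). */
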
void fd_async_wake_up( struct fd *fd, int type, unsigned int status )
{
    switch (type)
    {
    case ASYNC_TYPE_READ:
        async_wake_up( fd->read_q, status );
        break;
    case ASYNC_TYPE_WRITE:
        async_wake_up( fd->write_q, status );
        break;
    case ASYNC_TYPE_WAIT:
        async_wake_up( fd->wait_q, status );
        break;
    default:
        assert(0);
    }
}

void fd_reselect_async( struct fd *fd, struct async_queue *queue )
{
    fd->fd_ops->reselect_async( fd, queue );
}

void no_fd_queue_async( struct fd *fd, const async_data_t *data, int type, int count )
{
    set_error( STATUS_OBJECT_TYPE_MISMATCH );
}

void default_fd_queue_async( struct fd *fd, const async_data_t *data, int type, int count )
{
    struct async *async;

    if ((async = fd_queue_async( fd, data, type )))
    {
        release_object( async );
        set_error( STATUS_PENDING );
    }
}

/* default reselect_async() fd routine */
void default_fd_reselect_async( struct fd *fd, struct async_queue *queue )
{
    if (queue != fd->wait_q)
    {
        int poll_events = fd->fd_ops->get_poll_events( fd );
        int events = check_fd_events( fd, poll_events );
        if (events) fd->fd_ops->poll_event( fd, events );
        else set_fd_events( fd, poll_events );
    }
}

/* default cancel_async() fd routine */
void default_fd_cancel_async( struct fd *fd, struct process *process, struct thread *thread, client_ptr_t iosb )
{
    int n = 0;

    n += async_wake_up_by( fd->read_q, process, thread, iosb, STATUS_CANCELLED );
    n += async_wake_up_by( fd->write_q, process, thread, iosb, STATUS_CANCELLED );
    n += async_wake_up_by( fd->wait_q, process, thread, iosb, STATUS_CANCELLED );
    if (!n && iosb)
        set_error( STATUS_NOT_FOUND );
}

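/* An iosb of zero matches any pending async for the given process/thread, so it
 * cancels everything queued on the fd; a non-zero iosb targets the async tied to
 * that particular I/O status block, which is why STATUS_NOT_FOUND is only raised
 * in the latter case. */
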
/* default flush() routine for objects that don't support flushing */
void no_flush( struct fd *fd, struct event **event )
{
    set_error( STATUS_OBJECT_TYPE_MISMATCH );
}

static inline int is_valid_mounted_device( struct stat *st )
{
#if defined(linux) || defined(__sun__)
    return S_ISBLK( st->st_mode );
#else
    /* disks are char devices on *BSD */
    return S_ISCHR( st->st_mode );
#endif
}

/* close all Unix file descriptors on a device to allow unmounting it */
static void unmount_device( struct fd *device_fd )
{
    unsigned int i;
    struct stat st;
    struct device *device;
    struct inode *inode;
    struct fd *fd;
    int unix_fd = get_unix_fd( device_fd );

    if (unix_fd == -1) return;

    if (fstat( unix_fd, &st ) == -1 || !is_valid_mounted_device( &st ))
    {
        set_error( STATUS_INVALID_PARAMETER );
        return;
    }

    if (!(device = get_device( st.st_rdev, -1 ))) return;

    for (i = 0; i < INODE_HASH_SIZE; i++)
    {
        LIST_FOR_EACH_ENTRY( inode, &device->inode_hash[i], struct inode, entry )
        {
            LIST_FOR_EACH_ENTRY( fd, &inode->open, struct fd, inode_entry )
            {
                unmount_fd( fd );
            }
            inode_close_pending( inode, 0 );
        }
    }
    /* remove it from the hash table */
    list_remove( &device->entry );
    list_init( &device->entry );
    release_object( device );
}

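/* Only the cached Unix descriptors are closed here; the server-side fd objects
 * and their handles stay alive, but operations that need the Unix descriptor
 * will fail afterwards.  Dropping the device from the hash table ensures that
 * files opened later get a fresh device entry. */
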
obj_handle_t no_fd_ioctl( struct fd *fd, ioctl_code_t code, const async_data_t *async,
                          int blocking, const void *data, data_size_t size )
{
    set_error( STATUS_OBJECT_TYPE_MISMATCH );
    return 0;
}

/* default ioctl() routine */
obj_handle_t default_fd_ioctl( struct fd *fd, ioctl_code_t code, const async_data_t *async,
                               int blocking, const void *data, data_size_t size )
{
    switch(code)
    {
    case FSCTL_DISMOUNT_VOLUME:
        unmount_device( fd );
        return 0;
    default:
        set_error( STATUS_NOT_SUPPORTED );
        return 0;
    }
}

/* same as get_handle_obj but retrieve the struct fd associated with the object */
static struct fd *get_handle_fd_obj( struct process *process, obj_handle_t handle,
                                     unsigned int access )
{
    struct fd *fd = NULL;
    struct object *obj;

    if ((obj = get_handle_obj( process, handle, access, NULL )))
    {
        fd = get_obj_fd( obj );
        release_object( obj );
    }
    return fd;
}

struct completion *fd_get_completion( struct fd *fd, apc_param_t *p_key )
{
    *p_key = fd->comp_key;
    return fd->completion ? (struct completion *)grab_object( fd->completion ) : NULL;
}

void fd_copy_completion( struct fd *src, struct fd *dst )
{
    assert( !dst->completion );
    dst->completion = fd_get_completion( src, &dst->comp_key );
}

/* flush a file's buffers */
DECL_HANDLER(flush_file)
{
    struct fd *fd = get_handle_fd_obj( current->process, req->handle, 0 );
    struct event *event = NULL;

    if (fd)
    {
        fd->fd_ops->flush( fd, &event );
        if (event)
        {
            reply->event = alloc_handle( current->process, event, SYNCHRONIZE, 0 );
        }
        release_object( fd );
    }
}

/* open a file object */
DECL_HANDLER(open_file_object)
{
    struct unicode_str name;
    struct directory *root = NULL;
    struct object *obj, *result;

    get_req_unicode_str( &name );
    if (req->rootdir && !(root = get_directory_obj( current->process, req->rootdir, 0 )))
        return;

    if ((obj = open_object_dir( root, &name, req->attributes, NULL )))
    {
        if ((result = obj->ops->open_file( obj, req->access, req->sharing, req->options )))
        {
            reply->handle = alloc_handle( current->process, result, req->access, req->attributes );
            release_object( result );
        }
        release_object( obj );
    }

    if (root) release_object( root );
}

/* get the Unix name from a file handle */
DECL_HANDLER(get_handle_unix_name)
{
    struct fd *fd;

    if ((fd = get_handle_fd_obj( current->process, req->handle, 0 )))
    {
        if (fd->unix_name)
        {
            data_size_t name_len = strlen( fd->unix_name );
            reply->name_len = name_len;
            if (name_len <= get_reply_max_size()) set_reply_data( fd->unix_name, name_len );
            else set_error( STATUS_BUFFER_OVERFLOW );
        }
        release_object( fd );
    }
}

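/* reply->name_len is set even when the name does not fit into the reply buffer,
 * so a client that receives STATUS_BUFFER_OVERFLOW can retry the request with a
 * buffer of at least that size. */
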
/* get a Unix fd to access a file */
DECL_HANDLER(get_handle_fd)
{
    struct fd *fd;

    if ((fd = get_handle_fd_obj( current->process, req->handle, 0 )))
    {
        int unix_fd = get_unix_fd( fd );
        if (unix_fd != -1)
        {
            reply->type = fd->fd_ops->get_fd_type( fd );
            reply->cacheable = fd->cacheable;
            reply->options = fd->options;
            reply->access = get_handle_access( current->process, req->handle );
            send_client_fd( current->process, unix_fd, req->handle );
        }
        release_object( fd );
    }
}

/* perform an ioctl on a file */
DECL_HANDLER(ioctl)
{
    unsigned int access = (req->code >> 14) & (FILE_READ_DATA|FILE_WRITE_DATA);
    struct fd *fd = get_handle_fd_obj( current->process, req->async.handle, access );

    if (fd)
    {
        reply->wait    = fd->fd_ops->ioctl( fd, req->code, &req->async, req->blocking,
                                            get_req_data(), get_req_data_size() );
        reply->options = fd->options;
        release_object( fd );
    }
}

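/* The access check above relies on the layout of Windows ioctl codes,
 *
 *     code = (device_type << 16) | (required_access << 14) | (function << 2) | method
 *
 * together with FILE_READ_ACCESS/FILE_WRITE_ACCESS having the same values as
 * FILE_READ_DATA/FILE_WRITE_DATA (1 and 2), so shifting the code right by 14 and
 * masking with those bits yields exactly the rights the handle must have been
 * opened with. */
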
/* create / reschedule an async I/O */
DECL_HANDLER(register_async)
{
    unsigned int access;
    struct fd *fd;

    switch(req->type)
    {
    case ASYNC_TYPE_READ:
        access = FILE_READ_DATA;
        break;
    case ASYNC_TYPE_WRITE:
        access = FILE_WRITE_DATA;
        break;
    default:
        set_error( STATUS_INVALID_PARAMETER );
        return;
    }

    if ((fd = get_handle_fd_obj( current->process, req->async.handle, access )))
    {
        if (get_unix_fd( fd ) != -1) fd->fd_ops->queue_async( fd, &req->async, req->type, req->count );
        release_object( fd );
    }
}

/* cancel all async I/O on a handle */
DECL_HANDLER(cancel_async)
{
    struct fd *fd = get_handle_fd_obj( current->process, req->handle, 0 );
    struct thread *thread = req->only_thread ? current : NULL;

    if (fd)
    {
        if (get_unix_fd( fd ) != -1) fd->fd_ops->cancel_async( fd, current->process, thread, req->iosb );
        release_object( fd );
    }
}

/* attach completion object to a fd */
DECL_HANDLER(set_completion_info)
{
    struct fd *fd = get_handle_fd_obj( current->process, req->handle, 0 );

    if (fd)
    {
        if (!(fd->options & (FILE_SYNCHRONOUS_IO_ALERT | FILE_SYNCHRONOUS_IO_NONALERT)) && !fd->completion)
        {
            fd->completion = get_completion_obj( current->process, req->chandle, IO_COMPLETION_MODIFY_STATE );
            fd->comp_key = req->ckey;
        }
        else set_error( STATUS_INVALID_PARAMETER );
        release_object( fd );
    }
}

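/* Handles opened for synchronous I/O never complete through a completion port,
 * which is presumably why the association is refused for them here; an existing
 * association cannot be replaced either. */
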
/* push new completion msg into a completion queue attached to the fd */
DECL_HANDLER(add_fd_completion)
{
    struct fd *fd = get_handle_fd_obj( current->process, req->handle, 0 );
    if (fd)
    {
        if (fd->completion)
            add_completion( fd->completion, fd->comp_key, req->cvalue, req->status, req->information );
        release_object( fd );
    }
}