 * Server-side file descriptor management
 *
 * Copyright (C) 2000, 2003 Alexandre Julliard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
#include "wine/port.h"

#ifdef HAVE_SYS_POLL_H
#ifdef HAVE_LINUX_MAJOR_H
#include <linux/major.h>
#ifdef HAVE_SYS_STATVFS_H
#include <sys/statvfs.h>
#ifdef HAVE_SYS_PARAM_H
#include <sys/param.h>
#ifdef HAVE_SYS_MOUNT_H
#include <sys/mount.h>
#ifdef HAVE_SYS_STATFS_H
#include <sys/statfs.h>
#ifdef HAVE_SYS_EVENT_H
#include <sys/event.h>
#include <sys/types.h>

#define WIN32_NO_STATUS
#if defined(HAVE_SYS_EPOLL_H) && defined(HAVE_EPOLL_CREATE)
# include <sys/epoll.h>
#elif defined(linux) && defined(__i386__) && defined(HAVE_STDINT_H)
# define EPOLLIN POLLIN
# define EPOLLOUT POLLOUT
# define EPOLLERR POLLERR
# define EPOLLHUP POLLHUP
# define EPOLL_CTL_ADD 1
# define EPOLL_CTL_DEL 2
# define EPOLL_CTL_MOD 3

typedef union epoll_data

#define SYSCALL_RET(ret) do { \
        if (ret < 0) { errno = -ret; ret = -1; } \
static inline int epoll_create( int size )
    __asm__( "pushl %%ebx; movl %2,%%ebx; int $0x80; popl %%ebx"
             : "=a" (ret) : "0" (254 /*NR_epoll_create*/), "r" (size) );

static inline int epoll_ctl( int epfd, int op, int fd, const struct epoll_event *event )
    __asm__( "pushl %%ebx; movl %2,%%ebx; int $0x80; popl %%ebx"
             : "0" (255 /*NR_epoll_ctl*/), "r" (epfd), "c" (op), "d" (fd), "S" (event), "m" (*event) );

static inline int epoll_wait( int epfd, struct epoll_event *events, int maxevents, int timeout )
    __asm__( "pushl %%ebx; movl %2,%%ebx; int $0x80; popl %%ebx"
             : "0" (256 /*NR_epoll_wait*/), "r" (epfd), "c" (events), "d" (maxevents), "S" (timeout)

#endif /* linux && __i386__ && HAVE_STDINT_H */
/* Because of the stupid Posix locking semantics, we need to keep
 * track of all file descriptors referencing a given file, and not
 * close a single one until all the locks are gone (sigh).
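/* Illustrative sketch (not part of the original file): the POSIX behaviour the
 * comment above refers to.  fcntl() record locks belong to the process, not to
 * one descriptor, so closing *any* descriptor of the file silently drops every
 * lock the process holds on it.  The path and values below are hypothetical.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

static void posix_lock_pitfall(void)
{
    int fd1 = open( "/tmp/demo", O_RDWR | O_CREAT, 0666 );
    int fd2 = open( "/tmp/demo", O_RDWR );
    struct flock fl;

    fl.l_type   = F_WRLCK;
    fl.l_whence = SEEK_SET;
    fl.l_start  = 0;
    fl.l_len    = 10;
    fcntl( fd1, F_SETLK, &fl );  /* lock bytes [0,10) through fd1 */
    close( fd2 );                /* the lock taken through fd1 is now gone */
    close( fd1 );
}
#endif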
/* file descriptor object */

/* closed_fd is used to keep track of the unix fd belonging to a closed fd object */
    struct list entry;       /* entry in inode closed list */
    int         unix_fd;     /* the unix file descriptor */
    char        unlink[1];   /* name to unlink on close (if any) */

    struct object        obj;         /* object header */
    const struct fd_ops *fd_ops;      /* file descriptor operations */
    struct inode        *inode;       /* inode that this fd belongs to */
    struct list          inode_entry; /* entry in inode fd list */
    struct closed_fd    *closed;      /* structure to store the unix fd at destroy time */
    struct object       *user;        /* object using this file descriptor */
    struct list          locks;       /* list of locks on this fd */
    unsigned int         access;      /* file access (FILE_READ_DATA etc.) */
    unsigned int         sharing;     /* file sharing mode */
    int                  unix_fd;     /* unix file descriptor */
    int                  fs_locks :1; /* can we use filesystem locks for this fd? */
    int                  unmounted :1;/* has the device been unmounted? */
    int                  poll_index;  /* index of fd in poll array */
    struct list          read_q;      /* async readers of this fd */
    struct list          write_q;     /* async writers of this fd */
static void fd_dump( struct object *obj, int verbose );
static void fd_destroy( struct object *obj );

static const struct object_ops fd_ops =
    sizeof(struct fd),          /* size */
    no_add_queue,               /* add_queue */
    NULL,                       /* remove_queue */
    NULL,                       /* satisfied */
    no_signal,                  /* signal */
    no_get_fd,                  /* get_fd */
    no_map_access,              /* map_access */
    no_lookup_name,             /* lookup_name */
    no_open_file,               /* open_file */
    no_close_handle,            /* close_handle */
    fd_destroy                  /* destroy */

#define DEVICE_HASH_SIZE 7
#define INODE_HASH_SIZE 17
    struct object obj;        /* object header */
    struct list entry;        /* entry in device hash list */
    dev_t dev;                /* device number */
    int removable;            /* removable device? (or -1 if unknown) */
    struct list inode_hash[INODE_HASH_SIZE];  /* inodes hash table */

static void device_dump( struct object *obj, int verbose );
static void device_destroy( struct object *obj );

static const struct object_ops device_ops =
    sizeof(struct device),      /* size */
    device_dump,                /* dump */
    no_add_queue,               /* add_queue */
    NULL,                       /* remove_queue */
    NULL,                       /* satisfied */
    no_signal,                  /* signal */
    no_get_fd,                  /* get_fd */
    no_map_access,              /* map_access */
    no_lookup_name,             /* lookup_name */
    no_open_file,               /* open_file */
    no_close_handle,            /* close_handle */
    device_destroy              /* destroy */
    struct object obj;        /* object header */
    struct list entry;        /* inode hash list entry */
    struct device *device;    /* device containing this inode */
    ino_t ino;                /* inode number */
    struct list open;         /* list of open file descriptors */
    struct list locks;        /* list of file locks */
    struct list closed;       /* list of file descriptors to close at destroy time */

static void inode_dump( struct object *obj, int verbose );
static void inode_destroy( struct object *obj );

static const struct object_ops inode_ops =
    sizeof(struct inode),       /* size */
    inode_dump,                 /* dump */
    no_add_queue,               /* add_queue */
    NULL,                       /* remove_queue */
    NULL,                       /* satisfied */
    no_signal,                  /* signal */
    no_get_fd,                  /* get_fd */
    no_map_access,              /* map_access */
    no_lookup_name,             /* lookup_name */
    no_open_file,               /* open_file */
    no_close_handle,            /* close_handle */
    inode_destroy               /* destroy */
/* file lock object */

    struct object obj;        /* object header */
    struct fd *fd;            /* fd owning this lock */
    struct list fd_entry;     /* entry in list of locks on a given fd */
    struct list inode_entry;  /* entry in inode list of locks */
    int shared;               /* shared lock? */
    file_pos_t start;         /* locked region is interval [start;end) */
    struct process *process;  /* process owning this lock */
    struct list proc_entry;   /* entry in list of locks owned by the process */

static void file_lock_dump( struct object *obj, int verbose );
static int file_lock_signaled( struct object *obj, struct thread *thread );

static const struct object_ops file_lock_ops =
    sizeof(struct file_lock),   /* size */
    file_lock_dump,             /* dump */
    add_queue,                  /* add_queue */
    remove_queue,               /* remove_queue */
    file_lock_signaled,         /* signaled */
    no_satisfied,               /* satisfied */
    no_signal,                  /* signal */
    no_get_fd,                  /* get_fd */
    no_map_access,              /* map_access */
    no_lookup_name,             /* lookup_name */
    no_open_file,               /* open_file */
    no_close_handle,            /* close_handle */
    no_destroy                  /* destroy */
#define OFF_T_MAX       (~((file_pos_t)1 << (8*sizeof(off_t)-1)))
#define FILE_POS_T_MAX  (~(file_pos_t)0)

static file_pos_t max_unix_offset = OFF_T_MAX;

#define DUMP_LONG_LONG(val) do { \
    if (sizeof(val) > sizeof(unsigned long) && (val) > ~0UL) \
        fprintf( stderr, "%lx%08lx", (unsigned long)((unsigned long long)(val) >> 32), (unsigned long)(val) ); \
        fprintf( stderr, "%lx", (unsigned long)(val) ); \
/****************************************************************/
/* timeouts support */

    struct list entry;           /* entry in sorted timeout list */
    struct timeval when;         /* timeout expiry (absolute time) */
    timeout_callback callback;   /* callback function */
    void *private;               /* callback private data */

static struct list timeout_list = LIST_INIT(timeout_list);   /* sorted timeouts list */
struct timeval current_time;
/* add a timeout user */
struct timeout_user *add_timeout_user( const struct timeval *when, timeout_callback func,
    struct timeout_user *user;

    if (!(user = mem_alloc( sizeof(*user) ))) return NULL;
    user->callback = func;
    user->private = private;

    /* Now insert it in the linked list */

    LIST_FOR_EACH( ptr, &timeout_list )
        struct timeout_user *timeout = LIST_ENTRY( ptr, struct timeout_user, entry );
        if (!time_before( &timeout->when, when )) break;
    list_add_before( ptr, &user->entry );
/* remove a timeout user */
void remove_timeout_user( struct timeout_user *user )
    list_remove( &user->entry );

/* add a timeout in milliseconds to an absolute time */
void add_timeout( struct timeval *when, int timeout )
    long sec = timeout / 1000;
    if ((when->tv_usec += (timeout - 1000*sec) * 1000) >= 1000000)
        when->tv_usec -= 1000000;
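/* Usage sketch (not from the original file): a caller that wants a callback
 * roughly 500 ms from now computes an absolute expiry with add_timeout() and
 * registers it with add_timeout_user().  Note the carry handling above: e.g.
 * tv_usec == 800000 plus 500 ms gives 1300000, so one second is carried into
 * tv_sec and 300000 microseconds remain.
 *
 *   struct timeval when;
 *   gettimeofday( &when, NULL );
 *   add_timeout( &when, 500 );                    // when = now + 500 ms
 *   add_timeout_user( &when, my_callback, ... );  // my_callback is hypothetical
 */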
/****************************************************************/

static struct fd **poll_users;              /* users array */
static struct pollfd *pollfd;               /* poll fd array */
static int nb_users;                        /* count of array entries actually in use */
static int active_users;                    /* current number of active users */
static int allocated_users;                 /* count of allocated entries in the array */
static struct fd **freelist;                /* list of free entries in the array */

static int get_next_timeout(void);

static int epoll_fd = -1;

static inline void init_epoll(void)
    epoll_fd = epoll_create( 128 );
/* set the events that epoll waits for on this fd; helper for set_fd_events */
static inline void set_fd_epoll_events( struct fd *fd, int user, int events )
    struct epoll_event ev;

    if (epoll_fd == -1) return;

    if (events == -1)  /* stop waiting on this fd completely */
        if (pollfd[user].fd == -1) return;  /* already removed */
    else if (pollfd[user].fd == -1)
        if (pollfd[user].events) return;  /* stopped waiting on it, don't restart */
        if (pollfd[user].events == events) return;  /* nothing to do */

    memset(&ev.data, 0, sizeof(ev.data));
    if (epoll_ctl( epoll_fd, ctl, fd->unix_fd, &ev ) == -1)
        if (errno == ENOMEM)  /* not enough memory, give up on epoll */
        else perror( "epoll_ctl" );  /* should not happen */
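/* Illustrative note (not in the original source): `ctl` follows the usual
 * epoll registration pattern for the three cases tested above: events == -1
 * unregisters the fd (EPOLL_CTL_DEL), an fd not yet registered
 * (pollfd[user].fd == -1) is added with EPOLL_CTL_ADD, and an already
 * registered one is updated with EPOLL_CTL_MOD.  The main loop below reads the
 * user index back out of ev.data.u32 to find the matching poll_users[] slot.
 */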
static inline void remove_epoll_user( struct fd *fd, int user )
    if (epoll_fd == -1) return;

    if (pollfd[user].fd != -1)
        struct epoll_event dummy;
        epoll_ctl( epoll_fd, EPOLL_CTL_DEL, fd->unix_fd, &dummy );
static inline void main_loop_epoll(void)
    struct epoll_event events[128];

    assert( POLLIN == EPOLLIN );
    assert( POLLOUT == EPOLLOUT );
    assert( POLLERR == EPOLLERR );
    assert( POLLHUP == EPOLLHUP );

    if (epoll_fd == -1) return;

        timeout = get_next_timeout();

        if (!active_users) break;  /* last user removed by a timeout */
        if (epoll_fd == -1) break;  /* an error occurred with epoll */

        ret = epoll_wait( epoll_fd, events, sizeof(events)/sizeof(events[0]), timeout );
        gettimeofday( &current_time, NULL );

        /* put the events into the pollfd array first, like poll does */
        for (i = 0; i < ret; i++)
            int user = events[i].data.u32;
            pollfd[user].revents = events[i].events;

        /* read events from the pollfd array, as set_fd_events may modify them */
        for (i = 0; i < ret; i++)
            int user = events[i].data.u32;
            if (pollfd[user].revents) fd_poll_event( poll_users[user], pollfd[user].revents );
#elif defined(HAVE_KQUEUE)

static int kqueue_fd = -1;

static inline void init_epoll(void)
#ifndef __APPLE__ /* kqueue support is broken in the MacOS kernel so we can't use it */
    kqueue_fd = kqueue();
static inline void set_fd_epoll_events( struct fd *fd, int user, int events )
    if (kqueue_fd == -1) return;

    EV_SET( &ev[0], fd->unix_fd, EVFILT_READ, 0, NOTE_LOWAT, 1, (void *)user );
    EV_SET( &ev[1], fd->unix_fd, EVFILT_WRITE, 0, NOTE_LOWAT, 1, (void *)user );

    if (events == -1)  /* stop waiting on this fd completely */
        if (pollfd[user].fd == -1) return;  /* already removed */
        ev[0].flags |= EV_DELETE;
        ev[1].flags |= EV_DELETE;
    else if (pollfd[user].fd == -1)
        if (pollfd[user].events) return;  /* stopped waiting on it, don't restart */
        ev[0].flags |= EV_ADD | ((events & POLLIN) ? EV_ENABLE : EV_DISABLE);
        ev[1].flags |= EV_ADD | ((events & POLLOUT) ? EV_ENABLE : EV_DISABLE);
        if (pollfd[user].events == events) return;  /* nothing to do */
        ev[0].flags |= (events & POLLIN) ? EV_ENABLE : EV_DISABLE;
        ev[1].flags |= (events & POLLOUT) ? EV_ENABLE : EV_DISABLE;

    if (kevent( kqueue_fd, ev, 2, NULL, 0, NULL ) == -1)
        if (errno == ENOMEM)  /* not enough memory, give up on kqueue */
        else perror( "kevent" );  /* should not happen */
static inline void remove_epoll_user( struct fd *fd, int user )
    if (kqueue_fd == -1) return;

    if (pollfd[user].fd != -1)
        EV_SET( &ev[0], fd->unix_fd, EVFILT_READ, EV_DELETE, 0, 0, 0 );
        EV_SET( &ev[1], fd->unix_fd, EVFILT_WRITE, EV_DELETE, 0, 0, 0 );
        kevent( kqueue_fd, ev, 2, NULL, 0, NULL );
static inline void main_loop_epoll(void)
    struct kevent events[128];

    if (kqueue_fd == -1) return;

        timeout = get_next_timeout();

        if (!active_users) break;  /* last user removed by a timeout */
        if (kqueue_fd == -1) break;  /* an error occurred with kqueue */

            ts.tv_sec = timeout / 1000;
            ts.tv_nsec = (timeout % 1000) * 1000000;
            ret = kevent( kqueue_fd, NULL, 0, events, sizeof(events)/sizeof(events[0]), &ts );
        else ret = kevent( kqueue_fd, NULL, 0, events, sizeof(events)/sizeof(events[0]), NULL );

        gettimeofday( &current_time, NULL );

        /* put the events into the pollfd array first, like poll does */
        for (i = 0; i < ret; i++)
            long user = (long)events[i].udata;
            pollfd[user].revents = 0;
        for (i = 0; i < ret; i++)
            long user = (long)events[i].udata;
            if (events[i].filter == EVFILT_READ) pollfd[user].revents |= POLLIN;
            else if (events[i].filter == EVFILT_WRITE) pollfd[user].revents |= POLLOUT;
            if (events[i].flags & EV_EOF) pollfd[user].revents |= POLLHUP;
            if (events[i].flags & EV_ERROR) pollfd[user].revents |= POLLERR;

        /* read events from the pollfd array, as set_fd_events may modify them */
        for (i = 0; i < ret; i++)
            long user = (long)events[i].udata;
            if (pollfd[user].revents) fd_poll_event( poll_users[user], pollfd[user].revents );
            pollfd[user].revents = 0;
#else /* HAVE_KQUEUE */

static inline void init_epoll(void) { }
static inline void set_fd_epoll_events( struct fd *fd, int user, int events ) { }
static inline void remove_epoll_user( struct fd *fd, int user ) { }
static inline void main_loop_epoll(void) { }

#endif /* USE_EPOLL */
/* add a user in the poll array and return its index, or -1 on failure */
static int add_poll_user( struct fd *fd )
        ret = freelist - poll_users;
        freelist = (struct fd **)poll_users[ret];

        if (nb_users == allocated_users)
            struct fd **newusers;
            struct pollfd *newpoll;
            int new_count = allocated_users ? (allocated_users + allocated_users / 2) : 16;
            if (!(newusers = realloc( poll_users, new_count * sizeof(*poll_users) ))) return -1;
            if (!(newpoll = realloc( pollfd, new_count * sizeof(*pollfd) )))
                poll_users = newusers;
            poll_users = newusers;
            if (!allocated_users) init_epoll();
            allocated_users = new_count;

    pollfd[ret].events = 0;
    pollfd[ret].revents = 0;
    poll_users[ret] = fd;
/* remove a user from the poll list */
static void remove_poll_user( struct fd *fd, int user )
    assert( poll_users[user] == fd );

    remove_epoll_user( fd, user );
    pollfd[user].fd = -1;
    pollfd[user].events = 0;
    pollfd[user].revents = 0;
    poll_users[user] = (struct fd *)freelist;
    freelist = &poll_users[user];
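/* Illustrative sketch (not from the original file) of the free-list trick used
 * by add_poll_user()/remove_poll_user() above: a freed slot of poll_users[] is
 * reused to store the pointer to the previous head of the free list, so no
 * separate bookkeeping array is needed.
 *
 *   poll_users[user] = (struct fd *)freelist;   // slot now holds the old head
 *   freelist = &poll_users[user];               // slot becomes the new head
 *
 * and allocation pops the head again:
 *
 *   ret = freelist - poll_users;                // index of the free slot
 *   freelist = (struct fd **)poll_users[ret];   // next free slot (or NULL)
 */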
/* process pending timeouts and return the time until the next timeout, in milliseconds */
static int get_next_timeout(void)
    if (!list_empty( &timeout_list ))
        struct list expired_list, *ptr;

        /* first remove all expired timers from the list */

        list_init( &expired_list );
        while ((ptr = list_head( &timeout_list )) != NULL)
            struct timeout_user *timeout = LIST_ENTRY( ptr, struct timeout_user, entry );

            if (!time_before( &current_time, &timeout->when ))
                list_remove( &timeout->entry );
                list_add_tail( &expired_list, &timeout->entry );

        /* now call the callback for all the removed timers */

        while ((ptr = list_head( &expired_list )) != NULL)
            struct timeout_user *timeout = LIST_ENTRY( ptr, struct timeout_user, entry );
            list_remove( &timeout->entry );
            timeout->callback( timeout->private );

        if ((ptr = list_head( &timeout_list )) != NULL)
            struct timeout_user *timeout = LIST_ENTRY( ptr, struct timeout_user, entry );
            int diff = (timeout->when.tv_sec - current_time.tv_sec) * 1000
                     + (timeout->when.tv_usec - current_time.tv_usec + 999) / 1000;
            if (diff < 0) diff = 0;

    return -1;  /* no pending timeouts */
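/* Worked example (not in the original): the "+ 999" makes the division round
 * up, so a timeout due 1 microsecond from now still yields diff = 1 ms rather
 * than 0, and poll()/epoll_wait() will not spin before the timer is actually
 * due.  A timeout already in the past gives a negative diff, clamped to 0.
 */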
/* server main poll() loop */
    gettimeofday( &current_time, NULL );

    /* fall through to normal poll loop */

        timeout = get_next_timeout();

        if (!active_users) break;  /* last user removed by a timeout */

        ret = poll( pollfd, nb_users, timeout );
        gettimeofday( &current_time, NULL );

        for (i = 0; i < nb_users; i++)
            if (pollfd[i].revents)
                fd_poll_event( poll_users[i], pollfd[i].revents );
/****************************************************************/
/* device functions */

static struct list device_hash[DEVICE_HASH_SIZE];

static int is_device_removable( dev_t dev, int unix_fd )
#if defined(linux) && defined(HAVE_FSTATFS)
    /* check for floppy disk */
    if (major(dev) == FLOPPY_MAJOR) return 1;

    if (fstatfs( unix_fd, &stfs ) == -1) return 0;
    return (stfs.f_type == 0x9660 ||    /* iso9660 */
            stfs.f_type == 0x9fa1 ||    /* supermount */
            stfs.f_type == 0x15013346); /* udf */
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__APPLE__)
    if (fstatfs( unix_fd, &stfs ) == -1) return 0;
    return (!strncmp("cd9660", stfs.f_fstypename, sizeof(stfs.f_fstypename)) ||
            !strncmp("udf", stfs.f_fstypename, sizeof(stfs.f_fstypename)));
#elif defined(__NetBSD__)
    if (fstatvfs( unix_fd, &stfs ) == -1) return 0;
    return (!strncmp("cd9660", stfs.f_fstypename, sizeof(stfs.f_fstypename)) ||
            !strncmp("udf", stfs.f_fstypename, sizeof(stfs.f_fstypename)));
# include <sys/dkio.h>
# include <sys/vtoc.h>
    struct dk_cinfo dkinf;
    if (ioctl( unix_fd, DKIOCINFO, &dkinf ) == -1) return 0;
    return (dkinf.dki_ctype == DKC_CDROM ||
            dkinf.dki_ctype == DKC_NCRFLOPPY ||
            dkinf.dki_ctype == DKC_SMSFLOPPY ||
            dkinf.dki_ctype == DKC_INTEL82072 ||
            dkinf.dki_ctype == DKC_INTEL82077);
/* retrieve the device object for a given fd, creating it if needed */
static struct device *get_device( dev_t dev, int unix_fd )
    struct device *device;
    unsigned int i, hash = dev % DEVICE_HASH_SIZE;

    if (device_hash[hash].next)
        LIST_FOR_EACH_ENTRY( device, &device_hash[hash], struct device, entry )
            if (device->dev == dev) return (struct device *)grab_object( device );
    else list_init( &device_hash[hash] );

    /* not found, create it */

    if (unix_fd == -1) return NULL;
    if ((device = alloc_object( &device_ops )))
        device->removable = is_device_removable( dev, unix_fd );
        for (i = 0; i < INODE_HASH_SIZE; i++) list_init( &device->inode_hash[i] );
        list_add_head( &device_hash[hash], &device->entry );
static void device_dump( struct object *obj, int verbose )
    struct device *device = (struct device *)obj;
    fprintf( stderr, "Device dev=" );
    DUMP_LONG_LONG( device->dev );
    fprintf( stderr, "\n" );

static void device_destroy( struct object *obj )
    struct device *device = (struct device *)obj;

    for (i = 0; i < INODE_HASH_SIZE; i++)
        assert( list_empty(&device->inode_hash[i]) );

    list_remove( &device->entry );  /* remove it from the hash table */
/****************************************************************/
/* inode functions */

/* close all pending file descriptors in the closed list */
static void inode_close_pending( struct inode *inode, int keep_unlinks )
    struct list *ptr = list_head( &inode->closed );

        struct closed_fd *fd = LIST_ENTRY( ptr, struct closed_fd, entry );
        struct list *next = list_next( &inode->closed, ptr );

        if (fd->unix_fd != -1)
            close( fd->unix_fd );
        if (!keep_unlinks || !fd->unlink[0])  /* get rid of it unless there's an unlink pending on that file */
static void inode_dump( struct object *obj, int verbose )
    struct inode *inode = (struct inode *)obj;
    fprintf( stderr, "Inode device=%p ino=", inode->device );
    DUMP_LONG_LONG( inode->ino );
    fprintf( stderr, "\n" );

static void inode_destroy( struct object *obj )
    struct inode *inode = (struct inode *)obj;

    assert( list_empty(&inode->open) );
    assert( list_empty(&inode->locks) );

    list_remove( &inode->entry );

    while ((ptr = list_head( &inode->closed )))
        struct closed_fd *fd = LIST_ENTRY( ptr, struct closed_fd, entry );

        if (fd->unix_fd != -1) close( fd->unix_fd );

            /* make sure it is still the same file */
            if (!stat( fd->unlink, &st ) && st.st_dev == inode->device->dev && st.st_ino == inode->ino)
                if (S_ISDIR(st.st_mode)) rmdir( fd->unlink );
                else unlink( fd->unlink );

    release_object( inode->device );
/* retrieve the inode object for a given fd, creating it if needed */
static struct inode *get_inode( dev_t dev, ino_t ino, int unix_fd )
    struct device *device;

    unsigned int hash = ino % INODE_HASH_SIZE;

    if (!(device = get_device( dev, unix_fd ))) return NULL;

    LIST_FOR_EACH_ENTRY( inode, &device->inode_hash[hash], struct inode, entry )
        if (inode->ino == ino)
            release_object( device );
            return (struct inode *)grab_object( inode );

    /* not found, create it */
    if ((inode = alloc_object( &inode_ops )))
        inode->device = device;
        list_init( &inode->open );
        list_init( &inode->locks );
        list_init( &inode->closed );
        list_add_head( &device->inode_hash[hash], &inode->entry );
    else release_object( device );
/* add fd to the inode list of file descriptors to close */
static void inode_add_closed_fd( struct inode *inode, struct closed_fd *fd )
    if (!list_empty( &inode->locks ))
        list_add_head( &inode->closed, &fd->entry );
    else if (fd->unlink[0])  /* close the fd but keep the structure around for unlink */
        if (fd->unix_fd != -1) close( fd->unix_fd );
        list_add_head( &inode->closed, &fd->entry );
    else  /* no locks on this inode and no unlink, get rid of the fd */
        if (fd->unix_fd != -1) close( fd->unix_fd );
/****************************************************************/
/* file lock functions */

static void file_lock_dump( struct object *obj, int verbose )
    struct file_lock *lock = (struct file_lock *)obj;
    fprintf( stderr, "Lock %s fd=%p proc=%p start=",
             lock->shared ? "shared" : "excl", lock->fd, lock->process );
    DUMP_LONG_LONG( lock->start );
    fprintf( stderr, " end=" );
    DUMP_LONG_LONG( lock->end );
    fprintf( stderr, "\n" );

static int file_lock_signaled( struct object *obj, struct thread *thread )
    struct file_lock *lock = (struct file_lock *)obj;
    /* lock is signaled if it has lost its owner */
    return !lock->process;
/* set (or remove) a Unix lock if possible for the given range */
static int set_unix_lock( struct fd *fd, file_pos_t start, file_pos_t end, int type )
    if (!fd->fs_locks) return 1;  /* no fs locks possible for this fd */

        if (start == end) return 1;  /* can't set zero-byte lock */
        if (start > max_unix_offset) return 1;  /* ignore it */
        fl.l_whence = SEEK_SET;
        if (!end || end > max_unix_offset) fl.l_len = 0;
        else fl.l_len = end - start;
        if (fcntl( fd->unix_fd, F_SETLK, &fl ) != -1) return 1;

            /* check whether locks work at all on this file system */
            if (fcntl( fd->unix_fd, F_GETLK, &fl ) != -1)
                set_error( STATUS_FILE_LOCK_CONFLICT );
            /* no locking on this fs, just ignore it */
            set_error( STATUS_FILE_LOCK_CONFLICT );
            /* this can happen if we try to set a write lock on a read-only file */
            /* we just ignore that error */
            if (fl.l_type == F_WRLCK) return 1;
            set_error( STATUS_ACCESS_DENIED );
            /* this can happen if off_t is 64-bit but the kernel only supports 32-bit */
            /* in that case we shrink the limit and retry */
            if (max_unix_offset > INT_MAX)
                max_unix_offset = INT_MAX;
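/* Illustrative note (not in the original): callers map the server lock model
 * onto struct flock as follows: shared locks use F_RDLCK, exclusive locks
 * F_WRLCK, and F_UNLCK removes a range (see add_lock() and remove_unix_locks()
 * below).  A server end offset of 0 means "to the end of the file", which is
 * expressed to the kernel as l_len = 0, and ranges past max_unix_offset are
 * simply not mirrored as Unix locks at all.
 */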
/* check if interval [start;end) overlaps the lock */
static inline int lock_overlaps( struct file_lock *lock, file_pos_t start, file_pos_t end )
    if (lock->end && start >= lock->end) return 0;
    if (end && lock->start >= end) return 0;
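/* Worked example (not in the original): with the half-open convention and an
 * end of 0 meaning "unbounded", a lock covering [10;20) does not overlap a
 * request for [20;30) (start 20 >= lock end 20), but it does overlap a request
 * [15;0) because an unbounded request extends past every finite lock end.
 */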
/* remove Unix locks for all bytes in the specified area that are no longer locked */
static void remove_unix_locks( struct fd *fd, file_pos_t start, file_pos_t end )
    } *first, *cur, *next, *buffer;

    if (!fd->inode) return;
    if (!fd->fs_locks) return;
    if (start == end || start > max_unix_offset) return;
    if (!end || end > max_unix_offset) end = max_unix_offset + 1;

    /* count the number of locks overlapping the specified area */

    LIST_FOR_EACH( ptr, &fd->inode->locks )
        struct file_lock *lock = LIST_ENTRY( ptr, struct file_lock, inode_entry );
        if (lock->start == lock->end) continue;
        if (lock_overlaps( lock, start, end )) count++;

    if (!count)  /* no locks at all, we can unlock everything */
        set_unix_lock( fd, start, end, F_UNLCK );

    /* allocate space for the list of holes */
    /* max. number of holes is number of locks + 1 */

    if (!(buffer = malloc( sizeof(*buffer) * (count+1) ))) return;
    first->start = start;

    /* build a sorted list of unlocked holes in the specified area */

    LIST_FOR_EACH( ptr, &fd->inode->locks )
        struct file_lock *lock = LIST_ENTRY( ptr, struct file_lock, inode_entry );
        if (lock->start == lock->end) continue;
        if (!lock_overlaps( lock, start, end )) continue;

        /* go through all the holes touched by this lock */
        for (cur = first; cur; cur = cur->next)
            if (cur->end <= lock->start) continue;  /* hole is before start of lock */
            if (lock->end && cur->start >= lock->end) break;  /* hole is after end of lock */

            /* now we know that lock is overlapping hole */

            if (cur->start >= lock->start)  /* lock starts before hole, shrink from start */
                cur->start = lock->end;
                if (cur->start && cur->start < cur->end) break;  /* done with this lock */
                /* now hole is empty, remove it */
                if (cur->next) cur->next->prev = cur->prev;
                if (cur->prev) cur->prev->next = cur->next;
                else if (!(first = cur->next)) goto done;  /* no more holes at all */
            else if (!lock->end || cur->end <= lock->end)  /* lock larger than hole, shrink from end */
                cur->end = lock->start;
                assert( cur->start < cur->end );
            else  /* lock is in the middle of hole, split hole in two */
                next->next = cur->next;
                next->start = lock->end;
                next->end = cur->end;
                cur->end = lock->start;
                assert( next->start < next->end );
                assert( cur->end < next->start );
                break;  /* done with this lock */

    /* clear Unix locks for all the holes */

    for (cur = first; cur; cur = cur->next)
        set_unix_lock( fd, cur->start, cur->end, F_UNLCK );
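/* Worked example (not in the original): suppose the area to unlock is [0;50)
 * and the inode still has server locks on [10;20) and [30;40).  The code above
 * starts with a single hole [0;50) and carves it against each remaining lock,
 * leaving the holes [0;10), [20;30) and [40;50); only those three ranges are
 * then passed to set_unix_lock(..., F_UNLCK), so bytes that are still locked
 * by someone keep their Unix lock.
 */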
/* create a new lock on a fd */
static struct file_lock *add_lock( struct fd *fd, int shared, file_pos_t start, file_pos_t end )
    struct file_lock *lock;

    if (!fd->inode)  /* not a regular file */
        set_error( STATUS_INVALID_HANDLE );

    if (!(lock = alloc_object( &file_lock_ops ))) return NULL;
    lock->shared  = shared;
    lock->start   = start;
    lock->process = current->process;

    /* now try to set a Unix lock */
    if (!set_unix_lock( lock->fd, lock->start, lock->end, lock->shared ? F_RDLCK : F_WRLCK ))
        release_object( lock );
    list_add_head( &fd->locks, &lock->fd_entry );
    list_add_head( &fd->inode->locks, &lock->inode_entry );
    list_add_head( &lock->process->locks, &lock->proc_entry );
/* remove an existing lock */
static void remove_lock( struct file_lock *lock, int remove_unix )
    struct inode *inode = lock->fd->inode;

    list_remove( &lock->fd_entry );
    list_remove( &lock->inode_entry );
    list_remove( &lock->proc_entry );
    if (remove_unix) remove_unix_locks( lock->fd, lock->start, lock->end );
    if (list_empty( &inode->locks )) inode_close_pending( inode, 1 );
    lock->process = NULL;
    wake_up( &lock->obj, 0 );
    release_object( lock );
/* remove all locks owned by a given process */
void remove_process_locks( struct process *process )
    while ((ptr = list_head( &process->locks )))
        struct file_lock *lock = LIST_ENTRY( ptr, struct file_lock, proc_entry );
        remove_lock( lock, 1 );  /* this removes it from the list */

/* remove all locks on a given fd */
static void remove_fd_locks( struct fd *fd )
    file_pos_t start = FILE_POS_T_MAX, end = 0;

    while ((ptr = list_head( &fd->locks )))
        struct file_lock *lock = LIST_ENTRY( ptr, struct file_lock, fd_entry );
        if (lock->start < start) start = lock->start;
        if (!lock->end || lock->end > end) end = lock->end - 1;
        remove_lock( lock, 0 );
    if (start < end) remove_unix_locks( fd, start, end + 1 );
/* add a lock on an fd */
/* returns handle to wait on */
obj_handle_t lock_fd( struct fd *fd, file_pos_t start, file_pos_t count, int shared, int wait )
    file_pos_t end = start + count;

    /* don't allow wrapping locks */
    if (end && end < start)
        set_error( STATUS_INVALID_PARAMETER );

    /* check if another lock on that file overlaps the area */
    LIST_FOR_EACH( ptr, &fd->inode->locks )
        struct file_lock *lock = LIST_ENTRY( ptr, struct file_lock, inode_entry );
        if (!lock_overlaps( lock, start, end )) continue;
        if (lock->shared && shared) continue;
            set_error( STATUS_FILE_LOCK_CONFLICT );
        set_error( STATUS_PENDING );
        return alloc_handle( current->process, lock, SYNCHRONIZE, 0 );

    /* not found, add it */
    if (add_lock( fd, shared, start, end )) return 0;
    if (get_error() == STATUS_FILE_LOCK_CONFLICT)
        /* Unix lock conflict -> tell client to wait and retry */
        if (wait) set_error( STATUS_PENDING );
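/* Example of the resulting semantics (not in the original): two shared locks on
 * overlapping ranges coexist, while an exclusive request overlapping an existing
 * lock fails with STATUS_FILE_LOCK_CONFLICT, or returns a handle on the lock
 * object with STATUS_PENDING when the caller asked to wait, so the client can
 * retry once the owning process releases it (the lock object becomes signaled
 * when it loses its owner, see file_lock_signaled() above).
 */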
/* remove a lock on an fd */
void unlock_fd( struct fd *fd, file_pos_t start, file_pos_t count )
    file_pos_t end = start + count;

    /* find an existing lock with the exact same parameters */
    LIST_FOR_EACH( ptr, &fd->locks )
        struct file_lock *lock = LIST_ENTRY( ptr, struct file_lock, fd_entry );
        if ((lock->start == start) && (lock->end == end))
            remove_lock( lock, 1 );
    set_error( STATUS_FILE_LOCK_CONFLICT );
/****************************************************************/
/* file descriptor functions */

static void fd_dump( struct object *obj, int verbose )
    struct fd *fd = (struct fd *)obj;
    fprintf( stderr, "Fd unix_fd=%d user=%p", fd->unix_fd, fd->user );
    if (fd->inode) fprintf( stderr, " inode=%p unlink='%s'", fd->inode, fd->closed->unlink );
    fprintf( stderr, "\n" );

static void fd_destroy( struct object *obj )
    struct fd *fd = (struct fd *)obj;

    async_terminate_queue( &fd->read_q, STATUS_CANCELLED );
    async_terminate_queue( &fd->write_q, STATUS_CANCELLED );

    remove_fd_locks( fd );
    list_remove( &fd->inode_entry );
    if (fd->poll_index != -1) remove_poll_user( fd, fd->poll_index );
        inode_add_closed_fd( fd->inode, fd->closed );
        release_object( fd->inode );
    else  /* no inode, close it right away */
        if (fd->unix_fd != -1) close( fd->unix_fd );
/* set the events that select waits for on this fd */
void set_fd_events( struct fd *fd, int events )
    int user = fd->poll_index;
    assert( poll_users[user] == fd );

    set_fd_epoll_events( fd, user, events );

    if (events == -1)  /* stop waiting on this fd completely */
        pollfd[user].fd = -1;
        pollfd[user].events = POLLERR;
        pollfd[user].revents = 0;
    else if (pollfd[user].fd != -1 || !pollfd[user].events)
        pollfd[user].fd = fd->unix_fd;
        pollfd[user].events = events;
/* prepare an fd for unmounting its corresponding device */
static inline void unmount_fd( struct fd *fd )
    assert( fd->inode );

    async_terminate_queue( &fd->read_q, STATUS_VOLUME_DISMOUNTED );
    async_terminate_queue( &fd->write_q, STATUS_VOLUME_DISMOUNTED );

    if (fd->poll_index != -1) set_fd_events( fd, -1 );

    if (fd->unix_fd != -1) close( fd->unix_fd );

    fd->closed->unix_fd = -1;
    fd->closed->unlink[0] = 0;

    /* stop using Unix locks on this fd (existing locks have been removed by close) */

/* allocate an fd object, without setting the unix fd yet */
static struct fd *alloc_fd_object(void)
    struct fd *fd = alloc_object( &fd_ops );

    if (!fd) return NULL;

    fd->poll_index = -1;
    list_init( &fd->inode_entry );
    list_init( &fd->locks );
    list_init( &fd->read_q );
    list_init( &fd->write_q );

    if ((fd->poll_index = add_poll_user( fd )) == -1)
        release_object( fd );
/* allocate a pseudo fd object, for objects that need to behave like files but don't have a unix fd */
struct fd *alloc_pseudo_fd( const struct fd_ops *fd_user_ops, struct object *user )
    struct fd *fd = alloc_object( &fd_ops );

    if (!fd) return NULL;

    fd->fd_ops = fd_user_ops;
    fd->poll_index = -1;
    list_init( &fd->inode_entry );
    list_init( &fd->locks );
    list_init( &fd->read_q );
    list_init( &fd->write_q );
/* check if the desired access is possible without violating */
/* the sharing mode of other opens of the same file */
static int check_sharing( struct fd *fd, unsigned int access, unsigned int sharing )
    unsigned int existing_sharing = FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE;
    unsigned int existing_access = 0;

    /* if access mode is 0, sharing mode is ignored */
    if (!access) sharing = existing_sharing;
    fd->access = access;
    fd->sharing = sharing;

    LIST_FOR_EACH( ptr, &fd->inode->open )
        struct fd *fd_ptr = LIST_ENTRY( ptr, struct fd, inode_entry );
            existing_sharing &= fd_ptr->sharing;
            existing_access |= fd_ptr->access;

    if ((access & FILE_UNIX_READ_ACCESS) && !(existing_sharing & FILE_SHARE_READ)) return 0;
    if ((access & FILE_UNIX_WRITE_ACCESS) && !(existing_sharing & FILE_SHARE_WRITE)) return 0;
    if ((access & DELETE) && !(existing_sharing & FILE_SHARE_DELETE)) return 0;
    if ((existing_access & FILE_UNIX_READ_ACCESS) && !(sharing & FILE_SHARE_READ)) return 0;
    if ((existing_access & FILE_UNIX_WRITE_ACCESS) && !(sharing & FILE_SHARE_WRITE)) return 0;
    if ((existing_access & DELETE) && !(sharing & FILE_SHARE_DELETE)) return 0;
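/* Worked example (not in the original): if an existing open has read access
 * and grants only FILE_SHARE_READ, a second open asking for write access is
 * rejected (the first opener did not grant FILE_SHARE_WRITE), and a second open
 * that refuses FILE_SHARE_READ is also rejected, because it would deny the read
 * access the first opener already holds.  An access mask of 0 bypasses the
 * check entirely, as noted above.
 */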
/* sets the user of an fd that previously had no user */
void set_fd_user( struct fd *fd, const struct fd_ops *user_ops, struct object *user )
    assert( fd->fd_ops == NULL );
    fd->fd_ops = user_ops;
/* open() wrapper that returns a struct fd with no fd user set */
struct fd *open_fd( const char *name, int flags, mode_t *mode, unsigned int access,
                    unsigned int sharing, unsigned int options )
    struct closed_fd *closed_fd;
    const char *unlink_name = "";

    if ((options & FILE_DELETE_ON_CLOSE) && !(access & DELETE))
        set_error( STATUS_INVALID_PARAMETER );

    if (!(fd = alloc_fd_object())) return NULL;

    if (options & FILE_DELETE_ON_CLOSE) unlink_name = name;
    if (!(closed_fd = mem_alloc( sizeof(*closed_fd) + strlen(unlink_name) )))
        release_object( fd );

    /* create the directory if needed */
    if ((options & FILE_DIRECTORY_FILE) && (flags & O_CREAT))
        if (mkdir( name, 0777 ) == -1)
            if (errno != EEXIST || (flags & O_EXCL))
        flags &= ~(O_CREAT | O_EXCL | O_TRUNC);

    if ((access & FILE_UNIX_WRITE_ACCESS) && !(options & FILE_DIRECTORY_FILE))
        if (access & FILE_UNIX_READ_ACCESS) rw_mode = O_RDWR;
        else rw_mode = O_WRONLY;
    else rw_mode = O_RDONLY;

    if ((fd->unix_fd = open( name, rw_mode | (flags & ~O_TRUNC), *mode )) == -1)
        /* if we tried to open a directory for write access, retry read-only */
        if (errno != EISDIR ||
            !(access & FILE_UNIX_WRITE_ACCESS) ||
            (fd->unix_fd = open( name, O_RDONLY | (flags & ~O_TRUNC), *mode )) == -1)

    closed_fd->unix_fd = fd->unix_fd;
    closed_fd->unlink[0] = 0;
    fstat( fd->unix_fd, &st );

    /* only bother with an inode for normal files and directories */
    if (S_ISREG(st.st_mode) || S_ISDIR(st.st_mode))
        struct inode *inode = get_inode( st.st_dev, st.st_ino, fd->unix_fd );

            /* we can close the fd because there are no others open on the same file,
             * otherwise we wouldn't have failed to allocate a new inode

        fd->closed = closed_fd;
        list_add_head( &inode->open, &fd->inode_entry );

        /* check directory options */
        if ((options & FILE_DIRECTORY_FILE) && !S_ISDIR(st.st_mode))
            release_object( fd );
            set_error( STATUS_NOT_A_DIRECTORY );
        if ((options & FILE_NON_DIRECTORY_FILE) && S_ISDIR(st.st_mode))
            release_object( fd );
            set_error( STATUS_FILE_IS_A_DIRECTORY );
        if (!check_sharing( fd, access, sharing ))
            release_object( fd );
            set_error( STATUS_SHARING_VIOLATION );
        strcpy( closed_fd->unlink, unlink_name );
        if (flags & O_TRUNC) ftruncate( fd->unix_fd, 0 );
    else  /* special file */
        if (options & FILE_DIRECTORY_FILE)
            set_error( STATUS_NOT_A_DIRECTORY );
        if (unlink_name[0])  /* we can't unlink special files */
            set_error( STATUS_INVALID_PARAMETER );

    release_object( fd );
/* create an fd for an anonymous file */
/* if the function fails the unix fd is closed */
struct fd *create_anonymous_fd( const struct fd_ops *fd_user_ops, int unix_fd, struct object *user )
    struct fd *fd = alloc_fd_object();

        set_fd_user( fd, fd_user_ops, user );
        fd->unix_fd = unix_fd;

/* retrieve the object that is using an fd */
void *get_fd_user( struct fd *fd )

/* retrieve the unix fd for an object */
int get_unix_fd( struct fd *fd )
    if (fd->unix_fd == -1)
        if (fd->unmounted) set_error( STATUS_VOLUME_DISMOUNTED );
        else set_error( STATUS_BAD_DEVICE_TYPE );

/* check if two file descriptors point to the same file */
int is_same_file_fd( struct fd *fd1, struct fd *fd2 )
    return fd1->inode == fd2->inode;
/* check if fd is on a removable device */
int is_fd_removable( struct fd *fd )
    return (fd->inode && fd->inode->device->removable);

/* handler for close_handle that refuses to close fd-associated handles in other processes */
int fd_close_handle( struct object *obj, struct process *process, obj_handle_t handle )
    return (!current || current->process == process);

/* callback for event happening in the main poll() loop */
void fd_poll_event( struct fd *fd, int event )
    return fd->fd_ops->poll_event( fd, event );

/* check if events are pending and if yes return which one(s) */
int check_fd_events( struct fd *fd, int events )
    if (fd->unix_fd == -1) return POLLERR;

    pfd.fd = fd->unix_fd;
    pfd.events = events;
    if (poll( &pfd, 1, 0 ) <= 0) return 0;
/* default add_queue() routine for objects that poll() on an fd */
int default_fd_add_queue( struct object *obj, struct wait_queue_entry *entry )
    struct fd *fd = get_obj_fd( obj );

    if (!fd->inode && list_empty( &obj->wait_queue ))  /* first on the queue */
        set_fd_events( fd, fd->fd_ops->get_poll_events( fd ) );
    add_queue( obj, entry );
    release_object( fd );

/* default remove_queue() routine for objects that poll() on an fd */
void default_fd_remove_queue( struct object *obj, struct wait_queue_entry *entry )
    struct fd *fd = get_obj_fd( obj );

    remove_queue( obj, entry );
    if (!fd->inode && list_empty( &obj->wait_queue ))  /* last on the queue is gone */
        set_fd_events( fd, 0 );
    release_object( obj );
    release_object( fd );

/* default signaled() routine for objects that poll() on an fd */
int default_fd_signaled( struct object *obj, struct thread *thread )
    struct fd *fd = get_obj_fd( obj );

    if (fd->inode) ret = 1;  /* regular files are always signaled */
        events = fd->fd_ops->get_poll_events( fd );
        ret = check_fd_events( fd, events ) != 0;

            /* stop waiting on select() if we are signaled */
            set_fd_events( fd, 0 );
        else if (!list_empty( &obj->wait_queue ))
            /* restart waiting on poll() if we are no longer signaled */
            set_fd_events( fd, events );

    release_object( fd );
int default_fd_get_poll_events( struct fd *fd )
    if (!list_empty( &fd->read_q ))
    if (!list_empty( &fd->write_q ))

/* default handler for poll() events */
void default_poll_event( struct fd *fd, int event )
    if (!list_empty( &fd->read_q ) && (POLLIN & event) )
        async_terminate_head( &fd->read_q, STATUS_ALERTED );

    if (!list_empty( &fd->write_q ) && (POLLOUT & event) )
        async_terminate_head( &fd->write_q, STATUS_ALERTED );

    /* if an error occurred, stop polling this fd to avoid busy-looping */
    if (event & (POLLERR | POLLHUP)) set_fd_events( fd, -1 );
    wake_up( fd->user, 0 );
void fd_queue_async_timeout( struct fd *fd, const async_data_t *data, int type, int count,
                             const struct timeval *timeout )
    fd->fd_ops->get_file_info( fd, &flags );
    if (!(flags & (FD_FLAG_OVERLAPPED|FD_FLAG_TIMEOUT)))
        set_error( STATUS_INVALID_HANDLE );

    case ASYNC_TYPE_READ:
        queue = &fd->read_q;
    case ASYNC_TYPE_WRITE:
        queue = &fd->write_q;
        set_error( STATUS_INVALID_PARAMETER );

    if (!create_async( current, timeout, queue, data )) return;
    set_error( STATUS_PENDING );

    /* Check if the new pending request can be served immediately */
    events = check_fd_events( fd, fd->fd_ops->get_poll_events( fd ) );
    if (events) fd->fd_ops->poll_event( fd, events );

    set_fd_events( fd, fd->fd_ops->get_poll_events( fd ) );

void default_fd_queue_async( struct fd *fd, const async_data_t *data, int type, int count )
    fd_queue_async_timeout( fd, data, type, count, NULL );

void default_fd_cancel_async( struct fd *fd )
    async_terminate_queue( &fd->read_q, STATUS_CANCELLED );
    async_terminate_queue( &fd->write_q, STATUS_CANCELLED );
/* default flush() routine */
int no_flush( struct fd *fd, struct event **event )
    set_error( STATUS_OBJECT_TYPE_MISMATCH );

/* default get_file_info() routine */
enum server_fd_type no_get_file_info( struct fd *fd, int *flags )
    return FD_TYPE_INVALID;

/* default queue_async() routine */
void no_queue_async( struct fd *fd, const async_data_t *data, int type, int count )
    set_error( STATUS_OBJECT_TYPE_MISMATCH );

/* default cancel_async() routine */
void no_cancel_async( struct fd *fd )
    set_error( STATUS_OBJECT_TYPE_MISMATCH );

static inline int is_valid_mounted_device( struct stat *st )
#if defined(linux) || defined(__sun__)
    return S_ISBLK( st->st_mode );
    /* disks are char devices on *BSD */
    return S_ISCHR( st->st_mode );
/* close all Unix file descriptors on a device to allow unmounting it */
static void unmount_device( struct fd *device_fd )
    struct device *device;
    struct inode *inode;

    int unix_fd = get_unix_fd( device_fd );

    if (unix_fd == -1) return;

    if (fstat( unix_fd, &st ) == -1 || !is_valid_mounted_device( &st ))
        set_error( STATUS_INVALID_PARAMETER );

    if (!(device = get_device( st.st_rdev, -1 ))) return;

    for (i = 0; i < INODE_HASH_SIZE; i++)
        LIST_FOR_EACH_ENTRY( inode, &device->inode_hash[i], struct inode, entry )
            LIST_FOR_EACH_ENTRY( fd, &inode->open, struct fd, inode_entry )
        inode_close_pending( inode, 0 );

    /* remove it from the hash table */
    list_remove( &device->entry );
    list_init( &device->entry );
    release_object( device );
/* same as get_handle_obj but retrieve the struct fd associated to the object */
static struct fd *get_handle_fd_obj( struct process *process, obj_handle_t handle,
                                     unsigned int access )
    struct fd *fd = NULL;

    if ((obj = get_handle_obj( process, handle, access, NULL )))
        fd = get_obj_fd( obj );
        release_object( obj );

/* flush a file buffers */
DECL_HANDLER(flush_file)
    struct fd *fd = get_handle_fd_obj( current->process, req->handle, 0 );
    struct event *event = NULL;

        fd->fd_ops->flush( fd, &event );
            reply->event = alloc_handle( current->process, event, SYNCHRONIZE, 0 );
        release_object( fd );
/* open a file object */
DECL_HANDLER(open_file_object)
    struct unicode_str name;
    struct directory *root = NULL;
    struct object *obj, *result;

    get_req_unicode_str( &name );
    if (req->rootdir && !(root = get_directory_obj( current->process, req->rootdir, 0 )))

    if ((obj = open_object_dir( root, &name, req->attributes, NULL )))
        if ((result = obj->ops->open_file( obj, req->access, req->sharing, req->options )))
            reply->handle = alloc_handle( current->process, result, req->access, req->attributes );
            release_object( result );
        release_object( obj );

    if (root) release_object( root );
/* get a Unix fd to access a file */
DECL_HANDLER(get_handle_fd)
    if ((fd = get_handle_fd_obj( current->process, req->handle, req->access )))
        reply->type = fd->fd_ops->get_file_info( fd, &reply->flags );
        if (reply->type != FD_TYPE_INVALID)
            if (is_fd_removable(fd)) reply->flags |= FD_FLAG_REMOVABLE;
                int unix_fd = get_unix_fd( fd );
                if (unix_fd != -1) send_client_fd( current->process, unix_fd, req->handle );
        else set_error( STATUS_OBJECT_TYPE_MISMATCH );
        release_object( fd );

/* get ready to unmount a Unix device */
DECL_HANDLER(unmount_device)
    if ((fd = get_handle_fd_obj( current->process, req->handle, 0 )))
        unmount_device( fd );
        release_object( fd );
/* create / reschedule an async I/O */
DECL_HANDLER(register_async)
    struct fd *fd = get_handle_fd_obj( current->process, req->handle, 0 );

     * The queue_async method must do the following:
     * 1. Get the async_queue for the request of given type.
     * 2. Create a new asynchronous request for the selected queue
     * 3. Carry out any operations necessary to adjust the object's poll events
     *    Usually: set_fd_events (obj, obj->ops->get_poll_events()).
     * 4. When the async request is triggered, then send back (with a proper APC)
     *    the trigger (STATUS_ALERTED) to the thread that posted the request.
     *    See also the implementations in file.c, serial.c, and sock.c.

        fd->fd_ops->queue_async( fd, &req->async, req->type, req->count );
        release_object( fd );
/* cancels all async I/O */
DECL_HANDLER(cancel_async)
    struct fd *fd = get_handle_fd_obj( current->process, req->handle, 0 );

        /* Note: we don't kill the queued APC_ASYNC_IO on this thread because
         * NtCancelIoFile() will force the pending APC to be run.  Since
         * Windows only guarantees that the current thread will have no async
         * operation on the current fd when NtCancelIoFile returns, this shall

        fd->fd_ops->cancel_async( fd );
        release_object( fd );