/*
 * Server-side file descriptor management
 *
 * Copyright (C) 2000, 2003 Alexandre Julliard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */
#include "wine/port.h"

#ifdef HAVE_SYS_POLL_H
#include <sys/poll.h>
#endif
#ifdef HAVE_LINUX_MAJOR_H
#include <linux/major.h>
#endif
#ifdef HAVE_SYS_STATVFS_H
#include <sys/statvfs.h>
#endif
#ifdef HAVE_SYS_PARAM_H
#include <sys/param.h>
#endif
#ifdef HAVE_SYS_MOUNT_H
#include <sys/mount.h>
#endif
#ifdef HAVE_SYS_STATFS_H
#include <sys/statfs.h>
#endif
#ifdef HAVE_SYS_EVENT_H
#include <sys/event.h>
#endif
#include <sys/types.h>

#define WIN32_NO_STATUS
#if defined(HAVE_SYS_EPOLL_H) && defined(HAVE_EPOLL_CREATE)
# include <sys/epoll.h>
# define USE_EPOLL
#elif defined(linux) && defined(__i386__) && defined(HAVE_STDINT_H)
# define USE_EPOLL
# define EPOLLIN POLLIN
# define EPOLLOUT POLLOUT
# define EPOLLERR POLLERR
# define EPOLLHUP POLLHUP
# define EPOLL_CTL_ADD 1
# define EPOLL_CTL_DEL 2
# define EPOLL_CTL_MOD 3

typedef union epoll_data
{
    void *ptr;
    int fd;
    uint32_t u32;
    uint64_t u64;
} epoll_data_t;

struct epoll_event
{
    uint32_t events;
    epoll_data_t data;
};

#define SYSCALL_RET(ret) do { \
        if (ret < 0) { errno = -ret; ret = -1; } \
        return ret; \
    } while(0)
static inline int epoll_create( int size )
{
    int ret;
    __asm__( "pushl %%ebx; movl %2,%%ebx; int $0x80; popl %%ebx"
             : "=a" (ret) : "0" (254 /*NR_epoll_create*/), "r" (size) );
    SYSCALL_RET(ret);
}
static inline int epoll_ctl( int epfd, int op, int fd, const struct epoll_event *event )
{
    int ret;
    __asm__( "pushl %%ebx; movl %2,%%ebx; int $0x80; popl %%ebx"
             : "=a" (ret)
             : "0" (255 /*NR_epoll_ctl*/), "r" (epfd), "c" (op), "d" (fd), "S" (event), "m" (*event) );
    SYSCALL_RET(ret);
}
static inline int epoll_wait( int epfd, struct epoll_event *events, int maxevents, int timeout )
{
    int ret;
    __asm__( "pushl %%ebx; movl %2,%%ebx; int $0x80; popl %%ebx"
             : "=a" (ret)
             : "0" (256 /*NR_epoll_wait*/), "r" (epfd), "c" (events), "d" (maxevents), "S" (timeout)
             : "memory" );
    SYSCALL_RET(ret);
}
#undef SYSCALL_RET

#endif /* linux && __i386__ && HAVE_STDINT_H */
/* Because of the stupid Posix locking semantics, we need to keep
 * track of all file descriptors referencing a given file, and not
 * close a single one until all the locks are gone (sigh).
 */
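/* Illustrative sketch (not part of the server): with POSIX fcntl() locks,
 * closing *any* descriptor for a file drops every lock the process holds
 * on that file, even locks taken through a different descriptor:
 *
 *     int fd1 = open( "data", O_RDWR );
 *     int fd2 = open( "data", O_RDWR );   // same file, second fd
 *     struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET };
 *     fcntl( fd1, F_SETLK, &fl );         // lock taken through fd1
 *     close( fd2 );                       // lock through fd1 is lost too
 *
 * Hence the closed-fd bookkeeping below: a unix fd is kept open until the
 * last lock on its inode is gone.
 */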
/* file descriptor object */

/* closed_fd is used to keep track of the unix fd belonging to a closed fd object */
struct closed_fd
{
    struct list entry;       /* entry in inode closed list */
    int         unix_fd;     /* the unix file descriptor */
    char        unlink[1];   /* name to unlink on close (if any) */
};
struct fd
{
    struct object        obj;         /* object header */
    const struct fd_ops *fd_ops;      /* file descriptor operations */
    struct inode        *inode;       /* inode that this fd belongs to */
    struct list          inode_entry; /* entry in inode fd list */
    struct closed_fd    *closed;      /* structure to store the unix fd at destroy time */
    struct object       *user;        /* object using this file descriptor */
    struct list          locks;       /* list of locks on this fd */
    unsigned int         access;      /* file access (FILE_READ_DATA etc.) */
    unsigned int         options;     /* file options (FILE_DELETE_ON_CLOSE, FILE_SYNCHRONOUS...) */
    unsigned int         sharing;     /* file sharing mode */
    int                  unix_fd;     /* unix file descriptor */
    unsigned int         no_fd_status;/* status to return when unix_fd is -1 */
    int                  signaled :1; /* is the fd signaled? */
    int                  fs_locks :1; /* can we use filesystem locks for this fd? */
    int                  poll_index;  /* index of fd in poll array */
    struct async_queue  *read_q;      /* async readers of this fd */
    struct async_queue  *write_q;     /* async writers of this fd */
    struct async_queue  *wait_q;      /* other async waiters of this fd */
};
static void fd_dump( struct object *obj, int verbose );
static void fd_destroy( struct object *obj );

static const struct object_ops fd_ops =
{
    sizeof(struct fd),        /* size */
    fd_dump,                  /* dump */
    no_add_queue,             /* add_queue */
    NULL,                     /* remove_queue */
    NULL,                     /* signaled */
    NULL,                     /* satisfied */
    no_signal,                /* signal */
    no_get_fd,                /* get_fd */
    no_map_access,            /* map_access */
    no_lookup_name,           /* lookup_name */
    no_open_file,             /* open_file */
    no_close_handle,          /* close_handle */
    fd_destroy                /* destroy */
};
#define DEVICE_HASH_SIZE 7
#define INODE_HASH_SIZE 17
struct device
{
    struct object obj;                          /* object header */
    struct list   entry;                        /* entry in device hash list */
    dev_t         dev;                          /* device number */
    int           removable;                    /* removable device? (or -1 if unknown) */
    struct list   inode_hash[INODE_HASH_SIZE];  /* inodes hash table */
};
static void device_dump( struct object *obj, int verbose );
static void device_destroy( struct object *obj );

static const struct object_ops device_ops =
{
    sizeof(struct device),    /* size */
    device_dump,              /* dump */
    no_add_queue,             /* add_queue */
    NULL,                     /* remove_queue */
    NULL,                     /* signaled */
    NULL,                     /* satisfied */
    no_signal,                /* signal */
    no_get_fd,                /* get_fd */
    no_map_access,            /* map_access */
    no_lookup_name,           /* lookup_name */
    no_open_file,             /* open_file */
    no_close_handle,          /* close_handle */
    device_destroy            /* destroy */
};
struct inode
{
    struct object  obj;     /* object header */
    struct list    entry;   /* inode hash list entry */
    struct device *device;  /* device containing this inode */
    ino_t          ino;     /* inode number */
    struct list    open;    /* list of open file descriptors */
    struct list    locks;   /* list of file locks */
    struct list    closed;  /* list of file descriptors to close at destroy time */
};
static void inode_dump( struct object *obj, int verbose );
static void inode_destroy( struct object *obj );

static const struct object_ops inode_ops =
{
    sizeof(struct inode),     /* size */
    inode_dump,               /* dump */
    no_add_queue,             /* add_queue */
    NULL,                     /* remove_queue */
    NULL,                     /* signaled */
    NULL,                     /* satisfied */
    no_signal,                /* signal */
    no_get_fd,                /* get_fd */
    no_map_access,            /* map_access */
    no_lookup_name,           /* lookup_name */
    no_open_file,             /* open_file */
    no_close_handle,          /* close_handle */
    inode_destroy             /* destroy */
};
/* file lock object */

struct file_lock
{
    struct object   obj;         /* object header */
    struct fd      *fd;          /* fd owning this lock */
    struct list     fd_entry;    /* entry in list of locks on a given fd */
    struct list     inode_entry; /* entry in inode list of locks */
    int             shared;      /* shared lock? */
    file_pos_t      start;       /* locked region is interval [start;end) */
    file_pos_t      end;         /* end of locked region */
    struct process *process;     /* process owning this lock */
    struct list     proc_entry;  /* entry in list of locks owned by the process */
};
static void file_lock_dump( struct object *obj, int verbose );
static int file_lock_signaled( struct object *obj, struct thread *thread );

static const struct object_ops file_lock_ops =
{
    sizeof(struct file_lock),   /* size */
    file_lock_dump,             /* dump */
    add_queue,                  /* add_queue */
    remove_queue,               /* remove_queue */
    file_lock_signaled,         /* signaled */
    no_satisfied,               /* satisfied */
    no_signal,                  /* signal */
    no_get_fd,                  /* get_fd */
    no_map_access,              /* map_access */
    no_lookup_name,             /* lookup_name */
    no_open_file,               /* open_file */
    no_close_handle,            /* close_handle */
    no_destroy                  /* destroy */
};
#define OFF_T_MAX       (~((file_pos_t)1 << (8*sizeof(off_t)-1)))
#define FILE_POS_T_MAX  (~(file_pos_t)0)

static file_pos_t max_unix_offset = OFF_T_MAX;
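/* Worked example (illustrative only): with a 32-bit off_t,
 * (file_pos_t)1 << 31 is 0x80000000 and ~that is 0x7fffffff, the largest
 * offset representable in off_t; with a 64-bit off_t the same expression
 * yields 0x7fffffffffffffff. Offsets beyond max_unix_offset are simply
 * never mirrored as Unix locks.
 */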
#define DUMP_LONG_LONG(val) do { \
    if (sizeof(val) > sizeof(unsigned long) && (val) > ~0UL) \
        fprintf( stderr, "%lx%08lx", (unsigned long)((unsigned long long)(val) >> 32), (unsigned long)(val) ); \
    else \
        fprintf( stderr, "%lx", (unsigned long)(val) ); \
  } while (0)
/****************************************************************/
/* timeouts support */

struct timeout_user
{
    struct list      entry;      /* entry in sorted timeout list */
    timeout_t        when;       /* timeout expiry (absolute time) */
    timeout_callback callback;   /* callback function */
    void            *private;    /* callback private data */
};

static struct list timeout_list = LIST_INIT(timeout_list);   /* sorted timeouts list */
timeout_t current_time;
static inline void set_current_time(void)
{
    static const timeout_t ticks_1601_to_1970 = (timeout_t)86400 * (369 * 365 + 89) * TICKS_PER_SEC;
    struct timeval now;
    gettimeofday( &now, NULL );
    current_time = (timeout_t)now.tv_sec * TICKS_PER_SEC + now.tv_usec * 10 + ticks_1601_to_1970;
}
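/* Worked arithmetic (illustrative only): 1601..1970 spans 369 years, of
 * which 89 are leap years, so 369 * 365 + 89 = 134774 days. Multiplied by
 * 86400 seconds per day and TICKS_PER_SEC this is the offset from the
 * Windows epoch (1601) to the Unix epoch (1970) in 100-ns ticks; the
 * tv_usec * 10 term converts microseconds to 100-ns ticks.
 */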
/* add a timeout user */
struct timeout_user *add_timeout_user( timeout_t when, timeout_callback func, void *private )
{
    struct timeout_user *user;
    struct list *ptr;

    if (!(user = mem_alloc( sizeof(*user) ))) return NULL;
    user->when     = (when > 0) ? when : current_time - when;
    user->callback = func;
    user->private  = private;

    /* Now insert it in the linked list */

    LIST_FOR_EACH( ptr, &timeout_list )
    {
        struct timeout_user *timeout = LIST_ENTRY( ptr, struct timeout_user, entry );
        if (timeout->when >= user->when) break;
    }
    list_add_before( ptr, &user->entry );
    return user;
}
/* remove a timeout user */
void remove_timeout_user( struct timeout_user *user )
{
    list_remove( &user->entry );
    free( user );
}
/* return a text description of a timeout for debugging purposes */
const char *get_timeout_str( timeout_t timeout )
{
    static char buffer[64];
    long secs, nsecs;

    if (!timeout) return "0";
    if (timeout == TIMEOUT_INFINITE) return "infinite";

    if (timeout < 0)  /* relative */
    {
        secs = -timeout / TICKS_PER_SEC;
        nsecs = -timeout % TICKS_PER_SEC;
        sprintf( buffer, "+%ld.%07ld", secs, nsecs );
    }
    else  /* absolute */
    {
        secs = (timeout - current_time) / TICKS_PER_SEC;
        nsecs = (timeout - current_time) % TICKS_PER_SEC;
        if (nsecs < 0)
        {
            nsecs += TICKS_PER_SEC;
            secs--;
        }
        if (secs >= 0)
            sprintf( buffer, "%x%08x (+%ld.%07ld)",
                     (unsigned int)(timeout >> 32), (unsigned int)timeout, secs, nsecs );
        else
            sprintf( buffer, "%x%08x (-%ld.%07ld)",
                     (unsigned int)(timeout >> 32), (unsigned int)timeout,
                     -(secs + 1), TICKS_PER_SEC - nsecs );
    }
    return buffer;
}
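/* Usage sketch (illustrative only, assuming TICKS_PER_SEC is 10000000,
 * i.e. 100-ns ticks): a relative timeout of half a second is passed as a
 * negative tick count and formats with a 7-digit fractional part:
 *
 *     const char *str = get_timeout_str( -5000000 );   // "+0.5000000"
 */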
/****************************************************************/
/* poll support */

static struct fd **poll_users;              /* users array */
static struct pollfd *pollfd;               /* poll fd array */
static int nb_users;                        /* count of array entries actually in use */
static int active_users;                    /* current number of active users */
static int allocated_users;                 /* count of allocated entries in the array */
static struct fd **freelist;                /* list of free entries in the array */

static int get_next_timeout(void);
#ifdef USE_EPOLL

static int epoll_fd = -1;

static inline void init_epoll(void)
{
    epoll_fd = epoll_create( 128 );
}
/* set the events that epoll waits for on this fd; helper for set_fd_events */
static inline void set_fd_epoll_events( struct fd *fd, int user, int events )
{
    struct epoll_event ev;
    int ctl;

    if (epoll_fd == -1) return;

    if (events == -1)  /* stop waiting on this fd completely */
    {
        if (pollfd[user].fd == -1) return;  /* already removed */
        ctl = EPOLL_CTL_DEL;
        events = 0;
    }
    else if (pollfd[user].fd == -1)
    {
        if (pollfd[user].events) return;  /* stopped waiting on it, don't restart */
        ctl = EPOLL_CTL_ADD;
    }
    else
    {
        if (pollfd[user].events == events) return;  /* nothing to do */
        ctl = EPOLL_CTL_MOD;
    }

    ev.events = events;
    memset(&ev.data, 0, sizeof(ev.data));
    ev.data.u32 = user;

    if (epoll_ctl( epoll_fd, ctl, fd->unix_fd, &ev ) == -1)
    {
        if (errno == ENOMEM)  /* not enough memory, give up on epoll */
        {
            close( epoll_fd );
            epoll_fd = -1;
        }
        else perror( "epoll_ctl" );  /* should not happen */
    }
}
static inline void remove_epoll_user( struct fd *fd, int user )
{
    if (epoll_fd == -1) return;

    if (pollfd[user].fd != -1)
    {
        struct epoll_event dummy;
        epoll_ctl( epoll_fd, EPOLL_CTL_DEL, fd->unix_fd, &dummy );
    }
}
static inline void main_loop_epoll(void)
{
    int i, ret, timeout;
    struct epoll_event events[128];

    assert( POLLIN == EPOLLIN );
    assert( POLLOUT == EPOLLOUT );
    assert( POLLERR == EPOLLERR );
    assert( POLLHUP == EPOLLHUP );

    if (epoll_fd == -1) return;

    while (active_users)
    {
        timeout = get_next_timeout();

        if (!active_users) break;  /* last user removed by a timeout */
        if (epoll_fd == -1) break;  /* an error occurred with epoll */

        ret = epoll_wait( epoll_fd, events, sizeof(events)/sizeof(events[0]), timeout );
        set_current_time();

        /* put the events into the pollfd array first, like poll does */
        for (i = 0; i < ret; i++)
        {
            int user = events[i].data.u32;
            pollfd[user].revents = events[i].events;
        }

        /* read events from the pollfd array, as set_fd_events may modify them */
        for (i = 0; i < ret; i++)
        {
            int user = events[i].data.u32;
            if (pollfd[user].revents) fd_poll_event( poll_users[user], pollfd[user].revents );
        }
    }
}
#elif defined(HAVE_KQUEUE)

static int kqueue_fd = -1;

static inline void init_epoll(void)
{
#ifndef __APPLE__ /* kqueue support is broken in the MacOS kernel so we can't use it */
    kqueue_fd = kqueue();
#endif
}
static inline void set_fd_epoll_events( struct fd *fd, int user, int events )
{
    struct kevent ev[2];

    if (kqueue_fd == -1) return;

    EV_SET( &ev[0], fd->unix_fd, EVFILT_READ, 0, NOTE_LOWAT, 1, (void *)user );
    EV_SET( &ev[1], fd->unix_fd, EVFILT_WRITE, 0, NOTE_LOWAT, 1, (void *)user );

    if (events == -1)  /* stop waiting on this fd completely */
    {
        if (pollfd[user].fd == -1) return;  /* already removed */
        ev[0].flags |= EV_DELETE;
        ev[1].flags |= EV_DELETE;
    }
    else if (pollfd[user].fd == -1)
    {
        if (pollfd[user].events) return;  /* stopped waiting on it, don't restart */
        ev[0].flags |= EV_ADD | ((events & POLLIN) ? EV_ENABLE : EV_DISABLE);
        ev[1].flags |= EV_ADD | ((events & POLLOUT) ? EV_ENABLE : EV_DISABLE);
    }
    else
    {
        if (pollfd[user].events == events) return;  /* nothing to do */
        ev[0].flags |= (events & POLLIN) ? EV_ENABLE : EV_DISABLE;
        ev[1].flags |= (events & POLLOUT) ? EV_ENABLE : EV_DISABLE;
    }

    if (kevent( kqueue_fd, ev, 2, NULL, 0, NULL ) == -1)
    {
        if (errno == ENOMEM)  /* not enough memory, give up on kqueue */
        {
            close( kqueue_fd );
            kqueue_fd = -1;
        }
        else perror( "kevent" );  /* should not happen */
    }
}
static inline void remove_epoll_user( struct fd *fd, int user )
{
    if (kqueue_fd == -1) return;

    if (pollfd[user].fd != -1)
    {
        struct kevent ev[2];

        EV_SET( &ev[0], fd->unix_fd, EVFILT_READ, EV_DELETE, 0, 0, 0 );
        EV_SET( &ev[1], fd->unix_fd, EVFILT_WRITE, EV_DELETE, 0, 0, 0 );
        kevent( kqueue_fd, ev, 2, NULL, 0, NULL );
    }
}
static inline void main_loop_epoll(void)
{
    int i, ret, timeout;
    struct kevent events[128];

    if (kqueue_fd == -1) return;

    while (active_users)
    {
        timeout = get_next_timeout();

        if (!active_users) break;  /* last user removed by a timeout */
        if (kqueue_fd == -1) break;  /* an error occurred with kqueue */

        if (timeout != -1)
        {
            struct timespec ts;

            ts.tv_sec = timeout / 1000;
            ts.tv_nsec = (timeout % 1000) * 1000000;
            ret = kevent( kqueue_fd, NULL, 0, events, sizeof(events)/sizeof(events[0]), &ts );
        }
        else ret = kevent( kqueue_fd, NULL, 0, events, sizeof(events)/sizeof(events[0]), NULL );

        set_current_time();

        /* put the events into the pollfd array first, like poll does */
        for (i = 0; i < ret; i++)
        {
            long user = (long)events[i].udata;
            pollfd[user].revents = 0;
        }
        for (i = 0; i < ret; i++)
        {
            long user = (long)events[i].udata;
            if (events[i].filter == EVFILT_READ) pollfd[user].revents |= POLLIN;
            else if (events[i].filter == EVFILT_WRITE) pollfd[user].revents |= POLLOUT;
            if (events[i].flags & EV_EOF) pollfd[user].revents |= POLLHUP;
            if (events[i].flags & EV_ERROR) pollfd[user].revents |= POLLERR;
        }

        /* read events from the pollfd array, as set_fd_events may modify them */
        for (i = 0; i < ret; i++)
        {
            long user = (long)events[i].udata;
            if (pollfd[user].revents) fd_poll_event( poll_users[user], pollfd[user].revents );
            pollfd[user].revents = 0;
        }
    }
}
#else /* HAVE_KQUEUE */

static inline void init_epoll(void) { }
static inline void set_fd_epoll_events( struct fd *fd, int user, int events ) { }
static inline void remove_epoll_user( struct fd *fd, int user ) { }
static inline void main_loop_epoll(void) { }

#endif /* USE_EPOLL */
/* add a user in the poll array and return its index, or -1 on failure */
static int add_poll_user( struct fd *fd )
{
    int ret;

    if (freelist)
    {
        ret = freelist - poll_users;
        freelist = (struct fd **)poll_users[ret];
    }
    else
    {
        if (nb_users == allocated_users)
        {
            struct fd **newusers;
            struct pollfd *newpoll;
            int new_count = allocated_users ? (allocated_users + allocated_users / 2) : 16;
            if (!(newusers = realloc( poll_users, new_count * sizeof(*poll_users) ))) return -1;
            if (!(newpoll = realloc( pollfd, new_count * sizeof(*pollfd) )))
            {
                if (allocated_users)
                    poll_users = newusers;
                else
                    free( newusers );
                return -1;
            }
            poll_users = newusers;
            pollfd = newpoll;
            if (!allocated_users) init_epoll();
            allocated_users = new_count;
        }
        ret = nb_users++;
    }
    pollfd[ret].fd = -1;
    pollfd[ret].events = 0;
    pollfd[ret].revents = 0;
    poll_users[ret] = fd;
    active_users++;
    return ret;
}
/* remove a user from the poll list */
static void remove_poll_user( struct fd *fd, int user )
{
    assert( user >= 0 );
    assert( poll_users[user] == fd );

    remove_epoll_user( fd, user );
    pollfd[user].fd = -1;
    pollfd[user].events = 0;
    pollfd[user].revents = 0;
    poll_users[user] = (struct fd *)freelist;
    freelist = &poll_users[user];
    active_users--;
}
/* process pending timeouts and return the time until the next timeout, in milliseconds */
static int get_next_timeout(void)
{
    if (!list_empty( &timeout_list ))
    {
        struct list expired_list, *ptr;

        /* first remove all expired timers from the list */

        list_init( &expired_list );
        while ((ptr = list_head( &timeout_list )) != NULL)
        {
            struct timeout_user *timeout = LIST_ENTRY( ptr, struct timeout_user, entry );

            if (timeout->when <= current_time)
            {
                list_remove( &timeout->entry );
                list_add_tail( &expired_list, &timeout->entry );
            }
            else break;
        }

        /* now call the callback for all the removed timers */

        while ((ptr = list_head( &expired_list )) != NULL)
        {
            struct timeout_user *timeout = LIST_ENTRY( ptr, struct timeout_user, entry );
            list_remove( &timeout->entry );
            timeout->callback( timeout->private );
            free( timeout );
        }

        if ((ptr = list_head( &timeout_list )) != NULL)
        {
            struct timeout_user *timeout = LIST_ENTRY( ptr, struct timeout_user, entry );
            int diff = (timeout->when - current_time + 9999) / 10000;
            if (diff < 0) diff = 0;
            return diff;
        }
    }
    return -1;  /* no pending timeouts */
}
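/* Worked example (illustrative only): timeouts are kept in 100-ns ticks
 * but poll() wants milliseconds, so the division above rounds up: a
 * timer due 1 tick from now gives (1 + 9999) / 10000 = 1 ms, ensuring
 * the loop never wakes up before the timer is actually due.
 */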
/* server main poll() loop */
void main_loop(void)
{
    int i, ret, timeout;

    set_current_time();
    server_start_time = current_time;

    main_loop_epoll();
    /* fall through to normal poll loop */

    while (active_users)
    {
        timeout = get_next_timeout();

        if (!active_users) break;  /* last user removed by a timeout */

        ret = poll( pollfd, nb_users, timeout );
        set_current_time();

        if (ret > 0)
        {
            for (i = 0; i < nb_users; i++)
            {
                if (pollfd[i].revents)
                {
                    fd_poll_event( poll_users[i], pollfd[i].revents );
                    if (!--ret) break;
                }
            }
        }
    }
}
/****************************************************************/
/* device functions */

static struct list device_hash[DEVICE_HASH_SIZE];

static int is_device_removable( dev_t dev, int unix_fd )
{
#if defined(linux) && defined(HAVE_FSTATFS)
    struct statfs stfs;

    /* check for floppy disk */
    if (major(dev) == FLOPPY_MAJOR) return 1;

    if (fstatfs( unix_fd, &stfs ) == -1) return 0;
    return (stfs.f_type == 0x9660 ||    /* iso9660 */
            stfs.f_type == 0x9fa1 ||    /* supermount */
            stfs.f_type == 0x15013346); /* udf */
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__APPLE__)
    struct statfs stfs;

    if (fstatfs( unix_fd, &stfs ) == -1) return 0;
    return (!strncmp("cd9660", stfs.f_fstypename, sizeof(stfs.f_fstypename)) ||
            !strncmp("udf", stfs.f_fstypename, sizeof(stfs.f_fstypename)));
#elif defined(__NetBSD__)
    struct statvfs stfs;

    if (fstatvfs( unix_fd, &stfs ) == -1) return 0;
    return (!strncmp("cd9660", stfs.f_fstypename, sizeof(stfs.f_fstypename)) ||
            !strncmp("udf", stfs.f_fstypename, sizeof(stfs.f_fstypename)));
#elif defined(sun)
# include <sys/dkio.h>
# include <sys/vtoc.h>
    struct dk_cinfo dkinf;
    if (ioctl( unix_fd, DKIOCINFO, &dkinf ) == -1) return 0;
    return (dkinf.dki_ctype == DKC_CDROM ||
            dkinf.dki_ctype == DKC_NCRFLOPPY ||
            dkinf.dki_ctype == DKC_SMSFLOPPY ||
            dkinf.dki_ctype == DKC_INTEL82072 ||
            dkinf.dki_ctype == DKC_INTEL82077);
#else
    return 0;
#endif
}
/* retrieve the device object for a given fd, creating it if needed */
static struct device *get_device( dev_t dev, int unix_fd )
{
    struct device *device;
    unsigned int i, hash = dev % DEVICE_HASH_SIZE;

    if (device_hash[hash].next)
    {
        LIST_FOR_EACH_ENTRY( device, &device_hash[hash], struct device, entry )
            if (device->dev == dev) return (struct device *)grab_object( device );
    }
    else list_init( &device_hash[hash] );

    /* not found, create it */

    if (unix_fd == -1) return NULL;
    if ((device = alloc_object( &device_ops )))
    {
        device->dev = dev;
        device->removable = is_device_removable( dev, unix_fd );
        for (i = 0; i < INODE_HASH_SIZE; i++) list_init( &device->inode_hash[i] );
        list_add_head( &device_hash[hash], &device->entry );
    }
    return device;
}
static void device_dump( struct object *obj, int verbose )
{
    struct device *device = (struct device *)obj;
    fprintf( stderr, "Device dev=" );
    DUMP_LONG_LONG( device->dev );
    fprintf( stderr, "\n" );
}
static void device_destroy( struct object *obj )
{
    struct device *device = (struct device *)obj;
    unsigned int i;

    for (i = 0; i < INODE_HASH_SIZE; i++)
        assert( list_empty(&device->inode_hash[i]) );

    list_remove( &device->entry );  /* remove it from the hash table */
}
/****************************************************************/
/* inode functions */

/* close all pending file descriptors in the closed list */
static void inode_close_pending( struct inode *inode, int keep_unlinks )
{
    struct list *ptr = list_head( &inode->closed );

    while (ptr)
    {
        struct closed_fd *fd = LIST_ENTRY( ptr, struct closed_fd, entry );
        struct list *next = list_next( &inode->closed, ptr );

        if (fd->unix_fd != -1)
        {
            close( fd->unix_fd );
            fd->unix_fd = -1;
        }
        if (!keep_unlinks || !fd->unlink[0])  /* get rid of it unless there's an unlink pending on that file */
        {
            list_remove( ptr );
            free( fd );
        }
        ptr = next;
    }
}
static void inode_dump( struct object *obj, int verbose )
{
    struct inode *inode = (struct inode *)obj;
    fprintf( stderr, "Inode device=%p ino=", inode->device );
    DUMP_LONG_LONG( inode->ino );
    fprintf( stderr, "\n" );
}
static void inode_destroy( struct object *obj )
{
    struct inode *inode = (struct inode *)obj;
    struct list *ptr;

    assert( list_empty(&inode->open) );
    assert( list_empty(&inode->locks) );

    list_remove( &inode->entry );

    while ((ptr = list_head( &inode->closed )))
    {
        struct closed_fd *fd = LIST_ENTRY( ptr, struct closed_fd, entry );
        list_remove( ptr );
        if (fd->unix_fd != -1) close( fd->unix_fd );
        if (fd->unlink[0])
        {
            /* make sure it is still the same file */
            struct stat st;
            if (!stat( fd->unlink, &st ) && st.st_dev == inode->device->dev && st.st_ino == inode->ino)
            {
                if (S_ISDIR(st.st_mode)) rmdir( fd->unlink );
                else unlink( fd->unlink );
            }
        }
        free( fd );
    }
    release_object( inode->device );
}
/* retrieve the inode object for a given fd, creating it if needed */
static struct inode *get_inode( dev_t dev, ino_t ino, int unix_fd )
{
    struct device *device;
    struct inode *inode;
    unsigned int hash = ino % INODE_HASH_SIZE;

    if (!(device = get_device( dev, unix_fd ))) return NULL;

    LIST_FOR_EACH_ENTRY( inode, &device->inode_hash[hash], struct inode, entry )
    {
        if (inode->ino == ino)
        {
            release_object( device );
            return (struct inode *)grab_object( inode );
        }
    }

    /* not found, create it */
    if ((inode = alloc_object( &inode_ops )))
    {
        inode->device = device;
        inode->ino    = ino;
        list_init( &inode->open );
        list_init( &inode->locks );
        list_init( &inode->closed );
        list_add_head( &device->inode_hash[hash], &inode->entry );
    }
    else release_object( device );
    return inode;
}
/* add fd to the inode list of file descriptors to close */
static void inode_add_closed_fd( struct inode *inode, struct closed_fd *fd )
{
    if (!list_empty( &inode->locks ))
    {
        list_add_head( &inode->closed, &fd->entry );
    }
    else if (fd->unlink[0])  /* close the fd but keep the structure around for unlink */
    {
        if (fd->unix_fd != -1) close( fd->unix_fd );
        fd->unix_fd = -1;
        list_add_head( &inode->closed, &fd->entry );
    }
    else  /* no locks on this inode and no unlink, get rid of the fd */
    {
        if (fd->unix_fd != -1) close( fd->unix_fd );
        free( fd );
    }
}
/****************************************************************/
/* file lock functions */

static void file_lock_dump( struct object *obj, int verbose )
{
    struct file_lock *lock = (struct file_lock *)obj;
    fprintf( stderr, "Lock %s fd=%p proc=%p start=",
             lock->shared ? "shared" : "excl", lock->fd, lock->process );
    DUMP_LONG_LONG( lock->start );
    fprintf( stderr, " end=" );
    DUMP_LONG_LONG( lock->end );
    fprintf( stderr, "\n" );
}
static int file_lock_signaled( struct object *obj, struct thread *thread )
{
    struct file_lock *lock = (struct file_lock *)obj;
    /* lock is signaled if it has lost its owner */
    return !lock->process;
}
/* set (or remove) a Unix lock if possible for the given range */
static int set_unix_lock( struct fd *fd, file_pos_t start, file_pos_t end, int type )
{
    struct flock fl;

    if (!fd->fs_locks) return 1;  /* no fs locks possible for this fd */
    for (;;)
    {
        if (start == end) return 1;  /* can't set zero-byte lock */
        if (start > max_unix_offset) return 1;  /* ignore it */
        fl.l_type   = type;
        fl.l_whence = SEEK_SET;
        fl.l_start  = start;
        if (!end || end > max_unix_offset) fl.l_len = 0;
        else fl.l_len = end - start;
        if (fcntl( fd->unix_fd, F_SETLK, &fl ) != -1) return 1;

        switch(errno)
        {
        case EACCES:
            /* check whether locks work at all on this file system */
            if (fcntl( fd->unix_fd, F_GETLK, &fl ) != -1)
            {
                set_error( STATUS_FILE_LOCK_CONFLICT );
                return 0;
            }
            /* fall through */
        case EIO:
        case ENOLCK:
            /* no locking on this fs, just ignore it */
            fd->fs_locks = 0;
            return 1;
        case EAGAIN:
            set_error( STATUS_FILE_LOCK_CONFLICT );
            return 0;
        case EBADF:
            /* this can happen if we try to set a write lock on a read-only file */
            /* we just ignore that error */
            if (fl.l_type == F_WRLCK) return 1;
            set_error( STATUS_ACCESS_DENIED );
            return 0;
#ifdef EOVERFLOW
        case EOVERFLOW:
#endif
        case EINVAL:
            /* this can happen if off_t is 64-bit but the kernel only supports 32-bit */
            /* in that case we shrink the limit and retry */
            if (max_unix_offset > INT_MAX)
            {
                max_unix_offset = INT_MAX;
                break;  /* retry */
            }
            /* fall through */
        default:
            file_set_error();
            return 0;
        }
    }
}
/* check if interval [start;end) overlaps the lock */
static inline int lock_overlaps( struct file_lock *lock, file_pos_t start, file_pos_t end )
{
    if (lock->end && start >= lock->end) return 0;
    if (end && lock->start >= end) return 0;
    return 1;
}
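/* Illustrative sketch (not part of the server): locked regions are
 * half-open intervals [start;end), with end == 0 standing for "no upper
 * bound". E.g. a lock covering [10;20) does not overlap [20;30):
 *
 *     lock->start = 10; lock->end = 20;
 *     lock_overlaps( lock, 20, 30 );   // 0: start 20 >= lock->end 20
 *     lock_overlaps( lock, 0, 0 );     // 1: [0;unbounded) covers it
 */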
/* remove Unix locks for all bytes in the specified area that are no longer locked */
static void remove_unix_locks( struct fd *fd, file_pos_t start, file_pos_t end )
{
    struct hole
    {
        struct hole *next;
        struct hole *prev;
        file_pos_t   start;
        file_pos_t   end;
    } *first, *cur, *next, *buffer;

    struct list *ptr;
    int count = 0;

    if (!fd->inode) return;
    if (!fd->fs_locks) return;
    if (start == end || start > max_unix_offset) return;
    if (!end || end > max_unix_offset) end = max_unix_offset + 1;

    /* count the number of locks overlapping the specified area */

    LIST_FOR_EACH( ptr, &fd->inode->locks )
    {
        struct file_lock *lock = LIST_ENTRY( ptr, struct file_lock, inode_entry );
        if (lock->start == lock->end) continue;
        if (lock_overlaps( lock, start, end )) count++;
    }

    if (!count)  /* no locks at all, we can unlock everything */
    {
        set_unix_lock( fd, start, end, F_UNLCK );
        return;
    }

    /* allocate space for the list of holes */
    /* max. number of holes is number of locks + 1 */

    if (!(buffer = malloc( sizeof(*buffer) * (count+1) ))) return;
    first = buffer;
    first->next  = NULL;
    first->prev  = NULL;
    first->start = start;
    first->end   = end;
    next = first + 1;

    /* build a sorted list of unlocked holes in the specified area */

    LIST_FOR_EACH( ptr, &fd->inode->locks )
    {
        struct file_lock *lock = LIST_ENTRY( ptr, struct file_lock, inode_entry );
        if (lock->start == lock->end) continue;
        if (!lock_overlaps( lock, start, end )) continue;

        /* go through all the holes touched by this lock */
        for (cur = first; cur; cur = cur->next)
        {
            if (cur->end <= lock->start) continue;  /* hole is before start of lock */
            if (lock->end && cur->start >= lock->end) break;  /* hole is after end of lock */

            /* now we know that lock is overlapping hole */

            if (cur->start >= lock->start)  /* lock starts before hole, shrink from start */
            {
                cur->start = lock->end;
                if (cur->start && cur->start < cur->end) break;  /* done with this lock */
                /* now hole is empty, remove it */
                if (cur->next) cur->next->prev = cur->prev;
                if (cur->prev) cur->prev->next = cur->next;
                else if (!(first = cur->next)) goto done;  /* no more holes at all */
            }
            else if (!lock->end || cur->end <= lock->end)  /* lock larger than hole, shrink from end */
            {
                cur->end = lock->start;
                assert( cur->start < cur->end );
            }
            else  /* lock is in the middle of hole, split hole in two */
            {
                next->prev = cur;
                next->next = cur->next;
                cur->next = next;
                next->start = lock->end;
                next->end = cur->end;
                cur->end = lock->start;
                assert( next->start < next->end );
                assert( cur->end < next->start );
                next++;
                break;  /* done with this lock */
            }
        }
    }

    /* clear Unix locks for all the holes */

    for (cur = first; cur; cur = cur->next)
        set_unix_lock( fd, cur->start, cur->end, F_UNLCK );

 done:
    free( buffer );
}
/* create a new lock on a fd */
static struct file_lock *add_lock( struct fd *fd, int shared, file_pos_t start, file_pos_t end )
{
    struct file_lock *lock;

    if (!fd->inode)  /* not a regular file */
    {
        set_error( STATUS_INVALID_HANDLE );
        return NULL;
    }

    if (!(lock = alloc_object( &file_lock_ops ))) return NULL;
    lock->shared  = shared;
    lock->start   = start;
    lock->end     = end;
    lock->fd      = fd;
    lock->process = current->process;

    /* now try to set a Unix lock */
    if (!set_unix_lock( lock->fd, lock->start, lock->end, lock->shared ? F_RDLCK : F_WRLCK ))
    {
        release_object( lock );
        return NULL;
    }
    list_add_head( &fd->locks, &lock->fd_entry );
    list_add_head( &fd->inode->locks, &lock->inode_entry );
    list_add_head( &lock->process->locks, &lock->proc_entry );
    return lock;
}
/* remove an existing lock */
static void remove_lock( struct file_lock *lock, int remove_unix )
{
    struct inode *inode = lock->fd->inode;

    list_remove( &lock->fd_entry );
    list_remove( &lock->inode_entry );
    list_remove( &lock->proc_entry );
    if (remove_unix) remove_unix_locks( lock->fd, lock->start, lock->end );
    if (list_empty( &inode->locks )) inode_close_pending( inode, 1 );
    lock->process = NULL;
    wake_up( &lock->obj, 0 );
    release_object( lock );
}
/* remove all locks owned by a given process */
void remove_process_locks( struct process *process )
{
    struct list *ptr;

    while ((ptr = list_head( &process->locks )))
    {
        struct file_lock *lock = LIST_ENTRY( ptr, struct file_lock, proc_entry );
        remove_lock( lock, 1 );  /* this removes it from the list */
    }
}
/* remove all locks on a given fd */
static void remove_fd_locks( struct fd *fd )
{
    file_pos_t start = FILE_POS_T_MAX, end = 0;
    struct list *ptr;

    while ((ptr = list_head( &fd->locks )))
    {
        struct file_lock *lock = LIST_ENTRY( ptr, struct file_lock, fd_entry );
        if (lock->start < start) start = lock->start;
        if (!lock->end || lock->end > end) end = lock->end - 1;
        remove_lock( lock, 0 );
    }
    if (start < end) remove_unix_locks( fd, start, end + 1 );
}
/* add a lock on an fd */
/* returns handle to wait on */
obj_handle_t lock_fd( struct fd *fd, file_pos_t start, file_pos_t count, int shared, int wait )
{
    struct list *ptr;
    file_pos_t end = start + count;

    /* don't allow wrapping locks */
    if (end && end < start)
    {
        set_error( STATUS_INVALID_PARAMETER );
        return 0;
    }

    /* check if another lock on that file overlaps the area */
    LIST_FOR_EACH( ptr, &fd->inode->locks )
    {
        struct file_lock *lock = LIST_ENTRY( ptr, struct file_lock, inode_entry );
        if (!lock_overlaps( lock, start, end )) continue;
        if (lock->shared && shared) continue;
        /* found one */
        if (!wait)
        {
            set_error( STATUS_FILE_LOCK_CONFLICT );
            return 0;
        }
        set_error( STATUS_PENDING );
        return alloc_handle( current->process, lock, SYNCHRONIZE, 0 );
    }

    /* not found, add it */
    if (add_lock( fd, shared, start, end )) return 0;
    if (get_error() == STATUS_FILE_LOCK_CONFLICT)
    {
        /* Unix lock conflict -> tell client to wait and retry */
        if (wait) set_error( STATUS_PENDING );
    }
    return 0;
}
/* remove a lock on an fd */
void unlock_fd( struct fd *fd, file_pos_t start, file_pos_t count )
{
    struct list *ptr;
    file_pos_t end = start + count;

    /* find an existing lock with the exact same parameters */
    LIST_FOR_EACH( ptr, &fd->locks )
    {
        struct file_lock *lock = LIST_ENTRY( ptr, struct file_lock, fd_entry );
        if ((lock->start == start) && (lock->end == end))
        {
            remove_lock( lock, 1 );
            return;
        }
    }
    set_error( STATUS_FILE_LOCK_CONFLICT );
}
/****************************************************************/
/* file descriptor functions */

static void fd_dump( struct object *obj, int verbose )
{
    struct fd *fd = (struct fd *)obj;
    fprintf( stderr, "Fd unix_fd=%d user=%p options=%08x", fd->unix_fd, fd->user, fd->options );
    if (fd->inode) fprintf( stderr, " inode=%p unlink='%s'", fd->inode, fd->closed->unlink );
    fprintf( stderr, "\n" );
}
static void fd_destroy( struct object *obj )
{
    struct fd *fd = (struct fd *)obj;

    free_async_queue( fd->read_q );
    free_async_queue( fd->write_q );
    free_async_queue( fd->wait_q );

    remove_fd_locks( fd );
    list_remove( &fd->inode_entry );
    if (fd->poll_index != -1) remove_poll_user( fd, fd->poll_index );
    if (fd->inode)
    {
        inode_add_closed_fd( fd->inode, fd->closed );
        release_object( fd->inode );
    }
    else  /* no inode, close it right away */
    {
        if (fd->unix_fd != -1) close( fd->unix_fd );
    }
}
/* set the events that select waits for on this fd */
void set_fd_events( struct fd *fd, int events )
{
    int user = fd->poll_index;
    assert( poll_users[user] == fd );

    set_fd_epoll_events( fd, user, events );

    if (events == -1)  /* stop waiting on this fd completely */
    {
        pollfd[user].fd = -1;
        pollfd[user].events = POLLERR;
        pollfd[user].revents = 0;
    }
    else if (pollfd[user].fd != -1 || !pollfd[user].events)
    {
        pollfd[user].fd = fd->unix_fd;
        pollfd[user].events = events;
    }
}
/* prepare an fd for unmounting its corresponding device */
static inline void unmount_fd( struct fd *fd )
{
    assert( fd->inode );

    async_wake_up( fd->read_q, STATUS_VOLUME_DISMOUNTED );
    async_wake_up( fd->write_q, STATUS_VOLUME_DISMOUNTED );

    if (fd->poll_index != -1) set_fd_events( fd, -1 );

    if (fd->unix_fd != -1) close( fd->unix_fd );

    fd->unix_fd = -1;
    fd->no_fd_status = STATUS_VOLUME_DISMOUNTED;
    fd->closed->unix_fd = -1;
    fd->closed->unlink[0] = 0;

    /* stop using Unix locks on this fd (existing locks have been removed by close) */
    fd->fs_locks = 0;
}
/* allocate an fd object, without setting the unix fd yet */
static struct fd *alloc_fd_object(void)
{
    struct fd *fd = alloc_object( &fd_ops );

    if (!fd) return NULL;
    fd->fd_ops     = NULL;
    fd->user       = NULL;
    fd->inode      = NULL;
    fd->closed     = NULL;
    fd->access     = 0;
    fd->options    = 0;
    fd->sharing    = 0;
    fd->unix_fd    = -1;
    fd->signaled   = 1;
    fd->fs_locks   = 1;
    fd->poll_index = -1;
    fd->read_q     = NULL;
    fd->write_q    = NULL;
    fd->wait_q     = NULL;
    list_init( &fd->inode_entry );
    list_init( &fd->locks );

    if ((fd->poll_index = add_poll_user( fd )) == -1)
    {
        release_object( fd );
        return NULL;
    }
    return fd;
}
/* allocate a pseudo fd object, for objects that need to behave like files but don't have a unix fd */
struct fd *alloc_pseudo_fd( const struct fd_ops *fd_user_ops, struct object *user )
{
    struct fd *fd = alloc_object( &fd_ops );

    if (!fd) return NULL;
    fd->fd_ops     = fd_user_ops;
    fd->user       = user;
    fd->inode      = NULL;
    fd->closed     = NULL;
    fd->access     = 0;
    fd->options    = 0;
    fd->sharing    = 0;
    fd->unix_fd    = -1;
    fd->signaled   = 0;
    fd->fs_locks   = 0;
    fd->poll_index = -1;
    fd->read_q     = NULL;
    fd->write_q    = NULL;
    fd->wait_q     = NULL;
    fd->no_fd_status = STATUS_BAD_DEVICE_TYPE;
    list_init( &fd->inode_entry );
    list_init( &fd->locks );
    return fd;
}
/* set the status to return when the fd has no associated unix fd */
void set_no_fd_status( struct fd *fd, unsigned int status )
{
    fd->no_fd_status = status;
}
/* check if the desired access is possible without violating */
/* the sharing mode of other opens of the same file */
static int check_sharing( struct fd *fd, unsigned int access, unsigned int sharing )
{
    unsigned int existing_sharing = FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE;
    unsigned int existing_access = 0;
    struct list *ptr;

    /* if access mode is 0, sharing mode is ignored */
    if (!access) sharing = existing_sharing;
    fd->access = access;
    fd->sharing = sharing;

    LIST_FOR_EACH( ptr, &fd->inode->open )
    {
        struct fd *fd_ptr = LIST_ENTRY( ptr, struct fd, inode_entry );
        if (fd_ptr != fd)
        {
            existing_sharing &= fd_ptr->sharing;
            existing_access  |= fd_ptr->access;
        }
    }

    if ((access & FILE_UNIX_READ_ACCESS) && !(existing_sharing & FILE_SHARE_READ)) return 0;
    if ((access & FILE_UNIX_WRITE_ACCESS) && !(existing_sharing & FILE_SHARE_WRITE)) return 0;
    if ((access & DELETE) && !(existing_sharing & FILE_SHARE_DELETE)) return 0;
    if ((existing_access & FILE_UNIX_READ_ACCESS) && !(sharing & FILE_SHARE_READ)) return 0;
    if ((existing_access & FILE_UNIX_WRITE_ACCESS) && !(sharing & FILE_SHARE_WRITE)) return 0;
    if ((existing_access & DELETE) && !(sharing & FILE_SHARE_DELETE)) return 0;
    return 1;
}
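/* Illustrative sketch (not part of the server): the checks above make
 * sharing symmetric. A new open must both fit inside what every existing
 * opener granted, and itself grant what every existing opener uses:
 *
 *     open #1: access = FILE_READ_DATA,  sharing = FILE_SHARE_READ
 *     open #2: access = FILE_WRITE_DATA, sharing = FILE_SHARE_READ|FILE_SHARE_WRITE
 *              -> rejected: existing_sharing from #1 lacks FILE_SHARE_WRITE
 */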
/* sets the user of an fd that previously had no user */
void set_fd_user( struct fd *fd, const struct fd_ops *user_ops, struct object *user )
{
    assert( fd->fd_ops == NULL );
    fd->fd_ops = user_ops;
    fd->user   = user;
}
/* open() wrapper that returns a struct fd with no fd user set */
struct fd *open_fd( const char *name, int flags, mode_t *mode, unsigned int access,
                    unsigned int sharing, unsigned int options )
{
    struct stat st;
    struct closed_fd *closed_fd;
    struct fd *fd;
    const char *unlink_name = "";
    int rw_mode;

    if ((options & FILE_DELETE_ON_CLOSE) && !(access & DELETE))
    {
        set_error( STATUS_INVALID_PARAMETER );
        return NULL;
    }

    if (!(fd = alloc_fd_object())) return NULL;

    fd->options = options;
    if (options & FILE_DELETE_ON_CLOSE) unlink_name = name;
    if (!(closed_fd = mem_alloc( sizeof(*closed_fd) + strlen(unlink_name) )))
    {
        release_object( fd );
        return NULL;
    }

    /* create the directory if needed */
    if ((options & FILE_DIRECTORY_FILE) && (flags & O_CREAT))
    {
        if (mkdir( name, 0777 ) == -1)
        {
            if (errno != EEXIST || (flags & O_EXCL))
            {
                file_set_error();
                goto error;
            }
        }
        flags &= ~(O_CREAT | O_EXCL | O_TRUNC);
    }

    if ((access & FILE_UNIX_WRITE_ACCESS) && !(options & FILE_DIRECTORY_FILE))
    {
        if (access & FILE_UNIX_READ_ACCESS) rw_mode = O_RDWR;
        else rw_mode = O_WRONLY;
    }
    else rw_mode = O_RDONLY;

    if ((fd->unix_fd = open( name, rw_mode | (flags & ~O_TRUNC), *mode )) == -1)
    {
        /* if we tried to open a directory for write access, retry read-only */
        if (errno != EISDIR ||
            !(access & FILE_UNIX_WRITE_ACCESS) ||
            (fd->unix_fd = open( name, O_RDONLY | (flags & ~O_TRUNC), *mode )) == -1)
        {
            file_set_error();
            goto error;
        }
    }

    closed_fd->unix_fd = fd->unix_fd;
    closed_fd->unlink[0] = 0;
    fstat( fd->unix_fd, &st );
    *mode = st.st_mode;

    /* only bother with an inode for normal files and directories */
    if (S_ISREG(st.st_mode) || S_ISDIR(st.st_mode))
    {
        struct inode *inode = get_inode( st.st_dev, st.st_ino, fd->unix_fd );

        if (!inode)
        {
            /* we can close the fd because there are no others open on the same file,
             * otherwise we wouldn't have failed to allocate a new inode
             */
            goto error;
        }
        fd->inode = inode;
        fd->closed = closed_fd;
        list_add_head( &inode->open, &fd->inode_entry );

        /* check directory options */
        if ((options & FILE_DIRECTORY_FILE) && !S_ISDIR(st.st_mode))
        {
            release_object( fd );
            set_error( STATUS_NOT_A_DIRECTORY );
            return NULL;
        }
        if ((options & FILE_NON_DIRECTORY_FILE) && S_ISDIR(st.st_mode))
        {
            release_object( fd );
            set_error( STATUS_FILE_IS_A_DIRECTORY );
            return NULL;
        }
        if (!check_sharing( fd, access, sharing ))
        {
            release_object( fd );
            set_error( STATUS_SHARING_VIOLATION );
            return NULL;
        }
        strcpy( closed_fd->unlink, unlink_name );
        if (flags & O_TRUNC) ftruncate( fd->unix_fd, 0 );
    }
    else  /* special file */
    {
        if (options & FILE_DIRECTORY_FILE)
        {
            set_error( STATUS_NOT_A_DIRECTORY );
            goto error;
        }
        if (unlink_name[0])  /* we can't unlink special files */
        {
            set_error( STATUS_INVALID_PARAMETER );
            goto error;
        }
        free( closed_fd );
    }
    return fd;

error:
    release_object( fd );
    free( closed_fd );
    return NULL;
}
/* create an fd for an anonymous file */
/* if the function fails the unix fd is closed */
struct fd *create_anonymous_fd( const struct fd_ops *fd_user_ops, int unix_fd, struct object *user,
                                unsigned int options )
{
    struct fd *fd = alloc_fd_object();

    if (fd)
    {
        set_fd_user( fd, fd_user_ops, user );
        fd->unix_fd = unix_fd;
        fd->options = options;
        return fd;
    }
    close( unix_fd );
    return NULL;
}
/* retrieve the object that is using an fd */
void *get_fd_user( struct fd *fd )
{
    return fd->user;
}
/* retrieve the opening options for the fd */
unsigned int get_fd_options( struct fd *fd )
{
    return fd->options;
}
/* retrieve the unix fd for an object */
int get_unix_fd( struct fd *fd )
{
    if (fd->unix_fd == -1) set_error( fd->no_fd_status );
    return fd->unix_fd;
}
/* check if two file descriptors point to the same file */
int is_same_file_fd( struct fd *fd1, struct fd *fd2 )
{
    return fd1->inode == fd2->inode;
}
/* check if fd is on a removable device */
int is_fd_removable( struct fd *fd )
{
    return (fd->inode && fd->inode->device->removable);
}
/* set or clear the fd signaled state */
void set_fd_signaled( struct fd *fd, int signaled )
{
    fd->signaled = signaled;
    if (signaled) wake_up( fd->user, 0 );
}
/* handler for close_handle that refuses to close fd-associated handles in other processes */
int fd_close_handle( struct object *obj, struct process *process, obj_handle_t handle )
{
    return (!current || current->process == process);
}
/* callback for event happening in the main poll() loop */
void fd_poll_event( struct fd *fd, int event )
{
    return fd->fd_ops->poll_event( fd, event );
}
/* check if events are pending and if yes return which one(s) */
int check_fd_events( struct fd *fd, int events )
{
    struct pollfd pfd;

    if (fd->unix_fd == -1) return POLLERR;
    if (fd->inode) return events;  /* regular files are always signaled */

    pfd.fd = fd->unix_fd;
    pfd.events = events;
    if (poll( &pfd, 1, 0 ) <= 0) return 0;
    return pfd.revents;
}
/* default signaled() routine for objects that poll() on an fd */
int default_fd_signaled( struct object *obj, struct thread *thread )
{
    struct fd *fd = get_obj_fd( obj );
    int ret = fd->signaled;
    release_object( fd );
    return ret;
}
int default_fd_get_poll_events( struct fd *fd )
{
    int events = 0;

    if (async_waiting( fd->read_q )) events |= POLLIN;
    if (async_waiting( fd->write_q )) events |= POLLOUT;
    return events;
}
/* default handler for poll() events */
void default_poll_event( struct fd *fd, int event )
{
    if (event & (POLLIN | POLLERR | POLLHUP)) async_wake_up( fd->read_q, STATUS_ALERTED );
    if (event & (POLLOUT | POLLERR | POLLHUP)) async_wake_up( fd->write_q, STATUS_ALERTED );

    /* if an error occurred, stop polling this fd to avoid busy-looping */
    if (event & (POLLERR | POLLHUP)) set_fd_events( fd, -1 );
    else if (!fd->inode) set_fd_events( fd, fd->fd_ops->get_poll_events( fd ) );
}
struct async *fd_queue_async( struct fd *fd, const async_data_t *data, int type, int count )
{
    struct async_queue *queue;
    struct async *async;

    switch (type)
    {
    case ASYNC_TYPE_READ:
        if (!fd->read_q && !(fd->read_q = create_async_queue( fd ))) return NULL;
        queue = fd->read_q;
        break;
    case ASYNC_TYPE_WRITE:
        if (!fd->write_q && !(fd->write_q = create_async_queue( fd ))) return NULL;
        queue = fd->write_q;
        break;
    case ASYNC_TYPE_WAIT:
        if (!fd->wait_q && !(fd->wait_q = create_async_queue( fd ))) return NULL;
        queue = fd->wait_q;
        break;
    default:
        queue = NULL;
        assert(0);
    }

    if ((async = create_async( current, queue, data )) && type != ASYNC_TYPE_WAIT)
    {
        if (!fd->inode)
            set_fd_events( fd, fd->fd_ops->get_poll_events( fd ) );
        else  /* regular files are always ready for read and write */
            async_wake_up( queue, STATUS_ALERTED );
    }
    return async;
}
void fd_async_wake_up( struct fd *fd, int type, unsigned int status )
{
    switch (type)
    {
    case ASYNC_TYPE_READ:
        async_wake_up( fd->read_q, status );
        break;
    case ASYNC_TYPE_WRITE:
        async_wake_up( fd->write_q, status );
        break;
    case ASYNC_TYPE_WAIT:
        async_wake_up( fd->wait_q, status );
        break;
    default:
        assert(0);
    }
}
void fd_reselect_async( struct fd *fd, struct async_queue *queue )
{
    fd->fd_ops->reselect_async( fd, queue );
}
void default_fd_queue_async( struct fd *fd, const async_data_t *data, int type, int count )
{
    struct async *async;

    if ((async = fd_queue_async( fd, data, type, count )))
    {
        release_object( async );
        set_error( STATUS_PENDING );
    }
}
/* default reselect_async() fd routine */
void default_fd_reselect_async( struct fd *fd, struct async_queue *queue )
{
    if (queue != fd->wait_q)
    {
        int poll_events = fd->fd_ops->get_poll_events( fd );
        int events = check_fd_events( fd, poll_events );
        if (events) fd->fd_ops->poll_event( fd, events );
        else set_fd_events( fd, poll_events );
    }
}
/* default cancel_async() fd routine */
void default_fd_cancel_async( struct fd *fd )
{
    async_wake_up( fd->read_q, STATUS_CANCELLED );
    async_wake_up( fd->write_q, STATUS_CANCELLED );
    async_wake_up( fd->wait_q, STATUS_CANCELLED );
}
/* default flush() routine */
void no_flush( struct fd *fd, struct event **event )
{
    set_error( STATUS_OBJECT_TYPE_MISMATCH );
}
static inline int is_valid_mounted_device( struct stat *st )
{
#if defined(linux) || defined(__sun__)
    return S_ISBLK( st->st_mode );
#else
    /* disks are char devices on *BSD */
    return S_ISCHR( st->st_mode );
#endif
}
/* close all Unix file descriptors on a device to allow unmounting it */
static void unmount_device( struct fd *device_fd )
{
    unsigned int i;
    struct stat st;
    struct device *device;
    struct inode *inode;
    struct fd *fd;
    int unix_fd = get_unix_fd( device_fd );

    if (unix_fd == -1) return;

    if (fstat( unix_fd, &st ) == -1 || !is_valid_mounted_device( &st ))
    {
        set_error( STATUS_INVALID_PARAMETER );
        return;
    }

    if (!(device = get_device( st.st_rdev, -1 ))) return;

    for (i = 0; i < INODE_HASH_SIZE; i++)
    {
        LIST_FOR_EACH_ENTRY( inode, &device->inode_hash[i], struct inode, entry )
        {
            LIST_FOR_EACH_ENTRY( fd, &inode->open, struct fd, inode_entry )
            {
                unmount_fd( fd );
            }
            inode_close_pending( inode, 0 );
        }
    }
    /* remove it from the hash table */
    list_remove( &device->entry );
    list_init( &device->entry );
    release_object( device );
}
/* default ioctl() routine */
void default_fd_ioctl( struct fd *fd, ioctl_code_t code, const async_data_t *async,
                       const void *data, data_size_t size )
{
    switch(code)
    {
    case FSCTL_DISMOUNT_VOLUME:
        unmount_device( fd );
        break;
    default:
        set_error( STATUS_NOT_SUPPORTED );
    }
}
/* same as get_handle_obj but retrieve the struct fd associated to the object */
static struct fd *get_handle_fd_obj( struct process *process, obj_handle_t handle,
                                     unsigned int access )
{
    struct fd *fd = NULL;
    struct object *obj;

    if ((obj = get_handle_obj( process, handle, access, NULL )))
    {
        fd = get_obj_fd( obj );
        release_object( obj );
    }
    return fd;
}
/* flush a file buffers */
DECL_HANDLER(flush_file)
{
    struct fd *fd = get_handle_fd_obj( current->process, req->handle, 0 );
    struct event * event = NULL;

    if (fd)
    {
        fd->fd_ops->flush( fd, &event );
        if (event)
        {
            reply->event = alloc_handle( current->process, event, SYNCHRONIZE, 0 );
        }
        release_object( fd );
    }
}
/* open a file object */
DECL_HANDLER(open_file_object)
{
    struct unicode_str name;
    struct directory *root = NULL;
    struct object *obj, *result;

    get_req_unicode_str( &name );
    if (req->rootdir && !(root = get_directory_obj( current->process, req->rootdir, 0 )))
        return;

    if ((obj = open_object_dir( root, &name, req->attributes, NULL )))
    {
        if ((result = obj->ops->open_file( obj, req->access, req->sharing, req->options )))
        {
            reply->handle = alloc_handle( current->process, result, req->access, req->attributes );
            release_object( result );
        }
        release_object( obj );
    }

    if (root) release_object( root );
}
/* get a Unix fd to access a file */
DECL_HANDLER(get_handle_fd)
{
    struct fd *fd;

    if ((fd = get_handle_fd_obj( current->process, req->handle, 0 )))
    {
        int unix_fd = get_unix_fd( fd );
        if (unix_fd != -1)
        {
            send_client_fd( current->process, unix_fd, req->handle );
            reply->type = fd->fd_ops->get_fd_type( fd );
            reply->removable = is_fd_removable(fd);
            reply->options = fd->options;
            reply->access = get_handle_access( current->process, req->handle );
        }
        release_object( fd );
    }
}
/* perform an ioctl on a file */
DECL_HANDLER(ioctl)
{
    unsigned int access = (req->code >> 14) & (FILE_READ_DATA|FILE_WRITE_DATA);
    struct fd *fd = get_handle_fd_obj( current->process, req->handle, access );

    if (fd)
    {
        fd->fd_ops->ioctl( fd, req->code, &req->async, get_req_data(), get_req_data_size() );
        release_object( fd );
    }
}
/* create / reschedule an async I/O */
DECL_HANDLER(register_async)
{
    unsigned int access;
    struct fd *fd;

    switch(req->type)
    {
    case ASYNC_TYPE_READ:
        access = FILE_READ_DATA;
        break;
    case ASYNC_TYPE_WRITE:
        access = FILE_WRITE_DATA;
        break;
    default:
        set_error( STATUS_INVALID_PARAMETER );
        return;
    }

    if ((fd = get_handle_fd_obj( current->process, req->handle, access )))
    {
        if (get_unix_fd( fd ) != -1) fd->fd_ops->queue_async( fd, &req->async, req->type, req->count );
        release_object( fd );
    }
}
/* cancels all async I/O */
DECL_HANDLER(cancel_async)
{
    struct fd *fd = get_handle_fd_obj( current->process, req->handle, 0 );

    if (fd)
    {
        if (get_unix_fd( fd ) != -1) fd->fd_ops->cancel_async( fd );
        release_object( fd );
    }
}