/*
 * Server-side file descriptor management
 *
 * Copyright (C) 2000, 2003 Alexandre Julliard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#ifdef HAVE_SYS_POLL_H
#include <sys/poll.h>
#endif
#include <sys/types.h>
#if defined(HAVE_SYS_EPOLL_H) && defined(HAVE_EPOLL_CREATE)
# include <sys/epoll.h>
# define USE_EPOLL
#elif defined(linux) && defined(__i386__) && defined(HAVE_STDINT_H)
# define USE_EPOLL
# define EPOLLIN POLLIN
# define EPOLLOUT POLLOUT
# define EPOLLERR POLLERR
# define EPOLLHUP POLLHUP
# define EPOLL_CTL_ADD 1
# define EPOLL_CTL_DEL 2
# define EPOLL_CTL_MOD 3

typedef union epoll_data
{
    void    *ptr;
    int      fd;
    uint32_t u32;
    uint64_t u64;
} epoll_data_t;

struct epoll_event
{
    uint32_t     events;
    epoll_data_t data;
};
#define SYSCALL_RET(ret) do { \
        if (ret < 0) { errno = -ret; ret = -1; } \
        return ret; \
    } while(0)
static inline int epoll_create( int size )
{
    int ret;
    __asm__( "pushl %%ebx; movl %2,%%ebx; int $0x80; popl %%ebx"
             : "=a" (ret) : "0" (254 /*NR_epoll_create*/), "r" (size) );
    SYSCALL_RET(ret);
}
static inline int epoll_ctl( int epfd, int op, int fd, const struct epoll_event *event )
{
    int ret;
    __asm__( "pushl %%ebx; movl %2,%%ebx; int $0x80; popl %%ebx"
             : "=a" (ret)
             : "0" (255 /*NR_epoll_ctl*/), "r" (epfd), "c" (op), "d" (fd), "S" (event), "m" (*event) );
    SYSCALL_RET(ret);
}
static inline int epoll_wait( int epfd, struct epoll_event *events, int maxevents, int timeout )
{
    int ret;
    __asm__( "pushl %%ebx; movl %2,%%ebx; int $0x80; popl %%ebx"
             : "=a" (ret)
             : "0" (256 /*NR_epoll_wait*/), "r" (epfd), "c" (events), "d" (maxevents), "S" (timeout)
             : "memory" );
    SYSCALL_RET(ret);
}

#endif /* linux && __i386__ && HAVE_STDINT_H */
/* Because of the stupid Posix locking semantics, we need to keep
 * track of all file descriptors referencing a given file, and not
 * close a single one until all the locks are gone (sigh).
 */
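/* Illustrative example (not part of the original source): POSIX drops
 * every lock held on a file as soon as *any* descriptor for that file
 * is closed, even one that never took part in locking:
 *
 *     fd1 = open( "file", O_RDWR );
 *     fd2 = open( "file", O_RDWR );
 *     fcntl( fd1, F_SETLK, &fl );     lock acquired through fd1
 *     close( fd2 );                   lock on fd1 is silently lost
 *
 * This is why the inode object below keeps a "closed" list and defers
 * the real close() until all the locks are gone.
 */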
/* file descriptor object */

/* closed_fd is used to keep track of the unix fd belonging to a closed fd object */
struct closed_fd
{
    struct list entry;       /* entry in inode closed list */
    int         fd;          /* the unix file descriptor */
    char        unlink[1];   /* name to unlink on close (if any) */
};
struct fd
{
    struct object        obj;         /* object header */
    const struct fd_ops *fd_ops;      /* file descriptor operations */
    struct inode        *inode;       /* inode that this fd belongs to */
    struct list          inode_entry; /* entry in inode fd list */
    struct closed_fd    *closed;      /* structure to store the unix fd at destroy time */
    struct object       *user;        /* object using this file descriptor */
    struct list          locks;       /* list of locks on this fd */
    unsigned int         access;      /* file access (GENERIC_READ/WRITE) */
    unsigned int         sharing;     /* file sharing mode */
    int                  unix_fd;     /* unix file descriptor */
    int                  fs_locks;    /* can we use filesystem locks for this fd? */
    int                  poll_index;  /* index of fd in poll array */
    struct list          read_q;      /* async readers of this fd */
    struct list          write_q;     /* async writers of this fd */
};
static void fd_dump( struct object *obj, int verbose );
static void fd_destroy( struct object *obj );
static const struct object_ops fd_ops =
{
    sizeof(struct fd),        /* size */
    fd_dump,                  /* dump */
    no_add_queue,             /* add_queue */
    NULL,                     /* remove_queue */
    NULL,                     /* signaled */
    NULL,                     /* satisfied */
    no_signal,                /* signal */
    no_get_fd,                /* get_fd */
    no_close_handle,          /* close_handle */
    fd_destroy                /* destroy */
};
/* inode object */

struct inode
{
    struct object obj;        /* object header */
    struct list   entry;      /* inode hash list entry */
    unsigned int  hash;       /* hashing code */
    dev_t         dev;        /* device number */
    ino_t         ino;        /* inode number */
    struct list   open;       /* list of open file descriptors */
    struct list   locks;      /* list of file locks */
    struct list   closed;     /* list of file descriptors to close at destroy time */
};
static void inode_dump( struct object *obj, int verbose );
static void inode_destroy( struct object *obj );
static const struct object_ops inode_ops =
{
    sizeof(struct inode),     /* size */
    inode_dump,               /* dump */
    no_add_queue,             /* add_queue */
    NULL,                     /* remove_queue */
    NULL,                     /* signaled */
    NULL,                     /* satisfied */
    no_signal,                /* signal */
    no_get_fd,                /* get_fd */
    no_close_handle,          /* close_handle */
    inode_destroy             /* destroy */
};
/* file lock object */

struct file_lock
{
    struct object   obj;         /* object header */
    struct fd      *fd;          /* fd owning this lock */
    struct list     fd_entry;    /* entry in list of locks on a given fd */
    struct list     inode_entry; /* entry in inode list of locks */
    int             shared;      /* shared lock? */
    file_pos_t      start;       /* locked region is interval [start;end) */
    file_pos_t      end;
    struct process *process;     /* process owning this lock */
    struct list     proc_entry;  /* entry in list of locks owned by the process */
};
static void file_lock_dump( struct object *obj, int verbose );
static int file_lock_signaled( struct object *obj, struct thread *thread );
static const struct object_ops file_lock_ops =
{
    sizeof(struct file_lock),   /* size */
    file_lock_dump,             /* dump */
    add_queue,                  /* add_queue */
    remove_queue,               /* remove_queue */
    file_lock_signaled,         /* signaled */
    no_satisfied,               /* satisfied */
    no_signal,                  /* signal */
    no_get_fd,                  /* get_fd */
    no_close_handle,            /* close_handle */
    no_destroy                  /* destroy */
};
#define OFF_T_MAX       (~((file_pos_t)1 << (8*sizeof(off_t)-1)))
#define FILE_POS_T_MAX  (~(file_pos_t)0)

static file_pos_t max_unix_offset = OFF_T_MAX;
#define DUMP_LONG_LONG(val) do { \
    if (sizeof(val) > sizeof(unsigned long) && (val) > ~0UL) \
        fprintf( stderr, "%lx%08lx", (unsigned long)((val) >> 32), (unsigned long)(val) ); \
    else \
        fprintf( stderr, "%lx", (unsigned long)(val) ); \
  } while (0)
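/* Worked example (illustration only): for a 64-bit value 0x1234567890 on
 * a platform with a 32-bit unsigned long, the first branch prints the
 * high part "12" with %lx and the low part "34567890" with %08lx, giving
 * "1234567890" -- the same text a 64-bit long would produce in one go. */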
/****************************************************************/
/* timeouts support */

struct timeout_user
{
    struct list      entry;      /* entry in sorted timeout list */
    struct timeval   when;       /* timeout expiry (absolute time) */
    timeout_callback callback;   /* callback function */
    void            *private;    /* callback private data */
};
static struct list timeout_list = LIST_INIT(timeout_list);   /* sorted timeouts list */
/* add a timeout user */
struct timeout_user *add_timeout_user( const struct timeval *when, timeout_callback func,
                                       void *private )
{
    struct timeout_user *user;
    struct list *ptr;

    if (!(user = mem_alloc( sizeof(*user) ))) return NULL;
    user->when     = *when;
    user->callback = func;
    user->private  = private;

    /* Now insert it in the linked list */

    LIST_FOR_EACH( ptr, &timeout_list )
    {
        struct timeout_user *timeout = LIST_ENTRY( ptr, struct timeout_user, entry );
        if (!time_before( &timeout->when, when )) break;
    }
    list_add_before( ptr, &user->entry );

    return user;
}
/* remove a timeout user */
void remove_timeout_user( struct timeout_user *user )
{
    list_remove( &user->entry );
    free( user );
}
/* add a timeout in milliseconds to an absolute time */
void add_timeout( struct timeval *when, int timeout )
{
    if (timeout)
    {
        long sec = timeout / 1000;
        if ((when->tv_usec += (timeout - 1000*sec) * 1000) >= 1000000)
        {
            when->tv_usec -= 1000000;
            when->tv_sec++;
        }
        when->tv_sec += sec;
    }
}
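/* Worked example (illustration only): adding timeout = 1500ms to
 * when = { tv_sec = 10, tv_usec = 600000 }: sec = 1 and tv_usec grows by
 * 500000 to 1100000, which overflows, so tv_usec wraps to 100000 and
 * tv_sec is bumped to 11; the final tv_sec += sec then yields
 * { tv_sec = 12, tv_usec = 100000 }. */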
/****************************************************************/
/* poll support */

static struct fd **poll_users;              /* users array */
static struct pollfd *pollfd;               /* poll fd array */
static int nb_users;                        /* count of array entries actually in use */
static int active_users;                    /* current number of active users */
static int allocated_users;                 /* count of allocated entries in the array */
static struct fd **freelist;                /* list of free entries in the array */
#ifdef USE_EPOLL

static int epoll_fd = -1;
static struct epoll_event *epoll_events;
/* set the events that epoll waits for on this fd; helper for set_fd_events */
static inline void set_fd_epoll_events( struct fd *fd, int user, int events )
{
    struct epoll_event ev;
    int ctl;

    if (epoll_fd == -1) return;

    if (events == -1)  /* stop waiting on this fd completely */
    {
        if (pollfd[user].fd == -1) return;  /* already removed */
        ctl = EPOLL_CTL_DEL;
        events = 0;
    }
    else if (pollfd[user].fd == -1)
    {
        if (pollfd[user].events) return;  /* stopped waiting on it, don't restart */
        ctl = EPOLL_CTL_ADD;
    }
    else
    {
        if (pollfd[user].events == events) return;  /* nothing to do */
        ctl = EPOLL_CTL_MOD;
    }

    ev.events = events;
    ev.data.u32 = user;

    if (epoll_ctl( epoll_fd, ctl, fd->unix_fd, &ev ) == -1)
    {
        if (errno == ENOMEM)  /* not enough memory, give up on epoll */
        {
            close( epoll_fd );
            epoll_fd = -1;
        }
        else perror( "epoll_ctl" );  /* should not happen */
    }
}
#else /* USE_EPOLL */

static inline void set_fd_epoll_events( struct fd *fd, int user, int events )
{
}

#endif /* USE_EPOLL */
/* add a user in the poll array and return its index, or -1 on failure */
static int add_poll_user( struct fd *fd )
{
    int ret;

    if (freelist)
    {
        ret = freelist - poll_users;
        freelist = (struct fd **)poll_users[ret];
    }
    else
    {
        if (nb_users == allocated_users)
        {
            struct fd **newusers;
            struct pollfd *newpoll;
            int new_count = allocated_users ? (allocated_users + allocated_users / 2) : 16;
            if (!(newusers = realloc( poll_users, new_count * sizeof(*poll_users) ))) return -1;
            if (!(newpoll = realloc( pollfd, new_count * sizeof(*pollfd) )))
            {
                if (allocated_users)
                    poll_users = newusers;
                else
                    free( newusers );
                return -1;
            }
            poll_users = newusers;
            pollfd = newpoll;
#ifdef USE_EPOLL
            if (!allocated_users) epoll_fd = epoll_create( new_count );
            if (epoll_fd != -1)
            {
                struct epoll_event *new_events;
                if (!(new_events = realloc( epoll_events, new_count * sizeof(*epoll_events) )))
                    return -1;
                epoll_events = new_events;
            }
#endif
            allocated_users = new_count;
        }
        ret = nb_users++;
    }
    pollfd[ret].fd = -1;
    pollfd[ret].events = 0;
    pollfd[ret].revents = 0;
    poll_users[ret] = fd;
    active_users++;
    return ret;
}
/* remove a user from the poll list */
static void remove_poll_user( struct fd *fd, int user )
{
    assert( user >= 0 );
    assert( poll_users[user] == fd );

#ifdef USE_EPOLL
    if (epoll_fd != -1 && pollfd[user].fd != -1)
    {
        struct epoll_event dummy;
        epoll_ctl( epoll_fd, EPOLL_CTL_DEL, fd->unix_fd, &dummy );
    }
#endif
    pollfd[user].fd = -1;
    pollfd[user].events = 0;
    pollfd[user].revents = 0;
    poll_users[user] = (struct fd *)freelist;
    freelist = &poll_users[user];
    active_users--;
}
/* process pending timeouts and return the time until the next timeout, in milliseconds */
static int get_next_timeout(void)
{
    if (!list_empty( &timeout_list ))
    {
        struct list expired_list, *ptr;
        struct timeval now;

        gettimeofday( &now, NULL );

        /* first remove all expired timers from the list */

        list_init( &expired_list );
        while ((ptr = list_head( &timeout_list )) != NULL)
        {
            struct timeout_user *timeout = LIST_ENTRY( ptr, struct timeout_user, entry );

            if (!time_before( &now, &timeout->when ))
            {
                list_remove( &timeout->entry );
                list_add_tail( &expired_list, &timeout->entry );
            }
            else break;
        }

        /* now call the callback for all the removed timers */

        while ((ptr = list_head( &expired_list )) != NULL)
        {
            struct timeout_user *timeout = LIST_ENTRY( ptr, struct timeout_user, entry );
            list_remove( &timeout->entry );
            timeout->callback( timeout->private );
            free( timeout );
        }

        if ((ptr = list_head( &timeout_list )) != NULL)
        {
            struct timeout_user *timeout = LIST_ENTRY( ptr, struct timeout_user, entry );
            int diff = (timeout->when.tv_sec - now.tv_sec) * 1000
                     + (timeout->when.tv_usec - now.tv_usec) / 1000;
            if (diff < 0) diff = 0;
            return diff;
        }
    }
    return -1;  /* no pending timeouts */
}
/* server main poll() loop */
void main_loop(void)
{
    int i, ret, timeout;

#ifdef USE_EPOLL
    assert( POLLIN == EPOLLIN );
    assert( POLLOUT == EPOLLOUT );
    assert( POLLERR == EPOLLERR );
    assert( POLLHUP == EPOLLHUP );

    while (active_users)
    {
        timeout = get_next_timeout();

        if (!active_users) break;  /* last user removed by a timeout */
        if (epoll_fd == -1) break;  /* an error occurred with epoll */

        ret = epoll_wait( epoll_fd, epoll_events, allocated_users, timeout );

        /* put the events into the pollfd array first, like poll does */
        for (i = 0; i < ret; i++)
        {
            int user = epoll_events[i].data.u32;
            pollfd[user].revents = epoll_events[i].events;
        }

        /* read events from the pollfd array, as set_fd_events may modify them */
        for (i = 0; i < ret; i++)
        {
            int user = epoll_events[i].data.u32;
            if (pollfd[user].revents) fd_poll_event( poll_users[user], pollfd[user].revents );
        }
    }
    /* fall through to normal poll loop */
#endif  /* USE_EPOLL */

    while (active_users)
    {
        timeout = get_next_timeout();

        if (!active_users) break;  /* last user removed by a timeout */

        ret = poll( pollfd, nb_users, timeout );
        if (ret > 0)
        {
            for (i = 0; i < nb_users; i++)
            {
                if (pollfd[i].revents)
                {
                    fd_poll_event( poll_users[i], pollfd[i].revents );
                    if (!--ret) break;
                }
            }
        }
    }
}
/****************************************************************/
/* inode functions */

#define HASH_SIZE 37

static struct list inode_hash[HASH_SIZE];
/* close all pending file descriptors in the closed list */
static void inode_close_pending( struct inode *inode )
{
    struct list *ptr = list_head( &inode->closed );

    while (ptr)
    {
        struct closed_fd *fd = LIST_ENTRY( ptr, struct closed_fd, entry );
        struct list *next = list_next( &inode->closed, ptr );

        if (fd->fd != -1)
        {
            close( fd->fd );
            fd->fd = -1;
        }
        if (!fd->unlink[0])  /* get rid of it unless there's an unlink pending on that file */
        {
            list_remove( ptr );
            free( fd );
        }
        ptr = next;
    }
}
static void inode_dump( struct object *obj, int verbose )
{
    struct inode *inode = (struct inode *)obj;
    fprintf( stderr, "Inode dev=" );
    DUMP_LONG_LONG( inode->dev );
    fprintf( stderr, " ino=" );
    DUMP_LONG_LONG( inode->ino );
    fprintf( stderr, "\n" );
}
static void inode_destroy( struct object *obj )
{
    struct inode *inode = (struct inode *)obj;
    struct list *ptr;

    assert( list_empty(&inode->open) );
    assert( list_empty(&inode->locks) );

    list_remove( &inode->entry );

    while ((ptr = list_head( &inode->closed )))
    {
        struct closed_fd *fd = LIST_ENTRY( ptr, struct closed_fd, entry );
        list_remove( ptr );
        if (fd->fd != -1) close( fd->fd );
        if (fd->unlink[0])
        {
            /* make sure it is still the same file */
            struct stat st;
            if (!stat( fd->unlink, &st ) && st.st_dev == inode->dev && st.st_ino == inode->ino)
            {
                if (S_ISDIR(st.st_mode)) rmdir( fd->unlink );
                else unlink( fd->unlink );
            }
        }
        free( fd );
    }
}
/* retrieve the inode object for a given fd, creating it if needed */
static struct inode *get_inode( dev_t dev, ino_t ino )
{
    struct list *ptr;
    struct inode *inode;
    unsigned int hash = (dev ^ ino) % HASH_SIZE;

    if (inode_hash[hash].next)
    {
        LIST_FOR_EACH( ptr, &inode_hash[hash] )
        {
            inode = LIST_ENTRY( ptr, struct inode, entry );
            if (inode->dev == dev && inode->ino == ino)
                return (struct inode *)grab_object( inode );
        }
    }
    else list_init( &inode_hash[hash] );

    /* not found, create it */
    if ((inode = alloc_object( &inode_ops )))
    {
        inode->hash = hash;
        inode->dev  = dev;
        inode->ino  = ino;
        list_init( &inode->open );
        list_init( &inode->locks );
        list_init( &inode->closed );
        list_add_head( &inode_hash[hash], &inode->entry );
    }
    return inode;
}
/* add fd to the inode list of file descriptors to close */
static void inode_add_closed_fd( struct inode *inode, struct closed_fd *fd )
{
    if (!list_empty( &inode->locks ))
    {
        list_add_head( &inode->closed, &fd->entry );
    }
    else if (fd->unlink[0])  /* close the fd but keep the structure around for unlink */
    {
        close( fd->fd );
        fd->fd = -1;
        list_add_head( &inode->closed, &fd->entry );
    }
    else  /* no locks on this inode and no unlink, get rid of the fd */
    {
        close( fd->fd );
        free( fd );
    }
}
/****************************************************************/
/* file lock functions */

static void file_lock_dump( struct object *obj, int verbose )
{
    struct file_lock *lock = (struct file_lock *)obj;
    fprintf( stderr, "Lock %s fd=%p proc=%p start=",
             lock->shared ? "shared" : "excl", lock->fd, lock->process );
    DUMP_LONG_LONG( lock->start );
    fprintf( stderr, " end=" );
    DUMP_LONG_LONG( lock->end );
    fprintf( stderr, "\n" );
}
static int file_lock_signaled( struct object *obj, struct thread *thread )
{
    struct file_lock *lock = (struct file_lock *)obj;
    /* lock is signaled if it has lost its owner */
    return !lock->process;
}
/* set (or remove) a Unix lock if possible for the given range */
static int set_unix_lock( struct fd *fd, file_pos_t start, file_pos_t end, int type )
{
    struct flock fl;

    if (!fd->fs_locks) return 1;  /* no fs locks possible for this fd */
    for (;;)
    {
        if (start == end) return 1;  /* can't set zero-byte lock */
        if (start > max_unix_offset) return 1;  /* ignore it */
        fl.l_type   = type;
        fl.l_whence = SEEK_SET;
        fl.l_start  = start;
        if (!end || end > max_unix_offset) fl.l_len = 0;
        else fl.l_len = end - start;
        if (fcntl( fd->unix_fd, F_SETLK, &fl ) != -1) return 1;

        switch(errno)
        {
        case EACCES:
            /* check whether locks work at all on this file system */
            if (fcntl( fd->unix_fd, F_GETLK, &fl ) != -1)
            {
                set_error( STATUS_FILE_LOCK_CONFLICT );
                return 0;
            }
            /* fall through */
        case EIO:
        case ENOLCK:
            /* no locking on this fs, just ignore it */
            fd->fs_locks = 0;
            return 1;
        case EAGAIN:
            set_error( STATUS_FILE_LOCK_CONFLICT );
            return 0;
        case EBADF:
            /* this can happen if we try to set a write lock on a read-only file */
            /* we just ignore that error */
            if (fl.l_type == F_WRLCK) return 1;
            set_error( STATUS_ACCESS_DENIED );
            return 0;
#ifdef EOVERFLOW
        case EOVERFLOW:
#endif
        case EINVAL:
            /* this can happen if off_t is 64-bit but the kernel only supports 32-bit */
            /* in that case we shrink the limit and retry */
            if (max_unix_offset > INT_MAX)
            {
                max_unix_offset = INT_MAX;
                break;  /* retry */
            }
            /* fall through */
        default:
            file_set_error();
            return 0;
        }
    }
}
/* check if interval [start;end) overlaps the lock */
inline static int lock_overlaps( struct file_lock *lock, file_pos_t start, file_pos_t end )
{
    if (lock->end && start >= lock->end) return 0;
    if (end && lock->start >= end) return 0;
    return 1;
}
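/* Example (illustration only): a lock on [100;200) overlaps a request
 * for [150;250) since neither test above fires, while [200;250) does not
 * overlap because start >= lock->end.  An end of 0 stands for "up to
 * infinity" in both the lock and the request. */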
/* remove Unix locks for all bytes in the specified area that are no longer locked */
static void remove_unix_locks( struct fd *fd, file_pos_t start, file_pos_t end )
{
    struct hole
    {
        struct hole *next;
        struct hole *prev;
        file_pos_t   start;
        file_pos_t   end;
    } *first, *cur, *next, *buffer;

    struct list *ptr;
    int count = 0;

    if (!fd->inode) return;
    if (!fd->fs_locks) return;
    if (start == end || start > max_unix_offset) return;
    if (!end || end > max_unix_offset) end = max_unix_offset + 1;

    /* count the number of locks overlapping the specified area */

    LIST_FOR_EACH( ptr, &fd->inode->locks )
    {
        struct file_lock *lock = LIST_ENTRY( ptr, struct file_lock, inode_entry );
        if (lock->start == lock->end) continue;
        if (lock_overlaps( lock, start, end )) count++;
    }

    if (!count)  /* no locks at all, we can unlock everything */
    {
        set_unix_lock( fd, start, end, F_UNLCK );
        return;
    }

    /* allocate space for the list of holes */
    /* max. number of holes is number of locks + 1 */

    if (!(buffer = malloc( sizeof(*buffer) * (count+1) ))) return;
    first = buffer;
    first->next  = NULL;
    first->prev  = NULL;
    first->start = start;
    first->end   = end;
    next = first + 1;

    /* build a sorted list of unlocked holes in the specified area */

    LIST_FOR_EACH( ptr, &fd->inode->locks )
    {
        struct file_lock *lock = LIST_ENTRY( ptr, struct file_lock, inode_entry );
        if (lock->start == lock->end) continue;
        if (!lock_overlaps( lock, start, end )) continue;

        /* go through all the holes touched by this lock */
        for (cur = first; cur; cur = cur->next)
        {
            if (cur->end <= lock->start) continue;  /* hole is before start of lock */
            if (lock->end && cur->start >= lock->end) break;  /* hole is after end of lock */

            /* now we know that lock is overlapping hole */

            if (cur->start >= lock->start)  /* lock starts before hole, shrink from start */
            {
                cur->start = lock->end;
                if (cur->start && cur->start < cur->end) break;  /* done with this lock */
                /* now hole is empty, remove it */
                if (cur->next) cur->next->prev = cur->prev;
                if (cur->prev) cur->prev->next = cur->next;
                else if (!(first = cur->next)) goto done;  /* no more holes at all */
            }
            else if (!lock->end || cur->end <= lock->end)  /* lock larger than hole, shrink from end */
            {
                cur->end = lock->start;
                assert( cur->start < cur->end );
            }
            else  /* lock is in the middle of hole, split hole in two */
            {
                next->prev = cur;
                next->next = cur->next;
                cur->next = next;
                next->start = lock->end;
                next->end = cur->end;
                cur->end = lock->start;
                assert( next->start < next->end );
                assert( cur->end < next->start );
                next++;
                break;  /* done with this lock */
            }
        }
    }

    /* clear Unix locks for all the holes */

    for (cur = first; cur; cur = cur->next)
        set_unix_lock( fd, cur->start, cur->end, F_UNLCK );

done:
    free( buffer );
}
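/* Worked example (illustration only): unlocking the area [0;100) while a
 * remaining lock still covers [40;60): the single initial hole [0;100) is
 * split into [0;40) and [60;100), and F_UNLCK is then issued for those
 * two holes only, so the still-locked middle range is left untouched. */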
/* create a new lock on a fd */
static struct file_lock *add_lock( struct fd *fd, int shared, file_pos_t start, file_pos_t end )
{
    struct file_lock *lock;

    if (!fd->inode)  /* not a regular file */
    {
        set_error( STATUS_INVALID_HANDLE );
        return NULL;
    }

    if (!(lock = alloc_object( &file_lock_ops ))) return NULL;
    lock->shared  = shared;
    lock->start   = start;
    lock->end     = end;
    lock->fd      = fd;
    lock->process = current->process;

    /* now try to set a Unix lock */
    if (!set_unix_lock( lock->fd, lock->start, lock->end, lock->shared ? F_RDLCK : F_WRLCK ))
    {
        release_object( lock );
        return NULL;
    }
    list_add_head( &fd->locks, &lock->fd_entry );
    list_add_head( &fd->inode->locks, &lock->inode_entry );
    list_add_head( &lock->process->locks, &lock->proc_entry );
    return lock;
}
/* remove an existing lock */
static void remove_lock( struct file_lock *lock, int remove_unix )
{
    struct inode *inode = lock->fd->inode;

    list_remove( &lock->fd_entry );
    list_remove( &lock->inode_entry );
    list_remove( &lock->proc_entry );
    if (remove_unix) remove_unix_locks( lock->fd, lock->start, lock->end );
    if (list_empty( &inode->locks )) inode_close_pending( inode );
    lock->process = NULL;
    wake_up( &lock->obj, 0 );
    release_object( lock );
}
/* remove all locks owned by a given process */
void remove_process_locks( struct process *process )
{
    struct list *ptr;

    while ((ptr = list_head( &process->locks )))
    {
        struct file_lock *lock = LIST_ENTRY( ptr, struct file_lock, proc_entry );
        remove_lock( lock, 1 );  /* this removes it from the list */
    }
}
/* remove all locks on a given fd */
static void remove_fd_locks( struct fd *fd )
{
    file_pos_t start = FILE_POS_T_MAX, end = 0;
    struct list *ptr;

    while ((ptr = list_head( &fd->locks )))
    {
        struct file_lock *lock = LIST_ENTRY( ptr, struct file_lock, fd_entry );
        if (lock->start < start) start = lock->start;
        if (!lock->end || lock->end > end) end = lock->end - 1;
        remove_lock( lock, 0 );
    }
    if (start < end) remove_unix_locks( fd, start, end + 1 );
}
/* add a lock on an fd */
/* returns handle to wait on */
obj_handle_t lock_fd( struct fd *fd, file_pos_t start, file_pos_t count, int shared, int wait )
{
    struct list *ptr;
    file_pos_t end = start + count;

    /* don't allow wrapping locks */
    if (end && end < start)
    {
        set_error( STATUS_INVALID_PARAMETER );
        return 0;
    }

    /* check if another lock on that file overlaps the area */
    LIST_FOR_EACH( ptr, &fd->inode->locks )
    {
        struct file_lock *lock = LIST_ENTRY( ptr, struct file_lock, inode_entry );
        if (!lock_overlaps( lock, start, end )) continue;
        if (lock->shared && shared) continue;
        /* found one */
        if (!wait)
        {
            set_error( STATUS_FILE_LOCK_CONFLICT );
            return 0;
        }
        set_error( STATUS_PENDING );
        return alloc_handle( current->process, lock, SYNCHRONIZE, 0 );
    }

    /* not found, add it */
    if (add_lock( fd, shared, start, end )) return 0;
    if (get_error() == STATUS_FILE_LOCK_CONFLICT)
    {
        /* Unix lock conflict -> tell client to wait and retry */
        if (wait) set_error( STATUS_PENDING );
    }
    return 0;
}
/* remove a lock on an fd */
void unlock_fd( struct fd *fd, file_pos_t start, file_pos_t count )
{
    struct list *ptr;
    file_pos_t end = start + count;

    /* find an existing lock with the exact same parameters */
    LIST_FOR_EACH( ptr, &fd->locks )
    {
        struct file_lock *lock = LIST_ENTRY( ptr, struct file_lock, fd_entry );
        if ((lock->start == start) && (lock->end == end))
        {
            remove_lock( lock, 1 );
            return;
        }
    }
    set_error( STATUS_FILE_LOCK_CONFLICT );
}
/****************************************************************/
/* asynchronous operations support */

struct async
{
    struct thread       *thread;    /* owning thread */
    void                *apc;       /* APC routine to queue upon completion */
    void                *user;      /* user handle passed back to the APC */
    void                *sb;        /* status block passed back to the APC */
    struct timeout_user *timeout;   /* timeout user for this request, if any */
    struct list          entry;     /* entry in the fd's async queue */
};
/* notifies client thread of new status of its async request */
/* destroys the server side of it */
static void async_terminate( struct async *async, int status )
{
    thread_queue_apc( async->thread, NULL, async->apc, APC_ASYNC_IO,
                      1, async->user, async->sb, (void *)status );

    if (async->timeout) remove_timeout_user( async->timeout );
    async->timeout = NULL;
    list_remove( &async->entry );
    release_object( async->thread );
    free( async );
}
/* cb for timeout on an async request */
static void async_callback(void *private)
{
    struct async *async = (struct async *)private;

    /* fprintf(stderr, "async timed out %p\n", async); */
    async->timeout = NULL;
    async_terminate( async, STATUS_TIMEOUT );
}
/* create an async on a given queue of a fd */
struct async *create_async( struct thread *thread, int *timeout, struct list *queue,
                            void *io_apc, void *io_user, void *io_sb )
{
    struct async *async = mem_alloc( sizeof(struct async) );

    if (!async) return NULL;

    async->thread = (struct thread *)grab_object(thread);
    async->apc    = io_apc;
    async->user   = io_user;
    async->sb     = io_sb;

    list_add_tail( queue, &async->entry );

    if (timeout)
    {
        struct timeval when;

        gettimeofday( &when, NULL );
        add_timeout( &when, *timeout );
        async->timeout = add_timeout_user( &when, async_callback, async );
    }
    else async->timeout = NULL;

    return async;
}
/* terminate the async operation at the head of the queue */
void async_terminate_head( struct list *queue, int status )
{
    struct list *ptr = list_head( queue );
    if (ptr) async_terminate( LIST_ENTRY( ptr, struct async, entry ), status );
}
/****************************************************************/
/* file descriptor functions */

static void fd_dump( struct object *obj, int verbose )
{
    struct fd *fd = (struct fd *)obj;
    fprintf( stderr, "Fd unix_fd=%d user=%p", fd->unix_fd, fd->user );
    if (fd->inode) fprintf( stderr, " inode=%p unlink='%s'", fd->inode, fd->closed->unlink );
    fprintf( stderr, "\n" );
}
static void fd_destroy( struct object *obj )
{
    struct fd *fd = (struct fd *)obj;

    async_terminate_queue( &fd->read_q, STATUS_CANCELLED );
    async_terminate_queue( &fd->write_q, STATUS_CANCELLED );

    remove_fd_locks( fd );
    list_remove( &fd->inode_entry );
    if (fd->poll_index != -1) remove_poll_user( fd, fd->poll_index );
    if (fd->inode)
    {
        inode_add_closed_fd( fd->inode, fd->closed );
        release_object( fd->inode );
    }
    else  /* no inode, close it right away */
    {
        if (fd->unix_fd != -1) close( fd->unix_fd );
    }
}
/* set the events that select waits for on this fd */
void set_fd_events( struct fd *fd, int events )
{
    int user = fd->poll_index;
    assert( poll_users[user] == fd );

    set_fd_epoll_events( fd, user, events );

    if (events == -1)  /* stop waiting on this fd completely */
    {
        pollfd[user].fd = -1;
        pollfd[user].events = POLLERR;
        pollfd[user].revents = 0;
    }
    else if (pollfd[user].fd != -1 || !pollfd[user].events)
    {
        pollfd[user].fd = fd->unix_fd;
        pollfd[user].events = events;
    }
}
/* allocate an fd object, without setting the unix fd yet */
struct fd *alloc_fd( const struct fd_ops *fd_user_ops, struct object *user )
{
    struct fd *fd = alloc_object( &fd_ops );

    if (!fd) return NULL;

    fd->fd_ops     = fd_user_ops;
    fd->user       = user;
    fd->inode      = NULL;
    fd->closed     = NULL;
    fd->access     = 0;
    fd->sharing    = 0;
    fd->unix_fd    = -1;
    fd->fs_locks   = 1;
    fd->poll_index = -1;
    list_init( &fd->inode_entry );
    list_init( &fd->locks );
    list_init( &fd->read_q );
    list_init( &fd->write_q );

    if ((fd->poll_index = add_poll_user( fd )) == -1)
    {
        release_object( fd );
        return NULL;
    }
    return fd;
}
/* check if the desired access is possible without violating */
/* the sharing mode of other opens of the same file */
static int check_sharing( struct fd *fd, unsigned int access, unsigned int sharing )
{
    unsigned int existing_sharing = FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE;
    unsigned int existing_access = 0;
    int unlink = 0;
    struct list *ptr;

    /* if access mode is 0, sharing mode is ignored */
    if (!access) sharing = existing_sharing;
    fd->access = access;
    fd->sharing = sharing;

    LIST_FOR_EACH( ptr, &fd->inode->open )
    {
        struct fd *fd_ptr = LIST_ENTRY( ptr, struct fd, inode_entry );
        if (fd_ptr != fd)
        {
            existing_sharing &= fd_ptr->sharing;
            existing_access  |= fd_ptr->access;
            if (fd_ptr->closed->unlink[0]) unlink = 1;
        }
    }

    if ((access & GENERIC_READ) && !(existing_sharing & FILE_SHARE_READ)) return 0;
    if ((access & GENERIC_WRITE) && !(existing_sharing & FILE_SHARE_WRITE)) return 0;
    if ((existing_access & GENERIC_READ) && !(sharing & FILE_SHARE_READ)) return 0;
    if ((existing_access & GENERIC_WRITE) && !(sharing & FILE_SHARE_WRITE)) return 0;
    if (fd->closed->unlink[0] && !(existing_sharing & FILE_SHARE_DELETE)) return 0;
    if (unlink && !(sharing & FILE_SHARE_DELETE)) return 0;
    return 1;
}
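/* Example (illustration only): suppose open #1 holds GENERIC_WRITE with
 * sharing = FILE_SHARE_READ.  A second open requesting GENERIC_READ
 * passes the first test only because open #1 granted FILE_SHARE_READ,
 * but it must itself grant FILE_SHARE_WRITE since the existing access
 * includes GENERIC_WRITE; otherwise check_sharing() returns 0 and the
 * caller reports STATUS_SHARING_VIOLATION. */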
/* open() wrapper using a struct fd */
/* the fd must have been created with alloc_fd */
/* on error the fd object is released */
struct fd *open_fd( struct fd *fd, const char *name, int flags, mode_t *mode,
                    unsigned int access, unsigned int sharing, unsigned int options )
{
    struct stat st;
    struct closed_fd *closed_fd;
    const char *unlink_name = "";

    assert( fd->unix_fd == -1 );

    if (options & FILE_DELETE_ON_CLOSE) unlink_name = name;
    if (!(closed_fd = mem_alloc( sizeof(*closed_fd) + strlen(unlink_name) )))
    {
        release_object( fd );
        return NULL;
    }
    /* create the directory if needed */
    if ((options & FILE_DIRECTORY_FILE) && (flags & O_CREAT))
    {
        if (mkdir( name, 0777 ) == -1)
        {
            if (errno != EEXIST || (flags & O_EXCL))
            {
                file_set_error();
                release_object( fd );
                free( closed_fd );
                return NULL;
            }
        }
        flags &= ~(O_CREAT | O_EXCL | O_TRUNC);
    }
    if ((fd->unix_fd = open( name, flags & ~O_TRUNC, *mode )) == -1)
    {
        file_set_error();
        release_object( fd );
        free( closed_fd );
        return NULL;
    }
    closed_fd->fd = fd->unix_fd;
    closed_fd->unlink[0] = 0;
    fstat( fd->unix_fd, &st );
    *mode = st.st_mode;

    /* only bother with an inode for normal files and directories */
    if (S_ISREG(st.st_mode) || S_ISDIR(st.st_mode))
    {
        struct inode *inode = get_inode( st.st_dev, st.st_ino );

        if (!inode)
        {
            /* we can close the fd because there are no others open on the same file,
             * otherwise we wouldn't have failed to allocate a new inode
             */
            goto error;
        }
        fd->inode = inode;
        fd->closed = closed_fd;
        list_add_head( &inode->open, &fd->inode_entry );

        /* check directory options */
        if ((options & FILE_DIRECTORY_FILE) && !S_ISDIR(st.st_mode))
        {
            release_object( fd );
            set_error( STATUS_NOT_A_DIRECTORY );
            return NULL;
        }
        if ((options & FILE_NON_DIRECTORY_FILE) && S_ISDIR(st.st_mode))
        {
            release_object( fd );
            set_error( STATUS_FILE_IS_A_DIRECTORY );
            return NULL;
        }
        if (!check_sharing( fd, access, sharing ))
        {
            release_object( fd );
            set_error( STATUS_SHARING_VIOLATION );
            return NULL;
        }
        strcpy( closed_fd->unlink, unlink_name );
        if (flags & O_TRUNC) ftruncate( fd->unix_fd, 0 );
    }
    else  /* special file */
    {
        if (options & FILE_DIRECTORY_FILE)
        {
            set_error( STATUS_NOT_A_DIRECTORY );
            goto error;
        }
        if (unlink_name[0])  /* we can't unlink special files */
        {
            set_error( STATUS_INVALID_PARAMETER );
            goto error;
        }
        free( closed_fd );
    }
    return fd;

error:
    release_object( fd );
    free( closed_fd );
    return NULL;
}
/* create an fd for an anonymous file */
/* if the function fails the unix fd is closed */
struct fd *create_anonymous_fd( const struct fd_ops *fd_user_ops, int unix_fd, struct object *user )
{
    struct fd *fd = alloc_fd( fd_user_ops, user );

    if (fd)
    {
        fd->unix_fd = unix_fd;
        return fd;
    }
    close( unix_fd );
    return NULL;
}
/* retrieve the object that is using an fd */
void *get_fd_user( struct fd *fd )
{
    return fd->user;
}

/* retrieve the unix fd for an object */
int get_unix_fd( struct fd *fd )
{
    return fd->unix_fd;
}

/* check if two file descriptors point to the same file */
int is_same_file_fd( struct fd *fd1, struct fd *fd2 )
{
    return fd1->inode == fd2->inode;
}
/* callback for event happening in the main poll() loop */
void fd_poll_event( struct fd *fd, int event )
{
    return fd->fd_ops->poll_event( fd, event );
}
/* check if events are pending and if yes return which one(s) */
int check_fd_events( struct fd *fd, int events )
{
    struct pollfd pfd;

    pfd.fd = fd->unix_fd;
    pfd.events = events;
    if (poll( &pfd, 1, 0 ) <= 0) return 0;
    return pfd.revents;
}
/* default add_queue() routine for objects that poll() on an fd */
int default_fd_add_queue( struct object *obj, struct wait_queue_entry *entry )
{
    struct fd *fd = get_obj_fd( obj );

    if (!fd) return 0;
    if (list_empty( &obj->wait_queue ))  /* first on the queue */
        set_fd_events( fd, fd->fd_ops->get_poll_events( fd ) );
    add_queue( obj, entry );
    release_object( fd );
    return 1;
}
/* default remove_queue() routine for objects that poll() on an fd */
void default_fd_remove_queue( struct object *obj, struct wait_queue_entry *entry )
{
    struct fd *fd = get_obj_fd( obj );

    grab_object( obj );
    remove_queue( obj, entry );
    if (list_empty( &obj->wait_queue ))  /* last on the queue is gone */
        set_fd_events( fd, 0 );
    release_object( obj );
    release_object( fd );
}
/* default signaled() routine for objects that poll() on an fd */
int default_fd_signaled( struct object *obj, struct thread *thread )
{
    int events, ret;
    struct fd *fd = get_obj_fd( obj );

    if (fd->inode)  /* regular files are always signaled */
    {
        release_object( fd );
        return 1;
    }

    events = fd->fd_ops->get_poll_events( fd );
    ret = check_fd_events( fd, events ) != 0;

    if (ret)
        set_fd_events( fd, 0 );  /* stop waiting on select() if we are signaled */
    else if (!list_empty( &obj->wait_queue ))
        set_fd_events( fd, events );  /* restart waiting on poll() if we are no longer signaled */

    release_object( fd );
    return ret;
}
int default_fd_get_poll_events( struct fd *fd )
{
    int events = 0;

    if (!list_empty( &fd->read_q ))  events |= POLLIN;
    if (!list_empty( &fd->write_q )) events |= POLLOUT;
    return events;
}
/* default handler for poll() events */
void default_poll_event( struct fd *fd, int event )
{
    if (!list_empty( &fd->read_q ) && (POLLIN & event) )
    {
        async_terminate_head( &fd->read_q, STATUS_ALERTED );
        return;
    }
    if (!list_empty( &fd->write_q ) && (POLLOUT & event) )
    {
        async_terminate_head( &fd->write_q, STATUS_ALERTED );
        return;
    }

    /* if an error occurred, stop polling this fd to avoid busy-looping */
    if (event & (POLLERR | POLLHUP)) set_fd_events( fd, -1 );
    wake_up( fd->user, 0 );
}
void default_fd_queue_async( struct fd *fd, void *apc, void *user, void *io_sb, int type, int count )
{
    struct list *queue;
    int events;

    if (!(fd->fd_ops->get_file_info( fd ) & FD_FLAG_OVERLAPPED))
    {
        set_error( STATUS_INVALID_HANDLE );
        return;
    }

    switch (type)
    {
    case ASYNC_TYPE_READ:
        queue = &fd->read_q;
        break;
    case ASYNC_TYPE_WRITE:
        queue = &fd->write_q;
        break;
    default:
        set_error( STATUS_INVALID_PARAMETER );
        return;
    }

    if (!create_async( current, NULL, queue, apc, user, io_sb )) return;

    /* Check if the new pending request can be served immediately */
    events = check_fd_events( fd, fd->fd_ops->get_poll_events( fd ) );
    if (events) fd->fd_ops->poll_event( fd, events );

    set_fd_events( fd, fd->fd_ops->get_poll_events( fd ) );
}
void default_fd_cancel_async( struct fd *fd )
{
    async_terminate_queue( &fd->read_q, STATUS_CANCELLED );
    async_terminate_queue( &fd->write_q, STATUS_CANCELLED );
}
/* default flush() routine */
int no_flush( struct fd *fd, struct event **event )
{
    set_error( STATUS_OBJECT_TYPE_MISMATCH );
    return 0;
}

/* default get_file_info() routine */
int no_get_file_info( struct fd *fd )
{
    set_error( STATUS_OBJECT_TYPE_MISMATCH );
    return 0;
}

/* default queue_async() routine */
void no_queue_async( struct fd *fd, void *apc, void *user, void *io_sb,
                     int type, int count )
{
    set_error( STATUS_OBJECT_TYPE_MISMATCH );
}

/* default cancel_async() routine */
void no_cancel_async( struct fd *fd )
{
    set_error( STATUS_OBJECT_TYPE_MISMATCH );
}
/* same as get_handle_obj but retrieve the struct fd associated to the object */
static struct fd *get_handle_fd_obj( struct process *process, obj_handle_t handle,
                                     unsigned int access )
{
    struct fd *fd = NULL;
    struct object *obj;

    if ((obj = get_handle_obj( process, handle, access, NULL )))
    {
        fd = get_obj_fd( obj );
        release_object( obj );
    }
    return fd;
}
/* flush a file buffers */
DECL_HANDLER(flush_file)
{
    struct fd *fd = get_handle_fd_obj( current->process, req->handle, 0 );
    struct event *event = NULL;

    if (fd)
    {
        fd->fd_ops->flush( fd, &event );
        if (event)
            reply->event = alloc_handle( current->process, event, SYNCHRONIZE, 0 );
        release_object( fd );
    }
}
/* get a Unix fd to access a file */
DECL_HANDLER(get_handle_fd)
{
    struct fd *fd;

    reply->fd = -1;

    if ((fd = get_handle_fd_obj( current->process, req->handle, req->access )))
    {
        int unix_fd = get_handle_unix_fd( current->process, req->handle, req->access );
        if (unix_fd != -1) reply->fd = unix_fd;
        else if (!get_error())
        {
            assert( fd->unix_fd != -1 );
            send_client_fd( current->process, fd->unix_fd, req->handle );
        }
        reply->flags = fd->fd_ops->get_file_info( fd );
        release_object( fd );
    }
}
/* create / reschedule an async I/O */
DECL_HANDLER(register_async)
{
    struct fd *fd = get_handle_fd_obj( current->process, req->handle, 0 );

    /*
     * The queue_async method must do the following:
     *
     * 1. Get the async_queue for the request of given type.
     * 2. Create a new asynchronous request for the selected queue.
     * 3. Carry out any operations necessary to adjust the object's poll events
     *    Usually: set_fd_events( fd, fd->fd_ops->get_poll_events( fd ) ).
     * 4. When the async request is triggered, then send back (with a proper APC)
     *    the trigger (STATUS_ALERTED) to the thread that posted the request.
     *    async_terminate() is to be called: it will both notify the sender about
     *    the trigger and destroy the request by itself.
     * See also the implementations in file.c, serial.c, and sock.c.
     */

    if (fd)
    {
        fd->fd_ops->queue_async( fd, req->io_apc, req->io_user, req->io_sb,
                                 req->type, req->count );
        release_object( fd );
    }
}
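/* A minimal sketch of such a queue_async method, modeled on
 * default_fd_queue_async() above ("myobj_queue_async" is a hypothetical
 * name used for illustration only):
 *
 *     void myobj_queue_async( struct fd *fd, void *apc, void *user,
 *                             void *io_sb, int type, int count )
 *     {
 *         struct list *queue = (type == ASYNC_TYPE_READ) ? &fd->read_q
 *                                                        : &fd->write_q;
 *         if (!create_async( current, NULL, queue, apc, user, io_sb )) return;
 *         set_fd_events( fd, fd->fd_ops->get_poll_events( fd ) );
 *     }
 *
 * Real implementations live in file.c, serial.c, and sock.c, as the
 * comment above notes.
 */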
/* cancels all async I/O */
DECL_HANDLER(cancel_async)
{
    struct fd *fd = get_handle_fd_obj( current->process, req->handle, 0 );

    if (fd)
    {
        /* Note: we don't kill the queued APC_ASYNC_IO on this thread because
         * NtCancelIoFile() will force the pending APC to be run.  Since
         * Windows only guarantees that the current thread will have no async
         * operation on the current fd when NtCancelIoFile returns, this shall
         * suffice. */
        fd->fd_ops->cancel_async( fd );
        release_object( fd );
    }
}
);