/*
 * Server-side file descriptor management
 *
 * Copyright (C) 2000, 2003 Alexandre Julliard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#ifdef HAVE_SYS_POLL_H
#include <sys/poll.h>
#endif
#include <sys/types.h>
#if defined(HAVE_SYS_EPOLL_H) && defined(HAVE_EPOLL_CREATE)
# include <sys/epoll.h>
# define USE_EPOLL
#elif defined(linux) && defined(__i386__) && defined(HAVE_STDINT_H)
# define USE_EPOLL
# define EPOLLIN POLLIN
# define EPOLLOUT POLLOUT
# define EPOLLERR POLLERR
# define EPOLLHUP POLLHUP
# define EPOLL_CTL_ADD 1
# define EPOLL_CTL_DEL 2
# define EPOLL_CTL_MOD 3

typedef union epoll_data
{
  void *ptr;
  int fd;
  uint32_t u32;
  uint64_t u64;
} epoll_data_t;

struct epoll_event
{
  uint32_t events;
  epoll_data_t data;
};

#define SYSCALL_RET(ret) do { \
        if (ret < 0) { errno = -ret; ret = -1; } \
        return ret; \
    } while(0)

static inline int epoll_create( int size )
{
    int ret;
    __asm__( "pushl %%ebx; movl %2,%%ebx; int $0x80; popl %%ebx"
             : "=a" (ret) : "0" (254 /*NR_epoll_create*/), "r" (size) );
    SYSCALL_RET(ret);
}

static inline int epoll_ctl( int epfd, int op, int fd, const struct epoll_event *event )
{
    int ret;
    __asm__( "pushl %%ebx; movl %2,%%ebx; int $0x80; popl %%ebx"
             : "=a" (ret)
             : "0" (255 /*NR_epoll_ctl*/), "r" (epfd), "c" (op), "d" (fd), "S" (event), "m" (*event) );
    SYSCALL_RET(ret);
}

static inline int epoll_wait( int epfd, struct epoll_event *events, int maxevents, int timeout )
{
    int ret;
    __asm__( "pushl %%ebx; movl %2,%%ebx; int $0x80; popl %%ebx"
             : "=a" (ret)
             : "0" (256 /*NR_epoll_wait*/), "r" (epfd), "c" (events), "d" (maxevents), "S" (timeout)
             : "memory" );
    SYSCALL_RET(ret);
}

#endif /* linux && __i386__ && HAVE_STDINT_H */
/* Because of the stupid Posix locking semantics, we need to keep
 * track of all file descriptors referencing a given file, and not
 * close a single one until all the locks are gone (sigh).
 */

/* file descriptor object */

/* closed_fd is used to keep track of the unix fd belonging to a closed fd object */
struct closed_fd
{
    struct list entry;       /* entry in inode closed list */
    int         fd;          /* the unix file descriptor */
    char        unlink[1];   /* name to unlink on close (if any) */
};
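/* Note: a closed_fd is allocated with extra room after the structure for the
 * unlink name (see the mem_alloc call in open_fd), so unlink[] acts as an
 * inline variable-length string; an empty string means nothing to unlink. */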
struct fd
{
    struct object        obj;         /* object header */
    const struct fd_ops *fd_ops;      /* file descriptor operations */
    struct inode        *inode;       /* inode that this fd belongs to */
    struct list          inode_entry; /* entry in inode fd list */
    struct closed_fd    *closed;      /* structure to store the unix fd at destroy time */
    struct object       *user;        /* object using this file descriptor */
    struct list          locks;       /* list of locks on this fd */
    unsigned int         access;      /* file access (GENERIC_READ/WRITE) */
    unsigned int         sharing;     /* file sharing mode */
    int                  unix_fd;     /* unix file descriptor */
    int                  fs_locks;    /* can we use filesystem locks for this fd? */
    int                  poll_index;  /* index of fd in poll array */
    struct list          read_q;      /* async readers of this fd */
    struct list          write_q;     /* async writers of this fd */
};
static void fd_dump( struct object *obj, int verbose );
static void fd_destroy( struct object *obj );

static const struct object_ops fd_ops =
{
    sizeof(struct fd),        /* size */
    fd_dump,                  /* dump */
    no_add_queue,             /* add_queue */
    NULL,                     /* remove_queue */
    NULL,                     /* signaled */
    NULL,                     /* satisfied */
    no_signal,                /* signal */
    no_get_fd,                /* get_fd */
    no_close_handle,          /* close_handle */
    fd_destroy                /* destroy */
};
struct inode
{
    struct object       obj;        /* object header */
    struct list         entry;      /* inode hash list entry */
    unsigned int        hash;       /* hashing code */
    dev_t               dev;        /* device number */
    ino_t               ino;        /* inode number */
    struct list         open;       /* list of open file descriptors */
    struct list         locks;      /* list of file locks */
    struct list         closed;     /* list of file descriptors to close at destroy time */
};

static void inode_dump( struct object *obj, int verbose );
static void inode_destroy( struct object *obj );
static const struct object_ops inode_ops =
{
    sizeof(struct inode),     /* size */
    inode_dump,               /* dump */
    no_add_queue,             /* add_queue */
    NULL,                     /* remove_queue */
    NULL,                     /* signaled */
    NULL,                     /* satisfied */
    no_signal,                /* signal */
    no_get_fd,                /* get_fd */
    no_close_handle,          /* close_handle */
    inode_destroy             /* destroy */
};
/* file lock object */

struct file_lock
{
    struct object       obj;         /* object header */
    struct fd          *fd;          /* fd owning this lock */
    struct list         fd_entry;    /* entry in list of locks on a given fd */
    struct list         inode_entry; /* entry in inode list of locks */
    int                 shared;      /* shared lock? */
    file_pos_t          start;       /* locked region is interval [start;end) */
    file_pos_t          end;
    struct process     *process;     /* process owning this lock */
    struct list         proc_entry;  /* entry in list of locks owned by the process */
};

static void file_lock_dump( struct object *obj, int verbose );
static int file_lock_signaled( struct object *obj, struct thread *thread );
static const struct object_ops file_lock_ops =
{
    sizeof(struct file_lock),   /* size */
    file_lock_dump,             /* dump */
    add_queue,                  /* add_queue */
    remove_queue,               /* remove_queue */
    file_lock_signaled,         /* signaled */
    no_satisfied,               /* satisfied */
    no_signal,                  /* signal */
    no_get_fd,                  /* get_fd */
    no_close_handle,            /* close_handle */
    no_destroy                  /* destroy */
};


#define OFF_T_MAX       (~((file_pos_t)1 << (8*sizeof(off_t)-1)))
#define FILE_POS_T_MAX  (~(file_pos_t)0)

static file_pos_t max_unix_offset = OFF_T_MAX;
#define DUMP_LONG_LONG(val) do { \
    if (sizeof(val) > sizeof(unsigned long) && (val) > ~0UL) \
        fprintf( stderr, "%lx%08lx", (unsigned long)((val) >> 32), (unsigned long)(val) ); \
    else \
        fprintf( stderr, "%lx", (unsigned long)(val) ); \
  } while (0)
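/* Usage sketch: DUMP_LONG_LONG prints a value that may be wider than
 * "unsigned long" (dev_t, ino_t, file_pos_t) in hex using only long format
 * specifiers, e.g. DUMP_LONG_LONG( inode->ino ) as done in inode_dump(). */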
/****************************************************************/
/* timeouts support */

struct timeout_user
{
    struct list           entry;      /* entry in sorted timeout list */
    struct timeval        when;       /* timeout expiry (absolute time) */
    timeout_callback      callback;   /* callback function */
    void                 *private;    /* callback private data */
};

static struct list timeout_list = LIST_INIT(timeout_list);   /* sorted timeouts list */
/* add a timeout user */
struct timeout_user *add_timeout_user( const struct timeval *when, timeout_callback func,
                                        void *private )
{
    struct timeout_user *user;
    struct list *ptr;

    if (!(user = mem_alloc( sizeof(*user) ))) return NULL;
    user->when     = *when;
    user->callback = func;
    user->private  = private;

    /* Now insert it in the linked list */

    LIST_FOR_EACH( ptr, &timeout_list )
    {
        struct timeout_user *timeout = LIST_ENTRY( ptr, struct timeout_user, entry );
        if (!time_before( &timeout->when, when )) break;
    }
    list_add_before( ptr, &user->entry );
    return user;
}
/* remove a timeout user */
void remove_timeout_user( struct timeout_user *user )
{
    list_remove( &user->entry );
    free( user );
}
/* add a timeout in milliseconds to an absolute time */
void add_timeout( struct timeval *when, int timeout )
{
    if (timeout)
    {
        long sec = timeout / 1000;
        if ((when->tv_usec += (timeout - 1000*sec) * 1000) >= 1000000)
        {
            when->tv_usec -= 1000000;
            when->tv_sec++;
        }
        when->tv_sec += sec;
    }
}
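/* Worked example: starting from when = { tv_sec = 10, tv_usec = 600000 },
 * add_timeout( &when, 1500 ) computes sec = 1, adds 500000 usec (1100000
 * wraps to 100000 with tv_sec++), then adds sec, ending at
 * { tv_sec = 12, tv_usec = 100000 }. */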
/****************************************************************/

static struct fd **poll_users;              /* users array */
static struct pollfd *pollfd;               /* poll fd array */
static int nb_users;                        /* count of array entries actually in use */
static int active_users;                    /* current number of active users */
static int allocated_users;                 /* count of allocated entries in the array */
static struct fd **freelist;                /* list of free entries in the array */

#ifdef USE_EPOLL

static int epoll_fd = -1;
static struct epoll_event *epoll_events;
/* set the events that epoll waits for on this fd; helper for set_fd_events */
static inline void set_fd_epoll_events( struct fd *fd, int user, int events )
{
    struct epoll_event ev;
    int ctl;

    if (epoll_fd == -1) return;

    if (events == -1)  /* stop waiting on this fd completely */
    {
        if (pollfd[user].fd == -1) return;  /* already removed */
        ctl = EPOLL_CTL_DEL;
        events = 0;
    }
    else if (pollfd[user].fd == -1)
    {
        if (pollfd[user].events) return;  /* stopped waiting on it, don't restart */
        ctl = EPOLL_CTL_ADD;
    }
    else
    {
        if (pollfd[user].events == events) return;  /* nothing to do */
        ctl = EPOLL_CTL_MOD;
    }

    ev.events = events;
    ev.data.u32 = user;

    if (epoll_ctl( epoll_fd, ctl, fd->unix_fd, &ev ) == -1)
    {
        if (errno == ENOMEM)  /* not enough memory, give up on epoll */
        {
            close( epoll_fd );
            epoll_fd = -1;
        }
        else perror( "epoll_ctl" );  /* should not happen */
    }
}
#else /* USE_EPOLL */

static inline void set_fd_epoll_events( struct fd *fd, int user, int events )
{
}

#endif /* USE_EPOLL */
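/* When epoll is not available this helper is a no-op and the server relies
 * entirely on the plain poll() loop in the main loop below. */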
/* add a user in the poll array and return its index, or -1 on failure */
static int add_poll_user( struct fd *fd )
{
    int ret;

    if (freelist)
    {
        ret = freelist - poll_users;
        freelist = (struct fd **)poll_users[ret];
    }
    else
    {
        if (nb_users == allocated_users)
        {
            struct fd **newusers;
            struct pollfd *newpoll;
            int new_count = allocated_users ? (allocated_users + allocated_users / 2) : 16;
            if (!(newusers = realloc( poll_users, new_count * sizeof(*poll_users) ))) return -1;
            if (!(newpoll = realloc( pollfd, new_count * sizeof(*pollfd) )))
            {
                if (allocated_users)
                    poll_users = newusers;
                else
                    free( newusers );
                return -1;
            }
            poll_users = newusers;
            pollfd = newpoll;
#ifdef USE_EPOLL
            if (!allocated_users) epoll_fd = epoll_create( new_count );
            if (epoll_fd != -1)
            {
                struct epoll_event *new_events;
                if (!(new_events = realloc( epoll_events, new_count * sizeof(*epoll_events) )))
                    return -1;
                epoll_events = new_events;
            }
#endif
            allocated_users = new_count;
        }
        ret = nb_users++;
    }
    pollfd[ret].fd = -1;
    pollfd[ret].events = 0;
    pollfd[ret].revents = 0;
    poll_users[ret] = fd;
    active_users++;
    return ret;
}
/* remove a user from the poll list */
static void remove_poll_user( struct fd *fd, int user )
{
    assert( user >= 0 );
    assert( poll_users[user] == fd );

#ifdef USE_EPOLL
    if (epoll_fd != -1 && pollfd[user].fd != -1)
    {
        struct epoll_event dummy;
        epoll_ctl( epoll_fd, EPOLL_CTL_DEL, fd->unix_fd, &dummy );
    }
#endif
    pollfd[user].fd = -1;
    pollfd[user].events = 0;
    pollfd[user].revents = 0;
    poll_users[user] = (struct fd *)freelist;
    freelist = &poll_users[user];
    active_users--;
}
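/* Free slots in poll_users are chained through the array itself: a freed
 * entry stores the previous freelist head (cast to struct fd *), and
 * add_poll_user() recovers the slot index as freelist - poll_users. */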
/* process pending timeouts and return the time until the next timeout, in milliseconds */
static int get_next_timeout(void)
{
    if (!list_empty( &timeout_list ))
    {
        struct list expired_list, *ptr;
        struct timeval now;

        gettimeofday( &now, NULL );

        /* first remove all expired timers from the list */

        list_init( &expired_list );
        while ((ptr = list_head( &timeout_list )) != NULL)
        {
            struct timeout_user *timeout = LIST_ENTRY( ptr, struct timeout_user, entry );

            if (!time_before( &now, &timeout->when ))
            {
                list_remove( &timeout->entry );
                list_add_tail( &expired_list, &timeout->entry );
            }
            else break;
        }

        /* now call the callback for all the removed timers */

        while ((ptr = list_head( &expired_list )) != NULL)
        {
            struct timeout_user *timeout = LIST_ENTRY( ptr, struct timeout_user, entry );
            list_remove( &timeout->entry );
            timeout->callback( timeout->private );
            free( timeout );
        }

        if ((ptr = list_head( &timeout_list )) != NULL)
        {
            struct timeout_user *timeout = LIST_ENTRY( ptr, struct timeout_user, entry );
            int diff = (timeout->when.tv_sec - now.tv_sec) * 1000
                     + (timeout->when.tv_usec - now.tv_usec) / 1000;
            if (diff < 0) diff = 0;
            return diff;
        }
    }
    return -1;  /* no pending timeouts */
}
/* server main poll() loop */
void main_loop(void)
{
    int i, ret, timeout;

#ifdef USE_EPOLL
    assert( POLLIN == EPOLLIN );
    assert( POLLOUT == EPOLLOUT );
    assert( POLLERR == EPOLLERR );
    assert( POLLHUP == EPOLLHUP );

    if (epoll_fd != -1)
    {
        while (active_users)
        {
            timeout = get_next_timeout();

            if (!active_users) break;  /* last user removed by a timeout */
            if (epoll_fd == -1) break;  /* an error occurred with epoll */

            ret = epoll_wait( epoll_fd, epoll_events, allocated_users, timeout );

            /* put the events into the pollfd array first, like poll does */
            for (i = 0; i < ret; i++)
            {
                int user = epoll_events[i].data.u32;
                pollfd[user].revents = epoll_events[i].events;
            }

            /* read events from the pollfd array, as set_fd_events may modify them */
            for (i = 0; i < ret; i++)
            {
                int user = epoll_events[i].data.u32;
                if (pollfd[user].revents) fd_poll_event( poll_users[user], pollfd[user].revents );
            }
        }
    }
    /* fall through to normal poll loop */
#endif  /* USE_EPOLL */

    while (active_users)
    {
        timeout = get_next_timeout();

        if (!active_users) break;  /* last user removed by a timeout */

        ret = poll( pollfd, nb_users, timeout );
        if (ret > 0)
        {
            for (i = 0; i < nb_users; i++)
            {
                if (pollfd[i].revents)
                {
                    fd_poll_event( poll_users[i], pollfd[i].revents );
                    if (!--ret) break;
                }
            }
        }
    }
}
/****************************************************************/
/* inode functions */

static struct list inode_hash[HASH_SIZE];
/* close all pending file descriptors in the closed list */
static void inode_close_pending( struct inode *inode )
{
    struct list *ptr = list_head( &inode->closed );

    while (ptr)
    {
        struct closed_fd *fd = LIST_ENTRY( ptr, struct closed_fd, entry );
        struct list *next = list_next( &inode->closed, ptr );

        if (fd->fd != -1)
        {
            close( fd->fd );
            fd->fd = -1;
        }
        if (!fd->unlink[0])  /* get rid of it unless there's an unlink pending on that file */
        {
            list_remove( ptr );
            free( fd );
        }
        ptr = next;
    }
}
static void inode_dump( struct object *obj, int verbose )
{
    struct inode *inode = (struct inode *)obj;
    fprintf( stderr, "Inode dev=" );
    DUMP_LONG_LONG( inode->dev );
    fprintf( stderr, " ino=" );
    DUMP_LONG_LONG( inode->ino );
    fprintf( stderr, "\n" );
}
static void inode_destroy( struct object *obj )
{
    struct inode *inode = (struct inode *)obj;
    struct list *ptr;

    assert( list_empty(&inode->open) );
    assert( list_empty(&inode->locks) );

    list_remove( &inode->entry );

    while ((ptr = list_head( &inode->closed )))
    {
        struct closed_fd *fd = LIST_ENTRY( ptr, struct closed_fd, entry );
        list_remove( ptr );
        if (fd->fd != -1) close( fd->fd );
        if (fd->unlink[0])
        {
            /* make sure it is still the same file */
            struct stat st;
            if (!stat( fd->unlink, &st ) && st.st_dev == inode->dev && st.st_ino == inode->ino)
            {
                if (S_ISDIR(st.st_mode)) rmdir( fd->unlink );
                else unlink( fd->unlink );
            }
        }
        free( fd );
    }
}
/* retrieve the inode object for a given fd, creating it if needed */
static struct inode *get_inode( dev_t dev, ino_t ino )
{
    struct list *ptr;
    struct inode *inode;
    unsigned int hash = (dev ^ ino) % HASH_SIZE;

    if (inode_hash[hash].next)
    {
        LIST_FOR_EACH( ptr, &inode_hash[hash] )
        {
            inode = LIST_ENTRY( ptr, struct inode, entry );
            if (inode->dev == dev && inode->ino == ino)
                return (struct inode *)grab_object( inode );
        }
    }
    else list_init( &inode_hash[hash] );

    /* not found, create it */
    if ((inode = alloc_object( &inode_ops )))
    {
        inode->hash = hash;
        inode->dev  = dev;
        inode->ino  = ino;
        list_init( &inode->open );
        list_init( &inode->locks );
        list_init( &inode->closed );
        list_add_head( &inode_hash[hash], &inode->entry );
    }
    return inode;
}
/* add fd to the inode list of file descriptors to close */
static void inode_add_closed_fd( struct inode *inode, struct closed_fd *fd )
{
    if (!list_empty( &inode->locks ))
    {
        list_add_head( &inode->closed, &fd->entry );
    }
    else if (fd->unlink[0])  /* close the fd but keep the structure around for unlink */
    {
        close( fd->fd );
        fd->fd = -1;
        list_add_head( &inode->closed, &fd->entry );
    }
    else  /* no locks on this inode and no unlink, get rid of the fd */
    {
        close( fd->fd );
        free( fd );
    }
}
/****************************************************************/
/* file lock functions */

static void file_lock_dump( struct object *obj, int verbose )
{
    struct file_lock *lock = (struct file_lock *)obj;
    fprintf( stderr, "Lock %s fd=%p proc=%p start=",
             lock->shared ? "shared" : "excl", lock->fd, lock->process );
    DUMP_LONG_LONG( lock->start );
    fprintf( stderr, " end=" );
    DUMP_LONG_LONG( lock->end );
    fprintf( stderr, "\n" );
}

static int file_lock_signaled( struct object *obj, struct thread *thread )
{
    struct file_lock *lock = (struct file_lock *)obj;
    /* lock is signaled if it has lost its owner */
    return !lock->process;
}
/* set (or remove) a Unix lock if possible for the given range */
static int set_unix_lock( struct fd *fd, file_pos_t start, file_pos_t end, int type )
{
    struct flock fl;

    if (!fd->fs_locks) return 1;  /* no fs locks possible for this fd */
    for (;;)
    {
        if (start == end) return 1;  /* can't set zero-byte lock */
        if (start > max_unix_offset) return 1;  /* ignore it */
        fl.l_type   = type;
        fl.l_whence = SEEK_SET;
        fl.l_start  = start;
        if (!end || end > max_unix_offset) fl.l_len = 0;
        else fl.l_len = end - start;
        if (fcntl( fd->unix_fd, F_SETLK, &fl ) != -1) return 1;

        switch(errno)
        {
        case EACCES:
            /* check whether locks work at all on this file system */
            if (fcntl( fd->unix_fd, F_GETLK, &fl ) != -1)
            {
                set_error( STATUS_FILE_LOCK_CONFLICT );
                return 0;
            }
            /* fall through */
        case EIO:
        case ENOLCK:
            /* no locking on this fs, just ignore it */
            fd->fs_locks = 0;
            return 1;
        case EAGAIN:
            set_error( STATUS_FILE_LOCK_CONFLICT );
            return 0;
        case EBADF:
            /* this can happen if we try to set a write lock on a read-only file */
            /* we just ignore that error */
            if (fl.l_type == F_WRLCK) return 1;
            set_error( STATUS_ACCESS_DENIED );
            return 0;
        case EINVAL:
            /* this can happen if off_t is 64-bit but the kernel only supports 32-bit */
            /* in that case we shrink the limit and retry */
            if (max_unix_offset > INT_MAX)
            {
                max_unix_offset = INT_MAX;
                break;  /* retry */
            }
            /* fall through */
        default:
            file_set_error();
            return 0;
        }
    }
}
/* check if interval [start;end) overlaps the lock */
inline static int lock_overlaps( struct file_lock *lock, file_pos_t start, file_pos_t end )
{
    if (lock->end && start >= lock->end) return 0;
    if (end && lock->start >= end) return 0;
    return 1;
}
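/* Lock ranges are half-open intervals [start, end); an end of zero means the
 * lock extends to infinity, which is why the checks above and below treat
 * end == 0 specially. */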
/* remove Unix locks for all bytes in the specified area that are no longer locked */
static void remove_unix_locks( struct fd *fd, file_pos_t start, file_pos_t end )
{
    struct hole
    {
        struct hole *next;
        struct hole *prev;
        file_pos_t   start;
        file_pos_t   end;
    } *first, *cur, *next, *buffer;

    struct list *ptr;
    int count = 0;

    if (!fd->inode) return;
    if (!fd->fs_locks) return;
    if (start == end || start > max_unix_offset) return;
    if (!end || end > max_unix_offset) end = max_unix_offset + 1;

    /* count the number of locks overlapping the specified area */

    LIST_FOR_EACH( ptr, &fd->inode->locks )
    {
        struct file_lock *lock = LIST_ENTRY( ptr, struct file_lock, inode_entry );
        if (lock->start == lock->end) continue;
        if (lock_overlaps( lock, start, end )) count++;
    }

    if (!count)  /* no locks at all, we can unlock everything */
    {
        set_unix_lock( fd, start, end, F_UNLCK );
        return;
    }

    /* allocate space for the list of holes */
    /* max. number of holes is number of locks + 1 */

    if (!(buffer = malloc( sizeof(*buffer) * (count+1) ))) return;
    first = buffer;
    first->next  = NULL;
    first->prev  = NULL;
    first->start = start;
    first->end   = end;
    next = first + 1;

    /* build a sorted list of unlocked holes in the specified area */

    LIST_FOR_EACH( ptr, &fd->inode->locks )
    {
        struct file_lock *lock = LIST_ENTRY( ptr, struct file_lock, inode_entry );
        if (lock->start == lock->end) continue;
        if (!lock_overlaps( lock, start, end )) continue;

        /* go through all the holes touched by this lock */
        for (cur = first; cur; cur = cur->next)
        {
            if (cur->end <= lock->start) continue;  /* hole is before start of lock */
            if (lock->end && cur->start >= lock->end) break;  /* hole is after end of lock */

            /* now we know that lock is overlapping hole */

            if (cur->start >= lock->start)  /* lock starts before hole, shrink from start */
            {
                cur->start = lock->end;
                if (cur->start && cur->start < cur->end) break;  /* done with this lock */
                /* now hole is empty, remove it */
                if (cur->next) cur->next->prev = cur->prev;
                if (cur->prev) cur->prev->next = cur->next;
                else if (!(first = cur->next)) goto done;  /* no more holes at all */
            }
            else if (!lock->end || cur->end <= lock->end)  /* lock larger than hole, shrink from end */
            {
                cur->end = lock->start;
                assert( cur->start < cur->end );
            }
            else  /* lock is in the middle of hole, split hole in two */
            {
                next->prev = cur;
                next->next = cur->next;
                cur->next = next;
                next->start = lock->end;
                next->end = cur->end;
                cur->end = lock->start;
                assert( next->start < next->end );
                assert( cur->end < next->start );
                next++;
                break;  /* done with this lock */
            }
        }
    }

    /* clear Unix locks for all the holes */

    for (cur = first; cur; cur = cur->next)
        set_unix_lock( fd, cur->start, cur->end, F_UNLCK );

 done:
    free( buffer );
}
/* create a new lock on a fd */
static struct file_lock *add_lock( struct fd *fd, int shared, file_pos_t start, file_pos_t end )
{
    struct file_lock *lock;

    if (!fd->inode)  /* not a regular file */
    {
        set_error( STATUS_INVALID_HANDLE );
        return NULL;
    }

    if (!(lock = alloc_object( &file_lock_ops ))) return NULL;
    lock->shared  = shared;
    lock->start   = start;
    lock->end     = end;
    lock->fd      = fd;
    lock->process = current->process;

    /* now try to set a Unix lock */
    if (!set_unix_lock( lock->fd, lock->start, lock->end, lock->shared ? F_RDLCK : F_WRLCK ))
    {
        release_object( lock );
        return NULL;
    }
    list_add_head( &fd->locks, &lock->fd_entry );
    list_add_head( &fd->inode->locks, &lock->inode_entry );
    list_add_head( &lock->process->locks, &lock->proc_entry );
    return lock;
}
/* remove an existing lock */
static void remove_lock( struct file_lock *lock, int remove_unix )
{
    struct inode *inode = lock->fd->inode;

    list_remove( &lock->fd_entry );
    list_remove( &lock->inode_entry );
    list_remove( &lock->proc_entry );
    if (remove_unix) remove_unix_locks( lock->fd, lock->start, lock->end );
    if (list_empty( &inode->locks )) inode_close_pending( inode );
    lock->process = NULL;
    wake_up( &lock->obj, 0 );
    release_object( lock );
}
/* remove all locks owned by a given process */
void remove_process_locks( struct process *process )
{
    struct list *ptr;

    while ((ptr = list_head( &process->locks )))
    {
        struct file_lock *lock = LIST_ENTRY( ptr, struct file_lock, proc_entry );
        remove_lock( lock, 1 );  /* this removes it from the list */
    }
}
/* remove all locks on a given fd */
static void remove_fd_locks( struct fd *fd )
{
    file_pos_t start = FILE_POS_T_MAX, end = 0;
    struct list *ptr;

    while ((ptr = list_head( &fd->locks )))
    {
        struct file_lock *lock = LIST_ENTRY( ptr, struct file_lock, fd_entry );
        if (lock->start < start) start = lock->start;
        if (!lock->end || lock->end > end) end = lock->end - 1;
        remove_lock( lock, 0 );
    }
    if (start < end) remove_unix_locks( fd, start, end + 1 );
}
/* add a lock on an fd */
/* returns handle to wait on */
obj_handle_t lock_fd( struct fd *fd, file_pos_t start, file_pos_t count, int shared, int wait )
{
    struct list *ptr;
    file_pos_t end = start + count;

    /* don't allow wrapping locks */
    if (end && end < start)
    {
        set_error( STATUS_INVALID_PARAMETER );
        return 0;
    }

    /* check if another lock on that file overlaps the area */
    LIST_FOR_EACH( ptr, &fd->inode->locks )
    {
        struct file_lock *lock = LIST_ENTRY( ptr, struct file_lock, inode_entry );
        if (!lock_overlaps( lock, start, end )) continue;
        if (lock->shared && shared) continue;
        /* found a conflicting lock */
        if (!wait)
        {
            set_error( STATUS_FILE_LOCK_CONFLICT );
            return 0;
        }
        set_error( STATUS_PENDING );
        return alloc_handle( current->process, lock, SYNCHRONIZE, 0 );
    }

    /* not found, add it */
    if (add_lock( fd, shared, start, end )) return 0;
    if (get_error() == STATUS_FILE_LOCK_CONFLICT)
    {
        /* Unix lock conflict -> tell client to wait and retry */
        if (wait) set_error( STATUS_PENDING );
    }
    return 0;
}
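/* Usage sketch (hypothetical caller such as a lock_file request handler,
 * which lives outside this file): a non-zero return is a handle the client
 * can wait on until the conflicting lock is released; a zero return with
 * STATUS_PENDING means the conflict is at the Unix level and the client
 * should retry; a zero return with no error means the lock was granted. */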
/* remove a lock on an fd */
void unlock_fd( struct fd *fd, file_pos_t start, file_pos_t count )
{
    struct list *ptr;
    file_pos_t end = start + count;

    /* find an existing lock with the exact same parameters */
    LIST_FOR_EACH( ptr, &fd->locks )
    {
        struct file_lock *lock = LIST_ENTRY( ptr, struct file_lock, fd_entry );
        if ((lock->start == start) && (lock->end == end))
        {
            remove_lock( lock, 1 );
            return;
        }
    }
    set_error( STATUS_FILE_LOCK_CONFLICT );
}
/****************************************************************/
/* asynchronous operations support */

struct async
{
    struct thread       *thread;
    void                *apc;
    void                *user;
    void                *sb;
    struct timeout_user *timeout;
    struct list          entry;
};
/* notifies client thread of new status of its async request */
/* destroys the server side of it */
static void async_terminate( struct async *async, int status )
{
    thread_queue_apc( async->thread, NULL, async->apc, APC_ASYNC_IO,
                      1, async->user, async->sb, (void *)status );

    if (async->timeout) remove_timeout_user( async->timeout );
    async->timeout = NULL;
    list_remove( &async->entry );
    release_object( async->thread );
    free( async );
}
/* cb for timeout on an async request */
static void async_callback(void *private)
{
    struct async *async = (struct async *)private;

    /* fprintf(stderr, "async timed out %p\n", async); */
    async->timeout = NULL;
    async_terminate( async, STATUS_TIMEOUT );
}
/* create an async on a given queue of a fd */
struct async *create_async( struct thread *thread, int *timeout, struct list *queue,
                            void *io_apc, void *io_user, void *io_sb )
{
    struct async *async = mem_alloc( sizeof(struct async) );

    if (!async) return NULL;

    async->thread = (struct thread *)grab_object(thread);
    async->apc    = io_apc;
    async->user   = io_user;
    async->sb     = io_sb;

    list_add_tail( queue, &async->entry );

    if (timeout)
    {
        struct timeval when;

        gettimeofday( &when, NULL );
        add_timeout( &when, *timeout );
        async->timeout = add_timeout_user( &when, async_callback, async );
    }
    else async->timeout = NULL;

    return async;
}
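/* The timeout argument is optional: a NULL pointer means the async never
 * times out, otherwise it is a relative timeout in milliseconds that is
 * converted to an absolute expiry time with add_timeout() above. */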
/* terminate the async operation at the head of the queue */
void async_terminate_head( struct list *queue, int status )
{
    struct list *ptr = list_head( queue );
    if (ptr) async_terminate( LIST_ENTRY( ptr, struct async, entry ), status );
}
/****************************************************************/
/* file descriptor functions */

static void fd_dump( struct object *obj, int verbose )
{
    struct fd *fd = (struct fd *)obj;
    fprintf( stderr, "Fd unix_fd=%d user=%p", fd->unix_fd, fd->user );
    if (fd->inode) fprintf( stderr, " inode=%p unlink='%s'", fd->inode, fd->closed->unlink );
    fprintf( stderr, "\n" );
}
static void fd_destroy( struct object *obj )
{
    struct fd *fd = (struct fd *)obj;

    async_terminate_queue( &fd->read_q, STATUS_CANCELLED );
    async_terminate_queue( &fd->write_q, STATUS_CANCELLED );

    remove_fd_locks( fd );
    list_remove( &fd->inode_entry );
    if (fd->poll_index != -1) remove_poll_user( fd, fd->poll_index );
    if (fd->inode)
    {
        inode_add_closed_fd( fd->inode, fd->closed );
        release_object( fd->inode );
    }
    else  /* no inode, close it right away */
    {
        if (fd->unix_fd != -1) close( fd->unix_fd );
    }
}
/* set the events that select waits for on this fd */
void set_fd_events( struct fd *fd, int events )
{
    int user = fd->poll_index;
    assert( poll_users[user] == fd );

    set_fd_epoll_events( fd, user, events );

    if (events == -1)  /* stop waiting on this fd completely */
    {
        pollfd[user].fd = -1;
        pollfd[user].events = POLLERR;
        pollfd[user].revents = 0;
    }
    else if (pollfd[user].fd != -1 || !pollfd[user].events)
    {
        pollfd[user].fd = fd->unix_fd;
        pollfd[user].events = events;
    }
}
/* allocate an fd object, without setting the unix fd yet */
struct fd *alloc_fd( const struct fd_ops *fd_user_ops, struct object *user )
{
    struct fd *fd = alloc_object( &fd_ops );

    if (!fd) return NULL;

    fd->fd_ops     = fd_user_ops;
    fd->user       = user;
    fd->inode      = NULL;
    fd->closed     = NULL;
    fd->access     = 0;
    fd->sharing    = 0;
    fd->unix_fd    = -1;
    fd->fs_locks   = 1;
    fd->poll_index = -1;
    list_init( &fd->inode_entry );
    list_init( &fd->locks );
    list_init( &fd->read_q );
    list_init( &fd->write_q );

    if ((fd->poll_index = add_poll_user( fd )) == -1)
    {
        release_object( fd );
        return NULL;
    }
    return fd;
}
/* check if the desired access is possible without violating */
/* the sharing mode of other opens of the same file */
static int check_sharing( struct fd *fd, unsigned int access, unsigned int sharing )
{
    unsigned int existing_sharing = FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE;
    unsigned int existing_access = 0;
    int unlink = 0;
    struct list *ptr;

    /* if access mode is 0, sharing mode is ignored */
    if (!access) sharing = existing_sharing;
    fd->access = access;
    fd->sharing = sharing;

    LIST_FOR_EACH( ptr, &fd->inode->open )
    {
        struct fd *fd_ptr = LIST_ENTRY( ptr, struct fd, inode_entry );
        if (fd_ptr != fd)
        {
            existing_sharing &= fd_ptr->sharing;
            existing_access  |= fd_ptr->access;
            if (fd_ptr->closed->unlink[0]) unlink = 1;
        }
    }

    if ((access & GENERIC_READ) && !(existing_sharing & FILE_SHARE_READ)) return 0;
    if ((access & GENERIC_WRITE) && !(existing_sharing & FILE_SHARE_WRITE)) return 0;
    if ((existing_access & GENERIC_READ) && !(sharing & FILE_SHARE_READ)) return 0;
    if ((existing_access & GENERIC_WRITE) && !(sharing & FILE_SHARE_WRITE)) return 0;
    if (fd->closed->unlink[0] && !(existing_sharing & FILE_SHARE_DELETE)) return 0;
    if (unlink && !(sharing & FILE_SHARE_DELETE)) return 0;
    return 1;
}
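/* Example: if an existing open has access = GENERIC_WRITE and
 * sharing = FILE_SHARE_READ, a second open asking for GENERIC_WRITE fails
 * (the existing sharing lacks FILE_SHARE_WRITE), and so does any new open
 * that does not itself allow FILE_SHARE_WRITE, since the existing access
 * includes GENERIC_WRITE. */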
/* open() wrapper using a struct fd */
/* the fd must have been created with alloc_fd */
/* on error the fd object is released */
struct fd *open_fd( struct fd *fd, const char *name, int flags, mode_t *mode,
                    unsigned int access, unsigned int sharing, unsigned int options )
{
    struct stat st;
    struct closed_fd *closed_fd;
    const char *unlink_name = "";

    assert( fd->unix_fd == -1 );

    if (options & FILE_DELETE_ON_CLOSE) unlink_name = name;
    if (!(closed_fd = mem_alloc( sizeof(*closed_fd) + strlen(unlink_name) )))
    {
        release_object( fd );
        return NULL;
    }
    /* create the directory if needed */
    if ((options & FILE_DIRECTORY_FILE) && (flags & O_CREAT))
    {
        if (mkdir( name, 0777 ) == -1)
        {
            if (errno != EEXIST || (flags & O_EXCL))
            {
                file_set_error();
                release_object( fd );
                free( closed_fd );
                return NULL;
            }
        }
        flags &= ~(O_CREAT | O_EXCL | O_TRUNC);
    }
    if ((fd->unix_fd = open( name, flags & ~O_TRUNC, *mode )) == -1)
    {
        file_set_error();
        release_object( fd );
        free( closed_fd );
        return NULL;
    }
    closed_fd->fd = fd->unix_fd;
    closed_fd->unlink[0] = 0;
    fstat( fd->unix_fd, &st );
    *mode = st.st_mode;

    /* only bother with an inode for normal files and directories */
    if (S_ISREG(st.st_mode) || S_ISDIR(st.st_mode))
    {
        struct inode *inode = get_inode( st.st_dev, st.st_ino );

        if (!inode)
        {
            /* we can close the fd because there are no others open on the same file,
             * otherwise we wouldn't have failed to allocate a new inode
             */
            goto error;
        }
        fd->inode = inode;
        fd->closed = closed_fd;
        list_add_head( &inode->open, &fd->inode_entry );

        /* check directory options */
        if ((options & FILE_DIRECTORY_FILE) && !S_ISDIR(st.st_mode))
        {
            release_object( fd );
            set_error( STATUS_NOT_A_DIRECTORY );
            return NULL;
        }
        if ((options & FILE_NON_DIRECTORY_FILE) && S_ISDIR(st.st_mode))
        {
            release_object( fd );
            set_error( STATUS_FILE_IS_A_DIRECTORY );
            return NULL;
        }
        if (!check_sharing( fd, access, sharing ))
        {
            release_object( fd );
            set_error( STATUS_SHARING_VIOLATION );
            return NULL;
        }
        strcpy( closed_fd->unlink, unlink_name );
        if (flags & O_TRUNC) ftruncate( fd->unix_fd, 0 );
    }
    else  /* special file */
    {
        if (options & FILE_DIRECTORY_FILE)
        {
            set_error( STATUS_NOT_A_DIRECTORY );
            goto error;
        }
        if (unlink_name[0])  /* we can't unlink special files */
        {
            set_error( STATUS_INVALID_PARAMETER );
            goto error;
        }
        free( closed_fd );
    }
    return fd;

error:
    release_object( fd );
    free( closed_fd );
    return NULL;
}
/* create an fd for an anonymous file */
/* if the function fails the unix fd is closed */
struct fd *create_anonymous_fd( const struct fd_ops *fd_user_ops, int unix_fd, struct object *user )
{
    struct fd *fd = alloc_fd( fd_user_ops, user );

    if (fd)
    {
        fd->unix_fd = unix_fd;
        return fd;
    }
    close( unix_fd );
    return NULL;
}
/* retrieve the object that is using an fd */
void *get_fd_user( struct fd *fd )
{
    return fd->user;
}

/* retrieve the unix fd for an object */
int get_unix_fd( struct fd *fd )
{
    return fd->unix_fd;
}

/* check if two file descriptors point to the same file */
int is_same_file_fd( struct fd *fd1, struct fd *fd2 )
{
    return fd1->inode == fd2->inode;
}

/* callback for event happening in the main poll() loop */
void fd_poll_event( struct fd *fd, int event )
{
    return fd->fd_ops->poll_event( fd, event );
}
/* check if events are pending and if yes return which one(s) */
int check_fd_events( struct fd *fd, int events )
{
    struct pollfd pfd;

    pfd.fd     = fd->unix_fd;
    pfd.events = events;
    if (poll( &pfd, 1, 0 ) <= 0) return 0;
    return pfd.revents;
}
/* default add_queue() routine for objects that poll() on an fd */
int default_fd_add_queue( struct object *obj, struct wait_queue_entry *entry )
{
    struct fd *fd = get_obj_fd( obj );

    if (!fd) return 0;
    if (list_empty( &obj->wait_queue ))  /* first on the queue */
        set_fd_events( fd, fd->fd_ops->get_poll_events( fd ) );
    add_queue( obj, entry );
    release_object( fd );
    return 1;
}
/* default remove_queue() routine for objects that poll() on an fd */
void default_fd_remove_queue( struct object *obj, struct wait_queue_entry *entry )
{
    struct fd *fd = get_obj_fd( obj );

    grab_object( obj );
    remove_queue( obj, entry );
    if (list_empty( &obj->wait_queue ))  /* last on the queue is gone */
        set_fd_events( fd, 0 );
    release_object( obj );
    release_object( fd );
}
/* default signaled() routine for objects that poll() on an fd */
int default_fd_signaled( struct object *obj, struct thread *thread )
{
    int events, ret;
    struct fd *fd = get_obj_fd( obj );

    if (fd->inode) return 1;  /* regular files are always signaled */

    events = fd->fd_ops->get_poll_events( fd );
    ret = check_fd_events( fd, events ) != 0;

    if (ret)
        set_fd_events( fd, 0 );  /* stop waiting on select() if we are signaled */
    else if (!list_empty( &obj->wait_queue ))
        set_fd_events( fd, events );  /* restart waiting on poll() if we are no longer signaled */

    release_object( fd );
    return ret;
}
int default_fd_get_poll_events( struct fd *fd )
{
    int events = 0;

    if (!list_empty( &fd->read_q ))
        events |= POLLIN;
    if (!list_empty( &fd->write_q ))
        events |= POLLOUT;
    return events;
}
/* default handler for poll() events */
void default_poll_event( struct fd *fd, int event )
{
    if (!list_empty( &fd->read_q ) && (POLLIN & event) )
    {
        async_terminate_head( &fd->read_q, STATUS_ALERTED );
        return;
    }
    if (!list_empty( &fd->write_q ) && (POLLOUT & event) )
    {
        async_terminate_head( &fd->write_q, STATUS_ALERTED );
        return;
    }

    /* if an error occurred, stop polling this fd to avoid busy-looping */
    if (event & (POLLERR | POLLHUP)) set_fd_events( fd, -1 );
    wake_up( fd->user, 0 );
}
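/* Note that only the async at the head of the matching queue is completed
 * per poll event; remaining requests stay queued until further events arrive. */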
void default_fd_queue_async( struct fd *fd, void *apc, void *user, void *io_sb, int type, int count )
{
    struct list *queue;
    int events;

    if (!(fd->fd_ops->get_file_info( fd ) & FD_FLAG_OVERLAPPED))
    {
        set_error( STATUS_INVALID_HANDLE );
        return;
    }

    switch (type)
    {
    case ASYNC_TYPE_READ:
        queue = &fd->read_q;
        break;
    case ASYNC_TYPE_WRITE:
        queue = &fd->write_q;
        break;
    default:
        set_error( STATUS_INVALID_PARAMETER );
        return;
    }

    if (!create_async( current, NULL, queue, apc, user, io_sb )) return;

    /* Check if the new pending request can be served immediately */
    events = check_fd_events( fd, fd->fd_ops->get_poll_events( fd ) );
    if (events) fd->fd_ops->poll_event( fd, events );

    set_fd_events( fd, fd->fd_ops->get_poll_events( fd ) );
}
void default_fd_cancel_async( struct fd *fd )
{
    async_terminate_queue( &fd->read_q, STATUS_CANCELLED );
    async_terminate_queue( &fd->write_q, STATUS_CANCELLED );
}
/* default flush() routine */
int no_flush( struct fd *fd, struct event **event )
{
    set_error( STATUS_OBJECT_TYPE_MISMATCH );
    return 0;
}

/* default get_file_info() routine */
int no_get_file_info( struct fd *fd )
{
    set_error( STATUS_OBJECT_TYPE_MISMATCH );
    return 0;
}

/* default queue_async() routine */
void no_queue_async( struct fd *fd, void* apc, void* user, void* io_sb,
                     int type, int count )
{
    set_error( STATUS_OBJECT_TYPE_MISMATCH );
}

/* default cancel_async() routine */
void no_cancel_async( struct fd *fd )
{
    set_error( STATUS_OBJECT_TYPE_MISMATCH );
}
/* same as get_handle_obj but retrieve the struct fd associated to the object */
static struct fd *get_handle_fd_obj( struct process *process, obj_handle_t handle,
                                     unsigned int access )
{
    struct fd *fd = NULL;
    struct object *obj;

    if ((obj = get_handle_obj( process, handle, access, NULL )))
    {
        fd = get_obj_fd( obj );
        release_object( obj );
    }
    return fd;
}
/* flush a file's buffers */
DECL_HANDLER(flush_file)
{
    struct fd *fd = get_handle_fd_obj( current->process, req->handle, 0 );
    struct event *event = NULL;

    if (fd)
    {
        fd->fd_ops->flush( fd, &event );
        if (event)
        {
            reply->event = alloc_handle( current->process, event, SYNCHRONIZE, 0 );
        }
        release_object( fd );
    }
}
/* get a Unix fd to access a file */
DECL_HANDLER(get_handle_fd)
{
    struct fd *fd;

    reply->fd = -1;

    if ((fd = get_handle_fd_obj( current->process, req->handle, req->access )))
    {
        int unix_fd = get_handle_unix_fd( current->process, req->handle, req->access );
        if (unix_fd != -1) reply->fd = unix_fd;
        else if (!get_error())
        {
            assert( fd->unix_fd != -1 );
            send_client_fd( current->process, fd->unix_fd, req->handle );
        }
        reply->flags = fd->fd_ops->get_file_info( fd );
        release_object( fd );
    }
}
/* create / reschedule an async I/O */
DECL_HANDLER(register_async)
{
    struct fd *fd = get_handle_fd_obj( current->process, req->handle, 0 );

    /*
     * The queue_async method must do the following:
     *
     * 1. Get the async_queue for the request of given type.
     * 2. Create a new asynchronous request for the selected queue.
     * 3. Carry out any operations necessary to adjust the object's poll events.
     *    Usually: set_fd_events (obj, obj->ops->get_poll_events()).
     * 4. When the async request is triggered, then send back (with a proper APC)
     *    the trigger (STATUS_ALERTED) to the thread that posted the request.
     *    async_destroy() is to be called: it will both notify the sender about
     *    the trigger and destroy the request by itself.
     * See also the implementations in file.c, serial.c, and sock.c.
     */

    if (fd)
    {
        fd->fd_ops->queue_async( fd, req->io_apc, req->io_user, req->io_sb,
                                 req->type, req->count );
        release_object( fd );
    }
}
/* cancels all async I/O */
DECL_HANDLER(cancel_async)
{
    struct fd *fd = get_handle_fd_obj( current->process, req->handle, 0 );
    if (fd)
    {
        /* Note: we don't kill the queued APC_ASYNC_IO on this thread because
         * NtCancelIoFile() will force the pending APC to be run. Since
         * Windows only guarantees that the current thread will have no async
         * operation on the current fd when NtCancelIoFile returns, this shall
         * do the work.
         */
        fd->fd_ops->cancel_async( fd );
        release_object( fd );
    }
}