/*
 * Server-side change notification management
 *
 * Copyright (C) 1998 Alexandre Julliard
 * Copyright (C) 2006 Mike McCormack
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */
23 #include "wine/port.h"
31 #include <sys/types.h>
40 #define WIN32_NO_STATUS
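/* dnotify flags for the Linux fcntl( fd, F_NOTIFY, ... ) interface, used as a
 * fallback notification mechanism when inotify is not available */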
#define DN_ACCESS    0x00000001  /* File accessed */
#define DN_MODIFY    0x00000002  /* File modified */
#define DN_CREATE    0x00000004  /* File created */
#define DN_DELETE    0x00000008  /* File removed */
#define DN_RENAME    0x00000010  /* File renamed */
#define DN_ATTRIB    0x00000020  /* File changed attributes */
#define DN_MULTISHOT 0x80000000  /* Don't remove notifier */
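/* Use the system inotify interface when the header is present; on older i386
 * Linux systems without <sys/inotify.h>, fall back to invoking the inotify
 * syscalls directly with the numbers and structures defined below. */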
#ifdef HAVE_SYS_INOTIFY_H
#include <sys/inotify.h>
#define USE_INOTIFY
#elif defined(__linux__) && defined(__i386__)

#define SYS_inotify_init        291
#define SYS_inotify_add_watch   292
#define SYS_inotify_rm_watch    293

struct inotify_event
{
    int           wd;
    unsigned int  mask;
    unsigned int  cookie;
    unsigned int  len;
    char          name[1];
};

#define IN_ACCESS        0x00000001
#define IN_MODIFY        0x00000002
#define IN_ATTRIB        0x00000004
#define IN_CLOSE_WRITE   0x00000008
#define IN_CLOSE_NOWRITE 0x00000010
#define IN_OPEN          0x00000020
#define IN_MOVED_FROM    0x00000040
#define IN_MOVED_TO      0x00000080
#define IN_CREATE        0x00000100
#define IN_DELETE        0x00000200
#define IN_DELETE_SELF   0x00000400

#define IN_ISDIR         0x40000000

static inline int inotify_init( void )
{
    return syscall( SYS_inotify_init );
}

static inline int inotify_add_watch( int fd, const char *name, unsigned int mask )
{
    return syscall( SYS_inotify_add_watch, fd, name, mask );
}

static inline int inotify_rm_watch( int fd, int wd )
{
    return syscall( SYS_inotify_rm_watch, fd, wd );
}

#define USE_INOTIFY

#endif
struct inode;

static void free_inode( struct inode *inode );

static struct fd *inotify_fd;
struct change_record {
    struct list entry;
    unsigned int cookie;
    struct filesystem_event event;
};
struct dir
{
    struct object  obj;       /* object header */
    struct fd     *fd;        /* file descriptor to the directory */
    mode_t         mode;      /* file stat.st_mode */
    uid_t          uid;       /* file stat.st_uid */
    struct list    entry;     /* entry in global change notifications list */
    unsigned int   filter;    /* notification filter */
    int            notified;  /* SIGIO counter */
    int            want_data; /* return change data */
    int            subtree;   /* do we want to watch subdirectories? */
    struct list    change_records;   /* data for the change */
    struct list    in_entry;  /* entry in the inode dirs list */
    struct inode  *inode;     /* inode of the associated directory */
    struct process *client_process;  /* client process that has a cache for this directory */
    int             client_entry;    /* entry in client process cache */
};
static struct fd *dir_get_fd( struct object *obj );
static struct security_descriptor *dir_get_sd( struct object *obj );
static int dir_set_sd( struct object *obj, const struct security_descriptor *sd,
                       unsigned int set_info );
static void dir_dump( struct object *obj, int verbose );
static struct object_type *dir_get_type( struct object *obj );
static int dir_close_handle( struct object *obj, struct process *process, obj_handle_t handle );
static void dir_destroy( struct object *obj );
static const struct object_ops dir_ops =
{
    sizeof(struct dir),       /* size */
    dir_dump,                 /* dump */
    dir_get_type,             /* get_type */
    add_queue,                /* add_queue */
    remove_queue,             /* remove_queue */
    default_fd_signaled,      /* signaled */
    no_satisfied,             /* satisfied */
    no_signal,                /* signal */
    dir_get_fd,               /* get_fd */
    default_fd_map_access,    /* map_access */
    dir_get_sd,               /* get_sd */
    dir_set_sd,               /* set_sd */
    no_lookup_name,           /* lookup_name */
    no_link_name,             /* link_name */
    NULL,                     /* unlink_name */
    no_open_file,             /* open_file */
    dir_close_handle,         /* close_handle */
    dir_destroy               /* destroy */
};
static int dir_get_poll_events( struct fd *fd );
static enum server_fd_type dir_get_fd_type( struct fd *fd );
static const struct fd_ops dir_fd_ops =
{
    dir_get_poll_events,      /* get_poll_events */
    default_poll_event,       /* poll_event */
    dir_get_fd_type,          /* get_fd_type */
    no_fd_read,               /* read */
    no_fd_write,              /* write */
    no_fd_flush,              /* flush */
    no_fd_get_file_info,      /* get_file_info */
    no_fd_get_volume_info,    /* get_volume_info */
    default_fd_ioctl,         /* ioctl */
    default_fd_queue_async,   /* queue_async */
    default_fd_reselect_async /* reselect_async */
};
static struct list change_list = LIST_INIT(change_list);

/* per-process structure to keep track of cache entries on the client side */
struct dir_cache
{
    unsigned int  size;
    unsigned int  count;
    unsigned char state[1];
};

enum dir_cache_state
{
    DIR_CACHE_STATE_FREE,
    DIR_CACHE_STATE_INUSE,
    DIR_CACHE_STATE_RELEASED
};
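/* A cache entry becomes INUSE when it is handed out to the client, RELEASED when
 * the last handle to the directory is closed, and is reported back to the client
 * (and reset to FREE) on its next get_directory_cache_entry request. */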
/* return an array of cache entries that can be freed on the client side */
static int *get_free_dir_cache_entries( struct process *process, data_size_t *size )
{
    int *ret;
    struct dir_cache *cache = process->dir_cache;
    unsigned int i, j, count;

    if (!cache) return NULL;
    for (i = count = 0; i < cache->count && count < *size / sizeof(*ret); i++)
        if (cache->state[i] == DIR_CACHE_STATE_RELEASED) count++;
    if (!count) return NULL;

    if ((ret = malloc( count * sizeof(*ret) )))
    {
        for (i = j = 0; j < count; i++)
        {
            if (cache->state[i] != DIR_CACHE_STATE_RELEASED) continue;
            cache->state[i] = DIR_CACHE_STATE_FREE;
            ret[j++] = i;
        }
        *size = count * sizeof(*ret);
    }
    return ret;
}
/* allocate a new client-side directory cache entry */
static int alloc_dir_cache_entry( struct dir *dir, struct process *process )
{
    unsigned int i = 0;
    struct dir_cache *cache = process->dir_cache;

    if (cache)
        for (i = 0; i < cache->count; i++)
            if (cache->state[i] == DIR_CACHE_STATE_FREE) goto found;

    if (!cache || cache->count == cache->size)
    {
        unsigned int size = cache ? cache->size * 2 : 256;
        if (!(cache = realloc( cache, offsetof( struct dir_cache, state[size] ))))
        {
            set_error( STATUS_NO_MEMORY );
            return -1;
        }
        process->dir_cache = cache;
        cache->size = size;
    }
    cache->count = i + 1;

found:
    cache->state[i] = DIR_CACHE_STATE_INUSE;
    return i;
}
/* release a directory cache entry; it will be freed on the client side on the next cache request */
static void release_dir_cache_entry( struct dir *dir )
{
    struct dir_cache *cache;

    if (!dir->client_process) return;
    cache = dir->client_process->dir_cache;
    cache->state[dir->client_entry] = DIR_CACHE_STATE_RELEASED;
    release_object( dir->client_process );
    dir->client_process = NULL;
}
static void dnotify_adjust_changes( struct dir *dir )
{
#if defined(F_SETSIG) && defined(F_NOTIFY)
    int fd = get_unix_fd( dir->fd );
    unsigned int filter = dir->filter;
    unsigned int val;
    if ( 0 > fcntl( fd, F_SETSIG, SIGIO) )
        return;

    val = DN_MULTISHOT;
    if (filter & FILE_NOTIFY_CHANGE_FILE_NAME)
        val |= DN_RENAME | DN_DELETE | DN_CREATE;
    if (filter & FILE_NOTIFY_CHANGE_DIR_NAME)
        val |= DN_RENAME | DN_DELETE | DN_CREATE;
    if (filter & FILE_NOTIFY_CHANGE_ATTRIBUTES)
        val |= DN_ATTRIB;
    if (filter & FILE_NOTIFY_CHANGE_SIZE)
        val |= DN_MODIFY;
    if (filter & FILE_NOTIFY_CHANGE_LAST_WRITE)
        val |= DN_MODIFY;
    if (filter & FILE_NOTIFY_CHANGE_LAST_ACCESS)
        val |= DN_ACCESS;
    if (filter & FILE_NOTIFY_CHANGE_CREATION)
        val |= DN_CREATE;
    if (filter & FILE_NOTIFY_CHANGE_SECURITY)
        val |= DN_ATTRIB;
    fcntl( fd, F_NOTIFY, val );
#endif
}
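/* do_change_notify() walks change_list from inside the SIGIO signal handler, so
 * SIGIO is blocked while the list is modified below. */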
/* insert change in the global list */
static inline void insert_change( struct dir *dir )
{
    sigset_t sigset;

    sigemptyset( &sigset );
    sigaddset( &sigset, SIGIO );
    sigprocmask( SIG_BLOCK, &sigset, NULL );
    list_add_head( &change_list, &dir->entry );
    sigprocmask( SIG_UNBLOCK, &sigset, NULL );
}

/* remove change from the global list */
static inline void remove_change( struct dir *dir )
{
    sigset_t sigset;

    sigemptyset( &sigset );
    sigaddset( &sigset, SIGIO );
    sigprocmask( SIG_BLOCK, &sigset, NULL );
    list_remove( &dir->entry );
    sigprocmask( SIG_UNBLOCK, &sigset, NULL );
}
static void dir_dump( struct object *obj, int verbose )
{
    struct dir *dir = (struct dir *)obj;
    assert( obj->ops == &dir_ops );
    fprintf( stderr, "Dirfile fd=%p filter=%08x\n", dir->fd, dir->filter );
}

static struct object_type *dir_get_type( struct object *obj )
{
    static const WCHAR name[] = {'F','i','l','e'};
    static const struct unicode_str str = { name, sizeof(name) };
    return get_object_type( &str );
}
/* enter here directly from SIGIO signal handler */
void do_change_notify( int unix_fd )
{
    struct dir *dir;

    /* FIXME: this is O(n) ... probably can be improved */
    LIST_FOR_EACH_ENTRY( dir, &change_list, struct dir, entry )
    {
        if (get_unix_fd( dir->fd ) != unix_fd) continue;
        interlocked_xchg_add( &dir->notified, 1 );
        break;
    }
}

/* SIGIO callback, called synchronously with the poll loop */
void sigio_callback(void)
{
    struct dir *dir;

    LIST_FOR_EACH_ENTRY( dir, &change_list, struct dir, entry )
    {
        if (interlocked_xchg( &dir->notified, 0 ))
            fd_async_wake_up( dir->fd, ASYNC_TYPE_WAIT, STATUS_ALERTED );
    }
}
static struct fd *dir_get_fd( struct object *obj )
{
    struct dir *dir = (struct dir *)obj;
    assert( obj->ops == &dir_ops );
    return (struct fd *)grab_object( dir->fd );
}

static int get_dir_unix_fd( struct dir *dir )
{
    return get_unix_fd( dir->fd );
}
static struct security_descriptor *dir_get_sd( struct object *obj )
{
    struct dir *dir = (struct dir *)obj;
    int unix_fd;
    struct stat st;
    struct security_descriptor *sd;
    assert( obj->ops == &dir_ops );

    unix_fd = get_dir_unix_fd( dir );

    if (unix_fd == -1 || fstat( unix_fd, &st ) == -1)
        return obj->sd;

    /* mode and uid the same? if so, no need to re-generate security descriptor */
    if (obj->sd &&
        (st.st_mode & (S_IRWXU|S_IRWXO)) == (dir->mode & (S_IRWXU|S_IRWXO)) &&
        (st.st_uid == dir->uid))
        return obj->sd;

    sd = mode_to_sd( st.st_mode,
                     security_unix_uid_to_sid( st.st_uid ),
                     token_get_primary_group( current->process->token ));
    if (!sd) return obj->sd;

    dir->mode = st.st_mode;
    dir->uid = st.st_uid;
    free( obj->sd );
    obj->sd = sd;
    return sd;
}
static int dir_set_sd( struct object *obj, const struct security_descriptor *sd,
                       unsigned int set_info )
{
    struct dir *dir = (struct dir *)obj;
    const SID *owner;
    struct stat st;
    mode_t mode;
    int unix_fd;

    assert( obj->ops == &dir_ops );

    unix_fd = get_dir_unix_fd( dir );

    if (unix_fd == -1 || fstat( unix_fd, &st ) == -1) return 1;

    if (set_info & OWNER_SECURITY_INFORMATION)
    {
        owner = sd_get_owner( sd );
        if (!owner)
        {
            set_error( STATUS_INVALID_SECURITY_DESCR );
            return 0;
        }
        if (!obj->sd || !security_equal_sid( owner, sd_get_owner( obj->sd ) ))
        {
            /* FIXME: get Unix uid and call fchown */
        }
    }
    else if (obj->sd)
        owner = sd_get_owner( obj->sd );
    else
        owner = token_get_user( current->process->token );

    if (set_info & DACL_SECURITY_INFORMATION)
    {
        /* keep the bits that we don't map to access rights in the ACL */
        mode = st.st_mode & (S_ISUID|S_ISGID|S_ISVTX);
        mode |= sd_to_mode( sd, owner );

        if (((st.st_mode ^ mode) & (S_IRWXU|S_IRWXG|S_IRWXO)) && fchmod( unix_fd, mode ) == -1)
        {
            file_set_error();
            return 0;
        }
    }
    return 1;
}
static struct change_record *get_first_change_record( struct dir *dir )
{
    struct list *ptr = list_head( &dir->change_records );
    if (!ptr) return NULL;
    list_remove( ptr );
    return LIST_ENTRY( ptr, struct change_record, entry );
}
static int dir_close_handle( struct object *obj, struct process *process, obj_handle_t handle )
{
    struct dir *dir = (struct dir *)obj;

    if (!fd_close_handle( obj, process, handle )) return 0;
    if (obj->handle_count == 1) release_dir_cache_entry( dir );  /* closing last handle, release cache */
    return 1;  /* ok to close */
}
static void dir_destroy( struct object *obj )
{
    struct change_record *record;
    struct dir *dir = (struct dir *)obj;
    assert (obj->ops == &dir_ops);

    if (dir->filter)
        remove_change( dir );

    if (dir->inode)
    {
        list_remove( &dir->in_entry );
        free_inode( dir->inode );
    }

    while ((record = get_first_change_record( dir ))) free( record );

    release_dir_cache_entry( dir );
    release_object( dir->fd );

    if (inotify_fd && list_empty( &change_list ))
    {
        release_object( inotify_fd );
        inotify_fd = NULL;
    }
}
struct dir *get_dir_obj( struct process *process, obj_handle_t handle, unsigned int access )
{
    return (struct dir *)get_handle_obj( process, handle, access, &dir_ops );
}

static int dir_get_poll_events( struct fd *fd )
{
    return 0;
}

static enum server_fd_type dir_get_fd_type( struct fd *fd )
{
    return FD_TYPE_DIR;
}
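/* inotify-based implementation: every watched directory is represented by an
 * inode object, hashed both by (device, inode number) and by inotify watch
 * descriptor so that incoming events can be mapped back to the directories
 * watching them. */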
#ifdef USE_INOTIFY

#define HASH_SIZE 31

struct inode
{
    struct list ch_entry;    /* entry in the children list */
    struct list children;    /* children of this inode */
    struct inode *parent;    /* parent of this inode */
    struct list dirs;        /* directory handles watching this inode */
    struct list ino_entry;   /* entry in the inode hash */
    struct list wd_entry;    /* entry in the watch descriptor hash */
    dev_t dev;               /* device number */
    ino_t ino;               /* device's inode number */
    int wd;                  /* inotify's watch descriptor */
    char *name;              /* basename of the inode */
};

static struct list inode_hash[ HASH_SIZE ];
static struct list wd_hash[ HASH_SIZE ];
static int inotify_add_dir( char *path, unsigned int filter );
static struct inode *inode_from_wd( int wd )
{
    struct list *bucket = &wd_hash[ wd % HASH_SIZE ];
    struct inode *inode;

    LIST_FOR_EACH_ENTRY( inode, bucket, struct inode, wd_entry )
        if (inode->wd == wd)
            return inode;

    return NULL;
}

static inline struct list *get_hash_list( dev_t dev, ino_t ino )
{
    return &inode_hash[ (ino ^ dev) % HASH_SIZE ];
}
static struct inode *find_inode( dev_t dev, ino_t ino )
{
    struct list *bucket = get_hash_list( dev, ino );
    struct inode *inode;

    LIST_FOR_EACH_ENTRY( inode, bucket, struct inode, ino_entry )
        if (inode->ino == ino && inode->dev == dev)
            return inode;

    return NULL;
}
static struct inode *create_inode( dev_t dev, ino_t ino )
{
    struct inode *inode;

    inode = malloc( sizeof *inode );
    if (!inode) return NULL;

    list_init( &inode->children );
    list_init( &inode->dirs );
    inode->ino = ino;
    inode->dev = dev;
    inode->wd = -1;
    inode->parent = NULL;
    inode->name = NULL;
    list_add_tail( get_hash_list( dev, ino ), &inode->ino_entry );

    return inode;
}
static struct inode *get_inode( dev_t dev, ino_t ino )
{
    struct inode *inode;

    inode = find_inode( dev, ino );
    if (inode)
        return inode;

    return create_inode( dev, ino );
}
static void inode_set_wd( struct inode *inode, int wd )
{
    if (inode->wd != -1)
        list_remove( &inode->wd_entry );
    inode->wd = wd;
    list_add_tail( &wd_hash[ wd % HASH_SIZE ], &inode->wd_entry );
}

static void inode_set_name( struct inode *inode, const char *name )
{
    free( inode->name );
    inode->name = name ? strdup( name ) : NULL;
}
static void free_inode( struct inode *inode )
{
    int subtree = 0, watches = 0;
    struct inode *tmp, *next;
    struct dir *dir;

    LIST_FOR_EACH_ENTRY( dir, &inode->dirs, struct dir, in_entry )
    {
        subtree |= dir->subtree;
        watches++;
    }

    if (!subtree && !inode->parent)
    {
        LIST_FOR_EACH_ENTRY_SAFE( tmp, next, &inode->children,
                                  struct inode, ch_entry )
        {
            assert( tmp != inode );
            assert( tmp->parent == inode );
            free_inode( tmp );
        }
    }

    if (watches)
        return;

    if (inode->parent)
        list_remove( &inode->ch_entry );

    /* disconnect remaining children from the parent */
    LIST_FOR_EACH_ENTRY_SAFE( tmp, next, &inode->children, struct inode, ch_entry )
    {
        list_remove( &tmp->ch_entry );
        tmp->parent = NULL;
    }

    if (inode->wd != -1)
    {
        inotify_rm_watch( get_unix_fd( inotify_fd ), inode->wd );
        list_remove( &inode->wd_entry );
    }
    list_remove( &inode->ino_entry );

    free( inode->name );
    free( inode );
}
static struct inode *inode_add( struct inode *parent,
                                dev_t dev, ino_t ino, const char *name )
{
    struct inode *inode;

    inode = get_inode( dev, ino );
    if (!inode)
        return NULL;

    if (!inode->parent)
    {
        list_add_tail( &parent->children, &inode->ch_entry );
        inode->parent = parent;
        assert( inode != parent );
    }
    inode_set_name( inode, name );

    return inode;
}
static struct inode *inode_from_name( struct inode *inode, const char *name )
{
    struct inode *i;

    LIST_FOR_EACH_ENTRY( i, &inode->children, struct inode, ch_entry )
        if (i->name && !strcmp( i->name, name ))
            return i;
    return NULL;
}
static int inotify_get_poll_events( struct fd *fd );
static void inotify_poll_event( struct fd *fd, int event );

static const struct fd_ops inotify_fd_ops =
{
    inotify_get_poll_events,  /* get_poll_events */
    inotify_poll_event,       /* poll_event */
    NULL,                     /* flush */
    NULL,                     /* get_fd_type */
    NULL,                     /* ioctl */
    NULL,                     /* queue_async */
    NULL                      /* reselect_async */
};

static int inotify_get_poll_events( struct fd *fd )
{
    return POLLIN;
}
static void inotify_do_change_notify( struct dir *dir, unsigned int action,
                                      unsigned int cookie, const char *relpath )
{
    struct change_record *record;

    assert( dir->obj.ops == &dir_ops );

    if (dir->want_data)
    {
        size_t len = strlen(relpath);
        record = malloc( offsetof(struct change_record, event.name[len]) );
        if (!record)
            return;

        record->cookie = cookie;
        record->event.action = action;
        memcpy( record->event.name, relpath, len );
        record->event.len = len;

        list_add_tail( &dir->change_records, &record->entry );
    }

    fd_async_wake_up( dir->fd, ASYNC_TYPE_WAIT, STATUS_ALERTED );
}
static unsigned int filter_from_event( struct inotify_event *ie )
{
    unsigned int filter = 0;

    if (ie->mask & (IN_MOVED_FROM | IN_MOVED_TO | IN_DELETE | IN_CREATE))
        filter |= FILE_NOTIFY_CHANGE_FILE_NAME | FILE_NOTIFY_CHANGE_DIR_NAME;
    if (ie->mask & IN_MODIFY)
        filter |= FILE_NOTIFY_CHANGE_SIZE | FILE_NOTIFY_CHANGE_LAST_WRITE;
    if (ie->mask & IN_ATTRIB)
        filter |= FILE_NOTIFY_CHANGE_ATTRIBUTES | FILE_NOTIFY_CHANGE_SECURITY;
    if (ie->mask & IN_ACCESS)
        filter |= FILE_NOTIFY_CHANGE_LAST_ACCESS;
    if (ie->mask & IN_CREATE)
        filter |= FILE_NOTIFY_CHANGE_CREATION;

    if (ie->mask & IN_ISDIR)
        filter &= ~FILE_NOTIFY_CHANGE_FILE_NAME;
    else
        filter &= ~FILE_NOTIFY_CHANGE_DIR_NAME;

    return filter;
}
/* scan up the parent directories for watches */
static unsigned int filter_from_inode( struct inode *inode, int is_parent )
{
    unsigned int filter = 0;
    struct dir *dir;

    /* combine filters from parents watching subtrees */
    while (inode)
    {
        LIST_FOR_EACH_ENTRY( dir, &inode->dirs, struct dir, in_entry )
            if (dir->subtree || !is_parent)
                filter |= dir->filter;
        is_parent = 1;
        inode = inode->parent;
    }

    return filter;
}
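/* Rebuild a Unix path for an inode by walking up to an ancestor that has an open
 * directory handle and starting from its /proc/self/fd/<fd>/ entry, so no
 * absolute paths have to be stored; sz is extra space to reserve for the caller. */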
static char *inode_get_path( struct inode *inode, int sz )
{
    struct list *head;
    char *path;
    int len;

    if (!inode)
        return NULL;

    head = list_head( &inode->dirs );
    if (head)
    {
        int unix_fd = get_unix_fd( LIST_ENTRY( head, struct dir, in_entry )->fd );
        path = malloc ( 32 + sz );
        if (path)
            sprintf( path, "/proc/self/fd/%u/", unix_fd );
        return path;
    }

    if (!inode->name)
        return NULL;

    len = strlen( inode->name );
    path = inode_get_path( inode->parent, sz + len + 1 );
    if (!path)
        return NULL;

    strcat( path, inode->name );
    strcat( path, "/" );

    return path;
}
static void inode_check_dir( struct inode *parent, const char *name )
{
    char *path;
    unsigned int filter;
    struct inode *inode;
    struct stat st;
    int wd = -1;

    path = inode_get_path( parent, strlen(name) );
    if (!path)
        return;

    strcat( path, name );

    if (stat( path, &st ) < 0)
        goto end;

    filter = filter_from_inode( parent, 1 );
    if (!filter)
        goto end;

    inode = inode_add( parent, st.st_dev, st.st_ino, name );
    if (!inode || inode->wd != -1)
        goto end;

    wd = inotify_add_dir( path, filter );
    if (wd != -1)
        inode_set_wd( inode, wd );
    else
        free_inode( inode );

end:
    free( path );
}
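/* Grow *path and prefix "segment/" to it (just "segment" when the path is still
 * empty); used to build the event path relative to each watch root while walking
 * up the inode tree. */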
static int prepend( char **path, const char *segment )
{
    int extra;
    char *p;

    extra = strlen( segment ) + 1;
    if (*path)
    {
        int len = strlen( *path ) + 1;
        p = realloc( *path, len + extra );
        if (!p) return 0;
        memmove( &p[ extra ], p, len );
        p[ extra - 1 ] = '/';
        memcpy( p, segment, extra - 1 );
    }
    else
    {
        p = malloc( extra );
        if (!p) return 0;
        memcpy( p, segment, extra );
    }

    *path = p;

    return 1;
}
static void inotify_notify_all( struct inotify_event *ie )
{
    unsigned int filter, action;
    struct inode *inode, *i;
    char *path = NULL;
    struct dir *dir;

    inode = inode_from_wd( ie->wd );
    if (!inode)
    {
        fprintf( stderr, "no inode matches %d\n", ie->wd );
        return;
    }

    filter = filter_from_event( ie );

    if (ie->mask & IN_CREATE)
    {
        if (ie->mask & IN_ISDIR)
            inode_check_dir( inode, ie->name );

        action = FILE_ACTION_ADDED;
    }
    else if (ie->mask & IN_DELETE)
        action = FILE_ACTION_REMOVED;
    else if (ie->mask & IN_MOVED_FROM)
        action = FILE_ACTION_RENAMED_OLD_NAME;
    else if (ie->mask & IN_MOVED_TO)
        action = FILE_ACTION_RENAMED_NEW_NAME;
    else
        action = FILE_ACTION_MODIFIED;

    /*
     * Work our way up the inode hierarchy
     *  extending the relative path as we go
     *  and notifying all recursive watches.
     */
    if (!prepend( &path, ie->name ))
        return;

    for (i = inode; i; i = i->parent)
    {
        LIST_FOR_EACH_ENTRY( dir, &i->dirs, struct dir, in_entry )
            if ((filter & dir->filter) && (i==inode || dir->subtree))
                inotify_do_change_notify( dir, action, ie->cookie, path );

        if (!i->name || !prepend( &path, i->name ))
            break;
    }
    free( path );

    if (ie->mask & IN_DELETE)
    {
        i = inode_from_name( inode, ie->name );
        if (i)
            free_inode( i );
    }
}
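/* inotify events are variable-sized: a fixed header followed by ie->len bytes of
 * name, so the buffer returned by read() is walked one record at a time. */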
static void inotify_poll_event( struct fd *fd, int event )
{
    int r, ofs, unix_fd;
    char buffer[ 0x1000 ];
    struct inotify_event *ie;

    unix_fd = get_unix_fd( fd );
    r = read( unix_fd, buffer, sizeof buffer );
    if (r < 0)
    {
        fprintf(stderr,"inotify_poll_event(): inotify read failed!\n");
        return;
    }

    for( ofs = 0; ofs < r - offsetof(struct inotify_event, name); )
    {
        ie = (struct inotify_event*) &buffer[ofs];
        if (!ie->len)
            break;
        ofs += offsetof( struct inotify_event, name[ie->len] );
        if (ofs > r) break;
        inotify_notify_all( ie );
    }
}
static inline struct fd *create_inotify_fd( void )
{
    int unix_fd;

    unix_fd = inotify_init();
    if (unix_fd < 0) return NULL;
    return create_anonymous_fd( &inotify_fd_ops, unix_fd, NULL, 0 );
}
static int map_flags( unsigned int filter )
{
    unsigned int mask;

    /* always watch these so we can track subdirectories in recursive watches */
    mask = (IN_MOVED_FROM | IN_MOVED_TO | IN_DELETE | IN_CREATE | IN_DELETE_SELF);

    if (filter & FILE_NOTIFY_CHANGE_ATTRIBUTES)
        mask |= IN_ATTRIB;
    if (filter & FILE_NOTIFY_CHANGE_SIZE)
        mask |= IN_MODIFY;
    if (filter & FILE_NOTIFY_CHANGE_LAST_WRITE)
        mask |= IN_MODIFY;
    if (filter & FILE_NOTIFY_CHANGE_LAST_ACCESS)
        mask |= IN_ACCESS;
    if (filter & FILE_NOTIFY_CHANGE_SECURITY)
        mask |= IN_ATTRIB;

    return mask;
}
static int inotify_add_dir( char *path, unsigned int filter )
{
    int wd = inotify_add_watch( get_unix_fd( inotify_fd ),
                                path, map_flags( filter ) );
    if (wd != -1)
        set_fd_events( inotify_fd, POLLIN );
    return wd;
}
static int init_inotify( void )
{
    int i;

    if (inotify_fd)
        return 1;

    inotify_fd = create_inotify_fd();
    if (!inotify_fd)
        return 0;

    for (i=0; i<HASH_SIZE; i++)
    {
        list_init( &inode_hash[i] );
        list_init( &wd_hash[i] );
    }

    return 1;
}
static int inotify_adjust_changes( struct dir *dir )
{
    unsigned int filter;
    struct inode *inode;
    struct stat st;
    char path[32];
    int wd, unix_fd;

    if (!inotify_fd)
        return 0;

    unix_fd = get_unix_fd( dir->fd );

    inode = dir->inode;
    if (!inode)
    {
        /* check if this fd is already being watched */
        if (-1 == fstat( unix_fd, &st ))
            return 0;

        inode = get_inode( st.st_dev, st.st_ino );
        if (!inode)
            inode = create_inode( st.st_dev, st.st_ino );
        if (!inode)
            return 0;

        list_add_tail( &inode->dirs, &dir->in_entry );
        dir->inode = inode;
    }

    filter = filter_from_inode( inode, 0 );

    sprintf( path, "/proc/self/fd/%u", unix_fd );
    wd = inotify_add_dir( path, filter );
    if (wd == -1) return 0;

    inode_set_wd( inode, wd );

    return 1;
}
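/* Resolve the /proc/self/fd/<n> symlink and return the last component of the
 * target path; this provides the name for a newly added inode. */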
static char *get_basename( const char *link )
{
    char *buffer, *name = NULL;
    int r, n = 0x100;

    while (1)
    {
        buffer = malloc( n );
        if (!buffer) return NULL;

        r = readlink( link, buffer, n );
        if (r < 0)
            break;

        if (r < n)
        {
            name = buffer;
            break;
        }
        free( buffer );
        n *= 2;
    }

    if (name)
    {
        while (r > 0 && name[ r - 1 ] == '/' )
            r--;
        name[ r ] = 0;

        name = strrchr( name, '/' );
        if (name)
            name = strdup( &name[1] );
    }

    free( buffer );
    return name;
}
static int dir_add_to_existing_notify( struct dir *dir )
{
    struct inode *inode, *parent;
    unsigned int filter = 0;
    struct stat st, st_new;
    char link[35], *name;
    int wd, unix_fd;

    if (!inotify_fd)
        return 0;

    unix_fd = get_unix_fd( dir->fd );

    /* check if it's in the list of inodes we want to watch */
    if (-1 == fstat( unix_fd, &st_new ))
        return 0;
    inode = find_inode( st_new.st_dev, st_new.st_ino );
    if (inode)
        return 0;

    /* lookup the parent */
    sprintf( link, "/proc/self/fd/%u/..", unix_fd );
    if (-1 == stat( link, &st ))
        return 0;

    /*
     * If there's no parent, stop.  We could keep going adding
     *  ../ to the path until we hit the root of the tree or
     *  find a recursively watched ancestor.
     * Assume it's too expensive to search up the tree for now.
     */
    parent = find_inode( st.st_dev, st.st_ino );
    if (!parent)
        return 0;

    if (parent->wd == -1)
        return 0;

    filter = filter_from_inode( parent, 1 );
    if (!filter)
        return 0;

    sprintf( link, "/proc/self/fd/%u", unix_fd );
    name = get_basename( link );
    if (!name)
        return 0;
    inode = inode_add( parent, st_new.st_dev, st_new.st_ino, name );
    free( name );
    if (!inode)
        return 0;

    /* Couldn't find this inode at the start of the function, must be new */
    assert( inode->wd == -1 );

    wd = inotify_add_dir( link, filter );
    if (wd != -1)
        inode_set_wd( inode, wd );

    return 1;
}
#else

static int init_inotify( void )
{
    return 0;
}

static int inotify_adjust_changes( struct dir *dir )
{
    return 0;
}

static void free_inode( struct inode *inode )
{
    assert( 0 );
}

static int dir_add_to_existing_notify( struct dir *dir )
{
    return 0;
}

#endif  /* USE_INOTIFY */
struct object *create_dir_obj( struct fd *fd, unsigned int access, mode_t mode )
{
    struct dir *dir;

    dir = alloc_object( &dir_ops );
    if (!dir)
        return NULL;

    list_init( &dir->change_records );
    dir->filter = 0;
    dir->notified = 0;
    dir->want_data = 0;
    dir->inode = NULL;
    grab_object( fd );
    dir->fd = fd;
    dir->mode = mode;
    dir->uid = ~(uid_t)0;
    dir->client_process = NULL;
    set_fd_user( fd, &dir_fd_ops, &dir->obj );

    dir_add_to_existing_notify( dir );

    return &dir->obj;
}
/* retrieve (or allocate) the client-side directory cache entry */
DECL_HANDLER(get_directory_cache_entry)
{
    struct dir *dir;
    int *free_entries;
    data_size_t free_size;

    if (!(dir = get_dir_obj( current->process, req->handle, 0 ))) return;

    if (!dir->client_process)
    {
        if ((dir->client_entry = alloc_dir_cache_entry( dir, current->process )) == -1) goto done;
        dir->client_process = (struct process *)grab_object( current->process );
    }

    if (dir->client_process == current->process) reply->entry = dir->client_entry;
    else set_error( STATUS_SHARING_VIOLATION );

done:  /* allow freeing entries even on failure */
    free_size = get_reply_max_size();
    free_entries = get_free_dir_cache_entries( current->process, &free_size );
    if (free_entries) set_reply_data_ptr( free_entries, free_size );

    release_object( dir );
}
/* enable change notifications for a directory */
DECL_HANDLER(read_directory_changes)
{
    struct dir *dir;
    struct async *async;

    if (!req->filter)
    {
        set_error(STATUS_INVALID_PARAMETER);
        return;
    }

    dir = get_dir_obj( current->process, req->async.handle, 0 );
    if (!dir)
        return;

    /* requests don't timeout */
    if (!(async = create_async( dir->fd, current, &req->async, NULL ))) goto end;
    fd_queue_async( dir->fd, async, ASYNC_TYPE_WAIT );

    /* assign it once */
    if (!dir->filter)
    {
        init_inotify();
        insert_change( dir );
        dir->filter = req->filter;
        dir->subtree = req->subtree;
        dir->want_data = req->want_data;
    }

    /* if there's already a change in the queue, send it */
    if (!list_empty( &dir->change_records ))
        fd_async_wake_up( dir->fd, ASYNC_TYPE_WAIT, STATUS_ALERTED );

    /* setup the real notification */
    if (!inotify_adjust_changes( dir ))
        dnotify_adjust_changes( dir );

    set_error(STATUS_PENDING);

    release_object( async );
end:
    release_object( dir );
}
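/* Drain the queued change records into the reply buffer.  Each filesystem_event
 * is padded to int alignment; MOVED_FROM/MOVED_TO records sharing an inotify
 * cookie are paired into rename old/new name actions, and unmatched ones are
 * downgraded to plain removed/added. */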
DECL_HANDLER(read_change)
{
    struct change_record *record, *next;
    struct dir *dir;
    struct list events;
    char *data, *event;
    int size = 0;

    dir = get_dir_obj( current->process, req->handle, 0 );
    if (!dir)
        return;

    list_init( &events );
    list_move_tail( &events, &dir->change_records );
    release_object( dir );

    if (list_empty( &events ))
    {
        set_error( STATUS_NO_DATA_DETECTED );
        return;
    }

    LIST_FOR_EACH_ENTRY( record, &events, struct change_record, entry )
    {
        size += (offsetof(struct filesystem_event, name[record->event.len])
                + sizeof(int)-1) / sizeof(int) * sizeof(int);
    }

    if (size > get_reply_max_size())
        set_error( STATUS_BUFFER_TOO_SMALL );
    else if ((data = mem_alloc( size )) != NULL)
    {
        event = data;
        LIST_FOR_EACH_ENTRY( record, &events, struct change_record, entry )
        {
            data_size_t len = offsetof( struct filesystem_event, name[record->event.len] );

            /* FIXME: rename events are sometimes reported as delete/create */
            if (record->event.action == FILE_ACTION_RENAMED_OLD_NAME)
            {
                struct list *elem = list_next( &events, &record->entry );
                if (elem)
                    next = LIST_ENTRY(elem, struct change_record, entry);

                if (elem && next->cookie == record->cookie)
                    next->cookie = 0;
                else
                    record->event.action = FILE_ACTION_REMOVED;
            }
            else if (record->event.action == FILE_ACTION_RENAMED_NEW_NAME && record->cookie)
                record->event.action = FILE_ACTION_ADDED;

            memcpy( event, &record->event, len );
            event += len;
            if (len % sizeof(int))
            {
                memset( event, 0, sizeof(int) - len % sizeof(int) );
                event += sizeof(int) - len % sizeof(int);
            }
        }
        set_reply_data_ptr( data, size );
    }

    LIST_FOR_EACH_ENTRY_SAFE( record, next, &events, struct change_record, entry )
    {
        list_remove( &record->entry );
        free( record );
    }
}