/*
 * Server-side change notification management
 *
 * Copyright (C) 1998 Alexandre Julliard
 * Copyright (C) 2006 Mike McCormack
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */
23 #include "wine/port.h"
31 #include <sys/types.h>
40 #define WIN32_NO_STATUS
56 #define DN_ACCESS 0x00000001 /* File accessed */
57 #define DN_MODIFY 0x00000002 /* File modified */
58 #define DN_CREATE 0x00000004 /* File created */
59 #define DN_DELETE 0x00000008 /* File removed */
60 #define DN_RENAME 0x00000010 /* File renamed */
61 #define DN_ATTRIB 0x00000020 /* File changed attributes */
62 #define DN_MULTISHOT 0x80000000 /* Don't remove notifier */
68 #ifdef HAVE_SYS_INOTIFY_H
69 #include <sys/inotify.h>
71 #elif defined(__linux__) && defined(__i386__)
73 #define SYS_inotify_init 291
74 #define SYS_inotify_add_watch 292
75 #define SYS_inotify_rm_watch 293
77 struct inotify_event
{
85 #define IN_ACCESS 0x00000001
86 #define IN_MODIFY 0x00000002
87 #define IN_ATTRIB 0x00000004
88 #define IN_CLOSE_WRITE 0x00000008
89 #define IN_CLOSE_NOWRITE 0x00000010
90 #define IN_OPEN 0x00000020
91 #define IN_MOVED_FROM 0x00000040
92 #define IN_MOVED_TO 0x00000080
93 #define IN_CREATE 0x00000100
94 #define IN_DELETE 0x00000200
95 #define IN_DELETE_SELF 0x00000400
97 #define IN_ISDIR 0x40000000
/* fallback wrapper for the inotify_init system call (glibc without sys/inotify.h) */
static inline int inotify_init( void )
{
    return syscall( SYS_inotify_init );
}
/* fallback wrapper for inotify_add_watch; returns a watch descriptor, or -1 on error */
static inline int inotify_add_watch( int fd, const char *name, unsigned int mask )
{
    return syscall( SYS_inotify_add_watch, fd, name, mask );
}
/* fallback wrapper for inotify_rm_watch; returns 0 on success, -1 on error */
static inline int inotify_rm_watch( int fd, int wd )
{
    return syscall( SYS_inotify_rm_watch, fd, wd );
}
120 static void free_inode( struct inode
*inode
);
122 static struct fd
*inotify_fd
;
124 struct change_record
{
127 struct filesystem_event event
;
132 struct object obj
; /* object header */
133 struct fd
*fd
; /* file descriptor to the directory */
134 mode_t mode
; /* file stat.st_mode */
135 uid_t uid
; /* file stat.st_uid */
136 struct list entry
; /* entry in global change notifications list */
137 unsigned int filter
; /* notification filter */
138 int notified
; /* SIGIO counter */
139 int want_data
; /* return change data */
140 int subtree
; /* do we want to watch subdirectories? */
141 struct list change_records
; /* data for the change */
142 struct list in_entry
; /* entry in the inode dirs list */
143 struct inode
*inode
; /* inode of the associated directory */
144 struct process
*client_process
; /* client process that has a cache for this directory */
145 int client_entry
; /* entry in client process cache */
148 static struct fd
*dir_get_fd( struct object
*obj
);
149 static struct security_descriptor
*dir_get_sd( struct object
*obj
);
150 static int dir_set_sd( struct object
*obj
, const struct security_descriptor
*sd
,
151 unsigned int set_info
);
152 static void dir_dump( struct object
*obj
, int verbose
);
153 static struct object_type
*dir_get_type( struct object
*obj
);
154 static int dir_close_handle( struct object
*obj
, struct process
*process
, obj_handle_t handle
);
155 static void dir_destroy( struct object
*obj
);
157 static const struct object_ops dir_ops
=
159 sizeof(struct dir
), /* size */
161 dir_get_type
, /* get_type */
162 add_queue
, /* add_queue */
163 remove_queue
, /* remove_queue */
164 default_fd_signaled
, /* signaled */
165 no_satisfied
, /* satisfied */
166 no_signal
, /* signal */
167 dir_get_fd
, /* get_fd */
168 default_fd_map_access
, /* map_access */
169 dir_get_sd
, /* get_sd */
170 dir_set_sd
, /* set_sd */
171 no_lookup_name
, /* lookup_name */
172 no_link_name
, /* link_name */
173 NULL
, /* unlink_name */
174 no_open_file
, /* open_file */
175 no_kernel_obj_list
, /* get_kernel_obj_list */
176 dir_close_handle
, /* close_handle */
177 dir_destroy
/* destroy */
180 static int dir_get_poll_events( struct fd
*fd
);
181 static enum server_fd_type
dir_get_fd_type( struct fd
*fd
);
183 static const struct fd_ops dir_fd_ops
=
185 dir_get_poll_events
, /* get_poll_events */
186 default_poll_event
, /* poll_event */
187 dir_get_fd_type
, /* get_fd_type */
188 no_fd_read
, /* read */
189 no_fd_write
, /* write */
190 no_fd_flush
, /* flush */
191 default_fd_get_file_info
, /* get_file_info */
192 no_fd_get_volume_info
, /* get_volume_info */
193 default_fd_ioctl
, /* ioctl */
194 default_fd_queue_async
, /* queue_async */
195 default_fd_reselect_async
/* reselect_async */
198 static struct list change_list
= LIST_INIT(change_list
);
200 /* per-process structure to keep track of cache entries on the client size */
205 unsigned char state
[1];
210 DIR_CACHE_STATE_FREE
,
211 DIR_CACHE_STATE_INUSE
,
212 DIR_CACHE_STATE_RELEASED
215 /* return an array of cache entries that can be freed on the client side */
216 static int *get_free_dir_cache_entries( struct process
*process
, data_size_t
*size
)
219 struct dir_cache
*cache
= process
->dir_cache
;
220 unsigned int i
, j
, count
;
222 if (!cache
) return NULL
;
223 for (i
= count
= 0; i
< cache
->count
&& count
< *size
/ sizeof(*ret
); i
++)
224 if (cache
->state
[i
] == DIR_CACHE_STATE_RELEASED
) count
++;
225 if (!count
) return NULL
;
227 if ((ret
= malloc( count
* sizeof(*ret
) )))
229 for (i
= j
= 0; j
< count
; i
++)
231 if (cache
->state
[i
] != DIR_CACHE_STATE_RELEASED
) continue;
232 cache
->state
[i
] = DIR_CACHE_STATE_FREE
;
235 *size
= count
* sizeof(*ret
);
240 /* allocate a new client-side directory cache entry */
241 static int alloc_dir_cache_entry( struct dir
*dir
, struct process
*process
)
244 struct dir_cache
*cache
= process
->dir_cache
;
247 for (i
= 0; i
< cache
->count
; i
++)
248 if (cache
->state
[i
] == DIR_CACHE_STATE_FREE
) goto found
;
250 if (!cache
|| cache
->count
== cache
->size
)
252 unsigned int size
= cache
? cache
->size
* 2 : 256;
253 if (!(cache
= realloc( cache
, offsetof( struct dir_cache
, state
[size
] ))))
255 set_error( STATUS_NO_MEMORY
);
258 process
->dir_cache
= cache
;
261 cache
->count
= i
+ 1;
264 cache
->state
[i
] = DIR_CACHE_STATE_INUSE
;
268 /* release a directory cache entry; it will be freed on the client side on the next cache request */
269 static void release_dir_cache_entry( struct dir
*dir
)
271 struct dir_cache
*cache
;
273 if (!dir
->client_process
) return;
274 cache
= dir
->client_process
->dir_cache
;
275 cache
->state
[dir
->client_entry
] = DIR_CACHE_STATE_RELEASED
;
276 release_object( dir
->client_process
);
277 dir
->client_process
= NULL
;
280 static void dnotify_adjust_changes( struct dir
*dir
)
282 #if defined(F_SETSIG) && defined(F_NOTIFY)
283 int fd
= get_unix_fd( dir
->fd
);
284 unsigned int filter
= dir
->filter
;
286 if ( 0 > fcntl( fd
, F_SETSIG
, SIGIO
) )
290 if (filter
& FILE_NOTIFY_CHANGE_FILE_NAME
)
291 val
|= DN_RENAME
| DN_DELETE
| DN_CREATE
;
292 if (filter
& FILE_NOTIFY_CHANGE_DIR_NAME
)
293 val
|= DN_RENAME
| DN_DELETE
| DN_CREATE
;
294 if (filter
& FILE_NOTIFY_CHANGE_ATTRIBUTES
)
296 if (filter
& FILE_NOTIFY_CHANGE_SIZE
)
298 if (filter
& FILE_NOTIFY_CHANGE_LAST_WRITE
)
300 if (filter
& FILE_NOTIFY_CHANGE_LAST_ACCESS
)
302 if (filter
& FILE_NOTIFY_CHANGE_CREATION
)
304 if (filter
& FILE_NOTIFY_CHANGE_SECURITY
)
306 fcntl( fd
, F_NOTIFY
, val
);
310 /* insert change in the global list */
311 static inline void insert_change( struct dir
*dir
)
315 sigemptyset( &sigset
);
316 sigaddset( &sigset
, SIGIO
);
317 sigprocmask( SIG_BLOCK
, &sigset
, NULL
);
318 list_add_head( &change_list
, &dir
->entry
);
319 sigprocmask( SIG_UNBLOCK
, &sigset
, NULL
);
322 /* remove change from the global list */
323 static inline void remove_change( struct dir
*dir
)
327 sigemptyset( &sigset
);
328 sigaddset( &sigset
, SIGIO
);
329 sigprocmask( SIG_BLOCK
, &sigset
, NULL
);
330 list_remove( &dir
->entry
);
331 sigprocmask( SIG_UNBLOCK
, &sigset
, NULL
);
334 static void dir_dump( struct object
*obj
, int verbose
)
336 struct dir
*dir
= (struct dir
*)obj
;
337 assert( obj
->ops
== &dir_ops
);
338 fprintf( stderr
, "Dirfile fd=%p filter=%08x\n", dir
->fd
, dir
->filter
);
341 static struct object_type
*dir_get_type( struct object
*obj
)
343 static const WCHAR name
[] = {'F','i','l','e'};
344 static const struct unicode_str str
= { name
, sizeof(name
) };
345 return get_object_type( &str
);
348 /* enter here directly from SIGIO signal handler */
349 void do_change_notify( int unix_fd
)
353 /* FIXME: this is O(n) ... probably can be improved */
354 LIST_FOR_EACH_ENTRY( dir
, &change_list
, struct dir
, entry
)
356 if (get_unix_fd( dir
->fd
) != unix_fd
) continue;
357 interlocked_xchg_add( &dir
->notified
, 1 );
362 /* SIGIO callback, called synchronously with the poll loop */
363 void sigio_callback(void)
367 LIST_FOR_EACH_ENTRY( dir
, &change_list
, struct dir
, entry
)
369 if (interlocked_xchg( &dir
->notified
, 0 ))
370 fd_async_wake_up( dir
->fd
, ASYNC_TYPE_WAIT
, STATUS_ALERTED
);
374 static struct fd
*dir_get_fd( struct object
*obj
)
376 struct dir
*dir
= (struct dir
*)obj
;
377 assert( obj
->ops
== &dir_ops
);
378 return (struct fd
*)grab_object( dir
->fd
);
381 static int get_dir_unix_fd( struct dir
*dir
)
383 return get_unix_fd( dir
->fd
);
386 static struct security_descriptor
*dir_get_sd( struct object
*obj
)
388 struct dir
*dir
= (struct dir
*)obj
;
391 struct security_descriptor
*sd
;
392 assert( obj
->ops
== &dir_ops
);
394 unix_fd
= get_dir_unix_fd( dir
);
396 if (unix_fd
== -1 || fstat( unix_fd
, &st
) == -1)
399 /* mode and uid the same? if so, no need to re-generate security descriptor */
401 (st
.st_mode
& (S_IRWXU
|S_IRWXO
)) == (dir
->mode
& (S_IRWXU
|S_IRWXO
)) &&
402 (st
.st_uid
== dir
->uid
))
405 sd
= mode_to_sd( st
.st_mode
,
406 security_unix_uid_to_sid( st
.st_uid
),
407 token_get_primary_group( current
->process
->token
));
408 if (!sd
) return obj
->sd
;
410 dir
->mode
= st
.st_mode
;
411 dir
->uid
= st
.st_uid
;
417 static int dir_set_sd( struct object
*obj
, const struct security_descriptor
*sd
,
418 unsigned int set_info
)
420 struct dir
*dir
= (struct dir
*)obj
;
426 assert( obj
->ops
== &dir_ops
);
428 unix_fd
= get_dir_unix_fd( dir
);
430 if (unix_fd
== -1 || fstat( unix_fd
, &st
) == -1) return 1;
432 if (set_info
& OWNER_SECURITY_INFORMATION
)
434 owner
= sd_get_owner( sd
);
437 set_error( STATUS_INVALID_SECURITY_DESCR
);
440 if (!obj
->sd
|| !security_equal_sid( owner
, sd_get_owner( obj
->sd
) ))
442 /* FIXME: get Unix uid and call fchown */
446 owner
= sd_get_owner( obj
->sd
);
448 owner
= token_get_user( current
->process
->token
);
450 if (set_info
& DACL_SECURITY_INFORMATION
)
452 /* keep the bits that we don't map to access rights in the ACL */
453 mode
= st
.st_mode
& (S_ISUID
|S_ISGID
|S_ISVTX
);
454 mode
|= sd_to_mode( sd
, owner
);
456 if (((st
.st_mode
^ mode
) & (S_IRWXU
|S_IRWXG
|S_IRWXO
)) && fchmod( unix_fd
, mode
) == -1)
465 static struct change_record
*get_first_change_record( struct dir
*dir
)
467 struct list
*ptr
= list_head( &dir
->change_records
);
468 if (!ptr
) return NULL
;
470 return LIST_ENTRY( ptr
, struct change_record
, entry
);
473 static int dir_close_handle( struct object
*obj
, struct process
*process
, obj_handle_t handle
)
475 struct dir
*dir
= (struct dir
*)obj
;
477 if (!fd_close_handle( obj
, process
, handle
)) return 0;
478 if (obj
->handle_count
== 1) release_dir_cache_entry( dir
); /* closing last handle, release cache */
479 return 1; /* ok to close */
482 static void dir_destroy( struct object
*obj
)
484 struct change_record
*record
;
485 struct dir
*dir
= (struct dir
*)obj
;
486 assert (obj
->ops
== &dir_ops
);
489 remove_change( dir
);
493 list_remove( &dir
->in_entry
);
494 free_inode( dir
->inode
);
497 while ((record
= get_first_change_record( dir
))) free( record
);
499 release_dir_cache_entry( dir
);
500 release_object( dir
->fd
);
502 if (inotify_fd
&& list_empty( &change_list
))
504 release_object( inotify_fd
);
509 struct dir
*get_dir_obj( struct process
*process
, obj_handle_t handle
, unsigned int access
)
511 return (struct dir
*)get_handle_obj( process
, handle
, access
, &dir_ops
);
514 static int dir_get_poll_events( struct fd
*fd
)
519 static enum server_fd_type
dir_get_fd_type( struct fd
*fd
)
529 struct list ch_entry
; /* entry in the children list */
530 struct list children
; /* children of this inode */
531 struct inode
*parent
; /* parent of this inode */
532 struct list dirs
; /* directory handles watching this inode */
533 struct list ino_entry
; /* entry in the inode hash */
534 struct list wd_entry
; /* entry in the watch descriptor hash */
535 dev_t dev
; /* device number */
536 ino_t ino
; /* device's inode number */
537 int wd
; /* inotify's watch descriptor */
538 char *name
; /* basename name of the inode */
541 static struct list inode_hash
[ HASH_SIZE
];
542 static struct list wd_hash
[ HASH_SIZE
];
544 static int inotify_add_dir( char *path
, unsigned int filter
);
546 static struct inode
*inode_from_wd( int wd
)
548 struct list
*bucket
= &wd_hash
[ wd
% HASH_SIZE
];
551 LIST_FOR_EACH_ENTRY( inode
, bucket
, struct inode
, wd_entry
)
558 static inline struct list
*get_hash_list( dev_t dev
, ino_t ino
)
560 return &inode_hash
[ (ino
^ dev
) % HASH_SIZE
];
563 static struct inode
*find_inode( dev_t dev
, ino_t ino
)
565 struct list
*bucket
= get_hash_list( dev
, ino
);
568 LIST_FOR_EACH_ENTRY( inode
, bucket
, struct inode
, ino_entry
)
569 if (inode
->ino
== ino
&& inode
->dev
== dev
)
575 static struct inode
*create_inode( dev_t dev
, ino_t ino
)
579 inode
= malloc( sizeof *inode
);
582 list_init( &inode
->children
);
583 list_init( &inode
->dirs
);
587 inode
->parent
= NULL
;
589 list_add_tail( get_hash_list( dev
, ino
), &inode
->ino_entry
);
594 static struct inode
*get_inode( dev_t dev
, ino_t ino
)
598 inode
= find_inode( dev
, ino
);
601 return create_inode( dev
, ino
);
604 static void inode_set_wd( struct inode
*inode
, int wd
)
607 list_remove( &inode
->wd_entry
);
609 list_add_tail( &wd_hash
[ wd
% HASH_SIZE
], &inode
->wd_entry
);
612 static void inode_set_name( struct inode
*inode
, const char *name
)
615 inode
->name
= name
? strdup( name
) : NULL
;
618 static void free_inode( struct inode
*inode
)
620 int subtree
= 0, watches
= 0;
621 struct inode
*tmp
, *next
;
624 LIST_FOR_EACH_ENTRY( dir
, &inode
->dirs
, struct dir
, in_entry
)
626 subtree
|= dir
->subtree
;
630 if (!subtree
&& !inode
->parent
)
632 LIST_FOR_EACH_ENTRY_SAFE( tmp
, next
, &inode
->children
,
633 struct inode
, ch_entry
)
635 assert( tmp
!= inode
);
636 assert( tmp
->parent
== inode
);
645 list_remove( &inode
->ch_entry
);
647 /* disconnect remaining children from the parent */
648 LIST_FOR_EACH_ENTRY_SAFE( tmp
, next
, &inode
->children
, struct inode
, ch_entry
)
650 list_remove( &tmp
->ch_entry
);
656 inotify_rm_watch( get_unix_fd( inotify_fd
), inode
->wd
);
657 list_remove( &inode
->wd_entry
);
659 list_remove( &inode
->ino_entry
);
665 static struct inode
*inode_add( struct inode
*parent
,
666 dev_t dev
, ino_t ino
, const char *name
)
670 inode
= get_inode( dev
, ino
);
676 list_add_tail( &parent
->children
, &inode
->ch_entry
);
677 inode
->parent
= parent
;
678 assert( inode
!= parent
);
680 inode_set_name( inode
, name
);
685 static struct inode
*inode_from_name( struct inode
*inode
, const char *name
)
689 LIST_FOR_EACH_ENTRY( i
, &inode
->children
, struct inode
, ch_entry
)
690 if (i
->name
&& !strcmp( i
->name
, name
))
695 static int inotify_get_poll_events( struct fd
*fd
);
696 static void inotify_poll_event( struct fd
*fd
, int event
);
698 static const struct fd_ops inotify_fd_ops
=
700 inotify_get_poll_events
, /* get_poll_events */
701 inotify_poll_event
, /* poll_event */
703 NULL
, /* get_fd_type */
705 NULL
, /* queue_async */
706 NULL
/* reselect_async */
709 static int inotify_get_poll_events( struct fd
*fd
)
714 static void inotify_do_change_notify( struct dir
*dir
, unsigned int action
,
715 unsigned int cookie
, const char *relpath
)
717 struct change_record
*record
;
719 assert( dir
->obj
.ops
== &dir_ops
);
723 size_t len
= strlen(relpath
);
724 record
= malloc( offsetof(struct change_record
, event
.name
[len
]) );
728 record
->cookie
= cookie
;
729 record
->event
.action
= action
;
730 memcpy( record
->event
.name
, relpath
, len
);
731 record
->event
.len
= len
;
733 list_add_tail( &dir
->change_records
, &record
->entry
);
736 fd_async_wake_up( dir
->fd
, ASYNC_TYPE_WAIT
, STATUS_ALERTED
);
739 static unsigned int filter_from_event( struct inotify_event
*ie
)
741 unsigned int filter
= 0;
743 if (ie
->mask
& (IN_MOVED_FROM
| IN_MOVED_TO
| IN_DELETE
| IN_CREATE
))
744 filter
|= FILE_NOTIFY_CHANGE_FILE_NAME
| FILE_NOTIFY_CHANGE_DIR_NAME
;
745 if (ie
->mask
& IN_MODIFY
)
746 filter
|= FILE_NOTIFY_CHANGE_SIZE
| FILE_NOTIFY_CHANGE_LAST_WRITE
| FILE_NOTIFY_CHANGE_LAST_ACCESS
;
747 if (ie
->mask
& IN_ATTRIB
)
748 filter
|= FILE_NOTIFY_CHANGE_ATTRIBUTES
| FILE_NOTIFY_CHANGE_SECURITY
;
749 if (ie
->mask
& IN_CREATE
)
750 filter
|= FILE_NOTIFY_CHANGE_CREATION
;
752 if (ie
->mask
& IN_ISDIR
)
753 filter
&= ~FILE_NOTIFY_CHANGE_FILE_NAME
;
755 filter
&= ~FILE_NOTIFY_CHANGE_DIR_NAME
;
760 /* scan up the parent directories for watches */
761 static unsigned int filter_from_inode( struct inode
*inode
, int is_parent
)
763 unsigned int filter
= 0;
766 /* combine filters from parents watching subtrees */
769 LIST_FOR_EACH_ENTRY( dir
, &inode
->dirs
, struct dir
, in_entry
)
770 if (dir
->subtree
|| !is_parent
)
771 filter
|= dir
->filter
;
773 inode
= inode
->parent
;
779 static char *inode_get_path( struct inode
*inode
, int sz
)
788 head
= list_head( &inode
->dirs
);
791 int unix_fd
= get_unix_fd( LIST_ENTRY( head
, struct dir
, in_entry
)->fd
);
792 path
= malloc ( 32 + sz
);
794 sprintf( path
, "/proc/self/fd/%u/", unix_fd
);
801 len
= strlen( inode
->name
);
802 path
= inode_get_path( inode
->parent
, sz
+ len
+ 1 );
806 strcat( path
, inode
->name
);
812 static void inode_check_dir( struct inode
*parent
, const char *name
)
820 path
= inode_get_path( parent
, strlen(name
) );
824 strcat( path
, name
);
826 if (stat( path
, &st
) < 0)
829 filter
= filter_from_inode( parent
, 1 );
833 inode
= inode_add( parent
, st
.st_dev
, st
.st_ino
, name
);
834 if (!inode
|| inode
->wd
!= -1)
837 wd
= inotify_add_dir( path
, filter
);
839 inode_set_wd( inode
, wd
);
847 static int prepend( char **path
, const char *segment
)
852 extra
= strlen( segment
) + 1;
855 int len
= strlen( *path
) + 1;
856 p
= realloc( *path
, len
+ extra
);
858 memmove( &p
[ extra
], p
, len
);
859 p
[ extra
- 1 ] = '/';
860 memcpy( p
, segment
, extra
- 1 );
866 memcpy( p
, segment
, extra
);
874 static void inotify_notify_all( struct inotify_event
*ie
)
876 unsigned int filter
, action
;
877 struct inode
*inode
, *i
;
881 inode
= inode_from_wd( ie
->wd
);
884 fprintf( stderr
, "no inode matches %d\n", ie
->wd
);
888 filter
= filter_from_event( ie
);
890 if (ie
->mask
& IN_CREATE
)
892 if (ie
->mask
& IN_ISDIR
)
893 inode_check_dir( inode
, ie
->name
);
895 action
= FILE_ACTION_ADDED
;
897 else if (ie
->mask
& IN_DELETE
)
898 action
= FILE_ACTION_REMOVED
;
899 else if (ie
->mask
& IN_MOVED_FROM
)
900 action
= FILE_ACTION_RENAMED_OLD_NAME
;
901 else if (ie
->mask
& IN_MOVED_TO
)
902 action
= FILE_ACTION_RENAMED_NEW_NAME
;
904 action
= FILE_ACTION_MODIFIED
;
907 * Work our way up the inode hierarchy
908 * extending the relative path as we go
909 * and notifying all recursive watches.
911 if (!prepend( &path
, ie
->name
))
914 for (i
= inode
; i
; i
= i
->parent
)
916 LIST_FOR_EACH_ENTRY( dir
, &i
->dirs
, struct dir
, in_entry
)
917 if ((filter
& dir
->filter
) && (i
==inode
|| dir
->subtree
))
918 inotify_do_change_notify( dir
, action
, ie
->cookie
, path
);
920 if (!i
->name
|| !prepend( &path
, i
->name
))
926 if (ie
->mask
& IN_DELETE
)
928 i
= inode_from_name( inode
, ie
->name
);
934 static void inotify_poll_event( struct fd
*fd
, int event
)
938 struct inotify_event
*ie
;
940 unix_fd
= get_unix_fd( fd
);
941 r
= read( unix_fd
, buffer
, sizeof buffer
);
944 fprintf(stderr
,"inotify_poll_event(): inotify read failed!\n");
948 for( ofs
= 0; ofs
< r
- offsetof(struct inotify_event
, name
); )
950 ie
= (struct inotify_event
*) &buffer
[ofs
];
951 ofs
+= offsetof( struct inotify_event
, name
[ie
->len
] );
953 if (ie
->len
) inotify_notify_all( ie
);
957 static inline struct fd
*create_inotify_fd( void )
961 unix_fd
= inotify_init();
964 return create_anonymous_fd( &inotify_fd_ops
, unix_fd
, NULL
, 0 );
967 static int map_flags( unsigned int filter
)
971 /* always watch these so we can track subdirectories in recursive watches */
972 mask
= (IN_MOVED_FROM
| IN_MOVED_TO
| IN_DELETE
| IN_CREATE
| IN_DELETE_SELF
);
974 if (filter
& FILE_NOTIFY_CHANGE_ATTRIBUTES
)
976 if (filter
& FILE_NOTIFY_CHANGE_SIZE
)
978 if (filter
& FILE_NOTIFY_CHANGE_LAST_WRITE
)
980 if (filter
& FILE_NOTIFY_CHANGE_LAST_ACCESS
)
982 if (filter
& FILE_NOTIFY_CHANGE_SECURITY
)
988 static int inotify_add_dir( char *path
, unsigned int filter
)
990 int wd
= inotify_add_watch( get_unix_fd( inotify_fd
),
991 path
, map_flags( filter
) );
993 set_fd_events( inotify_fd
, POLLIN
);
997 static int init_inotify( void )
1004 inotify_fd
= create_inotify_fd();
1008 for (i
=0; i
<HASH_SIZE
; i
++)
1010 list_init( &inode_hash
[i
] );
1011 list_init( &wd_hash
[i
] );
1017 static int inotify_adjust_changes( struct dir
*dir
)
1019 unsigned int filter
;
1020 struct inode
*inode
;
1028 unix_fd
= get_unix_fd( dir
->fd
);
1033 /* check if this fd is already being watched */
1034 if (-1 == fstat( unix_fd
, &st
))
1037 inode
= get_inode( st
.st_dev
, st
.st_ino
);
1039 inode
= create_inode( st
.st_dev
, st
.st_ino
);
1042 list_add_tail( &inode
->dirs
, &dir
->in_entry
);
1046 filter
= filter_from_inode( inode
, 0 );
1048 sprintf( path
, "/proc/self/fd/%u", unix_fd
);
1049 wd
= inotify_add_dir( path
, filter
);
1050 if (wd
== -1) return 0;
1052 inode_set_wd( inode
, wd
);
1057 static char *get_basename( const char *link
)
1059 char *buffer
, *name
= NULL
;
1064 buffer
= malloc( n
);
1065 if (!buffer
) return NULL
;
1067 r
= readlink( link
, buffer
, n
);
1082 while (r
> 0 && name
[ r
- 1 ] == '/' )
1086 name
= strrchr( name
, '/' );
1088 name
= strdup( &name
[1] );
1095 static int dir_add_to_existing_notify( struct dir
*dir
)
1097 struct inode
*inode
, *parent
;
1098 unsigned int filter
= 0;
1099 struct stat st
, st_new
;
1100 char link
[35], *name
;
1106 unix_fd
= get_unix_fd( dir
->fd
);
1108 /* check if it's in the list of inodes we want to watch */
1109 if (-1 == fstat( unix_fd
, &st_new
))
1111 inode
= find_inode( st_new
.st_dev
, st_new
.st_ino
);
1115 /* lookup the parent */
1116 sprintf( link
, "/proc/self/fd/%u/..", unix_fd
);
1117 if (-1 == stat( link
, &st
))
1121 * If there's no parent, stop. We could keep going adding
1122 * ../ to the path until we hit the root of the tree or
1123 * find a recursively watched ancestor.
1124 * Assume it's too expensive to search up the tree for now.
1126 parent
= find_inode( st
.st_dev
, st
.st_ino
);
1130 if (parent
->wd
== -1)
1133 filter
= filter_from_inode( parent
, 1 );
1137 sprintf( link
, "/proc/self/fd/%u", unix_fd
);
1138 name
= get_basename( link
);
1141 inode
= inode_add( parent
, st_new
.st_dev
, st_new
.st_ino
, name
);
1146 /* Couldn't find this inode at the start of the function, must be new */
1147 assert( inode
->wd
== -1 );
1149 wd
= inotify_add_dir( link
, filter
);
1151 inode_set_wd( inode
, wd
);
1158 static int init_inotify( void )
1163 static int inotify_adjust_changes( struct dir
*dir
)
1168 static void free_inode( struct inode
*inode
)
1173 static int dir_add_to_existing_notify( struct dir
*dir
)
1178 #endif /* USE_INOTIFY */
1180 struct object
*create_dir_obj( struct fd
*fd
, unsigned int access
, mode_t mode
)
1184 dir
= alloc_object( &dir_ops
);
1188 list_init( &dir
->change_records
);
1196 dir
->uid
= ~(uid_t
)0;
1197 dir
->client_process
= NULL
;
1198 set_fd_user( fd
, &dir_fd_ops
, &dir
->obj
);
1200 dir_add_to_existing_notify( dir
);
1205 /* retrieve (or allocate) the client-side directory cache entry */
1206 DECL_HANDLER(get_directory_cache_entry
)
1210 data_size_t free_size
;
1212 if (!(dir
= get_dir_obj( current
->process
, req
->handle
, 0 ))) return;
1214 if (!dir
->client_process
)
1216 if ((dir
->client_entry
= alloc_dir_cache_entry( dir
, current
->process
)) == -1) goto done
;
1217 dir
->client_process
= (struct process
*)grab_object( current
->process
);
1220 if (dir
->client_process
== current
->process
) reply
->entry
= dir
->client_entry
;
1221 else set_error( STATUS_SHARING_VIOLATION
);
1223 done
: /* allow freeing entries even on failure */
1224 free_size
= get_reply_max_size();
1225 free_entries
= get_free_dir_cache_entries( current
->process
, &free_size
);
1226 if (free_entries
) set_reply_data_ptr( free_entries
, free_size
);
1228 release_object( dir
);
1231 /* enable change notifications for a directory */
1232 DECL_HANDLER(read_directory_changes
)
1235 struct async
*async
;
1239 set_error(STATUS_INVALID_PARAMETER
);
1243 dir
= get_dir_obj( current
->process
, req
->async
.handle
, 0 );
1247 /* requests don't timeout */
1248 if (!(async
= create_async( dir
->fd
, current
, &req
->async
, NULL
))) goto end
;
1249 fd_queue_async( dir
->fd
, async
, ASYNC_TYPE_WAIT
);
1251 /* assign it once */
1255 insert_change( dir
);
1256 dir
->filter
= req
->filter
;
1257 dir
->subtree
= req
->subtree
;
1258 dir
->want_data
= req
->want_data
;
1261 /* if there's already a change in the queue, send it */
1262 if (!list_empty( &dir
->change_records
))
1263 fd_async_wake_up( dir
->fd
, ASYNC_TYPE_WAIT
, STATUS_ALERTED
);
1265 /* setup the real notification */
1266 if (!inotify_adjust_changes( dir
))
1267 dnotify_adjust_changes( dir
);
1269 set_error(STATUS_PENDING
);
1271 release_object( async
);
1273 release_object( dir
);
1276 DECL_HANDLER(read_change
)
1278 struct change_record
*record
, *next
;
1284 dir
= get_dir_obj( current
->process
, req
->handle
, 0 );
1288 list_init( &events
);
1289 list_move_tail( &events
, &dir
->change_records
);
1290 release_object( dir
);
1292 if (list_empty( &events
))
1294 set_error( STATUS_NO_DATA_DETECTED
);
1298 LIST_FOR_EACH_ENTRY( record
, &events
, struct change_record
, entry
)
1300 size
+= (offsetof(struct filesystem_event
, name
[record
->event
.len
])
1301 + sizeof(int)-1) / sizeof(int) * sizeof(int);
1304 if (size
> get_reply_max_size())
1305 set_error( STATUS_BUFFER_TOO_SMALL
);
1306 else if ((data
= mem_alloc( size
)) != NULL
)
1309 LIST_FOR_EACH_ENTRY( record
, &events
, struct change_record
, entry
)
1311 data_size_t len
= offsetof( struct filesystem_event
, name
[record
->event
.len
] );
1313 /* FIXME: rename events are sometimes reported as delete/create */
1314 if (record
->event
.action
== FILE_ACTION_RENAMED_OLD_NAME
)
1316 struct list
*elem
= list_next( &events
, &record
->entry
);
1318 next
= LIST_ENTRY(elem
, struct change_record
, entry
);
1320 if (elem
&& next
->cookie
== record
->cookie
)
1323 record
->event
.action
= FILE_ACTION_REMOVED
;
1325 else if (record
->event
.action
== FILE_ACTION_RENAMED_NEW_NAME
&& record
->cookie
)
1326 record
->event
.action
= FILE_ACTION_ADDED
;
1328 memcpy( event
, &record
->event
, len
);
1330 if (len
% sizeof(int))
1332 memset( event
, 0, sizeof(int) - len
% sizeof(int) );
1333 event
+= sizeof(int) - len
% sizeof(int);
1336 set_reply_data_ptr( data
, size
);
1339 LIST_FOR_EACH_ENTRY_SAFE( record
, next
, &events
, struct change_record
, entry
)
1341 list_remove( &record
->entry
);