2 * Copyright (c) 2008 Jakub Jermar
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 #include <vfs/canonify.h>
37 #include <vfs/vfs_sess.h>
45 #include <sys/statfs.h>
46 #include <sys/types.h>
47 #include <ipc/services.h>
50 #include <fibril_synch.h>
/* Guards lazy creation of the shared VFS session (vfs_sess). */
58 static FIBRIL_MUTEX_INITIALIZE(vfs_mutex
);
/* Process-wide session to the VFS server; created on first exchange. */
59 static async_sess_t
*vfs_sess
= NULL
;
/* Guards the current-working-directory state below (cwd_fd/cwd_path/cwd_size). */
61 static FIBRIL_MUTEX_INITIALIZE(cwd_mutex
);
/* File descriptor of the current working directory; -1 until chdir() succeeds. */
63 static int cwd_fd
= -1;
/* Canonical absolute path of the CWD (heap-allocated), or NULL if unset. */
64 static char *cwd_path
= NULL
;
/* String size of cwd_path (excluding terminator); 0 if unset. */
65 static size_t cwd_size
= 0;
67 /** Start an async exchange on the VFS session.
 *
 * Lazily connects to the VFS service on first use; the connection attempt
 * blocks until the service becomes available.
 *
69 * @return New exchange.
 */
72 async_exch_t
*vfs_exchange_begin(void)
/* Serialize session creation so only one fibril connects. */
74 fibril_mutex_lock(&vfs_mutex
);
76 while (vfs_sess
== NULL
)
/* NOTE(review): trailing arguments of this call were lost in extraction. */
77 vfs_sess
= service_connect_blocking(EXCHANGE_PARALLEL
, SERVICE_VFS
,
80 fibril_mutex_unlock(&vfs_mutex
);
82 return async_exchange_begin(vfs_sess
);
85 /** Finish an async exchange on the VFS session.
 *
87 * @param exch Exchange to be finished.
 */
90 void vfs_exchange_end(async_exch_t
*exch
)
/* Thin wrapper: simply hand the exchange back to the async framework. */
92 async_exchange_end(exch
);
/** Convert a path to an absolute, canonical form.
 *
 * Prepends the current working directory to relative paths, canonifies the
 * result, and returns a freshly allocated copy. Caller frees the result.
 *
 * @param path   Path to absolutize (relative or absolute).
 * @param retlen Output: length of the returned canonical path.
 *
 * @return Newly allocated canonical path, or NULL on failure
 *         (as suggested by the unlock-and-bail sequences below).
 *
 * NOTE(review): several lines (declarations, NULL checks, returns, the
 * branch distinguishing relative from absolute paths) were lost in
 * extraction; comments below describe only the visible statements.
 */
95 char *absolutize(const char *path
, size_t *retlen
)
/* CWD state is read under cwd_mutex for the whole operation. */
100 fibril_mutex_lock(&cwd_mutex
);
101 size_t size
= str_size(path
);
104 fibril_mutex_unlock(&cwd_mutex
);
/* Relative path: allocate room for "<cwd>/<path>\0". */
107 ncwd_path_nc
= malloc(cwd_size
+ 1 + size
+ 1);
109 fibril_mutex_unlock(&cwd_mutex
);
112 str_cpy(ncwd_path_nc
, cwd_size
+ 1 + size
+ 1, cwd_path
);
113 ncwd_path_nc
[cwd_size
] = '/';
114 ncwd_path_nc
[cwd_size
+ 1] = '\0';
/* Absolute path: buffer for the path alone. */
116 ncwd_path_nc
= malloc(size
+ 1);
118 fibril_mutex_unlock(&cwd_mutex
);
121 ncwd_path_nc
[0] = '\0';
123 str_append(ncwd_path_nc
, cwd_size
+ 1 + size
+ 1, path
);
124 ncwd_path
= canonify(ncwd_path_nc
, retlen
);
126 fibril_mutex_unlock(&cwd_mutex
);
/*
131 * We need to clone ncwd_path because canonify() works in-place and thus
132 * the address in ncwd_path need not be the same as ncwd_path_nc, even
133 * though they both point into the same dynamically allocated buffer.
 */
135 ncwd_path
= str_dup(ncwd_path
);
138 fibril_mutex_unlock(&cwd_mutex
);
141 fibril_mutex_unlock(&cwd_mutex
);
/** Mount a file system on a mount point.
 *
 * Protocol (visible here): send VFS_IN_MOUNT, then stream the mount-point
 * path, the mount options, and the file-system name to the VFS server as
 * three data writes, ping the server, and collect the final answer.
 *
 * @param fs_name  File system type name (e.g. "fat").
 * @param mp       Mount point path.
 * @param fqsn     Fully qualified service name of the backing device;
 *                 "" requests a fresh null/%d pseudo device.
 * @param opts     Mount options string.
 * @param flags    Lookup/mount flags.
 * @param instance File system instance number.
 *
 * @return EOK on success or an error code (visible returns propagate
 *         rc_orig from async_wait_for on the error paths).
 *
 * NOTE(review): extraction dropped many lines here (declarations of
 * null_id/mpa_size/rc_orig, the `if (rc != EOK)` guards preceding each
 * cleanup sequence, free(mpa), and the final return); comments describe
 * only the visible statements.
 */
145 int mount(const char *fs_name
, const char *mp
, const char *fqsn
,
146 const char *opts
, unsigned int flags
, unsigned int instance
)
149 char null
[LOC_NAME_MAXLEN
];
151 if (str_cmp(fqsn
, "") == 0) {
152 /* No device specified, create a fresh
153 null/%d device instead */
154 null_id
= loc_null_create();
/* Build the service name of the freshly created null device. */
159 snprintf(null
, LOC_NAME_MAXLEN
, "null/%d", null_id
);
163 service_id_t service_id
;
164 int res
= loc_service_get_id(fqsn
, &service_id
, flags
);
/* Resolution failed: tear down the null device if one was created. */
167 loc_null_destroy(null_id
);
173 char *mpa
= absolutize(mp
, &mpa_size
);
176 loc_null_destroy(null_id
);
181 async_exch_t
*exch
= vfs_exchange_begin();
/* Kick off the mount request; payloads follow as data writes. */
184 aid_t req
= async_send_3(exch
, VFS_IN_MOUNT
, service_id
, flags
,
186 sysarg_t rc
= async_data_write_start(exch
, (void *) mpa
, mpa_size
);
/* Error path: finish exchange, reap the request, clean up, bail. */
188 vfs_exchange_end(exch
);
190 async_wait_for(req
, &rc_orig
);
193 loc_null_destroy(null_id
);
198 return (int) rc_orig
;
/* Second payload: mount options. */
201 rc
= async_data_write_start(exch
, (void *) opts
, str_size(opts
));
203 vfs_exchange_end(exch
);
205 async_wait_for(req
, &rc_orig
);
208 loc_null_destroy(null_id
);
213 return (int) rc_orig
;
/* Third payload: file system name. */
216 rc
= async_data_write_start(exch
, (void *) fs_name
, str_size(fs_name
));
218 vfs_exchange_end(exch
);
220 async_wait_for(req
, &rc_orig
);
223 loc_null_destroy(null_id
);
228 return (int) rc_orig
;
231 /* Ask VFS whether it likes fs_name. */
232 rc
= async_req_0_0(exch
, VFS_IN_PING
);
234 vfs_exchange_end(exch
);
236 async_wait_for(req
, &rc_orig
);
239 loc_null_destroy(null_id
);
244 return (int) rc_orig
;
/* Success path: finish the exchange and wait for the final verdict. */
247 vfs_exchange_end(exch
);
249 async_wait_for(req
, &rc
);
/* Mount failed: the temporary null device is no longer needed. */
251 if ((rc
!= EOK
) && (null_id
!= -1))
252 loc_null_destroy(null_id
);
/** Unmount the file system mounted at a mount point.
 *
 * Sends VFS_IN_UNMOUNT followed by the absolutized mount-point path.
 *
 * @param mp Mount point path.
 *
 * @return EOK on success or an error code.
 *
 * NOTE(review): declarations (mpa, mpa_size, req, rc, rc_orig), NULL/error
 * checks, free(mpa) and the final return were lost in extraction.
 */
257 int unmount(const char *mp
)
265 mpa
= absolutize(mp
, &mpa_size
);
269 async_exch_t
*exch
= vfs_exchange_begin();
271 req
= async_send_0(exch
, VFS_IN_UNMOUNT
, NULL
);
/* Stream the mount-point path to the server. */
272 rc
= async_data_write_start(exch
, (void *) mpa
, mpa_size
);
/* Error path: reap the request and return its original error. */
274 vfs_exchange_end(exch
);
276 async_wait_for(req
, &rc_orig
);
280 return (int) rc_orig
;
/* Success path: wait for the server's final answer. */
284 vfs_exchange_end(exch
);
286 async_wait_for(req
, &rc
);
/** Open a file or directory given an absolutized path (common backend).
 *
 * @param abs      Canonical absolute path.
 * @param abs_size Size of @a abs in bytes.
 * @param lflag    Lookup flags (e.g. L_FILE, L_DIRECTORY).
 * @param oflag    Open flags.
 *
 * @return New file descriptor (first answer argument) on success,
 *         or an error code on failure.
 *
 * NOTE(review): the declarations of answer/rc_orig and the rc checks
 * between the visible statements were dropped in extraction.
 */
291 static int open_internal(const char *abs
, size_t abs_size
, int lflag
, int oflag
)
293 async_exch_t
*exch
= vfs_exchange_begin();
/* Request the open; the path follows as a data write. */
296 aid_t req
= async_send_3(exch
, VFS_IN_OPEN
, lflag
, oflag
, 0, &answer
);
297 sysarg_t rc
= async_data_write_start(exch
, abs
, abs_size
);
/* Error path: reap the request and propagate its original error. */
300 vfs_exchange_end(exch
);
303 async_wait_for(req
, &rc_orig
);
308 return (int) rc_orig
;
311 vfs_exchange_end(exch
);
312 async_wait_for(req
, &rc
);
/* The new file descriptor is returned in the first answer argument. */
317 return (int) IPC_GET_ARG1(answer
);
/** Open a file (POSIX-like front end).
 *
 * Absolutizes @a path and delegates to open_internal() with L_FILE.
 *
 * @param path  Path to the file.
 * @param oflag Open flags; the variadic mode argument is accepted for
 *              POSIX compatibility (not visibly used here).
 *
 * @return File descriptor or an error code (via open_internal()).
 *
 * NOTE(review): abs_size declaration, NULL check, free(abs) and the
 * return of ret were lost in extraction.
 */
320 int open(const char *path
, int oflag
, ...)
323 char *abs
= absolutize(path
, &abs_size
);
327 int ret
= open_internal(abs
, abs_size
, L_FILE
, oflag
);
/** Close a file descriptor.
 *
 * @param fildes File descriptor to close.
 *
 * @return EOK on success or an error code (final return not visible).
 */
333 int close(int fildes
)
337 async_exch_t
*exch
= vfs_exchange_begin();
/* Single round-trip: no payload, no out-arguments. */
338 rc
= async_req_1_0(exch
, VFS_IN_CLOSE
, fildes
);
339 vfs_exchange_end(exch
);
/** Read from a file descriptor.
 *
 * Sends VFS_IN_READ and receives the data via an IPC data-read transfer.
 * May return fewer bytes than requested (short read); see read_all().
 *
 * @param fildes File descriptor.
 * @param buf    Destination buffer.
 * @param nbyte  Maximum number of bytes to read.
 *
 * @return Number of bytes read (first answer argument) or a negative
 *         error code.
 *
 * NOTE(review): declarations of req/rc/rc_orig/answer and the rc checks
 * between visible statements were dropped in extraction.
 */
344 ssize_t
read(int fildes
, void *buf
, size_t nbyte
)
350 async_exch_t
*exch
= vfs_exchange_begin();
352 req
= async_send_1(exch
, VFS_IN_READ
, fildes
, &answer
);
/* Server pushes the file data directly into our buffer. */
353 rc
= async_data_read_start(exch
, (void *)buf
, nbyte
);
/* Error path: reap the request and return its original error. */
355 vfs_exchange_end(exch
);
358 async_wait_for(req
, &rc_orig
);
363 return (ssize_t
) rc_orig
;
365 vfs_exchange_end(exch
);
366 async_wait_for(req
, &rc
);
/* Byte count actually read comes back in the first answer argument. */
368 return (ssize_t
) IPC_GET_ARG1(answer
);
/** Write to a file descriptor.
 *
 * Sends VFS_IN_WRITE and transfers the data via an IPC data-write.
 * May write fewer bytes than requested (short write); see write_all().
 *
 * @param fildes File descriptor.
 * @param buf    Source buffer.
 * @param nbyte  Number of bytes to write.
 *
 * @return Number of bytes written (first answer argument) or a negative
 *         error code.
 *
 * NOTE(review): declarations of req/rc/rc_orig/answer and the rc checks
 * between visible statements were dropped in extraction.
 */
373 ssize_t
write(int fildes
, const void *buf
, size_t nbyte
)
379 async_exch_t
*exch
= vfs_exchange_begin();
381 req
= async_send_1(exch
, VFS_IN_WRITE
, fildes
, &answer
);
/* Push the caller's data to the server. */
382 rc
= async_data_write_start(exch
, (void *)buf
, nbyte
);
/* Error path: reap the request and return its original error. */
384 vfs_exchange_end(exch
);
387 async_wait_for(req
, &rc_orig
);
392 return (ssize_t
) rc_orig
;
394 vfs_exchange_end(exch
);
395 async_wait_for(req
, &rc
);
/* Byte count actually written comes back in the first answer argument. */
397 return (ssize_t
) IPC_GET_ARG1(answer
);
402 /** Read entire buffer.
 *
404 * In face of short reads this function continues reading until either
405 * the entire buffer is read or no more data is available (at end of file).
 *
407 * @param fildes File descriptor
408 * @param buf Buffer, @a nbytes bytes long
409 * @param nbytes Number of bytes to read
 *
411 * @return On success, positive number of bytes read.
412 * On failure, negative error code from read().
 */
414 ssize_t
read_all(int fildes
, void *buf
, size_t nbyte
)
/* Walk the buffer with a byte pointer as chunks arrive. */
418 uint8_t *bp
= (uint8_t *) buf
;
/* NOTE(review): declarations of cnt/nread and the loop body that advances
 * bp/nread were dropped in extraction; only the read call and loop
 * condition are visible. */
423 cnt
= read(fildes
, bp
, nbyte
- nread
);
/* Keep reading while progress is made and bytes remain. */
424 } while (cnt
> 0 && (nbyte
- nread
- cnt
) > 0);
432 /** Write entire buffer.
 *
434 * This function fails if it cannot write exactly @a len bytes to the file.
 *
436 * @param fildes File descriptor
437 * @param buf Data, @a nbytes bytes long
438 * @param nbytes Number of bytes to write
 *
 * @return Positive byte count on success; error value from write() on
 *         failure. (NOTE(review): the original doc line read "EOK on
 *         error", which looks like a typo — confirm against callers.)
 */
443 ssize_t
write_all(int fildes
, const void *buf
, size_t nbyte
)
446 ssize_t nwritten
= 0;
/* Walk the buffer with a byte pointer as chunks are written. */
447 const uint8_t *bp
= (uint8_t *) buf
;
/* NOTE(review): the loop opening and the statements advancing bp/nwritten
 * were dropped in extraction; only the write call and conditions remain. */
452 cnt
= write(fildes
, bp
, nbyte
- nwritten
);
/* Keep writing while progress is made and bytes remain. */
453 } while (cnt
> 0 && ((ssize_t
)nbyte
- nwritten
- cnt
) > 0);
/* Stopped early without covering the whole buffer => failure. */
458 if ((ssize_t
)nbyte
- nwritten
- cnt
> 0)
/** Flush cached data of a file to its backing store.
 *
 * @param fildes File descriptor to synchronize.
 *
 * @return EOK on success or an error code (final return not visible).
 */
464 int fsync(int fildes
)
466 async_exch_t
*exch
= vfs_exchange_begin();
/* Single round-trip sync request. */
467 sysarg_t rc
= async_req_1_0(exch
, VFS_IN_SYNC
, fildes
);
468 vfs_exchange_end(exch
);
/** Reposition the file offset of an open file.
 *
 * The 64-bit offset is split into two 32-bit IPC arguments; the new
 * offset comes back the same way and is merged before returning.
 *
 * @param fildes File descriptor.
 * @param offset Offset to apply.
 * @param whence SEEK_SET / SEEK_CUR / SEEK_END semantics.
 *
 * @return New file offset, or (presumably) an error value on failure —
 *         the rc check between the call and the return is not visible.
 */
473 off64_t
lseek(int fildes
, off64_t offset
, int whence
)
475 async_exch_t
*exch
= vfs_exchange_begin();
/* 64-bit offset travels as LOWER32/UPPER32 halves. */
479 sysarg_t rc
= async_req_4_2(exch
, VFS_IN_SEEK
, fildes
,
480 LOWER32(offset
), UPPER32(offset
), whence
,
481 &newoff_lo
, &newoff_hi
);
483 vfs_exchange_end(exch
);
/* Reassemble the 64-bit result from its two halves. */
488 return (off64_t
) MERGE_LOUP32(newoff_lo
, newoff_hi
);
/** Truncate (or extend) a file to a given length.
 *
 * @param fildes File descriptor.
 * @param length New file length; sent as two 32-bit halves.
 *
 * @return EOK on success or an error code (final return not visible).
 */
491 int ftruncate(int fildes
, aoff64_t length
)
495 async_exch_t
*exch
= vfs_exchange_begin();
496 rc
= async_req_3_0(exch
, VFS_IN_TRUNCATE
, fildes
,
497 LOWER32(length
), UPPER32(length
));
498 vfs_exchange_end(exch
);
/** Get file status by file descriptor.
 *
 * Sends VFS_IN_FSTAT and reads the stat structure back from the server.
 *
 * @param fildes File descriptor.
 * @param stat   Output: stat structure filled by the server.
 *
 * @return EOK on success or an error code.
 *
 * NOTE(review): declarations of req/rc/rc_orig and the rc check between
 * the visible statements were dropped in extraction; the error-path
 * return is cast to ssize_t in an int function — looks like an upstream
 * quirk, confirm against the original source.
 */
503 int fstat(int fildes
, struct stat
*stat
)
508 async_exch_t
*exch
= vfs_exchange_begin();
510 req
= async_send_1(exch
, VFS_IN_FSTAT
, fildes
, NULL
);
/* Server writes the stat structure straight into the caller's buffer. */
511 rc
= async_data_read_start(exch
, (void *) stat
, sizeof(struct stat
));
/* Error path: reap the request and return its original error. */
513 vfs_exchange_end(exch
);
516 async_wait_for(req
, &rc_orig
);
521 return (ssize_t
) rc_orig
;
523 vfs_exchange_end(exch
);
524 async_wait_for(req
, &rc
);
/** Get file status by path.
 *
 * Absolutizes @a path, sends VFS_IN_STAT, streams the path to the
 * server, then reads the stat structure back.
 *
 * @param path Path to the file.
 * @param stat Output: stat structure filled by the server.
 *
 * @return EOK on success or an error code.
 *
 * NOTE(review): declarations (pa_size, req, rc, rc_orig), NULL/rc checks,
 * free(pa) and the final return were dropped in extraction.
 */
529 int stat(const char *path
, struct stat
*stat
)
536 char *pa
= absolutize(path
, &pa_size
);
540 async_exch_t
*exch
= vfs_exchange_begin();
542 req
= async_send_0(exch
, VFS_IN_STAT
, NULL
);
/* First transfer: send the canonical path. */
543 rc
= async_data_write_start(exch
, pa
, pa_size
);
/* Error path: reap the request and return its original error. */
545 vfs_exchange_end(exch
);
547 async_wait_for(req
, &rc_orig
);
551 return (int) rc_orig
;
/* Second transfer: receive the stat structure. */
553 rc
= async_data_read_start(exch
, stat
, sizeof(struct stat
));
555 vfs_exchange_end(exch
);
557 async_wait_for(req
, &rc_orig
);
561 return (int) rc_orig
;
563 vfs_exchange_end(exch
);
565 async_wait_for(req
, &rc
);
/** Open a directory stream.
 *
 * Allocates a DIR handle and opens the directory via open_internal()
 * with L_DIRECTORY lookup semantics.
 *
 * @param dirname Path of the directory.
 *
 * @return New DIR handle, or (presumably) NULL on failure — the NULL
 *         checks, dirp->fd assignment, free paths and final return are
 *         not visible in this extraction.
 */
569 DIR *opendir(const char *dirname
)
571 DIR *dirp
= malloc(sizeof(DIR));
576 char *abs
= absolutize(dirname
, &abs_size
);
582 int ret
= open_internal(abs
, abs_size
, L_DIRECTORY
, 0);
/** Read the next directory entry from a directory stream.
 *
 * Reads the entry name directly into the DIR handle's embedded dirent
 * (dirp->res).
 *
 * @param dirp Directory stream.
 *
 * @return Pointer to the entry, or NULL at end/error (return statements
 *         not visible in this extraction).
 */
594 struct dirent
*readdir(DIR *dirp
)
/* One read() per entry; the VFS server frames entries for directory fds. */
596 ssize_t len
= read(dirp
->fd
, &dirp
->res
.d_name
[0], NAME_MAX
+ 1);
/** Rewind a directory stream to its first entry.
 *
 * @param dirp Directory stream.
 */
602 void rewinddir(DIR *dirp
)
/* Seek result deliberately ignored: rewinddir() has no way to report it. */
604 (void) lseek(dirp
->fd
, 0, SEEK_SET
);
/** Close a directory stream and release its handle.
 *
 * @param dirp Directory stream (freed by the non-visible remainder,
 *             presumably — confirm against the original source).
 *
 * @return Success/error code (final return not visible).
 */
607 int closedir(DIR *dirp
)
/* close() result ignored; the handle is being torn down regardless. */
609 (void) close(dirp
->fd
);
/** Create a directory.
 *
 * Absolutizes @a path, sends VFS_IN_MKDIR with the mode, then streams
 * the path to the server.
 *
 * @param path Path of the directory to create.
 * @param mode Creation mode (forwarded as the IPC argument).
 *
 * @return EOK on success or an error code.
 *
 * NOTE(review): declarations (pa_size, req, rc, rc_orig), NULL/rc checks,
 * free(pa) and the final return were dropped in extraction.
 */
614 int mkdir(const char *path
, mode_t mode
)
620 char *pa
= absolutize(path
, &pa_size
);
624 async_exch_t
*exch
= vfs_exchange_begin();
626 req
= async_send_1(exch
, VFS_IN_MKDIR
, mode
, NULL
);
/* Stream the canonical path to the server. */
627 rc
= async_data_write_start(exch
, pa
, pa_size
);
/* Error path: reap the request and return its original error. */
629 vfs_exchange_end(exch
);
633 async_wait_for(req
, &rc_orig
);
638 return (int) rc_orig
;
640 vfs_exchange_end(exch
);
642 async_wait_for(req
, &rc
);
/** Remove a directory entry (common backend for unlink()/rmdir()).
 *
 * @param path  Path to remove.
 * @param lflag Lookup flag selecting the object kind
 *              (L_NONE for files, L_DIRECTORY for directories).
 *
 * @return EOK on success or an error code.
 *
 * NOTE(review): declarations (pa_size, req, rc, rc_orig), NULL/rc checks,
 * free(pa) and the final return were dropped in extraction.
 */
646 static int _unlink(const char *path
, int lflag
)
652 char *pa
= absolutize(path
, &pa_size
);
656 async_exch_t
*exch
= vfs_exchange_begin();
658 req
= async_send_1(exch
, VFS_IN_UNLINK
, lflag
, NULL
);
/* Stream the canonical path to the server. */
659 rc
= async_data_write_start(exch
, pa
, pa_size
);
/* Error path: reap the request and return its original error. */
661 vfs_exchange_end(exch
);
665 async_wait_for(req
, &rc_orig
);
670 return (int) rc_orig
;
672 vfs_exchange_end(exch
);
674 async_wait_for(req
, &rc
);
/** Remove a file (thin wrapper over _unlink() with L_NONE). */
678 int unlink(const char *path
)
680 return _unlink(path
, L_NONE
);
/** Remove a directory (thin wrapper over _unlink() with L_DIRECTORY). */
683 int rmdir(const char *path
)
685 return _unlink(path
, L_DIRECTORY
);
/** Rename (move) a directory entry.
 *
 * Absolutizes both paths, sends VFS_IN_RENAME, then streams the old
 * path followed by the new path to the server.
 *
 * @param old Existing path.
 * @param new Desired new path.
 *
 * @return EOK on success or an error code.
 *
 * NOTE(review): declarations (olda_size, newa_size, req, rc, rc_orig),
 * NULL/rc checks, the free() calls and the final return were dropped
 * in extraction.
 */
688 int rename(const char *old
, const char *new)
695 char *olda
= absolutize(old
, &olda_size
);
700 char *newa
= absolutize(new, &newa_size
);
706 async_exch_t
*exch
= vfs_exchange_begin();
708 req
= async_send_0(exch
, VFS_IN_RENAME
, NULL
);
/* First payload: the existing path. */
709 rc
= async_data_write_start(exch
, olda
, olda_size
);
/* Error path: reap the request and return its original error. */
711 vfs_exchange_end(exch
);
714 async_wait_for(req
, &rc_orig
);
718 return (int) rc_orig
;
/* Second payload: the destination path. */
720 rc
= async_data_write_start(exch
, newa
, newa_size
);
722 vfs_exchange_end(exch
);
725 async_wait_for(req
, &rc_orig
);
729 return (int) rc_orig
;
731 vfs_exchange_end(exch
);
734 async_wait_for(req
, &rc
);
/** Change the current working directory.
 *
 * Opens the directory with O_DESC to obtain a descriptor, then updates
 * the cwd_* globals under cwd_mutex. The statements that swap in the
 * new fd/path (closing the old ones) fall between the visible lock and
 * unlock but were dropped in extraction.
 *
 * @param path New working directory.
 *
 * @return EOK on success or an error code (final return not visible).
 */
738 int chdir(const char *path
)
741 char *abs
= absolutize(path
, &abs_size
);
/* O_DESC: keep the descriptor open to pin the CWD. */
745 int fd
= open_internal(abs
, abs_size
, L_DIRECTORY
, O_DESC
);
752 fibril_mutex_lock(&cwd_mutex
);
765 fibril_mutex_unlock(&cwd_mutex
);
/** Copy the current working directory path into a caller buffer.
 *
 * @param buf  Destination buffer.
 * @param size Size of @a buf in bytes.
 *
 * @return @a buf on success; NULL when no CWD is set or the buffer is
 *         too small (the early-path return statement is not visible).
 */
769 char *getcwd(char *buf
, size_t size
)
774 fibril_mutex_lock(&cwd_mutex
);
/* Fail when CWD is unset (cwd_size == 0) or buf cannot hold path + NUL. */
776 if ((cwd_size
== 0) || (size
< cwd_size
+ 1)) {
777 fibril_mutex_unlock(&cwd_mutex
);
781 str_cpy(buf
, size
, cwd_path
);
782 fibril_mutex_unlock(&cwd_mutex
);
/** Open a session to the service backing an open file descriptor.
 *
 * Uses fstat() to discover the service id behind @a fildes, then
 * connects to that service via the location service.
 *
 * @param mgmt   Exchange management style for the new session.
 * @param fildes Open file descriptor.
 *
 * @return New session, or (presumably) NULL on error — the rc and
 *         stat.service validity checks are not visible here.
 */
787 async_sess_t
*fd_session(exch_mgmt_t mgmt
, int fildes
)
790 int rc
= fstat(fildes
, &stat
);
801 return loc_service_connect(mgmt
, stat
.service
, 0);
/** Duplicate @a oldfd onto @a newfd.
 *
 * @param oldfd Source file descriptor.
 * @param newfd Desired target descriptor number.
 *
 * @return The resulting descriptor (ret) on success or an error code —
 *         the rc check and final return are not visible here.
 */
804 int dup2(int oldfd
, int newfd
)
806 async_exch_t
*exch
= vfs_exchange_begin();
/* One out-argument: the descriptor actually assigned. */
809 sysarg_t rc
= async_req_2_1(exch
, VFS_IN_DUP
, oldfd
, newfd
, &ret
);
811 vfs_exchange_end(exch
);
/* NOTE(review): the signature line of this function was lost in
 * extraction. From the VFS_IN_WAIT_HANDLE method and the single
 * out-argument it appears to wait for / receive a passed file handle
 * from the VFS server — confirm against the original source. */
821 async_exch_t
*exch
= vfs_exchange_begin();
824 sysarg_t rc
= async_req_0_1(exch
, VFS_IN_WAIT_HANDLE
, &ret
);
826 vfs_exchange_end(exch
);
/** Fetch the list of mounted file systems from the VFS server.
 *
 * Protocol (visible here): send VFS_IN_MTAB_GET, learn the number of
 * mounted file systems, then for each one read the mount point, the
 * options and the fs name as three data transfers, and fetch the
 * instance and service id as a pair of reply arguments. Each entry is
 * heap-allocated, zeroed and appended to @a mtab_list; the caller owns
 * the entries afterwards.
 *
 * @param mtab_list Output list; mtab_ent_t entries are appended via
 *                  their embedded link.
 *
 * @return EOK on success or an error code (final return not visible).
 *
 * NOTE(review): declarations of req/rc/i/p, the rc checks after each
 * transfer, the size arguments of the data reads and the allocation
 * failure handling were dropped in extraction.
 */
834 int get_mtab_list(list_t
*mtab_list
)
839 sysarg_t num_mounted_fs
;
841 async_exch_t
*exch
= vfs_exchange_begin();
843 req
= async_send_0(exch
, VFS_IN_MTAB_GET
, NULL
);
845 /* Ask VFS how many filesystems are mounted */
846 rc
= async_req_0_1(exch
, VFS_IN_PING
, &num_mounted_fs
);
/* One iteration per mounted file system. */
850 for (i
= 0; i
< num_mounted_fs
; ++i
) {
851 mtab_ent_t
*mtab_ent
;
853 mtab_ent
= malloc(sizeof(mtab_ent_t
));
/* Zero the entry so unread fields stay well-defined. */
859 memset(mtab_ent
, 0, sizeof(mtab_ent_t
));
/* Three data reads: mount point, options, fs name. */
861 rc
= async_data_read_start(exch
, (void *) mtab_ent
->mp
,
866 rc
= async_data_read_start(exch
, (void *) mtab_ent
->opts
,
871 rc
= async_data_read_start(exch
, (void *) mtab_ent
->fs_name
,
/* Instance number and service id arrive as a reply pair. */
878 rc
= async_req_0_2(exch
, VFS_IN_PING
, &p
[0], &p
[1]);
882 mtab_ent
->instance
= p
[0];
883 mtab_ent
->service_id
= p
[1];
/* Hand the entry over to the caller's list. */
885 link_initialize(&mtab_ent
->link
);
886 list_append(&mtab_ent
->link
, mtab_list
);
890 async_wait_for(req
, &rc
);
891 vfs_exchange_end(exch
);
/** Get file system statistics for the file system containing @a path.
 *
 * Absolutizes @a path, sends VFS_IN_STATFS, streams the path to the
 * server, then reads the statfs structure back.
 *
 * @param path Any path on the file system of interest.
 * @param st   Output: statfs structure filled by the server.
 *
 * @return EOK on success; otherwise the request's own error (rc_orig)
 *         takes precedence over the transfer error (rc).
 *
 * NOTE(review): declarations (pa_size, req), NULL/rc checks between the
 * visible statements and free(pa) were dropped in extraction; the
 * closing brace lies beyond the visible range.
 */
895 int statfs(const char *path
, struct statfs
*st
)
897 sysarg_t rc
, rc_orig
;
901 char *pa
= absolutize(path
, &pa_size
);
905 async_exch_t
*exch
= vfs_exchange_begin();
907 req
= async_send_0(exch
, VFS_IN_STATFS
, NULL
);
/* First transfer: send the canonical path. */
908 rc
= async_data_write_start(exch
, pa
, pa_size
);
/* Second transfer: receive the statfs structure. */
912 rc
= async_data_read_start(exch
, (void *) st
, sizeof(*st
));
915 vfs_exchange_end(exch
);
917 async_wait_for(req
, &rc_orig
);
/* Report the request's error if any, else the transfer's. */
918 return (int) (rc_orig
!= EOK
? rc_orig
: rc
);