/*
 * Virtio 9p backend
 *
 * Copyright IBM, Corp. 2011
 *
 * Authors:
 *  Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */
15 #include "fsdev/qemu-fsdev.h"
16 #include "qemu-thread.h"
17 #include "qemu-coroutine.h"
18 #include "virtio-9p-coth.h"
20 int v9fs_co_st_gen(V9fsPDU
*pdu
, V9fsPath
*path
, mode_t st_mode
,
24 V9fsState
*s
= pdu
->s
;
26 if (v9fs_request_cancelled(pdu
)) {
29 if (s
->ctx
.exops
.get_st_gen
) {
30 v9fs_path_read_lock(s
);
31 v9fs_co_run_in_worker(
33 err
= s
->ctx
.exops
.get_st_gen(&s
->ctx
, path
, st_mode
,
44 int v9fs_co_lstat(V9fsPDU
*pdu
, V9fsPath
*path
, struct stat
*stbuf
)
47 V9fsState
*s
= pdu
->s
;
49 if (v9fs_request_cancelled(pdu
)) {
52 v9fs_path_read_lock(s
);
53 v9fs_co_run_in_worker(
55 err
= s
->ops
->lstat(&s
->ctx
, path
, stbuf
);
64 int v9fs_co_fstat(V9fsPDU
*pdu
, int fd
, struct stat
*stbuf
)
67 V9fsState
*s
= pdu
->s
;
69 if (v9fs_request_cancelled(pdu
)) {
72 v9fs_co_run_in_worker(
74 err
= s
->ops
->fstat(&s
->ctx
, fd
, stbuf
);
82 int v9fs_co_open(V9fsPDU
*pdu
, V9fsFidState
*fidp
, int flags
)
85 V9fsState
*s
= pdu
->s
;
87 if (v9fs_request_cancelled(pdu
)) {
90 v9fs_path_read_lock(s
);
91 v9fs_co_run_in_worker(
93 fidp
->fs
.fd
= s
->ops
->open(&s
->ctx
, &fidp
->path
, flags
);
94 if (fidp
->fs
.fd
== -1) {
103 if (total_open_fd
> open_fd_hw
) {
104 v9fs_reclaim_fd(pdu
);
110 int v9fs_co_open2(V9fsPDU
*pdu
, V9fsFidState
*fidp
, V9fsString
*name
, gid_t gid
,
111 int flags
, int mode
, struct stat
*stbuf
)
116 V9fsState
*s
= pdu
->s
;
118 if (v9fs_request_cancelled(pdu
)) {
122 cred
.fc_mode
= mode
& 07777;
123 cred
.fc_uid
= fidp
->uid
;
126 * Hold the directory fid lock so that directory path name
127 * don't change. Read lock is fine because this fid cannot
128 * be used by any other operation.
130 v9fs_path_read_lock(s
);
131 v9fs_co_run_in_worker(
133 fidp
->fs
.fd
= s
->ops
->open2(&s
->ctx
, &fidp
->path
,
134 name
->data
, flags
, &cred
);
135 if (fidp
->fs
.fd
== -1) {
138 v9fs_path_init(&path
);
139 err
= v9fs_name_to_path(s
, &fidp
->path
, name
->data
, &path
);
141 err
= s
->ops
->lstat(&s
->ctx
, &path
, stbuf
);
144 s
->ops
->close(&s
->ctx
, fidp
->fs
.fd
);
146 v9fs_path_copy(&fidp
->path
, &path
);
149 s
->ops
->close(&s
->ctx
, fidp
->fs
.fd
);
151 v9fs_path_free(&path
);
157 if (total_open_fd
> open_fd_hw
) {
158 v9fs_reclaim_fd(pdu
);
164 int v9fs_co_close(V9fsPDU
*pdu
, int fd
)
167 V9fsState
*s
= pdu
->s
;
169 if (v9fs_request_cancelled(pdu
)) {
172 v9fs_co_run_in_worker(
174 err
= s
->ops
->close(&s
->ctx
, fd
);
185 int v9fs_co_fsync(V9fsPDU
*pdu
, V9fsFidState
*fidp
, int datasync
)
188 V9fsState
*s
= pdu
->s
;
190 if (v9fs_request_cancelled(pdu
)) {
194 v9fs_co_run_in_worker(
196 err
= s
->ops
->fsync(&s
->ctx
, fd
, datasync
);
204 int v9fs_co_link(V9fsPDU
*pdu
, V9fsFidState
*oldfid
,
205 V9fsFidState
*newdirfid
, V9fsString
*name
)
208 V9fsState
*s
= pdu
->s
;
210 if (v9fs_request_cancelled(pdu
)) {
213 v9fs_path_read_lock(s
);
214 v9fs_co_run_in_worker(
216 err
= s
->ops
->link(&s
->ctx
, &oldfid
->path
,
217 &newdirfid
->path
, name
->data
);
226 int v9fs_co_pwritev(V9fsPDU
*pdu
, V9fsFidState
*fidp
,
227 struct iovec
*iov
, int iovcnt
, int64_t offset
)
230 V9fsState
*s
= pdu
->s
;
232 if (v9fs_request_cancelled(pdu
)) {
236 v9fs_co_run_in_worker(
238 err
= s
->ops
->pwritev(&s
->ctx
, fd
, iov
, iovcnt
, offset
);
246 int v9fs_co_preadv(V9fsPDU
*pdu
, V9fsFidState
*fidp
,
247 struct iovec
*iov
, int iovcnt
, int64_t offset
)
250 V9fsState
*s
= pdu
->s
;
252 if (v9fs_request_cancelled(pdu
)) {
256 v9fs_co_run_in_worker(
258 err
= s
->ops
->preadv(&s
->ctx
, fd
, iov
, iovcnt
, offset
);