hw/arm/virt: fix max-cpus check
[qemu/ar7.git] / hw/9pfs/cofile.c

/*
 * Virtio 9p backend
 *
 * Copyright IBM, Corp. 2011
 *
 * Authors:
 *  Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "fsdev/qemu-fsdev.h"
#include "qemu/thread.h"
#include "qemu/coroutine.h"
#include "coth.h"
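
/*
 * Coroutine wrapper: query the backend for the st_gen (inode generation)
 * attribute of @path via the get_st_gen extended op, if the export
 * provides one. The backend call runs on a worker thread. Returns 0 on
 * success (or when the op is absent), -EINTR if the request was
 * cancelled, or a negative errno value on error.
 */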
int v9fs_co_st_gen(V9fsPDU *pdu, V9fsPath *path, mode_t st_mode,
                   V9fsStatDotl *v9stat)
{
    int err = 0;
    V9fsState *s = pdu->s;

    if (v9fs_request_cancelled(pdu)) {
        return -EINTR;
    }
    if (s->ctx.exops.get_st_gen) {
        v9fs_path_read_lock(s);
        v9fs_co_run_in_worker(
            {
                err = s->ctx.exops.get_st_gen(&s->ctx, path, st_mode,
                                              &v9stat->st_gen);
                if (err < 0) {
                    err = -errno;
                }
            });
        v9fs_path_unlock(s);
    }
    return err;
}
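
/*
 * Coroutine wrapper: lstat() @path on a worker thread while holding the
 * path read lock. Returns 0 on success or a negative errno value.
 */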
int v9fs_co_lstat(V9fsPDU *pdu, V9fsPath *path, struct stat *stbuf)
{
    int err;
    V9fsState *s = pdu->s;

    if (v9fs_request_cancelled(pdu)) {
        return -EINTR;
    }
    v9fs_path_read_lock(s);
    v9fs_co_run_in_worker(
        {
            err = s->ops->lstat(&s->ctx, path, stbuf);
            if (err < 0) {
                err = -errno;
            }
        });
    v9fs_path_unlock(s);
    return err;
}
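
/*
 * Coroutine wrapper: fstat() the open fid on a worker thread, falling
 * back to a path-based lstat() for backends that cannot stat through a
 * file descriptor. Returns 0 on success or a negative errno value.
 */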
int v9fs_co_fstat(V9fsPDU *pdu, V9fsFidState *fidp, struct stat *stbuf)
{
    int err;
    V9fsState *s = pdu->s;

    if (v9fs_request_cancelled(pdu)) {
        return -EINTR;
    }
    v9fs_co_run_in_worker(
        {
            err = s->ops->fstat(&s->ctx, fidp->fid_type, &fidp->fs, stbuf);
            if (err < 0) {
                err = -errno;
            }
        });
    /*
     * Some FS drivers (local:mapped-file) can't fetch attributes through
     * a file descriptor. Use the path name in that case.
     */
    if (err == -EOPNOTSUPP) {
        err = v9fs_co_lstat(pdu, &fidp->path, stbuf);
        if (err == -ENOENT) {
            /*
             * fstat on an unlinked file. Work with the partial results
             * returned from s->ops->fstat.
             */
            err = 0;
        }
    }
    return err;
}
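
/*
 * Coroutine wrapper: open the file backing @fidp with @flags on a worker
 * thread. On success, bumps the global open fd count and triggers fd
 * reclaim once it exceeds the open_fd_hw watermark.
 */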
int v9fs_co_open(V9fsPDU *pdu, V9fsFidState *fidp, int flags)
{
    int err;
    V9fsState *s = pdu->s;

    if (v9fs_request_cancelled(pdu)) {
        return -EINTR;
    }
    v9fs_path_read_lock(s);
    v9fs_co_run_in_worker(
        {
            err = s->ops->open(&s->ctx, &fidp->path, flags, &fidp->fs);
            if (err == -1) {
                err = -errno;
            } else {
                err = 0;
            }
        });
    v9fs_path_unlock(s);
    if (!err) {
        total_open_fd++;
        if (total_open_fd > open_fd_hw) {
            v9fs_reclaim_fd(pdu);
        }
    }
    return err;
}
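
/*
 * Coroutine wrapper: create and open @name inside the directory fid with
 * the given credentials, then stat the new file and switch the fid's
 * path to it on success. If the lookup or stat fails, the just-opened
 * state is closed again. Also accounts the new fd against open_fd_hw.
 */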
int v9fs_co_open2(V9fsPDU *pdu, V9fsFidState *fidp, V9fsString *name, gid_t gid,
                  int flags, int mode, struct stat *stbuf)
{
    int err;
    FsCred cred;
    V9fsPath path;
    V9fsState *s = pdu->s;

    if (v9fs_request_cancelled(pdu)) {
        return -EINTR;
    }
    cred_init(&cred);
    cred.fc_mode = mode & 07777;
    cred.fc_uid = fidp->uid;
    cred.fc_gid = gid;
    /*
     * Hold the directory fid lock so that the directory path name
     * doesn't change. A read lock is fine because this fid cannot
     * be used by any other operation.
     */
    v9fs_path_read_lock(s);
    v9fs_co_run_in_worker(
        {
            err = s->ops->open2(&s->ctx, &fidp->path,
                                name->data, flags, &cred, &fidp->fs);
            if (err < 0) {
                err = -errno;
            } else {
                v9fs_path_init(&path);
                err = v9fs_name_to_path(s, &fidp->path, name->data, &path);
                if (!err) {
                    err = s->ops->lstat(&s->ctx, &path, stbuf);
                    if (err < 0) {
                        err = -errno;
                        s->ops->close(&s->ctx, &fidp->fs);
                    } else {
                        v9fs_path_copy(&fidp->path, &path);
                    }
                } else {
                    s->ops->close(&s->ctx, &fidp->fs);
                }
                v9fs_path_free(&path);
            }
        });
    v9fs_path_unlock(s);
    if (!err) {
        total_open_fd++;
        if (total_open_fd > open_fd_hw) {
            v9fs_reclaim_fd(pdu);
        }
    }
    return err;
}
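
/*
 * Coroutine wrapper: close the open state @fs on a worker thread and
 * decrement the global open fd count on success.
 */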
int v9fs_co_close(V9fsPDU *pdu, V9fsFidOpenState *fs)
{
    int err;
    V9fsState *s = pdu->s;

    if (v9fs_request_cancelled(pdu)) {
        return -EINTR;
    }
    v9fs_co_run_in_worker(
        {
            err = s->ops->close(&s->ctx, fs);
            if (err < 0) {
                err = -errno;
            }
        });
    if (!err) {
        total_open_fd--;
    }
    return err;
}
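
/*
 * Coroutine wrapper: flush the open fid to stable storage on a worker
 * thread; @datasync is passed through to the backend's fsync op.
 * Returns 0 on success or a negative errno value.
 */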
int v9fs_co_fsync(V9fsPDU *pdu, V9fsFidState *fidp, int datasync)
{
    int err;
    V9fsState *s = pdu->s;

    if (v9fs_request_cancelled(pdu)) {
        return -EINTR;
    }
    v9fs_co_run_in_worker(
        {
            err = s->ops->fsync(&s->ctx, fidp->fid_type, &fidp->fs, datasync);
            if (err < 0) {
                err = -errno;
            }
        });
    return err;
}
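
/*
 * Coroutine wrapper: create a hard link named @name in @newdirfid's
 * directory pointing at @oldfid's path, on a worker thread.
 */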
int v9fs_co_link(V9fsPDU *pdu, V9fsFidState *oldfid,
                 V9fsFidState *newdirfid, V9fsString *name)
{
    int err;
    V9fsState *s = pdu->s;

    if (v9fs_request_cancelled(pdu)) {
        return -EINTR;
    }
    v9fs_path_read_lock(s);
    v9fs_co_run_in_worker(
        {
            err = s->ops->link(&s->ctx, &oldfid->path,
                               &newdirfid->path, name->data);
            if (err < 0) {
                err = -errno;
            }
        });
    v9fs_path_unlock(s);
    return err;
}
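
/*
 * Coroutine wrapper: write the iovec to the open fid at @offset on a
 * worker thread. Returns the number of bytes written, or a negative
 * errno value.
 */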
int v9fs_co_pwritev(V9fsPDU *pdu, V9fsFidState *fidp,
                    struct iovec *iov, int iovcnt, int64_t offset)
{
    int err;
    V9fsState *s = pdu->s;

    if (v9fs_request_cancelled(pdu)) {
        return -EINTR;
    }
    v9fs_co_run_in_worker(
        {
            err = s->ops->pwritev(&s->ctx, &fidp->fs, iov, iovcnt, offset);
            if (err < 0) {
                err = -errno;
            }
        });
    return err;
}
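
/*
 * Coroutine wrapper: read into the iovec from the open fid at @offset on
 * a worker thread. Returns the number of bytes read, or a negative errno
 * value.
 */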
int v9fs_co_preadv(V9fsPDU *pdu, V9fsFidState *fidp,
                   struct iovec *iov, int iovcnt, int64_t offset)
{
    int err;
    V9fsState *s = pdu->s;

    if (v9fs_request_cancelled(pdu)) {
        return -EINTR;
    }
    v9fs_co_run_in_worker(
        {
            err = s->ops->preadv(&s->ctx, &fidp->fs, iov, iovcnt, offset);
            if (err < 0) {
                err = -errno;
            }
        });
    return err;
}