/*
 * 9p backend
 *
 * Copyright IBM, Corp. 2011
 *
 * Authors:
 *  Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "fsdev/qemu-fsdev.h"
#include "qemu/thread.h"
#include "qemu/coroutine.h"
#include "coth.h"
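
/*
 * v9fs_co_st_gen(): fetch the inode generation number through the
 * backend's optional get_st_gen extended op. Like every helper in this
 * file, the blocking backend call is wrapped in v9fs_co_run_in_worker()
 * (coth.h), which suspends the request coroutine and runs the block on a
 * worker thread so the main loop is never blocked. Backends without
 * get_st_gen leave st_gen untouched and the call returns 0.
 */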
int v9fs_co_st_gen(V9fsPDU *pdu, V9fsPath *path, mode_t st_mode,
                   V9fsStatDotl *v9stat)
{
    int err = 0;
    V9fsState *s = pdu->s;

    if (v9fs_request_cancelled(pdu)) {
        return -EINTR;
    }
    if (s->ctx.exops.get_st_gen) {
        v9fs_path_read_lock(s);
        v9fs_co_run_in_worker(
            {
                err = s->ctx.exops.get_st_gen(&s->ctx, path, st_mode,
                                              &v9stat->st_gen);
                if (err < 0) {
                    err = -errno;
                }
            });
        v9fs_path_unlock(s);
    }
    return err;
}
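
/*
 * v9fs_co_lstat(): path-based stat. The path read lock keeps the path
 * name stable while the backend lstat runs on the worker thread.
 */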
int v9fs_co_lstat(V9fsPDU *pdu, V9fsPath *path, struct stat *stbuf)
{
    int err;
    V9fsState *s = pdu->s;

    if (v9fs_request_cancelled(pdu)) {
        return -EINTR;
    }
    v9fs_path_read_lock(s);
    v9fs_co_run_in_worker(
        {
            err = s->ops->lstat(&s->ctx, path, stbuf);
            if (err < 0) {
                err = -errno;
            }
        });
    v9fs_path_unlock(s);
    return err;
}
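
/*
 * v9fs_co_fstat(): stat by open file descriptor, with a path-based
 * fallback for backends (e.g. local:mapped-file) that report -EOPNOTSUPP.
 */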
int v9fs_co_fstat(V9fsPDU *pdu, V9fsFidState *fidp, struct stat *stbuf)
{
    int err;
    V9fsState *s = pdu->s;

    if (v9fs_request_cancelled(pdu)) {
        return -EINTR;
    }
    v9fs_co_run_in_worker(
        {
            err = s->ops->fstat(&s->ctx, fidp->fid_type, &fidp->fs, stbuf);
            if (err < 0) {
                err = -errno;
            }
        });
    /*
     * Some FS driver (local:mapped-file) can't support fetching attributes
     * using file descriptor. Use Path name in that case.
     */
    if (err == -EOPNOTSUPP) {
        err = v9fs_co_lstat(pdu, &fidp->path, stbuf);
        if (err == -ENOENT) {
            /*
             * fstat on an unlinked file. Work with partial results
             * returned from s->ops->fstat
             */
            err = 0;
        }
    }
    return err;
}
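
/*
 * v9fs_co_open(): open the fid's path with the given flags and account
 * for the new descriptor; crossing the open_fd_hw watermark triggers
 * descriptor reclamation via v9fs_reclaim_fd().
 */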
int v9fs_co_open(V9fsPDU *pdu, V9fsFidState *fidp, int flags)
{
    int err;
    V9fsState *s = pdu->s;

    if (v9fs_request_cancelled(pdu)) {
        return -EINTR;
    }
    v9fs_path_read_lock(s);
    v9fs_co_run_in_worker(
        {
            err = s->ops->open(&s->ctx, &fidp->path, flags, &fidp->fs);
            if (err == -1) {
                err = -errno;
            } else {
                err = 0;
            }
        });
    v9fs_path_unlock(s);
    if (!err) {
        total_open_fd++;
        if (total_open_fd > open_fd_hw) {
            v9fs_reclaim_fd(pdu);
        }
    }
    return err;
}
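
/*
 * v9fs_co_open2(): create and open a new file in the directory referenced
 * by fidp. On success the fid's path is switched to the new entry; if the
 * follow-up name lookup or stat fails, the freshly opened descriptor is
 * closed again before returning the error.
 */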
int v9fs_co_open2(V9fsPDU *pdu, V9fsFidState *fidp, V9fsString *name, gid_t gid,
                  int flags, int mode, struct stat *stbuf)
{
    int err;
    FsCred cred;
    V9fsPath path;
    V9fsState *s = pdu->s;

    if (v9fs_request_cancelled(pdu)) {
        return -EINTR;
    }
    cred_init(&cred);
    cred.fc_mode = mode & 07777;
    cred.fc_uid = fidp->uid;
    cred.fc_gid = gid;
    /*
     * Hold the directory fid lock so that directory path name
     * don't change. Read lock is fine because this fid cannot
     * be used by any other operation.
     */
    v9fs_path_read_lock(s);
    v9fs_co_run_in_worker(
        {
            err = s->ops->open2(&s->ctx, &fidp->path,
                                name->data, flags, &cred, &fidp->fs);
            if (err < 0) {
                err = -errno;
            } else {
                v9fs_path_init(&path);
                err = v9fs_name_to_path(s, &fidp->path, name->data, &path);
                if (!err) {
                    err = s->ops->lstat(&s->ctx, &path, stbuf);
                    if (err < 0) {
                        err = -errno;
                        s->ops->close(&s->ctx, &fidp->fs);
                    } else {
                        v9fs_path_copy(&fidp->path, &path);
                    }
                } else {
                    s->ops->close(&s->ctx, &fidp->fs);
                }
                v9fs_path_free(&path);
            }
        });
    v9fs_path_unlock(s);
    if (!err) {
        total_open_fd++;
        if (total_open_fd > open_fd_hw) {
            v9fs_reclaim_fd(pdu);
        }
    }
    return err;
}
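
/*
 * v9fs_co_close(): close the fid's open state and drop it from the
 * open-descriptor accounting.
 */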
int v9fs_co_close(V9fsPDU *pdu, V9fsFidOpenState *fs)
{
    int err;
    V9fsState *s = pdu->s;

    if (v9fs_request_cancelled(pdu)) {
        return -EINTR;
    }
    v9fs_co_run_in_worker(
        {
            err = s->ops->close(&s->ctx, fs);
            if (err < 0) {
                err = -errno;
            }
        });
    if (!err) {
        total_open_fd--;
    }
    return err;
}
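
/*
 * v9fs_co_fsync(): flush the fid's open file; a non-zero datasync asks
 * the backend for fdatasync-like behaviour (data only).
 */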
int v9fs_co_fsync(V9fsPDU *pdu, V9fsFidState *fidp, int datasync)
{
    int err;
    V9fsState *s = pdu->s;

    if (v9fs_request_cancelled(pdu)) {
        return -EINTR;
    }
    v9fs_co_run_in_worker(
        {
            err = s->ops->fsync(&s->ctx, fidp->fid_type, &fidp->fs, datasync);
            if (err < 0) {
                err = -errno;
            }
        });
    return err;
}
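
/*
 * v9fs_co_link(): create the hard link 'name' inside newdirfid, pointing
 * at oldfid's path.
 */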
int v9fs_co_link(V9fsPDU *pdu, V9fsFidState *oldfid,
                 V9fsFidState *newdirfid, V9fsString *name)
{
    int err;
    V9fsState *s = pdu->s;

    if (v9fs_request_cancelled(pdu)) {
        return -EINTR;
    }
    v9fs_path_read_lock(s);
    v9fs_co_run_in_worker(
        {
            err = s->ops->link(&s->ctx, &oldfid->path,
                               &newdirfid->path, name->data);
            if (err < 0) {
                err = -errno;
            }
        });
    v9fs_path_unlock(s);
    return err;
}
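
/*
 * v9fs_co_pwritev(): vectored write at the given offset on the fid's open
 * file; returns the backend's byte count or a negative errno.
 */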
int v9fs_co_pwritev(V9fsPDU *pdu, V9fsFidState *fidp,
                    struct iovec *iov, int iovcnt, int64_t offset)
{
    int err;
    V9fsState *s = pdu->s;

    if (v9fs_request_cancelled(pdu)) {
        return -EINTR;
    }
    v9fs_co_run_in_worker(
        {
            err = s->ops->pwritev(&s->ctx, &fidp->fs, iov, iovcnt, offset);
            if (err < 0) {
                err = -errno;
            }
        });
    return err;
}
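
/*
 * v9fs_co_preadv(): vectored read at the given offset on the fid's open
 * file; returns the backend's byte count or a negative errno.
 */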
int v9fs_co_preadv(V9fsPDU *pdu, V9fsFidState *fidp,
                   struct iovec *iov, int iovcnt, int64_t offset)
{
    int err;
    V9fsState *s = pdu->s;

    if (v9fs_request_cancelled(pdu)) {
        return -EINTR;
    }
    v9fs_co_run_in_worker(
        {
            err = s->ops->preadv(&s->ctx, &fidp->fs, iov, iovcnt, offset);
            if (err < 0) {
                err = -errno;
            }
        });
    return err;
}