hw/9pfs/codir.c
/*
 * 9p backend
 *
 * Copyright IBM, Corp. 2011
 *
 * Authors:
 *  Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

/*
 * Not so fast! You might want to read the 9p developer docs first:
 * https://wiki.qemu.org/Documentation/9p
 */

#include "qemu/osdep.h"
#include "fsdev/qemu-fsdev.h"
#include "qemu/thread.h"
#include "qemu/coroutine.h"
#include "qemu/main-loop.h"
#include "coth.h"
#include "9p-xattr.h"
#include "9p-util.h"

/*
 * Intended to be called from bottom-half (e.g. background I/O thread)
 * context.
 */
static int do_readdir(V9fsPDU *pdu, V9fsFidState *fidp, struct dirent **dent)
{
    int err = 0;
    V9fsState *s = pdu->s;
    struct dirent *entry;

    errno = 0;
    entry = s->ops->readdir(&s->ctx, &fidp->fs);
    if (!entry && errno) {
        *dent = NULL;
        err = -errno;
    } else {
        *dent = entry;
    }
    return err;
}

/*
 * TODO: This will be removed for performance reasons.
 * Use v9fs_co_readdir_many() instead.
 */
int coroutine_fn v9fs_co_readdir(V9fsPDU *pdu, V9fsFidState *fidp,
                                 struct dirent **dent)
{
    int err;

    if (v9fs_request_cancelled(pdu)) {
        return -EINTR;
    }
    v9fs_co_run_in_worker({
        err = do_readdir(pdu, fidp, dent);
    });
    return err;
}
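
/*
 * Illustrative usage sketch (disabled): how a caller in the top half, e.g. a
 * T_readdir handler coroutine in 9p.c, might drive v9fs_co_readdir(). The
 * surrounding handler variables (pdu, fidp) are assumed to exist in that
 * caller; only the call pattern and result semantics come from this file.
 */
#if 0
    struct dirent *dent;
    int err = v9fs_co_readdir(pdu, fidp, &dent);
    if (err < 0) {
        /* fs driver error, err is a negative errno */
    } else if (!dent) {
        /* end of directory reached */
    } else {
        /* consume dent->d_name before the next readdir call on this fid */
    }
#endif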

/*
 * This is solely executed on a background IO thread.
 *
 * See v9fs_co_readdir_many() (as its only user) below for details.
 */
static int do_readdir_many(V9fsPDU *pdu, V9fsFidState *fidp,
                           struct V9fsDirEnt **entries, off_t offset,
                           int32_t maxsize, bool dostat)
{
    V9fsState *s = pdu->s;
    V9fsString name;
    int len, err = 0;
    int32_t size = 0;
    off_t saved_dir_pos;
    struct dirent *dent;
    struct V9fsDirEnt *e = NULL;
    V9fsPath path;
    struct stat stbuf;

    *entries = NULL;
    v9fs_path_init(&path);

    /*
     * TODO: Here should be a warn_report_once() if lock failed.
     *
     * With a good 9p client we should not get into concurrency here,
     * because a good client would not use the same fid for concurrent
     * requests. We do the lock here for safety reasons though. However
     * the client would then suffer performance issues, so better log that
     * issue here.
     */
    v9fs_readdir_lock(&fidp->fs.dir);

    /* seek directory to requested initial position */
    if (offset == 0) {
        s->ops->rewinddir(&s->ctx, &fidp->fs);
    } else {
        s->ops->seekdir(&s->ctx, &fidp->fs, offset);
    }

    /* save the directory position */
    saved_dir_pos = s->ops->telldir(&s->ctx, &fidp->fs);
    if (saved_dir_pos < 0) {
        err = saved_dir_pos;
        goto out;
    }

    while (true) {
        /* interrupt loop if request was cancelled by a Tflush request */
        if (v9fs_request_cancelled(pdu)) {
            err = -EINTR;
            break;
        }

        /* get directory entry from fs driver */
        err = do_readdir(pdu, fidp, &dent);
        if (err || !dent) {
            break;
        }

        /*
         * stop this loop as soon as it would exceed the allowed maximum
         * response message size for the directory entries collected so far,
         * because anything beyond that size would need to be discarded by
         * the 9p controller (main thread / top half) anyway
         */
        v9fs_string_init(&name);
        v9fs_string_sprintf(&name, "%s", dent->d_name);
        len = v9fs_readdir_response_size(&name);
        v9fs_string_free(&name);
        if (size + len > maxsize) {
            /* this is not an error case actually */
            break;
        }

        /* append next node to result chain */
        if (!e) {
            *entries = e = g_new0(V9fsDirEnt, 1);
        } else {
            e = e->next = g_new0(V9fsDirEnt, 1);
        }
        e->dent = qemu_dirent_dup(dent);

        /* perform a full stat() for directory entry if requested by caller */
        if (dostat) {
            err = s->ops->name_to_path(
                &s->ctx, &fidp->path, dent->d_name, &path
            );
            if (err < 0) {
                err = -errno;
                break;
            }

            err = s->ops->lstat(&s->ctx, &path, &stbuf);
            if (err < 0) {
                err = -errno;
                break;
            }

            e->st = g_new0(struct stat, 1);
            memcpy(e->st, &stbuf, sizeof(struct stat));
        }

        size += len;
        saved_dir_pos = qemu_dirent_off(dent);
    }

    /* restore (last) saved position */
    s->ops->seekdir(&s->ctx, &fidp->fs, saved_dir_pos);

out:
    v9fs_readdir_unlock(&fidp->fs.dir);
    v9fs_path_free(&path);
    if (err < 0) {
        return err;
    }
    return size;
}

/**
 * v9fs_co_readdir_many() - Reads multiple directory entries in one rush.
 *
 * @pdu: the causing 9p (T_readdir) client request
 * @fidp: already opened directory on which readdir shall be performed
 * @entries: output for directory entries (must not be NULL)
 * @offset: initial position inside the directory the function shall
 *          seek to before retrieving the directory entries
 * @maxsize: maximum result message body size (in bytes)
 * @dostat: whether a stat() should be performed and returned for
 *          each directory entry
 * Return: resulting response message body size (in bytes) on success,
 *         negative error code otherwise
 *
 * Retrieves the requested (max. amount of) directory entries from the fs
 * driver. This function must only be called by the main IO thread (top half).
 * Internally this function call will be dispatched to a background IO thread
 * (bottom half) where it is eventually executed by the fs driver.
 *
 * Acquiring multiple directory entries in one rush from the fs driver,
 * instead of retrieving each directory entry individually, is very
 * beneficial from a performance point of view, because every fs driver
 * request adds latency, which in practice could lead to overall latencies
 * of several hundred ms for reading all entries (of just a single
 * directory) if every directory entry were individually requested from the
 * fs driver.
 *
 * NOTE: You must ALWAYS call v9fs_free_dirents(entries) after calling
 * v9fs_co_readdir_many(), both on success and on error cases of this
 * function, to avoid memory leaks once @entries are no longer needed.
 */
int coroutine_fn v9fs_co_readdir_many(V9fsPDU *pdu, V9fsFidState *fidp,
                                      struct V9fsDirEnt **entries,
                                      off_t offset, int32_t maxsize,
                                      bool dostat)
{
    int err = 0;

    if (v9fs_request_cancelled(pdu)) {
        return -EINTR;
    }
    v9fs_co_run_in_worker({
        err = do_readdir_many(pdu, fidp, entries, offset, maxsize, dostat);
    });
    return err;
}
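
/*
 * Illustrative usage sketch (disabled): a minimal caller of
 * v9fs_co_readdir_many() as suggested by the documentation above. The
 * variables pdu, fidp, offset and maxsize are assumed to exist in the
 * calling T_readdir handler; the result chain layout and the mandatory
 * v9fs_free_dirents() cleanup follow the NOTE above.
 */
#if 0
    struct V9fsDirEnt *entries = NULL;
    int size = v9fs_co_readdir_many(pdu, fidp, &entries, offset, maxsize,
                                    false);
    if (size >= 0) {
        for (struct V9fsDirEnt *e = entries; e; e = e->next) {
            /* e->dent is a duplicated dirent; e->st is set only if dostat */
        }
    }
    /* always free the chain, on success and on error */
    v9fs_free_dirents(entries);
#endif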

off_t v9fs_co_telldir(V9fsPDU *pdu, V9fsFidState *fidp)
{
    off_t err;
    V9fsState *s = pdu->s;

    if (v9fs_request_cancelled(pdu)) {
        return -EINTR;
    }
    v9fs_co_run_in_worker(
        {
            err = s->ops->telldir(&s->ctx, &fidp->fs);
            if (err < 0) {
                err = -errno;
            }
        });
    return err;
}

void coroutine_fn v9fs_co_seekdir(V9fsPDU *pdu, V9fsFidState *fidp,
                                  off_t offset)
{
    V9fsState *s = pdu->s;
    if (v9fs_request_cancelled(pdu)) {
        return;
    }
    v9fs_co_run_in_worker(
        {
            s->ops->seekdir(&s->ctx, &fidp->fs, offset);
        });
}

void coroutine_fn v9fs_co_rewinddir(V9fsPDU *pdu, V9fsFidState *fidp)
{
    V9fsState *s = pdu->s;
    if (v9fs_request_cancelled(pdu)) {
        return;
    }
    v9fs_co_run_in_worker(
        {
            s->ops->rewinddir(&s->ctx, &fidp->fs);
        });
}

int coroutine_fn v9fs_co_mkdir(V9fsPDU *pdu, V9fsFidState *fidp,
                               V9fsString *name, mode_t mode, uid_t uid,
                               gid_t gid, struct stat *stbuf)
{
    int err;
    FsCred cred;
    V9fsPath path;
    V9fsState *s = pdu->s;

    if (v9fs_request_cancelled(pdu)) {
        return -EINTR;
    }
    cred_init(&cred);
    cred.fc_mode = mode;
    cred.fc_uid = uid;
    cred.fc_gid = gid;
    v9fs_path_read_lock(s);
    v9fs_co_run_in_worker(
        {
            err = s->ops->mkdir(&s->ctx, &fidp->path, name->data, &cred);
            if (err < 0) {
                err = -errno;
            } else {
                v9fs_path_init(&path);
                err = v9fs_name_to_path(s, &fidp->path, name->data, &path);
                if (!err) {
                    err = s->ops->lstat(&s->ctx, &path, stbuf);
                    if (err < 0) {
                        err = -errno;
                    }
                }
                v9fs_path_free(&path);
            }
        });
    v9fs_path_unlock(s);
    return err;
}

int coroutine_fn v9fs_co_opendir(V9fsPDU *pdu, V9fsFidState *fidp)
{
    int err;
    V9fsState *s = pdu->s;

    if (v9fs_request_cancelled(pdu)) {
        return -EINTR;
    }
    v9fs_path_read_lock(s);
    v9fs_co_run_in_worker(
        {
            err = s->ops->opendir(&s->ctx, &fidp->path, &fidp->fs);
            if (err < 0) {
                err = -errno;
            } else {
                err = 0;
            }
        });
    v9fs_path_unlock(s);
    if (!err) {
        total_open_fd++;
        if (total_open_fd > open_fd_hw) {
            v9fs_reclaim_fd(pdu);
        }
    }
    return err;
}

int coroutine_fn v9fs_co_closedir(V9fsPDU *pdu, V9fsFidOpenState *fs)
{
    int err;
    V9fsState *s = pdu->s;

    if (v9fs_request_cancelled(pdu)) {
        return -EINTR;
    }
    v9fs_co_run_in_worker(
        {
            err = s->ops->closedir(&s->ctx, fs);
            if (err < 0) {
                err = -errno;
            }
        });
    if (!err) {
        total_open_fd--;
    }
    return err;
}