fs/fuse/dev.c

/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2005  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/uio.h>
#include <linux/miscdevice.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/slab.h>

MODULE_ALIAS_MISCDEV(FUSE_MINOR);

static kmem_cache_t *fuse_req_cachep;
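
/*
 * Return the connection associated with the device file, or NULL if
 * the file has no connection attached or the connection is no longer
 * connected.
 */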
static struct fuse_conn *fuse_get_conn(struct file *file)
{
	struct fuse_conn *fc;
	spin_lock(&fuse_lock);
	fc = file->private_data;
	if (fc && !fc->connected)
		fc = NULL;
	spin_unlock(&fuse_lock);
	return fc;
}

static void fuse_request_init(struct fuse_req *req)
{
	memset(req, 0, sizeof(*req));
	INIT_LIST_HEAD(&req->list);
	init_waitqueue_head(&req->waitq);
	atomic_set(&req->count, 1);
}

struct fuse_req *fuse_request_alloc(void)
{
	struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, SLAB_KERNEL);
	if (req)
		fuse_request_init(req);
	return req;
}

void fuse_request_free(struct fuse_req *req)
{
	kmem_cache_free(fuse_req_cachep, req);
}
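
/*
 * Block all signals except SIGKILL, saving the old signal mask in
 * 'oldset', so that a sleeping request can only be interrupted by a
 * fatal signal.
 */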
static void block_sigs(sigset_t *oldset)
{
	sigset_t mask;

	siginitsetinv(&mask, sigmask(SIGKILL));
	sigprocmask(SIG_BLOCK, &mask, oldset);
}

static void restore_sigs(sigset_t *oldset)
{
	sigprocmask(SIG_SETMASK, oldset, NULL);
}

void fuse_reset_request(struct fuse_req *req)
{
	int preallocated = req->preallocated;
	BUG_ON(atomic_read(&req->count) != 1);
	fuse_request_init(req);
	req->preallocated = preallocated;
}

static void __fuse_get_request(struct fuse_req *req)
{
	atomic_inc(&req->count);
}

/* Must be called with > 1 refcount */
static void __fuse_put_request(struct fuse_req *req)
{
	BUG_ON(atomic_read(&req->count) < 2);
	atomic_dec(&req->count);
}
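
/*
 * Take a preallocated request off fc->unused_list and initialize it
 * with the credentials of the current task.  The caller has already
 * acquired a slot in fc->outstanding_sem, so the list cannot be
 * empty here.
 */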
static struct fuse_req *do_get_request(struct fuse_conn *fc)
{
	struct fuse_req *req;

	spin_lock(&fuse_lock);
	BUG_ON(list_empty(&fc->unused_list));
	req = list_entry(fc->unused_list.next, struct fuse_req, list);
	list_del_init(&req->list);
	spin_unlock(&fuse_lock);
	fuse_request_init(req);
	req->preallocated = 1;
	req->in.h.uid = current->fsuid;
	req->in.h.gid = current->fsgid;
	req->in.h.pid = current->pid;
	return req;
}

/* This can return NULL, but only in case it's interrupted by a SIGKILL */
struct fuse_req *fuse_get_request(struct fuse_conn *fc)
{
	int intr;
	sigset_t oldset;

	atomic_inc(&fc->num_waiting);
	block_sigs(&oldset);
	intr = down_interruptible(&fc->outstanding_sem);
	restore_sigs(&oldset);
	if (intr) {
		atomic_dec(&fc->num_waiting);
		return NULL;
	}
	return do_get_request(fc);
}

/* Must be called with fuse_lock held */
static void fuse_putback_request(struct fuse_conn *fc, struct fuse_req *req)
{
	if (req->preallocated) {
		atomic_dec(&fc->num_waiting);
		list_add(&req->list, &fc->unused_list);
	} else
		fuse_request_free(req);

	/* If we are in debt decrease that first */
	if (fc->outstanding_debt)
		fc->outstanding_debt--;
	else
		up(&fc->outstanding_sem);
}
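
/* Drop a reference to the request, putting it back on the unused list
   (or freeing it) when the last reference is gone */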
void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
{
	if (atomic_dec_and_test(&req->count)) {
		spin_lock(&fuse_lock);
		fuse_putback_request(fc, req);
		spin_unlock(&fuse_lock);
	}
}

static void fuse_put_request_locked(struct fuse_conn *fc, struct fuse_req *req)
{
	if (atomic_dec_and_test(&req->count))
		fuse_putback_request(fc, req);
}

void fuse_release_background(struct fuse_req *req)
{
	iput(req->inode);
	iput(req->inode2);
	if (req->file)
		fput(req->file);
	spin_lock(&fuse_lock);
	list_del(&req->bg_entry);
	spin_unlock(&fuse_lock);
}

/*
 * This function is called when a request is finished.  Either a reply
 * has arrived or it was interrupted (and not yet sent) or some error
 * occurred during communication with userspace, or the device file
 * was closed.  In case of a background request the references to the
 * stored objects are released.  The requester thread is woken up (if
 * still waiting), the 'end' callback is called if given, else the
 * reference to the request is released.
 *
 * Releasing the extra reference for foreground requests must be done
 * within the same locked region as setting state to finished.  This
 * is because fuse_reset_request() may be called after request is
 * finished and it must be the sole possessor.  If request is
 * interrupted and put in the background, it will return with an error
 * and hence never be reset and reused.
 *
 * Called with fuse_lock, unlocks it
 */
static void request_end(struct fuse_conn *fc, struct fuse_req *req)
{
	list_del(&req->list);
	req->state = FUSE_REQ_FINISHED;
	if (!req->background) {
		wake_up(&req->waitq);
		fuse_put_request_locked(fc, req);
		spin_unlock(&fuse_lock);
	} else {
		void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
		req->end = NULL;
		spin_unlock(&fuse_lock);
		down_read(&fc->sbput_sem);
		if (fc->mounted)
			fuse_release_background(req);
		up_read(&fc->sbput_sem);
		if (end)
			end(fc, req);
		else
			fuse_put_request(fc, req);
	}
}

/*
 * Unfortunately request interruption does not just solve the deadlock
 * problem, it causes problems too.  These stem from the fact that an
 * interrupted request continues to be processed in userspace, while
 * all the locks and object references (inode and file) held during
 * the operation are released.
 *
 * Releasing the locks is exactly why there's a need to interrupt the
 * request, so there's not a lot that can be done about this, except
 * introduce additional locking in userspace.
 *
 * More important is to keep inode and file references until userspace
 * has replied, otherwise FORGET and RELEASE could be sent while the
 * inode/file is still used by the filesystem.
 *
 * For this reason the concept of a "background" request is introduced.
 * An interrupted request is backgrounded if it has already been sent
 * to userspace.  Backgrounding involves getting an extra reference to
 * the inode(s) or file used in the request, and adding the request to
 * the fc->background list.  When a reply is received for a background
 * request, the object references are released, and the request is
 * removed from the list.  If the filesystem is unmounted while there
 * are still background requests, the list is walked and references
 * are released as if a reply was received.
 *
 * There's one more use for a background request.  The RELEASE message is
 * always sent as background, since it doesn't return an error or
 * data.
 */
static void background_request(struct fuse_conn *fc, struct fuse_req *req)
{
	req->background = 1;
	list_add(&req->bg_entry, &fc->background);
	if (req->inode)
		req->inode = igrab(req->inode);
	if (req->inode2)
		req->inode2 = igrab(req->inode2);
	if (req->file)
		get_file(req->file);
}

/* Called with fuse_lock held.  Releases, and then reacquires it. */
static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
{
	sigset_t oldset;

	spin_unlock(&fuse_lock);
	block_sigs(&oldset);
	wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED);
	restore_sigs(&oldset);
	spin_lock(&fuse_lock);
	if (req->state == FUSE_REQ_FINISHED && !req->interrupted)
		return;

	if (!req->interrupted) {
		req->out.h.error = -EINTR;
		req->interrupted = 1;
	}
	if (req->locked) {
		/* This is uninterruptible sleep, because data is
		   being copied to/from the buffers of req.  During
		   locked state, there mustn't be any filesystem
		   operation (e.g. page fault), since that could lead
		   to deadlock */
		spin_unlock(&fuse_lock);
		wait_event(req->waitq, !req->locked);
		spin_lock(&fuse_lock);
	}
	if (req->state == FUSE_REQ_PENDING) {
		list_del(&req->list);
		__fuse_put_request(req);
	} else if (req->state == FUSE_REQ_SENT)
		background_request(fc, req);
}
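
/* Total size in bytes of an argument array */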
static unsigned len_args(unsigned numargs, struct fuse_arg *args)
{
	unsigned nbytes = 0;
	unsigned i;

	for (i = 0; i < numargs; i++)
		nbytes += args[i].size;

	return nbytes;
}
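
/*
 * Assign a unique ID and total length to the request, add it to the
 * end of the pending list and wake up a reader waiting on the device.
 * Called with fuse_lock held.
 */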
static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
{
	fc->reqctr++;
	/* zero is special */
	if (fc->reqctr == 0)
		fc->reqctr = 1;
	req->in.h.unique = fc->reqctr;
	req->in.h.len = sizeof(struct fuse_in_header) +
		len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
	if (!req->preallocated) {
		/* If the request is not preallocated (either FORGET or
		   RELEASE), then still decrease outstanding_sem, so the
		   user can't open an infinite number of files while not
		   processing the RELEASE requests.  However for
		   efficiency do it without blocking, so if down()
		   would block, just increase the debt instead */
		if (down_trylock(&fc->outstanding_sem))
			fc->outstanding_debt++;
	}
	list_add_tail(&req->list, &fc->pending);
	req->state = FUSE_REQ_PENDING;
	wake_up(&fc->waitq);
}

/*
 * This can only be interrupted by a SIGKILL
 */
void request_send(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 1;
	spin_lock(&fuse_lock);
	if (!fc->connected)
		req->out.h.error = -ENOTCONN;
	else if (fc->conn_error)
		req->out.h.error = -ECONNREFUSED;
	else {
		queue_request(fc, req);
		/* acquire extra reference, since request is still needed
		   after request_end() */
		__fuse_get_request(req);

		request_wait_answer(fc, req);
	}
	spin_unlock(&fuse_lock);
}

static void request_send_nowait(struct fuse_conn *fc, struct fuse_req *req)
{
	spin_lock(&fuse_lock);
	if (fc->connected) {
		queue_request(fc, req);
		spin_unlock(&fuse_lock);
	} else {
		req->out.h.error = -ENOTCONN;
		request_end(fc, req);
	}
}

void request_send_noreply(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 0;
	request_send_nowait(fc, req);
}

void request_send_background(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 1;
	spin_lock(&fuse_lock);
	background_request(fc, req);
	spin_unlock(&fuse_lock);
	request_send_nowait(fc, req);
}

/*
 * Lock the request.  Up to the next unlock_request() there mustn't be
 * anything that could cause a page-fault.  If the request was already
 * interrupted, bail out.
 */
static int lock_request(struct fuse_req *req)
{
	int err = 0;
	if (req) {
		spin_lock(&fuse_lock);
		if (req->interrupted)
			err = -ENOENT;
		else
			req->locked = 1;
		spin_unlock(&fuse_lock);
	}
	return err;
}

/*
 * Unlock request.  If it was interrupted while locked, the requester
 * thread is currently waiting for it to be unlocked, so wake it up.
 */
static void unlock_request(struct fuse_req *req)
{
	if (req) {
		spin_lock(&fuse_lock);
		req->locked = 0;
		if (req->interrupted)
			wake_up(&req->waitq);
		spin_unlock(&fuse_lock);
	}
}
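
/*
 * State kept while copying a request to/from the userspace buffer:
 * the remaining iovec segments, and the currently mapped userspace
 * page together with the position and amount left within it.
 */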
struct fuse_copy_state {
	int write;
	struct fuse_req *req;
	const struct iovec *iov;
	unsigned long nr_segs;
	unsigned long seglen;
	unsigned long addr;
	struct page *pg;
	void *mapaddr;
	void *buf;
	unsigned len;
};

static void fuse_copy_init(struct fuse_copy_state *cs, int write,
			   struct fuse_req *req, const struct iovec *iov,
			   unsigned long nr_segs)
{
	memset(cs, 0, sizeof(*cs));
	cs->write = write;
	cs->req = req;
	cs->iov = iov;
	cs->nr_segs = nr_segs;
}

/* Unmap and put previous page of userspace buffer */
static void fuse_copy_finish(struct fuse_copy_state *cs)
{
	if (cs->mapaddr) {
		kunmap_atomic(cs->mapaddr, KM_USER0);
		if (cs->write) {
			flush_dcache_page(cs->pg);
			set_page_dirty_lock(cs->pg);
		}
		put_page(cs->pg);
		cs->mapaddr = NULL;
	}
}

/*
 * Get another pagefull of userspace buffer, and map it to kernel
 * address space, and lock request
 */
static int fuse_copy_fill(struct fuse_copy_state *cs)
{
	unsigned long offset;
	int err;

	unlock_request(cs->req);
	fuse_copy_finish(cs);
	if (!cs->seglen) {
		BUG_ON(!cs->nr_segs);
		cs->seglen = cs->iov[0].iov_len;
		cs->addr = (unsigned long) cs->iov[0].iov_base;
		cs->iov++;
		cs->nr_segs--;
	}
	down_read(&current->mm->mmap_sem);
	err = get_user_pages(current, current->mm, cs->addr, 1, cs->write, 0,
			     &cs->pg, NULL);
	up_read(&current->mm->mmap_sem);
	if (err < 0)
		return err;
	BUG_ON(err != 1);
	offset = cs->addr % PAGE_SIZE;
	cs->mapaddr = kmap_atomic(cs->pg, KM_USER0);
	cs->buf = cs->mapaddr + offset;
	cs->len = min(PAGE_SIZE - offset, cs->seglen);
	cs->seglen -= cs->len;
	cs->addr += cs->len;

	return lock_request(cs->req);
}

/* Do as much copy to/from userspace buffer as we can */
static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
{
	unsigned ncpy = min(*size, cs->len);
	if (val) {
		if (cs->write)
			memcpy(cs->buf, *val, ncpy);
		else
			memcpy(*val, cs->buf, ncpy);
		*val += ncpy;
	}
	*size -= ncpy;
	cs->len -= ncpy;
	cs->buf += ncpy;
	return ncpy;
}

/*
 * Copy a page in the request to/from the userspace buffer.  Must be
 * done atomically
 */
static int fuse_copy_page(struct fuse_copy_state *cs, struct page *page,
			  unsigned offset, unsigned count, int zeroing)
{
	if (page && zeroing && count < PAGE_SIZE) {
		void *mapaddr = kmap_atomic(page, KM_USER1);
		memset(mapaddr, 0, PAGE_SIZE);
		kunmap_atomic(mapaddr, KM_USER1);
	}
	while (count) {
		int err;
		if (!cs->len && (err = fuse_copy_fill(cs)))
			return err;
		if (page) {
			void *mapaddr = kmap_atomic(page, KM_USER1);
			void *buf = mapaddr + offset;
			offset += fuse_copy_do(cs, &buf, &count);
			kunmap_atomic(mapaddr, KM_USER1);
		} else
			offset += fuse_copy_do(cs, NULL, &count);
	}
	if (page && !cs->write)
		flush_dcache_page(page);
	return 0;
}

/* Copy pages in the request to/from userspace buffer */
static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
			   int zeroing)
{
	unsigned i;
	struct fuse_req *req = cs->req;
	unsigned offset = req->page_offset;
	unsigned count = min(nbytes, (unsigned) PAGE_SIZE - offset);

	for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
		struct page *page = req->pages[i];
		int err = fuse_copy_page(cs, page, offset, count, zeroing);
		if (err)
			return err;

		nbytes -= count;
		count = min(nbytes, (unsigned) PAGE_SIZE);
		offset = 0;
	}
	return 0;
}

/* Copy a single argument in the request to/from userspace buffer */
static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
{
	while (size) {
		int err;
		if (!cs->len && (err = fuse_copy_fill(cs)))
			return err;
		fuse_copy_do(cs, &val, &size);
	}
	return 0;
}

/* Copy request arguments to/from userspace buffer */
static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
			  unsigned argpages, struct fuse_arg *args,
			  int zeroing)
{
	int err = 0;
	unsigned i;

	for (i = 0; !err && i < numargs; i++) {
		struct fuse_arg *arg = &args[i];
		if (i == numargs - 1 && argpages)
			err = fuse_copy_pages(cs, arg->size, zeroing);
		else
			err = fuse_copy_one(cs, arg->value, arg->size);
	}
	return err;
}

/* Wait until a request is available on the pending list */
static void request_wait(struct fuse_conn *fc)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue_exclusive(&fc->waitq, &wait);
	while (fc->connected && list_empty(&fc->pending)) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (signal_pending(current))
			break;

		spin_unlock(&fuse_lock);
		schedule();
		spin_lock(&fuse_lock);
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&fc->waitq, &wait);
}

/*
 * Read a single request into the userspace filesystem's buffer.  This
 * function waits until a request is available, then removes it from
 * the pending list and copies request data to the userspace buffer.  If
 * no reply is needed (FORGET) or the request has been interrupted or
 * there was an error during the copying then it's finished by calling
 * request_end().  Otherwise add it to the processing list, and set
 * the 'sent' flag.
 */
static ssize_t fuse_dev_readv(struct file *file, const struct iovec *iov,
			      unsigned long nr_segs, loff_t *off)
{
	int err;
	struct fuse_conn *fc;
	struct fuse_req *req;
	struct fuse_in *in;
	struct fuse_copy_state cs;
	unsigned reqsize;

 restart:
	spin_lock(&fuse_lock);
	fc = file->private_data;
	err = -EPERM;
	if (!fc)
		goto err_unlock;
	request_wait(fc);
	err = -ENODEV;
	if (!fc->connected)
		goto err_unlock;
	err = -ERESTARTSYS;
	if (list_empty(&fc->pending))
		goto err_unlock;

	req = list_entry(fc->pending.next, struct fuse_req, list);
	req->state = FUSE_REQ_READING;
	list_move(&req->list, &fc->io);

	in = &req->in;
	reqsize = in->h.len;
	/* If request is too large, reply with an error and restart the read */
	if (iov_length(iov, nr_segs) < reqsize) {
		req->out.h.error = -EIO;
		/* SETXATTR is special, since it may contain too large data */
		if (in->h.opcode == FUSE_SETXATTR)
			req->out.h.error = -E2BIG;
		request_end(fc, req);
		goto restart;
	}
	spin_unlock(&fuse_lock);
	fuse_copy_init(&cs, 1, req, iov, nr_segs);
	err = fuse_copy_one(&cs, &in->h, sizeof(in->h));
	if (!err)
		err = fuse_copy_args(&cs, in->numargs, in->argpages,
				     (struct fuse_arg *) in->args, 0);
	fuse_copy_finish(&cs);
	spin_lock(&fuse_lock);
	req->locked = 0;
	if (!err && req->interrupted)
		err = -ENOENT;
	if (err) {
		if (!req->interrupted)
			req->out.h.error = -EIO;
		request_end(fc, req);
		return err;
	}
	if (!req->isreply)
		request_end(fc, req);
	else {
		req->state = FUSE_REQ_SENT;
		list_move_tail(&req->list, &fc->processing);
		spin_unlock(&fuse_lock);
	}
	return reqsize;

 err_unlock:
	spin_unlock(&fuse_lock);
	return err;
}

static ssize_t fuse_dev_read(struct file *file, char __user *buf,
			     size_t nbytes, loff_t *off)
{
	struct iovec iov;
	iov.iov_len = nbytes;
	iov.iov_base = buf;
	return fuse_dev_readv(file, &iov, 1, off);
}

/* Look up request on processing list by unique ID */
static struct fuse_req *request_find(struct fuse_conn *fc, u64 unique)
{
	struct list_head *entry;

	list_for_each(entry, &fc->processing) {
		struct fuse_req *req;
		req = list_entry(entry, struct fuse_req, list);
		if (req->in.h.unique == unique)
			return req;
	}
	return NULL;
}
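
/*
 * Copy the reply arguments from the userspace buffer into the
 * request.  'nbytes' must match the total reply size exactly, except
 * that the last argument may be shorter when 'argvar' is set.  An
 * error reply carries no arguments at all.
 */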
static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
			 unsigned nbytes)
{
	unsigned reqsize = sizeof(struct fuse_out_header);

	if (out->h.error)
		return nbytes != reqsize ? -EINVAL : 0;

	reqsize += len_args(out->numargs, out->args);

	if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
		return -EINVAL;
	else if (reqsize > nbytes) {
		struct fuse_arg *lastarg = &out->args[out->numargs-1];
		unsigned diffsize = reqsize - nbytes;
		if (diffsize > lastarg->size)
			return -EINVAL;
		lastarg->size -= diffsize;
	}
	return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
			      out->page_zeroing);
}

/*
 * Write a single reply to a request.  First the header is copied from
 * the write buffer.  The request is then searched for on the processing
 * list by the unique ID found in the header.  If found, then remove
 * it from the list and copy the rest of the buffer to the request.
 * The request is finished by calling request_end().
 */
static ssize_t fuse_dev_writev(struct file *file, const struct iovec *iov,
			       unsigned long nr_segs, loff_t *off)
{
	int err;
	unsigned nbytes = iov_length(iov, nr_segs);
	struct fuse_req *req;
	struct fuse_out_header oh;
	struct fuse_copy_state cs;
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return -ENODEV;

	fuse_copy_init(&cs, 0, NULL, iov, nr_segs);
	if (nbytes < sizeof(struct fuse_out_header))
		return -EINVAL;

	err = fuse_copy_one(&cs, &oh, sizeof(oh));
	if (err)
		goto err_finish;
	err = -EINVAL;
	if (!oh.unique || oh.error <= -1000 || oh.error > 0 ||
	    oh.len != nbytes)
		goto err_finish;

	spin_lock(&fuse_lock);
	err = -ENOENT;
	if (!fc->connected)
		goto err_unlock;

	req = request_find(fc, oh.unique);
	err = -EINVAL;
	if (!req)
		goto err_unlock;

	if (req->interrupted) {
		spin_unlock(&fuse_lock);
		fuse_copy_finish(&cs);
		spin_lock(&fuse_lock);
		request_end(fc, req);
		return -ENOENT;
	}
	list_move(&req->list, &fc->io);
	req->out.h = oh;
	req->locked = 1;
	cs.req = req;
	spin_unlock(&fuse_lock);

	err = copy_out_args(&cs, &req->out, nbytes);
	fuse_copy_finish(&cs);

	spin_lock(&fuse_lock);
	req->locked = 0;
	if (!err) {
		if (req->interrupted)
			err = -ENOENT;
	} else if (!req->interrupted)
		req->out.h.error = -EIO;
	request_end(fc, req);

	return err ? err : nbytes;

 err_unlock:
	spin_unlock(&fuse_lock);
 err_finish:
	fuse_copy_finish(&cs);
	return err;
}

static ssize_t fuse_dev_write(struct file *file, const char __user *buf,
			      size_t nbytes, loff_t *off)
{
	struct iovec iov;
	iov.iov_len = nbytes;
	iov.iov_base = (char __user *) buf;
	return fuse_dev_writev(file, &iov, 1, off);
}
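
/*
 * The device is always writable; it is readable when there is at
 * least one request on the pending list.
 */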
static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
{
	struct fuse_conn *fc = fuse_get_conn(file);
	unsigned mask = POLLOUT | POLLWRNORM;

	if (!fc)
		return -ENODEV;

	poll_wait(file, &fc->waitq, wait);

	spin_lock(&fuse_lock);
	if (!list_empty(&fc->pending))
		mask |= POLLIN | POLLRDNORM;
	spin_unlock(&fuse_lock);

	return mask;
}

/*
 * Abort all requests on the given list (pending or processing)
 *
 * This function releases and reacquires fuse_lock
 */
static void end_requests(struct fuse_conn *fc, struct list_head *head)
{
	while (!list_empty(head)) {
		struct fuse_req *req;
		req = list_entry(head->next, struct fuse_req, list);
		req->out.h.error = -ECONNABORTED;
		request_end(fc, req);
		spin_lock(&fuse_lock);
	}
}

/*
 * Abort requests under I/O
 *
 * The requests are set to interrupted and finished, and the request
 * waiter is woken up.  This will make request_wait_answer() wait
 * until the request is unlocked and then return.
 *
 * If the request is asynchronous, then the end function needs to be
 * called after waiting for the request to be unlocked (if it was
 * locked).
 */
static void end_io_requests(struct fuse_conn *fc)
{
	while (!list_empty(&fc->io)) {
		struct fuse_req *req =
			list_entry(fc->io.next, struct fuse_req, list);
		void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;

		req->interrupted = 1;
		req->out.h.error = -ECONNABORTED;
		req->state = FUSE_REQ_FINISHED;
		list_del_init(&req->list);
		wake_up(&req->waitq);
		if (end) {
			req->end = NULL;
			/* The end function will consume this reference */
			__fuse_get_request(req);
			spin_unlock(&fuse_lock);
			wait_event(req->waitq, !req->locked);
			end(fc, req);
			spin_lock(&fuse_lock);
		}
	}
}

/*
 * Abort all requests.
 *
 * Emergency exit in case of a malicious or accidental deadlock, or
 * just a hung filesystem.
 *
 * The same effect is usually achievable through killing the
 * filesystem daemon and all users of the filesystem.  The exception
 * is the combination of an asynchronous request and the tricky
 * deadlock (see Documentation/filesystems/fuse.txt).
 *
 * During the aborting, progression of requests from the pending and
 * processing lists onto the io list, and progression of new requests
 * onto the pending list is prevented by fc->connected being false.
 *
 * Progression of requests under I/O to the processing list is
 * prevented by the req->interrupted flag being true for these
 * requests.  For this reason requests on the io list must be aborted
 * first.
 */
void fuse_abort_conn(struct fuse_conn *fc)
{
	spin_lock(&fuse_lock);
	if (fc->connected) {
		fc->connected = 0;
		end_io_requests(fc);
		end_requests(fc, &fc->pending);
		end_requests(fc, &fc->processing);
		wake_up_all(&fc->waitq);
	}
	spin_unlock(&fuse_lock);
}
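
/*
 * Called when the device file is closed: mark the connection as
 * disconnected, abort all pending and processing requests, then drop
 * the reference on the connection object.
 */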
static int fuse_dev_release(struct inode *inode, struct file *file)
{
	struct fuse_conn *fc;

	spin_lock(&fuse_lock);
	fc = file->private_data;
	if (fc) {
		fc->connected = 0;
		end_requests(fc, &fc->pending);
		end_requests(fc, &fc->processing);
	}
	spin_unlock(&fuse_lock);
	if (fc)
		kobject_put(&fc->kobj);

	return 0;
}

struct file_operations fuse_dev_operations = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= fuse_dev_read,
	.readv		= fuse_dev_readv,
	.write		= fuse_dev_write,
	.writev		= fuse_dev_writev,
	.poll		= fuse_dev_poll,
	.release	= fuse_dev_release,
};

static struct miscdevice fuse_miscdevice = {
	.minor = FUSE_MINOR,
	.name  = "fuse",
	.fops = &fuse_dev_operations,
};
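
/* Create the request slab cache and register the misc device */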
int __init fuse_dev_init(void)
{
	int err = -ENOMEM;
	fuse_req_cachep = kmem_cache_create("fuse_request",
					    sizeof(struct fuse_req),
					    0, 0, NULL, NULL);
	if (!fuse_req_cachep)
		goto out;

	err = misc_register(&fuse_miscdevice);
	if (err)
		goto out_cache_clean;

	return 0;

 out_cache_clean:
	kmem_cache_destroy(fuse_req_cachep);
 out:
	return err;
}

void fuse_dev_cleanup(void)
{
	misc_deregister(&fuse_miscdevice);
	kmem_cache_destroy(fuse_req_cachep);
}