/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2005  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/uio.h>
#include <linux/miscdevice.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/slab.h>

MODULE_ALIAS_MISCDEV(FUSE_MINOR);

static kmem_cache_t *fuse_req_cachep;

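/*
 * Look up the connection associated with a device file.  Returns NULL
 * if the connection is no longer active.
 */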
static struct fuse_conn *fuse_get_conn(struct file *file)
{
        struct fuse_conn *fc;
        spin_lock(&fuse_lock);
        fc = file->private_data;
        if (fc && !fc->connected)
                fc = NULL;
        spin_unlock(&fuse_lock);
        return fc;
}

static void fuse_request_init(struct fuse_req *req)
{
        memset(req, 0, sizeof(*req));
        INIT_LIST_HEAD(&req->list);
        init_waitqueue_head(&req->waitq);
        atomic_set(&req->count, 1);
}

struct fuse_req *fuse_request_alloc(void)
{
        struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, SLAB_KERNEL);
        if (req)
                fuse_request_init(req);
        return req;
}

void fuse_request_free(struct fuse_req *req)
{
        kmem_cache_free(fuse_req_cachep, req);
}

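/*
 * Block all signals except SIGKILL for the current task, saving the
 * old mask so it can be restored with restore_sigs().
 */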
static void block_sigs(sigset_t *oldset)
{
        sigset_t mask;

        siginitsetinv(&mask, sigmask(SIGKILL));
        sigprocmask(SIG_BLOCK, &mask, oldset);
}

static void restore_sigs(sigset_t *oldset)
{
        sigprocmask(SIG_SETMASK, oldset, NULL);
}

void fuse_reset_request(struct fuse_req *req)
{
        int preallocated = req->preallocated;
        BUG_ON(atomic_read(&req->count) != 1);
        fuse_request_init(req);
        req->preallocated = preallocated;
}

static void __fuse_get_request(struct fuse_req *req)
{
        atomic_inc(&req->count);
}

/* Must be called with > 1 refcount */
static void __fuse_put_request(struct fuse_req *req)
{
        BUG_ON(atomic_read(&req->count) < 2);
        atomic_dec(&req->count);
}

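/*
 * Take a preallocated request off fc->unused_list and initialize it
 * with the credentials of the current task.
 */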
static struct fuse_req *do_get_request(struct fuse_conn *fc)
{
        struct fuse_req *req;

        spin_lock(&fuse_lock);
        BUG_ON(list_empty(&fc->unused_list));
        req = list_entry(fc->unused_list.next, struct fuse_req, list);
        list_del_init(&req->list);
        spin_unlock(&fuse_lock);
        fuse_request_init(req);
        req->preallocated = 1;
        req->in.h.uid = current->fsuid;
        req->in.h.gid = current->fsgid;
        req->in.h.pid = current->pid;
        return req;
}

/* This can return NULL, but only in case it's interrupted by a SIGKILL */
struct fuse_req *fuse_get_request(struct fuse_conn *fc)
{
        int intr;
        sigset_t oldset;

        atomic_inc(&fc->num_waiting);
        block_sigs(&oldset);
        intr = down_interruptible(&fc->outstanding_sem);
        restore_sigs(&oldset);
        if (intr) {
                atomic_dec(&fc->num_waiting);
                return NULL;
        }
        return do_get_request(fc);
}

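/*
 * Return a request to the unused list (if preallocated) or free it,
 * and release one slot of the outstanding request limit.
 */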
static void fuse_putback_request(struct fuse_conn *fc, struct fuse_req *req)
{
        spin_lock(&fuse_lock);
        if (req->preallocated) {
                atomic_dec(&fc->num_waiting);
                list_add(&req->list, &fc->unused_list);
        } else
                fuse_request_free(req);

        /* If we are in debt decrease that first */
        if (fc->outstanding_debt)
                fc->outstanding_debt--;
        else
                up(&fc->outstanding_sem);
        spin_unlock(&fuse_lock);
}

void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
{
        if (atomic_dec_and_test(&req->count))
                fuse_putback_request(fc, req);
}

void fuse_release_background(struct fuse_req *req)
{
        iput(req->inode);
        iput(req->inode2);
        if (req->file)
                fput(req->file);
        spin_lock(&fuse_lock);
        list_del(&req->bg_entry);
        spin_unlock(&fuse_lock);
}

/*
 * This function is called when a request is finished.  Either a reply
 * has arrived or it was interrupted (and not yet sent) or some error
 * occurred during communication with userspace, or the device file
 * was closed.  In case of a background request the references to the
 * stored objects are released.  The requester thread is woken up (if
 * still waiting), the 'end' callback is called if given, else the
 * reference to the request is released.
 *
 * Called with fuse_lock held, unlocks it
 */
static void request_end(struct fuse_conn *fc, struct fuse_req *req)
{
        void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
        req->end = NULL;
        list_del(&req->list);
        req->state = FUSE_REQ_FINISHED;
        spin_unlock(&fuse_lock);
        if (req->background) {
                down_read(&fc->sbput_sem);
                if (fc->mounted)
                        fuse_release_background(req);
                up_read(&fc->sbput_sem);
        }
        wake_up(&req->waitq);
        if (end)
                end(fc, req);
        else
                fuse_put_request(fc, req);
}

/*
 * Unfortunately request interruption does not just solve the deadlock
 * problem, it causes problems too.  These stem from the fact that an
 * interrupted request continues to be processed in userspace, while
 * all the locks and object references (inode and file) held during
 * the operation are released.
 *
 * Releasing the locks is exactly why the request needs to be
 * interrupted, so there's not a lot that can be done about this,
 * except introducing additional locking in userspace.
 *
 * More important is to keep the inode and file references until
 * userspace has replied, otherwise FORGET and RELEASE could be sent
 * while the inode/file is still used by the filesystem.
 *
 * For this reason the concept of a "background" request is
 * introduced.  An interrupted request is backgrounded if it has
 * already been sent to userspace.  Backgrounding involves getting an
 * extra reference to the inode(s) or file used in the request, and
 * adding the request to the fc->background list.  When a reply is
 * received for a background request, the object references are
 * released, and the request is removed from the list.  If the
 * filesystem is unmounted while there are still background requests,
 * the list is walked and references are released as if a reply was
 * received.
 *
 * There's one more use for a background request.  The RELEASE message
 * is always sent as background, since it doesn't return an error or
 * data.
 */
static void background_request(struct fuse_conn *fc, struct fuse_req *req)
{
        req->background = 1;
        list_add(&req->bg_entry, &fc->background);
        if (req->inode)
                req->inode = igrab(req->inode);
        if (req->inode2)
                req->inode2 = igrab(req->inode2);
        if (req->file)
                get_file(req->file);
}

/* Called with fuse_lock held.  Releases, and then reacquires it. */
static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
{
        sigset_t oldset;

        spin_unlock(&fuse_lock);
        block_sigs(&oldset);
        wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED);
        restore_sigs(&oldset);
        spin_lock(&fuse_lock);
        if (req->state == FUSE_REQ_FINISHED && !req->interrupted)
                return;

        if (!req->interrupted) {
                req->out.h.error = -EINTR;
                req->interrupted = 1;
        }
        if (req->locked) {
                /* This is uninterruptible sleep, because data is
                   being copied to/from the buffers of req.  During
                   locked state, there mustn't be any filesystem
                   operation (e.g. page fault), since that could lead
                   to deadlock */
                spin_unlock(&fuse_lock);
                wait_event(req->waitq, !req->locked);
                spin_lock(&fuse_lock);
        }
        if (req->state == FUSE_REQ_PENDING) {
                list_del(&req->list);
                __fuse_put_request(req);
        } else if (req->state == FUSE_REQ_SENT)
                background_request(fc, req);
}

static unsigned len_args(unsigned numargs, struct fuse_arg *args)
{
        unsigned nbytes = 0;
        unsigned i;

        for (i = 0; i < numargs; i++)
                nbytes += args[i].size;

        return nbytes;
}

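/*
 * Assign a unique ID and total length to the request and add it to
 * the pending list, waking up a reader of the device.
 */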
static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
{
        fc->reqctr++;
        /* zero is special */
        if (fc->reqctr == 0)
                fc->reqctr = 1;
        req->in.h.unique = fc->reqctr;
        req->in.h.len = sizeof(struct fuse_in_header) +
                len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
        if (!req->preallocated) {
                /* If request is not preallocated (either FORGET or
                   RELEASE), then still decrease outstanding_sem, so
                   user can't open infinite number of files while not
                   processing the RELEASE requests.  However for
                   efficiency do it without blocking, so if down()
                   would block, just increase the debt instead */
                if (down_trylock(&fc->outstanding_sem))
                        fc->outstanding_debt++;
        }
        list_add_tail(&req->list, &fc->pending);
        req->state = FUSE_REQ_PENDING;
        wake_up(&fc->waitq);
}

/*
 * This can only be interrupted by a SIGKILL
 */
void request_send(struct fuse_conn *fc, struct fuse_req *req)
{
        req->isreply = 1;
        spin_lock(&fuse_lock);
        if (!fc->connected)
                req->out.h.error = -ENOTCONN;
        else if (fc->conn_error)
                req->out.h.error = -ECONNREFUSED;
        else {
                queue_request(fc, req);
                /* acquire extra reference, since request is still needed
                   after request_end() */
                __fuse_get_request(req);

                request_wait_answer(fc, req);
        }
        spin_unlock(&fuse_lock);
}

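/*
 * Queue a request without waiting for the answer.  If the connection
 * is already broken, finish the request with -ENOTCONN.
 */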
static void request_send_nowait(struct fuse_conn *fc, struct fuse_req *req)
{
        spin_lock(&fuse_lock);
        if (fc->connected) {
                queue_request(fc, req);
                spin_unlock(&fuse_lock);
        } else {
                req->out.h.error = -ENOTCONN;
                request_end(fc, req);
        }
}

void request_send_noreply(struct fuse_conn *fc, struct fuse_req *req)
{
        req->isreply = 0;
        request_send_nowait(fc, req);
}

void request_send_background(struct fuse_conn *fc, struct fuse_req *req)
{
        req->isreply = 1;
        spin_lock(&fuse_lock);
        background_request(fc, req);
        spin_unlock(&fuse_lock);
        request_send_nowait(fc, req);
}

/*
 * Lock the request.  Up to the next unlock_request() there mustn't be
 * anything that could cause a page-fault.  If the request was already
 * interrupted bail out.
 */
static int lock_request(struct fuse_req *req)
{
        int err = 0;
        if (req) {
                spin_lock(&fuse_lock);
                if (req->interrupted)
                        err = -ENOENT;
                else
                        req->locked = 1;
                spin_unlock(&fuse_lock);
        }
        return err;
}

/*
 * Unlock request.  If it was interrupted during being locked, the
 * requester thread is currently waiting for it to be unlocked, so
 * wake it up.
 */
static void unlock_request(struct fuse_req *req)
{
        if (req) {
                spin_lock(&fuse_lock);
                req->locked = 0;
                if (req->interrupted)
                        wake_up(&req->waitq);
                spin_unlock(&fuse_lock);
        }
}

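/*
 * State kept while copying a request to or from the userspace buffer
 * described by an iovec, one mapped page at a time.
 */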
struct fuse_copy_state {
        int write;
        struct fuse_req *req;
        const struct iovec *iov;
        unsigned long nr_segs;
        unsigned long seglen;
        unsigned long addr;
        struct page *pg;
        void *mapaddr;
        void *buf;
        unsigned len;
};

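/*
 * Initialize the copy state; 'write' is nonzero when copying to the
 * userspace buffer (i.e. for a device read).
 */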
static void fuse_copy_init(struct fuse_copy_state *cs, int write,
                           struct fuse_req *req, const struct iovec *iov,
                           unsigned long nr_segs)
{
        memset(cs, 0, sizeof(*cs));
        cs->write = write;
        cs->req = req;
        cs->iov = iov;
        cs->nr_segs = nr_segs;
}

/* Unmap and put previous page of userspace buffer */
static void fuse_copy_finish(struct fuse_copy_state *cs)
{
        if (cs->mapaddr) {
                kunmap_atomic(cs->mapaddr, KM_USER0);
                if (cs->write) {
                        flush_dcache_page(cs->pg);
                        set_page_dirty_lock(cs->pg);
                }
                put_page(cs->pg);
                cs->mapaddr = NULL;
        }
}

/*
 * Get another pagefull of userspace buffer, and map it to kernel
 * address space, and lock request
 */
static int fuse_copy_fill(struct fuse_copy_state *cs)
{
        unsigned long offset;
        int err;

        unlock_request(cs->req);
        fuse_copy_finish(cs);
        if (!cs->seglen) {
                BUG_ON(!cs->nr_segs);
                cs->seglen = cs->iov[0].iov_len;
                cs->addr = (unsigned long) cs->iov[0].iov_base;
                cs->iov++;
                cs->nr_segs--;
        }
        down_read(&current->mm->mmap_sem);
        err = get_user_pages(current, current->mm, cs->addr, 1, cs->write, 0,
                             &cs->pg, NULL);
        up_read(&current->mm->mmap_sem);
        if (err < 0)
                return err;
        BUG_ON(err != 1);
        offset = cs->addr % PAGE_SIZE;
        cs->mapaddr = kmap_atomic(cs->pg, KM_USER0);
        cs->buf = cs->mapaddr + offset;
        cs->len = min(PAGE_SIZE - offset, cs->seglen);
        cs->seglen -= cs->len;
        cs->addr += cs->len;

        return lock_request(cs->req);
}

/* Do as much copy to/from userspace buffer as we can */
static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
{
        unsigned ncpy = min(*size, cs->len);
        if (val) {
                if (cs->write)
                        memcpy(cs->buf, *val, ncpy);
                else
                        memcpy(*val, cs->buf, ncpy);
                *val += ncpy;
        }
        *size -= ncpy;
        cs->len -= ncpy;
        cs->buf += ncpy;
        return ncpy;
}

/*
 * Copy a page in the request to/from the userspace buffer.  Must be
 * done atomically
 */
static int fuse_copy_page(struct fuse_copy_state *cs, struct page *page,
                          unsigned offset, unsigned count, int zeroing)
{
        if (page && zeroing && count < PAGE_SIZE) {
                void *mapaddr = kmap_atomic(page, KM_USER1);
                memset(mapaddr, 0, PAGE_SIZE);
                kunmap_atomic(mapaddr, KM_USER1);
        }
        while (count) {
                int err;
                if (!cs->len && (err = fuse_copy_fill(cs)))
                        return err;
                if (page) {
                        void *mapaddr = kmap_atomic(page, KM_USER1);
                        void *buf = mapaddr + offset;
                        offset += fuse_copy_do(cs, &buf, &count);
                        kunmap_atomic(mapaddr, KM_USER1);
                } else
                        offset += fuse_copy_do(cs, NULL, &count);
        }
        if (page && !cs->write)
                flush_dcache_page(page);
        return 0;
}

/* Copy pages in the request to/from userspace buffer */
static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
                           int zeroing)
{
        unsigned i;
        struct fuse_req *req = cs->req;
        unsigned offset = req->page_offset;
        unsigned count = min(nbytes, (unsigned) PAGE_SIZE - offset);

        for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
                struct page *page = req->pages[i];
                int err = fuse_copy_page(cs, page, offset, count, zeroing);
                if (err)
                        return err;

                nbytes -= count;
                count = min(nbytes, (unsigned) PAGE_SIZE);
                offset = 0;
        }
        return 0;
}

/* Copy a single argument in the request to/from userspace buffer */
static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
{
        while (size) {
                int err;
                if (!cs->len && (err = fuse_copy_fill(cs)))
                        return err;
                fuse_copy_do(cs, &val, &size);
        }
        return 0;
}

/* Copy request arguments to/from userspace buffer */
static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
                          unsigned argpages, struct fuse_arg *args,
                          int zeroing)
{
        int err = 0;
        unsigned i;

        for (i = 0; !err && i < numargs; i++) {
                struct fuse_arg *arg = &args[i];
                if (i == numargs - 1 && argpages)
                        err = fuse_copy_pages(cs, arg->size, zeroing);
                else
                        err = fuse_copy_one(cs, arg->value, arg->size);
        }
        return err;
}

/* Wait until a request is available on the pending list */
static void request_wait(struct fuse_conn *fc)
{
        DECLARE_WAITQUEUE(wait, current);

        add_wait_queue_exclusive(&fc->waitq, &wait);
        while (fc->connected && list_empty(&fc->pending)) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (signal_pending(current))
                        break;

                spin_unlock(&fuse_lock);
                schedule();
                spin_lock(&fuse_lock);
        }
        set_current_state(TASK_RUNNING);
        remove_wait_queue(&fc->waitq, &wait);
}

/*
 * Read a single request into the userspace filesystem's buffer.  This
 * function waits until a request is available, then removes it from
 * the pending list and copies request data to userspace buffer.  If
 * no reply is needed (FORGET) or request has been interrupted or
 * there was an error during the copying then it's finished by calling
 * request_end().  Otherwise add it to the processing list, and set
 * the 'sent' flag.
 */
static ssize_t fuse_dev_readv(struct file *file, const struct iovec *iov,
                              unsigned long nr_segs, loff_t *off)
{
        int err;
        struct fuse_conn *fc;
        struct fuse_req *req;
        struct fuse_in *in;
        struct fuse_copy_state cs;
        unsigned reqsize;

 restart:
        spin_lock(&fuse_lock);
        fc = file->private_data;
        err = -EPERM;
        if (!fc)
                goto err_unlock;
        request_wait(fc);
        err = -ENODEV;
        if (!fc->connected)
                goto err_unlock;
        err = -ERESTARTSYS;
        if (list_empty(&fc->pending))
                goto err_unlock;

        req = list_entry(fc->pending.next, struct fuse_req, list);
        req->state = FUSE_REQ_READING;
        list_move(&req->list, &fc->io);

        in = &req->in;
        reqsize = in->h.len;
        /* If request is too large, reply with an error and restart the read */
        if (iov_length(iov, nr_segs) < reqsize) {
                req->out.h.error = -EIO;
                /* SETXATTR is special, since it may contain too large data */
                if (in->h.opcode == FUSE_SETXATTR)
                        req->out.h.error = -E2BIG;
                request_end(fc, req);
                goto restart;
        }
        spin_unlock(&fuse_lock);
        fuse_copy_init(&cs, 1, req, iov, nr_segs);
        err = fuse_copy_one(&cs, &in->h, sizeof(in->h));
        if (!err)
                err = fuse_copy_args(&cs, in->numargs, in->argpages,
                                     (struct fuse_arg *) in->args, 0);
        fuse_copy_finish(&cs);
        spin_lock(&fuse_lock);
        req->locked = 0;
        if (!err && req->interrupted)
                err = -ENOENT;
        if (err) {
                if (!req->interrupted)
                        req->out.h.error = -EIO;
                request_end(fc, req);
                return err;
        }
        if (!req->isreply)
                request_end(fc, req);
        else {
                req->state = FUSE_REQ_SENT;
                list_move_tail(&req->list, &fc->processing);
                spin_unlock(&fuse_lock);
        }
        return reqsize;

 err_unlock:
        spin_unlock(&fuse_lock);
        return err;
}

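/* Plain read(2) on the device: wrap the buffer in an iovec */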
static ssize_t fuse_dev_read(struct file *file, char __user *buf,
                             size_t nbytes, loff_t *off)
{
        struct iovec iov;
        iov.iov_len = nbytes;
        iov.iov_base = buf;
        return fuse_dev_readv(file, &iov, 1, off);
}

/* Look up request on processing list by unique ID */
static struct fuse_req *request_find(struct fuse_conn *fc, u64 unique)
{
        struct list_head *entry;

        list_for_each(entry, &fc->processing) {
                struct fuse_req *req;
                req = list_entry(entry, struct fuse_req, list);
                if (req->in.h.unique == unique)
                        return req;
        }
        return NULL;
}

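/*
 * Copy the reply arguments from the userspace buffer, checking that
 * the reply size matches what the request expects (the last argument
 * may be shorter if out->argvar is set).
 */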
static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
                         unsigned nbytes)
{
        unsigned reqsize = sizeof(struct fuse_out_header);

        if (out->h.error)
                return nbytes != reqsize ? -EINVAL : 0;

        reqsize += len_args(out->numargs, out->args);

        if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
                return -EINVAL;
        else if (reqsize > nbytes) {
                struct fuse_arg *lastarg = &out->args[out->numargs-1];
                unsigned diffsize = reqsize - nbytes;
                if (diffsize > lastarg->size)
                        return -EINVAL;
                lastarg->size -= diffsize;
        }
        return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
                              out->page_zeroing);
}

/*
 * Write a single reply to a request.  First the header is copied from
 * the write buffer.  The request is then searched on the processing
 * list by the unique ID found in the header.  If found, then remove
 * it from the list and copy the rest of the buffer to the request.
 * The request is finished by calling request_end()
 */
static ssize_t fuse_dev_writev(struct file *file, const struct iovec *iov,
                               unsigned long nr_segs, loff_t *off)
{
        int err;
        unsigned nbytes = iov_length(iov, nr_segs);
        struct fuse_req *req;
        struct fuse_out_header oh;
        struct fuse_copy_state cs;
        struct fuse_conn *fc = fuse_get_conn(file);
        if (!fc)
                return -ENODEV;

        fuse_copy_init(&cs, 0, NULL, iov, nr_segs);
        if (nbytes < sizeof(struct fuse_out_header))
                return -EINVAL;

        err = fuse_copy_one(&cs, &oh, sizeof(oh));
        if (err)
                goto err_finish;
        err = -EINVAL;
        if (!oh.unique || oh.error <= -1000 || oh.error > 0 ||
            oh.len != nbytes)
                goto err_finish;

        spin_lock(&fuse_lock);
        err = -ENOENT;
        if (!fc->connected)
                goto err_unlock;

        req = request_find(fc, oh.unique);
        err = -EINVAL;
        if (!req)
                goto err_unlock;

        if (req->interrupted) {
                spin_unlock(&fuse_lock);
                fuse_copy_finish(&cs);
                spin_lock(&fuse_lock);
                request_end(fc, req);
                return -ENOENT;
        }
        list_move(&req->list, &fc->io);
        req->out.h = oh;
        req->locked = 1;
        cs.req = req;
        spin_unlock(&fuse_lock);

        err = copy_out_args(&cs, &req->out, nbytes);
        fuse_copy_finish(&cs);

        spin_lock(&fuse_lock);
        req->locked = 0;
        if (!err) {
                if (req->interrupted)
                        err = -ENOENT;
        } else if (!req->interrupted)
                req->out.h.error = -EIO;
        request_end(fc, req);

        return err ? err : nbytes;

 err_unlock:
        spin_unlock(&fuse_lock);
 err_finish:
        fuse_copy_finish(&cs);
        return err;
}

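/* Plain write(2) on the device: wrap the buffer in an iovec */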
static ssize_t fuse_dev_write(struct file *file, const char __user *buf,
                              size_t nbytes, loff_t *off)
{
        struct iovec iov;
        iov.iov_len = nbytes;
        iov.iov_base = (char __user *) buf;
        return fuse_dev_writev(file, &iov, 1, off);
}

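/*
 * The device is always writable; it is readable when there is at
 * least one request on the pending list.
 */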
static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
{
        struct fuse_conn *fc = fuse_get_conn(file);
        unsigned mask = POLLOUT | POLLWRNORM;

        if (!fc)
                return -ENODEV;

        poll_wait(file, &fc->waitq, wait);

        spin_lock(&fuse_lock);
        if (!list_empty(&fc->pending))
                mask |= POLLIN | POLLRDNORM;
        spin_unlock(&fuse_lock);

        return mask;
}

/*
 * Abort all requests on the given list (pending or processing)
 *
 * This function releases and reacquires fuse_lock
 */
static void end_requests(struct fuse_conn *fc, struct list_head *head)
{
        while (!list_empty(head)) {
                struct fuse_req *req;
                req = list_entry(head->next, struct fuse_req, list);
                req->out.h.error = -ECONNABORTED;
                request_end(fc, req);
                spin_lock(&fuse_lock);
        }
}

/*
 * Abort requests under I/O
 *
 * The requests are set to interrupted and finished, and the request
 * waiter is woken up.  This will make request_wait_answer() wait
 * until the request is unlocked and then return.
 *
 * If the request is asynchronous, then the end function needs to be
 * called after waiting for the request to be unlocked (if it was
 * locked).
 */
static void end_io_requests(struct fuse_conn *fc)
{
        while (!list_empty(&fc->io)) {
                struct fuse_req *req =
                        list_entry(fc->io.next, struct fuse_req, list);
                void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;

                req->interrupted = 1;
                req->out.h.error = -ECONNABORTED;
                req->state = FUSE_REQ_FINISHED;
                list_del_init(&req->list);
                wake_up(&req->waitq);
                if (end) {
                        req->end = NULL;
                        /* The end function will consume this reference */
                        __fuse_get_request(req);
                        spin_unlock(&fuse_lock);
                        wait_event(req->waitq, !req->locked);
                        end(fc, req);
                        spin_lock(&fuse_lock);
                }
        }
}

/*
 * Abort all requests.
 *
 * Emergency exit in case of a malicious or accidental deadlock, or
 * just a hung filesystem.
 *
 * The same effect is usually achievable through killing the
 * filesystem daemon and all users of the filesystem.  The exception
 * is the combination of an asynchronous request and the tricky
 * deadlock (see Documentation/filesystems/fuse.txt).
 *
 * During the aborting, progression of requests from the pending and
 * processing lists onto the io list, and progression of new requests
 * onto the pending list is prevented by fc->connected being false.
 *
 * Progression of requests under I/O to the processing list is
 * prevented by the req->interrupted flag being true for these
 * requests.  For this reason requests on the io list must be aborted
 * first.
 */
void fuse_abort_conn(struct fuse_conn *fc)
{
        spin_lock(&fuse_lock);
        if (fc->connected) {
                fc->connected = 0;
                end_io_requests(fc);
                end_requests(fc, &fc->pending);
                end_requests(fc, &fc->processing);
                wake_up_all(&fc->waitq);
        }
        spin_unlock(&fuse_lock);
}

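/*
 * Called when the device file is closed: mark the connection as dead,
 * abort all pending and processing requests and drop the reference to
 * the connection.
 */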
static int fuse_dev_release(struct inode *inode, struct file *file)
{
        struct fuse_conn *fc;

        spin_lock(&fuse_lock);
        fc = file->private_data;
        if (fc) {
                fc->connected = 0;
                end_requests(fc, &fc->pending);
                end_requests(fc, &fc->processing);
        }
        spin_unlock(&fuse_lock);
        if (fc)
                kobject_put(&fc->kobj);

        return 0;
}

struct file_operations fuse_dev_operations = {
        .owner          = THIS_MODULE,
        .llseek         = no_llseek,
        .read           = fuse_dev_read,
        .readv          = fuse_dev_readv,
        .write          = fuse_dev_write,
        .writev         = fuse_dev_writev,
        .poll           = fuse_dev_poll,
        .release        = fuse_dev_release,
};

static struct miscdevice fuse_miscdevice = {
        .minor = FUSE_MINOR,
        .name = "fuse",
        .fops = &fuse_dev_operations,
};

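/* Create the request slab cache and register the misc device */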
int __init fuse_dev_init(void)
{
        int err = -ENOMEM;
        fuse_req_cachep = kmem_cache_create("fuse_request",
                                            sizeof(struct fuse_req),
                                            0, 0, NULL, NULL);
        if (!fuse_req_cachep)
                goto out;

        err = misc_register(&fuse_miscdevice);
        if (err)
                goto out_cache_clean;

        return 0;

 out_cache_clean:
        kmem_cache_destroy(fuse_req_cachep);
 out:
        return err;
}

void fuse_dev_cleanup(void)
{
        misc_deregister(&fuse_miscdevice);
        kmem_cache_destroy(fuse_req_cachep);
}