/*
 *	An async IO implementation for Linux
 *	Written by Benjamin LaHaise <bcrl@kvack.org>
 *
 *	Implements an efficient asynchronous io interface.
 *
 *	Copyright 2000, 2001, 2002 Red Hat, Inc.  All Rights Reserved.
 *
 *	See ../COPYING for licensing terms.
 */
#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/aio_abi.h>
#include <linux/export.h>
#include <linux/syscalls.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>

#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mmu_context.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/aio.h>
#include <linux/highmem.h>
#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/eventfd.h>
#include <linux/blkdev.h>
#include <linux/compat.h>
#include <linux/anon_inodes.h>
#include <linux/migrate.h>
#include <linux/ramfs.h>
#include <linux/percpu-refcount.h>

#include <asm/kmap_types.h>
#include <asm/uaccess.h>
#define AIO_RING_MAGIC			0xa10a10a1
#define AIO_RING_COMPAT_FEATURES	1
#define AIO_RING_INCOMPAT_FEATURES	0
struct aio_ring {
	unsigned	id;	/* kernel internal index number */
	unsigned	nr;	/* number of io_events */
	unsigned	head;
	unsigned	tail;

	unsigned	magic;
	unsigned	compat_features;
	unsigned	incompat_features;
	unsigned	header_length;	/* size of aio_ring */

	struct io_event		io_events[0];
}; /* 128 bytes + ring size */
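
/*
 * Illustrative sketch (not part of the original source): this header is
 * mapped into userspace at the start of the ring, which is why completions
 * can be consumed without a syscall.  A reader would do roughly the
 * following, barriers and the incompat_features check omitted:
 *
 *	struct aio_ring *ring = (struct aio_ring *)ctx; // aio_context_t handle
 *	while (ring->head != ring->tail) {
 *		struct io_event ev = ring->io_events[ring->head];
 *		ring->head = (ring->head + 1) % ring->nr;
 *		// consume ev ...
 *	}
 */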
#define AIO_RING_PAGES	8
struct kioctx_table {
	struct rcu_head	rcu;
	unsigned	nr;
	struct kioctx	*table[];
};
struct kioctx_cpu {
	unsigned		reqs_available;
};
struct kioctx {
	struct percpu_ref	users;
	atomic_t		dead;

	unsigned long		user_id;

	struct __percpu kioctx_cpu *cpu;

	/*
	 * For percpu reqs_available, number of slots we move to/from global
	 * counter at a time:
	 */
	unsigned		req_batch;

	/*
	 * This is what userspace passed to io_setup(), it's not used for
	 * anything but counting against the global max_reqs quota.
	 *
	 * The real limit is nr_events - 1, which will be larger (see
	 * aio_setup_ring())
	 */
	unsigned		max_reqs;

	/* Size of ringbuffer, in units of struct io_event */
	unsigned		nr_events;

	unsigned long		mmap_base;
	unsigned long		mmap_size;

	struct page		**ring_pages;
	long			nr_pages;

	struct rcu_head		rcu_head;
	struct work_struct	free_work;

	struct {
		/*
		 * This counts the number of available slots in the ringbuffer,
		 * so we avoid overflowing it: it's decremented (if positive)
		 * when allocating a kiocb and incremented when the resulting
		 * io_event is pulled off the ringbuffer.
		 *
		 * We batch accesses to it with a percpu version.
		 */
		atomic_t	reqs_available;
	} ____cacheline_aligned_in_smp;

	struct {
		spinlock_t	ctx_lock;
		struct list_head active_reqs;	/* used for cancellation */
	} ____cacheline_aligned_in_smp;

	struct {
		struct mutex	ring_lock;
		wait_queue_head_t wait;
	} ____cacheline_aligned_in_smp;

	struct {
		unsigned	tail;
		spinlock_t	completion_lock;
	} ____cacheline_aligned_in_smp;

	struct page		*internal_pages[AIO_RING_PAGES];
	struct file		*aio_ring_file;

	unsigned		id;
};
/*------ sysctl variables----*/
static DEFINE_SPINLOCK(aio_nr_lock);
unsigned long aio_nr;		/* current system wide number of aio requests */
unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */
/*----end sysctl variables---*/
static struct kmem_cache	*kiocb_cachep;
static struct kmem_cache	*kioctx_cachep;
/* aio_setup
 *	Creates the slab caches used by the aio routines, panic on
 *	failure as this is done early during the boot sequence.
 */
static int __init aio_setup(void)
{
	kiocb_cachep = KMEM_CACHE(kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
	kioctx_cachep = KMEM_CACHE(kioctx, SLAB_HWCACHE_ALIGN|SLAB_PANIC);

	pr_debug("sizeof(struct page) = %zu\n", sizeof(struct page));

	return 0;
}
__initcall(aio_setup);
static void aio_free_ring(struct kioctx *ctx)
{
	int i;
	struct file *aio_ring_file = ctx->aio_ring_file;

	for (i = 0; i < ctx->nr_pages; i++) {
		pr_debug("pid(%d) [%d] page->count=%d\n", current->pid, i,
			 page_count(ctx->ring_pages[i]));
		put_page(ctx->ring_pages[i]);
	}

	if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages)
		kfree(ctx->ring_pages);

	if (aio_ring_file) {
		truncate_setsize(aio_ring_file->f_inode, 0);
		fput(aio_ring_file);
		ctx->aio_ring_file = NULL;
	}
}
static int aio_ring_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &generic_file_vm_ops;
	return 0;
}

static const struct file_operations aio_ring_fops = {
	.mmap = aio_ring_mmap,
};
static int aio_set_page_dirty(struct page *page)
{
	return 0;
}
#if IS_ENABLED(CONFIG_MIGRATION)
static int aio_migratepage(struct address_space *mapping, struct page *new,
			struct page *old, enum migrate_mode mode)
{
	struct kioctx *ctx = mapping->private_data;
	unsigned long flags;
	unsigned idx = old->index;
	int rc;

	/* Writeback must be complete */
	BUG_ON(PageWriteback(old));
	put_page(old);

	rc = migrate_page_move_mapping(mapping, new, old, NULL, mode);
	if (rc != MIGRATEPAGE_SUCCESS) {
		get_page(old);
		return rc;
	}

	get_page(new);

	spin_lock_irqsave(&ctx->completion_lock, flags);
	migrate_page_copy(new, old);
	ctx->ring_pages[idx] = new;
	spin_unlock_irqrestore(&ctx->completion_lock, flags);

	return rc;
}
#endif
static const struct address_space_operations aio_ctx_aops = {
	.set_page_dirty	= aio_set_page_dirty,
#if IS_ENABLED(CONFIG_MIGRATION)
	.migratepage	= aio_migratepage,
#endif
};
static int aio_setup_ring(struct kioctx *ctx)
{
	struct aio_ring *ring;
	unsigned nr_events = ctx->max_reqs;
	struct mm_struct *mm = current->mm;
	unsigned long size, populate;
	int nr_pages;
	int i;
	struct file *file;

	/* Compensate for the ring buffer's head/tail overlap entry */
	nr_events += 2;	/* 1 is required, 2 for good luck */

	size = sizeof(struct aio_ring);
	size += sizeof(struct io_event) * nr_events;

	nr_pages = PFN_UP(size);
	if (nr_pages < 0)
		return -EINVAL;

	file = anon_inode_getfile_private("[aio]", &aio_ring_fops, ctx, O_RDWR);
	if (IS_ERR(file)) {
		ctx->aio_ring_file = NULL;
		return -EAGAIN;
	}

	file->f_inode->i_mapping->a_ops = &aio_ctx_aops;
	file->f_inode->i_mapping->private_data = ctx;
	file->f_inode->i_size = PAGE_SIZE * (loff_t)nr_pages;

	for (i = 0; i < nr_pages; i++) {
		struct page *page;
		page = find_or_create_page(file->f_inode->i_mapping,
					   i, GFP_HIGHUSER | __GFP_ZERO);
		if (!page)
			break;
		pr_debug("pid(%d) page[%d]->count=%d\n",
			 current->pid, i, page_count(page));
		SetPageUptodate(page);
		SetPageDirty(page);
		unlock_page(page);
	}
	ctx->aio_ring_file = file;
	nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring))
			/ sizeof(struct io_event);

	ctx->ring_pages = ctx->internal_pages;
	if (nr_pages > AIO_RING_PAGES) {
		ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *),
					  GFP_KERNEL);
		if (!ctx->ring_pages)
			return -ENOMEM;
	}

	ctx->mmap_size = nr_pages * PAGE_SIZE;
	pr_debug("attempting mmap of %lu bytes\n", ctx->mmap_size);

	down_write(&mm->mmap_sem);
	ctx->mmap_base = do_mmap_pgoff(ctx->aio_ring_file, 0, ctx->mmap_size,
				       PROT_READ | PROT_WRITE,
				       MAP_SHARED | MAP_POPULATE, 0, &populate);
	if (IS_ERR((void *)ctx->mmap_base)) {
		up_write(&mm->mmap_sem);
		ctx->mmap_size = 0;
		aio_free_ring(ctx);
		return -EAGAIN;
	}

	pr_debug("mmap address: 0x%08lx\n", ctx->mmap_base);

	/* We must do this while still holding mmap_sem for write, as we
	 * need to be protected against userspace attempting to mremap()
	 * or munmap() the ring buffer.
	 */
	ctx->nr_pages = get_user_pages(current, mm, ctx->mmap_base, nr_pages,
				       1, 0, ctx->ring_pages, NULL);

	/* Dropping the reference here is safe as the page cache will hold
	 * onto the pages for us.  It is also required so that page migration
	 * can unmap the pages and get the right reference count.
	 */
	for (i = 0; i < ctx->nr_pages; i++)
		put_page(ctx->ring_pages[i]);

	up_write(&mm->mmap_sem);

	if (unlikely(ctx->nr_pages != nr_pages)) {
		aio_free_ring(ctx);
		return -EAGAIN;
	}

	ctx->user_id = ctx->mmap_base;
	ctx->nr_events = nr_events; /* trusted copy */

	ring = kmap_atomic(ctx->ring_pages[0]);
	ring->nr = nr_events;	/* user copy */
	ring->id = ~0U;
	ring->head = ring->tail = 0;
	ring->magic = AIO_RING_MAGIC;
	ring->compat_features = AIO_RING_COMPAT_FEATURES;
	ring->incompat_features = AIO_RING_INCOMPAT_FEATURES;
	ring->header_length = sizeof(struct aio_ring);
	kunmap_atomic(ring);
	flush_dcache_page(ctx->ring_pages[0]);

	return 0;
}
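
/*
 * Worked example (illustrative, assuming 4096-byte pages, 32-byte
 * io_events and a 32-byte struct aio_ring): for max_reqs = 126,
 * nr_events becomes 128 after the +2 compensation, so size =
 * 32 + 128 * 32 = 4128 bytes and nr_pages = PFN_UP(4128) = 2.  The
 * trusted nr_events is then recomputed from what was actually
 * allocated: (2 * 4096 - 32) / 32 = 255 events.
 */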
#define AIO_EVENTS_PER_PAGE	(PAGE_SIZE / sizeof(struct io_event))
#define AIO_EVENTS_FIRST_PAGE	((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event))
#define AIO_EVENTS_OFFSET	(AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE)
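
/*
 * Worked example (illustrative, same size assumptions as above):
 * AIO_EVENTS_PER_PAGE = 128, AIO_EVENTS_FIRST_PAGE = 127 and
 * AIO_EVENTS_OFFSET = 1, so ring slot i lives at:
 *
 *	pos  = i + AIO_EVENTS_OFFSET;
 *	page = ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE];
 *	idx  = pos % AIO_EVENTS_PER_PAGE;
 */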
void kiocb_set_cancel_fn(struct kiocb *req, kiocb_cancel_fn *cancel)
{
	struct kioctx *ctx = req->ki_ctx;
	unsigned long flags;

	spin_lock_irqsave(&ctx->ctx_lock, flags);

	if (!req->ki_list.next)
		list_add(&req->ki_list, &ctx->active_reqs);

	req->ki_cancel = cancel;

	spin_unlock_irqrestore(&ctx->ctx_lock, flags);
}
EXPORT_SYMBOL(kiocb_set_cancel_fn);
static int kiocb_cancel(struct kioctx *ctx, struct kiocb *kiocb)
{
	kiocb_cancel_fn *old, *cancel;

	/*
	 * Don't want to set kiocb->ki_cancel = KIOCB_CANCELLED unless it
	 * actually has a cancel function, hence the cmpxchg()
	 */
	cancel = ACCESS_ONCE(kiocb->ki_cancel);
	do {
		if (!cancel || cancel == KIOCB_CANCELLED)
			return -EINVAL;

		old = cancel;
		cancel = cmpxchg(&kiocb->ki_cancel, old, KIOCB_CANCELLED);
	} while (cancel != old);

	return cancel(kiocb);
}
static void free_ioctx_rcu(struct rcu_head *head)
{
	struct kioctx *ctx = container_of(head, struct kioctx, rcu_head);

	free_percpu(ctx->cpu);
	kmem_cache_free(kioctx_cachep, ctx);
}
/*
 * When this function runs, the kioctx has been removed from the "hash table"
 * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted -
 * now it's safe to cancel any that need to be.
 */
static void free_ioctx(struct work_struct *work)
{
	struct kioctx *ctx = container_of(work, struct kioctx, free_work);
	struct aio_ring *ring;
	struct kiocb *req;
	unsigned cpu, avail;
	DEFINE_WAIT(wait);

	spin_lock_irq(&ctx->ctx_lock);

	while (!list_empty(&ctx->active_reqs)) {
		req = list_first_entry(&ctx->active_reqs,
				       struct kiocb, ki_list);

		list_del_init(&req->ki_list);
		kiocb_cancel(ctx, req);
	}

	spin_unlock_irq(&ctx->ctx_lock);

	for_each_possible_cpu(cpu) {
		struct kioctx_cpu *kcpu = per_cpu_ptr(ctx->cpu, cpu);

		atomic_add(kcpu->reqs_available, &ctx->reqs_available);
		kcpu->reqs_available = 0;
	}

	while (1) {
		prepare_to_wait(&ctx->wait, &wait, TASK_UNINTERRUPTIBLE);

		ring = kmap_atomic(ctx->ring_pages[0]);
		avail = (ring->head <= ring->tail)
			 ? ring->tail - ring->head
			 : ctx->nr_events - ring->head + ring->tail;

		atomic_add(avail, &ctx->reqs_available);
		ring->head = ring->tail;
		kunmap_atomic(ring);

		if (atomic_read(&ctx->reqs_available) >= ctx->nr_events - 1)
			break;

		schedule();
	}
	finish_wait(&ctx->wait, &wait);

	WARN_ON(atomic_read(&ctx->reqs_available) > ctx->nr_events - 1);

	aio_free_ring(ctx);

	pr_debug("freeing %p\n", ctx);

	/*
	 * Here the call_rcu() is between the wait_event() for reqs_active to
	 * hit 0, and freeing the ioctx.
	 *
	 * aio_complete() decrements reqs_active, but it has to touch the ioctx
	 * after to issue a wakeup so we use rcu.
	 */
	call_rcu(&ctx->rcu_head, free_ioctx_rcu);
}
static void free_ioctx_ref(struct percpu_ref *ref)
{
	struct kioctx *ctx = container_of(ref, struct kioctx, users);

	INIT_WORK(&ctx->free_work, free_ioctx);
	schedule_work(&ctx->free_work);
}
static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
{
	unsigned i, new_nr;
	struct kioctx_table *table, *old;
	struct aio_ring *ring;

	spin_lock(&mm->ioctx_lock);
	rcu_read_lock();
	table = rcu_dereference(mm->ioctx_table);

	while (1) {
		if (table)
			for (i = 0; i < table->nr; i++)
				if (!table->table[i]) {
					ctx->id = i;
					table->table[i] = ctx;
					rcu_read_unlock();
					spin_unlock(&mm->ioctx_lock);

					ring = kmap_atomic(ctx->ring_pages[0]);
					ring->id = ctx->id;
					kunmap_atomic(ring);
					return 0;
				}

		new_nr = (table ? table->nr : 1) * 4;

		rcu_read_unlock();
		spin_unlock(&mm->ioctx_lock);

		table = kzalloc(sizeof(*table) + sizeof(struct kioctx *) *
				new_nr, GFP_KERNEL);
		if (!table)
			return -ENOMEM;

		table->nr = new_nr;

		spin_lock(&mm->ioctx_lock);
		rcu_read_lock();
		old = rcu_dereference(mm->ioctx_table);

		if (!old) {
			rcu_assign_pointer(mm->ioctx_table, table);
		} else if (table->nr > old->nr) {
			memcpy(table->table, old->table,
			       old->nr * sizeof(struct kioctx *));

			rcu_assign_pointer(mm->ioctx_table, table);
			kfree_rcu(old, rcu);
		} else {
			kfree(table);
			table = old;
		}
	}
}
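
/*
 * Illustrative note (not in the original source): the table quadruples on
 * each growth, so a NULL table is replaced by a 4-entry one, then 16, 64,
 * and so on, keeping id lookups O(1) while reallocations stay rare.
 */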
/* ioctx_alloc
 *	Allocates and initializes an ioctx.  Returns an ERR_PTR if it failed.
 */
static struct kioctx *ioctx_alloc(unsigned nr_events)
{
	struct mm_struct *mm = current->mm;
	struct kioctx *ctx;
	int err = -ENOMEM;

	/*
	 * We keep track of the number of available ringbuffer slots, to prevent
	 * overflow (reqs_available), and we also use percpu counters for this.
	 *
	 * So since up to half the slots might be on other cpu's percpu counters
	 * and unavailable, double nr_events so userspace sees what they
	 * expected: additionally, we move req_batch slots to/from percpu
	 * counters at a time, so make sure that isn't 0:
	 */
	nr_events = max(nr_events, num_possible_cpus() * 4);
	nr_events *= 2;

	/* Prevent overflows */
	if ((nr_events > (0x10000000U / sizeof(struct io_event))) ||
	    (nr_events > (0x10000000U / sizeof(struct kiocb)))) {
		pr_debug("ENOMEM: nr_events too high\n");
		return ERR_PTR(-EINVAL);
	}

	if (!nr_events || (unsigned long)nr_events > (aio_max_nr * 2UL))
		return ERR_PTR(-EAGAIN);

	ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	ctx->max_reqs = nr_events;

	if (percpu_ref_init(&ctx->users, free_ioctx_ref))
		goto out_freectx;

	spin_lock_init(&ctx->ctx_lock);
	spin_lock_init(&ctx->completion_lock);
	mutex_init(&ctx->ring_lock);
	init_waitqueue_head(&ctx->wait);

	INIT_LIST_HEAD(&ctx->active_reqs);

	ctx->cpu = alloc_percpu(struct kioctx_cpu);
	if (!ctx->cpu)
		goto out_freeref;

	if (aio_setup_ring(ctx) < 0)
		goto out_freepcpu;

	atomic_set(&ctx->reqs_available, ctx->nr_events - 1);
	ctx->req_batch = (ctx->nr_events - 1) / (num_possible_cpus() * 4);
	if (ctx->req_batch < 1)
		ctx->req_batch = 1;

	/* limit the number of system wide aios */
	spin_lock(&aio_nr_lock);
	if (aio_nr + nr_events > (aio_max_nr * 2UL) ||
	    aio_nr + nr_events < aio_nr) {
		spin_unlock(&aio_nr_lock);
		goto out_cleanup;
	}
	aio_nr += ctx->max_reqs;
	spin_unlock(&aio_nr_lock);

	percpu_ref_get(&ctx->users); /* io_setup() will drop this ref */

	err = ioctx_add_table(ctx, mm);
	if (err)
		goto out_cleanup_put;

	pr_debug("allocated ioctx %p[%ld]: mm=%p mask=0x%x\n",
		 ctx, ctx->user_id, mm, ctx->nr_events);
	return ctx;

out_cleanup_put:
	percpu_ref_put(&ctx->users);
out_cleanup:
	err = -EAGAIN;
	aio_free_ring(ctx);
out_freepcpu:
	free_percpu(ctx->cpu);
out_freeref:
	free_percpu(ctx->users.pcpu_count);
out_freectx:
	if (ctx->aio_ring_file)
		fput(ctx->aio_ring_file);
	kmem_cache_free(kioctx_cachep, ctx);
	pr_debug("error allocating ioctx %d\n", err);
	return ERR_PTR(err);
}
/* kill_ioctx
 *	Cancels all outstanding aio requests on an aio context.  Used
 *	when the processes owning a context have all exited to encourage
 *	the rapid destruction of the kioctx.
 */
static void kill_ioctx(struct mm_struct *mm, struct kioctx *ctx)
{
	if (!atomic_xchg(&ctx->dead, 1)) {
		struct kioctx_table *table;

		spin_lock(&mm->ioctx_lock);
		rcu_read_lock();
		table = rcu_dereference(mm->ioctx_table);

		WARN_ON(ctx != table->table[ctx->id]);
		table->table[ctx->id] = NULL;
		rcu_read_unlock();
		spin_unlock(&mm->ioctx_lock);

		/* percpu_ref_kill() will do the necessary call_rcu() */
		wake_up_all(&ctx->wait);

		/*
		 * It'd be more correct to do this in free_ioctx(), after all
		 * the outstanding kiocbs have finished - but by then io_destroy
		 * has already returned, so io_setup() could potentially return
		 * -EAGAIN with no ioctxs actually in use (as far as userspace
		 *  could tell).
		 */
		spin_lock(&aio_nr_lock);
		BUG_ON(aio_nr - ctx->max_reqs > aio_nr);
		aio_nr -= ctx->max_reqs;
		spin_unlock(&aio_nr_lock);

		if (ctx->mmap_size)
			vm_munmap(ctx->mmap_base, ctx->mmap_size);

		percpu_ref_kill(&ctx->users);
	}
}
/* wait_on_sync_kiocb:
 *	Waits on the given sync kiocb to complete.
 */
ssize_t wait_on_sync_kiocb(struct kiocb *req)
{
	while (!req->ki_ctx) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (req->ki_ctx)
			break;
		io_schedule();
	}
	__set_current_state(TASK_RUNNING);
	return req->ki_user_data;
}
EXPORT_SYMBOL(wait_on_sync_kiocb);
/*
 * exit_aio: called when the last user of mm goes away.  At this point, there is
 * no way for any new requests to be submitted or any of the io_* syscalls to be
 * called on the context.
 *
 * There may be outstanding kiocbs, but free_ioctx() will explicitly wait on
 * them.
 */
void exit_aio(struct mm_struct *mm)
{
	struct kioctx_table *table;
	struct kioctx *ctx;
	unsigned i = 0;

	while (1) {
		rcu_read_lock();
		table = rcu_dereference(mm->ioctx_table);

		do {
			if (!table || i >= table->nr) {
				rcu_read_unlock();
				rcu_assign_pointer(mm->ioctx_table, NULL);
				if (table)
					kfree(table);
				return;
			}

			ctx = table->table[i++];
		} while (!ctx);

		rcu_read_unlock();

		/*
		 * We don't need to bother with munmap() here -
		 * exit_mmap(mm) is coming and it'll unmap everything.
		 * Since aio_free_ring() uses non-zero ->mmap_size
		 * as indicator that it needs to unmap the area,
		 * just set it to 0; aio_free_ring() is the only
		 * place that uses ->mmap_size, so it's safe.
		 */
		ctx->mmap_size = 0;

		kill_ioctx(mm, ctx);
	}
}
static void put_reqs_available(struct kioctx *ctx, unsigned nr)
{
	struct kioctx_cpu *kcpu;

	preempt_disable();
	kcpu = this_cpu_ptr(ctx->cpu);

	kcpu->reqs_available += nr;
	while (kcpu->reqs_available >= ctx->req_batch * 2) {
		kcpu->reqs_available -= ctx->req_batch;
		atomic_add(ctx->req_batch, &ctx->reqs_available);
	}

	preempt_enable();
}

static bool get_reqs_available(struct kioctx *ctx)
{
	struct kioctx_cpu *kcpu;
	bool ret = false;

	preempt_disable();
	kcpu = this_cpu_ptr(ctx->cpu);

	if (!kcpu->reqs_available) {
		int old, avail = atomic_read(&ctx->reqs_available);

		do {
			if (avail < ctx->req_batch)
				goto out;

			old = avail;
			avail = atomic_cmpxchg(&ctx->reqs_available,
					       avail, avail - ctx->req_batch);
		} while (avail != old);

		kcpu->reqs_available += ctx->req_batch;
	}

	ret = true;
	kcpu->reqs_available--;
out:
	preempt_enable();
	return ret;
}
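
/*
 * Worked example (illustrative): with nr_events = 255 and two possible
 * CPUs, req_batch = 254 / (2 * 4) = 31.  get_reqs_available() refills an
 * empty percpu counter with one 31-slot batch taken from the global
 * ctx->reqs_available, and put_reqs_available() flushes batches back once
 * the local count reaches 2 * 31 = 62.
 */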
/* aio_get_req
 *	Allocate a slot for an aio request.
 * Returns NULL if no requests are free.
 */
static inline struct kiocb *aio_get_req(struct kioctx *ctx)
{
	struct kiocb *req;

	if (!get_reqs_available(ctx))
		return NULL;

	req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL|__GFP_ZERO);
	if (unlikely(!req))
		goto out_put;

	req->ki_ctx = ctx;
	return req;
out_put:
	put_reqs_available(ctx, 1);
	return NULL;
}
static void kiocb_free(struct kiocb *req)
{
	if (req->ki_filp)
		fput(req->ki_filp);
	if (req->ki_eventfd != NULL)
		eventfd_ctx_put(req->ki_eventfd);
	kmem_cache_free(kiocb_cachep, req);
}
static struct kioctx *lookup_ioctx(unsigned long ctx_id)
{
	struct aio_ring __user *ring  = (void __user *)ctx_id;
	struct mm_struct *mm = current->mm;
	struct kioctx *ctx, *ret = NULL;
	struct kioctx_table *table;
	unsigned id;

	if (get_user(id, &ring->id))
		return NULL;

	rcu_read_lock();
	table = rcu_dereference(mm->ioctx_table);

	if (!table || id >= table->nr)
		goto out;

	ctx = table->table[id];
	if (ctx && ctx->user_id == ctx_id) {
		percpu_ref_get(&ctx->users);
		ret = ctx;
	}
out:
	rcu_read_unlock();
	return ret;
}
/* aio_complete
 *	Called when the io request on the given iocb is complete.
 */
void aio_complete(struct kiocb *iocb, long res, long res2)
{
	struct kioctx	*ctx = iocb->ki_ctx;
	struct aio_ring	*ring;
	struct io_event	*ev_page, *event;
	unsigned long	flags;
	unsigned	tail, pos;

	/*
	 * Special case handling for sync iocbs:
	 *  - events go directly into the iocb for fast handling
	 *  - the sync task with the iocb in its stack holds the single iocb
	 *    ref, no other paths have a way to get another ref
	 *  - the sync task helpfully left a reference to itself in the iocb
	 */
	if (is_sync_kiocb(iocb)) {
		iocb->ki_user_data = res;
		smp_wmb();
		iocb->ki_ctx = ERR_PTR(-EXDEV);
		wake_up_process(iocb->ki_obj.tsk);
		return;
	}

	/*
	 * Take rcu_read_lock() in case the kioctx is being destroyed, as we
	 * need to issue a wakeup after incrementing reqs_available.
	 */
	rcu_read_lock();

	if (iocb->ki_list.next) {
		unsigned long flags;

		spin_lock_irqsave(&ctx->ctx_lock, flags);
		list_del(&iocb->ki_list);
		spin_unlock_irqrestore(&ctx->ctx_lock, flags);
	}

	/*
	 * Add a completion event to the ring buffer. Must be done holding
	 * ctx->completion_lock to prevent other code from messing with the tail
	 * pointer since we might be called from irq context.
	 */
	spin_lock_irqsave(&ctx->completion_lock, flags);

	tail = ctx->tail;
	pos = tail + AIO_EVENTS_OFFSET;

	if (++tail >= ctx->nr_events)
		tail = 0;

	ev_page = kmap_atomic(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
	event = ev_page + pos % AIO_EVENTS_PER_PAGE;

	event->obj = (u64)(unsigned long)iocb->ki_obj.user;
	event->data = iocb->ki_user_data;
	event->res = res;
	event->res2 = res2;

	kunmap_atomic(ev_page);
	flush_dcache_page(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);

	pr_debug("%p[%u]: %p: %p %Lx %lx %lx\n",
		 ctx, tail, iocb, iocb->ki_obj.user, iocb->ki_user_data,
		 res, res2);

	/* after flagging the request as done, we
	 * must never even look at it again
	 */
	smp_wmb();	/* make event visible before updating tail */

	ctx->tail = tail;

	ring = kmap_atomic(ctx->ring_pages[0]);
	ring->tail = tail;
	kunmap_atomic(ring);
	flush_dcache_page(ctx->ring_pages[0]);

	spin_unlock_irqrestore(&ctx->completion_lock, flags);

	pr_debug("added to ring %p at [%u]\n", iocb, tail);

	/*
	 * Check if the user asked us to deliver the result through an
	 * eventfd. The eventfd_signal() function is safe to be called
	 * from IRQ context.
	 */
	if (iocb->ki_eventfd != NULL)
		eventfd_signal(iocb->ki_eventfd, 1);

	/* everything turned out well, dispose of the aiocb. */
	kiocb_free(iocb);

	/*
	 * We have to order our ring_info tail store above and test
	 * of the wait list below outside the wait lock.  This is
	 * like in wake_up_bit() where clearing a bit has to be
	 * ordered with the unlocked test.
	 */
	smp_mb();

	if (waitqueue_active(&ctx->wait))
		wake_up(&ctx->wait);

	rcu_read_unlock();
}
EXPORT_SYMBOL(aio_complete);
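
/*
 * Illustrative note (not in the original source): the smp_wmb() above
 * pairs with the read side - anyone who observes the new ring->tail
 * (aio_read_events_ring() or userspace polling the mapped ring) must also
 * observe the io_event written before it, so a userspace reader does the
 * mirror image: load tail, read barrier, then read the events.
 */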
/* aio_read_events_ring
 *	Pull an event off of the ioctx's event ring.  Returns the number of
 *	events fetched.
 */
static long aio_read_events_ring(struct kioctx *ctx,
				 struct io_event __user *event, long nr)
{
	struct aio_ring *ring;
	unsigned head, tail, pos;
	long ret = 0;
	int copy_ret;

	mutex_lock(&ctx->ring_lock);

	ring = kmap_atomic(ctx->ring_pages[0]);
	head = ring->head;
	tail = ring->tail;
	kunmap_atomic(ring);

	pr_debug("h%u t%u m%u\n", head, tail, ctx->nr_events);

	if (head == tail)
		goto out;

	while (ret < nr) {
		long avail;
		struct io_event *ev;
		struct page *page;

		avail = (head <= tail ? tail : ctx->nr_events) - head;
		if (head == tail)
			break;

		avail = min(avail, nr - ret);
		avail = min_t(long, avail, AIO_EVENTS_PER_PAGE -
			    ((head + AIO_EVENTS_OFFSET) % AIO_EVENTS_PER_PAGE));

		pos = head + AIO_EVENTS_OFFSET;
		page = ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE];
		pos %= AIO_EVENTS_PER_PAGE;

		ev = kmap(page);
		copy_ret = copy_to_user(event + ret, ev + pos,
					sizeof(*ev) * avail);
		kunmap(page);

		if (unlikely(copy_ret)) {
			ret = -EFAULT;
			goto out;
		}

		ret += avail;
		head += avail;
		head %= ctx->nr_events;
	}

	ring = kmap_atomic(ctx->ring_pages[0]);
	ring->head = head;
	kunmap_atomic(ring);
	flush_dcache_page(ctx->ring_pages[0]);

	pr_debug("%li h%u t%u\n", ret, head, tail);

	put_reqs_available(ctx, ret);
out:
	mutex_unlock(&ctx->ring_lock);

	return ret;
}
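
/*
 * Worked example (illustrative): with ctx->nr_events = 8, head = 6 and
 * tail = 2, the first pass sees avail = 8 - 6 = 2 and copies slots 6 and
 * 7, head wraps to 0, and the next pass copies slots 0 and 1, returning
 * ret = 4 events in total.
 */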
static bool aio_read_events(struct kioctx *ctx, long min_nr, long nr,
			    struct io_event __user *event, long *i)
{
	long ret = aio_read_events_ring(ctx, event + *i, nr - *i);

	if (ret > 0)
		*i += ret;

	if (unlikely(atomic_read(&ctx->dead)))
		ret = -EINVAL;

	if (!*i)
		*i = ret;

	return ret < 0 || *i >= min_nr;
}
static long read_events(struct kioctx *ctx, long min_nr, long nr,
			struct io_event __user *event,
			struct timespec __user *timeout)
{
	ktime_t until = { .tv64 = KTIME_MAX };
	long ret = 0;

	if (timeout) {
		struct timespec	ts;

		if (unlikely(copy_from_user(&ts, timeout, sizeof(ts))))
			return -EFAULT;

		until = timespec_to_ktime(ts);
	}

	/*
	 * Note that aio_read_events() is being called as the conditional - i.e.
	 * we're calling it after prepare_to_wait() has set task state to
	 * TASK_INTERRUPTIBLE.
	 *
	 * But aio_read_events() can block, and if it blocks it's going to flip
	 * the task state back to TASK_RUNNING.
	 *
	 * This should be ok, provided it doesn't flip the state back to
	 * TASK_RUNNING and return 0 too much - that causes us to spin. That
	 * will only happen if the mutex_lock() call blocks, and we then find
	 * the ringbuffer empty. So in practice we should be ok, but it's
	 * something to be aware of when touching this code.
	 */
	wait_event_interruptible_hrtimeout(ctx->wait,
			aio_read_events(ctx, min_nr, nr, event, &ret), until);

	if (!ret && signal_pending(current))
		ret = -EINTR;

	return ret;
}
/* sys_io_setup:
 *	Create an aio_context capable of receiving at least nr_events.
 *	ctxp must not point to an aio_context that already exists, and
 *	must be initialized to 0 prior to the call.  On successful
 *	creation of the aio_context, *ctxp is filled in with the resulting
 *	handle.  May fail with -EINVAL if *ctxp is not initialized,
 *	if the specified nr_events exceeds internal limits.  May fail
 *	with -EAGAIN if the specified nr_events exceeds the user's limit
 *	of available events.  May fail with -ENOMEM if insufficient kernel
 *	resources are available.  May fail with -EFAULT if an invalid
 *	pointer is passed for ctxp.  Will fail with -ENOSYS if not
 *	implemented.
 */
SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
{
	struct kioctx *ioctx = NULL;
	unsigned long ctx;
	long ret;

	ret = get_user(ctx, ctxp);
	if (unlikely(ret))
		goto out;

	ret = -EINVAL;
	if (unlikely(ctx || nr_events == 0)) {
		pr_debug("EINVAL: io_setup: ctx %lu nr_events %u\n",
			 ctx, nr_events);
		goto out;
	}

	ioctx = ioctx_alloc(nr_events);
	ret = PTR_ERR(ioctx);
	if (!IS_ERR(ioctx)) {
		ret = put_user(ioctx->user_id, ctxp);
		if (ret)
			kill_ioctx(current->mm, ioctx);
		percpu_ref_put(&ioctx->users);
	}

out:
	return ret;
}
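
/*
 * Minimal userspace sketch (illustrative, not from the original source),
 * using a raw syscall(2) wrapper and <linux/aio_abi.h>:
 *
 *	aio_context_t ctx = 0;	// must be zeroed or the call fails -EINVAL
 *	if (syscall(__NR_io_setup, 128, &ctx) < 0)
 *		perror("io_setup");
 */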
/* sys_io_destroy:
 *	Destroy the aio_context specified.  May cancel any outstanding
 *	AIOs and block on completion.  Will fail with -ENOSYS if not
 *	implemented.  May fail with -EINVAL if the context pointed to
 *	is invalid.
 */
SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
{
	struct kioctx *ioctx = lookup_ioctx(ctx);
	if (likely(NULL != ioctx)) {
		kill_ioctx(current->mm, ioctx);
		percpu_ref_put(&ioctx->users);
		return 0;
	}
	pr_debug("EINVAL: io_destroy: invalid context id\n");
	return -EINVAL;
}
typedef ssize_t (aio_rw_op)(struct kiocb *, const struct iovec *,
			    unsigned long, loff_t);
static ssize_t aio_setup_vectored_rw(struct kiocb *kiocb,
				     int rw, char __user *buf,
				     unsigned long *nr_segs,
				     struct iovec **iovec,
				     bool compat)
{
	ssize_t ret;

	*nr_segs = kiocb->ki_nbytes;

#ifdef CONFIG_COMPAT
	if (compat)
		ret = compat_rw_copy_check_uvector(rw,
				(struct compat_iovec __user *)buf,
				*nr_segs, 1, *iovec, iovec);
	else
#endif
		ret = rw_copy_check_uvector(rw,
				(struct iovec __user *)buf,
				*nr_segs, 1, *iovec, iovec);
	if (ret < 0)
		return ret;

	/* ki_nbytes now reflect bytes instead of segs */
	kiocb->ki_nbytes = ret;
	return 0;
}
static ssize_t aio_setup_single_vector(struct kiocb *kiocb,
				       int rw, char __user *buf,
				       unsigned long *nr_segs,
				       struct iovec *iovec)
{
	if (unlikely(!access_ok(!rw, buf, kiocb->ki_nbytes)))
		return -EFAULT;

	iovec->iov_base = buf;
	iovec->iov_len = kiocb->ki_nbytes;
	*nr_segs = 1;
	return 0;
}
/*
 * aio_run_iocb:
 *	Performs the initial checks and io submission for the kiocb at the
 *	time of io submission.
 */
static ssize_t aio_run_iocb(struct kiocb *req, unsigned opcode,
			    char __user *buf, bool compat)
{
	struct file *file = req->ki_filp;
	ssize_t ret;
	unsigned long nr_segs;
	int rw;
	fmode_t mode;
	aio_rw_op *rw_op;
	struct iovec inline_vec, *iovec = &inline_vec;

	switch (opcode) {
	case IOCB_CMD_PREAD:
	case IOCB_CMD_PREADV:
		mode	= FMODE_READ;
		rw	= READ;
		rw_op	= file->f_op->aio_read;
		goto rw_common;

	case IOCB_CMD_PWRITE:
	case IOCB_CMD_PWRITEV:
		mode	= FMODE_WRITE;
		rw	= WRITE;
		rw_op	= file->f_op->aio_write;
		goto rw_common;
rw_common:
		if (unlikely(!(file->f_mode & mode)))
			return -EBADF;

		if (!rw_op)
			return -EINVAL;

		ret = (opcode == IOCB_CMD_PREADV ||
		       opcode == IOCB_CMD_PWRITEV)
			? aio_setup_vectored_rw(req, rw, buf, &nr_segs,
						&iovec, compat)
			: aio_setup_single_vector(req, rw, buf, &nr_segs,
						  iovec);
		if (ret)
			return ret;

		ret = rw_verify_area(rw, file, &req->ki_pos, req->ki_nbytes);
		if (ret < 0) {
			if (iovec != &inline_vec)
				kfree(iovec);
			return ret;
		}

		req->ki_nbytes = ret;

		/* XXX: move/kill - rw_verify_area()? */
		/* This matches the pread()/pwrite() logic */
		if (req->ki_pos < 0) {
			ret = -EINVAL;
			break;
		}

		if (rw == WRITE)
			file_start_write(file);

		ret = rw_op(req, iovec, nr_segs, req->ki_pos);

		if (rw == WRITE)
			file_end_write(file);
		break;

	case IOCB_CMD_FDSYNC:
		if (!file->f_op->aio_fsync)
			return -EINVAL;

		ret = file->f_op->aio_fsync(req, 1);
		break;

	case IOCB_CMD_FSYNC:
		if (!file->f_op->aio_fsync)
			return -EINVAL;

		ret = file->f_op->aio_fsync(req, 0);
		break;

	default:
		pr_debug("EINVAL: no operation provided\n");
		return -EINVAL;
	}

	if (iovec != &inline_vec)
		kfree(iovec);

	if (ret != -EIOCBQUEUED) {
		/*
		 * There's no easy way to restart the syscall since other AIO's
		 * may be already running. Just fail this IO with EINTR.
		 */
		if (unlikely(ret == -ERESTARTSYS || ret == -ERESTARTNOINTR ||
			     ret == -ERESTARTNOHAND ||
			     ret == -ERESTART_RESTARTBLOCK))
			ret = -EINTR;
		aio_complete(req, ret, 0);
	}

	return 0;
}
static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
			 struct iocb *iocb, bool compat)
{
	struct kiocb *req;
	ssize_t ret;

	/* enforce forwards compatibility on users */
	if (unlikely(iocb->aio_reserved1 || iocb->aio_reserved2)) {
		pr_debug("EINVAL: reserve field set\n");
		return -EINVAL;
	}

	/* prevent overflows */
	if (unlikely(
	    (iocb->aio_buf != (unsigned long)iocb->aio_buf) ||
	    (iocb->aio_nbytes != (size_t)iocb->aio_nbytes) ||
	    ((ssize_t)iocb->aio_nbytes < 0)
	   )) {
		pr_debug("EINVAL: io_submit: overflow check\n");
		return -EINVAL;
	}

	req = aio_get_req(ctx);
	if (unlikely(!req))
		return -EAGAIN;

	req->ki_filp = fget(iocb->aio_fildes);
	if (unlikely(!req->ki_filp)) {
		ret = -EBADF;
		goto out_put_req;
	}

	if (iocb->aio_flags & IOCB_FLAG_RESFD) {
		/*
		 * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an
		 * instance of the file* now. The file descriptor must be
		 * an eventfd() fd, and will be signaled for each completed
		 * event using the eventfd_signal() function.
		 */
		req->ki_eventfd = eventfd_ctx_fdget((int) iocb->aio_resfd);
		if (IS_ERR(req->ki_eventfd)) {
			ret = PTR_ERR(req->ki_eventfd);
			req->ki_eventfd = NULL;
			goto out_put_req;
		}
	}

	ret = put_user(KIOCB_KEY, &user_iocb->aio_key);
	if (unlikely(ret)) {
		pr_debug("EFAULT: aio_key\n");
		goto out_put_req;
	}

	req->ki_obj.user = user_iocb;
	req->ki_user_data = iocb->aio_data;
	req->ki_pos = iocb->aio_offset;
	req->ki_nbytes = iocb->aio_nbytes;

	ret = aio_run_iocb(req, iocb->aio_lio_opcode,
			   (char __user *)(unsigned long)iocb->aio_buf,
			   compat);
	if (ret)
		goto out_put_req;

	return 0;
out_put_req:
	put_reqs_available(ctx, 1);
	kiocb_free(req);
	return ret;
}
long do_io_submit(aio_context_t ctx_id, long nr,
		  struct iocb __user *__user *iocbpp, bool compat)
{
	struct kioctx *ctx;
	long ret = 0;
	int i = 0;
	struct blk_plug plug;

	if (unlikely(nr < 0))
		return -EINVAL;

	if (unlikely(nr > LONG_MAX/sizeof(*iocbpp)))
		nr = LONG_MAX/sizeof(*iocbpp);

	if (unlikely(!access_ok(VERIFY_READ, iocbpp, (nr*sizeof(*iocbpp)))))
		return -EFAULT;

	ctx = lookup_ioctx(ctx_id);
	if (unlikely(!ctx)) {
		pr_debug("EINVAL: invalid context id\n");
		return -EINVAL;
	}

	blk_start_plug(&plug);

	/*
	 * AKPM: should this return a partial result if some of the IOs were
	 * successfully submitted?
	 */
	for (i = 0; i < nr; i++) {
		struct iocb __user *user_iocb;
		struct iocb tmp;

		if (unlikely(__get_user(user_iocb, iocbpp + i))) {
			ret = -EFAULT;
			break;
		}

		if (unlikely(copy_from_user(&tmp, user_iocb, sizeof(tmp)))) {
			ret = -EFAULT;
			break;
		}

		ret = io_submit_one(ctx, user_iocb, &tmp, compat);
		if (ret)
			break;
	}
	blk_finish_plug(&plug);

	percpu_ref_put(&ctx->users);
	return i ? i : ret;
}
/* sys_io_submit:
 *	Queue the nr iocbs pointed to by iocbpp for processing.  Returns
 *	the number of iocbs queued.  May return -EINVAL if the aio_context
 *	specified by ctx_id is invalid, if nr is < 0, if the iocb at
 *	*iocbpp[0] is not properly initialized, if the operation specified
 *	is invalid for the file descriptor in the iocb.  May fail with
 *	-EFAULT if any of the data structures point to invalid data.  May
 *	fail with -EBADF if the file descriptor specified in the first
 *	iocb is invalid.  May fail with -EAGAIN if insufficient resources
 *	are available to queue any iocbs.  Will return 0 if nr is 0.  Will
 *	fail with -ENOSYS if not implemented.
 */
SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
		struct iocb __user * __user *, iocbpp)
{
	return do_io_submit(ctx_id, nr, iocbpp, 0);
}
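
/*
 * Minimal userspace sketch (illustrative, not from the original source):
 * submitting a single 4096-byte pread of fd into buf:
 *
 *	struct iocb cb = { 0 }, *cbs[1] = { &cb };
 *	cb.aio_lio_opcode = IOCB_CMD_PREAD;
 *	cb.aio_fildes     = fd;
 *	cb.aio_buf        = (__u64)(unsigned long)buf;
 *	cb.aio_nbytes     = 4096;
 *	cb.aio_offset     = 0;
 *	int n = syscall(__NR_io_submit, ctx, 1, cbs);	// 1 on success
 */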
/* lookup_kiocb
 *	Finds a given iocb for cancellation.
 */
static struct kiocb *lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb,
				  u32 key)
{
	struct list_head *pos;

	assert_spin_locked(&ctx->ctx_lock);

	if (key != KIOCB_KEY)
		return NULL;

	/* TODO: use a hash or array, this sucks. */
	list_for_each(pos, &ctx->active_reqs) {
		struct kiocb *kiocb = list_kiocb(pos);

		if (kiocb->ki_obj.user == iocb)
			return kiocb;
	}
	return NULL;
}
/* sys_io_cancel:
 *	Attempts to cancel an iocb previously passed to io_submit.  If
 *	the operation is successfully cancelled, the resulting event is
 *	copied into the memory pointed to by result without being placed
 *	into the completion queue and 0 is returned.  May fail with
 *	-EFAULT if any of the data structures pointed to are invalid.
 *	May fail with -EINVAL if aio_context specified by ctx_id is
 *	invalid.  May fail with -EAGAIN if the iocb specified was not
 *	cancelled.  Will fail with -ENOSYS if not implemented.
 */
SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
		struct io_event __user *, result)
{
	struct kioctx *ctx;
	struct kiocb *kiocb;
	u32 key;
	int ret;

	ret = get_user(key, &iocb->aio_key);
	if (unlikely(ret))
		return -EFAULT;

	ctx = lookup_ioctx(ctx_id);
	if (unlikely(!ctx))
		return -EINVAL;

	spin_lock_irq(&ctx->ctx_lock);

	kiocb = lookup_kiocb(ctx, iocb, key);
	if (kiocb)
		ret = kiocb_cancel(ctx, kiocb);
	else
		ret = -EINVAL;

	spin_unlock_irq(&ctx->ctx_lock);

	if (!ret) {
		/*
		 * The result argument is no longer used - the io_event is
		 * always delivered via the ring buffer. -EINPROGRESS indicates
		 * cancellation is in progress:
		 */
		ret = -EINPROGRESS;
	}

	percpu_ref_put(&ctx->users);

	return ret;
}
/* io_getevents:
 *	Attempts to read at least min_nr events and up to nr events from
 *	the completion queue for the aio_context specified by ctx_id. If
 *	it succeeds, the number of read events is returned. May fail with
 *	-EINVAL if ctx_id is invalid, if min_nr is out of range, if nr is
 *	out of range, if timeout is out of range.  May fail with -EFAULT
 *	if any of the memory specified is invalid.  May return 0 or
 *	< min_nr if the timeout specified by timeout has elapsed
 *	before sufficient events are available, where timeout == NULL
 *	specifies an infinite timeout. Note that the timeout pointed to by
 *	timeout is relative.  Will fail with -ENOSYS if not implemented.
 */
SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id,
		long, min_nr,
		long, nr,
		struct io_event __user *, events,
		struct timespec __user *, timeout)
{
	struct kioctx *ioctx = lookup_ioctx(ctx_id);
	long ret = -EINVAL;

	if (likely(ioctx)) {
		if (likely(min_nr <= nr && min_nr >= 0))
			ret = read_events(ioctx, min_nr, nr, events, timeout);
		percpu_ref_put(&ioctx->users);
	}
	return ret;
}