// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1998-1999, Stephen Tweedie and Bill Hawes
 *
 * Manage the dynamic fd arrays in the process files_struct.
 */

#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>

unsigned int sysctl_nr_open __read_mostly = 1024*1024;
unsigned int sysctl_nr_open_min = BITS_PER_LONG;
/* our min() is unusable in constant expressions ;-/ */
#define __const_min(x, y) ((x) < (y) ? (x) : (y))
unsigned int sysctl_nr_open_max =
	__const_min(INT_MAX, ~(size_t)0/sizeof(void *)) & -BITS_PER_LONG;

static void __free_fdtable(struct fdtable *fdt)
{
	kvfree(fdt->fd);
	kvfree(fdt->open_fds);
	kfree(fdt);
}

static void free_fdtable_rcu(struct rcu_head *rcu)
{
	__free_fdtable(container_of(rcu, struct fdtable, rcu));
}

#define BITBIT_NR(nr)	BITS_TO_LONGS(BITS_TO_LONGS(nr))
#define BITBIT_SIZE(nr)	(BITBIT_NR(nr) * sizeof(long))
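
/*
 * Worked example, assuming a 64-bit build where BITS_TO_LONGS(n) is
 * DIV_ROUND_UP(n, 64) and sizeof(long) == 8: a 1024-slot table needs
 * BITS_TO_LONGS(1024) = 16 longs of open_fds, and full_fds_bits needs
 * one bit per such long, i.e. BITBIT_NR(1024) = BITS_TO_LONGS(16) = 1
 * long, so BITBIT_SIZE(1024) = 8 bytes.
 */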

/*
 * Copy 'count' fd bits from the old table to the new table and clear the extra
 * space if any. This does not copy the file pointers. Called with the files
 * spinlock held for write.
 */
static void copy_fd_bitmaps(struct fdtable *nfdt, struct fdtable *ofdt,
			    unsigned int count)
{
	unsigned int cpy, set;

	cpy = count / BITS_PER_BYTE;
	set = (nfdt->max_fds - count) / BITS_PER_BYTE;
	memcpy(nfdt->open_fds, ofdt->open_fds, cpy);
	memset((char *)nfdt->open_fds + cpy, 0, set);
	memcpy(nfdt->close_on_exec, ofdt->close_on_exec, cpy);
	memset((char *)nfdt->close_on_exec + cpy, 0, set);

	cpy = BITBIT_SIZE(count);
	set = BITBIT_SIZE(nfdt->max_fds) - cpy;
	memcpy(nfdt->full_fds_bits, ofdt->full_fds_bits, cpy);
	memset((char *)nfdt->full_fds_bits + cpy, 0, set);
}

/*
 * Copy all file descriptors from the old table to the new, expanded table and
 * clear the extra space. Called with the files spinlock held for write.
 */
static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt)
{
	unsigned int cpy, set;

	BUG_ON(nfdt->max_fds < ofdt->max_fds);

	cpy = ofdt->max_fds * sizeof(struct file *);
	set = (nfdt->max_fds - ofdt->max_fds) * sizeof(struct file *);
	memcpy(nfdt->fd, ofdt->fd, cpy);
	memset((char *)nfdt->fd + cpy, 0, set);

	copy_fd_bitmaps(nfdt, ofdt, ofdt->max_fds);
}

static struct fdtable * alloc_fdtable(unsigned int nr)
{
	struct fdtable *fdt;
	void *data;

	/*
	 * Figure out how many fds we actually want to support in this fdtable.
	 * Allocation steps are keyed to the size of the fdarray, since it
	 * grows far faster than any of the other dynamic data. We try to fit
	 * the fdarray into comfortable page-tuned chunks: starting at 1024B
	 * and growing in powers of two from there on.
	 */
	nr /= (1024 / sizeof(struct file *));
	nr = roundup_pow_of_two(nr + 1);
	nr *= (1024 / sizeof(struct file *));
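	/*
	 * Example of the sizing above, assuming sizeof(struct file *) == 8
	 * (so 128 pointers per 1024B): a request for nr = 300 gives
	 * 300 / 128 = 2, roundup_pow_of_two(3) = 4, 4 * 128 = 512 slots,
	 * i.e. a 4096B fd array - one comfortable page-sized chunk.
	 */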
	/*
	 * Note that this can drive nr *below* what we had passed if sysctl_nr_open
	 * had been set lower between the check in expand_files() and here. Deal
	 * with that in caller, it's cheaper that way.
	 *
	 * We make sure that nr remains a multiple of BITS_PER_LONG - otherwise
	 * bitmaps handling below becomes unpleasant, to put it mildly...
	 */
	if (unlikely(nr > sysctl_nr_open))
		nr = ((sysctl_nr_open - 1) | (BITS_PER_LONG - 1)) + 1;

	fdt = kmalloc(sizeof(struct fdtable), GFP_KERNEL_ACCOUNT);
	if (!fdt)
		goto out;
	fdt->max_fds = nr;
	data = kvmalloc_array(nr, sizeof(struct file *), GFP_KERNEL_ACCOUNT);
	if (!data)
		goto out_fdt;
	fdt->fd = data;

	data = kvmalloc(max_t(size_t,
			      2 * nr / BITS_PER_BYTE + BITBIT_SIZE(nr), L1_CACHE_BYTES),
			      GFP_KERNEL_ACCOUNT);
	if (!data)
		goto out_arr;
	fdt->open_fds = data;
	data += nr / BITS_PER_BYTE;
	fdt->close_on_exec = data;
	data += nr / BITS_PER_BYTE;
	fdt->full_fds_bits = data;

	return fdt;

out_arr:
	kvfree(fdt->fd);
out_fdt:
	kfree(fdt);
out:
	return NULL;
}

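/*
 * Note on the layout above: all three bitmaps live in a single
 * allocation, carved up as [open_fds | close_on_exec | full_fds_bits],
 * the first two taking nr / BITS_PER_BYTE bytes each. E.g. for nr = 512
 * on a 64-bit build that is 64 + 64 + BITBIT_SIZE(512) = 136 bytes.
 */
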
/*
 * Expand the file descriptor table.
 * This function will allocate a new fdtable and both fd array and fdset, of
 * the given size.
 * Return <0 error code on error; 1 on successful completion.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_fdtable(struct files_struct *files, unsigned int nr)
	__releases(files->file_lock)
	__acquires(files->file_lock)
{
	struct fdtable *new_fdt, *cur_fdt;

	spin_unlock(&files->file_lock);
	new_fdt = alloc_fdtable(nr);

	/* make sure all __fd_install() have seen resize_in_progress
	 * or have finished their rcu_read_lock_sched() section.
	 */
	if (atomic_read(&files->count) > 1)
		synchronize_rcu();

	spin_lock(&files->file_lock);
	if (!new_fdt)
		return -ENOMEM;
	/*
	 * extremely unlikely race - sysctl_nr_open decreased between the check in
	 * caller and alloc_fdtable(). Cheaper to catch it here...
	 */
	if (unlikely(new_fdt->max_fds <= nr)) {
		__free_fdtable(new_fdt);
		return -EMFILE;
	}
	cur_fdt = files_fdtable(files);
	BUG_ON(nr < cur_fdt->max_fds);
	copy_fdtable(new_fdt, cur_fdt);
	rcu_assign_pointer(files->fdt, new_fdt);
	if (cur_fdt != &files->fdtab)
		call_rcu(&cur_fdt->rcu, free_fdtable_rcu);
	/* coupled with smp_rmb() in __fd_install() */
	smp_wmb();
	return 1;
}

/*
 * Expand files.
 * This function will expand the file structures, if the requested size exceeds
 * the current capacity and there is room for expansion.
 * Return <0 error code on error; 0 when nothing done; 1 when files were
 * expanded and execution may have blocked.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_files(struct files_struct *files, unsigned int nr)
	__releases(files->file_lock)
	__acquires(files->file_lock)
{
	struct fdtable *fdt;
	int expanded = 0;

repeat:
	fdt = files_fdtable(files);

	/* Do we need to expand? */
	if (nr < fdt->max_fds)
		return expanded;

	/* Can we expand? */
	if (nr >= sysctl_nr_open)
		return -EMFILE;

	if (unlikely(files->resize_in_progress)) {
		spin_unlock(&files->file_lock);
		expanded = 1;
		wait_event(files->resize_wait, !files->resize_in_progress);
		spin_lock(&files->file_lock);
		goto repeat;
	}

	/* All good, so we try */
	files->resize_in_progress = true;
	expanded = expand_fdtable(files, nr);
	files->resize_in_progress = false;

	wake_up_all(&files->resize_wait);
	return expanded;
}

static inline void __set_close_on_exec(unsigned int fd, struct fdtable *fdt)
{
	__set_bit(fd, fdt->close_on_exec);
}

static inline void __clear_close_on_exec(unsigned int fd, struct fdtable *fdt)
{
	if (test_bit(fd, fdt->close_on_exec))
		__clear_bit(fd, fdt->close_on_exec);
}

static inline void __set_open_fd(unsigned int fd, struct fdtable *fdt)
{
	__set_bit(fd, fdt->open_fds);
	fd /= BITS_PER_LONG;
	if (!~fdt->open_fds[fd])
		__set_bit(fd, fdt->full_fds_bits);
}

static inline void __clear_open_fd(unsigned int fd, struct fdtable *fdt)
{
	__clear_bit(fd, fdt->open_fds);
	__clear_bit(fd / BITS_PER_LONG, fdt->full_fds_bits);
}

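/*
 * full_fds_bits is a summary bitmap: bit W is set when every one of the
 * BITS_PER_LONG descriptors covered by word W of open_fds is taken.
 * E.g. on a 64-bit build, once fds 0..63 are all open, bit 0 is set and
 * the search in find_next_fd() below can skip that whole word at once.
 */
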
static unsigned int count_open_files(struct fdtable *fdt)
{
	unsigned int size = fdt->max_fds;
	unsigned int i;

	/* Find the last open fd */
	for (i = size / BITS_PER_LONG; i > 0; ) {
		if (fdt->open_fds[--i])
			break;
	}
	i = (i + 1) * BITS_PER_LONG;
	return i;
}

/*
 * Allocate a new files structure and copy contents from the
 * passed in files structure.
 * errorp will be valid only when the returned files_struct is NULL.
 */
struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
{
	struct files_struct *newf;
	struct file **old_fds, **new_fds;
	unsigned int open_files, i;
	struct fdtable *old_fdt, *new_fdt;

	*errorp = -ENOMEM;
	newf = kmem_cache_alloc(files_cachep, GFP_KERNEL);
	if (!newf)
		goto out;

	atomic_set(&newf->count, 1);

	spin_lock_init(&newf->file_lock);
	newf->resize_in_progress = false;
	init_waitqueue_head(&newf->resize_wait);
	newf->next_fd = 0;
	new_fdt = &newf->fdtab;
	new_fdt->max_fds = NR_OPEN_DEFAULT;
	new_fdt->close_on_exec = newf->close_on_exec_init;
	new_fdt->open_fds = newf->open_fds_init;
	new_fdt->full_fds_bits = newf->full_fds_bits_init;
	new_fdt->fd = &newf->fd_array[0];

	spin_lock(&oldf->file_lock);
	old_fdt = files_fdtable(oldf);
	open_files = count_open_files(old_fdt);

	/*
	 * Check whether we need to allocate a larger fd array and fd set.
	 */
	while (unlikely(open_files > new_fdt->max_fds)) {
		spin_unlock(&oldf->file_lock);

		if (new_fdt != &newf->fdtab)
			__free_fdtable(new_fdt);

		new_fdt = alloc_fdtable(open_files - 1);
		if (!new_fdt) {
			*errorp = -ENOMEM;
			goto out_release;
		}

		/* beyond sysctl_nr_open; nothing to do */
		if (unlikely(new_fdt->max_fds < open_files)) {
			__free_fdtable(new_fdt);
			*errorp = -EMFILE;
			goto out_release;
		}

		/*
		 * Reacquire the oldf lock and a pointer to its fd table
		 * who knows it may have a new bigger fd table. We need
		 * the latest pointer.
		 */
		spin_lock(&oldf->file_lock);
		old_fdt = files_fdtable(oldf);
		open_files = count_open_files(old_fdt);
	}

	copy_fd_bitmaps(new_fdt, old_fdt, open_files);

	old_fds = old_fdt->fd;
	new_fds = new_fdt->fd;

	for (i = open_files; i != 0; i--) {
		struct file *f = *old_fds++;
		if (f) {
			get_file(f);
		} else {
			/*
			 * The fd may be claimed in the fd bitmap but not yet
			 * instantiated in the files array if a sibling thread
			 * is partway through open(). So make sure that this
			 * fd is available to the new process.
			 */
			__clear_open_fd(open_files - i, new_fdt);
		}
		rcu_assign_pointer(*new_fds++, f);
	}
	spin_unlock(&oldf->file_lock);

	/* clear the remainder */
	memset(new_fds, 0, (new_fdt->max_fds - open_files) * sizeof(struct file *));

	rcu_assign_pointer(newf->fdt, new_fdt);

	return newf;

out_release:
	kmem_cache_free(files_cachep, newf);
out:
	return NULL;
}

static struct fdtable *close_files(struct files_struct * files)
{
	/*
	 * It is safe to dereference the fd table without RCU or
	 * ->file_lock because this is the last reference to the
	 * files structure.
	 */
	struct fdtable *fdt = rcu_dereference_raw(files->fdt);
	unsigned int i, j = 0;

	for (;;) {
		unsigned long set;
		i = j * BITS_PER_LONG;
		if (i >= fdt->max_fds)
			break;
		set = fdt->open_fds[j++];
		while (set) {
			if (set & 1) {
				struct file * file = xchg(&fdt->fd[i], NULL);
				if (file) {
					filp_close(file, files);
					cond_resched();
				}
			}
			i++;
			set >>= 1;
		}
	}

	return fdt;
}

struct files_struct *get_files_struct(struct task_struct *task)
{
	struct files_struct *files;

	task_lock(task);
	files = task->files;
	if (files)
		atomic_inc(&files->count);
	task_unlock(task);

	return files;
}

void put_files_struct(struct files_struct *files)
{
	if (atomic_dec_and_test(&files->count)) {
		struct fdtable *fdt = close_files(files);

		/* free the arrays if they are not embedded */
		if (fdt != &files->fdtab)
			__free_fdtable(fdt);
		kmem_cache_free(files_cachep, files);
	}
}

void reset_files_struct(struct files_struct *files)
{
	struct task_struct *tsk = current;
	struct files_struct *old;

	old = tsk->files;
	task_lock(tsk);
	tsk->files = files;
	task_unlock(tsk);
	put_files_struct(old);
}

void exit_files(struct task_struct *tsk)
{
	struct files_struct * files = tsk->files;

	if (files) {
		task_lock(tsk);
		tsk->files = NULL;
		task_unlock(tsk);
		put_files_struct(files);
	}
}

struct files_struct init_files = {
	.count		= ATOMIC_INIT(1),
	.fdt		= &init_files.fdtab,
	.fdtab		= {
		.max_fds	= NR_OPEN_DEFAULT,
		.fd		= &init_files.fd_array[0],
		.close_on_exec	= init_files.close_on_exec_init,
		.open_fds	= init_files.open_fds_init,
		.full_fds_bits	= init_files.full_fds_bits_init,
	},
	.file_lock	= __SPIN_LOCK_UNLOCKED(init_files.file_lock),
	.resize_wait	= __WAIT_QUEUE_HEAD_INITIALIZER(init_files.resize_wait),
};

static unsigned int find_next_fd(struct fdtable *fdt, unsigned int start)
{
	unsigned int maxfd = fdt->max_fds;
	unsigned int maxbit = maxfd / BITS_PER_LONG;
	unsigned int bitbit = start / BITS_PER_LONG;

	bitbit = find_next_zero_bit(fdt->full_fds_bits, maxbit, bitbit) * BITS_PER_LONG;
	if (bitbit > maxfd)
		return maxfd;
	if (bitbit > start)
		start = bitbit;
	return find_next_zero_bit(fdt->open_fds, maxfd, start);
}

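/*
 * Example of the two-level search above (64-bit build): with fds 0..63
 * all in use and fd 64 free, find_next_fd(fdt, 0) sees bit 0 of
 * full_fds_bits set, so the first find_next_zero_bit() jumps straight
 * to bit 64 and the per-fd scan of open_fds starts there instead of
 * walking the 64 busy bits one by one.
 */
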
/*
 * allocate a file descriptor, mark it busy.
 */
int __alloc_fd(struct files_struct *files,
	       unsigned start, unsigned end, unsigned flags)
{
	unsigned int fd;
	int error;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
repeat:
	fdt = files_fdtable(files);
	fd = start;
	if (fd < files->next_fd)
		fd = files->next_fd;

	if (fd < fdt->max_fds)
		fd = find_next_fd(fdt, fd);

	/*
	 * N.B. For clone tasks sharing a files structure, this test
	 * will limit the total number of files that can be opened.
	 */
	error = -EMFILE;
	if (fd >= end)
		goto out;

	error = expand_files(files, fd);
	if (error < 0)
		goto out;

	/*
	 * If we needed to expand the fs array we
	 * might have blocked - try again.
	 */
	if (error)
		goto repeat;

	if (start <= files->next_fd)
		files->next_fd = fd + 1;

	__set_open_fd(fd, fdt);
	if (flags & O_CLOEXEC)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	error = fd;
	/* Sanity check */
	if (rcu_access_pointer(fdt->fd[fd]) != NULL) {
		printk(KERN_WARNING "alloc_fd: slot %d not NULL!\n", fd);
		rcu_assign_pointer(fdt->fd[fd], NULL);
	}

out:
	spin_unlock(&files->file_lock);
	return error;
}

static int alloc_fd(unsigned start, unsigned flags)
{
	return __alloc_fd(current->files, start, rlimit(RLIMIT_NOFILE), flags);
}

int get_unused_fd_flags(unsigned flags)
{
	return __alloc_fd(current->files, 0, rlimit(RLIMIT_NOFILE), flags);
}
EXPORT_SYMBOL(get_unused_fd_flags);

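/*
 * Illustrative caller pattern (a sketch only; example_fops is a
 * hypothetical file_operations, not something defined in this file):
 * reserve the slot first, create the struct file, and publish it with
 * fd_install() only once nothing can fail, so the error path merely
 * drops the reserved slot with put_unused_fd().
 *
 *	int fd = get_unused_fd_flags(O_CLOEXEC);
 *	if (fd < 0)
 *		return fd;
 *	file = anon_inode_getfile("[example]", &example_fops, NULL, O_RDWR);
 *	if (IS_ERR(file)) {
 *		put_unused_fd(fd);
 *		return PTR_ERR(file);
 *	}
 *	fd_install(fd, file);
 *	return fd;
 */
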
static void __put_unused_fd(struct files_struct *files, unsigned int fd)
{
	struct fdtable *fdt = files_fdtable(files);
	__clear_open_fd(fd, fdt);
	if (fd < files->next_fd)
		files->next_fd = fd;
}

void put_unused_fd(unsigned int fd)
{
	struct files_struct *files = current->files;
	spin_lock(&files->file_lock);
	__put_unused_fd(files, fd);
	spin_unlock(&files->file_lock);
}

EXPORT_SYMBOL(put_unused_fd);

/*
 * Install a file pointer in the fd array.
 *
 * The VFS is full of places where we drop the files lock between
 * setting the open_fds bitmap and installing the file in the file
 * array. At any such point, we are vulnerable to a dup2() race
 * installing a file in the array before us. We need to detect this and
 * fput() the struct file we are about to overwrite in this case.
 *
 * It should never happen - if we allow dup2() do it, _really_ bad things
 * will follow.
 *
 * NOTE: __fd_install() variant is really, really low-level; don't
 * use it unless you are forced to by truly lousy API shoved down
 * your throat. 'files' *MUST* be either current->files or obtained
 * by get_files_struct(current) done by whoever had given it to you,
 * or really bad things will happen. Normally you want to use
 * fd_install() instead.
 */
void __fd_install(struct files_struct *files, unsigned int fd,
		struct file *file)
{
	struct fdtable *fdt;

	rcu_read_lock_sched();

	if (unlikely(files->resize_in_progress)) {
		rcu_read_unlock_sched();
		spin_lock(&files->file_lock);
		fdt = files_fdtable(files);
		BUG_ON(fdt->fd[fd] != NULL);
		rcu_assign_pointer(fdt->fd[fd], file);
		spin_unlock(&files->file_lock);
		return;
	}
	/* coupled with smp_wmb() in expand_fdtable() */
	smp_rmb();
	fdt = rcu_dereference_sched(files->fdt);
	BUG_ON(fdt->fd[fd] != NULL);
	rcu_assign_pointer(fdt->fd[fd], file);
	rcu_read_unlock_sched();
}

void fd_install(unsigned int fd, struct file *file)
{
	__fd_install(current->files, fd, file);
}

EXPORT_SYMBOL(fd_install);

/*
 * The same warnings as for __alloc_fd()/__fd_install() apply here...
 */
int __close_fd(struct files_struct *files, unsigned fd)
{
	struct file *file;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (fd >= fdt->max_fds)
		goto out_unlock;
	file = fdt->fd[fd];
	if (!file)
		goto out_unlock;
	rcu_assign_pointer(fdt->fd[fd], NULL);
	__put_unused_fd(files, fd);
	spin_unlock(&files->file_lock);
	return filp_close(file, files);

out_unlock:
	spin_unlock(&files->file_lock);
	return -EBADF;
}
EXPORT_SYMBOL(__close_fd); /* for ksys_close() */

/*
 * variant of __close_fd that gets a ref on the file for later fput
 */
int __close_fd_get_file(unsigned int fd, struct file **res)
{
	struct files_struct *files = current->files;
	struct file *file;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (fd >= fdt->max_fds)
		goto out_unlock;
	file = fdt->fd[fd];
	if (!file)
		goto out_unlock;
	rcu_assign_pointer(fdt->fd[fd], NULL);
	__put_unused_fd(files, fd);
	spin_unlock(&files->file_lock);
	get_file(file);
	*res = file;
	return filp_close(file, files);

out_unlock:
	spin_unlock(&files->file_lock);
	*res = NULL;
	return -ENOENT;
}

void do_close_on_exec(struct files_struct *files)
{
	unsigned i;
	struct fdtable *fdt;

	/* exec unshares first */
	spin_lock(&files->file_lock);
	for (i = 0; ; i++) {
		unsigned long set;
		unsigned fd = i * BITS_PER_LONG;
		fdt = files_fdtable(files);
		if (fd >= fdt->max_fds)
			break;
		set = fdt->close_on_exec[i];
		if (!set)
			continue;
		fdt->close_on_exec[i] = 0;
		for ( ; set ; fd++, set >>= 1) {
			struct file *file;
			if (!(set & 1))
				continue;
			file = fdt->fd[fd];
			if (!file)
				continue;
			rcu_assign_pointer(fdt->fd[fd], NULL);
			__put_unused_fd(files, fd);
			spin_unlock(&files->file_lock);
			filp_close(file, files);
			cond_resched();
			spin_lock(&files->file_lock);
		}
	}
	spin_unlock(&files->file_lock);
}

static struct file *__fget(unsigned int fd, fmode_t mask, unsigned int refs)
{
	struct files_struct *files = current->files;
	struct file *file;

	rcu_read_lock();
loop:
	file = fcheck_files(files, fd);
	if (file) {
		/* File object ref couldn't be taken.
		 * dup2() atomicity guarantee is the reason
		 * we loop to catch the new file (or NULL pointer)
		 */
		if (file->f_mode & mask)
			file = NULL;
		else if (!get_file_rcu_many(file, refs))
			goto loop;
	}
	rcu_read_unlock();

	return file;
}

struct file *fget_many(unsigned int fd, unsigned int refs)
{
	return __fget(fd, FMODE_PATH, refs);
}

struct file *fget(unsigned int fd)
{
	return __fget(fd, FMODE_PATH, 1);
}
EXPORT_SYMBOL(fget);

struct file *fget_raw(unsigned int fd)
{
	return __fget(fd, 0, 1);
}
EXPORT_SYMBOL(fget_raw);

/*
 * Lightweight file lookup - no refcnt increment if fd table isn't shared.
 *
 * You can use this instead of fget if you satisfy all of the following
 * conditions:
 * 1) You must call fput_light before exiting the syscall and returning control
 *    to userspace (i.e. you cannot remember the returned struct file * after
 *    returning to userspace).
 * 2) You must not call filp_close on the returned struct file * in between
 *    calls to fget_light and fput_light.
 * 3) You must not clone the current task in between the calls to fget_light
 *    and fput_light.
 *
 * The fput_needed flag returned by fget_light should be passed to the
 * corresponding fput_light.
 */
static unsigned long __fget_light(unsigned int fd, fmode_t mask)
{
	struct files_struct *files = current->files;
	struct file *file;

	if (atomic_read(&files->count) == 1) {
		file = __fcheck_files(files, fd);
		if (!file || unlikely(file->f_mode & mask))
			return 0;
		return (unsigned long)file;
	} else {
		file = __fget(fd, mask, 1);
		if (!file)
			return 0;
		return FDPUT_FPUT | (unsigned long)file;
	}
}

unsigned long __fdget(unsigned int fd)
{
	return __fget_light(fd, FMODE_PATH);
}
EXPORT_SYMBOL(__fdget);

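/*
 * Illustrative use of the fdget()/fdput() helpers built on __fdget()
 * (a sketch only; example_op() is hypothetical, not defined here):
 *
 *	struct fd f = fdget(fd);
 *	if (!f.file)
 *		return -EBADF;
 *	ret = example_op(f.file);
 *	fdput(f);	// drops the reference only if FDPUT_FPUT was set
 *	return ret;
 */
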
unsigned long __fdget_raw(unsigned int fd)
{
	return __fget_light(fd, 0);
}

unsigned long __fdget_pos(unsigned int fd)
{
	unsigned long v = __fdget(fd);
	struct file *file = (struct file *)(v & ~3);

	if (file && (file->f_mode & FMODE_ATOMIC_POS)) {
		if (file_count(file) > 1) {
			v |= FDPUT_POS_UNLOCK;
			mutex_lock(&file->f_pos_lock);
		}
	}
	return v;
}

void __f_unlock_pos(struct file *f)
{
	mutex_unlock(&f->f_pos_lock);
}

/*
 * We only lock f_pos if we have threads or if the file might be
 * shared with another process. In both cases we'll have an elevated
 * file count (done either by fdget() or by fork()).
 */

void set_close_on_exec(unsigned int fd, int flag)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (flag)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	spin_unlock(&files->file_lock);
}

bool get_close_on_exec(unsigned int fd)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	bool res;
	rcu_read_lock();
	fdt = files_fdtable(files);
	res = close_on_exec(fd, fdt);
	rcu_read_unlock();
	return res;
}

static int do_dup2(struct files_struct *files,
	struct file *file, unsigned fd, unsigned flags)
__releases(&files->file_lock)
{
	struct file *tofree;
	struct fdtable *fdt;

	/*
	 * We need to detect attempts to do dup2() over allocated but still
	 * not finished descriptor. NB: OpenBSD avoids that at the price of
	 * extra work in their equivalent of fget() - they insert struct
	 * file immediately after grabbing descriptor, mark it larval if
	 * more work (e.g. actual opening) is needed and make sure that
	 * fget() treats larval files as absent. Potentially interesting,
	 * but while extra work in fget() is trivial, locking implications
	 * and amount of surgery on open()-related paths in VFS are not.
	 * FreeBSD fails with -EBADF in the same situation, NetBSD "solution"
	 * deadlocks in rather amusing ways, AFAICS. All of that is out of
	 * scope of POSIX or SUS, since neither considers shared descriptor
	 * tables and this condition does not arise without those.
	 */
	fdt = files_fdtable(files);
	tofree = fdt->fd[fd];
	if (!tofree && fd_is_open(fd, fdt))
		goto Ebusy;
	get_file(file);
	rcu_assign_pointer(fdt->fd[fd], file);
	__set_open_fd(fd, fdt);
	if (flags & O_CLOEXEC)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	spin_unlock(&files->file_lock);

	if (tofree)
		filp_close(tofree, files);

	return fd;

Ebusy:
	spin_unlock(&files->file_lock);
	return -EBUSY;
}

int replace_fd(unsigned fd, struct file *file, unsigned flags)
{
	int err;
	struct files_struct *files = current->files;

	if (!file)
		return __close_fd(files, fd);

	if (fd >= rlimit(RLIMIT_NOFILE))
		return -EBADF;

	spin_lock(&files->file_lock);
	err = expand_files(files, fd);
	if (unlikely(err < 0))
		goto out_unlock;
	return do_dup2(files, file, fd, flags);

out_unlock:
	spin_unlock(&files->file_lock);
	return err;
}

static int ksys_dup3(unsigned int oldfd, unsigned int newfd, int flags)
{
	int err = -EBADF;
	struct file *file;
	struct files_struct *files = current->files;

	if ((flags & ~O_CLOEXEC) != 0)
		return -EINVAL;

	if (unlikely(oldfd == newfd))
		return -EINVAL;

	if (newfd >= rlimit(RLIMIT_NOFILE))
		return -EBADF;

	spin_lock(&files->file_lock);
	err = expand_files(files, newfd);
	file = fcheck(oldfd);
	if (unlikely(!file))
		goto Ebadf;
	if (unlikely(err < 0)) {
		if (err == -EMFILE)
			goto Ebadf;
		goto out_unlock;
	}
	return do_dup2(files, file, newfd, flags);

Ebadf:
	err = -EBADF;
out_unlock:
	spin_unlock(&files->file_lock);
	return err;
}

SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
{
	return ksys_dup3(oldfd, newfd, flags);
}

SYSCALL_DEFINE2(dup2, unsigned int, oldfd, unsigned int, newfd)
{
	if (unlikely(newfd == oldfd)) { /* corner case */
		struct files_struct *files = current->files;
		int retval = oldfd;

		rcu_read_lock();
		if (!fcheck_files(files, oldfd))
			retval = -EBADF;
		rcu_read_unlock();
		return retval;
	}
	return ksys_dup3(oldfd, newfd, 0);
}

int ksys_dup(unsigned int fildes)
{
	int ret = -EBADF;
	struct file *file = fget_raw(fildes);

	if (file) {
		ret = get_unused_fd_flags(0);
		if (ret >= 0)
			fd_install(ret, file);
		else
			fput(file);
	}
	return ret;
}

SYSCALL_DEFINE1(dup, unsigned int, fildes)
{
	return ksys_dup(fildes);
}

int f_dupfd(unsigned int from, struct file *file, unsigned flags)
{
	int err;
	if (from >= rlimit(RLIMIT_NOFILE))
		return -EINVAL;
	err = alloc_fd(from, flags);
	if (err >= 0) {
		get_file(file);
		fd_install(err, file);
	}
	return err;
}

int iterate_fd(struct files_struct *files, unsigned n,
		int (*f)(const void *, struct file *, unsigned),
		const void *p)
{
	struct fdtable *fdt;
	int res = 0;
	if (!files)
		return 0;
	spin_lock(&files->file_lock);
	for (fdt = files_fdtable(files); n < fdt->max_fds; n++) {
		struct file *file;
		file = rcu_dereference_check_fdtable(files, fdt->fd[n]);
		if (!file)
			continue;
		res = f(p, file, n);
		if (res)
			break;
	}
	spin_unlock(&files->file_lock);
	return res;
}
EXPORT_SYMBOL(iterate_fd);