/*
 * Copyright (C) 1998-1999, Stephen Tweedie and Bill Hawes
 *
 * Manage the dynamic fd arrays in the process files_struct.
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/time.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/file.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>

struct fdtable_defer {
	spinlock_t lock;
	struct work_struct wq;
	struct timer_list timer;
	struct fdtable *next;
};

/*
 * We use this list to defer freeing fdtables that have vmalloced
 * sets/arrays. By keeping a per-cpu list, we avoid having to embed
 * the work_struct in the fdtable itself, which would add 64 bytes (i386)
 * to this per-task structure.
 */
static DEFINE_PER_CPU(struct fdtable_defer, fdtable_defer_list);

/*
 * Allocate an fd array, using kmalloc or vmalloc.
 * Note: the array isn't cleared at allocation time.
 */
struct file ** alloc_fd_array(int num)
{
	struct file **new_fds;
	int size = num * sizeof(struct file *);

	if (size <= PAGE_SIZE)
		new_fds = (struct file **) kmalloc(size, GFP_KERNEL);
	else
		new_fds = (struct file **) vmalloc(size);
	return new_fds;
}

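/*
 * Free an fd array previously allocated with alloc_fd_array(), matching the
 * kmalloc/vmalloc choice made at allocation time. The default array embedded
 * in files_struct is never freed here.
 */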
void free_fd_array(struct file **array, int num)
{
	int size = num * sizeof(struct file *);

	if (!array) {
		printk(KERN_ERR "free_fd_array: array = 0 (num = %d)\n", num);
		return;
	}

	if (num <= NR_OPEN_DEFAULT) /* Don't free the embedded fd array! */
		return;
	else if (size <= PAGE_SIZE)
		kfree(array);
	else
		vfree(array);
}

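/*
 * Release everything attached to an fdtable (the fdsets and the fd array)
 * and then the fdtable itself.
 */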
static void __free_fdtable(struct fdtable *fdt)
{
	free_fdset(fdt->open_fds, fdt->max_fdset);
	free_fdset(fdt->close_on_exec, fdt->max_fdset);
	free_fd_array(fdt->fd, fdt->max_fds);
	kfree(fdt);
}

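/*
 * Backup timer handler: if the deferred list is still non-empty, retry
 * scheduling the work item, re-arming the timer if that fails again.
 */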
static void fdtable_timer(unsigned long data)
{
	struct fdtable_defer *fddef = (struct fdtable_defer *)data;

	spin_lock(&fddef->lock);
	/*
	 * If someone already emptied the queue, return.
	 */
	if (!fddef->next)
		goto out;
	if (!schedule_work(&fddef->wq))
		mod_timer(&fddef->timer, 5);
out:
	spin_unlock(&fddef->lock);
}

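/*
 * Workqueue handler: detach the per-cpu list of deferred fdtables and free
 * each entry in process context, where vfree() is safe.
 */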
static void free_fdtable_work(struct work_struct *work)
{
	struct fdtable_defer *f =
		container_of(work, struct fdtable_defer, wq);
	struct fdtable *fdt;

	spin_lock_bh(&f->lock);
	fdt = f->next;
	f->next = NULL;
	spin_unlock_bh(&f->lock);
	while (fdt) {
		struct fdtable *next = fdt->next;
		__free_fdtable(fdt);
		fdt = next;
	}
}

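/*
 * RCU callback that actually releases an fdtable once all readers are done
 * with it. Small, kmalloc'ed parts are freed here; vmalloc'ed parts are
 * pushed onto the per-cpu list and freed from the workqueue instead.
 */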
static void free_fdtable_rcu(struct rcu_head *rcu)
{
	struct fdtable *fdt = container_of(rcu, struct fdtable, rcu);
	int fdset_size, fdarray_size;
	struct fdtable_defer *fddef;

	fdset_size = fdt->max_fdset / 8;
	fdarray_size = fdt->max_fds * sizeof(struct file *);

	if (fdt->free_files) {
		/*
		 * This fdtable was embedded in the files structure
		 * and the files structure itself was getting destroyed.
		 * It is now safe to free the files structure.
		 */
		kmem_cache_free(files_cachep, fdt->free_files);
		return;
	}
	if (fdt->max_fdset <= EMBEDDED_FD_SET_SIZE &&
		fdt->max_fds <= NR_OPEN_DEFAULT) {
		/*
		 * The fdtable was embedded; nothing to free.
		 */
		return;
	}
	if (fdset_size <= PAGE_SIZE && fdarray_size <= PAGE_SIZE) {
		kfree(fdt->open_fds);
		kfree(fdt->close_on_exec);
		kfree(fdt->fd);
		kfree(fdt);
	} else {
		fddef = &get_cpu_var(fdtable_defer_list);
		spin_lock(&fddef->lock);
		fdt->next = fddef->next;
		fddef->next = fdt;
		/*
		 * vmallocs are handled from the workqueue context.
		 * If the per-cpu workqueue is running, then we
		 * defer work scheduling through a timer.
		 */
		if (!schedule_work(&fddef->wq))
			mod_timer(&fddef->timer, 5);
		spin_unlock(&fddef->lock);
		put_cpu_var(fdtable_defer_list);
	}
}

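/*
 * Defer freeing of an fdtable to an RCU grace period. Embedded fdtables with
 * no files structure attached need no freeing and are simply skipped.
 */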
void free_fdtable(struct fdtable *fdt)
{
	if (fdt->free_files ||
			fdt->max_fdset > EMBEDDED_FD_SET_SIZE ||
			fdt->max_fds > NR_OPEN_DEFAULT)
		call_rcu(&fdt->rcu, free_fdtable_rcu);
}

/*
 * Expand the fdset in the files_struct. Called with the files spinlock
 * held.
 */
static void copy_fdtable(struct fdtable *nfdt, struct fdtable *fdt)
{
	int i;
	int count;

	BUG_ON(nfdt->max_fdset < fdt->max_fdset);
	BUG_ON(nfdt->max_fds < fdt->max_fds);
	/* Copy the existing tables and install the new pointers */

	i = fdt->max_fdset / (sizeof(unsigned long) * 8);
	count = (nfdt->max_fdset - fdt->max_fdset) / 8;

	/*
	 * Don't copy the entire array if the current fdset is
	 * not yet initialised.
	 */
	if (i) {
		memcpy(nfdt->open_fds, fdt->open_fds,
						fdt->max_fdset / 8);
		memcpy(nfdt->close_on_exec, fdt->close_on_exec,
						fdt->max_fdset / 8);
		memset(&nfdt->open_fds->fds_bits[i], 0, count);
		memset(&nfdt->close_on_exec->fds_bits[i], 0, count);
	}

	/* Don't copy/clear the array if we are creating a new
	   fd array for fork() */
	if (fdt->max_fds) {
		memcpy(nfdt->fd, fdt->fd,
			fdt->max_fds * sizeof(struct file *));
		/* clear the remainder of the array */
		memset(&nfdt->fd[fdt->max_fds], 0,
		       (nfdt->max_fds - fdt->max_fds) *
						sizeof(struct file *));
	}
}

/*
 * Allocate an fdset array, using kmalloc or vmalloc.
 * Note: the array isn't cleared at allocation time.
 */
fd_set * alloc_fdset(int num)
{
	fd_set *new_fdset;
	int size = num / 8;

	if (size <= PAGE_SIZE)
		new_fdset = (fd_set *) kmalloc(size, GFP_KERNEL);
	else
		new_fdset = (fd_set *) vmalloc(size);
	return new_fdset;
}

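/*
 * Free an fdset previously allocated with alloc_fdset(); num is the size in
 * bits, so the kfree/vfree choice mirrors the one made at allocation time.
 */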
void free_fdset(fd_set *array, int num)
{
	if (num <= EMBEDDED_FD_SET_SIZE) /* Don't free an embedded fdset */
		return;
	else if (num <= 8 * PAGE_SIZE)
		kfree(array);
	else
		vfree(array);
}

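/*
 * Allocate a new fdtable large enough for at least nr descriptors, together
 * with its fd array and fdsets. Returns NULL on allocation failure.
 */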
static struct fdtable *alloc_fdtable(int nr)
{
	struct fdtable *fdt = NULL;
	int nfds = 0;
	fd_set *new_openset = NULL, *new_execset = NULL;
	struct file **new_fds;

	fdt = kzalloc(sizeof(*fdt), GFP_KERNEL);
	if (!fdt)
		goto out;

	nfds = max_t(int, 8 * L1_CACHE_BYTES, roundup_pow_of_two(nr + 1));
	if (nfds > NR_OPEN)
		nfds = NR_OPEN;

	new_openset = alloc_fdset(nfds);
	new_execset = alloc_fdset(nfds);
	if (!new_openset || !new_execset)
		goto out;
	fdt->open_fds = new_openset;
	fdt->close_on_exec = new_execset;
	fdt->max_fdset = nfds;

	nfds = NR_OPEN_DEFAULT;
	/*
	 * Expand to the max in easy steps, and keep expanding it until
	 * we have enough for the requested fd array size.
	 */
	do {
#if NR_OPEN_DEFAULT < 256
		if (nfds < 256)
			nfds = 256;
		else
#endif
		if (nfds < (PAGE_SIZE / sizeof(struct file *)))
			nfds = PAGE_SIZE / sizeof(struct file *);
		else {
			nfds = nfds * 2;
			if (nfds > NR_OPEN)
				nfds = NR_OPEN;
		}
	} while (nfds <= nr);
	new_fds = alloc_fd_array(nfds);
	if (!new_fds)
		goto out2;
	fdt->fd = new_fds;
	fdt->max_fds = nfds;
	fdt->free_files = NULL;
	return fdt;
out2:
	nfds = fdt->max_fdset;
out:
	free_fdset(new_openset, nfds);
	free_fdset(new_execset, nfds);
	kfree(fdt);
	return NULL;
}

/*
 * Expand the file descriptor table.
 * This function will allocate a new fdtable and both fd array and fdset, of
 * the given size.
 * Return <0 error code on error; 1 on successful completion.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_fdtable(struct files_struct *files, int nr)
	__releases(files->file_lock)
	__acquires(files->file_lock)
{
	struct fdtable *new_fdt, *cur_fdt;

	spin_unlock(&files->file_lock);
	new_fdt = alloc_fdtable(nr);
	spin_lock(&files->file_lock);
	if (!new_fdt)
		return -ENOMEM;
	/*
	 * Check again since another task may have expanded the fd table while
	 * we dropped the lock
	 */
	cur_fdt = files_fdtable(files);
	if (nr >= cur_fdt->max_fds || nr >= cur_fdt->max_fdset) {
		/* Continue as planned */
		copy_fdtable(new_fdt, cur_fdt);
		rcu_assign_pointer(files->fdt, new_fdt);
		free_fdtable(cur_fdt);
	} else {
		/* Somebody else expanded, so undo our attempt */
		__free_fdtable(new_fdt);
	}
	return 1;
}

/*
 * This function will expand the file structures, if the requested size exceeds
 * the current capacity and there is room for expansion.
 * Return <0 error code on error; 0 when nothing done; 1 when files were
 * expanded and execution may have blocked.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
int expand_files(struct files_struct *files, int nr)
{
	struct fdtable *fdt;

	fdt = files_fdtable(files);
	/* Do we need to expand? */
	if (nr < fdt->max_fdset && nr < fdt->max_fds)
		return 0;
	/* Can we expand? */
	if (fdt->max_fdset >= NR_OPEN || fdt->max_fds >= NR_OPEN ||
			nr >= NR_OPEN)
		return -EMFILE;

	/* All good, so we try */
	return expand_fdtable(files, nr);
}

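/*
 * Initialise one per-cpu fdtable_defer entry: its lock, its work item and
 * the backup timer used when the work cannot be scheduled immediately.
 */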
static void __devinit fdtable_defer_list_init(int cpu)
{
	struct fdtable_defer *fddef = &per_cpu(fdtable_defer_list, cpu);
	spin_lock_init(&fddef->lock);
	INIT_WORK(&fddef->wq, free_fdtable_work);
	init_timer(&fddef->timer);
	fddef->timer.data = (unsigned long)fddef;
	fddef->timer.function = fdtable_timer;
	fddef->next = NULL;
}

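/* Set up the deferred-free state for every possible CPU. */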
void __init files_defer_init(void)
{
	int i;
	for_each_possible_cpu(i)
		fdtable_defer_list_init(i);
}