/*
 *  linux/fs/fcntl.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/dnotify.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/security.h>
#include <linux/ptrace.h>

#include <asm/poll.h>
#include <asm/siginfo.h>
#include <asm/uaccess.h>
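
/*
 * Set or clear the close-on-exec bit for @fd in the current process's
 * file descriptor table, taking the table's spinlock.
 */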
void fastcall set_close_on_exec(unsigned int fd, int flag)
{
        struct files_struct *files = current->files;
        spin_lock(&files->file_lock);
        if (flag)
                FD_SET(fd, files->close_on_exec);
        else
                FD_CLR(fd, files->close_on_exec);
        spin_unlock(&files->file_lock);
}
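
/* Read back the close-on-exec bit for @fd, again under file_lock. */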
static inline int get_close_on_exec(unsigned int fd)
{
        struct files_struct *files = current->files;
        int res;
        spin_lock(&files->file_lock);
        res = FD_ISSET(fd, files->close_on_exec);
        spin_unlock(&files->file_lock);
        return res;
}

/* Expand files.  Return <0 on error; 0 nothing done; 1 files expanded,
 * we may have blocked.
 *
 * Should be called with the files->file_lock spinlock held for write.
 */
static int expand_files(struct files_struct *files, int nr)
{
        int err, expand = 0;
#ifdef FDSET_DEBUG
        printk(KERN_ERR "%s %d: nr = %d\n", __FUNCTION__, current->pid, nr);
#endif

        if (nr >= files->max_fdset) {
                expand = 1;
                if ((err = expand_fdset(files, nr)))
                        goto out;
        }
        if (nr >= files->max_fds) {
                expand = 1;
                if ((err = expand_fd_array(files, nr)))
                        goto out;
        }
        err = expand;
out:
#ifdef FDSET_DEBUG
        if (err)
                printk(KERN_ERR "%s %d: return %d\n", __FUNCTION__, current->pid, err);
#endif
        return err;
}

/*
 * locate_fd finds a free file descriptor in the open_fds fdset,
 * expanding the fd arrays if necessary.  Must be called with the
 * file_lock held for write.
 */
static int locate_fd(struct files_struct *files,
                     struct file *file, unsigned int orig_start)
{
        unsigned int newfd;
        unsigned int start;
        int error;

        error = -EINVAL;
        if (orig_start >= current->rlim[RLIMIT_NOFILE].rlim_cur)
                goto out;

repeat:
        /*
         * Someone might have closed fd's in the range
         * orig_start..files->next_fd
         */
        start = orig_start;
        if (start < files->next_fd)
                start = files->next_fd;

        newfd = start;
        if (start < files->max_fdset) {
                newfd = find_next_zero_bit(files->open_fds->fds_bits,
                                           files->max_fdset, start);
        }

        error = -EMFILE;
        if (newfd >= current->rlim[RLIMIT_NOFILE].rlim_cur)
                goto out;

        error = expand_files(files, newfd);
        if (error < 0)
                goto out;

        /*
         * If we needed to expand the fd array we
         * might have blocked - try again.
         */
        if (error)
                goto repeat;

        if (start <= files->next_fd)
                files->next_fd = newfd + 1;

        error = newfd;

out:
        return error;
}
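
/*
 * Install @file at the lowest free descriptor at or above @start,
 * consuming the caller's reference on @file (on failure the reference
 * is dropped via fput()).
 */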
static int dupfd(struct file *file, unsigned int start)
{
        struct files_struct * files = current->files;
        int fd;

        spin_lock(&files->file_lock);
        fd = locate_fd(files, file, start);
        if (fd >= 0) {
                FD_SET(fd, files->open_fds);
                FD_CLR(fd, files->close_on_exec);
                spin_unlock(&files->file_lock);
                fd_install(fd, file);
        } else {
                spin_unlock(&files->file_lock);
                fput(file);
        }

        return fd;
}
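
/*
 * dup2() semantics: make newfd refer to the same open file as oldfd,
 * closing whatever newfd referred to before.  The duplicate starts
 * with its close-on-exec flag cleared.  A sketch of typical userspace
 * use (illustrative only, not part of this file):
 *
 *	int logfd = open("log.txt", O_WRONLY | O_APPEND);
 *	dup2(logfd, 2);		// redirect stderr to the log file
 */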
asmlinkage long sys_dup2(unsigned int oldfd, unsigned int newfd)
{
        int err = -EBADF;
        struct file * file, *tofree;
        struct files_struct * files = current->files;

        spin_lock(&files->file_lock);
        if (!(file = fcheck(oldfd)))
                goto out_unlock;
        err = newfd;
        if (newfd == oldfd)
                goto out_unlock;
        err = -EBADF;
        if (newfd >= current->rlim[RLIMIT_NOFILE].rlim_cur)
                goto out_unlock;
        get_file(file);                 /* We are now finished with oldfd */

        err = expand_files(files, newfd);
        if (err < 0)
                goto out_fput;

        /* To avoid races with open() and dup(), we will mark the fd as
         * in-use in the open-file bitmap throughout the entire dup2()
         * process.  This is quite safe: do_close() uses the fd array
         * entry, not the bitmap, to decide what work needs to be
         * done.  --sct */
        /* Doesn't work. open() might be there first. --AV */

        /* Yes. It's a race. In user space. Nothing sane to do */
        err = -EBUSY;
        tofree = files->fd[newfd];
        if (!tofree && FD_ISSET(newfd, files->open_fds))
                goto out_fput;

        files->fd[newfd] = file;
        FD_SET(newfd, files->open_fds);
        FD_CLR(newfd, files->close_on_exec);
        spin_unlock(&files->file_lock);

        if (tofree)
                filp_close(tofree, files);
        err = newfd;
out:
        return err;
out_unlock:
        spin_unlock(&files->file_lock);
        goto out;

out_fput:
        spin_unlock(&files->file_lock);
        fput(file);
        goto out;
}
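
/* dup() is simply dupfd() starting the search at descriptor 0. */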
asmlinkage long sys_dup(unsigned int fildes)
{
        int ret = -EBADF;
        struct file * file = fget(fildes);

        if (file)
                ret = dupfd(file, 0);
        return ret;
}
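
/*
 * setfl() implements F_SETFL: only the flags in SETFL_MASK may change
 * after open time.  Illustrative userspace use (not part of this file):
 *
 *	int flags = fcntl(fd, F_GETFL);
 *	fcntl(fd, F_SETFL, flags | O_NONBLOCK);	// switch to non-blocking I/O
 */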
#define SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | FASYNC | O_DIRECT)

static int setfl(int fd, struct file * filp, unsigned long arg)
{
        struct inode * inode = filp->f_dentry->d_inode;
        int error = 0;

        /* O_APPEND cannot be cleared if the file is marked as append-only */
        if (!(arg & O_APPEND) && IS_APPEND(inode))
                return -EPERM;

        /* required for strict SunOS emulation */
        if (O_NONBLOCK != O_NDELAY)
                if (arg & O_NDELAY)
                        arg |= O_NONBLOCK;

        if (arg & O_DIRECT) {
                if (!filp->f_mapping || !filp->f_mapping->a_ops ||
                    !filp->f_mapping->a_ops->direct_IO)
                        return -EINVAL;
        }

        lock_kernel();
        if ((arg ^ filp->f_flags) & FASYNC) {
                if (filp->f_op && filp->f_op->fasync) {
                        error = filp->f_op->fasync(fd, filp, (arg & FASYNC) != 0);
                        if (error < 0)
                                goto out;
                }
        }

        filp->f_flags = (arg & SETFL_MASK) | (filp->f_flags & ~SETFL_MASK);
out:
        unlock_kernel();
        return error;
}
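
/*
 * Record the owner (a pid, or a process group if negative) that will
 * receive SIGIO/SIGURG for this file.  Unless @force is set, an
 * already-registered owner is left untouched.
 */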
static void f_modown(struct file *filp, unsigned long pid,
                     uid_t uid, uid_t euid, int force)
{
        write_lock_irq(&filp->f_owner.lock);
        if (force || !filp->f_owner.pid) {
                filp->f_owner.pid = pid;
                filp->f_owner.uid = uid;
                filp->f_owner.euid = euid;
        }
        write_unlock_irq(&filp->f_owner.lock);
}

int f_setown(struct file *filp, unsigned long arg, int force)
{
        int err;

        err = security_file_set_fowner(filp);
        if (err)
                return err;

        f_modown(filp, arg, current->uid, current->euid, force);
        return 0;
}

EXPORT_SYMBOL(f_setown);

void f_delown(struct file *filp)
{
        f_modown(filp, 0, 0, 0, 1);
}

EXPORT_SYMBOL(f_delown);
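
/*
 * Dispatch a single fcntl() command.  Commands not handled here fall
 * through to the default case and return -EINVAL.
 */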
static long do_fcntl(unsigned int fd, unsigned int cmd,
                     unsigned long arg, struct file * filp)
{
        long err = -EINVAL;

        switch (cmd) {
        case F_DUPFD:
                get_file(filp);
                err = dupfd(filp, arg);
                break;
        case F_GETFD:
                err = get_close_on_exec(fd) ? FD_CLOEXEC : 0;
                break;
        case F_SETFD:
                err = 0;
                set_close_on_exec(fd, arg & FD_CLOEXEC);
                break;
        case F_GETFL:
                err = filp->f_flags;
                break;
        case F_SETFL:
                err = setfl(fd, filp, arg);
                break;
        case F_GETLK:
                err = fcntl_getlk(filp, (struct flock __user *) arg);
                break;
        case F_SETLK:
        case F_SETLKW:
                err = fcntl_setlk(filp, cmd, (struct flock __user *) arg);
                break;
        case F_GETOWN:
                /*
                 * XXX If f_owner is a process group, the
                 * negative return value will get converted
                 * into an error.  Oops.  If we keep the
                 * current syscall conventions, the only way
                 * to fix this will be in libc.
                 */
                err = filp->f_owner.pid;
                force_successful_syscall_return();
                break;
        case F_SETOWN:
                err = f_setown(filp, arg, 1);
                break;
        case F_GETSIG:
                err = filp->f_owner.signum;
                break;
        case F_SETSIG:
                /* arg == 0 restores default behaviour.
                   (arg is unsigned, so only the upper bound needs checking.) */
                if (arg > _NSIG)
                        break;
                err = 0;
                filp->f_owner.signum = arg;
                break;
        case F_GETLEASE:
                err = fcntl_getlease(filp);
                break;
        case F_SETLEASE:
                err = fcntl_setlease(fd, filp, arg);
                break;
        case F_NOTIFY:
                err = fcntl_dirnotify(fd, filp, arg);
                break;
        default:
                break;
        }

        return err;
}
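
/*
 * fcntl() syscall entry: resolve the descriptor, run the LSM hook,
 * then hand off to do_fcntl().
 */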
asmlinkage long sys_fcntl(unsigned int fd, unsigned int cmd, unsigned long arg)
{
        struct file * filp;
        long err = -EBADF;

        filp = fget(fd);
        if (!filp)
                goto out;

        err = security_file_fcntl(filp, cmd, arg);
        if (err) {
                fput(filp);
                return err;
        }

        err = do_fcntl(fd, cmd, arg, filp);

        fput(filp);
out:
        return err;
}
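
/*
 * On 32-bit architectures fcntl64() adds the 64-bit file locking
 * commands (struct flock64); everything else is shared with fcntl().
 */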
#if BITS_PER_LONG == 32
asmlinkage long sys_fcntl64(unsigned int fd, unsigned int cmd, unsigned long arg)
{
        struct file * filp;
        long err;

        err = -EBADF;
        filp = fget(fd);
        if (!filp)
                goto out;

        err = security_file_fcntl(filp, cmd, arg);
        if (err) {
                fput(filp);
                return err;
        }
        err = -EBADF;

        switch (cmd) {
        case F_GETLK64:
                err = fcntl_getlk64(filp, (struct flock64 __user *) arg);
                break;
        case F_SETLK64:
        case F_SETLKW64:
                err = fcntl_setlk64(filp, cmd, (struct flock64 __user *) arg);
                break;
        default:
                err = do_fcntl(fd, cmd, arg, filp);
                break;
        }
        fput(filp);
out:
        return err;
}
#endif

/* Table to convert sigio signal codes into poll band bitmaps */

static long band_table[NSIGPOLL] = {
        POLLIN | POLLRDNORM,                    /* POLL_IN */
        POLLOUT | POLLWRNORM | POLLWRBAND,      /* POLL_OUT */
        POLLIN | POLLRDNORM | POLLMSG,          /* POLL_MSG */
        POLLERR,                                /* POLL_ERR */
        POLLPRI | POLLRDBAND,                   /* POLL_PRI */
        POLLHUP | POLLERR                       /* POLL_HUP */
};
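
/*
 * May the owner identified by @fown signal task @p?  Allowed if the
 * owner is privileged (euid 0), or its uid or euid matches the
 * target's uid or saved uid.
 */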
static inline int sigio_perm(struct task_struct *p,
                             struct fown_struct *fown)
{
        return ((fown->euid == 0) ||
                (fown->euid == p->suid) || (fown->euid == p->uid) ||
                (fown->uid == p->suid) || (fown->uid == p->uid));
}

static void send_sigio_to_task(struct task_struct *p,
                               struct fown_struct *fown,
                               int fd,
                               int reason)
{
        if (!sigio_perm(p, fown))
                return;

        if (security_file_send_sigiotask(p, fown, fd, reason))
                return;

        switch (fown->signum) {
                siginfo_t si;
        default:
                /* Queue a rt signal with the appropriate fd as its
                   value.  We use SI_SIGIO as the source, not
                   SI_KERNEL, since kernel signals always get
                   delivered even if we can't queue.  Failure to
                   queue in this case _should_ be reported; we fall
                   back to SIGIO in that case. --sct */
                si.si_signo = fown->signum;
                si.si_errno = 0;
                si.si_code  = reason;
                /* Make sure we are called with one of the POLL_*
                   reasons, otherwise we could leak kernel stack into
                   userspace.  */
                if ((reason & __SI_MASK) != __SI_POLL)
                        BUG();
                if (reason - POLL_IN >= NSIGPOLL)
                        si.si_band = ~0L;
                else
                        si.si_band = band_table[reason - POLL_IN];
                si.si_fd = fd;
                if (!send_sig_info(fown->signum, &si, p))
                        break;
                /* fall-through: fall back on the old plain SIGIO signal */
        case 0:
                send_group_sig_info(SIGIO, SEND_SIG_PRIV, p);
        }
}
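
/*
 * Deliver SIGIO for @fown: a positive pid names a single task, a
 * negative pid names a process group, fanned out to every member.
 */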
void send_sigio(struct fown_struct *fown, int fd, int band)
{
        struct task_struct *p;
        int pid;

        read_lock(&fown->lock);
        pid = fown->pid;
        if (!pid)
                goto out_unlock_fown;

        read_lock(&tasklist_lock);
        if (pid > 0) {
                p = find_task_by_pid(pid);
                if (p) {
                        send_sigio_to_task(p, fown, fd, band);
                }
        } else {
                struct list_head *l;
                struct pid *pidptr;
                for_each_task_pid(-pid, PIDTYPE_PGID, p, l, pidptr) {
                        send_sigio_to_task(p, fown, fd, band);
                }
        }
        read_unlock(&tasklist_lock);
out_unlock_fown:
        read_unlock(&fown->lock);
}
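
/*
 * SIGURG follows the same owner and permission rules but is never
 * queued as a realtime signal; send_sigurg() returns nonzero if an
 * owner was registered to receive it.
 */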
static void send_sigurg_to_task(struct task_struct *p,
                                struct fown_struct *fown)
{
        if (sigio_perm(p, fown))
                send_group_sig_info(SIGURG, SEND_SIG_PRIV, p);
}

int send_sigurg(struct fown_struct *fown)
{
        struct task_struct *p;
        int pid, ret = 0;

        read_lock(&fown->lock);
        pid = fown->pid;
        if (!pid)
                goto out_unlock_fown;

        ret = 1;

        read_lock(&tasklist_lock);
        if (pid > 0) {
                p = find_task_by_pid(pid);
                if (p) {
                        send_sigurg_to_task(p, fown);
                }
        } else {
                struct list_head *l;
                struct pid *pidptr;
                for_each_task_pid(-pid, PIDTYPE_PGID, p, l, pidptr) {
                        send_sigurg_to_task(p, fown);
                }
        }
        read_unlock(&tasklist_lock);
out_unlock_fown:
        read_unlock(&fown->lock);
        return ret;
}

static rwlock_t fasync_lock = RW_LOCK_UNLOCKED;
static kmem_cache_t *fasync_cache;

/*
 * fasync_helper() is used by some character device drivers (mainly mice)
 * to set up the fasync queue. It returns negative on error, 0 if it did
 * no changes and positive if it added/deleted the entry.
 */
int fasync_helper(int fd, struct file * filp, int on, struct fasync_struct **fapp)
{
        struct fasync_struct *fa, **fp;
        struct fasync_struct *new = NULL;
        int result = 0;

        if (on) {
                new = kmem_cache_alloc(fasync_cache, SLAB_KERNEL);
                if (!new)
                        return -ENOMEM;
        }
        write_lock_irq(&fasync_lock);
        for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) {
                if (fa->fa_file == filp) {
                        if (on) {
                                fa->fa_fd = fd;
                                kmem_cache_free(fasync_cache, new);
                        } else {
                                *fp = fa->fa_next;
                                kmem_cache_free(fasync_cache, fa);
                                result = 1;
                        }
                        goto out;
                }
        }

        if (on) {
                new->magic = FASYNC_MAGIC;
                new->fa_file = filp;
                new->fa_fd = fd;
                new->fa_next = *fapp;
                *fapp = new;
                result = 1;
        }
out:
        write_unlock_irq(&fasync_lock);
        return result;
}

EXPORT_SYMBOL(fasync_helper);
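
/*
 * Walk a fasync list and raise SIGIO (or the owner's chosen realtime
 * signal) for each entry.  kill_fasync() below takes fasync_lock for
 * read before walking; other callers must provide their own locking.
 */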
void __kill_fasync(struct fasync_struct *fa, int sig, int band)
{
        while (fa) {
                struct fown_struct * fown;
                if (fa->magic != FASYNC_MAGIC) {
                        printk(KERN_ERR "kill_fasync: bad magic number in "
                               "fasync_struct!\n");
                        return;
                }
                fown = &fa->fa_file->f_owner;
                /* Don't send SIGURG to processes which have not set a
                   queued signum: SIGURG has its own default signalling
                   mechanism. */
                if (!(sig == SIGURG && fown->signum == 0))
                        send_sigio(fown, fa->fa_fd, band);
                fa = fa->fa_next;
        }
}

EXPORT_SYMBOL(__kill_fasync);

void kill_fasync(struct fasync_struct **fp, int sig, int band)
{
        /* First a quick test without locking: usually
         * the list is empty.
         */
        if (*fp) {
                read_lock(&fasync_lock);
                /* reread *fp after obtaining the lock */
                __kill_fasync(*fp, sig, band);
                read_unlock(&fasync_lock);
        }
}

EXPORT_SYMBOL(kill_fasync);
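
/* Create the slab cache for fasync entries at boot. */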
static int __init fasync_init(void)
{
        fasync_cache = kmem_cache_create("fasync_cache",
                sizeof(struct fasync_struct), 0, 0, NULL, NULL);
        if (!fasync_cache)
                panic("cannot create fasync slab cache");
        return 0;
}

module_init(fasync_init)