/*
 * Copyright (C) 1991, 1992  Linus Torvalds
 */
8 #include <linux/file.h>
9 #include <linux/smp_lock.h>
10 #include <linux/slab.h>
13 #include <asm/siginfo.h>
14 #include <asm/uaccess.h>
16 extern int sock_fcntl (struct file
*, unsigned int cmd
, unsigned long arg
);
18 /* Expand files. Return <0 on error; 0 nothing done; 1 files expanded,
19 * we may have blocked.
21 * Should be called with the files->file_lock spinlock held for write.
23 static int expand_files(struct files_struct
*files
, int nr
)
27 printk (KERN_ERR __FUNCTION__
" %d: nr = %d\n", current
->pid
, nr
);
30 if (nr
>= files
->max_fdset
) {
32 if ((err
= expand_fdset(files
, nr
)))
35 if (nr
>= files
->max_fds
) {
37 if ((err
= expand_fd_array(files
, nr
)))
44 printk (KERN_ERR __FUNCTION__
" %d: return %d\n", current
->pid
, err
);
50 * locate_fd finds a free file descriptor in the open_fds fdset,
51 * expanding the fd arrays if necessary. The files write lock will be
52 * held on exit to ensure that the fd can be entered atomically.
55 static int locate_fd(struct files_struct
*files
,
56 struct file
*file
, int orig_start
)
62 write_lock(&files
->file_lock
);
66 * Someone might have closed fd's in the range
67 * orig_start..files->next_fd
70 if (start
< files
->next_fd
)
71 start
= files
->next_fd
;
74 if (start
< files
->max_fdset
) {
75 newfd
= find_next_zero_bit(files
->open_fds
->fds_bits
,
76 files
->max_fdset
, start
);
80 if (newfd
>= current
->rlim
[RLIMIT_NOFILE
].rlim_cur
)
83 error
= expand_files(files
, newfd
);
88 * If we needed to expand the fs array we
89 * might have blocked - try again.
94 if (start
<= files
->next_fd
)
95 files
->next_fd
= newfd
+ 1;
103 static inline void allocate_fd(struct files_struct
*files
,
104 struct file
*file
, int fd
)
106 FD_SET(fd
, files
->open_fds
);
107 FD_CLR(fd
, files
->close_on_exec
);
108 write_unlock(&files
->file_lock
);
109 fd_install(fd
, file
);
112 static int dupfd(struct file
*file
, int start
)
114 struct files_struct
* files
= current
->files
;
117 ret
= locate_fd(files
, file
, start
);
120 allocate_fd(files
, file
, ret
);
124 write_unlock(&files
->file_lock
);
129 asmlinkage
long sys_dup2(unsigned int oldfd
, unsigned int newfd
)
133 struct files_struct
* files
= current
->files
;
135 write_lock(¤t
->files
->file_lock
);
136 if (!(file
= fcheck(oldfd
)))
142 if (newfd
>= current
->rlim
[RLIMIT_NOFILE
].rlim_cur
)
144 get_file(file
); /* We are now finished with oldfd */
146 err
= expand_files(files
, newfd
);
148 write_unlock(&files
->file_lock
);
153 /* To avoid races with open() and dup(), we will mark the fd as
154 * in-use in the open-file bitmap throughout the entire dup2()
155 * process. This is quite safe: do_close() uses the fd array
156 * entry, not the bitmap, to decide what work needs to be
158 FD_SET(newfd
, files
->open_fds
);
159 write_unlock(&files
->file_lock
);
163 write_lock(&files
->file_lock
);
164 allocate_fd(files
, file
, newfd
);
170 write_unlock(¤t
->files
->file_lock
);
174 asmlinkage
long sys_dup(unsigned int fildes
)
177 struct file
* file
= fget(fildes
);
180 ret
= dupfd(file
, 0);
184 #define SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | FASYNC)
186 static int setfl(int fd
, struct file
* filp
, unsigned long arg
)
188 struct inode
* inode
= filp
->f_dentry
->d_inode
;
191 * In the case of an append-only file, O_APPEND
194 if (!(arg
& O_APPEND
) && IS_APPEND(inode
))
197 /* Did FASYNC state change? */
198 if ((arg
^ filp
->f_flags
) & FASYNC
) {
199 if (filp
->f_op
&& filp
->f_op
->fasync
)
200 filp
->f_op
->fasync(fd
, filp
, (arg
& FASYNC
) != 0);
203 /* required for strict SunOS emulation */
204 if (O_NONBLOCK
!= O_NDELAY
)
208 filp
->f_flags
= (arg
& SETFL_MASK
) | (filp
->f_flags
& ~SETFL_MASK
);
212 asmlinkage
long sys_fcntl(unsigned int fd
, unsigned int cmd
, unsigned long arg
)
227 err
= dupfd(filp
, arg
);
231 err
= FD_ISSET(fd
, current
->files
->close_on_exec
);
235 FD_SET(fd
, current
->files
->close_on_exec
);
237 FD_CLR(fd
, current
->files
->close_on_exec
);
243 err
= setfl(fd
, filp
, arg
);
246 err
= fcntl_getlk(fd
, (struct flock
*) arg
);
249 err
= fcntl_setlk(fd
, cmd
, (struct flock
*) arg
);
252 err
= fcntl_setlk(fd
, cmd
, (struct flock
*) arg
);
256 * XXX If f_owner is a process group, the
257 * negative return value will get converted
258 * into an error. Oops. If we keep the
259 * current syscall conventions, the only way
260 * to fix this will be in libc.
262 err
= filp
->f_owner
.pid
;
265 filp
->f_owner
.pid
= arg
;
266 filp
->f_owner
.uid
= current
->uid
;
267 filp
->f_owner
.euid
= current
->euid
;
268 if (S_ISSOCK (filp
->f_dentry
->d_inode
->i_mode
))
269 err
= sock_fcntl (filp
, F_SETOWN
, arg
);
272 err
= filp
->f_owner
.signum
;
275 /* arg == 0 restores default behaviour. */
276 if (arg
< 0 || arg
> _NSIG
) {
281 filp
->f_owner
.signum
= arg
;
284 /* sockets need a few special fcntls. */
286 if (S_ISSOCK (filp
->f_dentry
->d_inode
->i_mode
))
287 err
= sock_fcntl (filp
, cmd
, arg
);
296 /* Table to convert sigio signal codes into poll band bitmaps */
298 static long band_table
[NSIGPOLL
] = {
299 POLLIN
| POLLRDNORM
, /* POLL_IN */
300 POLLOUT
| POLLWRNORM
| POLLWRBAND
, /* POLL_OUT */
301 POLLIN
| POLLRDNORM
| POLLMSG
, /* POLL_MSG */
302 POLLERR
, /* POLL_ERR */
303 POLLPRI
| POLLRDBAND
, /* POLL_PRI */
304 POLLHUP
| POLLERR
/* POLL_HUP */
307 static void send_sigio_to_task(struct task_struct
*p
,
308 struct fown_struct
*fown
,
309 struct fasync_struct
*fa
,
312 if ((fown
->euid
!= 0) &&
313 (fown
->euid
^ p
->suid
) && (fown
->euid
^ p
->uid
) &&
314 (fown
->uid
^ p
->suid
) && (fown
->uid
^ p
->uid
))
316 switch (fown
->signum
) {
319 /* Queue a rt signal with the appropriate fd as its
320 value. We use SI_SIGIO as the source, not
321 SI_KERNEL, since kernel signals always get
322 delivered even if we can't queue. Failure to
323 queue in this case _should_ be reported; we fall
324 back to SIGIO in that case. --sct */
325 si
.si_signo
= fown
->signum
;
328 /* Make sure we are called with one of the POLL_*
329 reasons, otherwise we could leak kernel stack into
331 if ((reason
& __SI_MASK
) != __SI_POLL
)
333 if (reason
- POLL_IN
> NSIGPOLL
)
336 si
.si_band
= band_table
[reason
- POLL_IN
];
337 si
.si_fd
= fa
->fa_fd
;
338 if (!send_sig_info(fown
->signum
, &si
, p
))
340 /* fall-through: fall back on the old plain SIGIO signal */
342 send_sig(SIGIO
, p
, 1);
346 static void send_sigio(struct fown_struct
*fown
, struct fasync_struct
*fa
,
349 struct task_struct
* p
;
352 read_lock(&tasklist_lock
);
353 if ( (pid
> 0) && (p
= find_task_by_pid(pid
)) ) {
354 send_sigio_to_task(p
, fown
, fa
, band
);
363 send_sigio_to_task(p
, fown
, fa
, band
);
366 read_unlock(&tasklist_lock
);
370 * fasync_helper() is used by some character device drivers (mainly mice)
371 * to set up the fasync queue. It returns negative on error, 0 if it did
372 * no changes and positive if it added/deleted the entry.
374 static rwlock_t fasync_lock
= RW_LOCK_UNLOCKED
;
375 int fasync_helper(int fd
, struct file
* filp
, int on
, struct fasync_struct
**fapp
)
377 struct fasync_struct
*fa
, **fp
;
378 struct fasync_struct
*new = NULL
;
382 new = kmalloc(sizeof(struct fasync_struct
), GFP_KERNEL
);
386 write_lock_irq(&fasync_lock
);
387 for (fp
= fapp
; (fa
= *fp
) != NULL
; fp
= &fa
->fa_next
) {
388 if (fa
->fa_file
== filp
) {
402 new->magic
= FASYNC_MAGIC
;
405 new->fa_next
= *fapp
;
410 write_unlock_irq(&fasync_lock
);
414 void __kill_fasync(struct fasync_struct
*fa
, int sig
, int band
)
417 struct fown_struct
* fown
;
418 if (fa
->magic
!= FASYNC_MAGIC
) {
419 printk("kill_fasync: bad magic number in "
423 fown
= &fa
->fa_file
->f_owner
;
424 /* Don't send SIGURG to processes which have not set a
425 queued signum: SIGURG has its own default signalling
427 if (fown
->pid
&& !(sig
== SIGURG
&& fown
->signum
== 0))
428 send_sigio(fown
, fa
, band
);
433 void kill_fasync(struct fasync_struct
**fp
, int sig
, int band
)
435 read_lock(&fasync_lock
);
436 __kill_fasync(*fp
, sig
, band
);
437 read_unlock(&fasync_lock
);