/*
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/kern_fp.c,v 1.20 2007/01/12 06:06:57 dillon Exp $
 */
/*
 * Direct file pointer API functions for in-kernel operations on files.  These
 * functions provide an open/read/write/close-like interface within the kernel
 * for operating on files that are not necessarily associated with processes
 * and which do not (typically) have descriptors.
 *
 * FUTURE: file handle conversion routines to support checkpointing,
 * and additional file operations (ioctl, fcntl).
 */
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/sysproto.h>
#include <sys/conf.h>
#include <sys/filedesc.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>
#include <sys/proc.h>
#include <sys/nlookup.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/filio.h>
#include <sys/fcntl.h>
#include <sys/unistd.h>
#include <sys/resourcevar.h>
#include <sys/event.h>
#include <sys/mman.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>

#include <sys/file2.h>
#include <machine/limits.h>
typedef struct file *file_t;
/*
 * fp_open:
 *
 *	Open a file as specified.  Use O_* flags for flags.
 *
 *	NOTE! O_ROOTCRED not quite working yet, vn_open() asserts that the
 *	cred must match the process's cred. XXX
 *
 *	NOTE! when fp_open() is called from a pure thread, root creds are
 *	used.
 */
int
fp_open(const char *path, int flags, int mode, file_t *fpp)
{
    struct nlookupdata nd;
    struct thread *td;
    struct file *fp;
    int error;

    if ((error = falloc(NULL, fpp, NULL)) != 0)
        return (error);
    fp = *fpp;
    td = curthread;
    if (td->td_proc) {
        if ((flags & O_ROOTCRED) == 0)
            fsetcred(fp, td->td_proc->p_ucred);
    }
    error = nlookup_init(&nd, path, UIO_SYSSPACE, NLC_LOCKVP);
    flags = FFLAGS(flags);
    if (error == 0)
        error = vn_open(&nd, fp, flags, mode);
    nlookup_done(&nd);
    if (error) {
        fdrop(fp);
        *fpp = NULL;
    }
    return(error);
}
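
/*
 * Usage example (illustrative sketch, kept out of the build with #if 0):
 * a hypothetical in-kernel consumer that opens a file by path, appends a
 * kernel-space buffer, and drops the file pointer.  The path, helper name
 * and flag combination below are assumptions, not part of this API.
 */
#if 0
static int
fp_example_append(const char *msg, size_t len)
{
    file_t fp;
    ssize_t res;
    int error;

    /* O_ROOTCRED requests root credentials instead of the caller's */
    error = fp_open("/var/log/example.log",
                    O_WRONLY | O_APPEND | O_CREAT | O_ROOTCRED, 0644, &fp);
    if (error)
        return (error);
    /* msg is kernel memory, so request a UIO_SYSSPACE transfer */
    error = fp_write(fp, __DECONST(void *, msg), len, &res, UIO_SYSSPACE);
    fp_close(fp);
    return (error);
}
#endif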
/*
 * fp_vpopen(): convert a vnode to a file pointer, call VOP_OPEN() on the
 * vnode.  The vnode must be refd and locked.
 *
 * On success the vnode's ref is inherited by the file pointer and the caller
 * should not vrele() it, and the vnode is unlocked.
 *
 * On failure the vnode remains locked and refd and the caller is responsible
 * for vput()ing it.
 */
int
fp_vpopen(struct vnode *vp, int flags, file_t *fpp)
{
    struct thread *td;
    struct file *fp;
    int vmode;
    int error;

    td = curthread;

    /*
     * Vnode checks (from vn_open())
     */
    if (vp->v_type == VLNK) {
        error = EMLINK;
        goto bad2;
    }
    if (vp->v_type == VSOCK) {
        error = EOPNOTSUPP;
        goto bad2;
    }
    flags = FFLAGS(flags);
    vmode = 0;
    if (flags & (FWRITE | O_TRUNC)) {
        if (vp->v_type == VDIR) {
            error = EISDIR;
            goto bad2;
        }
        error = vn_writechk(vp, NULL);
        if (error)
            goto bad2;
        vmode |= VWRITE;
    }
    if (flags & FREAD)
        vmode |= VREAD;
    if (vmode) {
        error = VOP_ACCESS(vp, vmode, td->td_proc->p_ucred);
        if (error)
            goto bad2;
    }

    /*
     * File pointer setup
     */
    if ((error = falloc(NULL, fpp, NULL)) != 0)
        goto bad2;
    fp = *fpp;
    if ((flags & O_ROOTCRED) == 0 && td->td_proc)
        fsetcred(fp, td->td_proc->p_ucred);

    error = VOP_OPEN(vp, flags, td->td_proc->p_ucred, fp);
    if (error)
        goto bad1;

    vput(vp);
    return (0);
bad1:
    fp->f_ops = &badfileops;    /* open failed, don't close */
    fp->f_data = NULL;
    fdrop(fp);
    /* leave the vnode intact, but fall through and unlock it anyway */
bad2:
    *fpp = NULL;
    return (error);
}
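
/*
 * Usage example (illustrative sketch, not compiled): the ref/lock contract
 * of fp_vpopen().  The caller supplies a ref'd, locked vnode; on success the
 * file pointer inherits the ref and the vnode is unlocked, on failure the
 * caller still owns both and must vput().  The helper name is hypothetical.
 */
#if 0
static int
fp_example_vpopen(struct vnode *vp, file_t *fpp)
{
    int error;

    /* vp is assumed ref'd and locked on entry */
    error = fp_vpopen(vp, O_RDONLY, fpp);
    if (error)
        vput(vp);       /* failure: the ref and lock are still ours */
    /* on success, do not vrele()/vput() vp; the file pointer owns it */
    return (error);
}
#endif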
/*
 * fp_*read() is meant to operate like the normal descriptor based syscalls
 * would.  Note that if 'buf' points to user memory a UIO_USERSPACE
 * transfer will be used.
 */
int
fp_pread(file_t fp, void *buf, size_t nbytes, off_t offset, ssize_t *res,
         enum uio_seg seg)
{
    struct uio auio;
    struct iovec aiov;
    size_t count;
    int error;

    if (res)
        *res = 0;
    if (nbytes > INT_MAX)
        return (EINVAL);
    bzero(&auio, sizeof(auio));
    aiov.iov_base = (caddr_t)buf;
    aiov.iov_len = nbytes;
    auio.uio_iov = &aiov;
    auio.uio_iovcnt = 1;
    auio.uio_offset = offset;
    auio.uio_resid = nbytes;
    auio.uio_rw = UIO_READ;
    auio.uio_segflg = seg;
    auio.uio_td = curthread;

    count = nbytes;
    error = fo_read(fp, &auio, fp->f_cred, O_FOFFSET);
    if (error) {
        if (auio.uio_resid != nbytes &&
            (error == ERESTART || error == EINTR || error == EWOULDBLOCK)) {
            error = 0;
        }
    }
    count -= auio.uio_resid;
    if (res)
        *res = count;
    return(error);
}
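
/*
 * Usage example (illustrative sketch, not compiled): a positioned read.
 * fp_pread() passes O_FOFFSET to fo_read(), so the transfer happens at the
 * explicit offset and the file pointer's own seek position is left alone.
 * The offset and the short-read policy below are assumptions.
 */
#if 0
static int
fp_example_pread(file_t fp, void *buf, size_t len)
{
    ssize_t res;
    int error;

    error = fp_pread(fp, buf, len, 8192, &res, UIO_SYSSPACE);
    if (error == 0 && res != (ssize_t)len)
        error = EINVAL;         /* caller treats a short read as fatal */
    return (error);
}
#endif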
int
fp_read(file_t fp, void *buf, size_t nbytes, ssize_t *res, int all,
        enum uio_seg seg)
{
    struct uio auio;
    struct iovec aiov;
    int error;
    int lastresid;

    if (res)
        *res = 0;
    if (nbytes > INT_MAX)
        return (EINVAL);
    bzero(&auio, sizeof(auio));
    aiov.iov_base = (caddr_t)buf;
    aiov.iov_len = nbytes;
    auio.uio_iov = &aiov;
    auio.uio_iovcnt = 1;
    auio.uio_offset = 0;
    auio.uio_resid = nbytes;
    auio.uio_rw = UIO_READ;
    auio.uio_segflg = seg;
    auio.uio_td = curthread;

    /*
     * If all is false call fo_read() once.
     * If all is true we attempt to read the entire request.  We have to
     * break out of the loop if an unrecoverable error or EOF occurs.
     */
    do {
        lastresid = auio.uio_resid;
        error = fo_read(fp, &auio, fp->f_cred, 0);
    } while (all && auio.uio_resid &&
             ((error == 0 && auio.uio_resid != lastresid) ||
              error == ERESTART || error == EINTR));
    if (all && error == 0 && auio.uio_resid)
        error = ESPIPE;

    /*
     * If an error occurred but some data was read, silently forget the
     * error.  However, if this is a non-blocking descriptor and 'all'
     * was specified, return an error even if some data was read (this
     * is considered a bug in the caller for using an illegal combination
     * of 'all' and a non-blocking descriptor).
     */
    if (error) {
        if (auio.uio_resid != nbytes) {
            if (error == ERESTART || error == EINTR)
                error = 0;
            if (error == EWOULDBLOCK && all == 0)
                error = 0;
        }
    }
    if (res)
        *res = nbytes - auio.uio_resid;
    return(error);
}
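
/*
 * Usage example (illustrative sketch, not compiled): the 'all' argument.
 * With all = 1 fp_read() loops until the request is filled and turns a
 * short read (EOF) into ESPIPE; with all = 0 a single fo_read() is issued
 * and a short count is simply reported via *res.  The header structure is
 * hypothetical.
 */
#if 0
struct example_hdr {
    int magic;
    int version;
};

static int
fp_example_read_header(file_t fp, struct example_hdr *hdr)
{
    ssize_t res;

    /* demand the whole header; hitting EOF partway through yields ESPIPE */
    return (fp_read(fp, hdr, sizeof(*hdr), &res, 1, UIO_SYSSPACE));
}
#endif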
int
fp_pwrite(file_t fp, void *buf, size_t nbytes, off_t offset, ssize_t *res,
          enum uio_seg seg)
{
    struct uio auio;
    struct iovec aiov;
    size_t count;
    int error;

    if (res)
        *res = 0;
    if (nbytes > INT_MAX)
        return (EINVAL);
    bzero(&auio, sizeof(auio));
    aiov.iov_base = (caddr_t)buf;
    aiov.iov_len = nbytes;
    auio.uio_iov = &aiov;
    auio.uio_iovcnt = 1;
    auio.uio_offset = offset;
    auio.uio_resid = nbytes;
    auio.uio_rw = UIO_WRITE;
    auio.uio_segflg = seg;
    auio.uio_td = curthread;

    count = nbytes;
    error = fo_write(fp, &auio, fp->f_cred, O_FOFFSET);
    if (error) {
        if (auio.uio_resid != nbytes &&
            (error == ERESTART || error == EINTR || error == EWOULDBLOCK)) {
            error = 0;
        }
    }
    count -= auio.uio_resid;
    if (res)
        *res = count;
    return(error);
}
int
fp_write(file_t fp, void *buf, size_t nbytes, ssize_t *res, enum uio_seg seg)
{
    struct uio auio;
    struct iovec aiov;
    size_t count;
    int error;

    if (res)
        *res = 0;
    if (nbytes > INT_MAX)
        return (EINVAL);
    bzero(&auio, sizeof(auio));
    aiov.iov_base = (caddr_t)buf;
    aiov.iov_len = nbytes;
    auio.uio_iov = &aiov;
    auio.uio_iovcnt = 1;
    auio.uio_offset = 0;
    auio.uio_resid = nbytes;
    auio.uio_rw = UIO_WRITE;
    auio.uio_segflg = seg;
    auio.uio_td = curthread;

    count = nbytes;
    error = fo_write(fp, &auio, fp->f_cred, 0);
    if (error) {
        if (auio.uio_resid != nbytes &&
            (error == ERESTART || error == EINTR || error == EWOULDBLOCK)) {
            error = 0;
        }
    }
    count -= auio.uio_resid;
    if (res)
        *res = count;
    return(error);
}
int
fp_stat(file_t fp, struct stat *ub)
{
    int error;

    error = fo_stat(fp, ub, fp->f_cred);
    return(error);
}
/*
 * fp_mmap(): non-anonymous, non-stack descriptor mappings only!
 *
 * This routine was mostly snarfed from vm/vm_mmap.c
 */
int
fp_mmap(void *addr_arg, size_t size, int prot, int flags, struct file *fp,
        off_t pos, void **resp)
{
    struct thread *td = curthread;
    struct proc *p = td->td_proc;
    vm_size_t pageoff;
    vm_prot_t maxprot;
    vm_offset_t addr;
    void *handle;
    int error;
    vm_object_t obj;
    struct vmspace *vms = p->p_vmspace;
    struct vnode *vp;
    int disablexworkaround;

    prot &= VM_PROT_ALL;

    if ((ssize_t)size < 0 || (flags & MAP_ANON))
        return(EINVAL);

    pageoff = (pos & PAGE_MASK);
    pos -= pageoff;

    /* Adjust size for rounding (on both ends). */
    size += pageoff;                            /* low end... */
    size = (vm_size_t)round_page(size);         /* hi end */
    addr = (vm_offset_t)addr_arg;

    /*
     * Check for illegal addresses.  Watch out for address wrap... Note
     * that VM_*_ADDRESS are not constants due to casts (argh).
     */
    if (flags & MAP_FIXED) {
        /*
         * The specified address must have the same remainder
         * as the file offset taken modulo PAGE_SIZE, so it
         * should be aligned after adjustment by pageoff.
         */
        addr -= pageoff;
        if (addr & PAGE_MASK)
            return (EINVAL);
        /* Address range must be all in user VM space. */
        if (VM_MAX_USER_ADDRESS > 0 && addr + size > VM_MAX_USER_ADDRESS)
            return (EINVAL);
        if (VM_MIN_USER_ADDRESS > 0 && addr < VM_MIN_USER_ADDRESS)
            return (EINVAL);
        if (addr + size < addr)
            return (EINVAL);
    } else if (addr == 0 ||
               (addr >= round_page((vm_offset_t)vms->vm_taddr) &&
                addr < round_page((vm_offset_t)vms->vm_daddr + maxdsiz))) {
        /*
         * XXX for non-fixed mappings where no hint is provided or
         * the hint would fall in the potential heap space,
         * place it after the end of the largest possible heap.
         *
         * There should really be a pmap call to determine a reasonable
         * location.
         */
        addr = round_page((vm_offset_t)vms->vm_daddr + maxdsiz);
    }

    /*
     * We are mapping a file.  Obtain the vnode and make sure it is of
     * the appropriate type.
     */
    if (fp->f_type != DTYPE_VNODE)
        return (EINVAL);

    /*
     * POSIX shared-memory objects are defined to have
     * kernel persistence, and are not defined to support
     * read(2)/write(2) -- or even open(2).  Thus, we can
     * use MAP_ASYNC to trade on-disk coherence for speed.
     * The shm_open(3) library routine turns on the FPOSIXSHM
     * flag to request this behavior.
     */
    if (fp->f_flag & FPOSIXSHM)
        flags |= MAP_NOSYNC;
    vp = (struct vnode *) fp->f_data;
    if (vp->v_type != VREG && vp->v_type != VCHR)
        return (EINVAL);

    /*
     * Get the proper underlying object
     */
    if (vp->v_type == VREG) {
        if ((obj = vp->v_object) == NULL)
            return (EINVAL);
        KKASSERT(vp == (struct vnode *)obj->handle);
    }

    /*
     * XXX hack to handle use of /dev/zero to map anon memory (ala
     * SunOS).
     */
    if (vp->v_type == VCHR && iszerodev(vp->v_rdev)) {
        handle = NULL;
        maxprot = VM_PROT_ALL;
        flags |= MAP_ANON;
        pos = 0;
    } else {
        /*
         * cdevs do not provide private mappings of any kind.
         *
         * However, for the XIG X server to continue to work,
         * we should allow the superuser to do it anyway.
         * We only allow it at securelevel < 1.
         * (Because the XIG X server writes directly to video
         * memory via /dev/mem, it should never work at any
         * other securelevel.)
         * XXX this will have to go
         */
        if (securelevel >= 1)
            disablexworkaround = 1;
        else
            disablexworkaround = suser(td);
        if (vp->v_type == VCHR && disablexworkaround &&
            (flags & (MAP_PRIVATE|MAP_COPY))) {
            error = EINVAL;
            goto done;
        }
        /*
         * Ensure that file and memory protections are
         * compatible.  Note that we only worry about
         * writability if mapping is shared; in this case,
         * current and max prot are dictated by the open file.
         * XXX use the vnode instead?  Problem is: what
         * credentials do we use for determination?  What if
         * proc does a setuid?
         */
        maxprot = VM_PROT_EXECUTE;      /* ??? */
        if (fp->f_flag & FREAD) {
            maxprot |= VM_PROT_READ;
        } else if (prot & PROT_READ) {
            error = EACCES;
            goto done;
        }
        /*
         * If we are sharing potential changes (either via
         * MAP_SHARED or via the implicit sharing of character
         * device mappings), and we are trying to get write
         * permission although we opened it without asking
         * for it, bail out.  Check for superuser, only if
         * we're at securelevel < 1, to allow the XIG X server
         * to continue to work.
         */
        if ((flags & MAP_SHARED) != 0 ||
            (vp->v_type == VCHR && disablexworkaround)) {
            if ((fp->f_flag & FWRITE) != 0) {
                struct vattr va;

                if ((error = VOP_GETATTR(vp, &va)) != 0)
                    goto done;
                if ((va.va_flags & (IMMUTABLE|APPEND)) == 0) {
                    maxprot |= VM_PROT_WRITE;
                } else if (prot & PROT_WRITE) {
                    error = EPERM;
                    goto done;
                }
            } else if ((prot & PROT_WRITE) != 0) {
                error = EACCES;
                goto done;
            }
        } else {
            maxprot |= VM_PROT_WRITE;
        }
        handle = (void *)vp;
    }
    error = vm_mmap(&vms->vm_map, &addr, size, prot,
                    maxprot, flags, handle, pos);
    if (error == 0 && addr_arg)
        *resp = (void *)addr;
done:
    return (error);
}
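
/*
 * Usage example (illustrative sketch, not compiled): map the first 64KB of
 * an already-opened regular file read-only and shared into the current
 * process's address space.  fp_mmap() only handles vnode-backed mappings
 * and dereferences td_proc->p_vmspace, so it must be called from a thread
 * with a process context.  The size, hint and protections are assumptions.
 */
#if 0
static int
fp_example_mmap(struct file *fp, void **basep)
{
    /*
     * Note: fp_mmap() only writes *resp when addr_arg is non-NULL, so a
     * dummy non-NULL hint is passed here to get the chosen address back.
     */
    return (fp_mmap((void *)PAGE_SIZE, 65536, PROT_READ, MAP_SHARED,
                    fp, 0, basep));
}
#endif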
int
fp_close(file_t fp)
{
    return(fdrop(fp));
}
int
fp_shutdown(file_t fp, int how)
{
    return(fo_shutdown(fp, how));
}