/*
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/kern_fp.c,v 1.20 2007/01/12 06:06:57 dillon Exp $
 */

/*
 * Direct file pointer API functions for in-kernel operations on files.  These
 * functions provide an open/read/write/close-like interface within the kernel
 * for operating on files that are not necessarily associated with processes
 * and which do not (typically) have descriptors.
 *
 * FUTURE: file handle conversion routines to support checkpointing,
 * and additional file operations (ioctl, fcntl).
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/sysproto.h>
#include <sys/conf.h>
#include <sys/filedesc.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/nlookup.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/filio.h>
#include <sys/fcntl.h>
#include <sys/unistd.h>
#include <sys/resourcevar.h>
#include <sys/event.h>
#include <sys/mman.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>

#include <sys/file2.h>
#include <machine/limits.h>

typedef struct file *file_t;

/*
 * fp_open:
 *
 *	Open a file as specified.  Use O_* flags for flags.
 *
 *	NOTE! O_ROOTCRED not quite working yet, vn_open() asserts that the
 *	cred must match the process's cred. XXX
 *
 *	NOTE! when fp_open() is called from a pure thread, root creds are
 *	used.
 */
int
fp_open(const char *path, int flags, int mode, file_t *fpp)
{
	struct nlookupdata nd;
	struct thread *td;
	struct file *fp;
	int error;

	if ((error = falloc(NULL, fpp, NULL)) != 0)
		return (error);
	fp = *fpp;
	td = curthread;
	if (td->td_proc) {
		if ((flags & O_ROOTCRED) == 0)
			fsetcred(fp, td->td_proc->p_ucred);
	}
	error = nlookup_init(&nd, path, UIO_SYSSPACE, NLC_LOCKVP);
	flags = FFLAGS(flags);
	if (error == 0)
		error = vn_open(&nd, fp, flags, mode);
	nlookup_done(&nd);
	if (error) {
		fdrop(fp);
		*fpp = NULL;
	}
	return (error);
}
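
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * a typical in-kernel caller opening a file by path, reading a little of
 * it, and closing it.  The path and buffer size are hypothetical; per the
 * NOTE above, a pure kernel thread (no associated process) operates with
 * root credentials.
 */
#if 0
static int
example_fp_open_close(void)
{
	struct file *fp;
	char buf[128];
	ssize_t res;
	int error;

	error = fp_open("/etc/motd", O_RDONLY, 0, &fp);
	if (error)
		return (error);
	error = fp_read(fp, buf, sizeof(buf), &res, 0, UIO_SYSSPACE);
	fp_close(fp);
	return (error);
}
#endif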

/*
 * fp_vpopen():	convert a vnode to a file pointer and call VOP_OPEN() on
 * the vnode.  The vnode must be refd and locked.
 *
 * On success the vnode's ref is inherited by the file pointer and the caller
 * should not vrele() it, and the vnode is unlocked.
 *
 * On failure the vnode remains locked and refd and the caller is responsible
 * for vput()ing it.
 */
int
fp_vpopen(struct vnode *vp, int flags, file_t *fpp)
{
	struct thread *td;
	struct file *fp;
	int vmode;
	int error;

	td = curthread;

	/*
	 * Vnode checks (from vn_open())
	 */
	if (vp->v_type == VLNK) {
		error = EMLINK;
		goto bad2;
	}
	if (vp->v_type == VSOCK) {
		error = EOPNOTSUPP;
		goto bad2;
	}
	flags = FFLAGS(flags);
	vmode = 0;
	if (flags & (FWRITE | O_TRUNC)) {
		if (vp->v_type == VDIR) {
			error = EISDIR;
			goto bad2;
		}
		error = vn_writechk(vp, NULL);
		if (error)
			goto bad2;
		vmode |= VWRITE;
	}
	if (flags & FREAD)
		vmode |= VREAD;
	if (vmode) {
		error = VOP_ACCESS(vp, vmode, td->td_proc->p_ucred);
		if (error)
			goto bad2;
	}

	/*
	 * File pointer setup
	 */
	if ((error = falloc(NULL, fpp, NULL)) != 0)
		goto bad2;
	fp = *fpp;
	if ((flags & O_ROOTCRED) == 0 && td->td_proc)
		fsetcred(fp, td->td_proc->p_ucred);

	error = VOP_OPEN(vp, flags, td->td_proc->p_ucred, fp);
	if (error)
		goto bad1;

	vput(vp);
	return (0);
bad1:
	fp->f_ops = &badfileops;	/* open failed, don't close */
	fp->f_data = NULL;
	fdrop(fp);
	/* leave the vnode intact, but fall through and unlock it anyway */
bad2:
	*fpp = NULL;
	return (error);
}
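
/*
 * Illustrative sketch (editor's addition): the ref/lock contract above,
 * seen from a hypothetical caller.  'vp' is assumed to already be ref'd
 * and locked.  On success the ref moves to the file pointer and the vnode
 * is unlocked; on failure the caller still owns the locked, ref'd vnode.
 */
#if 0
static int
example_fp_vpopen(struct vnode *vp, struct file **fpp)
{
	int error;

	error = fp_vpopen(vp, O_RDONLY, fpp);
	if (error)
		vput(vp);	/* failure: drop our lock+ref ourselves */
	return (error);		/* success: don't vrele(), fp owns the ref */
}
#endif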

/*
 * fp_*read() is meant to operate like the normal descriptor based syscalls
 * would.  Note that if 'buf' points to user memory a UIO_USERSPACE
 * transfer will be used.
 */
int
fp_pread(file_t fp, void *buf, size_t nbytes, off_t offset, ssize_t *res,
	 enum uio_seg seg)
{
	struct uio auio;
	struct iovec aiov;
	size_t count;
	int error;

	if (res)
		*res = 0;
	if (nbytes > LONG_MAX)
		return (EINVAL);
	bzero(&auio, sizeof(auio));
	aiov.iov_base = (caddr_t)buf;
	aiov.iov_len = nbytes;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = offset;
	auio.uio_resid = nbytes;
	auio.uio_rw = UIO_READ;
	auio.uio_segflg = seg;
	auio.uio_td = curthread;

	count = nbytes;
	error = fo_read(fp, &auio, fp->f_cred, O_FOFFSET);
	if (error) {
		if (auio.uio_resid != nbytes &&
		    (error == ERESTART || error == EINTR ||
		     error == EWOULDBLOCK)) {
			error = 0;
		}
	}
	count -= auio.uio_resid;
	if (res)
		*res = count;
	return (error);
}
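
/*
 * Illustrative sketch (editor's addition): positional read of a
 * hypothetical on-disk header at a fixed offset.  The file's seek
 * position is neither used nor modified (O_FOFFSET semantics above).
 */
#if 0
static int
example_fp_pread(struct file *fp, void *hdr, size_t hdrsize, off_t hdroff)
{
	ssize_t res;
	int error;

	error = fp_pread(fp, hdr, hdrsize, hdroff, &res, UIO_SYSSPACE);
	if (error == 0 && (size_t)res != hdrsize)
		error = EINVAL;		/* treat a short read as an error */
	return (error);
}
#endif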

int
fp_read(file_t fp, void *buf, size_t nbytes, ssize_t *res, int all,
	enum uio_seg seg)
{
	struct uio auio;
	struct iovec aiov;
	int error;
	int lastresid;

	if (res)
		*res = 0;
	if (nbytes > LONG_MAX)
		return (EINVAL);
	bzero(&auio, sizeof(auio));
	aiov.iov_base = (caddr_t)buf;
	aiov.iov_len = nbytes;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = 0;
	auio.uio_resid = nbytes;
	auio.uio_rw = UIO_READ;
	auio.uio_segflg = seg;
	auio.uio_td = curthread;

	/*
	 * If all is false call fo_read() once.
	 * If all is true we attempt to read the entire request.  We have to
	 * break out of the loop if an unrecoverable error or EOF occurs.
	 */
	do {
		lastresid = auio.uio_resid;
		error = fo_read(fp, &auio, fp->f_cred, 0);
	} while (all && auio.uio_resid &&
		 ((error == 0 && auio.uio_resid != lastresid) ||
		  error == ERESTART || error == EINTR));
	if (all && error == 0 && auio.uio_resid)
		error = ESPIPE;

	/*
	 * If an error occurred but some data was read, silently forget the
	 * error.  However, if this is a non-blocking descriptor and 'all'
	 * was specified, return an error even if some data was read (this
	 * is considered a bug in the caller for using an illegal combination
	 * of 'all' and a non-blocking descriptor).
	 */
	if (error) {
		if (auio.uio_resid != nbytes) {
			if (error == ERESTART || error == EINTR)
				error = 0;
			if (error == EWOULDBLOCK && all == 0)
				error = 0;
		}
	}
	if (res)
		*res = nbytes - auio.uio_resid;
	return (error);
}
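
/*
 * Illustrative sketch (editor's addition): using all=1 to demand the
 * full request.  A premature EOF surfaces as ESPIPE per the loop above,
 * so the caller needs no short-read check of its own.
 */
#if 0
static int
example_fp_read_all(struct file *fp, void *hdr, size_t hdrsize)
{
	ssize_t res;

	/* res == hdrsize on success; ESPIPE if the file is too short */
	return (fp_read(fp, hdr, hdrsize, &res, 1, UIO_SYSSPACE));
}
#endif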

int
fp_pwrite(file_t fp, void *buf, size_t nbytes, off_t offset, ssize_t *res,
	  enum uio_seg seg)
{
	struct uio auio;
	struct iovec aiov;
	size_t count;
	int error;

	if (res)
		*res = 0;
	if (nbytes > LONG_MAX)
		return (EINVAL);
	bzero(&auio, sizeof(auio));
	aiov.iov_base = (caddr_t)buf;
	aiov.iov_len = nbytes;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = offset;
	auio.uio_resid = nbytes;
	auio.uio_rw = UIO_WRITE;
	auio.uio_segflg = seg;
	auio.uio_td = curthread;

	count = nbytes;
	error = fo_write(fp, &auio, fp->f_cred, O_FOFFSET);
	if (error) {
		if (auio.uio_resid != nbytes &&
		    (error == ERESTART || error == EINTR ||
		     error == EWOULDBLOCK)) {
			error = 0;
		}
	}
	count -= auio.uio_resid;
	if (res)
		*res = count;
	return (error);
}

int
fp_write(file_t fp, void *buf, size_t nbytes, ssize_t *res, enum uio_seg seg)
{
	struct uio auio;
	struct iovec aiov;
	size_t count;
	int error;

	if (res)
		*res = 0;
	if (nbytes > LONG_MAX)
		return (EINVAL);
	bzero(&auio, sizeof(auio));
	aiov.iov_base = (caddr_t)buf;
	aiov.iov_len = nbytes;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = 0;
	auio.uio_resid = nbytes;
	auio.uio_rw = UIO_WRITE;
	auio.uio_segflg = seg;
	auio.uio_td = curthread;

	count = nbytes;
	error = fo_write(fp, &auio, fp->f_cred, 0);
	if (error) {
		if (auio.uio_resid != nbytes &&
		    (error == ERESTART || error == EINTR ||
		     error == EWOULDBLOCK)) {
			error = 0;
		}
	}
	count -= auio.uio_resid;
	if (res)
		*res = count;
	return (error);
}
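
/*
 * Illustrative sketch (editor's addition): create/truncate a file and
 * write a kernel-space buffer to it.  The path and mode are hypothetical.
 */
#if 0
static int
example_fp_write_file(void *data, size_t len)
{
	struct file *fp;
	ssize_t res;
	int error;

	error = fp_open("/var/run/example.out", O_WRONLY|O_CREAT|O_TRUNC,
			0644, &fp);
	if (error)
		return (error);
	error = fp_write(fp, data, len, &res, UIO_SYSSPACE);
	fp_close(fp);
	return (error);
}
#endif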

int
fp_stat(file_t fp, struct stat *ub)
{
	int error;

	error = fo_stat(fp, ub, fp->f_cred);
	return (error);
}
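
/*
 * Illustrative sketch (editor's addition): fetching a file's size via
 * fp_stat(), e.g. to size a buffer before an fp_read().
 */
#if 0
static int
example_fp_size(struct file *fp, off_t *sizep)
{
	struct stat st;
	int error;

	error = fp_stat(fp, &st);
	if (error == 0)
		*sizep = st.st_size;
	return (error);
}
#endif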

/*
 * non-anonymous, non-stack descriptor mappings only!
 *
 * This routine was mostly snarfed from vm/vm_mmap.c
 */
int
fp_mmap(void *addr_arg, size_t size, int prot, int flags, struct file *fp,
	off_t pos, void **resp)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	vm_size_t pageoff;
	vm_prot_t maxprot;
	vm_offset_t addr;
	void *handle;
	int error;
	vm_object_t obj;
	struct vmspace *vms = p->p_vmspace;
	struct vnode *vp;
	int disablexworkaround;

	prot &= VM_PROT_ALL;

	if ((ssize_t)size < 0 || (flags & MAP_ANON))
		return (EINVAL);

	pageoff = (pos & PAGE_MASK);
	pos -= pageoff;

	/* Adjust size for rounding (on both ends). */
	size += pageoff;			/* low end... */
	size = (vm_size_t)round_page(size);	/* hi end */
	addr = (vm_offset_t)addr_arg;

	/*
	 * Check for illegal addresses.  Watch out for address wrap... Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (flags & MAP_FIXED) {
		/*
		 * The specified address must have the same remainder
		 * as the file offset taken modulo PAGE_SIZE, so it
		 * should be aligned after adjustment by pageoff.
		 */
		addr -= pageoff;
		if (addr & PAGE_MASK)
			return (EINVAL);
		/* Address range must be all in user VM space. */
		if (VM_MAX_USER_ADDRESS > 0 && addr + size > VM_MAX_USER_ADDRESS)
			return (EINVAL);
		if (VM_MIN_USER_ADDRESS > 0 && addr < VM_MIN_USER_ADDRESS)
			return (EINVAL);
		if (addr + size < addr)
			return (EINVAL);
	} else if (addr == 0 ||
		   (addr >= round_page((vm_offset_t)vms->vm_taddr) &&
		    addr < round_page((vm_offset_t)vms->vm_daddr + maxdsiz))) {
		/*
		 * XXX for non-fixed mappings where no hint is provided or
		 * the hint would fall in the potential heap space,
		 * place it after the end of the largest possible heap.
		 *
		 * There should really be a pmap call to determine a reasonable
		 * location.
		 */
		addr = round_page((vm_offset_t)vms->vm_daddr + maxdsiz);
	}

	/*
	 * Mapping file, get fp for validation.  Obtain vnode and make
	 * sure it is of appropriate type.
	 */
	if (fp->f_type != DTYPE_VNODE)
		return (EINVAL);

	/*
	 * POSIX shared-memory objects are defined to have
	 * kernel persistence, and are not defined to support
	 * read(2)/write(2) -- or even open(2).  Thus, we can
	 * use MAP_ASYNC to trade on-disk coherence for speed.
	 * The shm_open(3) library routine turns on the FPOSIXSHM
	 * flag to request this behavior.
	 */
	if (fp->f_flag & FPOSIXSHM)
		flags |= MAP_NOSYNC;
	vp = (struct vnode *)fp->f_data;
	if (vp->v_type != VREG && vp->v_type != VCHR)
		return (EINVAL);

	/*
	 * Get the proper underlying object
	 */
	if (vp->v_type == VREG) {
		if ((obj = vp->v_object) == NULL)
			return (EINVAL);
		KKASSERT(vp == (struct vnode *)obj->handle);
	}

	/*
	 * XXX hack to handle use of /dev/zero to map anon memory (ala
	 * SunOS).
	 */
	if (vp->v_type == VCHR && iszerodev(vp->v_rdev)) {
		handle = NULL;
		maxprot = VM_PROT_ALL;
		flags |= MAP_ANON;
		pos = 0;
	} else {
		/*
		 * cdevs do not provide private mappings of any kind.
		 *
		 * However, for the XIG X server to continue to work,
		 * we should allow the superuser to do it anyway.
		 * We only allow it at securelevel < 1.
		 * (Because the XIG X server writes directly to video
		 * memory via /dev/mem, it should never work at any
		 * other securelevel.)
		 * XXX this will have to go
		 */
		if (securelevel >= 1)
			disablexworkaround = 1;
		else
			disablexworkaround = priv_check(td, PRIV_ROOT);
		if (vp->v_type == VCHR && disablexworkaround &&
		    (flags & (MAP_PRIVATE|MAP_COPY))) {
			error = EINVAL;
			goto done;
		}
		/*
		 * Ensure that file and memory protections are
		 * compatible.  Note that we only worry about
		 * writability if mapping is shared; in this case,
		 * current and max prot are dictated by the open file.
		 * XXX use the vnode instead?  Problem is: what
		 * credentials do we use for determination?  What if
		 * proc does a setuid?
		 */
		maxprot = VM_PROT_EXECUTE;	/* ??? */
		if (fp->f_flag & FREAD) {
			maxprot |= VM_PROT_READ;
		} else if (prot & PROT_READ) {
			error = EACCES;
			goto done;
		}

		/*
		 * If we are sharing potential changes (either via
		 * MAP_SHARED or via the implicit sharing of character
		 * device mappings), and we are trying to get write
		 * permission although we opened it without asking
		 * for it, bail out.  Check for superuser, only if
		 * we're at securelevel < 1, to allow the XIG X server
		 * to continue to work.
		 */
		if ((flags & MAP_SHARED) != 0 ||
		    (vp->v_type == VCHR && disablexworkaround)) {
			if ((fp->f_flag & FWRITE) != 0) {
				struct vattr va;

				if ((error = VOP_GETATTR(vp, &va)) != 0)
					goto done;
				if ((va.va_flags & (IMMUTABLE|APPEND)) == 0) {
					maxprot |= VM_PROT_WRITE;
				} else if (prot & PROT_WRITE) {
					error = EPERM;
					goto done;
				}
			} else if ((prot & PROT_WRITE) != 0) {
				error = EACCES;
				goto done;
			}
		} else {
			maxprot |= VM_PROT_WRITE;
		}
		handle = (void *)vp;
	}
	error = vm_mmap(&vms->vm_map, &addr, size, prot,
			maxprot, flags, handle, pos);
	if (error == 0 && addr_arg)
		*resp = (void *)addr;
done:
	return (error);
}
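
/*
 * Illustrative sketch (editor's addition): map the first page of an
 * already-opened regular file read-only into the current process's
 * address space.  A non-NULL hint is passed because fp_mmap() only
 * stores the result through 'resp' when 'addr_arg' is non-NULL.
 */
#if 0
static int
example_fp_mmap_page(struct file *fp, void **basep)
{
	void *hint = (void *)(uintptr_t)PAGE_SIZE;  /* arbitrary non-NULL hint */

	return (fp_mmap(hint, PAGE_SIZE, PROT_READ, MAP_SHARED, fp, 0, basep));
}
#endif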

int
fp_close(file_t fp)
{
	return (fdrop(fp));
}

int
fp_shutdown(file_t fp, int how)
{
	return (fo_shutdown(fp, how));
}
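
/*
 * Illustrative sketch (editor's addition): shutting down both directions
 * of a (socket-backed) file pointer before dropping the last reference.
 * SHUT_RDWR comes from <sys/socket.h>, which this file does not include.
 */
#if 0
static void
example_fp_shutdown_close(struct file *fp)
{
	fp_shutdown(fp, SHUT_RDWR);
	fp_close(fp);
}
#endif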