linprocfs - Introduce /proc/mounts
[dragonfly.git] / sys / kern / kern_subr.c
bloba8a2b8be516233ac4f1fc447f1a41575b7dfadd8
1 /*
2 * Copyright (c) 1982, 1986, 1991, 1993
3 * The Regents of the University of California. All rights reserved.
4 * (c) UNIX System Laboratories, Inc.
5 * All or some portions of this file are derived from material licensed
6 * to the University of California by American Telephone and Telegraph
7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8 * the permission of UNIX System Laboratories, Inc.
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the University of
21 * California, Berkeley and its contributors.
22 * 4. Neither the name of the University nor the names of its contributors
23 * may be used to endorse or promote products derived from this software
24 * without specific prior written permission.
26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * SUCH DAMAGE.
38 * @(#)kern_subr.c 8.3 (Berkeley) 1/21/94
39 * $FreeBSD: src/sys/kern/kern_subr.c,v 1.31.2.2 2002/04/21 08:09:37 bde Exp $
40 * $DragonFly: src/sys/kern/kern_subr.c,v 1.27 2007/01/29 20:44:02 tgen Exp $
43 #include "opt_ddb.h"
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/kernel.h>
48 #include <sys/proc.h>
49 #include <sys/malloc.h>
50 #include <sys/lock.h>
51 #include <sys/resourcevar.h>
52 #include <sys/sysctl.h>
53 #include <sys/uio.h>
54 #include <sys/vnode.h>
55 #include <sys/sfbuf.h>
56 #include <sys/thread2.h>
57 #include <machine/limits.h>
59 #include <vm/vm.h>
60 #include <vm/vm_page.h>
61 #include <vm/vm_map.h>
/*
 * Export the per-uio iovec element limit (sysconf(_SC_IOV_MAX) in
 * userland) as a read-only sysctl.
 */
SYSCTL_INT(_kern, KERN_IOV_MAX, iov_max, CTLFLAG_RD, NULL, UIO_MAXIOV,
	"Maximum number of elements in an I/O vector; sysconf(_SC_IOV_MAX)");
67 * UIO_READ: copy the kernelspace cp to the user or kernelspace UIO
68 * UIO_WRITE: copy the user or kernelspace UIO to the kernelspace cp
70 * For userspace UIO's, uio_td must be the current thread.
72 * The syscall interface is responsible for limiting the length to
73 * ssize_t for things like read() or write() which return the bytes
74 * read or written as ssize_t. These functions work with unsigned
75 * lengths.
77 int
78 uiomove(caddr_t cp, size_t n, struct uio *uio)
80 struct iovec *iov;
81 size_t cnt;
82 int error = 0;
83 int save = 0;
84 int baseticks = ticks;
86 KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
87 ("uiomove: mode"));
88 KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
89 ("uiomove proc"));
91 if (curproc) {
92 save = curproc->p_flag & P_DEADLKTREAT;
93 curproc->p_flag |= P_DEADLKTREAT;
96 while (n > 0 && uio->uio_resid) {
97 iov = uio->uio_iov;
98 cnt = iov->iov_len;
99 if (cnt == 0) {
100 uio->uio_iov++;
101 uio->uio_iovcnt--;
102 continue;
104 if (cnt > n)
105 cnt = n;
107 switch (uio->uio_segflg) {
109 case UIO_USERSPACE:
110 if (ticks - baseticks >= hogticks) {
111 uio_yield();
112 baseticks = ticks;
114 if (uio->uio_rw == UIO_READ)
115 error = copyout(cp, iov->iov_base, cnt);
116 else
117 error = copyin(iov->iov_base, cp, cnt);
118 if (error)
119 break;
120 break;
122 case UIO_SYSSPACE:
123 if (uio->uio_rw == UIO_READ)
124 bcopy((caddr_t)cp, iov->iov_base, cnt);
125 else
126 bcopy(iov->iov_base, (caddr_t)cp, cnt);
127 break;
128 case UIO_NOCOPY:
129 break;
131 iov->iov_base = (char *)iov->iov_base + cnt;
132 iov->iov_len -= cnt;
133 uio->uio_resid -= cnt;
134 uio->uio_offset += cnt;
135 cp += cnt;
136 n -= cnt;
138 if (curproc)
139 curproc->p_flag = (curproc->p_flag & ~P_DEADLKTREAT) | save;
140 return (error);
144 * Like uiomove() but copies zero-fill. Only allowed for UIO_READ,
145 * for obvious reasons.
148 uiomovez(size_t n, struct uio *uio)
150 struct iovec *iov;
151 size_t cnt;
152 int error = 0;
154 KASSERT(uio->uio_rw == UIO_READ, ("uiomovez: mode"));
155 KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
156 ("uiomove proc"));
158 while (n > 0 && uio->uio_resid) {
159 iov = uio->uio_iov;
160 cnt = iov->iov_len;
161 if (cnt == 0) {
162 uio->uio_iov++;
163 uio->uio_iovcnt--;
164 continue;
166 if (cnt > n)
167 cnt = n;
169 switch (uio->uio_segflg) {
170 case UIO_USERSPACE:
171 error = copyout(ZeroPage, iov->iov_base, cnt);
172 if (error)
173 break;
174 break;
175 case UIO_SYSSPACE:
176 bzero(iov->iov_base, cnt);
177 break;
178 case UIO_NOCOPY:
179 break;
181 iov->iov_base = (char *)iov->iov_base + cnt;
182 iov->iov_len -= cnt;
183 uio->uio_resid -= cnt;
184 uio->uio_offset += cnt;
185 n -= cnt;
187 return (error);
191 * Wrapper for uiomove() that validates the arguments against a known-good
192 * kernel buffer.
195 uiomove_frombuf(void *buf, size_t buflen, struct uio *uio)
197 size_t offset;
199 offset = (size_t)uio->uio_offset;
200 if ((off_t)offset != uio->uio_offset)
201 return (EINVAL);
202 if (buflen == 0 || offset >= buflen)
203 return (0);
204 return (uiomove((char *)buf + offset, buflen - offset, uio));
208 * Give next character to user as result of read.
211 ureadc(int c, struct uio *uio)
213 struct iovec *iov;
214 char *iov_base;
216 again:
217 if (uio->uio_iovcnt == 0 || uio->uio_resid == 0)
218 panic("ureadc");
219 iov = uio->uio_iov;
220 if (iov->iov_len == 0) {
221 uio->uio_iovcnt--;
222 uio->uio_iov++;
223 goto again;
225 switch (uio->uio_segflg) {
227 case UIO_USERSPACE:
228 if (subyte(iov->iov_base, c) < 0)
229 return (EFAULT);
230 break;
232 case UIO_SYSSPACE:
233 iov_base = iov->iov_base;
234 *iov_base = c;
235 iov->iov_base = iov_base;
236 break;
238 case UIO_NOCOPY:
239 break;
241 iov->iov_base = (char *)iov->iov_base + 1;
242 iov->iov_len--;
243 uio->uio_resid--;
244 uio->uio_offset++;
245 return (0);
249 * General routine to allocate a hash table. Make the hash table size a
250 * power of 2 greater or equal to the number of elements requested, and
251 * store the masking value in *hashmask.
253 void *
254 hashinit(int elements, struct malloc_type *type, u_long *hashmask)
256 long hashsize;
257 LIST_HEAD(generic, generic) *hashtbl;
258 int i;
260 if (elements <= 0)
261 panic("hashinit: bad elements");
262 for (hashsize = 2; hashsize < elements; hashsize <<= 1)
263 continue;
264 hashtbl = kmalloc((u_long)hashsize * sizeof(*hashtbl), type, M_WAITOK);
265 for (i = 0; i < hashsize; i++)
266 LIST_INIT(&hashtbl[i]);
267 *hashmask = hashsize - 1;
268 return (hashtbl);
272 * This is a newer version which allocates a hash table of structures.
274 * The returned array will be zero'd. The caller is responsible for
275 * initializing the structures.
277 void *
278 hashinit_ext(int elements, size_t size, struct malloc_type *type,
279 u_long *hashmask)
281 long hashsize;
282 void *hashtbl;
284 if (elements <= 0)
285 panic("hashinit: bad elements");
286 for (hashsize = 2; hashsize < elements; hashsize <<= 1)
287 continue;
288 hashtbl = kmalloc((size_t)hashsize * size, type, M_WAITOK | M_ZERO);
289 *hashmask = hashsize - 1;
290 return (hashtbl);
/*
 * Prime table used to size the prime-number hash tables below.
 * primes[0] is a placeholder; the search loops start at primes[1].
 */
static int primes[] = { 1, 13, 31, 61, 127, 251, 509, 761, 1021, 1531, 2039,
			2557, 3067, 3583, 4093, 4603, 5119, 5623, 6143, 6653,
			7159, 7673, 8191, 12281, 16381, 24571, 32749 };
#define NPRIMES (sizeof(primes) / sizeof(primes[0]))
299 * General routine to allocate a prime number sized hash table.
301 void *
302 phashinit(int elements, struct malloc_type *type, u_long *nentries)
304 long hashsize;
305 LIST_HEAD(generic, generic) *hashtbl;
306 int i;
308 if (elements <= 0)
309 panic("phashinit: bad elements");
310 for (i = 1, hashsize = primes[1]; hashsize <= elements;) {
311 i++;
312 if (i == NPRIMES)
313 break;
314 hashsize = primes[i];
316 hashsize = primes[i - 1];
317 hashtbl = kmalloc((u_long)hashsize * sizeof(*hashtbl), type, M_WAITOK);
318 for (i = 0; i < hashsize; i++)
319 LIST_INIT(&hashtbl[i]);
320 *nentries = hashsize;
321 return (hashtbl);
325 * This is a newer version which allocates a hash table of structures
326 * in a prime-number size.
328 * The returned array will be zero'd. The caller is responsible for
329 * initializing the structures.
331 void *
332 phashinit_ext(int elements, size_t size, struct malloc_type *type,
333 u_long *nentries)
335 long hashsize;
336 void *hashtbl;
337 int i;
339 if (elements <= 0)
340 panic("phashinit: bad elements");
341 for (i = 1, hashsize = primes[1]; hashsize <= elements;) {
342 i++;
343 if (i == NPRIMES)
344 break;
345 hashsize = primes[i];
347 hashsize = primes[i - 1];
348 hashtbl = kmalloc((size_t)hashsize * size, type, M_WAITOK | M_ZERO);
349 *nentries = hashsize;
350 return (hashtbl);
354 * Copyin an iovec. If the iovec array fits, use the preallocated small
355 * iovec structure. If it is too big, dynamically allocate an iovec array
356 * of sufficient size.
358 * MPSAFE
361 iovec_copyin(struct iovec *uiov, struct iovec **kiov, struct iovec *siov,
362 size_t iov_cnt, size_t *iov_len)
364 struct iovec *iovp;
365 int error, i;
366 size_t len;
368 if (iov_cnt > UIO_MAXIOV)
369 return EMSGSIZE;
370 if (iov_cnt > UIO_SMALLIOV) {
371 MALLOC(*kiov, struct iovec *, sizeof(struct iovec) * iov_cnt,
372 M_IOV, M_WAITOK);
373 } else {
374 *kiov = siov;
376 error = copyin(uiov, *kiov, iov_cnt * sizeof(struct iovec));
377 if (error == 0) {
378 *iov_len = 0;
379 for (i = 0, iovp = *kiov; i < iov_cnt; i++, iovp++) {
381 * Check for both *iov_len overflows and out of
382 * range iovp->iov_len's. We limit to the
383 * capabilities of signed integers.
385 * GCC4 - overflow check opt requires assign/test.
387 len = *iov_len + iovp->iov_len;
388 if (len < *iov_len)
389 error = EINVAL;
390 *iov_len = len;
395 * From userland disallow iovec's which exceed the sized size
396 * limit as the system calls return ssize_t.
398 * NOTE: Internal kernel interfaces can handle the unsigned
399 * limit.
401 if (error == 0 && (ssize_t)*iov_len < 0)
402 error = EINVAL;
404 if (error)
405 iovec_free(kiov, siov);
406 return (error);
411 * Copyright (c) 2004 Alan L. Cox <alc@cs.rice.edu>
412 * Copyright (c) 1982, 1986, 1991, 1993
413 * The Regents of the University of California. All rights reserved.
414 * (c) UNIX System Laboratories, Inc.
415 * All or some portions of this file are derived from material licensed
416 * to the University of California by American Telephone and Telegraph
417 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
418 * the permission of UNIX System Laboratories, Inc.
420 * Redistribution and use in source and binary forms, with or without
421 * modification, are permitted provided that the following conditions
422 * are met:
423 * 1. Redistributions of source code must retain the above copyright
424 * notice, this list of conditions and the following disclaimer.
425 * 2. Redistributions in binary form must reproduce the above copyright
426 * notice, this list of conditions and the following disclaimer in the
427 * documentation and/or other materials provided with the distribution.
428 * 4. Neither the name of the University nor the names of its contributors
429 * may be used to endorse or promote products derived from this software
430 * without specific prior written permission.
432 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
433 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
434 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
435 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
436 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
437 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
438 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
439 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
440 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
441 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
442 * SUCH DAMAGE.
444 * @(#)kern_subr.c 8.3 (Berkeley) 1/21/94
445 * $FreeBSD: src/sys/i386/i386/uio_machdep.c,v 1.1 2004/03/21 20:28:36 alc Exp $
449 * Implement uiomove(9) from physical memory using sf_bufs to reduce
450 * the creation and destruction of ephemeral mappings.
453 uiomove_fromphys(vm_page_t *ma, vm_offset_t offset, size_t n, struct uio *uio)
455 struct sf_buf *sf;
456 struct thread *td = curthread;
457 struct iovec *iov;
458 void *cp;
459 vm_offset_t page_offset;
460 vm_page_t m;
461 size_t cnt;
462 int error = 0;
463 int save = 0;
465 KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
466 ("uiomove_fromphys: mode"));
467 KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
468 ("uiomove_fromphys proc"));
470 crit_enter();
471 save = td->td_flags & TDF_DEADLKTREAT;
472 td->td_flags |= TDF_DEADLKTREAT;
473 crit_exit();
475 while (n > 0 && uio->uio_resid) {
476 iov = uio->uio_iov;
477 cnt = iov->iov_len;
478 if (cnt == 0) {
479 uio->uio_iov++;
480 uio->uio_iovcnt--;
481 continue;
483 if (cnt > n)
484 cnt = n;
485 page_offset = offset & PAGE_MASK;
486 cnt = min(cnt, PAGE_SIZE - page_offset);
487 m = ma[offset >> PAGE_SHIFT];
488 sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
489 cp = (char *)sf_buf_kva(sf) + page_offset;
490 switch (uio->uio_segflg) {
491 case UIO_USERSPACE:
493 * note: removed uioyield (it was the wrong place to
494 * put it).
496 if (uio->uio_rw == UIO_READ)
497 error = copyout(cp, iov->iov_base, cnt);
498 else
499 error = copyin(iov->iov_base, cp, cnt);
500 if (error) {
501 sf_buf_free(sf);
502 goto out;
504 break;
505 case UIO_SYSSPACE:
506 if (uio->uio_rw == UIO_READ)
507 bcopy(cp, iov->iov_base, cnt);
508 else
509 bcopy(iov->iov_base, cp, cnt);
510 break;
511 case UIO_NOCOPY:
512 break;
514 sf_buf_free(sf);
515 iov->iov_base = (char *)iov->iov_base + cnt;
516 iov->iov_len -= cnt;
517 uio->uio_resid -= cnt;
518 uio->uio_offset += cnt;
519 offset += cnt;
520 n -= cnt;
522 out:
523 if (save == 0) {
524 crit_enter();
525 td->td_flags &= ~TDF_DEADLKTREAT;
526 crit_exit();
528 return (error);