/*
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_subr.c	8.3 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/kern_subr.c,v 1.31.2.2 2002/04/21 08:09:37 bde Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/resourcevar.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <sys/vnode.h>
#include <sys/thread2.h>
#include <machine/limits.h>

#include <cpu/lwbuf.h>

#include <vm/vm_page.h>
#include <vm/vm_map.h>
57 SYSCTL_INT(_kern
, KERN_IOV_MAX
, iov_max
, CTLFLAG_RD
, NULL
, UIO_MAXIOV
,
58 "Maximum number of elements in an I/O vector; sysconf(_SC_IOV_MAX)");
61 copyin_nofault(const void *udaddr
, void *kaddr
, size_t len
)
63 thread_t td
= curthread
;
66 atomic_set_int(&td
->td_flags
, TDF_NOFAULT
);
67 error
= copyin(udaddr
, kaddr
, len
);
68 atomic_clear_int(&td
->td_flags
, TDF_NOFAULT
);
73 copyout_nofault(const void *kaddr
, void *udaddr
, size_t len
)
75 thread_t td
= curthread
;
78 atomic_set_int(&td
->td_flags
, TDF_NOFAULT
);
79 error
= copyout(kaddr
, udaddr
, len
);
80 atomic_clear_int(&td
->td_flags
, TDF_NOFAULT
);
85 * UIO_READ: copy the kernelspace cp to the user or kernelspace UIO
86 * UIO_WRITE: copy the user or kernelspace UIO to the kernelspace cp
88 * For userspace UIO's, uio_td must be the current thread.
90 * The syscall interface is responsible for limiting the length to
91 * ssize_t for things like read() or write() which return the bytes
92 * read or written as ssize_t. These functions work with unsigned
96 uiomove(caddr_t cp
, size_t n
, struct uio
*uio
)
98 thread_t td
= curthread
;
105 KASSERT(uio
->uio_rw
== UIO_READ
|| uio
->uio_rw
== UIO_WRITE
,
107 KASSERT(uio
->uio_segflg
!= UIO_USERSPACE
|| uio
->uio_td
== td
,
111 save
= td
->td_flags
& TDF_DEADLKTREAT
;
112 td
->td_flags
|= TDF_DEADLKTREAT
;
117 while (n
> 0 && uio
->uio_resid
) {
129 switch (uio
->uio_segflg
) {
133 if (uio
->uio_rw
== UIO_READ
)
134 error
= copyout(cp
, iov
->iov_base
, cnt
);
136 error
= copyin(iov
->iov_base
, cp
, cnt
);
139 if (uio
->uio_rw
== UIO_READ
)
140 bcopy(cp
, iov
->iov_base
, cnt
);
142 bcopy(iov
->iov_base
, cp
, cnt
);
150 iov
->iov_base
= (char *)iov
->iov_base
+ cnt
;
152 uio
->uio_resid
-= cnt
;
153 uio
->uio_offset
+= cnt
;
158 td
->td_flags
= (td
->td_flags
& ~TDF_DEADLKTREAT
) | save
;
165 * This is the same as uiomove() except (cp, n) is within the bounds of
166 * the passed, locked buffer. Under certain circumstances a VM fault
167 * occuring with a locked buffer held can result in a deadlock or an
168 * attempt to recursively lock the buffer.
170 * This procedure deals with these cases.
172 * If the buffer represents a regular file, is B_CACHE, but the last VM page
173 * is not fully valid we fix-up the last VM page. This should handle the
174 * recursive lock issue.
176 * Deadlocks are another issue. We are holding the vp and the bp locked
177 * and could deadlock against a different vp and/or bp if another thread is
178 * trying to access us while we accessing it. The only solution here is
179 * to release the bp and vnode lock and do the uio to/from a system buffer,
180 * then regain the locks and copyback (if applicable). XXX TODO.
183 uiomovebp(struct buf
*bp
, caddr_t cp
, size_t n
, struct uio
*uio
)
188 if (bp
->b_vp
&& bp
->b_vp
->v_type
== VREG
&&
189 (bp
->b_flags
& B_CACHE
) &&
190 (count
= bp
->b_xio
.xio_npages
) != 0 &&
191 (m
= bp
->b_xio
.xio_pages
[count
-1])->valid
!= VM_PAGE_BITS_ALL
) {
192 vm_page_zero_invalid(m
, TRUE
);
194 return (uiomove(cp
, n
, uio
));
198 * uiomove() but fail for non-trivial VM faults, even if the VM fault is
199 * valid. Returns EFAULT if a VM fault occurred via the copyin/copyout
202 * This allows callers to hold e.g. a busy VM page, or a busy VM object,
203 * or a locked vnode through the call and then fall-back to safer code
207 uiomove_nofault(caddr_t cp
, size_t n
, struct uio
*uio
)
209 thread_t td
= curthread
;
212 atomic_set_int(&td
->td_flags
, TDF_NOFAULT
);
213 error
= uiomove(cp
, n
, uio
);
214 atomic_clear_int(&td
->td_flags
, TDF_NOFAULT
);
219 * Like uiomove() but copies zero-fill. Only allowed for UIO_READ,
220 * for obvious reasons.
223 uiomovez(size_t n
, struct uio
*uio
)
229 KASSERT(uio
->uio_rw
== UIO_READ
, ("uiomovez: mode"));
230 KASSERT(uio
->uio_segflg
!= UIO_USERSPACE
|| uio
->uio_td
== curthread
,
233 while (n
> 0 && uio
->uio_resid
) {
244 switch (uio
->uio_segflg
) {
246 error
= copyout(ZeroPage
, iov
->iov_base
, cnt
);
249 bzero(iov
->iov_base
, cnt
);
257 iov
->iov_base
= (char *)iov
->iov_base
+ cnt
;
259 uio
->uio_resid
-= cnt
;
260 uio
->uio_offset
+= cnt
;
267 * Wrapper for uiomove() that validates the arguments against a known-good
268 * kernel buffer. This function automatically indexes the buffer by
269 * uio_offset and handles all range checking.
272 uiomove_frombuf(void *buf
, size_t buflen
, struct uio
*uio
)
276 offset
= (size_t)uio
->uio_offset
;
277 if ((off_t
)offset
!= uio
->uio_offset
)
279 if (buflen
== 0 || offset
>= buflen
)
281 return (uiomove((char *)buf
+ offset
, buflen
- offset
, uio
));
285 * Give next character to user as result of read.
288 ureadc(int c
, struct uio
*uio
)
294 if (uio
->uio_iovcnt
== 0 || uio
->uio_resid
== 0)
297 if (iov
->iov_len
== 0) {
303 switch (uio
->uio_segflg
) {
305 if (subyte(iov
->iov_base
, c
) < 0)
309 iov_base
= iov
->iov_base
;
311 iov
->iov_base
= iov_base
;
317 iov
->iov_base
= (char *)iov
->iov_base
+ 1;
325 * General routine to allocate a hash table. Make the hash table size a
326 * power of 2 greater or equal to the number of elements requested, and
327 * store the masking value in *hashmask.
330 hashinit(int elements
, struct malloc_type
*type
, u_long
*hashmask
)
333 LIST_HEAD(generic
, generic
) *hashtbl
;
337 panic("hashinit: bad elements");
338 for (hashsize
= 2; hashsize
< elements
; hashsize
<<= 1)
340 hashtbl
= kmalloc((u_long
)hashsize
* sizeof(*hashtbl
), type
, M_WAITOK
);
341 for (i
= 0; i
< hashsize
; i
++)
342 LIST_INIT(&hashtbl
[i
]);
343 *hashmask
= hashsize
- 1;
348 hashdestroy(void *vhashtbl
, struct malloc_type
*type
, u_long hashmask
)
350 LIST_HEAD(generic
, generic
) *hashtbl
, *hp
;
353 for (hp
= hashtbl
; hp
<= &hashtbl
[hashmask
]; hp
++)
354 KASSERT(LIST_EMPTY(hp
), ("%s: hash not empty", __func__
));
355 kfree(hashtbl
, type
);
359 * This is a newer version which allocates a hash table of structures.
361 * The returned array will be zero'd. The caller is responsible for
362 * initializing the structures.
365 hashinit_ext(int elements
, size_t size
, struct malloc_type
*type
,
372 panic("hashinit: bad elements");
373 for (hashsize
= 2; hashsize
< elements
; hashsize
<<= 1)
375 hashtbl
= kmalloc((size_t)hashsize
* size
, type
, M_WAITOK
| M_ZERO
);
376 *hashmask
= hashsize
- 1;
/* Candidate table sizes for the prime-sized hash allocators below */
static int primes[] = { 1, 13, 31, 61, 127, 251, 509, 761, 1021, 1531, 2039,
			2557, 3067, 3583, 4093, 4603, 5119, 5623, 6143, 6653,
			7159, 7673, 8191, 12281, 16381, 24571, 32749 };
#define NPRIMES NELEM(primes)
386 * General routine to allocate a prime number sized hash table.
389 phashinit(int elements
, struct malloc_type
*type
, u_long
*nentries
)
392 LIST_HEAD(generic
, generic
) *hashtbl
;
396 panic("phashinit: bad elements");
397 for (i
= 1, hashsize
= primes
[1]; hashsize
<= elements
;) {
401 hashsize
= primes
[i
];
403 hashsize
= primes
[i
- 1];
404 hashtbl
= kmalloc((u_long
)hashsize
* sizeof(*hashtbl
), type
, M_WAITOK
);
405 for (i
= 0; i
< hashsize
; i
++)
406 LIST_INIT(&hashtbl
[i
]);
407 *nentries
= hashsize
;
412 * This is a newer version which allocates a hash table of structures
413 * in a prime-number size.
415 * The returned array will be zero'd. The caller is responsible for
416 * initializing the structures.
419 phashinit_ext(int elements
, size_t size
, struct malloc_type
*type
,
427 panic("phashinit: bad elements");
428 for (i
= 1, hashsize
= primes
[1]; hashsize
<= elements
;) {
432 hashsize
= primes
[i
];
434 hashsize
= primes
[i
- 1];
435 hashtbl
= kmalloc((size_t)hashsize
* size
, type
, M_WAITOK
| M_ZERO
);
436 *nentries
= hashsize
;
441 * Copyin an iovec. If the iovec array fits, use the preallocated small
442 * iovec structure. If it is too big, dynamically allocate an iovec array
443 * of sufficient size.
448 iovec_copyin(const struct iovec
*uiov
, struct iovec
**kiov
, struct iovec
*siov
,
449 int iov_cnt
, size_t *iov_len
)
455 if ((u_int
)iov_cnt
> UIO_MAXIOV
)
457 if (iov_cnt
> UIO_SMALLIOV
) {
458 *kiov
= kmalloc(sizeof(struct iovec
) * iov_cnt
, M_IOV
,
463 error
= copyin(uiov
, *kiov
, iov_cnt
* sizeof(struct iovec
));
466 for (i
= 0, iovp
= *kiov
; i
< iov_cnt
; i
++, iovp
++) {
468 * Check for both *iov_len overflows and out of
469 * range iovp->iov_len's. We limit to the
470 * capabilities of signed integers.
472 * GCC4 - overflow check opt requires assign/test.
474 len
= *iov_len
+ iovp
->iov_len
;
482 * From userland disallow iovec's which exceed the sized size
483 * limit as the system calls return ssize_t.
485 * NOTE: Internal kernel interfaces can handle the unsigned
488 if (error
== 0 && (ssize_t
)*iov_len
< 0)
492 iovec_free(kiov
, siov
);
/*
 * Copyright (c) 2004 Alan L. Cox <alc@cs.rice.edu>
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_subr.c	8.3 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/i386/i386/uio_machdep.c,v 1.1 2004/03/21 20:28:36 alc Exp $
 */
536 * Implement uiomove(9) from physical memory using lwbuf's to reduce
537 * the creation and destruction of ephemeral mappings.
540 uiomove_fromphys(vm_page_t
*ma
, vm_offset_t offset
, size_t n
, struct uio
*uio
)
542 struct lwbuf lwb_cache
;
544 struct thread
*td
= curthread
;
547 vm_offset_t page_offset
;
553 KASSERT(uio
->uio_rw
== UIO_READ
|| uio
->uio_rw
== UIO_WRITE
,
554 ("uiomove_fromphys: mode"));
555 KASSERT(uio
->uio_segflg
!= UIO_USERSPACE
|| uio
->uio_td
== curthread
,
556 ("uiomove_fromphys proc"));
559 save
= td
->td_flags
& TDF_DEADLKTREAT
;
560 td
->td_flags
|= TDF_DEADLKTREAT
;
563 while (n
> 0 && uio
->uio_resid
) {
573 page_offset
= offset
& PAGE_MASK
;
574 cnt
= min(cnt
, PAGE_SIZE
- page_offset
);
575 m
= ma
[offset
>> PAGE_SHIFT
];
576 lwb
= lwbuf_alloc(m
, &lwb_cache
);
577 cp
= (char *)lwbuf_kva(lwb
) + page_offset
;
579 switch (uio
->uio_segflg
) {
582 * note: removed uioyield (it was the wrong place to
585 if (uio
->uio_rw
== UIO_READ
)
586 error
= copyout(cp
, iov
->iov_base
, cnt
);
588 error
= copyin(iov
->iov_base
, cp
, cnt
);
595 if (uio
->uio_rw
== UIO_READ
)
596 bcopy(cp
, iov
->iov_base
, cnt
);
598 bcopy(iov
->iov_base
, cp
, cnt
);
604 iov
->iov_base
= (char *)iov
->iov_base
+ cnt
;
606 uio
->uio_resid
-= cnt
;
607 uio
->uio_offset
+= cnt
;
614 td
->td_flags
&= ~TDF_DEADLKTREAT
;