/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1982, 1986, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and code derived from software contributed to
 * Berkeley by William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: Utah $Hdr: mem.c 1.13 89/10/08$
 *	from: @(#)mem.c	7.2 (Berkeley) 5/9/91
 * $FreeBSD: src/sys/i386/i386/mem.c,v 1.79.2.9 2003/01/04 22:58:01 njl Exp $
 * $DragonFly: src/sys/kern/kern_memio.c,v 1.31 2008/01/05 14:02:38 swildner Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/fcntl.h>
#include <sys/filio.h>
#include <sys/ioccom.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/proc.h>
#include <sys/random.h>
#include <sys/signalvar.h>
#include <sys/signal2.h>
#include <sys/uio.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
static	d_open_t	mmopen;
static	d_close_t	mmclose;
static	d_read_t	mmread;
static	d_write_t	mmwrite;
static	d_ioctl_t	mmioctl;
static	d_mmap_t	memmmap;
static	d_poll_t	mmpoll;
#define CDEV_MAJOR	2	/* traditional memory device major */

static struct dev_ops mem_ops = {
	{ "mem", CDEV_MAJOR, D_MEM },
	.d_open =	mmopen,
	.d_close =	mmclose,
	.d_read =	mmread,
	.d_write =	mmwrite,
	.d_ioctl =	mmioctl,
	.d_poll =	mmpoll,
	.d_mmap =	memmmap,
};
MALLOC_DEFINE(M_MEMDESC, "memdesc", "memory range descriptors");

static int mem_ioctl (cdev_t, u_long, caddr_t, int, struct ucred *);
static int random_ioctl (cdev_t, u_long, caddr_t, int, struct ucred *);

struct mem_range_softc mem_range_softc;

static caddr_t	zbuf;		/* lazily allocated page of zeroes for /dev/zero */
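
/*
 * mmopen:
 *
 *	Open one of the memory special files.  Writing to /dev/mem or
 *	/dev/kmem is refused once securelevel has been raised or kernel
 *	memory has been marked read-only; opening /dev/io requires root
 *	credentials and raises the caller's I/O privilege level.
 */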
static int
mmopen(struct dev_open_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	int error;

	switch (minor(dev)) {
	case 0:
	case 1:
		if (ap->a_oflags & FWRITE) {
			if (securelevel > 0 || kernel_mem_readonly)
				return (EPERM);
		}
		error = 0;
		break;
	case 14:
		error = suser_cred(ap->a_cred, 0);
		if (error != 0)
			break;
		if (securelevel > 0 || kernel_mem_readonly) {
			error = EPERM;
			break;
		}
		error = cpu_set_iopl();
		break;
	default:
		error = 0;
		break;
	}
	return (error);
}
static int
mmclose(struct dev_close_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	int error;

	switch (minor(dev)) {
	case 14:
		error = cpu_clr_iopl();
		break;
	default:
		error = 0;
		break;
	}
	return (error);
}
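
/*
 * mmrw:
 *
 *	Common read/write backend for all of the memory special files.
 *	The minor number selects the behaviour: 0 is /dev/mem (physical
 *	memory, accessed one page at a time through a transient kernel
 *	mapping), 1 is /dev/kmem (kernel virtual memory, validated with
 *	kvm_access_check() before copying), 2 is /dev/null, 3 and 4 are
 *	/dev/random and /dev/urandom, and 12 is /dev/zero.
 */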
static int
mmrw(cdev_t dev, struct uio *uio, int flags)
{
	int o;
	u_int c, v;
	u_int poolsize;
	struct iovec *iov;
	int error = 0;
	caddr_t buf = NULL;

	while (uio->uio_resid > 0 && error == 0) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("mmrw");
			continue;
		}
		switch (minor(dev)) {
		case 0:
			/*
			 * minor device 0 is physical memory, /dev/mem
			 */
			v = uio->uio_offset;
			v &= ~PAGE_MASK;
			pmap_kenter((vm_offset_t)ptvmmap, v);
			o = (int)uio->uio_offset & PAGE_MASK;
			c = (u_int)(PAGE_SIZE - ((int)iov->iov_base & PAGE_MASK));
			c = min(c, (u_int)(PAGE_SIZE - o));
			c = min(c, (u_int)iov->iov_len);
			error = uiomove((caddr_t)&ptvmmap[o], (int)c, uio);
			pmap_kremove((vm_offset_t)ptvmmap);
			continue;

		case 1: {
			/*
			 * minor device 1 is kernel memory, /dev/kmem
			 */
			vm_offset_t saddr, eaddr;
			int prot;

			c = iov->iov_len;

			/*
			 * Make sure that all of the pages are currently
			 * resident so that we don't create any zero-fill
			 * pages.
			 */
			saddr = trunc_page(uio->uio_offset);
			eaddr = round_page(uio->uio_offset + c);
			if (saddr > eaddr)
				return (EFAULT);

			/*
			 * Make sure the kernel addresses are mapped.
			 * platform_direct_mapped() can be used to bypass
			 * default mapping via the page table (virtual kernels
			 * contain a lot of out-of-band data).
			 */
			prot = VM_PROT_READ;
			if (uio->uio_rw != UIO_READ)
				prot |= VM_PROT_WRITE;
			error = kvm_access_check(saddr, eaddr, prot);
			if (error)
				return (error);
			error = uiomove((caddr_t)(vm_offset_t)uio->uio_offset,
					(int)c, uio);
			continue;
		}
		case 2:
			/*
			 * minor device 2 is EOF/RATHOLE
			 */
			if (uio->uio_rw == UIO_READ)
				return (0);
			c = iov->iov_len;
			break;
		case 3:
			/*
			 * minor device 3 (/dev/random) is source of filth
			 * on read, seeder on write
			 */
			if (buf == NULL)
				buf = kmalloc(PAGE_SIZE, M_TEMP, M_WAITOK);
			c = min(iov->iov_len, PAGE_SIZE);
			if (uio->uio_rw == UIO_WRITE) {
				error = uiomove(buf, (int)c, uio);
				if (error == 0)
					error = add_buffer_randomness(buf, c);
			} else {
				poolsize = read_random(buf, c);
				if (poolsize == 0) {
					if (buf)
						kfree(buf, M_TEMP);
					if ((flags & IO_NDELAY) != 0)
						return (EWOULDBLOCK);
					return (0);
				}
				c = min(c, poolsize);
				error = uiomove(buf, (int)c, uio);
			}
			continue;
		case 4:
			/*
			 * minor device 4 (/dev/urandom) is source of muck
			 * on read, writes are disallowed.
			 */
			c = min(iov->iov_len, PAGE_SIZE);
			if (uio->uio_rw == UIO_WRITE) {
				error = EPERM;
				break;
			}
			if (CURSIG(curthread->td_lwp) != 0) {
				/*
				 * Use tsleep() to get the error code right.
				 * It should return immediately.
				 */
				error = tsleep(&rand_bolt, PCATCH, "urand", 1);
				if (error != 0 && error != EWOULDBLOCK)
					continue;
			}
			if (buf == NULL)
				buf = kmalloc(PAGE_SIZE, M_TEMP, M_WAITOK);
			poolsize = read_random_unlimited(buf, c);
			c = min(c, poolsize);
			error = uiomove(buf, (int)c, uio);
			continue;
		case 12:
			/*
			 * minor device 12 (/dev/zero) is source of nulls
			 * on read, writes are disallowed.
			 */
			if (uio->uio_rw == UIO_WRITE) {
				error = EPERM;
				break;
			}
			if (zbuf == NULL) {
				zbuf = (caddr_t)kmalloc(PAGE_SIZE, M_TEMP,
							M_WAITOK | M_ZERO);
			}
			c = min(iov->iov_len, PAGE_SIZE);
			error = uiomove(zbuf, (int)c, uio);
			continue;
		default:
			return (ENODEV);
		}
		if (error)
			break;
		iov->iov_base = (char *)iov->iov_base + c;
		iov->iov_len -= c;
		uio->uio_offset += c;
		uio->uio_resid -= c;
	}
	if (buf)
		kfree(buf, M_TEMP);
	return (error);
}
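
/*
 * mmread/mmwrite:
 *
 *	Character device entry points; both simply forward to mmrw()
 *	with the uio and I/O flags from the request.
 */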
static int
mmread(struct dev_read_args *ap)
{
	return(mmrw(ap->a_head.a_dev, ap->a_uio, ap->a_ioflag));
}

static int
mmwrite(struct dev_write_args *ap)
{
	return(mmrw(ap->a_head.a_dev, ap->a_uio, ap->a_ioflag));
}
/*******************************************************\
* allow user processes to MMAP some memory sections	*
* instead of going through read/write			*
\*******************************************************/

static int
memmmap(struct dev_mmap_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;

	switch (minor(dev)) {
	case 0:
		/*
		 * minor device 0 is physical memory
		 */
		ap->a_result = i386_btop(ap->a_offset);
		return 0;
	case 1:
		/*
		 * minor device 1 is kernel memory
		 */
		ap->a_result = i386_btop(vtophys(ap->a_offset));
		return 0;
	default:
		return EINVAL;
	}
}
static int
mmioctl(struct dev_ioctl_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;

	switch (minor(dev)) {
	case 0:
		return mem_ioctl(dev, ap->a_cmd, ap->a_data,
				 ap->a_fflag, ap->a_cred);
	case 3:
	case 4:
		return random_ioctl(dev, ap->a_cmd, ap->a_data,
				    ap->a_fflag, ap->a_cred);
	}
	return (ENODEV);
}
/*
 * Operations for changing memory attributes.
 *
 * This is basically just an ioctl shim for mem_range_attr_get
 * and mem_range_attr_set.
 */
static int
mem_ioctl(cdev_t dev, u_long cmd, caddr_t data, int flags, struct ucred *cred)
{
	int nd, error = 0;
	struct mem_range_op *mo = (struct mem_range_op *)data;
	struct mem_range_desc *md;

	/* is this for us? */
	if ((cmd != MEMRANGE_GET) &&
	    (cmd != MEMRANGE_SET))
		return (ENOTTY);

	/* any chance we can handle this? */
	if (mem_range_softc.mr_op == NULL)
		return (EOPNOTSUPP);

	/* do we have any descriptors? */
	if (mem_range_softc.mr_ndesc == 0)
		return (ENXIO);

	switch (cmd) {
	case MEMRANGE_GET:
		nd = imin(mo->mo_arg[0], mem_range_softc.mr_ndesc);
		if (nd > 0) {
			md = (struct mem_range_desc *)
				kmalloc(nd * sizeof(struct mem_range_desc),
					M_MEMDESC, M_WAITOK);
			error = mem_range_attr_get(md, &nd);
			if (!error)
				error = copyout(md, mo->mo_desc,
					nd * sizeof(struct mem_range_desc));
			kfree(md, M_MEMDESC);
		} else {
			nd = mem_range_softc.mr_ndesc;
		}
		mo->mo_arg[0] = nd;
		break;

	case MEMRANGE_SET:
		md = (struct mem_range_desc *)kmalloc(sizeof(struct mem_range_desc),
						      M_MEMDESC, M_WAITOK);
		error = copyin(mo->mo_desc, md, sizeof(struct mem_range_desc));
		/* clamp description string */
		md->mr_owner[sizeof(md->mr_owner) - 1] = 0;
		if (error == 0)
			error = mem_range_attr_set(md, &mo->mo_arg[0]);
		kfree(md, M_MEMDESC);
		break;
	}
	return (error);
}
/*
 * Implementation-neutral, kernel-callable functions for manipulating
 * memory range attributes.
 */
int
mem_range_attr_get(struct mem_range_desc *mrd, int *arg)
{
	/* can we handle this? */
	if (mem_range_softc.mr_op == NULL)
		return (EOPNOTSUPP);

	if (*arg == 0) {
		*arg = mem_range_softc.mr_ndesc;
	} else {
		bcopy(mem_range_softc.mr_desc, mrd,
		      (*arg) * sizeof(struct mem_range_desc));
	}
	return (0);
}
int
mem_range_attr_set(struct mem_range_desc *mrd, int *arg)
{
	/* can we handle this? */
	if (mem_range_softc.mr_op == NULL)
		return (EOPNOTSUPP);

	return (mem_range_softc.mr_op->set(&mem_range_softc, mrd, arg));
}
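
/*
 * Usage sketch (illustrative only): a driver that wants its frame buffer
 * mapped write-combined could fill in a struct mem_range_desc with the
 * buffer's base address, length, an owner string, and a type flag such as
 * MDF_WRITECOMBINE from <sys/memrange.h>, then hand it to
 * mem_range_attr_set().  The flags actually honoured depend on the CPU's
 * memory-range (MTRR-style) support.
 */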
void
mem_range_AP_init(void)
{
	if (mem_range_softc.mr_op && mem_range_softc.mr_op->initAP)
		return (mem_range_softc.mr_op->initAP(&mem_range_softc));
}
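
/*
 * random_ioctl:
 *
 *	Control interface for the entropy devices.  The interrupt-related
 *	commands let privileged callers attach, detach and enumerate the
 *	interrupt sources feeding the entropy pool; each of them checks
 *	root credentials and bounds-checks the interrupt number against
 *	MAX_INTS before acting.
 */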
static int
random_ioctl(cdev_t dev, u_long cmd, caddr_t data, int flags, struct ucred *cred)
{
	int error;
	int intr;

	/*
	 * Even inspecting the state is privileged, since it gives a hint
	 * about how easily the randomness might be guessed.
	 */
	error = 0;

	switch (cmd) {
	/* Really handled in upper layer */
	case FIOASYNC:
		break;
	case MEM_SETIRQ:
		intr = *(int16_t *)data;
		if ((error = suser_cred(cred, 0)) != 0)
			break;
		if (intr < 0 || intr >= MAX_INTS)
			return (EINVAL);
		register_randintr(intr);
		break;
	case MEM_CLEARIRQ:
		intr = *(int16_t *)data;
		if ((error = suser_cred(cred, 0)) != 0)
			break;
		if (intr < 0 || intr >= MAX_INTS)
			return (EINVAL);
		unregister_randintr(intr);
		break;
	case MEM_RETURNIRQ:
		error = ENOTSUP;
		break;
	case MEM_FINDIRQ:
		intr = *(int16_t *)data;
		if ((error = suser_cred(cred, 0)) != 0)
			break;
		if (intr < 0 || intr >= MAX_INTS)
			return (EINVAL);
		intr = next_registered_randintr(intr);
		if (intr == MAX_INTS)
			return (ENOENT);
		*(u_int16_t *)data = intr;
		break;
	default:
		error = ENOTSUP;
		break;
	}
	return (error);
}
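
/*
 * mmpoll:
 *
 *	Poll support.  Only /dev/random can block, so it is handed to
 *	random_poll(); every other minor is always ready via seltrue().
 */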
static int
mmpoll(struct dev_poll_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	int revents;

	switch (minor(dev)) {
	case 3:		/* /dev/random */
		revents = random_poll(dev, ap->a_events);
		break;
	case 4:		/* /dev/urandom */
	default:
		revents = seltrue(dev, ap->a_events);
		break;
	}
	ap->a_events = revents;
	return (0);
}
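
/*
 * iszerodev:
 *
 *	Predicate so that other kernel code can recognize /dev/zero
 *	(the mem_ops major with minor 12), e.g. when special-casing
 *	zero-backed mappings.
 */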
int
iszerodev(cdev_t dev)
{
	return ((major(dev) == mem_ops.head.maj)
	    && minor(dev) == 12);
}
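
/*
 * mem_drvinit:
 *
 *	Driver initialization, run at boot via SYSINIT: hook up any memory
 *	range (MTRR-style) support, register the dev_ops table and create
 *	the device nodes with their traditional owners and permissions.
 */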
static void
mem_drvinit(void *unused)
{
	/* Initialise memory range handling */
	if (mem_range_softc.mr_op != NULL)
		mem_range_softc.mr_op->init(&mem_range_softc);

	dev_ops_add(&mem_ops, 0xf0, 0);
	make_dev(&mem_ops, 0, UID_ROOT, GID_KMEM, 0640, "mem");
	make_dev(&mem_ops, 1, UID_ROOT, GID_KMEM, 0640, "kmem");
	make_dev(&mem_ops, 2, UID_ROOT, GID_WHEEL, 0666, "null");
	make_dev(&mem_ops, 3, UID_ROOT, GID_WHEEL, 0644, "random");
	make_dev(&mem_ops, 4, UID_ROOT, GID_WHEEL, 0644, "urandom");
	make_dev(&mem_ops, 12, UID_ROOT, GID_WHEEL, 0666, "zero");
	make_dev(&mem_ops, 14, UID_ROOT, GID_WHEEL, 0600, "io");
}

SYSINIT(memdev, SI_SUB_DRIVERS, SI_ORDER_MIDDLE + CDEV_MAJOR, mem_drvinit, NULL)