/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1982, 1986, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and code derived from software contributed to
 * Berkeley by William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: Utah $Hdr: mem.c 1.13 89/10/08$
 *	from: @(#)mem.c	7.2 (Berkeley) 5/9/91
 * $FreeBSD: src/sys/i386/i386/mem.c,v 1.79.2.9 2003/01/04 22:58:01 njl Exp $
 * $DragonFly: src/sys/kern/kern_memio.c,v 1.30 2007/02/25 23:17:12 corecode Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/fcntl.h>
#include <sys/filio.h>
#include <sys/ioccom.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/proc.h>
#include <sys/random.h>
#include <sys/signalvar.h>
#include <sys/signal2.h>
#include <sys/uio.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
static d_open_t		mmopen;
static d_close_t	mmclose;
static d_read_t		mmread;
static d_write_t	mmwrite;
static d_ioctl_t	mmioctl;
static d_mmap_t		memmmap;
static d_poll_t		mmpoll;
static struct dev_ops mem_ops = {
	{ "mem", CDEV_MAJOR, D_MEM },
	.d_open =	mmopen,
	.d_close =	mmclose,
	.d_read =	mmread,
	.d_write =	mmwrite,
	.d_ioctl =	mmioctl,
	.d_poll =	mmpoll,
	.d_mmap =	memmmap,
};
MALLOC_DEFINE(M_MEMDESC, "memdesc", "memory range descriptors");
static int mem_ioctl (cdev_t, u_long, caddr_t, int, struct ucred *);
static int random_ioctl (cdev_t, u_long, caddr_t, int, struct ucred *);
struct mem_range_softc mem_range_softc;
static int
mmopen(struct dev_open_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	int error;

	switch (minor(dev)) {
	case 0:
	case 1:
		/*
		 * /dev/mem and /dev/kmem: writing requires an unsecured
		 * system.
		 */
		if (ap->a_oflags & FWRITE) {
			if (securelevel > 0 || kernel_mem_readonly)
				return (EPERM);
		}
		error = 0;
		break;
	case 14:
		/* /dev/io: raising IOPL is a privileged operation */
		error = suser_cred(ap->a_cred, 0);
		if (error != 0)
			break;
		if (securelevel > 0 || kernel_mem_readonly) {
			error = EPERM;
			break;
		}
		error = cpu_set_iopl();
		break;
	default:
		error = 0;
		break;
	}
	return (error);
}
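
/*
 * Illustrative userland sketch (not part of the kernel build, device path
 * assumes the node created by mem_drvinit() below): simply holding /dev/io
 * open is what grants the process I/O privilege via cpu_set_iopl() above;
 * closing the descriptor (or exiting) drops it again through mmclose().
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

int
main(void)
{
	/* requires root and securelevel <= 0, see mmopen() case 14 */
	int fd = open("/dev/io", O_RDWR);

	if (fd < 0)
		return (1);
	/* direct inb()/outb() port access is permitted while fd stays open */
	close(fd);
	return (0);
}
#endif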
static int
mmclose(struct dev_close_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	int error;

	switch (minor(dev)) {
	case 14:
		/* /dev/io: drop the elevated IOPL again */
		error = cpu_clr_iopl();
		break;
	default:
		error = 0;
		break;
	}
	return (error);
}
/* buffer of zeroes handed out by /dev/zero, allocated on first use */
static caddr_t zbuf;

static int
mmrw(cdev_t dev, struct uio *uio, int flags)
{
	int o;
	u_int c, v, poolsize;
	struct iovec *iov;
	int error = 0;
	caddr_t buf = NULL;

	while (uio->uio_resid > 0 && error == 0) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("mmrw");
			continue;
		}
		switch (minor(dev)) {
		case 0:
			/*
			 * minor device 0 is physical memory, /dev/mem
			 */
			v = uio->uio_offset & ~PAGE_MASK;
			pmap_kenter((vm_offset_t)ptvmmap, v);
			o = (int)uio->uio_offset & PAGE_MASK;
			c = (u_int)(PAGE_SIZE - ((int)iov->iov_base & PAGE_MASK));
			c = min(c, (u_int)(PAGE_SIZE - o));
			c = min(c, (u_int)iov->iov_len);
			error = uiomove((caddr_t)&ptvmmap[o], (int)c, uio);
			pmap_kremove((vm_offset_t)ptvmmap);
			continue;
		case 1: {
			/*
			 * minor device 1 is kernel memory, /dev/kmem
			 */
			vm_offset_t saddr, eaddr;
			int prot;

			c = iov->iov_len;

			/*
			 * Make sure that all of the pages are currently
			 * resident so that we don't create any zero-fill
			 * pages.
			 */
			saddr = trunc_page(uio->uio_offset);
			eaddr = round_page(uio->uio_offset + c);
			if (saddr > eaddr)
				return (EFAULT);

			/*
			 * Make sure the kernel addresses are mapped.
			 * platform_direct_mapped() can be used to bypass
			 * default mapping via the page table (virtual kernels
			 * contain a lot of out-of-band data).
			 */
			prot = VM_PROT_READ;
			if (uio->uio_rw != UIO_READ)
				prot |= VM_PROT_WRITE;
			error = kvm_access_check(saddr, eaddr, prot);
			if (error)
				return (error);
			error = uiomove((caddr_t)(vm_offset_t)uio->uio_offset,
					(int)c, uio);
			continue;
		}
		case 2:
			/*
			 * minor device 2 is EOF/RATHOLE
			 */
			if (uio->uio_rw == UIO_READ)
				return (0);
			c = iov->iov_len;
			break;
		case 3:
			/*
			 * minor device 3 (/dev/random) is source of filth
			 * on read, seeder on write
			 */
			if (buf == NULL)
				buf = kmalloc(PAGE_SIZE, M_TEMP, M_WAITOK);
			c = min(iov->iov_len, PAGE_SIZE);
			if (uio->uio_rw == UIO_WRITE) {
				error = uiomove(buf, (int)c, uio);
				if (error == 0)
					error = add_buffer_randomness(buf, c);
			} else {
				poolsize = read_random(buf, c);
				if (poolsize == 0) {
					kfree(buf, M_TEMP);
					buf = NULL;
					if ((flags & IO_NDELAY) != 0)
						return (EWOULDBLOCK);
					return (0);
				}
				c = min(c, poolsize);
				error = uiomove(buf, (int)c, uio);
			}
			continue;
		case 4:
			/*
			 * minor device 4 (/dev/urandom) is source of muck
			 * on read, writes are disallowed.
			 */
			c = min(iov->iov_len, PAGE_SIZE);
			if (uio->uio_rw == UIO_WRITE) {
				error = EPERM;
				break;
			}
			if (CURSIG(curthread->td_lwp) != 0) {
				/*
				 * Use tsleep() to get the error code right.
				 * It should return immediately.
				 */
				error = tsleep(&rand_bolt, PCATCH, "urand", 1);
				if (error != 0 && error != EWOULDBLOCK)
					continue;
			}
			if (buf == NULL)
				buf = kmalloc(PAGE_SIZE, M_TEMP, M_WAITOK);
			poolsize = read_random_unlimited(buf, c);
			c = min(c, poolsize);
			error = uiomove(buf, (int)c, uio);
			continue;
		case 12:
			/*
			 * minor device 12 (/dev/zero) is source of nulls
			 * on read, write are disallowed.
			 */
			if (uio->uio_rw == UIO_WRITE) {
				error = EPERM;
				break;
			}
			if (zbuf == NULL) {
				zbuf = (caddr_t)
					kmalloc(PAGE_SIZE, M_TEMP, M_WAITOK);
				bzero(zbuf, PAGE_SIZE);
			}
			c = min(iov->iov_len, PAGE_SIZE);
			error = uiomove(zbuf, (int)c, uio);
			continue;
		default:
			return (ENODEV);
		}

		if (error)
			break;
		iov->iov_base = (char *)iov->iov_base + c;
		iov->iov_len -= c;
		uio->uio_offset += c;
		uio->uio_resid -= c;
	}
	if (buf != NULL)
		kfree(buf, M_TEMP);
	return (error);
}
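
/*
 * Illustrative userland sketch (not part of the kernel build, device path
 * assumes the nodes created by mem_drvinit() below): a plain read(2) of
 * /dev/urandom is served by mmrw() case 4 via read_random_unlimited(),
 * whereas /dev/random (case 3) may return EWOULDBLOCK or a short count
 * when the entropy pool is empty.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	unsigned char bytes[16];
	ssize_t n, i;
	int fd = open("/dev/urandom", O_RDONLY);

	if (fd < 0)
		return (1);
	n = read(fd, bytes, sizeof(bytes));	/* handled by mmrw(), case 4 */
	for (i = 0; i < n; ++i)
		printf("%02x", bytes[i]);
	if (n > 0)
		printf("\n");
	close(fd);
	return (0);
}
#endif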
static int
mmread(struct dev_read_args *ap)
{
	return(mmrw(ap->a_head.a_dev, ap->a_uio, ap->a_ioflag));
}
static int
mmwrite(struct dev_write_args *ap)
{
	return(mmrw(ap->a_head.a_dev, ap->a_uio, ap->a_ioflag));
}
/*******************************************************\
* allow user processes to MMAP some memory sections	*
* instead of going through read/write			*
\*******************************************************/
static int
memmmap(struct dev_mmap_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;

	switch (minor(dev)) {
	case 0:
		/*
		 * minor device 0 is physical memory
		 */
		ap->a_result = i386_btop(ap->a_offset);
		return 0;
	case 1:
		/*
		 * minor device 1 is kernel memory
		 */
		ap->a_result = i386_btop(vtophys(ap->a_offset));
		return 0;
	default:
		return EINVAL;
	}
}
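
/*
 * Illustrative userland sketch (not part of the kernel build): mapping a
 * page of physical memory through /dev/mem lands in memmmap() above, which
 * only converts the byte offset into a physical page index. The 0xA0000
 * offset (legacy VGA window) and the 4096-byte length are example values.
 */
#if 0
#include <sys/mman.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	int fd = open("/dev/mem", O_RDONLY);
	void *p;

	if (fd < 0)
		return (1);
	/* map one page of physical address space read-only */
	p = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0xA0000);
	if (p == MAP_FAILED) {
		close(fd);
		return (1);
	}
	printf("first byte: %02x\n", *(unsigned char *)p);
	munmap(p, 4096);
	close(fd);
	return (0);
}
#endif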
static int
mmioctl(struct dev_ioctl_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;

	switch (minor(dev)) {
	case 0:
		return mem_ioctl(dev, ap->a_cmd, ap->a_data,
				 ap->a_fflag, ap->a_cred);
	case 3:
	case 4:
		return random_ioctl(dev, ap->a_cmd, ap->a_data,
				    ap->a_fflag, ap->a_cred);
	}
	return (ENODEV);
}
/*
 * Operations for changing memory attributes.
 *
 * This is basically just an ioctl shim for mem_range_attr_get
 * and mem_range_attr_set.
 */
static int
mem_ioctl(cdev_t dev, u_long cmd, caddr_t data, int flags, struct ucred *cred)
{
	int nd, error = 0;
	struct mem_range_op *mo = (struct mem_range_op *)data;
	struct mem_range_desc *md;

	/* is this for us? */
	if ((cmd != MEMRANGE_GET) &&
	    (cmd != MEMRANGE_SET))
		return (ENOTTY);

	/* any chance we can handle this? */
	if (mem_range_softc.mr_op == NULL)
		return (EOPNOTSUPP);

	/* do we have any descriptors? */
	if (mem_range_softc.mr_ndesc == 0)
		return (ENXIO);

	switch (cmd) {
	case MEMRANGE_GET:
		nd = imin(mo->mo_arg[0], mem_range_softc.mr_ndesc);
		if (nd > 0) {
			md = (struct mem_range_desc *)
				kmalloc(nd * sizeof(struct mem_range_desc),
					M_MEMDESC, M_WAITOK);
			error = mem_range_attr_get(md, &nd);
			if (error == 0)
				error = copyout(md, mo->mo_desc,
						nd * sizeof(struct mem_range_desc));
			kfree(md, M_MEMDESC);
		} else {
			nd = mem_range_softc.mr_ndesc;
		}
		mo->mo_arg[0] = nd;
		break;

	case MEMRANGE_SET:
		md = (struct mem_range_desc *)kmalloc(sizeof(struct mem_range_desc),
						      M_MEMDESC, M_WAITOK);
		error = copyin(mo->mo_desc, md, sizeof(struct mem_range_desc));
		/* clamp description string */
		md->mr_owner[sizeof(md->mr_owner) - 1] = 0;
		if (error == 0)
			error = mem_range_attr_set(md, &mo->mo_arg[0]);
		kfree(md, M_MEMDESC);
		break;
	}
	return (error);
}
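
/*
 * Illustrative userland sketch (not part of the kernel build): the usual
 * two-pass MEMRANGE_GET dance as memcontrol(8)-style tools perform it --
 * first ask only for the descriptor count, then fetch the descriptors.
 * Uses struct mem_range_op / mem_range_desc from <sys/memrange.h>.
 */
#if 0
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/memrange.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int
main(void)
{
	struct mem_range_op mro;
	struct mem_range_desc *mrd;
	int fd, i;

	fd = open("/dev/mem", O_RDONLY);
	if (fd < 0)
		return (1);

	/* pass 1: mo_arg[0] == 0 just returns the number of descriptors */
	mro.mo_arg[0] = 0;
	if (ioctl(fd, MEMRANGE_GET, &mro) < 0 || mro.mo_arg[0] <= 0) {
		close(fd);
		return (1);
	}

	/* pass 2: fetch that many descriptors */
	mrd = calloc(mro.mo_arg[0], sizeof(*mrd));
	mro.mo_desc = mrd;
	if (ioctl(fd, MEMRANGE_GET, &mro) == 0) {
		for (i = 0; i < mro.mo_arg[0]; i++)
			printf("%#jx/%#jx %s\n", (uintmax_t)mrd[i].mr_base,
			    (uintmax_t)mrd[i].mr_len, mrd[i].mr_owner);
	}
	free(mrd);
	close(fd);
	return (0);
}
#endif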
/*
 * Implementation-neutral, kernel-callable functions for manipulating
 * memory range attributes.
 */
int
mem_range_attr_get(struct mem_range_desc *mrd, int *arg)
{
	/* can we handle this? */
	if (mem_range_softc.mr_op == NULL)
		return (EOPNOTSUPP);

	if (*arg == 0) {
		*arg = mem_range_softc.mr_ndesc;
	} else {
		bcopy(mem_range_softc.mr_desc, mrd, (*arg) * sizeof(struct mem_range_desc));
	}
	return (0);
}
int
mem_range_attr_set(struct mem_range_desc *mrd, int *arg)
{
	/* can we handle this? */
	if (mem_range_softc.mr_op == NULL)
		return (EOPNOTSUPP);

	return (mem_range_softc.mr_op->set(&mem_range_softc, mrd, arg));
}
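
/*
 * Illustrative in-kernel sketch (hypothetical caller, not part of this
 * file): a driver wanting a write-combining attribute on its frame buffer
 * could fill in a mem_range_desc and call mem_range_attr_set(). The base
 * and length values, the "drm" owner tag, and the helper name are made-up
 * examples; MDF_WRITECOMBINE and MEMRANGE_SET_UPDATE come from
 * <sys/memrange.h>.
 */
#if 0
static int
example_set_wc(vm_paddr_t base, vm_size_t len)
{
	struct mem_range_desc mrd;
	int act;

	mrd.mr_base = base;
	mrd.mr_len = len;
	mrd.mr_flags = MDF_WRITECOMBINE;
	strlcpy(mrd.mr_owner, "drm", sizeof(mrd.mr_owner));
	act = MEMRANGE_SET_UPDATE;
	return (mem_range_attr_set(&mrd, &act));
}
#endif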
void
mem_range_AP_init(void)
{
	if (mem_range_softc.mr_op && mem_range_softc.mr_op->initAP)
		return (mem_range_softc.mr_op->initAP(&mem_range_softc));
}
static int
random_ioctl(cdev_t dev, u_long cmd, caddr_t data, int flags, struct ucred *cred)
{
	int error;
	int intr;

	/*
	 * Even inspecting the state is privileged, since it gives a hint
	 * about how easily the randomness might be guessed.
	 */
	error = 0;

	switch (cmd) {
	/* Really handled in upper layer */
	case FIOASYNC:
		break;
	case MEM_SETIRQ:
		intr = *(int16_t *)data;
		if ((error = suser_cred(cred, 0)) != 0)
			break;
		if (intr < 0 || intr >= MAX_INTS)
			return (EINVAL);
		register_randintr(intr);
		break;
	case MEM_CLEARIRQ:
		intr = *(int16_t *)data;
		if ((error = suser_cred(cred, 0)) != 0)
			break;
		if (intr < 0 || intr >= MAX_INTS)
			return (EINVAL);
		unregister_randintr(intr);
		break;
	case MEM_FINDIRQ:
		intr = *(int16_t *)data;
		if ((error = suser_cred(cred, 0)) != 0)
			break;
		if (intr < 0 || intr >= MAX_INTS)
			return (EINVAL);
		intr = next_registered_randintr(intr);
		if (intr == MAX_INTS)
			return (ENOENT);
		*(u_int16_t *)data = intr;
		break;
	default:
		error = ENOTSUP;
		break;
	}
	return (error);
}
static int
mmpoll(struct dev_poll_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	int revents;

	switch (minor(dev)) {
	case 3:		/* /dev/random */
		revents = random_poll(dev, ap->a_events);
		break;
	case 4:		/* /dev/urandom */
	default:
		revents = seltrue(dev, ap->a_events);
		break;
	}
	ap->a_events = revents;
	return (0);
}
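
/*
 * Illustrative userland sketch (not part of the kernel build): polling
 * /dev/random goes through mmpoll() case 3 above, so POLLIN only comes
 * back once random_poll() reports the pool readable; /dev/urandom is
 * always "ready" via seltrue(). The 5-second timeout is an example value.
 */
#if 0
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	struct pollfd pfd;

	pfd.fd = open("/dev/random", O_RDONLY);
	pfd.events = POLLIN;
	if (pfd.fd < 0)
		return (1);
	/* wait up to 5 seconds for the entropy pool to become readable */
	if (poll(&pfd, 1, 5000) > 0 && (pfd.revents & POLLIN))
		printf("entropy available\n");
	close(pfd.fd);
	return (0);
}
#endif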
int
iszerodev(cdev_t dev)
{
	return ((major(dev) == mem_ops.head.maj)
	    && minor(dev) == 12);
}
static void
mem_drvinit(void *unused)
{
	/* Initialise memory range handling */
	if (mem_range_softc.mr_op != NULL)
		mem_range_softc.mr_op->init(&mem_range_softc);

	dev_ops_add(&mem_ops, 0xf0, 0);
	make_dev(&mem_ops, 0, UID_ROOT, GID_KMEM, 0640, "mem");
	make_dev(&mem_ops, 1, UID_ROOT, GID_KMEM, 0640, "kmem");
	make_dev(&mem_ops, 2, UID_ROOT, GID_WHEEL, 0666, "null");
	make_dev(&mem_ops, 3, UID_ROOT, GID_WHEEL, 0644, "random");
	make_dev(&mem_ops, 4, UID_ROOT, GID_WHEEL, 0644, "urandom");
	make_dev(&mem_ops, 12, UID_ROOT, GID_WHEEL, 0666, "zero");
	make_dev(&mem_ops, 14, UID_ROOT, GID_WHEEL, 0600, "io");
}
SYSINIT(memdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE+CDEV_MAJOR,mem_drvinit,NULL)