sys/kern/kern_memio.c (dragonfly.git)
/*-
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1982, 1986, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and code derived from software contributed to
 * Berkeley by William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      from: Utah $Hdr: mem.c 1.13 89/10/08$
 *      from: @(#)mem.c 7.2 (Berkeley) 5/9/91
 * $FreeBSD: src/sys/i386/i386/mem.c,v 1.79.2.9 2003/01/04 22:58:01 njl Exp $
 * $DragonFly: src/sys/kern/kern_memio.c,v 1.31 2008/01/05 14:02:38 swildner Exp $
 */

/*
 * Memory special file
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/filio.h>
#include <sys/ioccom.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/proc.h>
#include <sys/random.h>
#include <sys/signalvar.h>
#include <sys/signal2.h>
#include <sys/uio.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
static d_open_t  mmopen;
static d_close_t mmclose;
static d_read_t  mmread;
static d_write_t mmwrite;
static d_ioctl_t mmioctl;
static d_mmap_t  memmmap;
static d_poll_t  mmpoll;
#define CDEV_MAJOR 2
static struct dev_ops mem_ops = {
        { "mem", CDEV_MAJOR, D_MEM },
        .d_open =  mmopen,
        .d_close = mmclose,
        .d_read =  mmread,
        .d_write = mmwrite,
        .d_ioctl = mmioctl,
        .d_poll =  mmpoll,
        .d_mmap =  memmmap,
};
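/*
 * Minor number layout (see mem_drvinit() below): 0 /dev/mem, 1 /dev/kmem,
 * 2 /dev/null, 3 /dev/random, 4 /dev/urandom, 12 /dev/zero, 14 /dev/io.
 */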
static int rand_bolt;
static caddr_t zbuf;

MALLOC_DEFINE(M_MEMDESC, "memdesc", "memory range descriptors");
static int mem_ioctl (cdev_t, u_long, caddr_t, int, struct ucred *);
static int random_ioctl (cdev_t, u_long, caddr_t, int, struct ucred *);

struct mem_range_softc mem_range_softc;
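/*
 * mem_range_softc carries the platform memory-range state; its mr_op
 * vector is expected to be filled in by machine-dependent code (on i386
 * this is typically the MTRR driver) before the handlers below use it.
 */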
static int
mmopen(struct dev_open_args *ap)
{
        cdev_t dev = ap->a_head.a_dev;
        int error;

        switch (minor(dev)) {
        case 0:
        case 1:
                if (ap->a_oflags & FWRITE) {
                        if (securelevel > 0 || kernel_mem_readonly)
                                return (EPERM);
                }
                error = 0;
                break;
        case 14:
                error = suser_cred(ap->a_cred, 0);
                if (error != 0)
                        break;
                if (securelevel > 0 || kernel_mem_readonly) {
                        error = EPERM;
                        break;
                }
                error = cpu_set_iopl();
                break;
        default:
                error = 0;
                break;
        }
        return (error);
}
static int
mmclose(struct dev_close_args *ap)
{
        cdev_t dev = ap->a_head.a_dev;
        int error;

        switch (minor(dev)) {
        case 14:
                error = cpu_clr_iopl();
                break;
        default:
                error = 0;
                break;
        }
        return (error);
}
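/*
 * mmrw() is the common read/write path for all of the memory minor
 * devices.  It walks the uio's iovec list and dispatches on the minor
 * number; /dev/mem is copied one page at a time through the transient
 * ptvmmap mapping, while /dev/kmem is bounds-checked with
 * kvm_access_check() and copied directly.
 */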
static int
mmrw(cdev_t dev, struct uio *uio, int flags)
{
        int o;
        u_int c, v;
        u_int poolsize;
        struct iovec *iov;
        int error = 0;
        caddr_t buf = NULL;

        while (uio->uio_resid > 0 && error == 0) {
                iov = uio->uio_iov;
                if (iov->iov_len == 0) {
                        uio->uio_iov++;
                        uio->uio_iovcnt--;
                        if (uio->uio_iovcnt < 0)
                                panic("mmrw");
                        continue;
                }
                switch (minor(dev)) {
                case 0:
                        /*
                         * minor device 0 is physical memory, /dev/mem
                         */
                        v = uio->uio_offset;
                        v &= ~PAGE_MASK;
                        pmap_kenter((vm_offset_t)ptvmmap, v);
                        o = (int)uio->uio_offset & PAGE_MASK;
                        c = (u_int)(PAGE_SIZE - ((int)iov->iov_base & PAGE_MASK));
                        c = min(c, (u_int)(PAGE_SIZE - o));
                        c = min(c, (u_int)iov->iov_len);
                        error = uiomove((caddr_t)&ptvmmap[o], (int)c, uio);
                        pmap_kremove((vm_offset_t)ptvmmap);
                        continue;

                case 1: {
                        /*
                         * minor device 1 is kernel memory, /dev/kmem
                         */
                        vm_offset_t saddr, eaddr;
                        int prot;

                        c = iov->iov_len;

                        /*
                         * Make sure that all of the pages are currently
                         * resident so that we don't create any zero-fill
                         * pages.
                         */
                        saddr = trunc_page(uio->uio_offset);
                        eaddr = round_page(uio->uio_offset + c);
                        if (saddr > eaddr)
                                return EFAULT;

                        /*
                         * Make sure the kernel addresses are mapped.
                         * platform_direct_mapped() can be used to bypass
                         * default mapping via the page table (virtual kernels
                         * contain a lot of out-of-band data).
                         */
                        prot = VM_PROT_READ;
                        if (uio->uio_rw != UIO_READ)
                                prot |= VM_PROT_WRITE;
                        error = kvm_access_check(saddr, eaddr, prot);
                        if (error)
                                return (error);
                        error = uiomove((caddr_t)(vm_offset_t)uio->uio_offset,
                                        (int)c, uio);
                        continue;
                }
                case 2:
                        /*
                         * minor device 2 is EOF/RATHOLE
                         */
                        if (uio->uio_rw == UIO_READ)
                                return (0);
                        c = iov->iov_len;
                        break;
                case 3:
                        /*
                         * minor device 3 (/dev/random) is source of filth
                         * on read, seeder on write
                         */
                        if (buf == NULL)
                                buf = kmalloc(PAGE_SIZE, M_TEMP, M_WAITOK);
                        c = min(iov->iov_len, PAGE_SIZE);
                        if (uio->uio_rw == UIO_WRITE) {
                                error = uiomove(buf, (int)c, uio);
                                if (error == 0)
                                        error = add_buffer_randomness(buf, c);
                        } else {
                                poolsize = read_random(buf, c);
                                if (poolsize == 0) {
                                        if (buf)
                                                kfree(buf, M_TEMP);
                                        if ((flags & IO_NDELAY) != 0)
                                                return (EWOULDBLOCK);
                                        return (0);
                                }
                                c = min(c, poolsize);
                                error = uiomove(buf, (int)c, uio);
                        }
                        continue;
                case 4:
                        /*
                         * minor device 4 (/dev/urandom) is source of muck
                         * on read, writes are disallowed.
                         */
                        c = min(iov->iov_len, PAGE_SIZE);
                        if (uio->uio_rw == UIO_WRITE) {
                                error = EPERM;
                                break;
                        }
                        if (CURSIG(curthread->td_lwp) != 0) {
                                /*
                                 * Use tsleep() to get the error code right.
                                 * It should return immediately.
                                 */
                                error = tsleep(&rand_bolt, PCATCH, "urand", 1);
                                if (error != 0 && error != EWOULDBLOCK)
                                        continue;
                        }
                        if (buf == NULL)
                                buf = kmalloc(PAGE_SIZE, M_TEMP, M_WAITOK);
                        poolsize = read_random_unlimited(buf, c);
                        c = min(c, poolsize);
                        error = uiomove(buf, (int)c, uio);
                        continue;
                case 12:
                        /*
                         * minor device 12 (/dev/zero) is source of nulls
                         * on read; writes are accepted and discarded.
                         */
                        if (uio->uio_rw == UIO_WRITE) {
                                c = iov->iov_len;
                                break;
                        }
                        if (zbuf == NULL) {
                                zbuf = (caddr_t)kmalloc(PAGE_SIZE, M_TEMP,
                                                        M_WAITOK | M_ZERO);
                        }
                        c = min(iov->iov_len, PAGE_SIZE);
                        error = uiomove(zbuf, (int)c, uio);
                        continue;
                default:
                        return (ENODEV);
                }
                if (error)
                        break;
                iov->iov_base += c;
                iov->iov_len -= c;
                uio->uio_offset += c;
                uio->uio_resid -= c;
        }
        if (buf)
                kfree(buf, M_TEMP);
        return (error);
}
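/*
 * mmread()/mmwrite() simply unpack the dev_read/dev_write argument
 * structures and forward everything to mmrw().
 */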
static int
mmread(struct dev_read_args *ap)
{
        return(mmrw(ap->a_head.a_dev, ap->a_uio, ap->a_ioflag));
}

static int
mmwrite(struct dev_write_args *ap)
{
        return(mmrw(ap->a_head.a_dev, ap->a_uio, ap->a_ioflag));
}
/*******************************************************\
* allow user processes to MMAP some memory sections    *
* instead of going through read/write                  *
\*******************************************************/
static int
memmmap(struct dev_mmap_args *ap)
{
        cdev_t dev = ap->a_head.a_dev;

        switch (minor(dev)) {
        case 0:
                /*
                 * minor device 0 is physical memory
                 */
                ap->a_result = i386_btop(ap->a_offset);
                return 0;
        case 1:
                /*
                 * minor device 1 is kernel memory
                 */
                ap->a_result = i386_btop(vtophys(ap->a_offset));
                return 0;

        default:
                return EINVAL;
        }
}
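/*
 * ioctl dispatch: /dev/mem (minor 0) gets the memory-range operations,
 * while /dev/random and /dev/urandom (minors 3 and 4) get the
 * entropy-source operations; everything else is ENODEV.
 */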
static int
mmioctl(struct dev_ioctl_args *ap)
{
        cdev_t dev = ap->a_head.a_dev;

        switch (minor(dev)) {
        case 0:
                return mem_ioctl(dev, ap->a_cmd, ap->a_data,
                                 ap->a_fflag, ap->a_cred);
        case 3:
        case 4:
                return random_ioctl(dev, ap->a_cmd, ap->a_data,
                                    ap->a_fflag, ap->a_cred);
        }
        return (ENODEV);
}
/*
 * Operations for changing memory attributes.
 *
 * This is basically just an ioctl shim for mem_range_attr_get
 * and mem_range_attr_set.
 */
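/*
 * Illustrative sketch of the MEMRANGE_GET calling convention implemented
 * below (not part of this file; assumes a userland fd opened on /dev/mem
 * and <sys/memrange.h>).  The first call, with mo_arg[0] == 0, just
 * returns the descriptor count; the second call fetches the descriptors:
 *
 *      struct mem_range_op mo;
 *      struct mem_range_desc *md;
 *
 *      mo.mo_arg[0] = 0;
 *      ioctl(fd, MEMRANGE_GET, &mo);
 *      md = calloc(mo.mo_arg[0], sizeof(*md));
 *      mo.mo_desc = md;
 *      mo.mo_arg[0] = ...;     (set to the count returned above)
 *      ioctl(fd, MEMRANGE_GET, &mo);
 */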
static int
mem_ioctl(cdev_t dev, u_long cmd, caddr_t data, int flags, struct ucred *cred)
{
        int nd, error = 0;
        struct mem_range_op *mo = (struct mem_range_op *)data;
        struct mem_range_desc *md;

        /* is this for us? */
        if ((cmd != MEMRANGE_GET) &&
            (cmd != MEMRANGE_SET))
                return (ENOTTY);

        /* any chance we can handle this? */
        if (mem_range_softc.mr_op == NULL)
                return (EOPNOTSUPP);

        /* do we have any descriptors? */
        if (mem_range_softc.mr_ndesc == 0)
                return (ENXIO);

        switch (cmd) {
        case MEMRANGE_GET:
                nd = imin(mo->mo_arg[0], mem_range_softc.mr_ndesc);
                if (nd > 0) {
                        md = (struct mem_range_desc *)
                                kmalloc(nd * sizeof(struct mem_range_desc),
                                        M_MEMDESC, M_WAITOK);
                        error = mem_range_attr_get(md, &nd);
                        if (!error)
                                error = copyout(md, mo->mo_desc,
                                                nd * sizeof(struct mem_range_desc));
                        kfree(md, M_MEMDESC);
                } else {
                        nd = mem_range_softc.mr_ndesc;
                }
                mo->mo_arg[0] = nd;
                break;

        case MEMRANGE_SET:
                md = (struct mem_range_desc *)kmalloc(sizeof(struct mem_range_desc),
                                                      M_MEMDESC, M_WAITOK);
                error = copyin(mo->mo_desc, md, sizeof(struct mem_range_desc));
                /* clamp description string */
                md->mr_owner[sizeof(md->mr_owner) - 1] = 0;
                if (error == 0)
                        error = mem_range_attr_set(md, &mo->mo_arg[0]);
                kfree(md, M_MEMDESC);
                break;
        }
        return (error);
}
/*
 * Implementation-neutral, kernel-callable functions for manipulating
 * memory range attributes.
 */
int
mem_range_attr_get(struct mem_range_desc *mrd, int *arg)
{
        /* can we handle this? */
        if (mem_range_softc.mr_op == NULL)
                return (EOPNOTSUPP);

        if (*arg == 0) {
                *arg = mem_range_softc.mr_ndesc;
        } else {
                bcopy(mem_range_softc.mr_desc, mrd, (*arg) * sizeof(struct mem_range_desc));
        }
        return (0);
}
int
mem_range_attr_set(struct mem_range_desc *mrd, int *arg)
{
        /* can we handle this? */
        if (mem_range_softc.mr_op == NULL)
                return (EOPNOTSUPP);

        return (mem_range_softc.mr_op->set(&mem_range_softc, mrd, arg));
}
#ifdef SMP
void
mem_range_AP_init(void)
{
        if (mem_range_softc.mr_op && mem_range_softc.mr_op->initAP)
                return (mem_range_softc.mr_op->initAP(&mem_range_softc));
}
#endif
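/*
 * random_ioctl() lets privileged callers attach (MEM_SETIRQ), detach
 * (MEM_CLEARIRQ) and enumerate (MEM_FINDIRQ) the interrupts that are
 * registered as entropy sources.
 */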
static int
random_ioctl(cdev_t dev, u_long cmd, caddr_t data, int flags, struct ucred *cred)
{
        int error;
        int intr;

        /*
         * Even inspecting the state is privileged, since it gives a hint
         * about how easily the randomness might be guessed.
         */
        error = 0;

        switch (cmd) {
        /* Really handled in upper layer */
        case FIOASYNC:
                break;
        case MEM_SETIRQ:
                intr = *(int16_t *)data;
                if ((error = suser_cred(cred, 0)) != 0)
                        break;
                if (intr < 0 || intr >= MAX_INTS)
                        return (EINVAL);
                register_randintr(intr);
                break;
        case MEM_CLEARIRQ:
                intr = *(int16_t *)data;
                if ((error = suser_cred(cred, 0)) != 0)
                        break;
                if (intr < 0 || intr >= MAX_INTS)
                        return (EINVAL);
                unregister_randintr(intr);
                break;
        case MEM_RETURNIRQ:
                error = ENOTSUP;
                break;
        case MEM_FINDIRQ:
                intr = *(int16_t *)data;
                if ((error = suser_cred(cred, 0)) != 0)
                        break;
                if (intr < 0 || intr >= MAX_INTS)
                        return (EINVAL);
                intr = next_registered_randintr(intr);
                if (intr == MAX_INTS)
                        return (ENOENT);
                *(u_int16_t *)data = intr;
                break;
        default:
                error = ENOTSUP;
                break;
        }
        return (error);
}
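/*
 * Poll support: /dev/random defers to random_poll(), everything else
 * (including /dev/urandom) is always ready via seltrue().
 */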
static int
mmpoll(struct dev_poll_args *ap)
{
        cdev_t dev = ap->a_head.a_dev;
        int revents;

        switch (minor(dev)) {
        case 3:         /* /dev/random */
                revents = random_poll(dev, ap->a_events);
                break;
        case 4:         /* /dev/urandom */
        default:
                revents = seltrue(dev, ap->a_events);
                break;
        }
        ap->a_events = revents;
        return (0);
}
int
iszerodev(cdev_t dev)
{
        return ((major(dev) == mem_ops.head.maj)
            && minor(dev) == 12);
}
static void
mem_drvinit(void *unused)
{
        /* Initialise memory range handling */
        if (mem_range_softc.mr_op != NULL)
                mem_range_softc.mr_op->init(&mem_range_softc);

        dev_ops_add(&mem_ops, 0xf0, 0);
        make_dev(&mem_ops, 0, UID_ROOT, GID_KMEM, 0640, "mem");
        make_dev(&mem_ops, 1, UID_ROOT, GID_KMEM, 0640, "kmem");
        make_dev(&mem_ops, 2, UID_ROOT, GID_WHEEL, 0666, "null");
        make_dev(&mem_ops, 3, UID_ROOT, GID_WHEEL, 0644, "random");
        make_dev(&mem_ops, 4, UID_ROOT, GID_WHEEL, 0644, "urandom");
        make_dev(&mem_ops, 12, UID_ROOT, GID_WHEEL, 0666, "zero");
        make_dev(&mem_ops, 14, UID_ROOT, GID_WHEEL, 0600, "io");
}
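/* Run mem_drvinit() during driver initialization. */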
SYSINIT(memdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE+CDEV_MAJOR,mem_drvinit,NULL)