kernel - Factor out TSC cputimer into common x86_64 code, use for vkernel.
sys/platform/vkernel64/platform/init.c (dragonfly.git)
1 /*
2 * Copyright (c) 2006 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
16 * distribution.
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
35 #include <sys/types.h>
36 #include <sys/systm.h>
37 #include <sys/kernel.h>
38 #include <sys/stat.h>
39 #include <sys/mman.h>
40 #include <sys/cons.h>
41 #include <sys/random.h>
42 #include <sys/vkernel.h>
43 #include <sys/tls.h>
44 #include <sys/reboot.h>
45 #include <sys/proc.h>
46 #include <sys/msgbuf.h>
47 #include <sys/vmspace.h>
48 #include <sys/socket.h>
49 #include <sys/sockio.h>
50 #include <sys/sysctl.h>
51 #include <sys/un.h>
52 #include <vm/vm_page.h>
53 #include <vm/vm_map.h>
54 #include <sys/mplock2.h>
55 #include <sys/wait.h>
56 #include <sys/vmm.h>
58 #include <machine/cpu.h>
59 #include <machine/globaldata.h>
60 #include <machine/tls.h>
61 #include <machine/md_var.h>
62 #include <machine/vmparam.h>
63 #include <cpu/specialreg.h>
65 #include <net/if.h>
66 #include <net/if_arp.h>
67 #include <net/ethernet.h>
68 #include <net/bridge/if_bridgevar.h>
69 #include <netinet/in.h>
70 #include <arpa/inet.h>
71 #include <net/if_var.h>
73 #include <stdio.h>
74 #include <stdlib.h>
75 #include <stdarg.h>
76 #include <stdbool.h>
77 #include <unistd.h>
78 #include <fcntl.h>
79 #include <string.h>
80 #include <err.h>
81 #include <errno.h>
82 #include <assert.h>
83 #include <sysexits.h>
85 #define EX_VKERNEL_REBOOT 32
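/*
 * A child vkernel exits with this status to request a reboot: cpu_reset()
 * calls exit(EX_VKERNEL_REBOOT) and the monitor loop at the top of main()
 * then forks a fresh vkernel; any other exit status ends the monitor too.
 */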
87 vm_phystable_t phys_avail[16];
88 vm_paddr_t Maxmem;
89 vm_paddr_t Maxmem_bytes;
90 long physmem;
91 int MemImageFd = -1;
92 struct vkdisk_info DiskInfo[VKDISK_MAX];
93 int DiskNum;
94 struct vknetif_info NetifInfo[VKNETIF_MAX];
95 int NetifNum;
96 char *pid_file;
97 vm_offset_t KvaStart;
98 vm_offset_t KvaEnd;
99 vm_offset_t KvaSize;
100 vm_offset_t virtual_start;
101 vm_offset_t virtual_end;
102 vm_offset_t virtual2_start;
103 vm_offset_t virtual2_end;
104 vm_offset_t kernel_vm_end;
105 vm_offset_t crashdumpmap;
106 vm_offset_t clean_sva;
107 vm_offset_t clean_eva;
108 struct msgbuf *msgbufp;
109 caddr_t ptvmmap;
110 vpte_t *KernelPTD;
111 vpte_t *KernelPTA; /* Warning: Offset for direct VA translation */
112 void *dmap_min_address;
113 void *vkernel_stack;
114 u_int cpu_feature; /* XXX */
115 int tsc_present;
116 int tsc_invariant;
117 int tsc_mpsync;
118 int optcpus; /* number of cpus - see mp_start() */
119 int cpu_bits;
120 int lwp_cpu_lock; /* if/how to lock virtual CPUs to real CPUs */
121 int real_ncpus; /* number of real CPUs */
122 int next_cpu; /* next real CPU to lock a virtual CPU to */
123 int vkernel_b_arg; /* number of logical CPU bits - only SMP */
124 int vkernel_B_arg; /* number of core bits - only SMP */
125 int vmm_enabled; /* VMM HW assisted enable */
126 int use_precise_timer = 0; /* use a precise timer (more expensive) */
127 int allow_tsc_timer = 1; /* use the TSC cpu timer if possible */
128 struct privatespace *CPU_prvspace;
130 tsc_uclock_t tsc_frequency;
131 tsc_uclock_t tsc_oneus_approx;
133 extern uint64_t KPML4phys; /* phys addr of kernel level 4 */
135 static struct trapframe proc0_tf;
136 static void *proc0paddr;
138 static void init_sys_memory(char *imageFile);
139 static void init_kern_memory(void);
140 static void init_kern_memory_vmm(void);
141 static void init_globaldata(void);
142 static void init_vkernel(void);
143 static void init_disk(char **diskExp, int *diskFlags, int diskFileNum, enum vkdisk_type type);
144 static void init_netif(char *netifExp[], int netifFileNum);
145 static void writepid(void);
146 static void cleanpid(void);
147 static int unix_connect(const char *path);
148 static void usage_err(const char *ctl, ...) __printflike(1, 2);
149 static void usage_help(_Bool);
150 static void init_locks(void);
151 static void handle_term(int);
153 pid_t childpid;
155 static int save_ac;
156 static int prezeromem;
157 static char **save_av;
160 * Kernel startup for virtual kernels - standard main()
163 main(int ac, char **av)
165 char *memImageFile = NULL;
166 char *netifFile[VKNETIF_MAX];
167 char *diskFile[VKDISK_MAX];
168 char *cdFile[VKDISK_MAX];
169 char *suffix;
170 char *endp;
171 char *tmp;
172 char *tok;
173 int diskFlags[VKDISK_MAX];
174 int netifFileNum = 0;
175 int diskFileNum = 0;
176 int cdFileNum = 0;
177 int bootOnDisk = -1; /* set below to vcd (0) or vkd (1) */
178 int c;
179 int i;
180 int j;
181 int n;
182 int isq;
183 int pos;
184 int eflag;
185 int dflag = 0; /* disable vmm */
186 int real_vkernel_enable;
187 int supports_sse;
188 uint32_t mxcsr_mask;
189 size_t vsize;
190 size_t msize = sizeof(mxcsr_mask); /* init for the hw.mxcsr_mask sysctl below */
191 size_t kenv_size;
192 size_t kenv_size2;
193 int status;
194 struct sigaction sa;
197 * Currently a bad hack but rtld-elf needs LD_SHAREDLIB_BASE to
198 * be set to force it to mmap() shared libraries into low memory,
199 * so our module loader can link against the related symbols.
201 if (getenv("LD_SHAREDLIB_BASE") == NULL) {
202 setenv("LD_SHAREDLIB_BASE", "0x10000000", 1);
203 execv(av[0], av);
204 fprintf(stderr, "Must run %s with full path\n", av[0]);
205 exit(1);
208 while ((childpid = fork()) != 0) {
209 /* Ignore signals */
210 bzero(&sa, sizeof(sa));
211 sigemptyset(&sa.sa_mask);
212 sa.sa_handler = SIG_IGN;
213 sigaction(SIGINT, &sa, NULL);
214 sigaction(SIGQUIT, &sa, NULL);
215 sigaction(SIGHUP, &sa, NULL);
218 * Forward SIGTERM to the child so that
219 * the shutdown process initiates correctly.
221 sa.sa_handler = handle_term;
222 sigaction(SIGTERM, &sa, NULL);
225 * Wait for child to terminate, exit if
226 * someone stole our child.
228 while (waitpid(childpid, &status, 0) != childpid) {
229 if (errno == ECHILD)
230 exit(1);
232 if (WEXITSTATUS(status) != EX_VKERNEL_REBOOT)
233 return 0;
237 * Starting for real
239 save_ac = ac;
240 save_av = av;
241 eflag = 0;
242 pos = 0;
243 kenv_size = 0;
246 * Process options
248 kernel_mem_readonly = 1;
249 optcpus = 2;
250 cpu_bits = 1;
251 vkernel_b_arg = 0;
252 vkernel_B_arg = 0;
253 lwp_cpu_lock = LCL_NONE;
255 real_vkernel_enable = 0;
256 vsize = sizeof(real_vkernel_enable);
257 sysctlbyname("vm.vkernel_enable", &real_vkernel_enable, &vsize, NULL,0);
259 if (real_vkernel_enable == 0) {
260 errx(1, "vm.vkernel_enable is 0, must be set "
261 "to 1 to execute a vkernel!");
264 real_ncpus = 1;
265 vsize = sizeof(real_ncpus);
266 sysctlbyname("hw.ncpu", &real_ncpus, &vsize, NULL, 0);
268 if (ac < 2)
269 usage_help(false);
271 while ((c = getopt(ac, av, "c:hsvztTl:m:n:r:R:e:i:p:I:Ud")) != -1) {
272 switch(c) {
273 case 'd':
274 dflag = 1;
275 break;
276 case 'e':
278 * name=value:name=value:name=value...
279 * name="value"...
281 * Allow values to be quoted but note that shells
282 * may remove the quotes, so using this feature
283 * to embed colons may require a backslash.
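*
* For example (names and values here are illustrative only):
*	-e 'name1=value1:name2="a:b"'
* yields two NUL-terminated strings in kern_envp; the quotes are
* stripped and the quoted colon is kept as part of the second value.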
285 n = strlen(optarg);
286 isq = 0;
288 if (eflag == 0) {
289 kenv_size = n + 2;
290 kern_envp = malloc(kenv_size);
291 if (kern_envp == NULL)
292 errx(1, "Couldn't allocate %zd bytes for kern_envp", kenv_size);
293 } else {
294 kenv_size2 = kenv_size + n + 1;
295 pos = kenv_size - 1;
296 if ((tmp = realloc(kern_envp, kenv_size2)) == NULL)
297 errx(1, "Couldn't reallocate %zd bytes for kern_envp", kenv_size2);
298 kern_envp = tmp;
299 kenv_size = kenv_size2;
302 for (i = 0, j = pos; i < n; ++i) {
303 if (optarg[i] == '"')
304 isq ^= 1;
305 else if (optarg[i] == '\'')
306 isq ^= 2;
307 else if (isq == 0 && optarg[i] == ':')
308 kern_envp[j++] = 0;
309 else
310 kern_envp[j++] = optarg[i];
312 kern_envp[j++] = 0;
313 kern_envp[j++] = 0;
314 eflag++;
315 break;
316 case 's':
317 boothowto |= RB_SINGLE;
318 break;
319 case 't':
320 use_precise_timer = 1;
321 break;
322 case 'T':
323 allow_tsc_timer = 0;
324 break;
325 case 'v':
326 bootverbose = 1;
327 break;
328 case 'i':
329 memImageFile = optarg;
330 break;
331 case 'I':
332 if (netifFileNum < VKNETIF_MAX)
333 netifFile[netifFileNum++] = strdup(optarg);
334 break;
335 case 'r':
336 case 'R':
337 if (bootOnDisk < 0)
338 bootOnDisk = 1;
339 if (diskFileNum + cdFileNum < VKDISK_MAX) {
340 diskFile[diskFileNum] = strdup(optarg);
341 diskFlags[diskFileNum] = (c == 'R');
342 ++diskFileNum;
344 break;
345 case 'c':
346 if (bootOnDisk < 0)
347 bootOnDisk = 0;
348 if (diskFileNum + cdFileNum < VKDISK_MAX)
349 cdFile[cdFileNum++] = strdup(optarg);
350 break;
351 case 'm':
352 Maxmem_bytes = strtoull(optarg, &suffix, 0);
353 if (suffix) {
354 switch(*suffix) {
355 case 'g':
356 case 'G':
357 Maxmem_bytes <<= 30;
358 break;
359 case 'm':
360 case 'M':
361 Maxmem_bytes <<= 20;
362 break;
363 case 'k':
364 case 'K':
365 Maxmem_bytes <<= 10;
366 break;
367 default:
368 Maxmem_bytes = 0;
369 usage_err("Bad maxmem option");
370 /* NOT REACHED */
371 break;
374 break;
375 case 'l':
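/*
 * Examples (cpu numbers are illustrative): "-l map" locks each
 * virtual cpu to successive real cpus, "-l map,2" starts that
 * mapping at real cpu 2, "-l 1" locks every virtual cpu to real
 * cpu 1, and "-l any" (the default) disables locking.
 */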
376 next_cpu = -1;
377 if (strncmp("map", optarg, 3) == 0) {
378 lwp_cpu_lock = LCL_PER_CPU;
379 if (optarg[3] == ',') {
380 next_cpu = strtol(optarg+4, &endp, 0);
381 if (*endp != '\0')
382 usage_err("Bad target CPU number at '%s'", endp);
383 } else {
384 next_cpu = 0;
386 if (next_cpu < 0 || next_cpu > real_ncpus - 1)
387 usage_err("Bad target CPU, valid range is 0-%d", real_ncpus - 1);
388 } else if (strncmp("any", optarg, 3) == 0) {
389 lwp_cpu_lock = LCL_NONE;
390 } else {
391 lwp_cpu_lock = LCL_SINGLE_CPU;
392 next_cpu = strtol(optarg, &endp, 0);
393 if (*endp != '\0')
394 usage_err("Bad target CPU number at '%s'", endp);
395 if (next_cpu < 0 || next_cpu > real_ncpus - 1)
396 usage_err("Bad target CPU, valid range is 0-%d", real_ncpus - 1);
398 break;
399 case 'n':
401 * This value is set up by mp_start(), don't just
402 * set ncpus here.
404 tok = strtok(optarg, ":");
405 optcpus = strtol(tok, NULL, 0);
406 if (optcpus < 1 || optcpus > MAXCPU)
407 usage_err("Bad ncpus, valid range is 1-%d", MAXCPU);
408 cpu_bits = 1;
409 while ((1 << cpu_bits) < optcpus)
410 ++cpu_bits;
413 * By default assume simple hyper-threading
415 vkernel_b_arg = 1;
416 vkernel_B_arg = cpu_bits - vkernel_b_arg;
419 * [:lbits[:cbits]] override # of cpu bits
420 * for logical and core extraction, supplying
421 * defaults for any omission.
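*
* Illustrative example (numbers chosen only for illustration):
*	-n 8:1:2 emulates 8 cpus with 1 logical bit (2 threads/core)
*	and 2 core bits (4 cores/package).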
423 tok = strtok(NULL, ":");
424 if (tok != NULL) {
425 vkernel_b_arg = strtol(tok, NULL, 0);
426 vkernel_B_arg = cpu_bits - vkernel_b_arg;
428 /* :cbits argument */
429 tok = strtok(NULL, ":");
430 if (tok != NULL) {
431 vkernel_B_arg = strtol(tok, NULL, 0);
434 break;
435 case 'p':
436 pid_file = optarg;
437 break;
438 case 'U':
439 kernel_mem_readonly = 0;
440 break;
441 case 'h':
442 usage_help(true);
443 break;
444 case 'z':
445 prezeromem = 1;
446 break;
447 default:
448 usage_help(false);
453 * Check VMM presence
455 vsize = sizeof(vmm_enabled);
456 sysctlbyname("hw.vmm.enable", &vmm_enabled, &vsize, NULL, 0);
457 vmm_enabled = (vmm_enabled && !dflag);
459 writepid();
460 cpu_disable_intr();
461 if (vmm_enabled) {
462 /* use a MAP_ANON directly */
463 printf("VMM is available\n");
464 init_kern_memory_vmm();
465 } else {
466 printf("VMM is not available\n");
467 init_sys_memory(memImageFile);
468 init_kern_memory();
470 init_globaldata();
471 init_vkernel();
472 setrealcpu();
473 init_kqueue();
475 vmm_guest = VMM_GUEST_VKERNEL;
478 * Check TSC
480 vsize = sizeof(tsc_present);
481 sysctlbyname("hw.tsc_present", &tsc_present, &vsize, NULL, 0);
482 vsize = sizeof(tsc_invariant);
483 sysctlbyname("hw.tsc_invariant", &tsc_invariant, &vsize, NULL, 0);
484 vsize = sizeof(tsc_mpsync);
485 sysctlbyname("hw.tsc_mpsync", &tsc_mpsync, &vsize, NULL, 0);
486 vsize = sizeof(tsc_frequency);
487 sysctlbyname("hw.tsc_frequency", &tsc_frequency, &vsize, NULL, 0);
488 if (tsc_present)
489 cpu_feature |= CPUID_TSC;
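/*
 * tsc_oneus_approx is the TSC tick count per microsecond, rounded up;
 * the |1 keeps it non-zero even if hw.tsc_frequency read back as 0.
 */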
490 tsc_oneus_approx = ((tsc_frequency|1) + 999999) / 1000000;
493 * Check SSE
495 vsize = sizeof(supports_sse);
496 supports_sse = 0;
497 sysctlbyname("hw.instruction_sse", &supports_sse, &vsize, NULL, 0);
498 sysctlbyname("hw.mxcsr_mask", &mxcsr_mask, &msize, NULL, 0);
499 init_fpu(supports_sse);
500 if (supports_sse)
501 cpu_feature |= CPUID_SSE | CPUID_FXSR;
504 * We boot from the first installed disk.
506 if (bootOnDisk == 1) {
507 init_disk(diskFile, diskFlags, diskFileNum, VKD_DISK);
508 init_disk(cdFile, NULL, cdFileNum, VKD_CD);
509 } else {
510 init_disk(cdFile, NULL, cdFileNum, VKD_CD);
511 init_disk(diskFile, diskFlags, diskFileNum, VKD_DISK);
514 init_netif(netifFile, netifFileNum);
515 init_exceptions();
516 mi_startup();
517 /* NOT REACHED */
518 exit(EX_SOFTWARE);
521 /* SIGTERM handler */
522 static
523 void
524 handle_term(int sig)
526 kill(childpid, sig);
530 * Initialize system memory. This is the virtual kernel's 'RAM'.
532 static
533 void
534 init_sys_memory(char *imageFile)
536 struct stat st;
537 int i;
538 int fd;
541 * Figure out the system memory image size. If an image file was
542 * specified and -m was not specified, use the image file's size.
544 if (imageFile && stat(imageFile, &st) == 0 && Maxmem_bytes == 0)
545 Maxmem_bytes = (vm_paddr_t)st.st_size;
546 if ((imageFile == NULL || stat(imageFile, &st) < 0) &&
547 Maxmem_bytes == 0) {
548 errx(1, "Cannot create new memory file %s unless "
549 "system memory size is specified with -m",
550 imageFile);
551 /* NOT REACHED */
555 * Maxmem must be known at this time
557 if (Maxmem_bytes < 64 * 1024 * 1024 || (Maxmem_bytes & SEG_MASK)) {
558 errx(1, "Bad maxmem specification: 64MB minimum, "
559 "multiples of %dMB only",
560 SEG_SIZE / 1024 / 1024);
561 /* NOT REACHED */
565 * Generate an image file name if necessary, then open/create the
566 * file exclusively locked. Do not allow multiple virtual kernels
567 * to use the same image file.
569 * Don't iterate through a million files if we do not have write
570 * access to the directory; stop if our open() failed on a
571 * non-existent file. Otherwise opens can fail for any number of reasons.
573 if (imageFile == NULL) {
574 for (i = 0; i < 1000000; ++i) {
575 asprintf(&imageFile, "/var/vkernel/memimg.%06d", i);
576 fd = open(imageFile,
577 O_RDWR|O_CREAT|O_EXLOCK|O_NONBLOCK, 0644);
578 if (fd < 0 && stat(imageFile, &st) == 0) {
579 free(imageFile);
580 continue;
582 break;
584 } else {
585 fd = open(imageFile, O_RDWR|O_CREAT|O_EXLOCK|O_NONBLOCK, 0644);
587 fprintf(stderr, "Using memory file: %s\n", imageFile);
588 if (fd < 0 || fstat(fd, &st) < 0) {
589 err(1, "Unable to open/create %s", imageFile);
590 /* NOT REACHED */
594 * Truncate or extend the file as necessary. Clean out the contents
595 * of the file, we want it to be full of holes so we don't waste
596 * time reading in data from an old file that we no longer care
597 * about.
599 ftruncate(fd, 0);
600 ftruncate(fd, Maxmem_bytes);
602 MemImageFd = fd;
603 Maxmem = Maxmem_bytes >> PAGE_SHIFT;
604 physmem = Maxmem;
608 * Initialize kernel memory. This reserves kernel virtual memory by using
609 * MAP_VPAGETABLE
612 static
613 void
614 init_kern_memory(void)
616 void *base;
617 int i;
618 void *firstfree;
621 * Memory map our kernel virtual memory space. Note that the
622 * kernel image itself is not made part of this memory for the
623 * moment.
625 * The memory map must be segment-aligned so we can properly
626 * offset KernelPTD.
628 * If the system kernel has a different MAXDSIZ, it might not
629 * be possible to map kernel memory in its preferred location.
630 * Try a number of different locations.
633 base = mmap((void*)KERNEL_KVA_START, KERNEL_KVA_SIZE,
634 PROT_READ|PROT_WRITE|PROT_EXEC,
635 MAP_FILE|MAP_SHARED|MAP_VPAGETABLE|MAP_FIXED|MAP_TRYFIXED,
636 MemImageFd, (off_t)KERNEL_KVA_START);
638 if (base == MAP_FAILED) {
639 err(1, "Unable to mmap() kernel virtual memory!");
640 /* NOT REACHED */
642 madvise(base, KERNEL_KVA_SIZE, MADV_NOSYNC);
643 KvaStart = (vm_offset_t)base;
644 KvaSize = KERNEL_KVA_SIZE;
645 KvaEnd = KvaStart + KvaSize;
647 /* cannot use kprintf yet */
648 printf("KVM mapped at %p-%p\n", (void *)KvaStart, (void *)KvaEnd);
650 /* MAP_FILE? */
651 dmap_min_address = mmap(0, DMAP_SIZE, PROT_READ|PROT_WRITE,
652 MAP_NOCORE|MAP_NOSYNC|MAP_SHARED,
653 MemImageFd, 0);
654 if (dmap_min_address == MAP_FAILED) {
655 err(1, "Unable to mmap() kernel DMAP region!");
656 /* NOT REACHED */
660 * Prefault the memory. The vkernel is going to fault it all in
661 * anyway, and faults on the backing store itself are very expensive
662 * once we go SMP (contend a lot). So do it now.
664 if (prezeromem)
665 bzero(dmap_min_address, Maxmem_bytes);
668 * Bootstrap the kernel_pmap
670 firstfree = NULL;
671 pmap_bootstrap((vm_paddr_t *)&firstfree, (int64_t)base);
673 mcontrol(base, KERNEL_KVA_SIZE, MADV_SETMAP,
674 0 | VPTE_RW | VPTE_V);
677 * phys_avail[] represents unallocated physical memory. MI code
678 * will use phys_avail[] to create the vm_page array.
680 phys_avail[0].phys_beg = (vm_paddr_t)firstfree;
681 phys_avail[0].phys_beg = (phys_avail[0].phys_beg + PAGE_MASK) &
682 ~(vm_paddr_t)PAGE_MASK;
683 phys_avail[0].phys_end = Maxmem_bytes;
685 #if 0 /* JGV */
687 * (virtual_start, virtual_end) represent unallocated kernel virtual
688 * memory. MI code will create kernel_map using these parameters.
690 virtual_start = KvaStart + (long)firstfree;
691 virtual_start = (virtual_start + PAGE_MASK) & ~(vm_offset_t)PAGE_MASK;
692 virtual_end = KvaStart + KERNEL_KVA_SIZE;
693 #endif
696 * pmap_growkernel() will set the correct value.
698 kernel_vm_end = 0;
701 * Allocate space for process 0's UAREA.
703 proc0paddr = (void *)virtual_start;
704 for (i = 0; i < UPAGES; ++i) {
705 pmap_kenter_quick(virtual_start, phys_avail[0].phys_beg);
706 virtual_start += PAGE_SIZE;
707 phys_avail[0].phys_beg += PAGE_SIZE;
711 * crashdumpmap
713 crashdumpmap = virtual_start;
714 virtual_start += MAXDUMPPGS * PAGE_SIZE;
717 * msgbufp maps the system message buffer
719 assert((MSGBUF_SIZE & PAGE_MASK) == 0);
720 msgbufp = (void *)virtual_start;
721 for (i = 0; i < (MSGBUF_SIZE >> PAGE_SHIFT); ++i) {
722 pmap_kenter_quick(virtual_start, phys_avail[0].phys_beg);
723 virtual_start += PAGE_SIZE;
724 phys_avail[0].phys_beg += PAGE_SIZE;
726 msgbufinit(msgbufp, MSGBUF_SIZE);
729 * used by kern_memio for /dev/mem access
731 ptvmmap = (caddr_t)virtual_start;
732 virtual_start += PAGE_SIZE;
735 static
736 void
737 init_kern_memory_vmm(void)
739 int i;
740 void *firstfree;
741 struct vmm_guest_options options;
742 void *dmap_address;
744 KvaStart = (vm_offset_t)KERNEL_KVA_START;
745 KvaSize = KERNEL_KVA_SIZE;
746 KvaEnd = KvaStart + KvaSize;
748 Maxmem = Maxmem_bytes >> PAGE_SHIFT;
749 physmem = Maxmem;
751 if (Maxmem_bytes < 64 * 1024 * 1024 || (Maxmem_bytes & SEG_MASK)) {
752 errx(1, "Bad maxmem specification: 64MB minimum, "
753 "multiples of %dMB only",
754 SEG_SIZE / 1024 / 1024);
755 /* NOT REACHED */
758 /* Call the vmspace_create to allocate the internal
759 * vkernel structures. Won't do anything else (no new
760 * vmspace)
762 if (vmspace_create(NULL, 0, NULL) < 0)
763 panic("vmspace_create() failed");
767 * MAP_ANON the region of the VKERNEL physical memory
768 * (known as GPA - Guest Physical Address).
770 dmap_address = mmap(NULL, Maxmem_bytes,
771 PROT_READ|PROT_WRITE|PROT_EXEC,
772 MAP_ANON|MAP_SHARED, -1, 0);
773 if (dmap_address == MAP_FAILED) {
774 err(1, "Unable to mmap() RAM region!");
775 /* NOT REACHED */
777 if (prezeromem)
778 bzero(dmap_address, Maxmem_bytes);
780 /* Alloc a new stack in the lowmem */
781 vkernel_stack = mmap(NULL, KERNEL_STACK_SIZE,
782 PROT_READ|PROT_WRITE|PROT_EXEC,
783 MAP_ANON, -1, 0);
784 if (vkernel_stack == MAP_FAILED) {
785 err(1, "Unable to allocate stack\n");
789 * Bootstrap the kernel_pmap
791 firstfree = dmap_address;
792 dmap_min_address = NULL; /* VIRT == PHYS in the first 512G */
793 pmap_bootstrap((vm_paddr_t *)&firstfree, (uint64_t)KvaStart);
796 * Enter VMM mode
798 bzero(&options, sizeof(options));
799 options.guest_cr3 = (register_t) KPML4phys;
800 options.new_stack = (uint64_t) vkernel_stack + KERNEL_STACK_SIZE;
801 options.master = 1;
802 if (vmm_guest_ctl(VMM_GUEST_RUN, &options)) {
803 err(1, "Unable to enter VMM mode.");
807 * phys_avail[] represents unallocated physical memory. MI code
808 * will use phys_avail[] to create the vm_page array.
810 phys_avail[0].phys_beg = (vm_paddr_t)firstfree;
811 phys_avail[0].phys_beg = (phys_avail[0].phys_beg + PAGE_MASK) &
812 ~(vm_paddr_t)PAGE_MASK;
813 phys_avail[0].phys_end = (vm_paddr_t)dmap_address + Maxmem_bytes;
816 * pmap_growkernel() will set the correct value.
818 kernel_vm_end = 0;
821 * Allocate space for process 0's UAREA.
823 proc0paddr = (void *)virtual_start;
824 for (i = 0; i < UPAGES; ++i) {
825 pmap_kenter_quick(virtual_start, phys_avail[0].phys_beg);
826 virtual_start += PAGE_SIZE;
827 phys_avail[0].phys_beg += PAGE_SIZE;
831 * crashdumpmap
833 crashdumpmap = virtual_start;
834 virtual_start += MAXDUMPPGS * PAGE_SIZE;
837 * msgbufp maps the system message buffer
839 assert((MSGBUF_SIZE & PAGE_MASK) == 0);
840 msgbufp = (void *)virtual_start;
841 for (i = 0; i < (MSGBUF_SIZE >> PAGE_SHIFT); ++i) {
843 pmap_kenter_quick(virtual_start, phys_avail[0].phys_beg);
844 virtual_start += PAGE_SIZE;
845 phys_avail[0].phys_beg += PAGE_SIZE;
848 msgbufinit(msgbufp, MSGBUF_SIZE);
851 * used by kern_memio for /dev/mem access
853 ptvmmap = (caddr_t)virtual_start;
854 virtual_start += PAGE_SIZE;
856 printf("vmm: Hardware pagetable enabled for guest\n");
861 * Map the per-cpu globaldata for cpu #0. Allocate the space using
862 * virtual_start and phys_avail[0]
864 static
865 void
866 init_globaldata(void)
868 int i;
869 vm_paddr_t pa;
870 vm_offset_t va;
873 * Reserve enough KVA to cover possible cpus. This is a considerable
874 * amount of KVA since the privatespace structure includes two
875 * whole page table mappings.
877 virtual_start = (virtual_start + SEG_MASK) & ~(vm_offset_t)SEG_MASK;
878 CPU_prvspace = (void *)virtual_start;
879 virtual_start += sizeof(struct privatespace) * SMP_MAXCPU;
882 * Allocate enough physical memory to cover the mdglobaldata
883 * portion of the space and the idle stack and map the pages
884 * into KVA. For cpu #0 only.
886 for (i = 0; i < sizeof(struct mdglobaldata); i += PAGE_SIZE) {
887 pa = phys_avail[0].phys_beg;
888 va = (vm_offset_t)&CPU_prvspace[0].mdglobaldata + i;
889 pmap_kenter_quick(va, pa);
890 phys_avail[0].phys_beg += PAGE_SIZE;
892 for (i = 0; i < sizeof(CPU_prvspace[0].idlestack); i += PAGE_SIZE) {
893 pa = phys_avail[0].phys_beg;
894 va = (vm_offset_t)&CPU_prvspace[0].idlestack + i;
895 pmap_kenter_quick(va, pa);
896 phys_avail[0].phys_beg += PAGE_SIZE;
900 * Setup the %gs for cpu #0. The mycpu macro works after this
901 * point. Note that %fs is used by pthreads.
903 tls_set_gs(&CPU_prvspace[0], sizeof(struct privatespace));
908 * Initialize pool tokens and other necessary locks
910 static void
911 init_locks(void)
915 * Get the initial mplock with a count of 1 for the BSP.
916 * This uses a LOGICAL cpu ID, ie BSP == 0.
918 cpu_get_initial_mplock();
920 /* our token pool needs to work early */
921 lwkt_token_pool_init();
927 * Initialize very low level systems including thread0, proc0, etc.
929 static
930 void
931 init_vkernel(void)
933 struct mdglobaldata *gd;
935 gd = &CPU_prvspace[0].mdglobaldata;
936 bzero(gd, sizeof(*gd));
938 gd->mi.gd_curthread = &thread0;
939 thread0.td_gd = &gd->mi;
940 ncpus = 1;
941 ncpus_fit = 1; /* rounded up power of 2 */
942 /* ncpus_fit_mask are 0 */
943 init_param1();
944 gd->mi.gd_prvspace = &CPU_prvspace[0];
945 mi_gdinit(&gd->mi, 0);
946 cpu_gdinit(gd, 0);
947 mi_proc0init(&gd->mi, proc0paddr);
948 lwp0.lwp_md.md_regs = &proc0_tf;
950 init_locks();
951 cninit();
952 rand_initialize();
953 #if 0 /* #ifdef DDB */
954 kdb_init();
955 if (boothowto & RB_KDB)
956 Debugger("Boot flags requested debugger");
957 #endif
958 identcpu();
959 #if 0
960 initializecpu(); /* Initialize CPU registers */
961 #endif
962 init_param2((phys_avail[0].phys_end -
963 phys_avail[0].phys_beg) / PAGE_SIZE);
965 #if 0
967 * Map the message buffer
969 for (off = 0; off < round_page(MSGBUF_SIZE); off += PAGE_SIZE)
970 pmap_kenter((vm_offset_t)msgbufp + off, avail_end + off);
971 msgbufinit(msgbufp, MSGBUF_SIZE);
972 #endif
973 #if 0
974 thread0.td_pcb_cr3 ... MMU
975 lwp0.lwp_md.md_regs = &proc0_tf;
976 #endif
980 * Filesystem image paths for the virtual kernel are optional.
981 * If specified they each should point to a disk image,
982 * the first of which will become the root disk.
984 * The virtual kernel caches data from our 'disk' just like a normal kernel,
985 * so we do not really want the real kernel to cache the data too. Use
986 * O_DIRECT to remove the duplication.
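*
* For example (file names are illustrative only), "-r root.img -r data.img"
* attaches root.img as vkd0 (the root disk) and data.img as vkd1, while
* "-c install.iso" attaches a read-only vcd0.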
988 static
989 void
990 init_disk(char **diskExp, int *diskFlags, int diskFileNum, enum vkdisk_type type)
992 char *serno;
993 int i;
995 if (diskFileNum == 0)
996 return;
998 for (i=0; i < diskFileNum; i++){
999 char *fname;
1000 fname = diskExp[i];
1002 if (fname == NULL) {
1003 warnx("Invalid argument to '-r'");
1004 continue;
1007 * Check for a serial number for the virtual disk
1008 * passed from the command line.
1010 serno = fname;
1011 strsep(&serno, ":");
1013 if (DiskNum < VKDISK_MAX) {
1014 struct stat st;
1015 struct vkdisk_info *info = NULL;
1016 int fd;
1017 size_t l = 0;
1019 if (type == VKD_DISK)
1020 fd = open(fname, O_RDWR|O_DIRECT, 0644);
1021 else
1022 fd = open(fname, O_RDONLY|O_DIRECT, 0644);
1023 if (fd < 0 || fstat(fd, &st) < 0) {
1024 err(1, "Unable to open/create %s", fname);
1025 /* NOT REACHED */
1027 if (S_ISREG(st.st_mode) && (diskFlags[i] & 1) == 0) {
1028 if (flock(fd, LOCK_EX|LOCK_NB) < 0) {
1029 errx(1, "Disk image %s is already "
1030 "in use\n", fname);
1031 /* NOT REACHED */
1035 info = &DiskInfo[DiskNum];
1036 l = strlen(fname);
1038 info->unit = i;
1039 info->fd = fd;
1040 info->type = type;
1041 info->flags = diskFlags[i];
1042 memcpy(info->fname, fname, l);
1043 info->serno = NULL;
1044 if (serno) {
1045 if ((info->serno = malloc(SERNOLEN)) != NULL)
1046 strlcpy(info->serno, serno, SERNOLEN);
1047 else
1048 warnx("Couldn't allocate memory for the operation");
1051 if (DiskNum == 0) {
1052 if (type == VKD_CD) {
1053 rootdevnames[0] = "cd9660:vcd0";
1054 } else if (type == VKD_DISK) {
1055 rootdevnames[0] = "ufs:vkd0s0a";
1056 rootdevnames[1] = "ufs:vkd0s1a";
1060 DiskNum++;
1061 } else {
1062 warnx("vkd%d (%s) > VKDISK_MAX", DiskNum, fname);
1063 continue;
1068 static
1070 netif_set_tapflags(int tap_unit, int f, int s)
1072 struct ifreq ifr;
1073 int flags;
1075 bzero(&ifr, sizeof(ifr));
1077 snprintf(ifr.ifr_name, sizeof(ifr.ifr_name), "tap%d", tap_unit);
1078 if (ioctl(s, SIOCGIFFLAGS, &ifr) < 0) {
1079 warn("tap%d: ioctl(SIOCGIFFLAGS) failed", tap_unit);
1080 return -1;
1084 * Adjust if_flags
1086 * If the flags are already set/cleared, then we return
1087 * immediately to avoid extra syscalls
1089 flags = (ifr.ifr_flags & 0xffff) | (ifr.ifr_flagshigh << 16);
1090 if (f < 0) {
1091 /* Turn off flags */
1092 f = -f;
1093 if ((flags & f) == 0)
1094 return 0;
1095 flags &= ~f;
1096 } else {
1097 /* Turn on flags */
1098 if (flags & f)
1099 return 0;
1100 flags |= f;
1104 * Fix up ifreq.ifr_name, since it may be trashed
1105 * in previous ioctl(SIOCGIFFLAGS)
1107 snprintf(ifr.ifr_name, sizeof(ifr.ifr_name), "tap%d", tap_unit);
1109 ifr.ifr_flags = flags & 0xffff;
1110 ifr.ifr_flagshigh = flags >> 16;
1111 if (ioctl(s, SIOCSIFFLAGS, &ifr) < 0) {
1112 warn("tap%d: ioctl(SIOCSIFFLAGS) failed", tap_unit);
1113 return -1;
1115 return 0;
1118 static
1120 netif_set_tapaddr(int tap_unit, in_addr_t addr, in_addr_t mask, int s)
1122 struct ifaliasreq ifra;
1123 struct sockaddr_in *in;
1125 bzero(&ifra, sizeof(ifra));
1126 snprintf(ifra.ifra_name, sizeof(ifra.ifra_name), "tap%d", tap_unit);
1128 /* Setup address */
1129 in = (struct sockaddr_in *)&ifra.ifra_addr;
1130 in->sin_family = AF_INET;
1131 in->sin_len = sizeof(*in);
1132 in->sin_addr.s_addr = addr;
1134 if (mask != 0) {
1135 /* Setup netmask */
1136 in = (struct sockaddr_in *)&ifra.ifra_mask;
1137 in->sin_len = sizeof(*in);
1138 in->sin_addr.s_addr = mask;
1141 if (ioctl(s, SIOCAIFADDR, &ifra) < 0) {
1142 warn("tap%d: ioctl(SIOCAIFADDR) failed", tap_unit);
1143 return -1;
1145 return 0;
1148 static
1150 netif_add_tap2brg(int tap_unit, const char *ifbridge, int s)
1152 struct ifbreq ifbr;
1153 struct ifdrv ifd;
1155 bzero(&ifbr, sizeof(ifbr));
1156 snprintf(ifbr.ifbr_ifsname, sizeof(ifbr.ifbr_ifsname),
1157 "tap%d", tap_unit);
1159 bzero(&ifd, sizeof(ifd));
1160 strlcpy(ifd.ifd_name, ifbridge, sizeof(ifd.ifd_name));
1161 ifd.ifd_cmd = BRDGADD;
1162 ifd.ifd_len = sizeof(ifbr);
1163 ifd.ifd_data = &ifbr;
1165 if (ioctl(s, SIOCSDRVSPEC, &ifd) < 0) {
1167 * 'errno == EEXIST' means that the tap(4) is already
1168 * a member of the bridge(4)
1170 if (errno != EEXIST) {
1171 warn("ioctl(%s, SIOCSDRVSPEC) failed", ifbridge);
1172 return -1;
1175 return 0;
1178 #define TAPDEV_OFLAGS (O_RDWR | O_NONBLOCK)
1181 * Locate the first unused tap(4) device file if auto mode is requested,
1182 * or open the user supplied device file, and bring up the corresponding
1183 * tap(4) interface.
1185 * NOTE: Only the tap(4) device file is currently supported
1187 static
1189 netif_open_tap(const char *netif, int *tap_unit, int s)
1191 char tap_dev[MAXPATHLEN];
1192 int tap_fd, failed;
1193 struct stat st;
1194 char *dname;
1196 *tap_unit = -1;
1198 if (strcmp(netif, "auto") == 0) {
1200 * Find first unused tap(4) device file
1202 tap_fd = open("/dev/tap", TAPDEV_OFLAGS);
1203 if (tap_fd < 0) {
1204 warnc(errno, "Unable to find a free tap(4)");
1205 return -1;
1207 } else {
1209 * User supplied tap(4) device file or unix socket.
1211 if (netif[0] == '/') /* Absolute path */
1212 strlcpy(tap_dev, netif, sizeof(tap_dev));
1213 else
1214 snprintf(tap_dev, sizeof(tap_dev), "/dev/%s", netif);
1216 tap_fd = open(tap_dev, TAPDEV_OFLAGS);
1219 * If we cannot open it normally, try to connect to it as a unix-domain socket.
1221 if (tap_fd < 0)
1222 tap_fd = unix_connect(tap_dev);
1224 if (tap_fd < 0) {
1225 warn("Unable to open %s", tap_dev);
1226 return -1;
1231 * Check whether the device file is a tap(4)
1233 if (fstat(tap_fd, &st) < 0) {
1234 failed = 1;
1235 } else if (S_ISCHR(st.st_mode)) {
1236 dname = fdevname(tap_fd);
1237 if (dname)
1238 dname = strstr(dname, "tap");
1239 if (dname) {
1241 * Bring up the corresponding tap(4) interface
1243 *tap_unit = strtol(dname + 3, NULL, 10);
1244 printf("TAP UNIT %d\n", *tap_unit);
1245 if (netif_set_tapflags(*tap_unit, IFF_UP, s) == 0)
1246 failed = 0;
1247 else
1248 failed = 1;
1249 } else {
1250 failed = 1;
1252 } else if (S_ISSOCK(st.st_mode)) {
1254 * Special socket connection (typically to vknet). We
1255 * do not have to do anything.
1257 failed = 0;
1258 } else {
1259 failed = 1;
1262 if (failed) {
1263 warnx("%s is not a tap(4) device or socket", tap_dev);
1264 close(tap_fd);
1265 tap_fd = -1;
1266 *tap_unit = -1;
1268 return tap_fd;
1271 static int
1272 unix_connect(const char *path)
1274 struct sockaddr_un sunx;
1275 int len;
1276 int net_fd;
1277 int sndbuf = 262144;
1278 struct stat st;
1280 snprintf(sunx.sun_path, sizeof(sunx.sun_path), "%s", path);
1281 len = offsetof(struct sockaddr_un, sun_path[strlen(sunx.sun_path)]);
1282 ++len; /* include nul */
1283 sunx.sun_family = AF_UNIX;
1284 sunx.sun_len = len;
1286 net_fd = socket(AF_UNIX, SOCK_SEQPACKET, 0);
1287 if (net_fd < 0)
1288 return(-1);
1289 if (connect(net_fd, (void *)&sunx, len) < 0) {
1290 close(net_fd);
1291 return(-1);
1293 setsockopt(net_fd, SOL_SOCKET, SO_SNDBUF, &sndbuf, sizeof(sndbuf));
1294 if (fstat(net_fd, &st) == 0)
1295 printf("Network socket buffer: %d bytes\n", st.st_blksize);
1296 fcntl(net_fd, F_SETFL, O_NONBLOCK);
1297 return(net_fd);
1300 #undef TAPDEV_MAJOR
1301 #undef TAPDEV_MINOR
1302 #undef TAPDEV_OFLAGS
1305 * The following syntax forms are supported:
1306 * 1) x.x.x.x tap(4)'s address is x.x.x.x
1308 * 2) x.x.x.x/z tap(4)'s address is x.x.x.x
1309 * tap(4)'s netmask len is z
1311 * 3) x.x.x.x:y.y.y.y tap(4)'s address is x.x.x.x
1312 * pseudo netif's address is y.y.y.y
1314 * 4) x.x.x.x:y.y.y.y/z tap(4)'s address is x.x.x.x
1315 * pseudo netif's address is y.y.y.y
1316 * tap(4) and pseudo netif's netmask len are z
1318 * 5) bridgeX tap(4) will be added to bridgeX
1320 * 6) bridgeX:y.y.y.y tap(4) will be added to bridgeX
1321 * pseudo netif's address is y.y.y.y
1323 * 7) bridgeX:y.y.y.y/z tap(4) will be added to bridgeX
1324 * pseudo netif's address is y.y.y.y
1325 * pseudo netif's netmask len is z
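*
* Example (addresses are illustrative only):
*	-I auto:10.0.0.1:10.0.0.2/24
* picks the first free tap(4), assigns it 10.0.0.1, and gives the
* vkernel's pseudo netif 10.0.0.2 with a /24 netmask (form 4 above).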
1327 static
1329 netif_init_tap(int tap_unit, in_addr_t *addr, in_addr_t *mask, int s)
1331 in_addr_t tap_addr, netmask, netif_addr;
1332 int next_netif_addr;
1333 char *tok, *masklen_str, *ifbridge;
1335 *addr = 0;
1336 *mask = 0;
1338 tok = strtok(NULL, ":/");
1339 if (tok == NULL) {
1341 * Nothing special, simply use tap(4) as backend
1343 return 0;
1346 if (inet_pton(AF_INET, tok, &tap_addr) > 0) {
1348 * tap(4)'s address is supplied
1350 ifbridge = NULL;
1353 * If there is next token, then it may be pseudo
1354 * netif's address or netmask len for tap(4)
1356 next_netif_addr = 0;
1357 } else {
1359 * Not tap(4)'s address, assume it as a bridge(4)
1360 * iface name
1362 tap_addr = 0;
1363 ifbridge = tok;
1366 * If there is next token, then it must be pseudo
1367 * netif's address
1369 next_netif_addr = 1;
1372 netmask = netif_addr = 0;
1374 tok = strtok(NULL, ":/");
1375 if (tok == NULL)
1376 goto back;
1378 if (inet_pton(AF_INET, tok, &netif_addr) <= 0) {
1379 if (next_netif_addr) {
1380 warnx("Invalid pseudo netif address: %s", tok);
1381 return -1;
1383 netif_addr = 0;
1386 * Current token is not address, then it must be netmask len
1388 masklen_str = tok;
1389 } else {
1391 * Current token is pseudo netif address, if there is next token
1392 * it must be netmask len
1394 masklen_str = strtok(NULL, "/");
1397 /* Calculate netmask */
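/*
 * e.g. a masklen of 24 yields the host-order mask 0xffffff00, which
 * htonl() then stores in network byte order.
 */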
1398 if (masklen_str != NULL) {
1399 u_long masklen;
1401 masklen = strtoul(masklen_str, NULL, 10);
1402 if (masklen < 32 && masklen > 0) {
1403 netmask = htonl(~((1LL << (32 - masklen)) - 1)
1404 & 0xffffffff);
1405 } else {
1406 warnx("Invalid netmask len: %lu", masklen);
1407 return -1;
1411 /* Make sure there is no more token left */
1412 if (strtok(NULL, ":/") != NULL) {
1413 warnx("Invalid argument to '-I'");
1414 return -1;
1417 back:
1418 if (tap_unit < 0) {
1419 /* Do nothing */
1420 } else if (ifbridge == NULL) {
1421 /* Set tap(4) address/netmask */
1422 if (netif_set_tapaddr(tap_unit, tap_addr, netmask, s) < 0)
1423 return -1;
1424 } else {
1425 /* Tie tap(4) to bridge(4) */
1426 if (netif_add_tap2brg(tap_unit, ifbridge, s) < 0)
1427 return -1;
1430 *addr = netif_addr;
1431 *mask = netmask;
1432 return 0;
1436 * NetifInfo[] will be filled for pseudo netif initialization.
1437 * NetifNum will be bumped to reflect the number of valid entries
1438 * in NetifInfo[].
1440 static
1441 void
1442 init_netif(char *netifExp[], int netifExpNum)
1444 int i, s;
1445 char *tmp;
1447 if (netifExpNum == 0)
1448 return;
1450 s = socket(AF_INET, SOCK_DGRAM, 0); /* for ioctl(SIOC) */
1451 if (s < 0)
1452 return;
1454 for (i = 0; i < netifExpNum; ++i) {
1455 struct vknetif_info *info;
1456 in_addr_t netif_addr, netif_mask;
1457 int tap_fd, tap_unit;
1458 char *netif;
1460 /* Extract MAC address if there is one */
1461 tmp = netifExp[i];
1462 strsep(&tmp, "=");
1464 netif = strtok(netifExp[i], ":");
1465 if (netif == NULL) {
1466 warnx("Invalid argument to '-I'");
1467 continue;
1471 * Open tap(4) device file and bring up the
1472 * corresponding interface
1474 tap_fd = netif_open_tap(netif, &tap_unit, s);
1475 if (tap_fd < 0)
1476 continue;
1479 * Initialize tap(4) and get address/netmask
1480 * for pseudo netif
1482 * NB: The remainder of netifExp[i] is passed to
1483 * netif_init_tap() implicitly through the strtok() state.
1485 if (netif_init_tap(tap_unit, &netif_addr, &netif_mask, s) < 0) {
1487 * NB: Closing tap(4) device file will bring
1488 * down the corresponding interface
1490 close(tap_fd);
1491 continue;
1494 info = &NetifInfo[NetifNum];
1495 bzero(info, sizeof(*info));
1496 info->tap_fd = tap_fd;
1497 info->tap_unit = tap_unit;
1498 info->netif_addr = netif_addr;
1499 info->netif_mask = netif_mask;
1501 * If tmp isn't NULL it means a MAC could have been
1502 * specified so attempt to convert it.
1503 * Setting enaddr to NULL will tell vke_attach() we
1504 * need a pseudo-random MAC address.
1506 if (tmp != NULL) {
1507 if ((info->enaddr = malloc(ETHER_ADDR_LEN)) == NULL)
1508 warnx("Couldn't allocate memory for the operation");
1509 else {
1510 if ((kether_aton(tmp, info->enaddr)) == NULL) {
1511 free(info->enaddr);
1512 info->enaddr = NULL;
1517 NetifNum++;
1518 if (NetifNum >= VKNETIF_MAX) /* XXX will this happen? */
1519 break;
1521 close(s);
1525 * Create the pid file and leave it open and locked while the vkernel is
1526 * running. This allows a script to use /usr/bin/lockf to probe whether
1527 * a vkernel is still running (so as not to accidentally kill an unrelated
1528 * process from a stale pid file).
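*
* A probe might look like this (path and exact flags are a sketch, not
* taken from this source):
*	lockf -ks -t 0 /var/run/vkernel0.pid echo "vkernel not running"
* The echo only runs if the pid file is not currently locked by a live
* vkernel.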
1530 static
1531 void
1532 writepid(void)
1534 char buf[32];
1535 int fd;
1537 if (pid_file != NULL) {
1538 snprintf(buf, sizeof(buf), "%ld\n", (long)getpid());
1539 fd = open(pid_file, O_RDWR|O_CREAT|O_EXLOCK|O_NONBLOCK, 0666);
1540 if (fd < 0) {
1541 if (errno == EWOULDBLOCK) {
1542 perror("Failed to lock pidfile, "
1543 "vkernel already running");
1544 } else {
1545 perror("Failed to create pidfile");
1547 exit(EX_SOFTWARE);
1549 ftruncate(fd, 0);
1550 write(fd, buf, strlen(buf));
1551 /* leave the file open to maintain the lock */
1555 static
1556 void
1557 cleanpid( void )
1559 if (pid_file != NULL) {
1560 if (unlink(pid_file) < 0)
1561 perror("Warning: couldn't remove pidfile");
1565 static
1566 void
1567 usage_err(const char *ctl, ...)
1569 va_list va;
1571 va_start(va, ctl);
1572 vfprintf(stderr, ctl, va);
1573 va_end(va);
1574 fprintf(stderr, "\n");
1575 exit(EX_USAGE);
1578 static
1579 void
1580 usage_help(_Bool help)
1582 fprintf(stderr, "Usage: %s [-hsUvdtT] [-c file] [-e name=value:name=value:...]\n"
1583 "\t[-i file] [-I interface[:address1[:address2][/netmask]]] [-l cpulock]\n"
1584 "\t[-m size] [-n numcpus[:lbits[:cbits]]]\n"
1585 "\t[-p file] [-r file]\n", save_av[0]);
1587 if (help)
1588 fprintf(stderr, "\nArguments:\n"
1589 "\t-c\tSpecify a readonly CD-ROM image file to be used by the kernel.\n"
1590 "\t-e\tSpecify an environment to be used by the kernel.\n"
1591 "\t-h\tThis list of options.\n"
1592 "\t-i\tSpecify a memory image file to be used by the virtual kernel.\n"
1593 "\t-I\tCreate a virtual network device.\n"
1594 "\t-l\tSpecify which, if any, real CPUs to lock virtual CPUs to.\n"
1595 "\t-m\tSpecify the amount of memory to be used by the kernel in bytes.\n"
1596 "\t-n\tSpecify the number of CPUs and the topology you wish to emulate:\n"
1597 "\t\t\tnumcpus - number of cpus\n"
1598 "\t\t\tlbits - specify the number of bits within APICID(=CPUID)\n"
1599 "\t\t\t needed for representing the logical ID.\n"
1600 "\t\t\t Controls the number of threads/core:\n"
1601 "\t\t\t (0 bits - 1 thread, 1 bit - 2 threads).\n"
1602 "\t\t\tcbits - specify the number of bits within APICID(=CPUID)\n"
1603 "\t\t\t needed for representing the core ID.\n"
1604 "\t\t\t Controls the number of cores/package:\n"
1605 "\t\t\t (0 bits - 1 core, 1 bit - 2 cores).\n"
1606 "\t-p\tSpecify a file in which to store the process ID.\n"
1607 "\t-r\tSpecify a R/W disk image file, iterates vkd0..n\n"
1608 "\t-R\tSpecify a COW disk image file, iterates vkd0..n\n"
1609 "\t-s\tBoot into single-user mode.\n"
1610 "\t-t\tUse a precise host timer when calculating clock values.\n"
1611 "\t-T\tDisallow use of the TSC cpu timer as a clock.\n"
1612 "\t-U\tEnable writing to kernel memory and module loading.\n"
1613 "\t-v\tTurn on verbose booting.\n");
1615 exit(EX_USAGE);
1618 void
1619 cpu_smp_stopped(void)
1623 void
1624 cpu_reset(void)
1626 kprintf("cpu reset, rebooting vkernel\n");
1627 closefrom(3);
1628 cleanpid();
1629 exit(EX_VKERNEL_REBOOT);
1632 void
1633 cpu_halt(void)
1635 kprintf("cpu halt, exiting vkernel\n");
1636 cleanpid();
1637 exit(EX_OK);
1640 void
1641 setrealcpu(void)
1643 switch(lwp_cpu_lock) {
1644 case LCL_PER_CPU:
1645 if (bootverbose)
1646 kprintf("Locking CPU%d to real cpu %d\n",
1647 mycpuid, next_cpu);
1648 usched_set(getpid(), USCHED_SET_CPU, &next_cpu, sizeof(next_cpu));
1649 next_cpu++;
1650 if (next_cpu >= real_ncpus)
1651 next_cpu = 0;
1652 break;
1653 case LCL_SINGLE_CPU:
1654 if (bootverbose)
1655 kprintf("Locking CPU%d to real cpu %d\n",
1656 mycpuid, next_cpu);
1657 usched_set(getpid(), USCHED_SET_CPU, &next_cpu, sizeof(next_cpu));
1658 break;
1659 default:
1660 /* do not map virtual cpus to real cpus */
1661 break;
1666 * Allocate and free memory for module loading. The loaded module
1667 * has to be placed somewhere near the current kernel binary load
1668 * point or the relocations will not work.
1670 * I'm not sure why this isn't working.
1673 vkernel_module_memory_alloc(vm_offset_t *basep, size_t bytes)
1675 #if 1
1676 size_t xtra;
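	/*
	 * xtra pads the current break (sbrk(0)) up to the next page
	 * boundary so the module memory handed back is page-aligned.
	 */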
1677 xtra = (PAGE_SIZE - (vm_offset_t)sbrk(0)) & PAGE_MASK;
1678 *basep = (vm_offset_t)sbrk(xtra + bytes) + xtra;
1679 bzero((void *)*basep, bytes);
1680 #else
1681 *basep = (vm_offset_t)mmap((void *)0x000000000, bytes,
1682 PROT_READ|PROT_WRITE|PROT_EXEC,
1683 MAP_ANON|MAP_SHARED, -1, 0);
1684 if ((void *)*basep == MAP_FAILED)
1685 return ENOMEM;
1686 #endif
1687 return 0;
1690 void
1691 vkernel_module_memory_free(vm_offset_t base, size_t bytes)
1693 #if 0
1694 #if 0
1695 munmap((void *)base, bytes);
1696 #endif
1697 #endif