A couple of fixes to the x86_64 boot code
[newos.git] / boot / pc / x86_64 / stage2.c
blob2978fa1f8c384a7ec9937f6990c20f8590faeed2
1 /*
2 ** Copyright 2001-2004, Travis Geiselbrecht. All rights reserved.
3 ** Distributed under the terms of the NewOS License.
4 */
5 #include <boot/bootdir.h>
6 #include <boot/stage2.h>
7 #include "stage2_priv.h"
8 #include "vesa.h"
9 #include "int86.h"
10 #include "multiboot.h"
12 #include <string.h>
13 #include <stdarg.h>
14 #include <stdio.h>
15 #include <newos/elf64.h>
// we're running out of the first 'file' contained in the bootdir, which is
// a set of binaries and data packed back to back, described by an array
// of boot_entry structures at the beginning. The load address is fixed.
#define BOOTDIR_ADDR 0x400000
static const boot_entry *bootdir = (boot_entry*)BOOTDIR_ADDR;

// stick the kernel arguments in a pseudo-random page that will be mapped
// at least during the call into the kernel. The kernel should copy the
// data out and unmap the page.
kernel_args *ka = (kernel_args *)0x20000;

// next paddr to allocate; bumped by one page at a time as the loader hands out memory
addr_t next_paddr;

// needed for message output: VGA text-mode framebuffer and current cursor cell
static unsigned short *kScreenBase = (unsigned short*) 0xb8000;
static unsigned screenOffset = 0;

// rdtsc -> microseconds conversion factor, filled in by calculate_cpu_conversion_factor()
unsigned int cv_factor = 0;

// size of bootdir in pages
static unsigned int bootdir_pages = 0;

// function decls for this module
static void calculate_cpu_conversion_factor(void);
static void load_elf_image(void *data,
	addr_range *ar0, addr_range *ar1, addr_t *start_addr, addr_range *dynamic_section);
static void sort_addr_range(addr_range *range, int count);
// memory structure returned by int 0x15, ax 0xe820 (one BIOS E820 memory-map entry)
struct emem_struct {
	uint64 base_addr;	// physical start of the region
	uint64 length;		// length in bytes
	uint64 type;		// 1 == usable RAM (see E820 type codes)
	uint64 filler;		// pad the entry to 32 bytes
};
// called by the stage1 bootloader.
// State:
//   long mode (64bit)
//   mmu enabled, first 16MB identity mapped
//   stack somewhere below 1 MB
//   supervisor mode
//
// Loads the kernel ELF image out of the bootdir, sets up a kernel stack,
// fresh GDT/IDT, records the memory map in the kernel_args page, and
// finally jumps to the kernel entry point (never returns).
void stage2_main(void *multiboot_info, unsigned int memsize, void *extended_mem_block, unsigned int extended_mem_count)
{
	unsigned int *idt;
	unsigned int *gdt;
	addr_t kernel_base;
	addr_t next_vaddr;
	unsigned int i;
	addr_t kernel_entry;

	asm("cld");			// Ain't nothing but a GCC thang.
	asm("fninit");		// initialize floating point unit

	dprintf("stage2 bootloader entry.\n");

	dump_multiboot(multiboot_info);

	// calculate the conversion factor that translates rdtsc time to real microseconds
	calculate_cpu_conversion_factor();

	// calculate how big the bootdir is so we know where we can start grabbing pages
	{
		int entry;
		for (entry = 0; entry < BOOTDIR_MAX_ENTRIES; entry++) {
			if (bootdir[entry].be_type == BE_TYPE_NONE)
				break;

			bootdir_pages += bootdir[entry].be_size;	// be_size is in pages
		}
	}

	dprintf("bootdir is %d pages long\n", bootdir_pages);

	// tell the kernel where the bootdir lives
	ka->bootdir_addr.start = (unsigned long)bootdir;
	ka->bootdir_addr.size = bootdir_pages * PAGE_SIZE;

	// physical allocation starts just past the end of the bootdir
	next_paddr = BOOTDIR_ADDR + bootdir_pages * PAGE_SIZE;

#if 0
	if(in_vesa) {
		//struct VBEInfoBlock *info = (struct VBEInfoBlock *)vesa_ptr;
		struct VBEModeInfoBlock *mode_info = (struct VBEModeInfoBlock *)(vesa_ptr + 0x200);

		ka->fb.enabled = 1;
		ka->fb.x_size = mode_info->x_resolution;
		ka->fb.y_size = mode_info->y_resolution;
		ka->fb.bit_depth = mode_info->bits_per_pixel;
		ka->fb.red_mask_size = mode_info->red_mask_size;
		ka->fb.red_field_position = mode_info->red_field_position;
		ka->fb.green_mask_size = mode_info->green_mask_size;
		ka->fb.green_field_position = mode_info->green_field_position;
		ka->fb.blue_mask_size = mode_info->blue_mask_size;
		ka->fb.blue_field_position = mode_info->blue_field_position;
		ka->fb.reserved_mask_size = mode_info->reserved_mask_size;
		ka->fb.reserved_field_position = mode_info->reserved_field_position;
		ka->fb.mapping.start = mode_info->phys_base_ptr;
		ka->fb.mapping.size = ka->fb.x_size * ka->fb.y_size * ((ka->fb.bit_depth+7)/8);
		ka->fb.already_mapped = 0;
	} else {
		ka->fb.enabled = 0;
	}
#endif

	mmu_init(ka);

	// load the kernel (1st entry in the bootdir)
	load_elf_image((void *)((uint64)bootdir[1].be_offset * PAGE_SIZE + BOOTDIR_ADDR),
		&ka->kernel_seg0_addr, &ka->kernel_seg1_addr, &kernel_entry, &ka->kernel_dynamic_section_addr);

	// find the footprint of the kernel: next_vaddr is the first free virtual page
	// past the highest loaded segment
	if (ka->kernel_seg1_addr.size > 0)
		next_vaddr = ROUNDUP(ka->kernel_seg1_addr.start + ka->kernel_seg1_addr.size, PAGE_SIZE);
	else
		next_vaddr = ROUNDUP(ka->kernel_seg0_addr.start + ka->kernel_seg0_addr.size, PAGE_SIZE);

	if (ka->kernel_seg1_addr.size > 0) {
		kernel_base = ROUNDOWN(min(ka->kernel_seg0_addr.start, ka->kernel_seg1_addr.start), PAGE_SIZE);
	} else {
		kernel_base = ROUNDOWN(ka->kernel_seg0_addr.start, PAGE_SIZE);
	}

	// map in a kernel stack just past the kernel image
	// NOTE(review): STACK_SIZE appears to be counted in pages here — confirm against stage2_priv.h
	ka->cpu_kstack[0].start = next_vaddr;
	for (i=0; i<STACK_SIZE; i++) {
		addr_t paddr = next_paddr;
		next_paddr += PAGE_SIZE;

		mmu_map_page(next_vaddr, paddr);
		next_vaddr += PAGE_SIZE;
	}
	ka->cpu_kstack[0].size = next_vaddr - ka->cpu_kstack[0].start;

	dprintf("new stack at 0x%lx to 0x%lx\n", ka->cpu_kstack[0].start, ka->cpu_kstack[0].start + ka->cpu_kstack[0].size);

	// set up a new gdt
	{
		struct gdt_idt_descr gdt_descr;

		// find a new gdt (grab a fresh physical page)
		gdt = (unsigned int *)next_paddr;
		ka->arch_args.phys_gdt = (addr_t)gdt;
		next_paddr += PAGE_SIZE;

		// put segment descriptors in it
		gdt[0] = 0;					// null descriptor (required first entry)
		gdt[1] = 0;
		gdt[2] = 0x00000000;		// seg 0x8 -- ring 0, 64bit code
		gdt[3] = 0x00af9a00;

		// map the gdt into virtual space
		mmu_map_page(next_vaddr, (addr_t)gdt);
		ka->arch_args.vir_gdt = (addr_t)next_vaddr;
		next_vaddr += PAGE_SIZE;

		// load the GDT
		gdt_descr.a = GDT_LIMIT - 1;
		gdt_descr.b = (unsigned int *)ka->arch_args.vir_gdt;

		asm("lgdt %0;"
			: : "m" (gdt_descr));
	}

	// set up a new idt
	{
		struct gdt_idt_descr idt_descr;

		// find a new idt (grab a fresh physical page)
		idt = (unsigned int *)next_paddr;
		ka->arch_args.phys_idt = (addr_t)idt;
		next_paddr += PAGE_SIZE;

		// clear it out: no handlers yet, the kernel installs its own
		for(i=0; i<IDT_LIMIT/4; i++) {
			idt[i] = 0;
		}

		// map the idt into virtual space
		mmu_map_page(next_vaddr, (addr_t)idt);
		ka->arch_args.vir_idt = (addr_t)next_vaddr;
		next_vaddr += PAGE_SIZE;

		// load the idt
		idt_descr.a = IDT_LIMIT - 1;
		idt_descr.b = (unsigned int *)ka->arch_args.vir_idt;

		asm("lidt %0;"
			: : "m" (idt_descr));
	}

	// mark memory that we know is used (bootdir + everything we allocated above)
	ka->phys_alloc_range[0].start = BOOTDIR_ADDR;
	ka->phys_alloc_range[0].size = next_paddr - BOOTDIR_ADDR;
	ka->num_phys_alloc_ranges = 1;

	// figure out the memory map
	fill_ka_memranges(multiboot_info);

#if 0
	if(extended_mem_count > 0) {
		struct emem_struct *buf = (struct emem_struct *)extended_mem_block;
		unsigned int i;

		ka->num_phys_mem_ranges = 0;

		for(i = 0; i < extended_mem_count; i++) {
			if(buf[i].type == 1) {
				// round everything up to page boundaries, exclusive of pages it partially occupies
				buf[i].length -= (buf[i].base_addr % PAGE_SIZE) ? (PAGE_SIZE - (buf[i].base_addr % PAGE_SIZE)) : 0;
				buf[i].base_addr = ROUNDUP(buf[i].base_addr, PAGE_SIZE);
				buf[i].length = ROUNDOWN(buf[i].length, PAGE_SIZE);

				// this is mem we can use
				if(ka->num_phys_mem_ranges == 0) {
					ka->phys_mem_range[0].start = (addr_t)buf[i].base_addr;
					ka->phys_mem_range[0].size = (addr_t)buf[i].length;
					ka->num_phys_mem_ranges++;
				} else {
					// we might have to extend the previous hole
					addr_t previous_end = ka->phys_mem_range[ka->num_phys_mem_ranges-1].start + ka->phys_mem_range[ka->num_phys_mem_ranges-1].size;
					if(previous_end <= buf[i].base_addr &&
						((buf[i].base_addr - previous_end) < 0x100000)) {
						// extend the previous buffer
						ka->phys_mem_range[ka->num_phys_mem_ranges-1].size +=
							(buf[i].base_addr - previous_end) +
							buf[i].length;

						// mark the gap between the two allocated ranges in use
						ka->phys_alloc_range[ka->num_phys_alloc_ranges].start = previous_end;
						ka->phys_alloc_range[ka->num_phys_alloc_ranges].size = buf[i].base_addr - previous_end;
						ka->num_phys_alloc_ranges++;
					}
				}
			}
		}
	} else {
		// we dont have an extended map, assume memory is contiguously mapped at 0x0
		ka->phys_mem_range[0].start = 0;
		ka->phys_mem_range[0].size = memsize;
		ka->num_phys_mem_ranges = 1;

		// mark the bios area allocated
		ka->phys_alloc_range[ka->num_phys_alloc_ranges].start = 0x9f000; // 640k - 1 page
		ka->phys_alloc_range[ka->num_phys_alloc_ranges].size = 0x61000;
		ka->num_phys_alloc_ranges++;
	}
#endif

	// save the memory we've virtually allocated (for the kernel and other stuff)
	ka->virt_alloc_range[0].start = kernel_base;
	ka->virt_alloc_range[0].size = next_vaddr - kernel_base;
	ka->num_virt_alloc_ranges = 1;

	// sort the address ranges
	sort_addr_range(ka->phys_mem_range, ka->num_phys_mem_ranges);
	sort_addr_range(ka->phys_alloc_range, ka->num_phys_alloc_ranges);
	sort_addr_range(ka->virt_alloc_range, ka->num_virt_alloc_ranges);

#if 1
	{
		unsigned int i;

		dprintf("phys memory ranges:\n");
		for(i=0; i < ka->num_phys_mem_ranges; i++) {
			dprintf(" base 0x%08lx, length 0x%08lx\n", ka->phys_mem_range[i].start, ka->phys_mem_range[i].size);
		}

		dprintf("allocated phys memory ranges:\n");
		for(i=0; i < ka->num_phys_alloc_ranges; i++) {
			dprintf(" base 0x%08lx, length 0x%08lx\n", ka->phys_alloc_range[i].start, ka->phys_alloc_range[i].size);
		}

		dprintf("allocated virt memory ranges:\n");
		for(i=0; i < ka->num_virt_alloc_ranges; i++) {
			dprintf(" base 0x%08lx, length 0x%08lx\n", ka->virt_alloc_range[i].start, ka->virt_alloc_range[i].size);
		}
	}
#endif

	// save the kernel args
	ka->arch_args.system_time_cv_factor = cv_factor;
	ka->str = NULL;
	ka->num_cpus = 1;
#if 0
	dprintf("kernel args at 0x%x\n", ka);
	dprintf("pgdir = 0x%x\n", ka->pgdir);
	dprintf("pgtables[0] = 0x%x\n", ka->pgtables[0]);
	dprintf("phys_idt = 0x%x\n", ka->phys_idt);
	dprintf("vir_idt = 0x%x\n", ka->vir_idt);
	dprintf("phys_gdt = 0x%x\n", ka->phys_gdt);
	dprintf("vir_gdt = 0x%x\n", ka->vir_gdt);
	dprintf("mem_size = 0x%x\n", ka->mem_size);
	dprintf("str = 0x%x\n", ka->str);
	dprintf("bootdir = 0x%x\n", ka->bootdir);
	dprintf("bootdir_size = 0x%x\n", ka->bootdir_size);
	dprintf("phys_alloc_range_low = 0x%x\n", ka->phys_alloc_range_low);
	dprintf("phys_alloc_range_high = 0x%x\n", ka->phys_alloc_range_high);
	dprintf("virt_alloc_range_low = 0x%x\n", ka->virt_alloc_range_low);
	dprintf("virt_alloc_range_high = 0x%x\n", ka->virt_alloc_range_high);
	dprintf("page_hole = 0x%x\n", ka->page_hole);
#endif
//	dprintf("finding and booting other cpus...\n");
//	smp_boot(ka, kernel_entry);

	dprintf("jumping into kernel at 0x%lx\n", kernel_entry);

	// tell the kernel which screen line the boot messages reached
	ka->cons_line = screenOffset / SCREEN_WIDTH;

	// switch to the new kernel stack and call the entry point; the explicit
	// movs place ka in rdi and 0 in rsi (SysV x86-64 argument registers).
	// NOTE(review): the "rdi"/"rsi" constraint strings are parsed by GCC as the
	// individual constraints r+d+i, not register names — confirm intent.
	asm volatile("mov %0, %%rsp;"
		"mov %1, %%rdi;"
		"mov %2, %%rsi;"
		"call *%3"
		:: "r" (ka->cpu_kstack[0].start + ka->cpu_kstack[0].size),
		"rdi" (ka),
		"rsi" (0),
		"r" (kernel_entry));

	// the kernel never returns here; spin just in case
	for(;;);
}
// Load a 64-bit ELF image that sits in memory at 'data' into its linked virtual
// addresses, allocating physical pages from next_paddr and mapping them with
// mmu_map_page. Records the first two PT_LOAD segments in ar0/ar1, the
// PT_DYNAMIC range in dynamic_section (size 0 if absent), and the entry point
// in *start_addr.
static void load_elf_image(void *data, addr_range *ar0, addr_range *ar1, addr_t *start_addr, addr_range *dynamic_section)
{
	struct Elf64_Ehdr *imageHeader = (struct Elf64_Ehdr*) data;
	struct Elf64_Phdr *segments = (struct Elf64_Phdr*)(imageHeader->e_phoff + (addr_t)imageHeader);
	int segmentIndex;
	int foundSegmentIndex = 0;

	ar0->size = 0;
	ar1->size = 0;
	dynamic_section->size = 0;

	for (segmentIndex = 0; segmentIndex < imageHeader->e_phnum; segmentIndex++) {
		struct Elf64_Phdr *segment = &segments[segmentIndex];
		unsigned segmentOffset;

		switch(segment->p_type) {
			case PT_LOAD:
				break;
			case PT_DYNAMIC:
				dynamic_section->start = segment->p_vaddr;
				dynamic_section->size = segment->p_memsz;
				/* fall through: only PT_LOAD segments are mapped */
			default:
				continue;
		}

//		dprintf("segment %d\n", segmentIndex);
//		dprintf("p_vaddr 0x%lx p_paddr 0x%lx p_filesz 0x%lx p_memsz 0x%lx\n",
//			segment->p_vaddr, segment->p_paddr, segment->p_filesz, segment->p_memsz);

		/* Map initialized portion */
		// NOTE(review): the ROUNDOWN on both copy addresses assumes p_vaddr and
		// p_offset are page-aligned (or equally misaligned) — confirm for kernel images
		for (segmentOffset = 0;
			segmentOffset < ROUNDUP(segment->p_filesz, PAGE_SIZE);
			segmentOffset += PAGE_SIZE) {

			addr_t paddr = next_paddr;
			next_paddr += PAGE_SIZE;

			mmu_map_page(segment->p_vaddr + segmentOffset, paddr);
			memcpy((void *)ROUNDOWN(segment->p_vaddr + segmentOffset, PAGE_SIZE),
				(void *)ROUNDOWN((addr_t)data + segment->p_offset + segmentOffset, PAGE_SIZE), PAGE_SIZE);
		}

		/* Clean out the leftover part of the last page */
		if(segment->p_filesz % PAGE_SIZE > 0) {
//			dprintf("memsetting 0 to va 0x%lx, size %d\n", (void*)(segment->p_vaddr + segment->p_filesz), PAGE_SIZE - (segment->p_filesz % PAGE_SIZE));
			memset((void*)(segment->p_vaddr + segment->p_filesz), 0, PAGE_SIZE
				- (segment->p_filesz % PAGE_SIZE));
		}

		/* Map uninitialized portion (BSS): fresh zeroed pages */
		for (; segmentOffset < ROUNDUP(segment->p_memsz, PAGE_SIZE); segmentOffset += PAGE_SIZE) {
//			dprintf("mapping zero page at va 0x%lx\n", segment->p_vaddr + segmentOffset);

			addr_t paddr = next_paddr;
			next_paddr += PAGE_SIZE;

			mmu_map_page(segment->p_vaddr + segmentOffset, paddr);
			memset((void *)(segment->p_vaddr + segmentOffset), 0, PAGE_SIZE);
		}

		// remember where the first two loadable segments ended up
		switch(foundSegmentIndex) {
			case 0:
				ar0->start = segment->p_vaddr;
				ar0->size = segment->p_memsz;
				break;
			case 1:
				ar1->start = segment->p_vaddr;
				ar1->size = segment->p_memsz;
				break;
			default:
				// more than two PT_LOAD segments: extras are mapped but not recorded
				break;
		}
		foundSegmentIndex++;
	}
	*start_addr = imageHeader->e_entry;
}
415 void sleep(uint64 time)
417 uint64 start = system_time();
419 while(system_time() - start <= time)
423 static void sort_addr_range(addr_range *range, int count)
425 addr_range temp_range;
426 int i;
427 bool done;
429 do {
430 done = true;
431 for(i = 1; i < count; i++) {
432 if(range[i].start < range[i-1].start) {
433 done = false;
434 memcpy(&temp_range, &range[i], sizeof(temp_range));
435 memcpy(&range[i], &range[i-1], sizeof(temp_range));
436 memcpy(&range[i-1], &temp_range, sizeof(temp_range));
439 } while(!done);
// write one byte to an x86 I/O port (value in al, port in dx)
#define outb(value,port) \
	asm("outb %%al,%%dx"::"a" (value),"d" (port))

// read one byte from an x86 I/O port; expands to the value read
#define inb(port) ({ \
	unsigned char _v; \
	asm volatile("inb %%dx,%%al":"=a" (_v):"d" (port)); \
	_v; \
})

// input clock of the i8253/8254 programmable interval timer, in Hz
#define TIMER_CLKNUM_HZ 1193167
454 static void calculate_cpu_conversion_factor(void)
456 unsigned s_low, s_high;
457 unsigned low, high;
458 unsigned long expired;
459 uint64 t1, t2;
460 uint64 p1, p2, p3;
461 double r1, r2, r3;
463 outb(0x34, 0x43); /* program the timer to count down mode */
464 outb(0xff, 0x40); /* low and then high */
465 outb(0xff, 0x40);
467 /* quick sample */
468 quick_sample:
469 do {
470 outb(0x00, 0x43); /* latch counter value */
471 s_low = inb(0x40);
472 s_high = inb(0x40);
473 } while(s_high!= 255);
474 t1 = rdtsc();
475 do {
476 outb(0x00, 0x43); /* latch counter value */
477 low = inb(0x40);
478 high = inb(0x40);
479 } while(high> 224);
480 t2 = rdtsc();
481 p1= t2-t1;
482 r1= (double)(p1)/(double)(((s_high<<8)|s_low) - ((high<<8)|low));
484 /* not so quick sample */
485 not_so_quick_sample:
486 do {
487 outb(0x00, 0x43); /* latch counter value */
488 s_low = inb(0x40);
489 s_high = inb(0x40);
490 } while(s_high!= 255);
491 t1 = rdtsc();
492 do {
493 outb(0x00, 0x43); /* latch counter value */
494 low = inb(0x40);
495 high = inb(0x40);
496 } while(high> 192);
497 t2 = rdtsc();
498 p2= t2-t1;
499 r2= (double)(p2)/(double)(((s_high<<8)|s_low) - ((high<<8)|low));
500 if((r1/r2)> 1.01) {
501 dprintf("Tuning loop(1)\n");
502 goto quick_sample;
504 if((r1/r2)< 0.99) {
505 dprintf("Tuning loop(1)\n");
506 goto quick_sample;
509 /* slow sample */
510 do {
511 outb(0x00, 0x43); /* latch counter value */
512 s_low = inb(0x40);
513 s_high = inb(0x40);
514 } while(s_high!= 255);
515 t1 = rdtsc();
516 do {
517 outb(0x00, 0x43); /* latch counter value */
518 low = inb(0x40);
519 high = inb(0x40);
520 } while(high> 128);
521 t2 = rdtsc();
522 p3= t2-t1;
523 r3= (double)(p3)/(double)(((s_high<<8)|s_low) - ((high<<8)|low));
524 if((r2/r3)> 1.01) {
525 dprintf("Tuning loop(2)\n");
526 goto not_so_quick_sample;
528 if((r2/r3)< 0.99) {
529 dprintf("Tuning loop(2)\n");
530 goto not_so_quick_sample;
533 expired = ((s_high<<8)|s_low) - ((high<<8)|low);
534 p3*= TIMER_CLKNUM_HZ;
537 * cv_factor contains time in usecs per CPU cycle * 2^32
539 * The code below is a bit fancy. Originally Michael Noistering
540 * had it like:
542 * cv_factor = ((uint64)1000000<<32) * expired / p3;
544 * whic is perfect, but unfortunately 1000000ULL<<32*expired
545 * may overflow in fast cpus with the long sampling period
546 * i put there for being as accurate as possible under
547 * vmware.
549 * The below calculation is based in that we are trying
550 * to calculate:
552 * (C*expired)/p3 -> (C*(x0<<k + x1))/p3 ->
553 * (C*(x0<<k))/p3 + (C*x1)/p3
555 * Now the term (C*(x0<<k))/p3 is rewritten as:
557 * (C*(x0<<k))/p3 -> ((C*x0)/p3)<<k + reminder
559 * where reminder is:
561 * floor((1<<k)*decimalPart((C*x0)/p3))
563 * which is approximated as:
565 * floor((1<<k)*decimalPart(((C*x0)%p3)/p3)) ->
566 * (((C*x0)%p3)<<k)/p3
568 * So the final expression is:
570 * ((C*x0)/p3)<<k + (((C*x0)%p3)<<k)/p3 + (C*x1)/p3
572 * Just to make things fancier we choose k based on the input
573 * parameters (we use log2(expired)/3.)
575 * Of course, you are not expected to understand any of this.
578 unsigned i;
579 unsigned k;
580 uint64 C;
581 uint64 x0;
582 uint64 x1;
583 uint64 a, b, c;
585 /* first calculate k*/
586 k= 0;
587 for(i= 0; i< 32; i++) {
588 if(expired & (1<<i)) {
589 k= i;
592 k/= 3;
594 C = 1000000ULL<<32;
595 x0= expired>> k;
596 x1= expired&((1<<k)-1);
598 a= ((C*x0)/p3)<<k;
599 b= (((C*x0)%p3)<<k)/p3;
600 c= (C*x1)/p3;
601 #if 0
602 dprintf("a=%Ld\n", a);
603 dprintf("b=%Ld\n", b);
604 dprintf("c=%Ld\n", c);
605 dprintf("%d %Ld\n", expired, p3);
606 #endif
607 cv_factor= a + b + c;
608 #if 0
609 dprintf("cvf=%Ld\n", cv_factor);
610 #endif
613 if(p3/expired/1000000000LL) {
614 dprintf("CPU at %Ld.%03Ld GHz\n", p3/expired/1000000000LL, ((p3/expired)%1000000000LL)/1000000LL);
615 } else {
616 dprintf("CPU at %Ld.%03Ld MHz\n", p3/expired/1000000LL, ((p3/expired)%1000000LL)/1000LL);
620 void clearscreen()
622 int i;
624 for(i=0; i< SCREEN_WIDTH*SCREEN_HEIGHT; i++) {
625 kScreenBase[i] = 0xf20;
629 static void scrup()
631 int i;
632 memcpy(kScreenBase, kScreenBase + SCREEN_WIDTH,
633 SCREEN_WIDTH * SCREEN_HEIGHT * 2 - SCREEN_WIDTH * 2);
634 screenOffset = (SCREEN_HEIGHT - 1) * SCREEN_WIDTH;
635 for(i=0; i<SCREEN_WIDTH; i++)
636 kScreenBase[screenOffset + i] = 0x0720;
639 int puts(const char *str)
641 while (*str) {
642 if (*str == '\n') {
643 screenOffset += SCREEN_WIDTH - (screenOffset % 80);
644 } else {
645 kScreenBase[screenOffset++] = 0xf00 | *str;
647 if (screenOffset >= SCREEN_WIDTH * SCREEN_HEIGHT)
648 scrup();
650 str++;
652 return 0;
// printf-style debug output to the boot console; returns the formatted length.
int dprintf(const char *fmt, ...)
{
	va_list args;
	char buffer[256];
	int length;

	// NOTE(review): vsprintf is unbounded — a message over 255 chars overflows
	// buffer; confirm whether this boot libc provides vsnprintf
	va_start(args, fmt);
	length = vsprintf(buffer, fmt, args);
	va_end(args);

	puts(buffer);
	return length;
}
// Print a formatted panic message to the boot console and spin forever.
// The return statement is unreachable; the signature matches dprintf.
int panic(const char *fmt, ...)
{
	va_list args;
	char buffer[256];
	int length;

	va_start(args, fmt);
	length = vsprintf(buffer, fmt, args);
	va_end(args);

	puts("PANIC: ");
	puts(buffer);
	puts("\n");

	puts("spinning forever...");
	for(;;)
		;
	return length;	// never reached
}
// Stub time source for stage2: always returns 0 (no timer is wired up in the
// loader; the real implementation lives in the kernel). Note this makes
// sleep() above spin forever.
uint64 system_time(void)
{
	return 0;
}