Work on the stage2 loader:
newos.git: boot/i386/stage2.c
/*
** Copyright 2001, Travis Geiselbrecht. All rights reserved.
** Distributed under the terms of the NewOS License.
*/
#include <boot/bootdir.h>
#include <boot/stage2.h>
#include "stage2_priv.h"
#include "vesa.h"

#include <libc/string.h>
#include <libc/stdarg.h>
#include <libc/printf.h>
#include <sys/elf32.h>
const unsigned kBSSSize = 0x9000;

// we're running out of the first 'file' contained in the bootdir, which is
// a set of binaries and data packed back to back, described by an array
// of boot_entry structures at the beginning. The load address is fixed.
#define BOOTDIR_ADDR 0x100000
const boot_entry *bootdir = (boot_entry *)BOOTDIR_ADDR;

// stick the kernel arguments in a pseudo-random page that will be mapped
// at least during the call into the kernel. The kernel should copy the
// data out and unmap the page.
kernel_args *ka = (kernel_args *)0x20000;

// needed for message
unsigned short *kScreenBase = (unsigned short *)0xb8000;
unsigned screenOffset = 0;
unsigned int line = 0;

unsigned int cv_factor = 0;

// size of bootdir in pages
unsigned int bootdir_pages = 0;

// working pagedir and pagetable
unsigned int *pgdir = 0;
unsigned int *pgtable = 0;

// function decls for this module
void calculate_cpu_conversion_factor();
void load_elf_image(void *data, unsigned int *next_paddr,
	addr_range *ar0, addr_range *ar1, unsigned int *start_addr, addr_range *dynamic_section);
int mmu_init(kernel_args *ka, unsigned int *next_paddr);
void mmu_map_page(unsigned int vaddr, unsigned int paddr);
void cpuid(uint32 selector, uint32 *data);
unsigned int get_eflags();
void set_eflags(unsigned int val);
int check_cpu();
// called by the stage1 bootloader.
// State:
//   32-bit
//   mmu disabled
//   stack somewhere below 1 MB
//   supervisor mode
void _start(unsigned int mem, int in_vesa, unsigned int vesa_ptr)
{
	unsigned int new_stack;
	unsigned int *idt;
	unsigned int *gdt;
	unsigned int next_vaddr;
	unsigned int next_paddr;
	unsigned int nextAllocPage;
	unsigned int kernelSize;
	unsigned int i;
	unsigned int kernel_entry;

	asm("cld");			// Ain't nothing but a GCC thang.
	asm("fninit");		// initialize floating point unit

	clearscreen();
	dprintf("stage2 bootloader entry.\n");
	dprintf("memsize = 0x%x, in_vesa %d, vesa_ptr 0x%x\n", mem, in_vesa, vesa_ptr);

	// verify we can run on this cpu
	if(check_cpu() < 0) {
		dprintf("\nSorry, this computer appears to be lacking some of the features\n");
		dprintf("needed by NewOS. It is currently only able to run on\n");
		dprintf("Pentium class cpus and above, with a few exceptions to\n");
		dprintf("that rule.\n");
		dprintf("\nPlease reset your computer to continue.");

		for(;;);
	}
	// calculate the conversion factor that translates rdtsc time to real microseconds
	calculate_cpu_conversion_factor();

	// calculate how big the bootdir is so we know where we can start grabbing pages
	{
		int entry;
		for (entry = 0; entry < 64; entry++) {
			if (bootdir[entry].be_type == BE_TYPE_NONE)
				break;

			bootdir_pages += bootdir[entry].be_size;
		}

//		nmessage("bootdir is ", bootdir_pages, " pages long\n");
	}

	ka->bootdir_addr.start = (unsigned long)bootdir;
	ka->bootdir_addr.size = bootdir_pages * PAGE_SIZE;

	next_paddr = BOOTDIR_ADDR + bootdir_pages * PAGE_SIZE;
	if(in_vesa) {
		struct VBEInfoBlock *info = (struct VBEInfoBlock *)vesa_ptr;
		struct VBEModeInfoBlock *mode_info = (struct VBEModeInfoBlock *)(vesa_ptr + 0x200);

		ka->fb.enabled = 1;
		ka->fb.x_size = mode_info->x_resolution;
		ka->fb.y_size = mode_info->y_resolution;
		ka->fb.bit_depth = mode_info->bits_per_pixel;
		ka->fb.mapping.start = mode_info->phys_base_ptr;
		ka->fb.mapping.size = ka->fb.x_size * ka->fb.y_size * (ka->fb.bit_depth/8);
		ka->fb.already_mapped = 0;
	} else {
		ka->fb.enabled = 0;
	}

	mmu_init(ka, &next_paddr);
	// load the kernel (3rd entry in the bootdir)
	load_elf_image((void *)(bootdir[2].be_offset * PAGE_SIZE + BOOTDIR_ADDR), &next_paddr,
		&ka->kernel_seg0_addr, &ka->kernel_seg1_addr, &kernel_entry, &ka->kernel_dynamic_section_addr);

	if(ka->kernel_seg1_addr.size > 0)
		next_vaddr = ROUNDUP(ka->kernel_seg1_addr.start + ka->kernel_seg1_addr.size, PAGE_SIZE);
	else
		next_vaddr = ROUNDUP(ka->kernel_seg0_addr.start + ka->kernel_seg0_addr.size, PAGE_SIZE);

	// map in a kernel stack
	ka->cpu_kstack[0].start = next_vaddr;
	for(i=0; i<STACK_SIZE; i++) {
		mmu_map_page(next_vaddr, next_paddr);
		next_vaddr += PAGE_SIZE;
		next_paddr += PAGE_SIZE;
	}
	ka->cpu_kstack[0].size = next_vaddr - ka->cpu_kstack[0].start;
//	dprintf("new stack at 0x%x to 0x%x\n", ka->cpu_kstack[0].start, ka->cpu_kstack[0].start + ka->cpu_kstack[0].size);

	// set up a new idt
	{
		struct gdt_idt_descr idt_descr;

		// find a new idt
		idt = (unsigned int *)next_paddr;
		ka->arch_args.phys_idt = (unsigned int)idt;
		next_paddr += PAGE_SIZE;

//		nmessage("idt at ", (unsigned int)idt, "\n");

		// clear it out
		for(i=0; i<IDT_LIMIT/4; i++) {
			idt[i] = 0;
		}

		// map the idt into virtual space
		mmu_map_page(next_vaddr, (unsigned int)idt);
		ka->arch_args.vir_idt = (unsigned int)next_vaddr;
		next_vaddr += PAGE_SIZE;

		// load the idt
		idt_descr.a = IDT_LIMIT - 1;
		idt_descr.b = (unsigned int *)ka->arch_args.vir_idt;

		asm("lidt %0;"
			: : "m" (idt_descr));

//		nmessage("idt at virtual address ", next_vpage, "\n");
	}
	// set up a new gdt
	{
		struct gdt_idt_descr gdt_descr;

		// find a new gdt
		gdt = (unsigned int *)next_paddr;
		ka->arch_args.phys_gdt = (unsigned int)gdt;
		next_paddr += PAGE_SIZE;

//		nmessage("gdt at ", (unsigned int)gdt, "\n");

		// put segment descriptors in it
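		// each descriptor is two 32-bit words: base 0, limit 0xfffff with 4 KB
		// granularity (a flat 4 GB segment), 32-bit, with access bytes 0x9a/0x92
		// for ring 0 code/data and 0xfa/0xf2 for ring 3 code/data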
		gdt[0] = 0;
		gdt[1] = 0;
		gdt[2] = 0x0000ffff; // seg 0x8  -- kernel 4GB code
		gdt[3] = 0x00cf9a00;
		gdt[4] = 0x0000ffff; // seg 0x10 -- kernel 4GB data
		gdt[5] = 0x00cf9200;
		gdt[6] = 0x0000ffff; // seg 0x1b -- ring 3 4GB code
		gdt[7] = 0x00cffa00;
		gdt[8] = 0x0000ffff; // seg 0x23 -- ring 3 4GB data
		gdt[9] = 0x00cff200;
		// gdt[10] & gdt[11] will be filled later by the kernel

		// map the gdt into virtual space
		mmu_map_page(next_vaddr, (unsigned int)gdt);
		ka->arch_args.vir_gdt = (unsigned int)next_vaddr;
		next_vaddr += PAGE_SIZE;

		// load the GDT
		gdt_descr.a = GDT_LIMIT - 1;
		gdt_descr.b = (unsigned int *)ka->arch_args.vir_gdt;

		asm("lgdt %0;"
			: : "m" (gdt_descr));

//		nmessage("gdt at virtual address ", next_vpage, "\n");
	}
	// Map the pg_dir into kernel space at 0xffc00000-0xffffffff
	// this enables a mmu trick where the 4 MB region that this pgdir entry
	// represents now maps the 4MB of potential pagetables that the pgdir
	// points to. Thrown away later in VM bringup, but useful for now.
	pgdir[1023] = (unsigned int)pgdir | DEFAULT_PAGE_FLAGS;

	// also map it on the next vpage
	mmu_map_page(next_vaddr, (unsigned int)pgdir);
	ka->arch_args.vir_pgdir = next_vaddr;
	next_vaddr += PAGE_SIZE;

	// save the kernel args
	ka->arch_args.system_time_cv_factor = cv_factor;
	ka->phys_mem_range[0].start = 0;
	ka->phys_mem_range[0].size = mem;
	ka->num_phys_mem_ranges = 1;
	ka->str = NULL;
	ka->phys_alloc_range[0].start = BOOTDIR_ADDR;
	ka->phys_alloc_range[0].size = next_paddr - BOOTDIR_ADDR;
	ka->num_phys_alloc_ranges = 1;
	ka->virt_alloc_range[0].start = KERNEL_BASE;
	ka->virt_alloc_range[0].size = next_vaddr - KERNEL_BASE;
	ka->num_virt_alloc_ranges = 1;
	ka->arch_args.page_hole = 0xffc00000;
	ka->num_cpus = 1;
#if 0
	dprintf("kernel args at 0x%x\n", ka);
	dprintf("pgdir = 0x%x\n", ka->pgdir);
	dprintf("pgtables[0] = 0x%x\n", ka->pgtables[0]);
	dprintf("phys_idt = 0x%x\n", ka->phys_idt);
	dprintf("vir_idt = 0x%x\n", ka->vir_idt);
	dprintf("phys_gdt = 0x%x\n", ka->phys_gdt);
	dprintf("vir_gdt = 0x%x\n", ka->vir_gdt);
	dprintf("mem_size = 0x%x\n", ka->mem_size);
	dprintf("str = 0x%x\n", ka->str);
	dprintf("bootdir = 0x%x\n", ka->bootdir);
	dprintf("bootdir_size = 0x%x\n", ka->bootdir_size);
	dprintf("phys_alloc_range_low = 0x%x\n", ka->phys_alloc_range_low);
	dprintf("phys_alloc_range_high = 0x%x\n", ka->phys_alloc_range_high);
	dprintf("virt_alloc_range_low = 0x%x\n", ka->virt_alloc_range_low);
	dprintf("virt_alloc_range_high = 0x%x\n", ka->virt_alloc_range_high);
	dprintf("page_hole = 0x%x\n", ka->page_hole);
#endif
//	dprintf("finding and booting other cpus...\n");
	smp_boot(ka, kernel_entry);

	dprintf("jumping into kernel at 0x%x\n", kernel_entry);

	ka->cons_line = line;
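	// switch onto the kernel stack mapped above, then fake a cdecl call into the
	// kernel: after 'ret' jumps to kernel_entry, the stack holds a dummy return
	// address, the kernel_args pointer, and the cpu number (0 = BSP)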
	asm("movl %0, %%eax; "			// move stack out of way
		"movl %%eax, %%esp; "
		: : "m" (ka->cpu_kstack[0].start + ka->cpu_kstack[0].size));
	asm("pushl $0x0; "				// we're the BSP cpu (0)
		"pushl %0; "				// kernel args
		"pushl $0x0;"				// dummy retval for call to main
		"pushl %1; "				// this is the start address
		"ret; "						// jump.
		: : "g" (ka), "g" (kernel_entry));
}
void load_elf_image(void *data, unsigned int *next_paddr, addr_range *ar0, addr_range *ar1, unsigned int *start_addr, addr_range *dynamic_section)
{
	struct Elf32_Ehdr *imageHeader = (struct Elf32_Ehdr *)data;
	struct Elf32_Phdr *segments = (struct Elf32_Phdr *)(imageHeader->e_phoff + (unsigned)imageHeader);
	int segmentIndex;
	int foundSegmentIndex = 0;

	ar0->size = 0;
	ar1->size = 0;
	dynamic_section->size = 0;

	for (segmentIndex = 0; segmentIndex < imageHeader->e_phnum; segmentIndex++) {
		struct Elf32_Phdr *segment = &segments[segmentIndex];
		unsigned segmentOffset;

		switch(segment->p_type) {
			case PT_LOAD:
				break;
			case PT_DYNAMIC:
				// remember where the dynamic section lives, but don't load it here
				dynamic_section->start = segment->p_vaddr;
				dynamic_section->size = segment->p_memsz;
				// fall through
			default:
				continue;
		}
//		dprintf("segment %d\n", segmentIndex);
//		dprintf("p_vaddr 0x%x p_paddr 0x%x p_filesz 0x%x p_memsz 0x%x\n",
//			segment->p_vaddr, segment->p_paddr, segment->p_filesz, segment->p_memsz);

		/* Map initialized portion */
		for (segmentOffset = 0;
			segmentOffset < ROUNDUP(segment->p_filesz, PAGE_SIZE);
			segmentOffset += PAGE_SIZE) {

			mmu_map_page(segment->p_vaddr + segmentOffset, *next_paddr);
			memcpy((void *)ROUNDOWN(segment->p_vaddr + segmentOffset, PAGE_SIZE),
				(void *)ROUNDOWN((unsigned)data + segment->p_offset + segmentOffset, PAGE_SIZE), PAGE_SIZE);
			(*next_paddr) += PAGE_SIZE;
		}
		/* Clean out the leftover part of the last page */
		if(segment->p_filesz % PAGE_SIZE > 0) {
//			dprintf("memsetting 0 to va 0x%x, size %d\n", (void*)((unsigned)segment->p_vaddr + segment->p_filesz), PAGE_SIZE - (segment->p_filesz % PAGE_SIZE));
			memset((void *)((unsigned)segment->p_vaddr + segment->p_filesz), 0, PAGE_SIZE
				- (segment->p_filesz % PAGE_SIZE));
		}

		/* Map uninitialized portion */
		for (; segmentOffset < ROUNDUP(segment->p_memsz, PAGE_SIZE); segmentOffset += PAGE_SIZE) {
//			dprintf("mapping zero page at va 0x%x\n", segment->p_vaddr + segmentOffset);
			mmu_map_page(segment->p_vaddr + segmentOffset, *next_paddr);
			memset((void *)(segment->p_vaddr + segmentOffset), 0, PAGE_SIZE);
			(*next_paddr) += PAGE_SIZE;
		}
		switch(foundSegmentIndex) {
			case 0:
				ar0->start = segment->p_vaddr;
				ar0->size = segment->p_memsz;
				break;
			case 1:
				ar1->start = segment->p_vaddr;
				ar1->size = segment->p_memsz;
				break;
			default:
				break;
		}
		foundSegmentIndex++;
	}
	*start_addr = imageHeader->e_entry;
}
// allocate a page directory and page table to facilitate mapping
// pages to the 0x80000000 - 0x80400000 region.
// also identity maps the first 4MB of memory
int mmu_init(kernel_args *ka, unsigned int *next_paddr)
{
	int i;

	// allocate a new pgdir
	pgdir = (unsigned int *)*next_paddr;
	(*next_paddr) += PAGE_SIZE;
	ka->arch_args.phys_pgdir = (unsigned int)pgdir;

	// clear out the pgdir
	for(i = 0; i < 1024; i++)
		pgdir[i] = 0;

	// make a pagetable at this random spot
	pgtable = (unsigned int *)0x11000;

	for (i = 0; i < 1024; i++) {
		pgtable[i] = (i * 0x1000) | DEFAULT_PAGE_FLAGS;
	} // pkx: create first 4 MB one-to-one mapping

	pgdir[0] = (unsigned int)pgtable | DEFAULT_PAGE_FLAGS;
	// pkx: put the one-to-one mapping into the page dir.

	// Get new page table and clear it out
	pgtable = (unsigned int *)*next_paddr;
	ka->arch_args.pgtables[0] = (unsigned int)pgtable;
	ka->arch_args.num_pgtables = 1;

	(*next_paddr) += PAGE_SIZE;
	for (i = 0; i < 1024; i++)
		pgtable[i] = 0;

	// put the new page table into the page directory
	// this maps the kernel at KERNEL_BASE
	pgdir[KERNEL_BASE/(4*1024*1024)] = (unsigned int)pgtable | DEFAULT_PAGE_FLAGS;

	// switch to the new pgdir
	asm("movl %0, %%eax;"
		"movl %%eax, %%cr3;" :: "m" (pgdir) : "eax");
	// Important. Make sure supervisor threads can fault on read only pages...
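	// CR0 bits set below: PG (bit 31, enable paging), WP (bit 16, write-protect
	// honored in supervisor mode), NE (bit 5, native FPU error reporting),
	// PE (bit 0, protected mode -- already on, but must stay set)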
	asm("movl %%eax, %%cr0" : : "a" ((1 << 31) | (1 << 16) | (1 << 5) | 1));
	// pkx: moved the paging turn-on to here.

	return 0;
}
// can only map the 4 meg region right after KERNEL_BASE, may fix this later
// if need arises.
void mmu_map_page(unsigned int vaddr, unsigned int paddr)
{
//	dprintf("mmu_map_page: vaddr 0x%x, paddr 0x%x\n", vaddr, paddr);
	if(vaddr < KERNEL_BASE || vaddr >= (KERNEL_BASE + 4096*1024)) {
		dprintf("mmu_map_page: asked to map invalid page!\n");
		for(;;);
	}
	paddr &= ~(PAGE_SIZE-1);
//	dprintf("paddr 0x%x @ index %d\n", paddr, (vaddr % (PAGE_SIZE * 1024)) / PAGE_SIZE);
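	// (vaddr % 4MB) / PAGE_SIZE picks the entry within the single kernel page
	// table that mmu_init() installed at KERNEL_BASE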
	pgtable[(vaddr % (PAGE_SIZE * 1024)) / PAGE_SIZE] = paddr | DEFAULT_PAGE_FLAGS;
}
int check_cpu()
{
	unsigned int i;
	uint32 data[4];
	char str[17];

	// check the eflags register to see if the cpuid instruction exists
	if((get_eflags() & 1<<21) == 0) {
		set_eflags(get_eflags() | 1<<21);
		if((get_eflags() & 1<<21) == 0) {
			// we couldn't set the ID bit of the eflags register, this cpu is old
			return -1;
		}
	}

	// we can safely call cpuid

	// print some fun data
	cpuid(0, data);

	// build the vendor string
	memset(str, 0, sizeof(str));
	*(unsigned int *)&str[0] = data[1];
	*(unsigned int *)&str[4] = data[3];
	*(unsigned int *)&str[8] = data[2];

	// get the family, model, stepping
	cpuid(1, data);
	dprintf("CPU: family %d model %d stepping %d, string '%s'\n",
		(data[0] >> 8) & 0xf, (data[0] >> 4) & 0xf, data[0] & 0xf, str);

	// check for bits we need
	cpuid(1, data);
	if(!(data[3] & 1<<4)) return -1;	// check for rdtsc (TSC is bit 4 of edx, i.e. data[3])

	return 0;
}
long long rdtsc();
asm(
"rdtsc:\n"
"	rdtsc\n"
"	ret\n"
);
//void execute_n_instructions(int count);
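// burns roughly 'count' instructions: count/16 iterations of 15 xors plus the
// loop instruction, used as a fixed workload for the calibration below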
asm(
".global execute_n_instructions\n"
"execute_n_instructions:\n"
"	movl 4(%esp), %ecx\n"
"	shrl $4, %ecx\n"	/* divide count by 16 */
".again:\n"
"	xorl %eax, %eax\n"
"	xorl %eax, %eax\n"
"	xorl %eax, %eax\n"
"	xorl %eax, %eax\n"
"	xorl %eax, %eax\n"
"	xorl %eax, %eax\n"
"	xorl %eax, %eax\n"
"	xorl %eax, %eax\n"
"	xorl %eax, %eax\n"
"	xorl %eax, %eax\n"
"	xorl %eax, %eax\n"
"	xorl %eax, %eax\n"
"	xorl %eax, %eax\n"
"	xorl %eax, %eax\n"
"	xorl %eax, %eax\n"
"	loop .again\n"
"	ret\n"
);
void system_time_setup(long a);
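// computes cv_factor = (1000000 * 2^32) / CPU Hz, so that system_time()
// below can return (tsc * cv_factor) >> 32 = microseconds since boot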
asm(
"system_time_setup:\n"
	/* First divide 1M * 2^32 by proc_clock */
"	movl	$0x0F4240, %ecx\n"
"	movl	%ecx, %edx\n"
"	subl	%eax, %eax\n"
"	movl	4(%esp), %ebx\n"
"	divl	%ebx, %eax\n"	/* should be 64 / 32 */
"	movl	%eax, cv_factor\n"
"	ret\n"
);
// long long system_time();
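// returns (tsc * cv_factor) >> 32 as a 64-bit value: the two 32x32 multiplies
// below build the 96-bit product and keep bits 32..95 of it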
asm(
".global system_time\n"
"system_time:\n"
	/* load 64-bit factor into %eax (low), %edx (high) */
	/* hand-assemble rdtsc -- read time stamp counter */
"	rdtsc\n"		/* time in %edx,%eax */

"	pushl	%ebx\n"
"	pushl	%ecx\n"
"	movl	cv_factor, %ebx\n"
"	movl	%edx, %ecx\n"	/* save high half */
"	mull	%ebx\n"			/* truncate %eax, but keep %edx */
"	movl	%ecx, %eax\n"
"	movl	%edx, %ecx\n"	/* save high half of low */
"	mull	%ebx\n"			/*, %eax*/
	/* now compute [%edx, %eax] + [%ecx], propagating carry */
"	subl	%ebx, %ebx\n"	/* need zero to propagate carry */
"	addl	%ecx, %eax\n"
"	adc	%ebx, %edx\n"
"	popl	%ecx\n"
"	popl	%ebx\n"
"	ret\n"
);
// void cpuid(uint32 selector, uint32 *data);
asm(
".global cpuid\n"
"cpuid:\n"
"	pushl	%ebx\n"
"	pushl	%edi\n"

"	movl	12(%esp),%eax\n"
"	movl	16(%esp),%edi\n"
"	cpuid\n"

"	movl	%eax,0(%edi)\n"
"	movl	%ebx,4(%edi)\n"
"	movl	%ecx,8(%edi)\n"
"	movl	%edx,12(%edi)\n"

"	popl	%edi\n"
"	popl	%ebx\n"

"	ret\n"
);
// unsigned int get_eflags();
asm(
".global get_eflags\n"
"get_eflags:\n"

"	pushfl\n"
"	popl	%eax\n"

"	ret\n"
);
// void set_eflags(unsigned int val);
asm(
".global set_eflags\n"
"set_eflags:\n"

"	pushl	4(%esp)\n"
"	popfl\n"

"	ret\n"
);
void sleep(long long time)
{
	long long start = system_time();

	while(system_time() - start <= time)
		;
}
#define outb(value,port) \
	asm("outb %%al,%%dx"::"a" (value),"d" (port))

#define inb(port) ({ \
	unsigned char _v; \
	asm volatile("inb %%dx,%%al":"=a" (_v):"d" (port)); \
	_v; \
})
#define TIMER_CLKNUM_HZ 1193167
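// calibration: start PIT channel 0 counting down from 0xffff, bracket a fixed
// instruction workload with rdtsc, then latch and read back the counter. The
// expired PIT ticks (at TIMER_CLKNUM_HZ) give the elapsed microseconds, and
// dividing the rdtsc delta by that yields the CPU clock in Hz.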
void calculate_cpu_conversion_factor()
{
	unsigned char low, high;
	unsigned long expired;
	long long t1, t2;
	long long time_base_ticks;
	double timer_usecs;

	/* program the timer to count down mode */
	outb(0x34, 0x43);

	outb(0xff, 0x40);		/* low and then high */
	outb(0xff, 0x40);

	t1 = rdtsc();

	execute_n_instructions(32*20000);

	t2 = rdtsc();

	outb(0x00, 0x43);		/* latch counter value */
	low = inb(0x40);
	high = inb(0x40);

	expired = (unsigned long)0xffff - ((((unsigned long)high) << 8) + low);

	timer_usecs = (expired * 1.0) / (TIMER_CLKNUM_HZ/1000000.0);
	time_base_ticks = t2 - t1;

	dprintf("CPU at %d Hz\n", (int)((time_base_ticks / timer_usecs) * 1000000));

	system_time_setup((int)((time_base_ticks / timer_usecs) * 1000000));
}
void clearscreen()
{
	int i;

	// kScreenBase points to 16-bit character/attribute cells, so the screen
	// holds SCREEN_WIDTH*SCREEN_HEIGHT of them; fill with white-on-black spaces
	for(i = 0; i < SCREEN_WIDTH*SCREEN_HEIGHT; i++) {
		kScreenBase[i] = 0xf20;
	}
}
static void scrup()
{
	int i;
	memcpy(kScreenBase, kScreenBase + SCREEN_WIDTH,
		SCREEN_WIDTH * SCREEN_HEIGHT * 2 - SCREEN_WIDTH * 2);
	screenOffset = (SCREEN_HEIGHT - 1) * SCREEN_WIDTH;
	for(i=0; i<SCREEN_WIDTH; i++)
		kScreenBase[screenOffset + i] = 0x0720;
	line = SCREEN_HEIGHT - 1;
}
void puts(const char *str)
{
	while (*str) {
		if (*str == '\n') {
			line++;
			if(line > SCREEN_HEIGHT - 1)
				scrup();
			else
				screenOffset += SCREEN_WIDTH - (screenOffset % SCREEN_WIDTH);
		} else {
			kScreenBase[screenOffset++] = 0xf00 | *str;
		}
		// scroll before running off the end of the visible screen
		if (screenOffset >= SCREEN_WIDTH * SCREEN_HEIGHT)
			scrup();

		str++;
	}
}
int dprintf(const char *fmt, ...)
{
	int ret;
	va_list args;
	char temp[256];

	va_start(args, fmt);
	ret = vsprintf(temp, fmt, args);
	va_end(args);

	puts(temp);
	return ret;
}