delint for clang
[AROS.git] / arch / x86_64-pc / kernel / kernel_startup.c
blob404b4d27fb90eb2b2e20bf1301f5e0ceafd255d3
1 /*
2 Copyright © 1995-2014, The AROS Development Team. All rights reserved.
3 $Id$
4 */
6 #include <aros/multiboot.h>
7 #include <asm/cpu.h>
8 #include <asm/io.h>
9 #include <aros/symbolsets.h>
10 #include <exec/lists.h>
11 #include <exec/memory.h>
12 #include <exec/resident.h>
13 #include <utility/tagitem.h>
14 #include <proto/arossupport.h>
15 #include <proto/exec.h>
17 #include <bootconsole.h>
18 #include <inttypes.h>
19 #include <string.h>
21 #include "boot_utils.h"
22 #include "kernel_base.h"
23 #include "kernel_bootmem.h"
24 #include "kernel_debug.h"
25 #include "kernel_intern.h"
26 #include "kernel_mmap.h"
27 #include "kernel_romtags.h"
28 #include "apic.h"
29 #include "smp.h"
30 #include "tls.h"
/* Debug output switches: D() is enabled (expands to its argument),
   DSTACK() (stack-related debug) is compiled out. */
32 #define D(x) x
33 #define DSTACK(x)
35 /* Common IBM PC memory layout */
36 static const struct MemRegion PC_Memory[] =
39 * Give low memory a bit lower priority. This will help us to locate its MemHeader (the last one in the list).
40 * We explicitly need low memory for SMP bootstrap.
42 {0x000000000, 0x000100000, "Low memory" , -6, MEMF_PUBLIC|MEMF_LOCAL|MEMF_KICK|MEMF_CHIP|MEMF_31BIT|MEMF_24BITDMA},
43 {0x000100000, 0x001000000, "ISA DMA memory", -5, MEMF_PUBLIC|MEMF_LOCAL|MEMF_KICK|MEMF_CHIP|MEMF_31BIT|MEMF_24BITDMA},
45 * EXPERIMENTAL:
46 * 1. Some (or all?) 64-bit machines expose RAM at addresses up to 0xD0000000 (giving 3.5 GB total). All MMIO
47 * sits beyond this border. We intentionally specify 4GB as limit, just in case if some machine exhibits
48 * even more RAM in this space. We want all the RAM to be usable.
49 * 2. We have MEMF_31BIT originating from MorphOS. But here we interpret it as "32-bit memory". I guess
50 * it originated from the assumption that MMIO starts at 0x80000000 (which is true at least for PegasosPPC).
51 * So, is it okay to assume actually 32-bit memory for MEMF_31BIT? Are there anything which really imposes
52 * 31-bit limit? AllocEntry() issue doesn't count...
54 {0x001000000, 0x0FFFFFFFF, "32-bit memory" , 0, MEMF_PUBLIC|MEMF_LOCAL|MEMF_KICK|MEMF_CHIP|MEMF_31BIT },
56 * FIXME: Our MMU mapping supports only 4GB address space.
57 * We can't enable more right now because lots of RAM would be required for MMU tables,
58 * and it will be irrational to reserve so large boot-time region (AROS will fail to boot
59 * up on systems with relatively small amount of RAM).
60 * MMU structures need to be allocated dynamically from a working memory. Waiting for Michal's
61 * page allocator to implement this...
62 {0x080000000, -1 , "Upper memory" , 10, MEMF_PUBLIC|MEMF_LOCAL|MEMF_KICK|MEMF_CHIP }, */
63 {0 , 0 , NULL , 0, 0 }
/* MemHeader allocator implementation; TLSF by default, switched to STD by the
   "notlsf" command-line option (see kernel_cstart()). */
66 static ULONG allocator = ALLOCATOR_TLSF;

/*
 * Boot-time global variables.
 * __KernBootPrivate needs to survive accross warm reboots, so it's put into .data.
 * SysBase is intentionally put into .rodata. This way we prevent it from being modified.
 */
73 __attribute__((section(".data"))) struct KernBootPrivate *__KernBootPrivate = NULL;
74 __attribute__((section(".data"))) IPTR kick_highest = 0;
75 __attribute__((section(".rodata"))) struct ExecBase *SysBase = NULL;

/* Forward declarations; boot_stack is defined (with its size) near the bottom of this file. */
77 static void boot_start(struct TagItem *msg);
78 static char boot_stack[];
81 * This is where our kernel started.
82 * First we clear BSS section, then switch stack pointer to our temporary stack
83 * (which is itself located in BSS). While we are here, the stack is actually
84 * located inside our bootstrap, and it's safe to use it a little bit.
86 IPTR __startup start64(struct TagItem *msg, ULONG magic)
88 /* Anti-command-line-run protector */
89 if (magic == AROS_BOOT_MAGIC)
91 /* Run the kickstart from boot_start() routine. */
92 core_Kick(msg, boot_start);
95 return -1;
99 * This code is executed only once, after the kickstart is loaded by bootstrap.
100 * Its main job is to initialize early debugging console ASAP in order to be able
101 * to see what happens. This will deal with both serial and on-screen console.
103 * Console mirror is placed at the end of bootstrap's protected area. We must not
104 * overwrite it because it contains boot-time GDT, taglist, and some other structures.
106 * Default address is bootstrap start + 4KB, just in case.
108 static void boot_start(struct TagItem *msg)
110 fb_Mirror = (void *)LibGetTagData(KRN_ProtAreaEnd, 0x101000, msg);
111 con_InitTagList(msg);
113 bug("AROS64 - The AROS Research OS, 64-bit version. Compiled %s\n", __DATE__);
114 D(bug("[Kernel] boot_start: Jumped into kernel.resource @ %p [stub @ %p].\n", boot_start, start64));
116 kernel_cstart(msg);
120 * This routine actually launches the kickstart. It's called either upon first start or upon warm reboot.
121 * The only assumption is that stack is outside .bss . For both cases this is true:
122 * 1. First boot - the stack is located inside the bootstrap.
123 * 2. Warm reboot - the stack is located in supervisor area (__KernBootPrivate->SystemStack).
125 void core_Kick(struct TagItem *msg, void *target)
127 const struct TagItem *bss = LibFindTagItem(KRN_KernelBss, msg);
129 /* First clear .bss */
130 if (bss)
131 __clear_bss((const struct KernelBSS *)bss->ti_Data);
134 * ... then switch to initial stack and jump to target address.
135 * We set rbp to 0 and use call here in order to get correct stack traces
136 * if the boot task crashes. Otherwise backtrace goes beyond this location
137 * into memory areas with undefined contents.
139 asm volatile("movq %1, %%rsp\n\t"
140 "movq $0, %%rbp\n\t"
141 "call *%2\n"::"D"(msg), "r"(boot_stack + STACK_SIZE), "r"(target));
145 * This is the main entry point.
146 * We run from here both at first boot and upon reboot.
148 void kernel_cstart(const struct TagItem *start_msg)
150 struct MinList memList;
151 struct TagItem *msg = (struct TagItem *)start_msg;
152 struct MemHeader *mh, *mh2;
153 struct mb_mmap *mmap = NULL;
154 IPTR mmap_len = 0;
155 IPTR addr = 0;
156 IPTR klo = 0;
157 struct TagItem *tag;
158 UBYTE _APICID;
159 UWORD *ranges[] = {NULL, NULL, (UWORD *)-1};
160 /* Enable fxsave/fxrstor */
161 wrcr(cr4, rdcr(cr4) | _CR4_OSFXSR | _CR4_OSXMMEXCPT);
163 D(bug("[Kernel] Boot data: 0x%p\n", __KernBootPrivate));
164 DSTACK(bug("[Kernel] Boot stack: 0x%p - 0x%p\n", boot_stack, boot_stack + STACK_SIZE));
166 if (__KernBootPrivate == NULL)
168 /* This is our first start. */
169 struct vbe_mode *vmode = NULL;
170 char *cmdline = NULL;
171 IPTR khi;
173 /* We need highest KS address and memory map to begin the work */
174 khi = LibGetTagData(KRN_KernelHighest, 0, msg);
175 mmap = (struct mb_mmap *)LibGetTagData(KRN_MMAPAddress, 0, msg);
176 mmap_len = LibGetTagData(KRN_MMAPLength, 0, msg);
178 if ((!khi) || (!mmap) || (!mmap_len))
180 krnPanic(NULL, "Incomplete information from the bootstrap\n"
181 "\n"
182 "Kickstart top: 0x%p\n"
183 "Memory map: address 0x%p, length %lu\n", khi, mmap, mmap, mmap_len);
187 * Our boot taglist is located just somewhere in memory. Additionally, it's very fragmented
188 * (its linked data, like VBE information, were also placed just somewhere, by GRUB.
189 * Now we need some memory to gather these things together. This memory will be preserved
190 * accross warm restarts.
191 * We know the bootstrap has reserved some space right beyond the kickstart. We get our highest
192 * address, and use memory map to locate topmost address of this area.
194 khi = AROS_ROUNDUP2(khi + 1, sizeof(APTR));
195 mmap = mmap_FindRegion(khi, mmap, mmap_len);
197 if (!mmap)
199 krnPanic(NULL, "Inconsistent memory map or kickstart placement\n"
200 "Kickstart region not found");
203 if (mmap->type != MMAP_TYPE_RAM)
205 krnPanic(NULL, "Inconsistent memory map or kickstart placement\n"
206 "Reserved memory overwritten\n"
207 "Region 0x%p - 0x%p type %d\n"
208 "Kickstart top 0x%p", mmap->addr, mmap->addr + mmap->len - 1, mmap->type, khi);
211 /* Initialize boot-time memory allocator */
212 BootMemPtr = (void *)khi;
213 BootMemLimit = (void *)mmap->addr + mmap->len;
215 D(bug("[Kernel] Bootinfo storage 0x%p - 0x%p\n", BootMemPtr, BootMemLimit));
218 * Our boot taglist is placed by the bootstrap just somewhere in memory.
219 * The first thing is to move it into some safe place.
222 /* This will relocate the taglist itself */
223 RelocateBootMsg(msg);
226 * Now relocate linked data.
227 * Here we actually process only tags we know about and expect to get.
228 * For example, we are not going to receive KRN_HostInterface or KRN_OpenfirmwareTree.
230 msg = BootMsg;
231 while ((tag = LibNextTagItem(&msg)))
233 switch (tag->ti_Tag)
235 case KRN_KernelBss:
236 RelocateBSSData(tag);
237 break;
239 case KRN_MMAPAddress:
240 RelocateTagData(tag, mmap_len);
241 break;
243 case KRN_VBEModeInfo:
244 RelocateTagData(tag, sizeof(struct vbe_mode));
245 vmode = (struct vbe_mode *)tag->ti_Data;
246 break;
248 case KRN_VBEControllerInfo:
249 RelocateTagData(tag, sizeof(struct vbe_controller));
250 break;
252 case KRN_CmdLine:
253 RelocateStringData(tag);
254 cmdline = (char *)tag->ti_Data;
255 break;
257 case KRN_BootLoader:
258 RelocateStringData(tag);
259 break;
263 /* Now allocate KernBootPrivate */
264 __KernBootPrivate = krnAllocBootMem(sizeof(struct KernBootPrivate));
266 if (cmdline && vmode && vmode->phys_base && strstr(cmdline, "vesahack"))
268 bug("[Kernel] VESA debugging hack activated\n");
271 * VESA hack.
272 * It divides screen height by 2 and increments framebuffer pointer.
273 * This allows VESA driver to use only upper half of the screen, while
274 * lower half will still be used for debug output.
276 vmode->y_resolution >>= 1;
278 __KernBootPrivate->debug_y_resolution = vmode->y_resolution;
279 __KernBootPrivate->debug_framebuffer = (void *)(unsigned long)vmode->phys_base + vmode->y_resolution * vmode->bytes_per_scanline;
282 if (cmdline && strstr(cmdline, "notlsf"))
283 allocator = ALLOCATOR_STD;
286 /* Prepare GDT */
287 core_SetupGDT(__KernBootPrivate);
289 if (!__KernBootPrivate->SystemStack)
292 * Allocate our supervisor stack from boot-time memory.
293 * It will be protected from user's intervention.
294 * Allocate actually three stacks: panic, supervisor, ring1.
295 * Note that we do the actual allocation only once. The region is kept
296 * in __KernBootPrivate which survives warm reboots.
298 __KernBootPrivate->SystemStack = (IPTR)krnAllocBootMem(STACK_SIZE * 3);
300 DSTACK(bug("[Kernel] Allocated supervisor stack 0x%p - 0x%p\n",
301 __KernBootPrivate->SystemStack, __KernBootPrivate->SystemStack + STACK_SIZE * 3));
304 /* We are x86-64, and we know we always have APIC. */
305 __KernBootPrivate->_APICBase = core_APIC_GetBase();
306 _APICID = core_APIC_GetID(__KernBootPrivate->_APICBase);
307 D(bug("[Kernel] kernel_cstart: launching on BSP APIC ID %d, base @ %p\n", _APICID, __KernBootPrivate->_APICBase));
309 /* Set TSS, GDT, LDT and MMU up */
310 core_CPUSetup(_APICID, __KernBootPrivate->SystemStack);
311 core_SetupIDT(__KernBootPrivate);
312 core_SetupMMU(__KernBootPrivate);
315 * Here we ended all boot-time allocations.
316 * We won't do them again, for example on warm reboot. All our areas are stored in struct KernBootPrivate.
317 * We are going to make this area read-only and reset-proof.
319 if (!kick_highest)
321 D(bug("[Kernel] Boot-time setup complete\n"));
322 kick_highest = AROS_ROUNDUP2((IPTR)BootMemPtr, PAGE_SIZE);
325 D(bug("[Kernel] End of kickstart area 0x%p\n", kick_highest));
328 * Obtain the needed data from the boot taglist.
329 * We need to do this even on first boot, because the taglist and its data
330 * have been moved to the permanent storage.
332 msg = BootMsg;
333 while ((tag = LibNextTagItem(&msg)))
335 switch (tag->ti_Tag)
337 case KRN_KernelBase:
339 * KRN_KernelBase is actually a border between read-only
340 * (code) and read-write (data) sections of the kickstart.
341 * read-write section goes to lower addresses from this one,
342 * so we align it upwards in order not to make part of RW data
343 * read-only.
345 addr = AROS_ROUNDUP2(tag->ti_Data, PAGE_SIZE);
346 break;
348 case KRN_KernelLowest:
349 klo = AROS_ROUNDDOWN2(tag->ti_Data, PAGE_SIZE);
350 break;
352 case KRN_MMAPAddress:
353 mmap = (struct mb_mmap *)tag->ti_Data;
354 break;
356 case KRN_MMAPLength:
357 mmap_len = tag->ti_Data;
358 break;
362 /* Sanity check */
363 if ((!klo) || (!addr))
365 krnPanic(NULL, "Incomplete information from the bootstrap\n"
366 "\n"
367 "Kickstart lowest 0x%p, base 0x%p\n", klo, addr);
371 * Explore memory map and create MemHeaders.
372 * We reserve one page (PAGE_SIZE) at zero address. We will protect it.
374 NEWLIST(&memList);
375 mmap_InitMemory(mmap, mmap_len, &memList, klo, kick_highest, PAGE_SIZE, PC_Memory, allocator);
377 D(bug("[Kernel] kernel_cstart: Booting exec.library...\n"));
380 * mmap_InitMemory() adds MemHeaders to the list in the order they were created.
381 * I. e. highest addresses are added last.
382 * Take highest region in order to create SysBase in it.
384 mh = (struct MemHeader *)REMTAIL(&memList);
385 D(bug("[Kernel] Initial MemHeader: 0x%p - 0x%p (%s)\n", mh->mh_Lower, mh->mh_Upper, mh->mh_Node.ln_Name));
387 if (SysBase)
389 D(bug("[Kernel] Got old SysBase 0x%p...\n", SysBase));
391 * Validate existing SysBase pointer.
392 * Here we check that if refers to a valid existing memory region.
393 * Checksums etc are checked in arch-independent code in exec.library.
394 * It's enough to use only size of public part. Anyway, SysBase will be
395 * reallocated by PrepareExecBase(), it will just keep over some data from
396 * public part (KickMemPtr, KickTagPtr and capture vectors).
398 if (!mmap_ValidateRegion((unsigned long)SysBase, sizeof(struct ExecBase), mmap, mmap_len))
400 D(bug("[Kernel] ... invalidated\n"));
401 SysBase = NULL;
405 /* This handles failures itself */
406 ranges[0] = (UWORD *)klo;
407 ranges[1] = (UWORD *)kick_highest;
408 krnPrepareExecBase(ranges, mh, BootMsg);
411 * Now we have working exec.library memory allocator.
412 * Move console mirror buffer away from unused memory.
413 * WARNING!!! Do not report anything in the debug log before this is done. Remember that sequental
414 * AllocMem()s return sequental blocks! And right beyond our allocated area there will be MemChunk.
415 * Between krnPrepareExecBase() and this AllocMem() upon warm reboot console mirror buffer is set
416 * to an old value right above ExecBase. During krnPrepareExecBase() a MemChunk is built there,
417 * which can be overwritten by bootconsole, especially if the output scrolls.
419 if (scr_Type == SCR_GFX)
421 char *mirror = AllocMem(scr_Width * scr_Height, MEMF_PUBLIC);
423 fb_SetMirror(mirror);
426 D(bug("[Kernel] Created SysBase at 0x%p (pointer at 0x%p), MemHeader 0x%p\n", SysBase, &SysBase, mh));
428 /* Block all user's access to zero page */
429 core_ProtKernelArea(0, PAGE_SIZE, 1, 0, 0);
431 /* Store important private data */
432 TLS_SET(SysBase, SysBase);
434 /* Provide information about our supevisor stack. Useful at least for diagnostics. */
435 SysBase->SysStkLower = (APTR)__KernBootPrivate->SystemStack;
436 SysBase->SysStkUpper = (APTR)__KernBootPrivate->SystemStack + STACK_SIZE * 3;
439 * Make kickstart code area read-only.
440 * We do it only after ExecBase creation because SysBase pointer is put
441 * into .rodata. This way we prevent it from ocassional modification by buggy software.
443 core_ProtKernelArea(addr, kick_highest - addr, 1, 0, 1);
445 /* Transfer the rest of memory list into SysBase */
446 D(bug("[Kernel] Transferring memory list into SysBase...\n"));
447 for (mh = (struct MemHeader *)memList.mlh_Head; mh->mh_Node.ln_Succ; mh = mh2)
449 mh2 = (struct MemHeader *)mh->mh_Node.ln_Succ;
451 D(bug("[Kernel] * 0x%p - 0x%p (%s)\n", mh->mh_Lower, mh->mh_Upper, mh->mh_Node.ln_Name));
452 Enqueue(&SysBase->MemList, &mh->mh_Node);
456 * RTF_SINGLETASK residents are called with supervisor privilege level.
457 * Original AmigaOS(tm) does the same, some Amiga hardware expansion ROM
458 * rely on it. Here we continue the tradition, because it's useful for
459 * acpica.library (which needs to look for RSDP in the first 1M)
461 InitCode(RTF_SINGLETASK, 0);
464 * After InitCode(RTF_SINGLETASK) we may have acpica.library
465 * Now we can use ACPI information in order to set up advanced things (SMP, APIC, etc).
466 * Interrupts are still disabled and we are still supervisor.
468 acpi_Initialize();
470 /* Now initialize our interrupt controller (XT-PIC or APIC) */
471 ictl_Initialize();
473 /* The last thing to do is to start up secondary CPU cores (if any) */
474 smp_Initialize();
476 /* Drop privileges down to user mode before calling RTF_COLDSTART */
477 D(bug("[Kernel] Leaving supervisor mode\n"));
478 asm volatile (
479 "mov %[user_ds],%%ds\n\t" // Load DS and ES
480 "mov %[user_ds],%%es\n\t"
481 "mov %%rsp,%%r12\n\t"
482 "pushq %[ds]\n\t" // SS
483 "pushq %%r12\n\t" // rSP
484 "pushq $0x3002\n\t" // rFLAGS
485 "pushq %[cs]\n\t" // CS
486 "pushq $1f\n\t"
487 "iretq\n 1:"
488 ::[user_ds]"r"(USER_DS),[ds]"i"(USER_DS),[cs]"i"(USER_CS):"r12");
490 D(bug("[Kernel] Done?! Still here?\n"));
493 * We are fully done. Run exec.library and the rest.
494 * exec.library will be the first resident to run. It will enable interrupts and multitasking for us.
496 InitCode(RTF_COLDSTART, 0);
498 /* The above must not return */
499 krnPanic(KernelBase, "System Boot Failed!");
/* Small delay routine used by exec_cinit initializer.
   NOTE(review): .short 0x00eb emits bytes EB 00, which looks like a short
   "jmp +0" used as an I/O-style delay before retq — confirm against callers. */
503 asm("\ndelay:\t.short 0x00eb\n\tretq");
/* Our boot-time stack (used by core_Kick(); 16-byte aligned as the x86-64 ABI expects) */
506 static char boot_stack[STACK_SIZE] __attribute__((aligned(16)));
508 struct gdt_64bit
510 struct segment_desc seg0; /* seg 0x00 */
511 struct segment_desc super_cs; /* seg 0x08 */
512 struct segment_desc super_ds; /* seg 0x10 */
513 struct segment_desc user_cs32; /* seg 0x18 */
514 struct segment_desc user_ds; /* seg 0x20 */
515 struct segment_desc user_cs; /* seg 0x28 */
516 struct segment_desc gs; /* seg 0x30 */
517 struct segment_desc ldt; /* seg 0x38 */
518 struct
520 struct segment_desc tss_low; /* seg 0x40... */
521 struct segment_ext tss_high;
522 } tss[16];
525 void core_SetupGDT(struct KernBootPrivate *__KernBootPrivate)
527 struct gdt_64bit *GDT;
528 struct tss_64bit *TSS;
529 intptr_t tls_ptr;
530 int i;
532 D(bug("[Kernel] core_SetupGDT(0x%p)\n", __KernBootPrivate));
534 if (!__KernBootPrivate->GDT)
536 __KernBootPrivate->system_tls = krnAllocBootMem(sizeof(tls_t));
537 __KernBootPrivate->GDT = krnAllocBootMemAligned(sizeof(struct gdt_64bit), 128);
538 __KernBootPrivate->TSS = krnAllocBootMemAligned(sizeof(struct tss_64bit) * 16, 128);
540 D(bug("[Kernel] Allocated GDT 0x%p, TLS 0x%p\n", __KernBootPrivate->GDT, __KernBootPrivate->system_tls));
543 GDT = __KernBootPrivate->GDT;
544 TSS = __KernBootPrivate->TSS;
546 /* Supervisor segments */
547 GDT->super_cs.type=0x1a; /* code segment */
548 GDT->super_cs.dpl=0; /* supervisor level */
549 GDT->super_cs.p=1; /* present */
550 GDT->super_cs.l=1; /* long (64-bit) one */
551 GDT->super_cs.d=0; /* must be zero */
552 GDT->super_cs.limit_low=0xffff;
553 GDT->super_cs.limit_high=0xf;
554 GDT->super_cs.g=1;
556 GDT->super_ds.type=0x12; /* data segment */
557 GDT->super_ds.dpl=0; /* supervisor level */
558 GDT->super_ds.p=1; /* present */
559 GDT->super_ds.limit_low=0xffff;
560 GDT->super_ds.limit_high=0xf;
561 GDT->super_ds.g=1;
562 GDT->super_ds.d=1;
564 /* User mode segments */
565 GDT->user_cs.type=0x1a; /* code segment */
566 GDT->user_cs.dpl=3; /* User level */
567 GDT->user_cs.p=1; /* present */
568 GDT->user_cs.l=1; /* long mode */
569 GDT->user_cs.d=0; /* must be zero */
570 GDT->user_cs.limit_low=0xffff;
571 GDT->user_cs.limit_high=0xf;
572 GDT->user_cs.g=1;
574 GDT->user_cs32.type=0x1a; /* code segment for legacy 32-bit code. NOT USED YET! */
575 GDT->user_cs32.dpl=3; /* user level */
576 GDT->user_cs32.p=1; /* present */
577 GDT->user_cs32.l=0; /* 32-bit mode */
578 GDT->user_cs32.d=1; /* 32-bit code */
579 GDT->user_cs32.limit_low=0xffff;
580 GDT->user_cs32.limit_high=0xf;
581 GDT->user_cs32.g=1;
583 GDT->user_ds.type=0x12; /* data segment */
584 GDT->user_ds.dpl=3; /* user level */
585 GDT->user_ds.p=1; /* present */
586 GDT->user_ds.limit_low=0xffff;
587 GDT->user_ds.limit_high=0xf;
588 GDT->user_ds.g=1;
589 GDT->user_ds.d=1;
591 for (i=0; i < 16; i++)
593 const unsigned long tss_limit = sizeof(struct tss_64bit) * 16 - 1;
595 /* Task State Segment */
596 GDT->tss[i].tss_low.type = 0x09; /* 64-bit TSS */
597 GDT->tss[i].tss_low.limit_low = tss_limit;
598 GDT->tss[i].tss_low.base_low = ((unsigned long)&TSS[i]) & 0xffff;
599 GDT->tss[i].tss_low.base_mid = (((unsigned long)&TSS[i]) >> 16) & 0xff;
600 GDT->tss[i].tss_low.dpl = 3; /* User mode task */
601 GDT->tss[i].tss_low.p = 1; /* present */
602 GDT->tss[i].tss_low.limit_high = (tss_limit >> 16) & 0x0f;
603 GDT->tss[i].tss_low.base_high = (((unsigned long)&TSS[i]) >> 24) & 0xff;
604 GDT->tss[i].tss_high.base_ext = 0; /* is within 4GB :-D */
607 tls_ptr = (intptr_t)__KernBootPrivate->system_tls;
609 GDT->gs.type=0x12; /* data segment */
610 GDT->gs.dpl=3; /* user level */
611 GDT->gs.p=1; /* present */
612 GDT->gs.base_low = tls_ptr & 0xffff;
613 GDT->gs.base_mid = (tls_ptr >> 16) & 0xff;
614 GDT->gs.base_high = (tls_ptr >> 24) & 0xff;
615 GDT->gs.g=1;
616 GDT->gs.d=1;
619 void core_CPUSetup(UBYTE _APICID, IPTR SystemStack)
621 struct segment_selector GDT_sel;
622 struct tss_64bit *TSS = __KernBootPrivate->TSS;
624 D(bug("[Kernel] core_CPUSetup(%d, 0x%p)\n", _APICID, SystemStack));
627 * At the moment two of three stacks are reserved. IST is not used (indexes == 0 in interrupt gates)
628 * and ring 1 is not used either. However, the space pointed to by IST is used as a temporary stack
629 * for warm restart routine.
632 TSS[_APICID].ist1 = SystemStack + STACK_SIZE - 16; /* Interrupt stack entry 1 (failsafe) */
633 TSS[_APICID].rsp0 = SystemStack + STACK_SIZE * 2 - 16; /* Ring 0 (Supervisor) */
634 TSS[_APICID].rsp1 = SystemStack + STACK_SIZE * 3 - 16; /* Ring 1 (reserved) */
636 D(bug("[Kernel] core_CPUSetup[%d]: Reloading the GDT and Task Register\n", _APICID));
638 GDT_sel.size = sizeof(struct gdt_64bit) - 1;
639 GDT_sel.base = (uint64_t)__KernBootPrivate->GDT;
640 asm volatile ("lgdt %0"::"m"(GDT_sel));
641 asm volatile ("ltr %w0"::"r"(TASK_SEG + (_APICID << 4)));
642 asm volatile ("mov %0,%%gs"::"a"(USER_GS));