/*
 * Copyright (c) 2001-2004 Jakub Jermar
 * Copyright (c) 2005-2006 Ondrej Palkovsky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/** @addtogroup amd64
 * @{
 */
/** @file
 */

#include <arch/pm.h>
#include <arch/mm/page.h>
#include <arch/types.h>
#include <arch/interrupt.h>
#include <arch/asm.h>
#include <interrupt.h>
#include <mm/as.h>
#include <config.h>
#include <memstr.h>
#include <mm/slab.h>
#include <debug.h>
/*
 * There is no segmentation in long mode so we set up flat mode. In this
 * mode, we use, for each privilege level, two segments spanning the
 * whole memory. One is for code and one is for data.
 */
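
/*
 * The order of the entries below is significant: it is expected to stay in
 * sync with the *_DES indices (KTEXT_DES, KDATA_DES, ..., TSS_DES) declared
 * in arch/pm.h, because gdtselector() and the gdt_p[TSS_DES] accesses in
 * pm_init() derive selector values and array offsets from those indices.
 */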
descriptor_t gdt[GDT_ITEMS] = {
    /* NULL descriptor */
    { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
    /* KTEXT descriptor */
    { .limit_0_15 = 0xffff,
      .base_0_15 = 0,
      .base_16_23 = 0,
      .access = AR_PRESENT | AR_CODE | DPL_KERNEL | AR_READABLE,
      .limit_16_19 = 0xf,
      .available = 0,
      .longmode = 1,
      .special = 0,
      .granularity = 1,
      .base_24_31 = 0 },
    /* KDATA descriptor */
    { .limit_0_15 = 0xffff,
      .base_0_15 = 0,
      .base_16_23 = 0,
      .access = AR_PRESENT | AR_DATA | AR_WRITABLE | DPL_KERNEL,
      .limit_16_19 = 0xf,
      .available = 0,
      .longmode = 0,
      .special = 0,
      .granularity = 1,
      .base_24_31 = 0 },
    /* UDATA descriptor */
    { .limit_0_15 = 0xffff,
      .base_0_15 = 0,
      .base_16_23 = 0,
      .access = AR_PRESENT | AR_DATA | AR_WRITABLE | DPL_USER,
      .limit_16_19 = 0xf,
      .available = 0,
      .longmode = 0,
      .special = 1,
      .granularity = 1,
      .base_24_31 = 0 },
    /* UTEXT descriptor */
    { .limit_0_15 = 0xffff,
      .base_0_15 = 0,
      .base_16_23 = 0,
      .access = AR_PRESENT | AR_CODE | DPL_USER,
      .limit_16_19 = 0xf,
      .available = 0,
      .longmode = 1,
      .special = 0,
      .granularity = 1,
      .base_24_31 = 0 },
    /* KTEXT 32-bit protected, for protected mode before long mode */
    { .limit_0_15 = 0xffff,
      .base_0_15 = 0,
      .base_16_23 = 0,
      .access = AR_PRESENT | AR_CODE | DPL_KERNEL | AR_READABLE,
      .limit_16_19 = 0xf,
      .available = 0,
      .longmode = 0,
      .special = 1,
      .granularity = 1,
      .base_24_31 = 0 },
    /*
     * TSS descriptor - setup will be completed later.
     * On AMD64 it is 64-bit and therefore occupies 2 items in the table.
     */
    { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
    { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
    /* VESA Init descriptor */
#ifdef CONFIG_FB
    { 0xffff, 0, VESA_INIT_SEGMENT >> 12, AR_PRESENT | AR_CODE | DPL_KERNEL, 0xf, 0, 0, 0, 0, 0 }
#endif
};
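
/* The IDT is shared by all CPUs (see pm_init()); each CPU loads the same idtr. */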
idescriptor_t idt[IDT_ITEMS];

ptr_16_64_t gdtr = { .limit = sizeof(gdt), .base = (uint64_t) gdt };
ptr_16_64_t idtr = { .limit = sizeof(idt), .base = (uint64_t) idt };

static tss_t tss;
tss_t *tss_p = NULL;
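
/** Set the 64-bit base address in a TSS descriptor (split across four fields). */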
void gdt_tss_setbase(descriptor_t *d, uintptr_t base)
{
    tss_descriptor_t *td = (tss_descriptor_t *) d;

    td->base_0_15 = base & 0xffff;
    td->base_16_23 = (base >> 16) & 0xff;
    td->base_24_31 = (base >> 24) & 0xff;
    td->base_32_63 = (base >> 32);
}
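
/** Set the 20-bit segment limit in a TSS descriptor (split across two fields). */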
void gdt_tss_setlimit(descriptor_t *d, uint32_t limit)
{
    tss_descriptor_t *td = (tss_descriptor_t *) d;

    td->limit_0_15 = limit & 0xffff;
    td->limit_16_19 = (limit >> 16) & 0xf;
}
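
/** Set the 64-bit handler entry point in an IDT gate descriptor. */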
void idt_setoffset(idescriptor_t *d, uintptr_t offset)
{
    /*
     * Offset is a linear address.
     */
    d->offset_0_15 = offset & 0xffff;
    d->offset_16_31 = (offset >> 16) & 0xffff;
    d->offset_32_63 = offset >> 32;
}
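
/** Clear (zero out) a TSS structure before it is used. */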
void tss_initialize(tss_t *t)
{
    memsetb((uintptr_t) t, sizeof(tss_t), 0);
}

/*
 * This function takes care of proper setup of IDT and IDTR.
 */
void idt_init(void)
{
    idescriptor_t *d;
    int i;

    for (i = 0; i < IDT_ITEMS; i++) {
        d = &idt[i];

        d->unused = 0;
        d->selector = gdtselector(KTEXT_DES);

        d->present = 1;
        d->type = AR_INTERRUPT;  /* masking interrupt */

        idt_setoffset(d, ((uintptr_t) interrupt_handlers) + i * interrupt_handler_size);
    }
}

/** Initialize segmentation - code/data/idt tables.
 */
void pm_init(void)
{
    descriptor_t *gdt_p = (struct descriptor *) gdtr.base;
    tss_descriptor_t *tss_desc;

    /*
     * Each CPU has its private GDT and TSS.
     * All CPUs share one IDT.
     */

    if (config.cpu_active == 1) {
        idt_init();
        /*
         * NOTE: bootstrap CPU has statically allocated TSS, because
         * the heap hasn't been initialized so far.
         */
        tss_p = &tss;
    } else {
        /*
         * We are going to use malloc, which may return a pointer
         * that is not mapped by the boot page tables, so initialize
         * the CR3 register ahead of page_init.
         */
        write_cr3((uintptr_t) AS_KERNEL->page_table);

        tss_p = (struct tss *) malloc(sizeof(tss_t), FRAME_ATOMIC);
        if (!tss_p)
            panic("could not allocate TSS\n");
    }

    tss_initialize(tss_p);

    tss_desc = (tss_descriptor_t *) (&gdt_p[TSS_DES]);
    tss_desc->present = 1;
    tss_desc->type = AR_TSS;
    tss_desc->dpl = PL_KERNEL;

    gdt_tss_setbase(&gdt_p[TSS_DES], (uintptr_t) tss_p);
    gdt_tss_setlimit(&gdt_p[TSS_DES], TSS_BASIC_SIZE - 1);

    gdtr_load(&gdtr);
    idtr_load(&idtr);

    /*
     * As of this moment, the current CPU has its own GDT pointing
     * to its own TSS. We just need to load the TR register.
     */
    tr_load(gdtselector(TSS_DES));
}

/** @}
 */