Pass AROS magic value from boot loader to kernel start and check for it
[AROS.git] / arch / ppc-sam440 / kernel / kernel_init.c
blob 1df675d6b6a56fdeb52d44b73dfa03bb8a5ef298
#include <aros/kernel.h>
#include <aros/libcall.h>
#include <aros/symbolsets.h>
#include <inttypes.h>
#include <exec/libraries.h>
#include <exec/execbase.h>
#include <aros/debug.h>
#include <exec/memory.h>
#include "memory.h"
#include <utility/tagitem.h>
#include <asm/amcc440.h>
#include <asm/io.h>
#include <strings.h>

#include <proto/exec.h>

#include "kernel_intern.h"
#include LC_LIBDEFS_FILE
#include "syscall.h"

/* Forward declarations */
static void __attribute__((used)) kernel_cstart(struct TagItem *msg);

extern void exec_main(struct TagItem *msg, void *entry);
/* A very very very.....
 * ... very ugly piece of code.
 *
 * The AROS kernel gets executed at this point. The stack is unknown here: it might be
 * set up properly, or it might be totally broken, so the contents of the %r1 register
 * cannot be trusted. Even worse, the kernel has most likely been relocated to some virtual
 * address and the MMU mapping might not be ready yet.
 *
 * The strategy is to create one MMU entry first, mapping the first 16MB of RAM into the
 * last 16MB of the address space in one go, and then to build the proper MMU map once the
 * BSS sections are cleared and the C startup routine is executed. This "trick" assumes two
 * evil things:
 * - the kernel is loaded (and fits completely) within the first 16MB of RAM, and
 * - the kernel is mapped into the top (last 16MB) of memory.
 *
 * Yes, I'm evil ;)
 */
asm(".section .aros.init,\"ax\"\n\t"
    ".globl start\n\t"
    ".type start,@function\n"
    "start:\n\t"
    "mr %r29,%r3\n\t"                /* Don't forget the message */
    "lis %r5,0x4152\n\t"             /* Load the AROS magic value ('AR' = 0x4152, 'OS' = 0x4f53) */
    "ori %r5,%r5,0x4f53\n\t"
    "cmpw %r5,%r4\n\t"               /* Check whether it was passed as the 2nd parameter */
    "bnelr\n\t"                      /* If not, return to the caller */
    "lis %r9,0xff00\n\t"             /* Virtual address 0xff000000 */
    "li %r10,0\n\t"                  /* Physical address 0x00000000 */
    "ori %r9,%r9,0x0270\n\t"         /* 16MB page. A valid one */
    "li %r11,0x043f\n\t"             /* Write-through cache. RWX enabled :) */
    "li %r0,0\n\t"                   /* TLB entry number 0 */
    "tlbwe %r9,%r0,0\n\t"
    "tlbwe %r10,%r0,1\n\t"
    "tlbwe %r11,%r0,2\n\t"
    "isync\n\t"                      /* Invalidate shadow TLBs */
    "li %r9,0; mttbl %r9; mttbu %r9; mttbl %r9\n\t"
    "lis %r9,tmp_stack_end@ha\n\t"   /* Use a temporary stack while clearing the BSS */
    "lwz %r1,tmp_stack_end@l(%r9)\n\t"
    "bl __clear_bss\n\t"             /* Clear 'em ALL!!! */
    "lis %r11,target_address@ha\n\t" /* Load the address of the C init code */
    "mr %r3,%r29\n\t"                /* Restore the message */
    "lwz %r11,target_address@l(%r11)\n\t"
    "lis %r9,stack_end@ha\n\t"       /* Use a brand new stack to do evil things */
    "mtctr %r11\n\t"
    "lwz %r1,stack_end@l(%r9)\n\t"
    "bctr\n\t"                       /* And start the game... */
    ".string \"Native/CORE v3 (" __DATE__ ")\""
    "\n\t.text\n\t"
);
static void __attribute__((used)) __clear_bss(struct TagItem *msg)
{
    struct KernelBSS *bss = (struct KernelBSS *) krnGetTagData(KRN_KernelBss, 0, msg);

    if (bss)
    {
        while (bss->addr && bss->len)
        {
            bzero(bss->addr, bss->len);
            bss++;
        }
    }
}
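/* Note (descriptive, inferred from the loop above): the KRN_KernelBss tag is expected to
 * point to an array of KernelBSS {addr, len} descriptors terminated by an all-zero entry;
 * __clear_bss simply walks that array and zeroes each region.
 */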
static union {
    struct TagItem bootup_tags[64];
    uint32_t       tmp_stack[128];
} tmp_struct __attribute__((used, section(".data"), aligned(16)));

static const uint32_t *tmp_stack_end __attribute__((used, section(".text"))) = &tmp_struct.tmp_stack[124];
static uint32_t stack[STACK_SIZE] __attribute__((used, aligned(16)));
static uint32_t stack_super[STACK_SIZE] __attribute__((used, aligned(16)));
static const uint32_t *stack_end __attribute__((used, section(".text"))) = &stack[STACK_SIZE-4];
static const void *target_address __attribute__((used, section(".text"))) = (void*)kernel_cstart;
static struct TagItem *BootMsg __attribute__((used));
static char CmdLine[200] __attribute__((used));

module_t *modlist;
uint32_t  modlength;
uintptr_t memlo;
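/* Descriptive note on the globals above (inferred from their use below): modlist/modlength
 * describe the debug-info module table copied out of the boot message, while memlo acts as a
 * simple bump allocator over kernel private memory during early startup and later marks the
 * start of the supervisor MemHeader's free space.
 */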
static void __attribute__((used)) kernel_cstart(struct TagItem *msg)
{
    struct TagItem *tmp = tmp_struct.bootup_tags;
    uint32_t reg;

    /* Lowest usable kernel memory */
    memlo = 0xff000000;

    /* Disable interrupts and let the FPU work */
    wrmsr((rdmsr() & ~(MSR_CE | MSR_EE | MSR_ME)) | MSR_FP);

    /* Enable the FPU */
    wrspr(CCR0, rdspr(CCR0) & ~0x00100000);
    wrspr(CCR1, rdspr(CCR1) | (0x80000000 >> 24));

    /* First message only after the FPU is enabled, otherwise: illegal instruction */
    D(bug("[KRN] Sam440 Kernel built on %s\n", __DATE__));

    /* Set the supervisor stack */
    wrspr(SPRG0, (uint32_t)&stack_super[STACK_SIZE-4]);

    wrspr(SPRG4, 0);    /* Clear KernelBase */
    wrspr(SPRG5, 0);    /* Clear SysBase */

    D(bug("[KRN] Kernel resource pre-exec init\n"));
    D(bug("[KRN] MSR=%08x CCR0=%08x CCR1=%08x\n", rdmsr(), rdspr(CCR0), rdspr(CCR1)));
    D(bug("[KRN] USB config %08x\n", rddcr(SDR0_USB0)));

    D(bug("[KRN] msg @ %p\n", msg));
    D(bug("[KRN] Copying msg data\n"));
    while (msg->ti_Tag != TAG_DONE)
    {
        *tmp = *msg;

        if (tmp->ti_Tag == KRN_CmdLine)
        {
            strcpy(CmdLine, (char *) msg->ti_Data);
            tmp->ti_Data = (STACKIPTR) CmdLine;
            D(bug("[KRN] CmdLine %s\n", tmp->ti_Data));
        }
        else if (tmp->ti_Tag == KRN_BootLoader)
        {
            tmp->ti_Data = (STACKIPTR) memlo;
            memlo += (strlen((const char *) msg->ti_Data) + 4) & ~3;
            strcpy((char *) tmp->ti_Data, (const char *) msg->ti_Data);
        }
        else if (tmp->ti_Tag == KRN_DebugInfo)
        {
            int i;
            struct MinList *mlist = (struct MinList *) tmp->ti_Data;

            D(bug("[KRN] DebugInfo at %08x\n", mlist));

            module_t *mod = (module_t *) memlo;

            ListLength(mlist, modlength);
            modlist = mod;

            memlo = (uintptr_t) &mod[modlength];

            D(bug("[KRN] Bootstrap loaded debug info for %d modules\n", modlength));

            /* Copy the module entries */
            for (i = 0; i < modlength; i++)
            {
                module_t *m = (module_t *) REMHEAD(mlist);
                symbol_t *sym;

                mod[i].m_lowest = m->m_lowest;
                mod[i].m_highest = m->m_highest;
                mod[i].m_str = NULL;
                NEWLIST(&mod[i].m_symbols);
                mod[i].m_name = (char *) memlo;
                memlo += (strlen(m->m_name) + 4) & ~3;
                strcpy(mod[i].m_name, m->m_name);

                D(bug("[KRN] Module %s\n", m->m_name));

                ForeachNode(&m->m_symbols, sym)
                {
                    symbol_t *newsym = (symbol_t *) memlo;
                    memlo += sizeof(symbol_t);

                    newsym->s_name = (char *) memlo;
                    memlo += (strlen(sym->s_name) + 4) & ~3;
                    strcpy(newsym->s_name, sym->s_name);

                    newsym->s_lowest = sym->s_lowest;
                    newsym->s_highest = sym->s_highest;

                    ADDTAIL(&mod[i].m_symbols, newsym);
                }
            }

            D(bug("[KRN] Debug info uses %d KB of memory\n", ((intptr_t)memlo - (intptr_t)mod) >> 10));
        }

        ++tmp;
        ++msg;
    }
    memlo = (memlo + 4095) & ~4095;

    BootMsg = tmp_struct.bootup_tags;
    D(bug("[KRN] BootMsg @ %p\n", BootMsg));

    /* Do a slightly more sophisticated MMU map */
    mmu_init(BootMsg);
    intr_init();

    /* Initialize exec.library */
    exec_main(BootMsg, NULL);

    goSuper();
    D(bug("[KRN] Uhm? Nothing to do?\n[KRN] STOPPED\n"));

    /*
     * Never, ever try to return. That would attempt to go back to the physical address
     * of the asm trampoline, not the virtual one!
     */
    while (1)
    {
        /* Enter the power-saving state; the CPU idles here between exceptions */
        wrmsr(rdmsr() | MSR_POW);
    }
}
AROS_LH0(void *, KrnCreateContext,
         struct KernelBase *, KernelBase, 18, Kernel)
{
    AROS_LIBFUNC_INIT

    context_t *ctx;

    uint32_t oldmsr = goSuper();

    ctx = Allocate(KernelBase->kb_SupervisorMem, sizeof(context_t));
    if (ctx)
        bzero(ctx, sizeof(context_t));

    wrmsr(oldmsr);

    if (!ctx)
        ctx = AllocMem(sizeof(context_t), MEMF_PUBLIC|MEMF_CLEAR);

    return ctx;

    AROS_LIBFUNC_EXIT
}
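/* Descriptive note: contexts are preferably allocated from the supervisor memory pool
 * (mapped at the top of the address space), with public memory as a fallback.
 * KrnDeleteContext below uses the address range of the context to decide which of the
 * two pools it has to return the context to.
 */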
AROS_LH1(void, KrnDeleteContext,
         AROS_LHA(void *, context, A0),
         struct KernelBase *, KernelBase, 19, Kernel)
{
    AROS_LIBFUNC_INIT

    /* Was the context in supervisor space? Then deallocate it there :) */
    if (((intptr_t)context & 0xf0000000) == 0xf0000000)
    {
        uint32_t oldmsr = goSuper();

        Deallocate(KernelBase->kb_SupervisorMem, context, sizeof(context_t));

        wrmsr(oldmsr);
    }
    else
        FreeMem(context, sizeof(context_t));

    /* Did this context own the FPU? Then mark the FPU as completely free */
    if (KernelBase->kb_FPUOwner == context)
        KernelBase->kb_FPUOwner = NULL;

    AROS_LIBFUNC_EXIT
}
AROS_LH0I(struct TagItem *, KrnGetBootInfo,
          struct KernelBase *, KernelBase, 11, Kernel)
{
    AROS_LIBFUNC_INIT

    return BootMsg;

    AROS_LIBFUNC_EXIT
}

struct MemHeader mh;
static int Kernel_Init(LIBBASETYPEPTR LIBBASE)
{
    int i;
    struct ExecBase *SysBase = getSysBase();
    uint32_t reg;

    uintptr_t krn_lowest  = krnGetTagData(KRN_KernelLowest,  0, BootMsg);
    uintptr_t krn_highest = krnGetTagData(KRN_KernelHighest, 0, BootMsg);

    /* Get the PLB and CPU speed */

    /* PLL divisors */
    wrdcr(CPR0_CFGADDR, CPR0_PLLD0);
    reg = rddcr(CPR0_CFGDATA);

    uint32_t fbdv = (reg >> 24) & 0x1f;
    if (fbdv == 0)
        fbdv = 32;
    uint32_t fwdva = (reg >> 16) & 0x1f;
    if (fwdva == 0)
        fwdva = 16;
    uint32_t fwdvb = (reg >> 8) & 7;
    if (fwdvb == 0)
        fwdvb = 8;
    uint32_t lfbdv = reg & 0x3f;
    if (lfbdv == 0)
        lfbdv = 64;

    /* OPB clock divisor */
    wrdcr(CPR0_CFGADDR, CPR0_OPBD0);
    reg = rddcr(CPR0_CFGDATA);
    uint32_t opbdv0 = (reg >> 24) & 3;
    if (opbdv0 == 0)
        opbdv0 = 4;

    /* Peripheral clock divisor */
    wrdcr(CPR0_CFGADDR, CPR0_PERD0);
    reg = rddcr(CPR0_CFGDATA);
    uint32_t perdv0 = (reg >> 24) & 7;
    if (perdv0 == 0)
        perdv0 = 8;

    /* PCI clock divisor */
    wrdcr(CPR0_CFGADDR, CPR0_SPCID);
    reg = rddcr(CPR0_CFGDATA);
    uint32_t spcid0 = (reg >> 24) & 3;
    if (spcid0 == 0)
        spcid0 = 4;

    /* Primary B divisor */
    wrdcr(CPR0_CFGADDR, CPR0_PRIMBD0);
    reg = rddcr(CPR0_CFGDATA);
    uint32_t prbdv0 = (reg >> 24) & 7;
    if (prbdv0 == 0)
        prbdv0 = 8;
    /* All divisors are known. Read the PLL control register and calculate the m value (see 44ep.book) */
    wrdcr(CPR0_CFGADDR, CPR0_PLLC0);
    reg = rddcr(CPR0_CFGDATA);
    uint32_t m;
    switch ((reg >> 24) & 3)            /* Feedback selector */
    {
        case 0:                         /* PLL output (A or B) */
            if ((reg & 0x20000000))     /* PLLOUTB */
                m = lfbdv * fbdv * fwdvb;
            else
                m = lfbdv * fbdv * fwdva;
            break;
        case 1:                         /* CPU */
            m = fbdv * fwdva;
            break;
        default:
            m = perdv0 * opbdv0 * fwdvb;
            break;
    }

    uint32_t vco = (m * 66666666) + m/2;
    LIBBASE->kb_CPUFreq = vco / fwdva;
    LIBBASE->kb_PLBFreq = vco / fwdvb / perdv0;
    LIBBASE->kb_OPBFreq = LIBBASE->kb_PLBFreq / opbdv0;
    LIBBASE->kb_EPBFreq = LIBBASE->kb_PLBFreq / perdv0;
    LIBBASE->kb_PCIFreq = LIBBASE->kb_PLBFreq / spcid0;
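    /* Worked example with hypothetical divisor values (not read from real hardware):
     * with the CPU feedback path selected and fbdv = 10, fwdva = 2, we get m = 10 * 2 = 20,
     * vco = 20 * 66666666 + 10 ~= 1333 MHz, and kb_CPUFreq = vco / fwdva ~= 666 MHz.
     * The 66666666 constant corresponds to the 66.(6) MHz reference clock fed into the PLL.
     */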
    /*
     * Slow down the decrementer interrupt a bit. A rough guess is that U-Boot has left us with
     * a 1kHz DEC counter. Enable the decrementer timer and automatic reload of the decrementer value.
     */
    wrspr(DECAR, LIBBASE->kb_OPBFreq / 50);
    wrspr(TCR, rdspr(TCR) | TCR_DIE | TCR_ARE);

    D(bug("[KRN] Kernel resource post-exec init\n"));
    D(bug("[KRN] CPU Speed: %dHz\n", LIBBASE->kb_CPUFreq));
    D(bug("[KRN] PLB Speed: %dHz\n", LIBBASE->kb_PLBFreq));
    D(bug("[KRN] OPB Speed: %dHz\n", LIBBASE->kb_OPBFreq));
    D(bug("[KRN] EPB Speed: %dHz\n", LIBBASE->kb_EPBFreq));
    D(bug("[KRN] PCI Speed: %dHz\n", LIBBASE->kb_PCIFreq));
    /* 4K granularity for data sections */
    krn_lowest &= 0xfffff000;
    /* 64K granularity for code sections */
    krn_highest = (krn_highest + 0xffff) & 0xffff0000;

    /*
     * Store the KernelBase in SPRG4. At this stage SPRG5 should already have been set by
     * exec.library itself.
     */
    wrspr(SPRG4, LIBBASE);

    D(bug("[KRN] Allowing userspace to flush caches\n"));
    wrspr(MMUCR, rdspr(MMUCR) & ~0x000c0000);

    for (i = 0; i < 16; i++)
        NEWLIST(&LIBBASE->kb_Exceptions[i]);

    for (i = 0; i < 64; i++)
        NEWLIST(&LIBBASE->kb_Interrupts[i]);

    NEWLIST(&LIBBASE->kb_Modules);
409 D(bug("[KRN] Preparing kernel private memory "));
410 /* Prepare MemHeader structure to allocate from private low memory */
411 mh.mh_Node.ln_Type = NT_MEMORY;
412 mh.mh_Node.ln_Pri = -128;
413 mh.mh_Node.ln_Name = "Kernel Memory";
414 mh.mh_Attributes = MEMF_FAST | MEMF_KICK | MEMF_LOCAL;
415 mh.mh_First = (struct MemChunk *)memlo;
416 mh.mh_Lower = mh.mh_First;
417 mh.mh_Upper = (APTR) ((uintptr_t) 0xff000000 + krn_lowest - 1);
419 mh.mh_Free = (uintptr_t)mh.mh_Upper - (uintptr_t)mh.mh_Lower + 1;
420 mh.mh_First->mc_Next = NULL;
421 mh.mh_First->mc_Bytes = mh.mh_Free;
423 D(bug("[KRN] %08x - %08x, %d KB free\n", mh.mh_Lower, mh.mh_Upper, mh.mh_Free >> 10));
425 LIBBASE->kb_SupervisorMem = &mh;
    /*
     * Add a MemHeader for the kernel memory to the public MemList, to avoid invalid
     * pointer debug messages for pointers that correctly reference these memory regions.
     */
    {
        struct MemHeader *mh;

        mh = AllocMem(sizeof(struct MemHeader), MEMF_PUBLIC);
        mh->mh_Node.ln_Type = NT_MEMORY;
        mh->mh_Node.ln_Pri = -128;
        mh->mh_Node.ln_Name = "Kernel Memory, Code + Data Sections";
        mh->mh_Attributes = MEMF_FAST | MEMF_KICK | MEMF_LOCAL;
        mh->mh_First = NULL;
        mh->mh_Free = 0;
        mh->mh_Lower = LIBBASE->kb_SupervisorMem;
        mh->mh_Upper = (APTR) ((uintptr_t) 0xff000000 + krn_highest - 1);

        Enqueue(&SysBase->MemList, &mh->mh_Node);
    }
    /*
     * kernel.resource is ready to run. Enable external interrupts and leave
     * supervisor mode.
     */
    wrmsr(rdmsr() | MSR_EE);
    D(bug("[KRN] Interrupts enabled\n"));

    goUser();
    D(bug("[KRN] Entered user mode\n"));

    return TRUE;
}

ADD2INITLIB(Kernel_Init, 0)