/*
 *  linux/arch/arm/mm/mm-armv.c
 *
 *  Copyright (C) 1998-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Page table sludge for ARM v3 and v4 processor architectures.
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/nodemask.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>

#include <asm/mach/map.h>
#define CPOLICY_UNCACHED	0
#define CPOLICY_BUFFERED	1
#define CPOLICY_WRITETHROUGH	2
#define CPOLICY_WRITEBACK	3
#define CPOLICY_WRITEALLOC	4

static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
static unsigned int ecc_mask __initdata = 0;
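
/*
 * pgprot_kernel holds the protection bits applied to kernel mappings;
 * it is filled in by build_mem_type_table() below.  top_pmd caches the
 * pmd covering the top of the address space (the vectors area) and is
 * set up elsewhere during MMU initialisation.
 */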
pgprot_t pgprot_kernel;

EXPORT_SYMBOL(pgprot_kernel);

pmd_t *top_pmd;
struct cachepolicy {
	const char	policy[16];
	unsigned int	cr_mask;
	unsigned int	pmd;
	unsigned int	pte;
};
static struct cachepolicy cache_policies[] __initdata = {
	{
		.policy		= "uncached",
		.cr_mask	= CR_W|CR_C,
		.pmd		= PMD_SECT_UNCACHED,
		.pte		= 0,
	}, {
		.policy		= "buffered",
		.cr_mask	= CR_C,
		.pmd		= PMD_SECT_BUFFERED,
		.pte		= PTE_BUFFERABLE,
	}, {
		.policy		= "writethrough",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WT,
		.pte		= PTE_CACHEABLE,
	}, {
		.policy		= "writeback",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WB,
		.pte		= PTE_BUFFERABLE|PTE_CACHEABLE,
	}, {
		.policy		= "writealloc",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WBWA,
		.pte		= PTE_BUFFERABLE|PTE_CACHEABLE,
	}
};
/*
 * These are useful for identifying cache coherency
 * problems by allowing the cache or the cache and
 * writebuffer to be turned off.  (Note: the write
 * buffer should not be on and the cache off).
 */
static void __init early_cachepolicy(char **p)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
		int len = strlen(cache_policies[i].policy);

		if (memcmp(*p, cache_policies[i].policy, len) == 0) {
			cachepolicy = i;
			cr_alignment &= ~cache_policies[i].cr_mask;
			cr_no_alignment &= ~cache_policies[i].cr_mask;
			*p += len;
			break;
		}
	}
	if (i == ARRAY_SIZE(cache_policies))
		printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n");
	flush_cache_all();
	set_cr(cr_alignment);
}
static void __init early_nocache(char **__unused)
{
	char *p = "buffered";
	printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(&p);
}

static void __init early_nowrite(char **__unused)
{
	char *p = "uncached";
	printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(&p);
}
static void __init early_ecc(char **p)
{
	if (memcmp(*p, "on", 2) == 0) {
		ecc_mask = PMD_PROTECTION;
		*p += 2;
	} else if (memcmp(*p, "off", 3) == 0) {
		ecc_mask = 0;
		*p += 3;
	}
}

__early_param("nocache", early_nocache);
__early_param("nowb", early_nowrite);
__early_param("cachepolicy=", early_cachepolicy);
__early_param("ecc=", early_ecc);
static int __init noalign_setup(char *__unused)
{
	cr_alignment &= ~CR_A;
	cr_no_alignment &= ~CR_A;
	set_cr(cr_alignment);
	return 1;
}

__setup("noalign", noalign_setup);
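
/*
 * The parameters above are consumed from the kernel command line at
 * early boot.  An illustrative (not exhaustive) example:
 *
 *	cachepolicy=writethrough ecc=on noalign
 *
 * selects the write-through data cache policy, sets the ECC enable bit
 * (PMD_PROTECTION) in first-level descriptors, and clears the alignment
 * fault enable bit (CR_A) in the control register.
 */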
#define FIRST_KERNEL_PGD_NR	(FIRST_USER_PGD_NR + USER_PTRS_PER_PGD)

static inline pmd_t *pmd_off(pgd_t *pgd, unsigned long virt)
{
	return pmd_offset(pgd, virt);
}

static inline pmd_t *pmd_off_k(unsigned long virt)
{
	return pmd_off(pgd_offset_k(virt), virt);
}
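
/*
 * The hardware first-level table has 4096 32-bit entries, each mapping
 * 1MB of virtual space, so a full pgd is 16K: an order-2 page
 * allocation rather than a single page.
 */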
/*
 * need to get a 16k page for level 1
 */
pgd_t *get_pgd_slow(struct mm_struct *mm)
{
	pgd_t *new_pgd, *init_pgd;
	pmd_t *new_pmd, *init_pmd;
	pte_t *new_pte, *init_pte;

	new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 2);
	if (!new_pgd)
		goto no_pgd;

	memzero(new_pgd, FIRST_KERNEL_PGD_NR * sizeof(pgd_t));

	/*
	 * Copy over the kernel and IO PGD entries
	 */
	init_pgd = pgd_offset_k(0);
	memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
	       (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));

	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));

	if (!vectors_high()) {
		/*
		 * On ARM, first page must always be allocated since it
		 * contains the machine vectors.
		 */
		new_pmd = pmd_alloc(mm, new_pgd, 0);
		if (!new_pmd)
			goto no_pmd;

		new_pte = pte_alloc_map(mm, new_pmd, 0);
		if (!new_pte)
			goto no_pte;

		init_pmd = pmd_offset(init_pgd, 0);
		init_pte = pte_offset_map_nested(init_pmd, 0);
		set_pte(new_pte, *init_pte);
		pte_unmap_nested(init_pte);
		pte_unmap(new_pte);
	}

	return new_pgd;

no_pte:
	pmd_free(new_pmd);
no_pmd:
	free_pages((unsigned long)new_pgd, 2);
no_pgd:
	return NULL;
}
void free_pgd_slow(pgd_t *pgd)
{
	pmd_t *pmd;
	struct page *pte;

	if (!pgd)
		return;

	/* pgd is always present and good */
	pmd = pmd_off(pgd, 0);
	if (pmd_none(*pmd))
		goto free;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		goto free;
	}

	pte = pmd_page(*pmd);
	pmd_clear(pmd);
	dec_page_state(nr_page_table_pages);
	pte_lock_deinit(pte);
	pte_free(pte);
	pmd_free(pmd);
free:
	free_pages((unsigned long) pgd, 2);
}
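
/*
 * Note: a Linux pgd/pmd entry on ARM covers 2MB, i.e. two 1MB hardware
 * sections, which is why the section helpers below work in
 * PGDIR_SIZE / 2 steps and move to the second hardware entry when bit
 * 20 of the virtual address is set.
 */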
/*
 * Create a SECTION PGD between VIRT and PHYS in domain
 * DOMAIN with protection PROT.  This operates on half-
 * pgdir entry increments.
 */
static inline void
alloc_init_section(unsigned long virt, unsigned long phys, int prot)
{
	pmd_t *pmdp = pmd_off_k(virt);

	if (virt & (1 << 20))
		pmdp++;

	*pmdp = __pmd(phys | prot);
	flush_pmd_entry(pmdp);
}
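
/*
 * A supersection maps 16MB and must be replicated across 16 consecutive
 * 1MB first-level entries, hence the loop of 16 below.
 */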
/*
 * Create a SUPER SECTION PGD between VIRT and PHYS with protection PROT
 */
static inline void
alloc_init_supersection(unsigned long virt, unsigned long phys, int prot)
{
	int i;

	for (i = 0; i < 16; i += 1) {
		alloc_init_section(virt, phys, prot | PMD_SECT_SUPER);

		virt += (PGDIR_SIZE / 2);
	}
}
/*
 * Add a PAGE mapping between VIRT and PHYS in domain
 * DOMAIN with protection PROT.  Note that due to the
 * way we map the PTEs, we must allocate two PTE_SIZE'd
 * blocks - one for the Linux pte table, and one for
 * the hardware pte table.
 */
static inline void
alloc_init_page(unsigned long virt, unsigned long phys, unsigned int prot_l1, pgprot_t prot)
{
	pmd_t *pmdp = pmd_off_k(virt);
	pte_t *ptep;

	if (pmd_none(*pmdp)) {
		ptep = alloc_bootmem_low_pages(2 * PTRS_PER_PTE *
					       sizeof(pte_t));

		__pmd_populate(pmdp, __pa(ptep) | prot_l1);
	}
	ptep = pte_offset_kernel(pmdp, virt);

	set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, prot));
}
struct mem_types {
	unsigned int	prot_pte;
	unsigned int	prot_l1;
	unsigned int	prot_sect;
	unsigned int	domain;
};
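
/*
 * Baseline protection and domain settings for each mapping type.  These
 * are starting values only; build_mem_type_table() adjusts them for the
 * CPU architecture actually in use.
 */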
static struct mem_types mem_types[] __initdata = {
	[MT_DEVICE] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_WRITE,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_UNCACHED |
				PMD_SECT_AP_WRITE,
		.domain    = DOMAIN_IO,
	},
	[MT_CACHECLEAN] = {
		.prot_sect = PMD_TYPE_SECT,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MINICLEAN] = {
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_LOW_VECTORS] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_EXEC,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_USER,
	},
	[MT_HIGH_VECTORS] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_USER | L_PTE_EXEC,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_USER,
	},
	[MT_MEMORY] = {
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_ROM] = {
		.prot_sect = PMD_TYPE_SECT,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_IXP2000_DEVICE] = { /* IXP2400 requires XCB=101 for on-chip I/O */
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_WRITE,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_UNCACHED |
				PMD_SECT_AP_WRITE | PMD_SECT_BUFFERABLE |
				PMD_SECT_TEX(1),
		.domain    = DOMAIN_IO,
	},
	[MT_NONSHARED_DEVICE] = {
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_NONSHARED_DEV |
				PMD_SECT_AP_WRITE,
		.domain    = DOMAIN_IO,
	}
};
/*
 * Adjust the PMD section entries according to the CPU in use.
 */
void __init build_mem_type_table(void)
{
	struct cachepolicy *cp;
	unsigned int cr = get_cr();
	unsigned int user_pgprot, kern_pgprot;
	int cpu_arch = cpu_architecture();
	int i;

#if defined(CONFIG_CPU_DCACHE_DISABLE)
	if (cachepolicy > CPOLICY_BUFFERED)
		cachepolicy = CPOLICY_BUFFERED;
#elif defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
	if (cachepolicy > CPOLICY_WRITETHROUGH)
		cachepolicy = CPOLICY_WRITETHROUGH;
#endif
	if (cpu_arch < CPU_ARCH_ARMv5) {
		if (cachepolicy >= CPOLICY_WRITEALLOC)
			cachepolicy = CPOLICY_WRITEBACK;
		ecc_mask = 0;
	}

	if (cpu_arch <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale()) {
		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
			if (mem_types[i].prot_l1)
				mem_types[i].prot_l1 |= PMD_BIT4;
			if (mem_types[i].prot_sect)
				mem_types[i].prot_sect |= PMD_BIT4;
		}
	}

	cp = &cache_policies[cachepolicy];
	kern_pgprot = user_pgprot = cp->pte;
	/*
	 * Enable CPU-specific coherency if supported.
	 * (Only available on XSC3 at the moment.)
	 */
	if (arch_is_coherent()) {
		if (cpu_is_xsc3()) {
			mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
			mem_types[MT_MEMORY].prot_pte |= L_PTE_COHERENT;
		}
	}
	/*
	 * ARMv6 and above have extended page tables.
	 */
	if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
		/*
		 * bit 4 becomes XN which we must clear for the
		 * kernel memory mapping.
		 */
		mem_types[MT_MEMORY].prot_sect &= ~PMD_BIT4;
		mem_types[MT_ROM].prot_sect &= ~PMD_BIT4;

		/*
		 * Mark cache clean areas and XIP ROM read only
		 * from SVC mode and no access from userspace.
		 */
		mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;

		/*
		 * Mark the device area as "shared device"
		 */
		mem_types[MT_DEVICE].prot_pte |= L_PTE_BUFFERABLE;
		mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;

		/*
		 * User pages need to be mapped with the ASID
		 * (iow, non-global)
		 */
		user_pgprot |= L_PTE_ASID;

#ifdef CONFIG_SMP
		/*
		 * Mark memory with the "shared" attribute for SMP systems
		 */
		user_pgprot |= L_PTE_SHARED;
		kern_pgprot |= L_PTE_SHARED;
		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
#endif
	}
	for (i = 0; i < 16; i++) {
		unsigned long v = pgprot_val(protection_map[i]);
		v = (v & ~(L_PTE_BUFFERABLE|L_PTE_CACHEABLE)) | user_pgprot;
		protection_map[i] = __pgprot(v);
	}

	mem_types[MT_LOW_VECTORS].prot_pte |= kern_pgprot;
	mem_types[MT_HIGH_VECTORS].prot_pte |= kern_pgprot;

	if (cpu_arch >= CPU_ARCH_ARMv5) {
#ifndef CONFIG_SMP
		/*
		 * Only use write-through for non-SMP systems
		 */
		mem_types[MT_LOW_VECTORS].prot_pte &= ~L_PTE_BUFFERABLE;
		mem_types[MT_HIGH_VECTORS].prot_pte &= ~L_PTE_BUFFERABLE;
#endif
	} else {
		mem_types[MT_MINICLEAN].prot_sect &= ~PMD_SECT_TEX(1);
	}
	pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
				 L_PTE_DIRTY | L_PTE_WRITE |
				 L_PTE_EXEC | kern_pgprot);

	mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
	mem_types[MT_ROM].prot_sect |= cp->pmd;

	switch (cp->pmd) {
	case PMD_SECT_WT:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
		break;
	case PMD_SECT_WB:
	case PMD_SECT_WBWA:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
		break;
	}
	printk("Memory policy: ECC %sabled, Data cache %s\n",
		ecc_mask ? "en" : "dis", cp->policy);
}
#define vectors_base()	(vectors_high() ? 0xffff0000 : 0)
/*
 * Create the page directory entries and any necessary
 * page tables for the mapping specified by `md'.  We
 * are able to cope here with varying sizes and address
 * offsets, and we take full advantage of sections and
 * supersections.
 */
void __init create_mapping(struct map_desc *md)
{
	unsigned long virt, length;
	int prot_sect, prot_l1, domain;
	pgprot_t prot_pte;
	unsigned long off = (u32)__pfn_to_phys(md->pfn);
	if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
		printk(KERN_WARNING "BUG: not creating mapping for "
		       "0x%08llx at 0x%08lx in user region\n",
		       __pfn_to_phys((u64)md->pfn), md->virtual);
		return;
	}

	if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
	    md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) {
		printk(KERN_WARNING "BUG: mapping for 0x%08llx at 0x%08lx "
		       "overlaps vmalloc space\n",
		       __pfn_to_phys((u64)md->pfn), md->virtual);
	}

	domain	  = mem_types[md->type].domain;
	prot_pte  = __pgprot(mem_types[md->type].prot_pte);
	prot_l1   = mem_types[md->type].prot_l1 | PMD_DOMAIN(domain);
	prot_sect = mem_types[md->type].prot_sect | PMD_DOMAIN(domain);
	/*
	 * Catch 36-bit addresses
	 */
	if (md->pfn >= 0x100000) {
		if (domain) {
			printk(KERN_ERR "MM: invalid domain in supersection "
				"mapping for 0x%08llx at 0x%08lx\n",
				__pfn_to_phys((u64)md->pfn), md->virtual);
			return;
		}

		if ((md->virtual | md->length | __pfn_to_phys(md->pfn))
			& ~SUPERSECTION_MASK) {
			printk(KERN_ERR "MM: cannot create mapping for "
				"0x%08llx at 0x%08lx invalid alignment\n",
				__pfn_to_phys((u64)md->pfn), md->virtual);
			return;
		}

		/*
		 * Shift bits [35:32] of address into bits [23:20] of PMD
		 * (See ARMv6 spec).
		 */
		off |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);
	}
	virt   = md->virtual;
	off   -= virt;
	length = md->length;

	if (mem_types[md->type].prot_l1 == 0 &&
	    (virt & 0xfffff || (virt + off) & 0xfffff || (virt + length) & 0xfffff)) {
		printk(KERN_WARNING "BUG: map for 0x%08lx at 0x%08lx can not "
		       "be mapped using pages, ignoring.\n",
		       __pfn_to_phys(md->pfn), md->virtual);
		return;
	}

	while ((virt & 0xfffff || (virt + off) & 0xfffff) && length >= PAGE_SIZE) {
		alloc_init_page(virt, virt + off, prot_l1, prot_pte);

		virt   += PAGE_SIZE;
		length -= PAGE_SIZE;
	}
	/* N.B.	ARMv6 supersections are only defined to work with domain 0.
	 *	Since domain assignments can in fact be arbitrary, the
	 *	'domain == 0' check below is required to ensure that ARMv6
	 *	supersections are only allocated for domain 0 regardless
	 *	of the actual domain assignments in use.
	 */
	if ((cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())
		&& domain == 0) {
		/*
		 * Align to supersection boundary if !high pages.
		 * High pages have already been checked for proper
		 * alignment above and they will fail the SUPERSECTION_MASK
		 * check because of the way the address is encoded into
		 * offset.
		 */
		if (md->pfn <= 0x100000) {
			while ((virt & ~SUPERSECTION_MASK ||
				(virt + off) & ~SUPERSECTION_MASK) &&
				length >= (PGDIR_SIZE / 2)) {
				alloc_init_section(virt, virt + off, prot_sect);

				virt   += (PGDIR_SIZE / 2);
				length -= (PGDIR_SIZE / 2);
			}
		}

		while (length >= SUPERSECTION_SIZE) {
			alloc_init_supersection(virt, virt + off, prot_sect);

			virt   += SUPERSECTION_SIZE;
			length -= SUPERSECTION_SIZE;
		}
	}
	/*
	 * A section mapping covers half a "pgdir" entry.
	 */
	while (length >= (PGDIR_SIZE / 2)) {
		alloc_init_section(virt, virt + off, prot_sect);

		virt   += (PGDIR_SIZE / 2);
		length -= (PGDIR_SIZE / 2);
	}

	while (length >= PAGE_SIZE) {
		alloc_init_page(virt, virt + off, prot_l1, prot_pte);

		virt   += PAGE_SIZE;
		length -= PAGE_SIZE;
	}
}
/*
 * In order to soft-boot, we need to insert a 1:1 mapping in place of
 * the user-mode pages.  This will then ensure that we have predictable
 * results when turning the mmu off.
 */
void setup_mm_for_reboot(char mode)
{
	unsigned long base_pmdval;
	pgd_t *pgd;
	int i;

	if (current->mm && current->mm->pgd)
		pgd = current->mm->pgd;
	else
		pgd = init_mm.pgd;

	base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT;
	if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
		base_pmdval |= PMD_BIT4;

	for (i = 0; i < FIRST_USER_PGD_NR + USER_PTRS_PER_PGD; i++, pgd++) {
		unsigned long pmdval = (i << PGDIR_SHIFT) | base_pmdval;
		pmd_t *pmd;

		pmd = pmd_off(pgd, i << PGDIR_SHIFT);
		pmd[0] = __pmd(pmdval);
		pmd[1] = __pmd(pmdval + (1 << (PGDIR_SHIFT - 1)));
		flush_pmd_entry(pmd);
	}
}
/*
 * Create the architecture specific mappings
 */
void __init iotable_init(struct map_desc *io_desc, int nr)
{
	int i;

	for (i = 0; i < nr; i++)
		create_mapping(io_desc + i);
}
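
/*
 * Illustrative use only (the names below are hypothetical): a machine's
 * ->map_io hook typically builds a static table of map_desc entries and
 * passes it to iotable_init(), e.g.
 *
 *	static struct map_desc foo_io_desc[] __initdata = {
 *		{
 *			.virtual = FOO_IO_VIRT_BASE,
 *			.pfn     = __phys_to_pfn(FOO_IO_PHYS_BASE),
 *			.length  = FOO_IO_SIZE,
 *			.type    = MT_DEVICE,
 *		},
 *	};
 *
 *	iotable_init(foo_io_desc, ARRAY_SIZE(foo_io_desc));
 *
 * create_mapping() then chooses pages, sections or supersections for
 * each entry depending on its size and alignment.
 */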