/*
 *  linux/arch/arm/mm/mmu.c
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mman.h>
#include <linux/nodemask.h>

#include <asm/mach-types.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "mm.h"
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

extern void _stext, _etext, __data_start, _end;
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/*
 * empty_zero_page is a special page that is used for
 * zero-initialized data and COW.
 */
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);

/*
 * The pmd table for the upper-most set of pages.
 */
pmd_t *top_pmd;
#define CPOLICY_UNCACHED	0
#define CPOLICY_BUFFERED	1
#define CPOLICY_WRITETHROUGH	2
#define CPOLICY_WRITEBACK	3
#define CPOLICY_WRITEALLOC	4

static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
static unsigned int ecc_mask __initdata = 0;
pgprot_t pgprot_kernel;

EXPORT_SYMBOL(pgprot_kernel);
struct cachepolicy {
	const char	policy[16];
	unsigned int	cr_mask;
	unsigned int	pmd;
	unsigned int	pte;
};

static struct cachepolicy cache_policies[] __initdata = {
	{
		.policy		= "uncached",
		.cr_mask	= CR_W|CR_C,
		.pmd		= PMD_SECT_UNCACHED,
		.pte		= 0,
	}, {
		.policy		= "buffered",
		.cr_mask	= CR_C,
		.pmd		= PMD_SECT_BUFFERED,
		.pte		= PTE_BUFFERABLE,
	}, {
		.policy		= "writethrough",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WT,
		.pte		= PTE_CACHEABLE,
	}, {
		.policy		= "writeback",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WB,
		.pte		= PTE_BUFFERABLE|PTE_CACHEABLE,
	}, {
		.policy		= "writealloc",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WBWA,
		.pte		= PTE_BUFFERABLE|PTE_CACHEABLE,
	}
};
/*
 * These are useful for identifying cache coherency
 * problems by allowing the cache or the cache and
 * writebuffer to be turned off.  (Note: the write
 * buffer should not be on and the cache off).
 */
static void __init early_cachepolicy(char **p)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
		int len = strlen(cache_policies[i].policy);

		if (memcmp(*p, cache_policies[i].policy, len) == 0) {
			cachepolicy = i;
			cr_alignment &= ~cache_policies[i].cr_mask;
			cr_no_alignment &= ~cache_policies[i].cr_mask;
			*p += len;
			break;
		}
	}
	if (i == ARRAY_SIZE(cache_policies))
		printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n");
	flush_cache_all();
	set_cr(cr_alignment);
}
__early_param("cachepolicy=", early_cachepolicy);
static void __init early_nocache(char **__unused)
{
	char *p = "buffered";
	printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(&p);
}
__early_param("nocache", early_nocache);
static void __init early_nowrite(char **__unused)
{
	char *p = "uncached";
	printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(&p);
}
__early_param("nowb", early_nowrite);
static void __init early_ecc(char **p)
{
	if (memcmp(*p, "on", 2) == 0) {
		ecc_mask = PMD_PROTECTION;
		*p += 2;
	} else if (memcmp(*p, "off", 3) == 0) {
		ecc_mask = 0;
		*p += 3;
	}
}
__early_param("ecc=", early_ecc);
static int __init noalign_setup(char *__unused)
{
	cr_alignment &= ~CR_A;
	cr_no_alignment &= ~CR_A;
	set_cr(cr_alignment);
	return 1;
}
__setup("noalign", noalign_setup);
struct mem_types {
	unsigned int	prot_pte;
	unsigned int	prot_l1;
	unsigned int	prot_sect;
	unsigned int	domain;
};

static struct mem_types mem_types[] __initdata = {
	[MT_DEVICE] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_WRITE,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_UNCACHED |
				PMD_SECT_AP_WRITE,
		.domain    = DOMAIN_IO,
	},
	[MT_CACHECLEAN] = {
		.prot_sect = PMD_TYPE_SECT | PMD_BIT4,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MINICLEAN] = {
		.prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_MINICACHE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_LOW_VECTORS] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_EXEC,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_USER,
	},
	[MT_HIGH_VECTORS] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_USER | L_PTE_EXEC,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_USER,
	},
	[MT_MEMORY] = {
		.prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_AP_WRITE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_ROM] = {
		.prot_sect = PMD_TYPE_SECT | PMD_BIT4,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_IXP2000_DEVICE] = { /* IXP2400 requires XCB=101 for on-chip I/O */
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_WRITE,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_UNCACHED |
				PMD_SECT_AP_WRITE | PMD_SECT_BUFFERABLE |
				PMD_SECT_TEX(1),
		.domain    = DOMAIN_IO,
	},
	[MT_NONSHARED_DEVICE] = {
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_NONSHARED_DEV |
				PMD_SECT_AP_WRITE,
		.domain    = DOMAIN_IO,
	}
};
/*
 * Adjust the PMD section entries according to the CPU in use.
 */
static void __init build_mem_type_table(void)
{
	struct cachepolicy *cp;
	unsigned int cr = get_cr();
	unsigned int user_pgprot, kern_pgprot;
	int cpu_arch = cpu_architecture();
	int i;

#if defined(CONFIG_CPU_DCACHE_DISABLE)
	if (cachepolicy > CPOLICY_BUFFERED)
		cachepolicy = CPOLICY_BUFFERED;
#elif defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
	if (cachepolicy > CPOLICY_WRITETHROUGH)
		cachepolicy = CPOLICY_WRITETHROUGH;
#endif
	if (cpu_arch < CPU_ARCH_ARMv5) {
		if (cachepolicy >= CPOLICY_WRITEALLOC)
			cachepolicy = CPOLICY_WRITEBACK;
		ecc_mask = 0;
	}

	/*
	 * Xscale must not have PMD bit 4 set for section mappings.
	 */
	if (cpu_is_xscale())
		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
			mem_types[i].prot_sect &= ~PMD_BIT4;

	/*
	 * ARMv5 and lower, excluding Xscale, bit 4 must be set for
	 * page tables.
	 */
	if (cpu_arch < CPU_ARCH_ARMv6 && !cpu_is_xscale())
		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
			if (mem_types[i].prot_l1)
				mem_types[i].prot_l1 |= PMD_BIT4;

	cp = &cache_policies[cachepolicy];
	kern_pgprot = user_pgprot = cp->pte;

	/*
	 * Enable CPU-specific coherency if supported.
	 * (Only available on XSC3 at the moment.)
	 */
	if (arch_is_coherent()) {
		if (cpu_is_xsc3()) {
			mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
			mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
		}
	}

	/*
	 * ARMv6 and above have extended page tables.
	 */
	if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
		/*
		 * bit 4 becomes XN which we must clear for the
		 * kernel memory mapping.
		 */
		mem_types[MT_MEMORY].prot_sect &= ~PMD_SECT_XN;
		mem_types[MT_ROM].prot_sect &= ~PMD_SECT_XN;

		/*
		 * Mark cache clean areas and XIP ROM read only
		 * from SVC mode and no access from userspace.
		 */
		mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;

		/*
		 * Mark the device area as "shared device"
		 */
		mem_types[MT_DEVICE].prot_pte |= L_PTE_BUFFERABLE;
		mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;

		/*
		 * User pages need to be mapped with the ASID
		 * (iow, non-global)
		 */
		user_pgprot |= L_PTE_ASID;

#ifdef CONFIG_SMP
		/*
		 * Mark memory with the "shared" attribute for SMP systems
		 */
		user_pgprot |= L_PTE_SHARED;
		kern_pgprot |= L_PTE_SHARED;
		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
#endif
	}

	for (i = 0; i < 16; i++) {
		unsigned long v = pgprot_val(protection_map[i]);
		v = (v & ~(L_PTE_BUFFERABLE|L_PTE_CACHEABLE)) | user_pgprot;
		protection_map[i] = __pgprot(v);
	}

	mem_types[MT_LOW_VECTORS].prot_pte |= kern_pgprot;
	mem_types[MT_HIGH_VECTORS].prot_pte |= kern_pgprot;

	if (cpu_arch >= CPU_ARCH_ARMv5) {
#ifndef CONFIG_SMP
		/*
		 * Only use write-through for non-SMP systems
		 */
		mem_types[MT_LOW_VECTORS].prot_pte &= ~L_PTE_BUFFERABLE;
		mem_types[MT_HIGH_VECTORS].prot_pte &= ~L_PTE_BUFFERABLE;
#endif
	} else {
		mem_types[MT_MINICLEAN].prot_sect &= ~PMD_SECT_TEX(1);
	}

	pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
				 L_PTE_DIRTY | L_PTE_WRITE |
				 L_PTE_EXEC | kern_pgprot);

	mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
	mem_types[MT_ROM].prot_sect |= cp->pmd;

	switch (cp->pmd) {
	case PMD_SECT_WT:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
		break;
	case PMD_SECT_WB:
	case PMD_SECT_WBWA:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
		break;
	}
	printk("Memory policy: ECC %sabled, Data cache %s\n",
		ecc_mask ? "en" : "dis", cp->policy);
}
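
/*
 * Example (illustrative): with ecc_mask == 0 and the default policy, the
 * printk above reports at boot:
 *
 *	Memory policy: ECC disabled, Data cache writeback
 */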
#define vectors_base()	(vectors_high() ? 0xffff0000 : 0)
/*
 * Create a SECTION PGD between VIRT and PHYS in domain
 * DOMAIN with protection PROT.  This operates on half-
 * pgdir entry increments.
 */
static inline void
alloc_init_section(unsigned long virt, unsigned long phys, int prot)
{
	pmd_t *pmdp = pmd_off_k(virt);

	if (virt & (1 << 20))
		pmdp++;

	*pmdp = __pmd(phys | prot);
	flush_pmd_entry(pmdp);
}
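
/*
 * Worked example (illustrative): each pgd slot spans 2MB and holds a pair
 * of 1MB section descriptors, and bit 20 of the virtual address selects
 * the second of the pair - hence the pmdp++ above.  Mapping at virt
 * 0xe0100000 (bit 20 set) therefore writes the second descriptor of the
 * 0xe0000000-0xe01fffff slot.
 */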
/*
 * Create a SUPER SECTION PGD between VIRT and PHYS with protection PROT
 */
static inline void
alloc_init_supersection(unsigned long virt, unsigned long phys, int prot)
{
	int i;

	for (i = 0; i < 16; i += 1) {
		alloc_init_section(virt, phys, prot | PMD_SECT_SUPER);

		virt += (PGDIR_SIZE / 2);
	}
}
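
/*
 * Note (illustrative): a supersection is 16 identical consecutive 1MB
 * entries with PMD_SECT_SUPER set, so one call here maps a 16MB,
 * 16MB-aligned region (SUPERSECTION_SIZE) with a single TLB entry on
 * CPUs that support it.
 */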
/*
 * Add a PAGE mapping between VIRT and PHYS in domain
 * DOMAIN with protection PROT.  Note that due to the
 * way we map the PTEs, we must allocate two PTE_SIZE'd
 * blocks - one for the Linux pte table, and one for
 * the hardware pte table.
 */
static inline void
alloc_init_page(unsigned long virt, unsigned long phys, unsigned int prot_l1, pgprot_t prot)
{
	pmd_t *pmdp = pmd_off_k(virt);
	pte_t *ptep;

	if (pmd_none(*pmdp)) {
		ptep = alloc_bootmem_low_pages(2 * PTRS_PER_PTE *
					       sizeof(pte_t));

		__pmd_populate(pmdp, __pa(ptep) | prot_l1);
	}
	ptep = pte_offset_kernel(pmdp, virt);

	set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, prot));
}
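
/*
 * Sizing note (illustrative, assuming the usual ARM two-level layout with
 * PTRS_PER_PTE == 512 and 4-byte entries): the bootmem allocation above
 * is 2 * 512 * 4 = 4KB - one half for the Linux pte table carrying the
 * young/dirty bits, one half for the hardware pte tables the MMU walks.
 */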
/*
 * Create the page directory entries and any necessary
 * page tables for the mapping specified by `md'.  We
 * are able to cope here with varying sizes and address
 * offsets, and we take full advantage of sections and
 * supersections.
 */
void __init create_mapping(struct map_desc *md)
{
	unsigned long virt, length;
	int prot_sect, prot_l1, domain;
	pgprot_t prot_pte;
	unsigned long off = (u32)__pfn_to_phys(md->pfn);

	if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
		printk(KERN_WARNING "BUG: not creating mapping for "
		       "0x%08llx at 0x%08lx in user region\n",
		       __pfn_to_phys((u64)md->pfn), md->virtual);
		return;
	}

	if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
	    md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) {
		printk(KERN_WARNING "BUG: mapping for 0x%08llx at 0x%08lx "
		       "overlaps vmalloc space\n",
		       __pfn_to_phys((u64)md->pfn), md->virtual);
	}

	domain	  = mem_types[md->type].domain;
	prot_pte  = __pgprot(mem_types[md->type].prot_pte);
	prot_l1   = mem_types[md->type].prot_l1 | PMD_DOMAIN(domain);
	prot_sect = mem_types[md->type].prot_sect | PMD_DOMAIN(domain);

	/*
	 * Catch 36-bit addresses
	 */
	if (md->pfn >= 0x100000) {
		if (domain) {
			printk(KERN_ERR "MM: invalid domain in supersection "
				"mapping for 0x%08llx at 0x%08lx\n",
				__pfn_to_phys((u64)md->pfn), md->virtual);
			return;
		}

		if ((md->virtual | md->length | __pfn_to_phys(md->pfn))
			& ~SUPERSECTION_MASK) {
			printk(KERN_ERR "MM: cannot create mapping for "
				"0x%08llx at 0x%08lx invalid alignment\n",
				__pfn_to_phys((u64)md->pfn), md->virtual);
			return;
		}

		/*
		 * Shift bits [35:32] of address into bits [23:20] of PMD
		 * (See ARMv6 spec).
		 */
		off |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);
	}

	virt   = md->virtual;
	off   -= virt;
	length = md->length;

	if (mem_types[md->type].prot_l1 == 0 &&
	    (virt & 0xfffff || (virt + off) & 0xfffff || (virt + length) & 0xfffff)) {
		printk(KERN_WARNING "BUG: map for 0x%08lx at 0x%08lx can not "
		       "be mapped using pages, ignoring.\n",
		       __pfn_to_phys(md->pfn), md->virtual);
		return;
	}

	while ((virt & 0xfffff || (virt + off) & 0xfffff) && length >= PAGE_SIZE) {
		alloc_init_page(virt, virt + off, prot_l1, prot_pte);

		virt   += PAGE_SIZE;
		length -= PAGE_SIZE;
	}

	/* N.B.	ARMv6 supersections are only defined to work with domain 0.
	 *	Since domain assignments can in fact be arbitrary, the
	 *	'domain == 0' check below is required to ensure that ARMv6
	 *	supersections are only allocated for domain 0 regardless
	 *	of the actual domain assignments in use.
	 */
	if ((cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())
		&& domain == 0) {
		/*
		 * Align to supersection boundary if !high pages.
		 * High pages have already been checked for proper
		 * alignment above and they will fail the SUPERSECTION_MASK
		 * check because of the way the address is encoded into
		 * offset.
		 */
		if (md->pfn <= 0x100000) {
			while ((virt & ~SUPERSECTION_MASK ||
			        (virt + off) & ~SUPERSECTION_MASK) &&
				length >= (PGDIR_SIZE / 2)) {
				alloc_init_section(virt, virt + off, prot_sect);

				virt   += (PGDIR_SIZE / 2);
				length -= (PGDIR_SIZE / 2);
			}
		}

		while (length >= SUPERSECTION_SIZE) {
			alloc_init_supersection(virt, virt + off, prot_sect);

			virt   += SUPERSECTION_SIZE;
			length -= SUPERSECTION_SIZE;
		}
	}

	/*
	 * A section mapping covers half a "pgdir" entry.
	 */
	while (length >= (PGDIR_SIZE / 2)) {
		alloc_init_section(virt, virt + off, prot_sect);

		virt   += (PGDIR_SIZE / 2);
		length -= (PGDIR_SIZE / 2);
	}

	while (length >= PAGE_SIZE) {
		alloc_init_page(virt, virt + off, prot_l1, prot_pte);

		virt   += PAGE_SIZE;
		length -= PAGE_SIZE;
	}
}
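
/*
 * Worked example (illustrative, hypothetical addresses): a request to map
 * 18MB at a 16MB-aligned virtual address on ARMv6 with domain 0 becomes
 * one 16MB supersection followed by two 1MB sections; a trailing 8KB
 * would instead fall through to the final page loop as two 4KB pages.
 */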
/*
 * Create the architecture specific mappings
 */
void __init iotable_init(struct map_desc *io_desc, int nr)
{
	int i;

	for (i = 0; i < nr; i++)
		create_mapping(io_desc + i);
}
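
/*
 * Illustrative sketch (hypothetical names and addresses, not part of the
 * original file): machine support code typically feeds iotable_init()
 * from its ->map_io() hook with a static table like this.
 */
#if 0
static struct map_desc example_io_desc[] __initdata = {
	{
		.virtual	= 0xf8000000,			/* hypothetical */
		.pfn		= __phys_to_pfn(0x80000000),	/* hypothetical */
		.length		= SZ_1M,
		.type		= MT_DEVICE,
	},
};

static void __init example_map_io(void)
{
	iotable_init(example_io_desc, ARRAY_SIZE(example_io_desc));
}
#endif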
static inline void prepare_page_table(struct meminfo *mi)
{
	unsigned long addr;

	/*
	 * Clear out all the mappings below the kernel image.
	 */
	for (addr = 0; addr < MODULE_START; addr += PGDIR_SIZE)
		pmd_clear(pmd_off_k(addr));

#ifdef CONFIG_XIP_KERNEL
	/* The XIP kernel is mapped in the module area -- skip over it */
	addr = ((unsigned long)&_etext + PGDIR_SIZE - 1) & PGDIR_MASK;
#endif
	for ( ; addr < PAGE_OFFSET; addr += PGDIR_SIZE)
		pmd_clear(pmd_off_k(addr));

	/*
	 * Clear out all the kernel space mappings, except for the first
	 * memory bank, up to the end of the vmalloc region.
	 */
	for (addr = __phys_to_virt(mi->bank[0].start + mi->bank[0].size);
	     addr < VMALLOC_END; addr += PGDIR_SIZE)
		pmd_clear(pmd_off_k(addr));
}
/*
 * Reserve the various regions of node 0
 */
void __init reserve_node_zero(pg_data_t *pgdat)
{
	unsigned long res_size = 0;

	/*
	 * Register the kernel text and data with bootmem.
	 * Note that this can only be in node 0.
	 */
#ifdef CONFIG_XIP_KERNEL
	reserve_bootmem_node(pgdat, __pa(&__data_start), &_end - &__data_start);
#else
	reserve_bootmem_node(pgdat, __pa(&_stext), &_end - &_stext);
#endif

	/*
	 * Reserve the page tables.  These are already in use,
	 * and can only be in node 0.
	 */
	reserve_bootmem_node(pgdat, __pa(swapper_pg_dir),
			     PTRS_PER_PGD * sizeof(pgd_t));

	/*
	 * Hmm... This should go elsewhere, but we really really need to
	 * stop things allocating the low memory; ideally we need a better
	 * implementation of GFP_DMA which does not assume that DMA-able
	 * memory starts at zero.
	 */
	if (machine_is_integrator() || machine_is_cintegrator())
		res_size = __pa(swapper_pg_dir) - PHYS_OFFSET;

	/*
	 * These should likewise go elsewhere.  They pre-reserve the
	 * screen memory region at the start of main system memory.
	 */
	if (machine_is_edb7211())
		res_size = 0x00020000;
	if (machine_is_p720t())
		res_size = 0x00014000;

#ifdef CONFIG_SA1111
	/*
	 * Because of the SA1111 DMA bug, we want to preserve our
	 * precious DMA-able memory...
	 */
	res_size = __pa(swapper_pg_dir) - PHYS_OFFSET;
#endif
	if (res_size)
		reserve_bootmem_node(pgdat, PHYS_OFFSET, res_size);
}
/*
 * Set up the device mappings.  Since we clear out the page tables for all
 * mappings above VMALLOC_END, we will remove any debug device mappings.
 * This means you have to be careful how you debug this function, or any
 * called function.  This means you can't use any function or debugging
 * method which may touch any device, otherwise the kernel _will_ crash.
 */
static void __init devicemaps_init(struct machine_desc *mdesc)
{
	struct map_desc map;
	unsigned long addr;
	void *vectors;

	/*
	 * Allocate the vector page early.
	 */
	vectors = alloc_bootmem_low_pages(PAGE_SIZE);
	BUG_ON(!vectors);

	for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE)
		pmd_clear(pmd_off_k(addr));

	/*
	 * Map the kernel if it is XIP.
	 * It is always first in the module area.
	 */
#ifdef CONFIG_XIP_KERNEL
	map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
	map.virtual = MODULE_START;
	map.length = ((unsigned long)&_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
	map.type = MT_ROM;
	create_mapping(&map);
#endif

	/*
	 * Map the cache flushing regions.
	 */
#ifdef FLUSH_BASE
	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
	map.virtual = FLUSH_BASE;
	map.length = SZ_1M;
	map.type = MT_CACHECLEAN;
	create_mapping(&map);
#endif

#ifdef FLUSH_BASE_MINICACHE
	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
	map.virtual = FLUSH_BASE_MINICACHE;
	map.length = SZ_1M;
	map.type = MT_MINICLEAN;
	create_mapping(&map);
#endif

	/*
	 * Create a mapping for the machine vectors at the high-vectors
	 * location (0xffff0000).  If we aren't using high-vectors, also
	 * create a mapping at the low-vectors virtual address.
	 */
	map.pfn = __phys_to_pfn(virt_to_phys(vectors));
	map.virtual = 0xffff0000;
	map.length = PAGE_SIZE;
	map.type = MT_HIGH_VECTORS;
	create_mapping(&map);

	if (!vectors_high()) {
		map.virtual = 0;
		map.type = MT_LOW_VECTORS;
		create_mapping(&map);
	}

	/*
	 * Ask the machine support to map in the statically mapped devices.
	 */
	if (mdesc->map_io)
		mdesc->map_io();

	/*
	 * Finally flush the caches and tlb to ensure that we're in a
	 * consistent state wrt the writebuffer.  This also ensures that
	 * any write-allocated cache lines in the vector page are written
	 * back.  After this point, we can start to touch devices again.
	 */
	local_flush_tlb_all();
	flush_cache_all();
}
/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps, and sets up the zero page, bad page and bad page tables.
 */
void __init paging_init(struct meminfo *mi, struct machine_desc *mdesc)
{
	void *zero_page;

	build_mem_type_table();
	prepare_page_table(mi);
	bootmem_init(mi);
	devicemaps_init(mdesc);

	top_pmd = pmd_off_k(0xffff0000);

	/*
	 * allocate the zero page.  Note that we count on this going ok.
	 */
	zero_page = alloc_bootmem_low_pages(PAGE_SIZE);
	memzero(zero_page, PAGE_SIZE);
	empty_zero_page = virt_to_page(zero_page);
	flush_dcache_page(empty_zero_page);
}
/*
 * In order to soft-boot, we need to insert a 1:1 mapping in place of
 * the user-mode pages.  This will then ensure that we have predictable
 * results when turning the mmu off.
 */
void setup_mm_for_reboot(char mode)
{
	unsigned long base_pmdval;
	pgd_t *pgd;
	int i;

	if (current->mm && current->mm->pgd)
		pgd = current->mm->pgd;
	else
		pgd = init_mm.pgd;

	base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT;
	if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
		base_pmdval |= PMD_BIT4;

	for (i = 0; i < FIRST_USER_PGD_NR + USER_PTRS_PER_PGD; i++, pgd++) {
		unsigned long pmdval = (i << PGDIR_SHIFT) | base_pmdval;
		pmd_t *pmd;

		pmd = pmd_off(pgd, i << PGDIR_SHIFT);
		pmd[0] = __pmd(pmdval);
		pmd[1] = __pmd(pmdval + (1 << (PGDIR_SHIFT - 1)));
		flush_pmd_entry(pmd);
	}
}
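
/*
 * Worked example (illustrative): with PGDIR_SHIFT == 21 each pgd slot
 * covers 2MB, so iteration i writes a pair of 1MB section descriptors:
 * pmd[0] maps i*2MB and pmd[1] maps i*2MB + 1MB (the 1 << (PGDIR_SHIFT-1)
 * offset), giving the predictable 1:1 mapping needed before the MMU is
 * turned off.
 */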