/*
 *  linux/arch/arm26/mm/memc.c
 *
 *  Copyright (C) 1998-2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Page table sludge for older ARM processor architectures.
 */
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/bootmem.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/memory.h>
#include <asm/hardware.h>
#define MEMC_TABLE_SIZE (256*sizeof(unsigned long))

kmem_cache_t *pte_cache, *pgd_cache;
int page_nr;	/* set from max_low_pfn in memtable_init() below */
/*
 * Allocate space for a page table and a MEMC table.  Note that we place
 * the MEMC table before the page directory: this means we can easily get
 * to both tightly-associated data structures with a single pointer.
 */
static inline pgd_t *alloc_pgd_table(void)
{
	void *pg2k = kmem_cache_alloc(pgd_cache, GFP_KERNEL);

	/* step over the MEMC table so we hand back the page directory */
	if (pg2k)
		pg2k += MEMC_TABLE_SIZE;

	return (pgd_t *)pg2k;
}
/*
 * Free a page table.  This function is the counterpart to get_pgd_slow
 * below, not alloc_pgd_table above.
 */
void free_pgd_slow(pgd_t *pgd)
{
	unsigned long tbl = (unsigned long)pgd;

	/* step back over the MEMC table to recover the original allocation */
	tbl -= MEMC_TABLE_SIZE;

	kmem_cache_free(pgd_cache, (void *)tbl);
}
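
/*
 * Illustrative sketch, not part of the original file: the helper below is
 * hypothetical and only restates the layout used by alloc_pgd_table() and
 * free_pgd_slow() above.  The MEMC table sits immediately before the page
 * directory inside a single pgd_cache allocation, so either structure can
 * be reached from the other with simple pointer arithmetic.
 */
static inline unsigned long *example_pgd_to_memc_table(pgd_t *pgd)
{
	/* the MEMC table occupies the first MEMC_TABLE_SIZE bytes */
	return (unsigned long *)((unsigned long)pgd - MEMC_TABLE_SIZE);
}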
/*
 * Allocate a new pgd and fill it in ready for use.
 *
 * A new task's pgd is completely empty (all pages !present) except for:
 *
 * o The machine vectors at virtual address 0x0
 * o The vmalloc region at the top of the address space
 */
#define FIRST_KERNEL_PGD_NR	(FIRST_USER_PGD_NR + USER_PTRS_PER_PGD)
pgd_t *get_pgd_slow(struct mm_struct *mm)
{
	pgd_t *new_pgd, *init_pgd;
	pmd_t *new_pmd, *init_pmd;
	pte_t *new_pte, *init_pte;

	new_pgd = alloc_pgd_table();
	if (!new_pgd)
		goto no_pgd;

	/*
	 * This lock is here just to satisfy pmd_alloc and pte_lock
	 * FIXME: I bet we could avoid taking it pretty much altogether
	 */
	spin_lock(&mm->page_table_lock);

	/*
	 * On ARM, the first page must always be allocated since it
	 * contains the machine vectors.
	 */
	new_pmd = pmd_alloc(mm, new_pgd, 0);
	if (!new_pmd)
		goto no_pmd;

	new_pte = pte_alloc_kernel(mm, new_pmd, 0);
	if (!new_pte)
		goto no_pte;

	/* copy the machine-vectors mapping from the init task */
	init_pgd = pgd_offset(&init_mm, 0);
	init_pmd = pmd_offset(init_pgd, 0);
	init_pte = pte_offset(init_pmd, 0);

	set_pte(new_pte, *init_pte);

	/*
	 * The page table entries are zeroed when the table is created
	 * (see the cache_ctor functions below).  Now we need to plonk
	 * the kernel (vmalloc) area at the end of the address space.
	 * We copy this from the init thread, just like the init_pte we
	 * copied above...
	 */
	memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
	       (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));

	spin_unlock(&mm->page_table_lock);

	/* update MEMC tables */
	cpu_memc_update_all(new_pgd);
	return new_pgd;

no_pte:
	spin_unlock(&mm->page_table_lock);
	pmd_free(new_pmd);
	free_pgd_slow(new_pgd);
	return NULL;

no_pmd:
	spin_unlock(&mm->page_table_lock);
	free_pgd_slow(new_pgd);
	return NULL;

no_pgd:
	return NULL;
}
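
/*
 * Hedged sketch (hypothetical helper, not in the original file): per the
 * comment above get_pgd_slow(), a freshly created page directory should
 * only carry present entries at index 0 (the machine vectors) and from
 * FIRST_KERNEL_PGD_NR upwards (the copied vmalloc/kernel area).  A check
 * along these lines could make that invariant explicit.
 */
static inline int example_new_pgd_slot_may_be_present(int idx)
{
	return idx == 0 || idx >= FIRST_KERNEL_PGD_NR;
}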
/*
 * No special code is required here.
 */
void setup_mm_for_reboot(char mode)
{
}
/*
 * This contains the code to set up the memory map on an ARM2/ARM250/ARM3:
 * o swapper_pg_dir = 0x0207d000
 * o kernel proper starts at 0x0208000
 * o create (allocate) a pte to contain the machine vectors
 * o populate the pte (points to 0x02078000) (FIXME - is it zeroed?)
 * o populate the init task's page directory (pgd) with the new pte
 * o zero the rest of the init task's pgdir (FIXME - what about vmalloc?!)
 */
void __init memtable_init(struct meminfo *mi)
{
	pte_t *pte;
	int i;

	page_nr = max_low_pfn;

	/* one pte table holding the machine-vectors mapping */
	pte = alloc_bootmem_low_pages(PTRS_PER_PTE * sizeof(pte_t));
	pte[0] = mk_pte_phys(PAGE_OFFSET + SCREEN_SIZE, PAGE_READONLY);
	pmd_populate(&init_mm, pmd_offset(swapper_pg_dir, 0), pte);

	/* everything else in the init task's pgdir starts out empty */
	for (i = 1; i < PTRS_PER_PGD; i++)
		pgd_val(swapper_pg_dir[i]) = 0;
}
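
/*
 * Illustrative sketch (hypothetical, not part of the original file): after
 * memtable_init() runs, only slot 0 of swapper_pg_dir should be populated
 * (the machine-vectors pte installed above); the loop at the end of the
 * function zeroes every other slot.  A count like this would be expected
 * to return 1.
 */
static inline int example_count_populated_swapper_slots(void)
{
	int i, populated = 0;

	for (i = 0; i < PTRS_PER_PGD; i++)
		if (pgd_val(swapper_pg_dir[i]) != 0)
			populated++;

	return populated;
}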
void __init iotable_init(struct map_desc *io_desc)
{
	/* nothing to do */
}
/*
 * We never have holes in the memmap
 */
void __init create_memmap_holes(struct meminfo *mi)
{
}
static void pte_cache_ctor(void *pte, kmem_cache_t *cache, unsigned long flags)
{
	memzero(pte, sizeof(pte_t) * PTRS_PER_PTE);
}
static void pgd_cache_ctor(void *pgd, kmem_cache_t *cache, unsigned long flags)
{
	memzero(pgd + MEMC_TABLE_SIZE, USER_PTRS_PER_PGD * sizeof(pgd_t));
}
void __init pgtable_cache_init(void)
{
	pte_cache = kmem_cache_create("pte-cache",
				      sizeof(pte_t) * PTRS_PER_PTE,
				      0, 0, pte_cache_ctor, NULL);
	if (!pte_cache)
		BUG();

	pgd_cache = kmem_cache_create("pgd-cache", MEMC_TABLE_SIZE +
				      sizeof(pgd_t) * PTRS_PER_PGD,
				      0, 0, pgd_cache_ctor, NULL);
	if (!pgd_cache)
		BUG();
}
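
/*
 * Minimal usage sketch (assumption, not in this file): the pte_cache set up
 * above is meant to back pte table allocations elsewhere in the arch code;
 * drawing from it would look roughly like the hypothetical helper below,
 * with the entries already zeroed by pte_cache_ctor().
 */
static inline pte_t *example_pte_table_alloc(void)
{
	return (pte_t *)kmem_cache_alloc(pte_cache, GFP_KERNEL);
}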