/* pgalloc.c: page directory & page table allocation
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((aligned(PAGE_SIZE)));
struct kmem_cache *pgd_cache;
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);

	if (pte)
		clear_page(pte);
	return pte;
}
struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *page;

#ifdef CONFIG_HIGHPTE
	page = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT, 0);
#else
	page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
#endif
	if (page) {
		clear_highpage(page);
		flush_dcache_page(page);
	}
	return page;
}
/* fill a PMD slot: either clear it or replicate the pmd value across every
 * segment table entry in the block, then write the block back out of the
 * dcache so the MMU sees the update */
void __set_pmd(pmd_t *pmdptr, unsigned long pmd)
{
	unsigned long *__ste_p = pmdptr->ste;
	int loop;

	if (!pmd) {
		memset(__ste_p, 0, PME_SIZE);
	}
	else {
		BUG_ON(pmd & (0x3f00 | xAMPRx_SS | 0xe));

		for (loop = PME_SIZE; loop > 0; loop -= 4) {
			*__ste_p++ = pmd;
			pmd += __frv_PT_SIZE;
		}
	}

	frv_dcache_writeback((unsigned long) pmdptr, (unsigned long) (pmdptr + 1));
}
/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * If the locking proves to be non-performant, a ticketing scheme with
 * checks at dup_mmap(), exec(), and other mmlist addition points
 * could be used. The locking scheme was chosen on the basis of
 * manfred's recommendations and having no core impact whatsoever.
 *
 * (An illustrative walker sketch follows pgd_list_del() below.)
 */
DEFINE_SPINLOCK(pgd_lock);
struct page *pgd_list;
static inline void pgd_list_add(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	/* link the new page in at the head of the list */
	page->index = (unsigned long) pgd_list;
	if (pgd_list)
		set_page_private(pgd_list, (unsigned long) &page->index);
	pgd_list = page;
	set_page_private(page, (unsigned long) &pgd_list);
}
static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *next, **pprev, *page = virt_to_page(pgd);

	/* unlink the page and fix up the neighbouring back pointer */
	next = (struct page *) page->index;
	pprev = (struct page **) page_private(page);
	*pprev = next;
	if (next)
		set_page_private(next, (unsigned long) pprev);
}
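/*
 * Illustrative sketch only (not from the original file): one way a
 * pageattr.c-style updater might push a kernel page table change into
 * every pgd on the list, walking the page->index forward links kept by
 * pgd_list_add()/pgd_list_del().  The function name and call sequence
 * are assumptions made for the example; only pgd_list, pgd_lock and
 * __set_pmd() come from this file.  Kept under #if 0 so it does not
 * affect the build.
 */
#if 0
static void example_sync_kernel_pmd(unsigned long address, unsigned long pmdval)
{
	unsigned long flags;
	struct page *page;

	spin_lock_irqsave(&pgd_lock, flags);

	/* rewrite the pmd covering 'address' in every cached pgd */
	for (page = pgd_list; page; page = (struct page *) page->index) {
		pgd_t *pgd = (pgd_t *) page_address(page) + pgd_index(address);
		pud_t *pud = pud_offset(pgd, address);
		pmd_t *pmd = pmd_offset(pud, address);

		__set_pmd(pmd, pmdval);
	}

	spin_unlock_irqrestore(&pgd_lock, flags);
}
#endif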
void pgd_ctor(void *pgd, struct kmem_cache *cache, unsigned long unused)
{
	unsigned long flags;

	if (PTRS_PER_PMD == 1)
		spin_lock_irqsave(&pgd_lock, flags);

	/* copy the kernel portion of swapper_pg_dir into the new pgd */
	memcpy((pgd_t *) pgd + USER_PGDS_IN_LAST_PML4,
	       swapper_pg_dir + USER_PGDS_IN_LAST_PML4,
	       (PTRS_PER_PGD - USER_PGDS_IN_LAST_PML4) * sizeof(pgd_t));

	if (PTRS_PER_PMD > 1)
		return;

	pgd_list_add(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
	memset(pgd, 0, USER_PGDS_IN_LAST_PML4 * sizeof(pgd_t));
}
/* never called when PTRS_PER_PMD > 1 */
void pgd_dtor(void *pgd, struct kmem_cache *cache, unsigned long unused)
{
	unsigned long flags; /* can be called from interrupt context */

	spin_lock_irqsave(&pgd_lock, flags);
	pgd_list_del(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd;

	pgd = kmem_cache_alloc(pgd_cache, GFP_KERNEL);
	return pgd;
}
void pgd_free(pgd_t *pgd)
{
	/* in the non-PAE case, clear_page_tables() clears user pgd entries */
	kmem_cache_free(pgd_cache, pgd);
}
void __init pgtable_cache_init(void)
{
	pgd_cache = kmem_cache_create("pgd",
				      PTRS_PER_PGD * sizeof(pgd_t),
				      PTRS_PER_PGD * sizeof(pgd_t),
				      SLAB_PANIC,
				      pgd_ctor,
				      pgd_dtor);
}