x86: prepare for the unification of the cpa code
arch/x86/mm/pageattr-test.c
/*
 * self test for change_page_attr.
 *
 * Clears the global bit on random pages in the direct mapping, then reverts
 * and compares the page tables before and afterwards.
 */
#include <linux/bootmem.h>
#include <linux/random.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>

#include <asm/cacheflush.h>
#include <asm/pgtable.h>
#include <asm/kdebug.h>

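/*
 * NTEST is the number of random ranges exercised per run.  LOWEST_LEVEL is
 * the page table level lookup_address() reports for an ordinary 4k pte,
 * LPS is the size covered by one large-page mapping and GPS the size
 * covered by one gigabyte mapping.
 */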
enum {
        NTEST                   = 400,
#ifdef CONFIG_X86_64
        LOWEST_LEVEL            = 4,
        LPS                     = (1 << PMD_SHIFT),
#elif defined(CONFIG_X86_PAE)
        LOWEST_LEVEL            = 4,
        LPS                     = (1 << PMD_SHIFT),
#else
        LOWEST_LEVEL            = 4, /* lookup_address lies here */
        LPS                     = (1 << 22),
#endif
        GPS                     = (1<<30)
};

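/* End of the kernel direct mapping, in page frames. */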
#ifdef CONFIG_X86_64
# include <asm/proto.h>
# define max_mapped             end_pfn_map
#else
# define max_mapped             max_low_pfn
#endif

struct split_state {
        long lpg, gpg, spg, exec;
        long min_exec, max_exec;
};

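/*
 * Walk the entire direct mapping once: count how much of it is mapped with
 * 4k, large and gigabyte pages, how much of it is executable, and check
 * that the pages seen add up to max_mapped.
 */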
static __init int print_split(struct split_state *s)
{
        long i, expected, missed = 0;
        int printed = 0;
        int err = 0;

        s->lpg = s->gpg = s->spg = s->exec = 0;
        s->min_exec = ~0UL;
        s->max_exec = 0;
        for (i = 0; i < max_mapped; ) {
                unsigned long addr = (unsigned long)__va(i << PAGE_SHIFT);
                int level;
                pte_t *pte;

                pte = lookup_address(addr, &level);
                if (!pte) {
                        if (!printed) {
                                dump_pagetable(addr);
                                printk(KERN_INFO "CPA %lx no pte level %d\n",
                                        addr, level);
                                printed = 1;
                        }
                        missed++;
                        i++;
                        continue;
                }

                if (level == 2 && sizeof(long) == 8) {
                        s->gpg++;
                        i += GPS/PAGE_SIZE;
                } else if (level != LOWEST_LEVEL) {
                        if (!(pte_val(*pte) & _PAGE_PSE)) {
                                printk(KERN_ERR
                                        "%lx level %d but not PSE %Lx\n",
                                        addr, level, (u64)pte_val(*pte));
                                err = 1;
                        }
                        s->lpg++;
                        i += LPS/PAGE_SIZE;
                } else {
                        s->spg++;
                        i++;
                }
                if (!(pte_val(*pte) & _PAGE_NX)) {
                        s->exec++;
                        if (addr < s->min_exec)
                                s->min_exec = addr;
                        if (addr > s->max_exec)
                                s->max_exec = addr;
                }
        }
        printk(KERN_INFO
                "CPA mapping 4k %lu large %lu gb %lu x %lu[%lx-%lx] miss %lu\n",
                s->spg, s->lpg, s->gpg, s->exec,
                s->min_exec != ~0UL ? s->min_exec : 0, s->max_exec, missed);

        expected = (s->gpg*GPS + s->lpg*LPS)/PAGE_SIZE + s->spg + missed;
        if (expected != i) {
                printk(KERN_ERR "CPA max_mapped %lu but expected %lu\n",
                        max_mapped, expected);
                return 1;
        }
        return err;
}

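/*
 * The ranges touched by the test, recorded so that the revert pass below
 * can find and restore them.
 */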
static unsigned long __initdata addr[NTEST];
static unsigned int __initdata len[NTEST];

/* Change the global bit on random pages in the direct mapping */
static __init int exercise_pageattr(void)
{
        struct split_state sa, sb, sc;
        unsigned long *bm;
        pte_t *pte, pte0;
        int failed = 0;
        int level;
        int i, k;
        int err;

        printk(KERN_INFO "CPA exercising pageattr\n");

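        /*
         * One bit per page frame in the direct mapping, used to keep the
         * random test ranges from overlapping each other.
         */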
        bm = vmalloc((max_mapped + 7) / 8);
        if (!bm) {
                printk(KERN_ERR "CPA Cannot vmalloc bitmap\n");
                return -ENOMEM;
        }
        memset(bm, 0, (max_mapped + 7) / 8);

        failed += print_split(&sa);
        srandom32(100);

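        /*
         * First pass: pick random page ranges in the direct mapping,
         * remember their current protections and clear the global (and PSE)
         * bit on them with change_page_attr_addr(), then check the
         * resulting ptes.
         */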
        for (i = 0; i < NTEST; i++) {
                unsigned long pfn = random32() % max_mapped;

                addr[i] = (unsigned long)__va(pfn << PAGE_SHIFT);
                len[i] = random32() % 100;
                len[i] = min_t(unsigned long, len[i], max_mapped - pfn - 1);

                if (len[i] == 0)
                        len[i] = 1;

                pte = NULL;
                pte0 = pfn_pte(0, __pgprot(0)); /* shut gcc up */

                for (k = 0; k < len[i]; k++) {
                        pte = lookup_address(addr[i] + k*PAGE_SIZE, &level);
                        if (!pte || pgprot_val(pte_pgprot(*pte)) == 0) {
                                addr[i] = 0;
                                break;
                        }
                        if (k == 0) {
                                pte0 = *pte;
                        } else {
                                if (pgprot_val(pte_pgprot(*pte)) !=
                                    pgprot_val(pte_pgprot(pte0))) {
                                        len[i] = k;
                                        break;
                                }
                        }
                        if (test_bit(pfn + k, bm)) {
                                len[i] = k;
                                break;
                        }
                        __set_bit(pfn + k, bm);
                }
                if (!addr[i] || !pte || !k) {
                        addr[i] = 0;
                        continue;
                }

                err = change_page_attr_addr(addr[i], len[i],
                        pte_pgprot(pte_clrhuge(pte_clrglobal(pte0))));
                if (err < 0) {
                        printk(KERN_ERR "CPA %d failed %d\n", i, err);
                        failed++;
                }

                pte = lookup_address(addr[i], &level);
                if (!pte || pte_global(*pte) || pte_huge(*pte)) {
                        printk(KERN_ERR "CPA %lx: bad pte %Lx\n", addr[i],
                                pte ? (u64)pte_val(*pte) : 0ULL);
                        failed++;
                }
                if (level != LOWEST_LEVEL) {
                        printk(KERN_ERR "CPA %lx: unexpected level %d\n",
                                addr[i], level);
                        failed++;
                }
        }
        vfree(bm);
        global_flush_tlb();

        failed += print_split(&sb);

        printk(KERN_INFO "CPA reverting everything\n");
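        /*
         * Second pass: set the global bit back on every range that was
         * changed above and verify that the ptes are global again.
         */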
        for (i = 0; i < NTEST; i++) {
                if (!addr[i])
                        continue;
                pte = lookup_address(addr[i], &level);
                if (!pte) {
                        printk(KERN_ERR "CPA lookup of %lx failed\n", addr[i]);
                        failed++;
                        continue;
                }
                err = change_page_attr_addr(addr[i], len[i],
                        pte_pgprot(pte_mkglobal(*pte)));
                if (err < 0) {
                        printk(KERN_ERR "CPA reverting failed: %d\n", err);
                        failed++;
                }
                pte = lookup_address(addr[i], &level);
                if (!pte || !pte_global(*pte)) {
                        printk(KERN_ERR "CPA %lx: bad pte after revert %Lx\n",
                                addr[i], pte ? (u64)pte_val(*pte) : 0ULL);
                        failed++;
                }
        }
        global_flush_tlb();

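        /*
         * Dump the mapping statistics once more so they can be compared
         * with the dumps taken before the test and after the first pass.
         */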
        failed += print_split(&sc);

        if (failed) {
                printk(KERN_ERR "CPA selftests NOT PASSED. Please report.\n");
                WARN_ON(1);
        } else {
                printk(KERN_INFO "CPA selftests PASSED\n");
        }

        return 0;
}
module_init(exercise_pageattr);