Blackfin arch: add BUG_ON() checks to make sure we don't overflow the cplb tables
[linux-2.6/linux-2.6-openrd.git] arch/blackfin/kernel/cplb-nompu/cplbinit.c
/*
 * Blackfin CPLB initialization
 *
 * Copyright 2004-2007 Analog Devices Inc.
 *
 * Bugs: Enter bugs at http://blackfin.uclinux.org/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see the file COPYING, or write
 * to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <linux/module.h>

#include <asm/blackfin.h>
#include <asm/cacheflush.h>
#include <asm/cplb.h>
#include <asm/cplbinit.h>
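
/*
 * Per-CPU CPLB tables.  Each table reserves one extra slot so that the
 * final entry can be terminated with -1 at the end of
 * generate_cplb_tables_cpu().
 */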
u_long icplb_tables[NR_CPUS][CPLB_TBL_ENTRIES+1];
u_long dcplb_tables[NR_CPUS][CPLB_TBL_ENTRIES+1];

#ifdef CONFIG_CPLB_SWITCH_TAB_L1
#define PDT_ATTR __attribute__((l1_data))
#else
#define PDT_ATTR
#endif

u_long ipdt_tables[NR_CPUS][MAX_SWITCH_I_CPLBS+1] PDT_ATTR;
u_long dpdt_tables[NR_CPUS][MAX_SWITCH_D_CPLBS+1] PDT_ATTR;
#ifdef CONFIG_CPLB_INFO
u_long ipdt_swapcount_tables[NR_CPUS][MAX_SWITCH_I_CPLBS] PDT_ATTR;
u_long dpdt_swapcount_tables[NR_CPUS][MAX_SWITCH_D_CPLBS] PDT_ATTR;
#endif
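
/*
 * Working set built by generate_cplb_tables_cpu(): the initial I/D
 * tables programmed at boot and the switch (page descriptor) tables
 * consulted when CPLBs are replaced at run time.
 */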
struct s_cplb {
        struct cplb_tab init_i;
        struct cplb_tab init_d;
        struct cplb_tab switch_i;
        struct cplb_tab switch_d;
};
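
/*
 * Static descriptors for every memory region that may need CPLB
 * coverage.  Entries marked "dynamic" are filled in by
 * generate_cplb_tables_cpu() once the memory layout is known.
 */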
#if defined(CONFIG_BFIN_DCACHE) || defined(CONFIG_BFIN_ICACHE)
static struct cplb_desc cplb_data[] = {
        {
                .start = 0,
                .end = SIZE_1K,
                .psize = SIZE_1K,
                .attr = INITIAL_T | SWITCH_T | I_CPLB | D_CPLB,
                .i_conf = SDRAM_OOPS,
                .d_conf = SDRAM_OOPS,
#if defined(CONFIG_DEBUG_HUNT_FOR_ZERO)
                .valid = 1,
#else
                .valid = 0,
#endif
                .name = "Zero Pointer Guard Page",
        },
        {
                .start = 0, /* dynamic */
                .end = 0, /* dynamic */
                .psize = SIZE_4M,
                .attr = INITIAL_T | SWITCH_T | I_CPLB,
                .i_conf = L1_IMEMORY,
                .d_conf = 0,
                .valid = 1,
                .name = "L1 I-Memory",
        },
        {
                .start = 0, /* dynamic */
                .end = 0, /* dynamic */
                .psize = SIZE_4M,
                .attr = INITIAL_T | SWITCH_T | D_CPLB,
                .i_conf = 0,
                .d_conf = L1_DMEMORY,
#if ((L1_DATA_A_LENGTH > 0) || (L1_DATA_B_LENGTH > 0))
                .valid = 1,
#else
                .valid = 0,
#endif
                .name = "L1 D-Memory",
        },
        {
                .start = L2_START,
                .end = L2_START + L2_LENGTH,
                .psize = SIZE_1M,
                .attr = L2_ATTR,
                .i_conf = L2_IMEMORY,
                .d_conf = L2_DMEMORY,
                .valid = (L2_LENGTH > 0),
                .name = "L2 Memory",
        },
        {
                .start = 0,
                .end = 0, /* dynamic */
                .psize = 0,
                .attr = INITIAL_T | SWITCH_T | I_CPLB | D_CPLB,
                .i_conf = SDRAM_IGENERIC,
                .d_conf = SDRAM_DGENERIC,
                .valid = 1,
                .name = "Kernel Memory",
        },
        {
                .start = 0, /* dynamic */
                .end = 0, /* dynamic */
                .psize = 0,
                .attr = INITIAL_T | SWITCH_T | D_CPLB,
                .i_conf = SDRAM_IGENERIC,
                .d_conf = SDRAM_DNON_CHBL,
                .valid = 1,
                .name = "uClinux MTD Memory",
        },
        {
                .start = 0, /* dynamic */
                .end = 0, /* dynamic */
                .psize = SIZE_1M,
                .attr = INITIAL_T | SWITCH_T | D_CPLB,
                .d_conf = SDRAM_DNON_CHBL,
                .valid = 1,
                .name = "Uncached DMA Zone",
        },
        {
                .start = 0, /* dynamic */
                .end = 0, /* dynamic */
                .psize = 0,
                .attr = SWITCH_T | D_CPLB,
                .i_conf = 0, /* dynamic */
                .d_conf = 0, /* dynamic */
                .valid = 1,
                .name = "Reserved Memory",
        },
        {
                .start = ASYNC_BANK0_BASE,
                .end = ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE,
                .psize = 0,
                .attr = SWITCH_T | D_CPLB,
                .d_conf = SDRAM_EBIU,
                .valid = 1,
                .name = "Asynchronous Memory Banks",
        },
        {
                .start = BOOT_ROM_START,
                .end = BOOT_ROM_START + BOOT_ROM_LENGTH,
                .psize = SIZE_1M,
                .attr = SWITCH_T | I_CPLB | D_CPLB,
                .i_conf = SDRAM_IGENERIC,
                .d_conf = SDRAM_DGENERIC,
                .valid = 1,
                .name = "On-Chip BootROM",
        },
};
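
/*
 * Check whether [start, end) overlaps the kernel image between _stext
 * and __init_begin.  Such ranges are locked into the CPLB tables by
 * fill_cplbtab() below.
 */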
static bool __init lock_kernel_check(u32 start, u32 end)
{
        if (start >= (u32)__init_begin || end <= (u32)_stext)
                return false;

        /* This CPLB block overlaps the kernel area. */
        return true;
}
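
/*
 * Append address/data entry pairs covering [start, end) to a CPLB
 * table, encoding the page size into bits 17:16 of the data word.
 * Pages that overlap the kernel image are marked CPLB_LOCK | CPLB_DIRTY.
 */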
static void __init
fill_cplbtab(struct cplb_tab *table,
             unsigned long start, unsigned long end,
             unsigned long block_size, unsigned long cplb_data)
{
        int i;

        switch (block_size) {
        case SIZE_4M:
                i = 3;
                break;
        case SIZE_1M:
                i = 2;
                break;
        case SIZE_4K:
                i = 1;
                break;
        case SIZE_1K:
        default:
                i = 0;
                break;
        }

        cplb_data = (cplb_data & ~(3 << 16)) | (i << 16);

        while ((start < end) && (table->pos < table->size)) {

                table->tab[table->pos++] = start;

                if (lock_kernel_check(start, start + block_size))
                        table->tab[table->pos++] =
                            cplb_data | CPLB_LOCK | CPLB_DIRTY;
                else
                        table->tab[table->pos++] = cplb_data;

                start += block_size;
        }
}
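
/* Zero-fill the unused slots so they read back as invalid entries. */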
static void __init close_cplbtab(struct cplb_tab *table)
{
        while (table->pos < table->size)
                table->tab[table->pos++] = 0;
}

/* Helper: fill the instruction CPLB table for region i.  A region with
 * a fixed page size is covered directly; otherwise the 4MB-aligned
 * middle [a_start, a_end) gets 4MB pages and the unaligned head/tail
 * get 1MB pages (with anomaly 05000263, the kernel region is covered
 * entirely by 4MB pages instead).
 */
static void __init
__fill_code_cplbtab(struct cplb_tab *t, int i, u32 a_start, u32 a_end)
{
        if (cplb_data[i].psize) {
                fill_cplbtab(t,
                             cplb_data[i].start,
                             cplb_data[i].end,
                             cplb_data[i].psize,
                             cplb_data[i].i_conf);
        } else {
#if defined(CONFIG_BFIN_ICACHE)
                if (ANOMALY_05000263 && i == SDRAM_KERN) {
                        fill_cplbtab(t,
                                     cplb_data[i].start,
                                     cplb_data[i].end,
                                     SIZE_4M,
                                     cplb_data[i].i_conf);
                } else
#endif
                {
                        fill_cplbtab(t,
                                     cplb_data[i].start,
                                     a_start,
                                     SIZE_1M,
                                     cplb_data[i].i_conf);
                        fill_cplbtab(t,
                                     a_start,
                                     a_end,
                                     SIZE_4M,
                                     cplb_data[i].i_conf);
                        fill_cplbtab(t,
                                     a_end,
                                     cplb_data[i].end,
                                     SIZE_1M,
                                     cplb_data[i].i_conf);
                }
        }
}
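
/*
 * Same as __fill_code_cplbtab(), but for data CPLB entries: the
 * 4MB-aligned middle of the region gets 4MB pages and the unaligned
 * head/tail get 1MB pages.
 */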
static void __init
__fill_data_cplbtab(struct cplb_tab *t, int i, u32 a_start, u32 a_end)
{
        if (cplb_data[i].psize) {
                fill_cplbtab(t,
                             cplb_data[i].start,
                             cplb_data[i].end,
                             cplb_data[i].psize,
                             cplb_data[i].d_conf);
        } else {
                fill_cplbtab(t,
                             cplb_data[i].start,
                             a_start, SIZE_1M,
                             cplb_data[i].d_conf);
                fill_cplbtab(t,
                             a_start, a_end, SIZE_4M,
                             cplb_data[i].d_conf);
                fill_cplbtab(t,
                             a_end, cplb_data[i].end, SIZE_1M,
                             cplb_data[i].d_conf);
        }
}
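
/*
 * Build the initial and switch CPLB tables for one CPU from the
 * cplb_data[] descriptors and terminate each table with -1.
 */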
void __init generate_cplb_tables_cpu(unsigned int cpu)
{
        u16 i, j, process;
        u32 a_start, a_end, as, ae, as_1m;

        struct cplb_tab *t_i = NULL;
        struct cplb_tab *t_d = NULL;
        struct s_cplb cplb;

        printk(KERN_INFO "NOMPU: setting up cplb tables for global access\n");

        cplb.init_i.size = CPLB_TBL_ENTRIES;
        cplb.init_d.size = CPLB_TBL_ENTRIES;
        cplb.switch_i.size = MAX_SWITCH_I_CPLBS;
        cplb.switch_d.size = MAX_SWITCH_D_CPLBS;

        cplb.init_i.pos = 0;
        cplb.init_d.pos = 0;
        cplb.switch_i.pos = 0;
        cplb.switch_d.pos = 0;

        cplb.init_i.tab = icplb_tables[cpu];
        cplb.init_d.tab = dcplb_tables[cpu];
        cplb.switch_i.tab = ipdt_tables[cpu];
        cplb.switch_d.tab = dpdt_tables[cpu];
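
        /* Fill in the region boundaries that are only known at run time. */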
        cplb_data[L1I_MEM].start = get_l1_code_start_cpu(cpu);
        cplb_data[L1I_MEM].end = cplb_data[L1I_MEM].start + L1_CODE_LENGTH;
        cplb_data[L1D_MEM].start = get_l1_data_a_start_cpu(cpu);
        cplb_data[L1D_MEM].end = get_l1_data_b_start_cpu(cpu) + L1_DATA_B_LENGTH;
        cplb_data[SDRAM_KERN].end = memory_end;

#ifdef CONFIG_MTD_UCLINUX
        cplb_data[SDRAM_RAM_MTD].start = memory_mtd_start;
        cplb_data[SDRAM_RAM_MTD].end = memory_mtd_start + mtd_size;
        cplb_data[SDRAM_RAM_MTD].valid = mtd_size > 0;
# if defined(CONFIG_ROMFS_FS)
        cplb_data[SDRAM_RAM_MTD].attr |= I_CPLB;

        /*
         * The ROMFS_FS size is often not a multiple of 1MB.
         * This can cause multiple CPLB sets covering the same memory area.
         * This will then cause multiple CPLB hit exceptions.
         * Workaround: we ensure a contiguous memory area by extending the
         * kernel memory section over the mtd section.
         * For ROMFS_FS, memory must be covered with ICPLBs anyway, so
         * there is no difference between the kernel and mtd memory setup.
         */

        cplb_data[SDRAM_KERN].end = memory_mtd_start + mtd_size;
        cplb_data[SDRAM_RAM_MTD].valid = 0;

# endif
#else
        cplb_data[SDRAM_RAM_MTD].valid = 0;
#endif

        cplb_data[SDRAM_DMAZ].start = _ramend - DMA_UNCACHED_REGION;
        cplb_data[SDRAM_DMAZ].end = _ramend;

        cplb_data[RES_MEM].start = _ramend;
        cplb_data[RES_MEM].end = physical_mem_end;

        if (reserved_mem_dcache_on)
                cplb_data[RES_MEM].d_conf = SDRAM_DGENERIC;
        else
                cplb_data[RES_MEM].d_conf = SDRAM_DNON_CHBL;

        if (reserved_mem_icache_on)
                cplb_data[RES_MEM].i_conf = SDRAM_IGENERIC;
        else
                cplb_data[RES_MEM].i_conf = SDRAM_INON_CHBL;
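
        /*
         * Walk every valid region descriptor and emit CPLB entries into
         * the initial and/or switch tables it is tagged for.
         */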
        for (i = ZERO_P; i < ARRAY_SIZE(cplb_data); ++i) {
                if (!cplb_data[i].valid)
                        continue;

                as_1m = cplb_data[i].start % SIZE_1M;

                /* We need to make sure all sections are properly 1M aligned.
                 * However, between the kernel memory section and the kernel
                 * mtd section there can be overlapping memory areas,
                 * depending on the rootfs size.
                 */
                if (as_1m && i != L1I_MEM && i != L1D_MEM) {
#ifdef CONFIG_MTD_UCLINUX
                        if (i == SDRAM_RAM_MTD) {
                                if ((cplb_data[SDRAM_KERN].end + 1) > cplb_data[SDRAM_RAM_MTD].start)
                                        cplb_data[SDRAM_RAM_MTD].start = (cplb_data[i].start & (-2*SIZE_1M)) + SIZE_1M;
                                else
                                        cplb_data[SDRAM_RAM_MTD].start = (cplb_data[i].start & (-2*SIZE_1M));
                        } else
#endif
                                printk(KERN_WARNING "Unaligned Start of %s at 0x%X\n",
                                       cplb_data[i].name, cplb_data[i].start);
                }

                as = cplb_data[i].start % SIZE_4M;
                ae = cplb_data[i].end % SIZE_4M;

                if (as)
                        a_start = cplb_data[i].start + (SIZE_4M - (as));
                else
                        a_start = cplb_data[i].start;

                a_end = cplb_data[i].end - ae;

                for (j = INITIAL_T; j <= SWITCH_T; j++) {

                        switch (j) {
                        case INITIAL_T:
                                if (cplb_data[i].attr & INITIAL_T) {
                                        t_i = &cplb.init_i;
                                        t_d = &cplb.init_d;
                                        process = 1;
                                } else
                                        process = 0;
                                break;
                        case SWITCH_T:
                                if (cplb_data[i].attr & SWITCH_T) {
                                        t_i = &cplb.switch_i;
                                        t_d = &cplb.switch_d;
                                        process = 1;
                                } else
                                        process = 0;
                                break;
                        default:
                                process = 0;
                                break;
                        }

                        if (!process)
                                continue;

                        if (cplb_data[i].attr & I_CPLB)
                                __fill_code_cplbtab(t_i, i, a_start, a_end);

                        if (cplb_data[i].attr & D_CPLB)
                                __fill_data_cplbtab(t_d, i, a_start, a_end);
                }
        }

        /* make sure we locked the kernel start */
        BUG_ON(cplb.init_i.pos < 2 + cplb_data[ZERO_P].valid);
        BUG_ON(cplb.init_d.pos < 1 + cplb_data[ZERO_P].valid + cplb_data[L1D_MEM].valid);

        /* make sure we didn't overflow the tables */
        BUG_ON(cplb.init_i.size <= cplb.init_i.pos);
        BUG_ON(cplb.init_d.size <= cplb.init_d.pos);
        BUG_ON(cplb.switch_i.size <= cplb.switch_i.pos);
        BUG_ON(cplb.switch_d.size <= cplb.switch_d.pos);

        /* close tables */
        close_cplbtab(&cplb.init_i);
        close_cplbtab(&cplb.init_d);

        cplb.init_i.tab[cplb.init_i.pos] = -1;
        cplb.init_d.tab[cplb.init_d.pos] = -1;
        cplb.switch_i.tab[cplb.switch_i.pos] = -1;
        cplb.switch_d.tab[cplb.switch_d.pos] = -1;
}

#endif