src/target/armv7a.c
/***************************************************************************
 * Copyright (C) 2009 by David Brownell
 *
 * Copyright (C) ST-Ericsson SA 2011 michel.jaouen@stericsson.com
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the
 * Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 ***************************************************************************/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <helper/replacements.h>

#include "armv7a.h"
#include "arm_disassembler.h"

#include "register.h"
#include <helper/binarybuffer.h>
#include <helper/command.h>

#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "arm_opcodes.h"
#include "target.h"
#include "target_type.h"
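
/* Dump the CP15 fault status/address registers read below: DFSR (c5, c0, 0),
 * IFSR (c5, c0, 1), DFAR (c6, c0, 0) and IFAR (c6, c0, 2).  This is shown when
 * the core halted in abort mode, see armv7a_arch_state(). */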
static void armv7a_show_fault_registers(struct target *target)
{
    uint32_t dfsr, ifsr, dfar, ifar;
    struct armv7a_common *armv7a = target_to_armv7a(target);
    struct arm_dpm *dpm = armv7a->arm.dpm;
    int retval;

    retval = dpm->prepare(dpm);
    if (retval != ERROR_OK)
        return;

    /* ARMV4_5_MRC(cpnum, op1, r0, CRn, CRm, op2) */

    /* c5/c0 - {data, instruction} fault status registers */
    retval = dpm->instr_read_data_r0(dpm,
            ARMV4_5_MRC(15, 0, 0, 5, 0, 0),
            &dfsr);
    if (retval != ERROR_OK)
        goto done;

    retval = dpm->instr_read_data_r0(dpm,
            ARMV4_5_MRC(15, 0, 0, 5, 0, 1),
            &ifsr);
    if (retval != ERROR_OK)
        goto done;

    /* c6/c0 - {data, instruction} fault address registers */
    retval = dpm->instr_read_data_r0(dpm,
            ARMV4_5_MRC(15, 0, 0, 6, 0, 0),
            &dfar);
    if (retval != ERROR_OK)
        goto done;

    retval = dpm->instr_read_data_r0(dpm,
            ARMV4_5_MRC(15, 0, 0, 6, 0, 2),
            &ifar);
    if (retval != ERROR_OK)
        goto done;

    LOG_USER("Data fault registers        DFSR: %8.8" PRIx32
        ", DFAR: %8.8" PRIx32, dfsr, dfar);
    LOG_USER("Instruction fault registers IFSR: %8.8" PRIx32
        ", IFAR: %8.8" PRIx32, ifsr, ifar);

done:
    /* (void) */ dpm->finish(dpm);
}
/* retrieve main id register */
static int armv7a_read_midr(struct target *target)
{
    int retval = ERROR_FAIL;
    struct armv7a_common *armv7a = target_to_armv7a(target);
    struct arm_dpm *dpm = armv7a->arm.dpm;
    uint32_t midr;

    retval = dpm->prepare(dpm);
    if (retval != ERROR_OK)
        goto done;

    /* MRC p15, 0, <Rd>, c0, c0, 0 ; read main id register */
    retval = dpm->instr_read_data_r0(dpm,
            ARMV4_5_MRC(15, 0, 0, 0, 0, 0),
            &midr);
    if (retval != ERROR_OK)
        goto done;

    armv7a->rev = (midr & 0xf);
    armv7a->partnum = (midr >> 4) & 0xfff;
    armv7a->arch = (midr >> 16) & 0xf;
    armv7a->variant = (midr >> 20) & 0xf;
    armv7a->implementor = (midr >> 24) & 0xff;
    LOG_INFO("%s rev %" PRIx32 ", partnum %" PRIx32 ", arch %" PRIx32
        ", variant %" PRIx32 ", implementor %" PRIx32,
        target->cmd_name,
        armv7a->rev,
        armv7a->partnum,
        armv7a->arch,
        armv7a->variant,
        armv7a->implementor);

done:
    dpm->finish(dpm);
    return retval;
}
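
/* TTBCR.N (bits [2:0]) selects how the 32-bit VA space is split between TTBR0
 * and TTBR1: with N == 0 only TTBR0 is used; with N > 0, VAs below 2^(32-N) are
 * translated via TTBR0 and the rest via TTBR1.  The same field determines the
 * size and alignment of the TTBR0 first-level table, which is what the
 * ttbr_range/ttbr_mask values computed below encode. */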
static int armv7a_read_ttbcr(struct target *target)
{
    struct armv7a_common *armv7a = target_to_armv7a(target);
    struct arm_dpm *dpm = armv7a->arm.dpm;
    uint32_t ttbcr, ttbcr_n;
    int retval = dpm->prepare(dpm);
    if (retval != ERROR_OK)
        goto done;

    /* MRC p15, 0, <Rt>, c2, c0, 2 ; read CP15 Translation Table Base Control Register */
    retval = dpm->instr_read_data_r0(dpm,
            ARMV4_5_MRC(15, 0, 0, 2, 0, 2),
            &ttbcr);
    if (retval != ERROR_OK)
        goto done;

    LOG_DEBUG("ttbcr %" PRIx32, ttbcr);

    ttbcr_n = ttbcr & 0x7;
    armv7a->armv7a_mmu.ttbcr = ttbcr;
    armv7a->armv7a_mmu.cached = 1;

    /*
     * ARM Architecture Reference Manual (ARMv7-A and ARMv7-R edition),
     * document # ARM DDI 0406C
     */
    armv7a->armv7a_mmu.ttbr_range[0] = 0xffffffff >> ttbcr_n;
    armv7a->armv7a_mmu.ttbr_range[1] = 0xffffffff;
    armv7a->armv7a_mmu.ttbr_mask[0] = 0xffffffff << (14 - ttbcr_n);
    armv7a->armv7a_mmu.ttbr_mask[1] = 0xffffffff << 14;

    retval = armv7a_read_midr(target);
    if (retval != ERROR_OK)
        goto done;

    /* FIXME: why this special case based on part number? */
    if ((armv7a->partnum & 0xf) == 0) {
        /* ARM DDI 0344H, ARM DDI 0407F */
        armv7a->armv7a_mmu.ttbr_mask[0] = 7 << (32 - ttbcr_n);
    }

    LOG_DEBUG("ttbr1 %s, ttbr0_mask %" PRIx32 " ttbr1_mask %" PRIx32,
        (ttbcr_n != 0) ? "used" : "not used",
        armv7a->armv7a_mmu.ttbr_mask[0],
        armv7a->armv7a_mmu.ttbr_mask[1]);

done:
    dpm->finish(dpm);
    return retval;
}
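
/* Software page-table walk using the ARMv7-A short-descriptor format: the
 * first-level entry is either a section (1 MiB), a supersection (16 MiB, only
 * accepted here when the physical address fits in 32 bits) or a pointer to a
 * second-level table whose entries describe large (64 KiB) or small (4 KiB)
 * pages.  LPAE long-descriptor tables are not handled by this routine. */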
/* method adapted to Cortex-A: reuses the ARMv4/v5 method */
int armv7a_mmu_translate_va(struct target *target, uint32_t va, uint32_t *val)
{
    uint32_t first_lvl_descriptor = 0x0;
    uint32_t second_lvl_descriptor = 0x0;
    int retval;
    struct armv7a_common *armv7a = target_to_armv7a(target);
    struct arm_dpm *dpm = armv7a->arm.dpm;
    uint32_t ttbidx = 0;    /* default to ttbr0 */
    uint32_t ttb_mask;
    uint32_t va_mask;
    uint32_t ttbcr;
    uint32_t ttb;

    retval = dpm->prepare(dpm);
    if (retval != ERROR_OK)
        goto done;

    /* MRC p15, 0, <Rt>, c2, c0, 2 ; read CP15 Translation Table Base Control Register */
    retval = dpm->instr_read_data_r0(dpm,
            ARMV4_5_MRC(15, 0, 0, 2, 0, 2),
            &ttbcr);
    if (retval != ERROR_OK)
        goto done;

    /* if ttbcr has changed or was not read before, re-read the information */
    if ((armv7a->armv7a_mmu.cached == 0) ||
        (armv7a->armv7a_mmu.ttbcr != ttbcr)) {
        armv7a_read_ttbcr(target);
    }

    /* if va is above the range handled by ttbr0, select ttbr1 */
    if (va > armv7a->armv7a_mmu.ttbr_range[0]) {
        /* select ttb 1 */
        ttbidx = 1;
    }

    /* MRC p15, 0, <Rt>, c2, c0, ttbidx */
    retval = dpm->instr_read_data_r0(dpm,
            ARMV4_5_MRC(15, 0, 0, 2, 0, ttbidx),
            &ttb);
    if (retval != ERROR_OK)
        return retval;

    ttb_mask = armv7a->armv7a_mmu.ttbr_mask[ttbidx];
    va_mask = 0xfff00000 & armv7a->armv7a_mmu.ttbr_range[ttbidx];

    LOG_DEBUG("ttb_mask %" PRIx32 " va_mask %" PRIx32 " ttbidx %i",
        ttb_mask, va_mask, ttbidx);
    retval = armv7a->armv7a_mmu.read_physical_memory(target,
            (ttb & ttb_mask) | ((va & va_mask) >> 18),
            4, 1, (uint8_t *)&first_lvl_descriptor);
    if (retval != ERROR_OK)
        return retval;
    first_lvl_descriptor = target_buffer_get_u32(target, (uint8_t *)
            &first_lvl_descriptor);
    /* reuse armv4_5 piece of code, specific armv7a changes may come later */
    LOG_DEBUG("1st lvl desc: %8.8" PRIx32 "", first_lvl_descriptor);

    if ((first_lvl_descriptor & 0x3) == 0) {
        LOG_ERROR("Address translation failure");
        return ERROR_TARGET_TRANSLATION_FAULT;
    }

    if ((first_lvl_descriptor & 0x40002) == 2) {
        /* section descriptor */
        *val = (first_lvl_descriptor & 0xfff00000) | (va & 0x000fffff);
        return ERROR_OK;
    } else if ((first_lvl_descriptor & 0x40002) == 0x40002) {
        /* supersection descriptor */
        if (first_lvl_descriptor & 0x00f001e0) {
            LOG_ERROR("Physical address does not fit into 32 bits");
            return ERROR_TARGET_TRANSLATION_FAULT;
        }
        *val = (first_lvl_descriptor & 0xff000000) | (va & 0x00ffffff);
        return ERROR_OK;
    }

    /* page table */
    retval = armv7a->armv7a_mmu.read_physical_memory(target,
            (first_lvl_descriptor & 0xfffffc00) | ((va & 0x000ff000) >> 10),
            4, 1, (uint8_t *)&second_lvl_descriptor);
    if (retval != ERROR_OK)
        return retval;

    second_lvl_descriptor = target_buffer_get_u32(target, (uint8_t *)
            &second_lvl_descriptor);

    LOG_DEBUG("2nd lvl desc: %8.8" PRIx32 "", second_lvl_descriptor);

    if ((second_lvl_descriptor & 0x3) == 0) {
        LOG_ERROR("Address translation failure");
        return ERROR_TARGET_TRANSLATION_FAULT;
    }

    if ((second_lvl_descriptor & 0x3) == 1) {
        /* large page descriptor */
        *val = (second_lvl_descriptor & 0xffff0000) | (va & 0x0000ffff);
    } else {
        /* small page descriptor */
        *val = (second_lvl_descriptor & 0xfffff000) | (va & 0x00000fff);
    }

    return ERROR_OK;

done:
    return retval;
}
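
/* Hardware-assisted translation: writing the VA to the ATS1CPR operation
 * (CP15 c7, c8, 0) asks the MMU to perform a stage-1 privileged-read lookup;
 * the result is then read back from the Physical Address Register, PAR
 * (CP15 c7, c4, 0), which holds the physical page address plus the
 * shareability, security and cacheability attribute bits decoded below. */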
/* V7 method VA TO PA */
int armv7a_mmu_translate_va_pa(struct target *target, uint32_t va,
    uint32_t *val, int meminfo)
{
    int retval = ERROR_FAIL;
    struct armv7a_common *armv7a = target_to_armv7a(target);
    struct arm_dpm *dpm = armv7a->arm.dpm;
    uint32_t virt = va & ~0xfff;
    uint32_t NOS, NS, INNER, OUTER;
    *val = 0xdeadbeef;
    retval = dpm->prepare(dpm);
    if (retval != ERROR_OK)
        goto done;
    /* the MMU must be enabled in order to get a correct translation;
     * use the VA to PA CP15 operation for the conversion */
    retval = dpm->instr_write_data_r0(dpm,
            ARMV4_5_MCR(15, 0, 0, 7, 8, 0),
            virt);
    if (retval != ERROR_OK)
        goto done;
    retval = dpm->instr_read_data_r0(dpm,
            ARMV4_5_MRC(15, 0, 0, 7, 4, 0),
            val);
    /* decode memory attributes */
    NOS = (*val >> 10) & 1; /* Not Outer Shareable */
    NS = (*val >> 9) & 1;   /* Non-Secure */
    INNER = (*val >> 4) & 0x7;
    OUTER = (*val >> 2) & 0x3;

    if (retval != ERROR_OK)
        goto done;
    *val = (*val & ~0xfff) + (va & 0xfff);
    if (*val == va)
        LOG_WARNING("virt = phys: MMU is disabled!");
    if (meminfo) {
        LOG_INFO("%" PRIx32 " : %" PRIx32 " %s outer shareable %s secured",
            va, *val,
            NOS == 1 ? "not" : " ",
            NS == 1 ? "not" : "");
        switch (OUTER) {
            case 0:
                LOG_INFO("outer: Non-Cacheable");
                break;
            case 1:
                LOG_INFO("outer: Write-Back, Write-Allocate");
                break;
            case 2:
                LOG_INFO("outer: Write-Through, No Write-Allocate");
                break;
            case 3:
                LOG_INFO("outer: Write-Back, no Write-Allocate");
                break;
        }
        switch (INNER) {
            case 0:
                LOG_INFO("inner: Non-Cacheable");
                break;
            case 1:
                LOG_INFO("inner: Strongly-ordered");
                break;
            case 3:
                LOG_INFO("inner: Device");
                break;
            case 5:
                LOG_INFO("inner: Write-Back, Write-Allocate");
                break;
            case 6:
                LOG_INFO("inner: Write-Through");
                break;
            case 7:
                LOG_INFO("inner: Write-Back, no Write-Allocate");
                break;
            default:
                LOG_INFO("inner: %" PRIx32 " ???", INNER);
        }
    }

done:
    dpm->finish(dpm);
    return retval;
}
/* FIXME: remove it */
static int armv7a_l2x_cache_init(struct target *target, uint32_t base, uint32_t way)
{
    struct armv7a_l2x_cache *l2x_cache;
    struct target_list *head = target->head;
    struct target *curr;

    struct armv7a_common *armv7a = target_to_armv7a(target);
    l2x_cache = calloc(1, sizeof(struct armv7a_l2x_cache));
    l2x_cache->base = base;
    l2x_cache->way = way;
    /* LOG_INFO("cache l2 initialized base %x way %d",
        l2x_cache->base, l2x_cache->way); */
    if (armv7a->armv7a_mmu.armv7a_cache.outer_cache)
        LOG_INFO("outer cache already initialized\n");
    armv7a->armv7a_mmu.armv7a_cache.outer_cache = l2x_cache;
    /* initialize all targets in this cluster (smp target);
     * the l2 cache must be configured after the smp declaration */
    while (head != (struct target_list *)NULL) {
        curr = head->target;
        if (curr != target) {
            armv7a = target_to_armv7a(curr);
            if (armv7a->armv7a_mmu.armv7a_cache.outer_cache)
                LOG_ERROR("smp target : outer cache already initialized\n");
            armv7a->armv7a_mmu.armv7a_cache.outer_cache = l2x_cache;
        }
        head = head->next;
    }
    return JIM_OK;
}
/* FIXME: remove it */
COMMAND_HANDLER(handle_cache_l2x)
{
    struct target *target = get_current_target(CMD_CTX);
    uint32_t base, way;

    if (CMD_ARGC != 2)
        return ERROR_COMMAND_SYNTAX_ERROR;

    /* command_print(CMD_CTX, "%s %s", CMD_ARGV[0], CMD_ARGV[1]); */
    COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], base);
    COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], way);

    armv7a_l2x_cache_init(target, base, way);

    return ERROR_OK;
}
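
/* This handler is reachable as "cache_config l2x <base_addr> <number_of_way>"
 * (see the command registrations at the end of this file).  Typical use from a
 * target configuration script; both values below are only illustrative, use the
 * L2 controller base address and way count of the actual board:
 *
 *   cache_config l2x 0xfff10000 8
 */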
int armv7a_handle_cache_info_command(struct command_context *cmd_ctx,
    struct armv7a_cache_common *armv7a_cache)
{
    struct armv7a_l2x_cache *l2x_cache = (struct armv7a_l2x_cache *)
        (armv7a_cache->outer_cache);

    int cl;

    if (armv7a_cache->info == -1) {
        command_print(cmd_ctx, "cache not yet identified");
        return ERROR_OK;
    }

    for (cl = 0; cl < armv7a_cache->loc; cl++) {
        struct armv7a_arch_cache *arch = &(armv7a_cache->arch[cl]);

        if (arch->ctype & 1) {
            command_print(cmd_ctx,
                "L%d I-Cache: linelen %" PRIi32
                ", associativity %" PRIi32
                ", nsets %" PRIi32
                ", cachesize %" PRId32 " KBytes",
                cl+1,
                arch->i_size.linelen,
                arch->i_size.associativity,
                arch->i_size.nsets,
                arch->i_size.cachesize);
        }

        if (arch->ctype >= 2) {
            command_print(cmd_ctx,
                "L%d D-Cache: linelen %" PRIi32
                ", associativity %" PRIi32
                ", nsets %" PRIi32
                ", cachesize %" PRId32 " KBytes",
                cl+1,
                arch->d_u_size.linelen,
                arch->d_u_size.associativity,
                arch->d_u_size.nsets,
                arch->d_u_size.cachesize);
        }
    }

    if (l2x_cache != NULL)
        command_print(cmd_ctx, "Outer unified cache Base Address 0x%" PRIx32 ", %" PRId32 " ways",
            l2x_cache->base, l2x_cache->way);

    return ERROR_OK;
}
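
/* MPIDR on ARMv7-A multiprocessor implementations: bit 31 is set when the
 * register uses the multiprocessor format, bit 30 is the uniprocessor (U) flag,
 * bits [11:8] carry the cluster (affinity level 1) and bits [1:0] the CPU number
 * within the cluster, which is what the fields extracted below correspond to. */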
/* retrieve core id / cluster id */
static int armv7a_read_mpidr(struct target *target)
{
    int retval = ERROR_FAIL;
    struct armv7a_common *armv7a = target_to_armv7a(target);
    struct arm_dpm *dpm = armv7a->arm.dpm;
    uint32_t mpidr;

    retval = dpm->prepare(dpm);
    if (retval != ERROR_OK)
        goto done;

    /* MRC p15, 0, <Rd>, c0, c0, 5 ; read Multiprocessor ID register */
    retval = dpm->instr_read_data_r0(dpm,
            ARMV4_5_MRC(15, 0, 0, 0, 0, 5),
            &mpidr);
    if (retval != ERROR_OK)
        goto done;

    /* ARMv7-R uses a different format for MPIDR.
     * When configured uniprocessor (most R cores) it reads as 0.
     * This will need to be implemented for multiprocessor ARMv7-R cores. */
    if (armv7a->is_armv7r) {
        if (mpidr)
            LOG_ERROR("MPIDR nonzero in ARMv7-R target");
        goto done;
    }

    if (mpidr & 1<<31) {
        armv7a->multi_processor_system = (mpidr >> 30) & 1;
        armv7a->cluster_id = (mpidr >> 8) & 0xf;
        armv7a->cpu_id = mpidr & 0x3;
        LOG_INFO("%s cluster %x core %x %s", target_name(target),
            armv7a->cluster_id,
            armv7a->cpu_id,
            armv7a->multi_processor_system == 0 ? "multi core" : "mono core");
    } else
        LOG_ERROR("MPIDR not in multiprocessor format");

done:
    dpm->finish(dpm);
    return retval;
}
static int get_cache_info(struct arm_dpm *dpm, int cl, int ct, uint32_t *cache_reg)
{
    int retval = ERROR_OK;

    /* select cache level */
    retval = dpm->instr_write_data_r0(dpm,
            ARMV4_5_MCR(15, 2, 0, 0, 0, 0),
            (cl << 1) | (ct == 1 ? 1 : 0));
    if (retval != ERROR_OK)
        goto done;

    retval = dpm->instr_read_data_r0(dpm,
            ARMV4_5_MRC(15, 1, 0, 0, 0, 0),
            cache_reg);
done:
    return retval;
}
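
/* decode_cache_reg() unpacks a CCSIDR value: LineSize in bits [2:0] (log2 of the
 * number of words per line, minus 2), Associativity minus 1 in bits [12:3] and
 * NumSets minus 1 in bits [27:13].  index_shift/way_shift are precomputed so the
 * set/way cache maintenance operations can build their operands with plain
 * shifts. */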
static struct armv7a_cachesize decode_cache_reg(uint32_t cache_reg)
{
    struct armv7a_cachesize size;
    int i = 0;

    size.linelen = 16 << (cache_reg & 0x7);
    size.associativity = ((cache_reg >> 3) & 0x3ff) + 1;
    size.nsets = ((cache_reg >> 13) & 0x7fff) + 1;
    size.cachesize = size.linelen * size.associativity * size.nsets / 1024;

    /* compute info for set way operation on cache */
    size.index_shift = (cache_reg & 0x7) + 4;
    size.index = (cache_reg >> 13) & 0x7fff;
    size.way = ((cache_reg >> 3) & 0x3ff);

    while (((size.way << i) & 0x80000000) == 0)
        i++;
    size.way_shift = i;

    return size;
}
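
/* Cache identification walks the architectural registers: CTR gives the minimum
 * I/D line sizes, CLIDR lists the cache type implemented at each level up to the
 * point of coherency, and for every level present CSSELR selects the data or
 * instruction cache whose geometry is then read back from CCSIDR. */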
int armv7a_identify_cache(struct target *target)
{
    /* read cache descriptor */
    int retval = ERROR_FAIL;
    struct armv7a_common *armv7a = target_to_armv7a(target);
    struct arm_dpm *dpm = armv7a->arm.dpm;
    uint32_t csselr, clidr, ctr;
    uint32_t cache_reg;
    int cl, ctype;
    struct armv7a_cache_common *cache =
        &(armv7a->armv7a_mmu.armv7a_cache);

    if (!armv7a->is_armv7r)
        armv7a_read_ttbcr(target);

    retval = dpm->prepare(dpm);
    if (retval != ERROR_OK)
        goto done;

    /* retrieve CTR
     * mrc p15, 0, r0, c0, c0, 1   @ read ctr */
    retval = dpm->instr_read_data_r0(dpm,
            ARMV4_5_MRC(15, 0, 0, 0, 0, 1),
            &ctr);
    if (retval != ERROR_OK)
        goto done;

    cache->iminline = 4UL << (ctr & 0xf);
    cache->dminline = 4UL << ((ctr & 0xf0000) >> 16);
    LOG_DEBUG("ctr %" PRIx32 " ctr.iminline %" PRId32 " ctr.dminline %" PRId32,
        ctr, cache->iminline, cache->dminline);

    /* retrieve CLIDR
     * mrc p15, 1, r0, c0, c0, 1   @ read clidr */
    retval = dpm->instr_read_data_r0(dpm,
            ARMV4_5_MRC(15, 1, 0, 0, 0, 1),
            &clidr);
    if (retval != ERROR_OK)
        goto done;

    cache->loc = (clidr & 0x7000000) >> 24;
    LOG_DEBUG("Number of cache levels to PoC %" PRId32, cache->loc);

    /* retrieve selected cache for later restore
     * MRC p15, 2, <Rd>, c0, c0, 0 ; read CSSELR */
    retval = dpm->instr_read_data_r0(dpm,
            ARMV4_5_MRC(15, 2, 0, 0, 0, 0),
            &csselr);
    if (retval != ERROR_OK)
        goto done;

    /* retrieve all available inner caches */
    for (cl = 0; cl < cache->loc; clidr >>= 3, cl++) {

        /* isolate cache type at current level */
        ctype = clidr & 7;

        /* skip reserved values */
        if (ctype > CACHE_LEVEL_HAS_UNIFIED_CACHE)
            continue;

        /* separate d or unified d/i cache at this level ? */
        if (ctype & (CACHE_LEVEL_HAS_UNIFIED_CACHE | CACHE_LEVEL_HAS_D_CACHE)) {
            /* retrieve d-cache info */
            retval = get_cache_info(dpm, cl, 0, &cache_reg);
            if (retval != ERROR_OK)
                goto done;
            cache->arch[cl].d_u_size = decode_cache_reg(cache_reg);

            LOG_DEBUG("data/unified cache index %d << %d, way %d << %d",
                cache->arch[cl].d_u_size.index,
                cache->arch[cl].d_u_size.index_shift,
                cache->arch[cl].d_u_size.way,
                cache->arch[cl].d_u_size.way_shift);

            LOG_DEBUG("cacheline %d bytes %d KBytes asso %d ways",
                cache->arch[cl].d_u_size.linelen,
                cache->arch[cl].d_u_size.cachesize,
                cache->arch[cl].d_u_size.associativity);
        }

        /* separate i-cache at this level ? */
        if (ctype & CACHE_LEVEL_HAS_I_CACHE) {
            /* retrieve i-cache info */
            retval = get_cache_info(dpm, cl, 1, &cache_reg);
            if (retval != ERROR_OK)
                goto done;
            cache->arch[cl].i_size = decode_cache_reg(cache_reg);

            LOG_DEBUG("instruction cache index %d << %d, way %d << %d",
                cache->arch[cl].i_size.index,
                cache->arch[cl].i_size.index_shift,
                cache->arch[cl].i_size.way,
                cache->arch[cl].i_size.way_shift);

            LOG_DEBUG("cacheline %d bytes %d KBytes asso %d ways",
                cache->arch[cl].i_size.linelen,
                cache->arch[cl].i_size.cachesize,
                cache->arch[cl].i_size.associativity);
        }

        cache->arch[cl].ctype = ctype;
    }

    /* restore selected cache */
    retval = dpm->instr_write_data_r0(dpm,
            ARMV4_5_MCR(15, 2, 0, 0, 0, 0),
            csselr);
    if (retval != ERROR_OK)
        goto done;

    /* if there is no l2 cache, initialize the l1 data cache flush function */
    if (armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache == NULL) {
        armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache =
            armv7a_cache_auto_flush_all_data;
    }

    armv7a->armv7a_mmu.armv7a_cache.info = 1;
done:
    dpm->finish(dpm);
    armv7a_read_mpidr(target);
    return retval;
}
int armv7a_init_arch_info(struct target *target, struct armv7a_common *armv7a)
{
    struct arm *arm = &armv7a->arm;
    arm->arch_info = armv7a;
    target->arch_info = &armv7a->arm;
    /* the target pointer is used by all ARMv4/v5-compatible functions */
    armv7a->arm.target = target;
    armv7a->arm.common_magic = ARM_COMMON_MAGIC;
    armv7a->common_magic = ARMV7_COMMON_MAGIC;
    armv7a->armv7a_mmu.armv7a_cache.info = -1;
    armv7a->armv7a_mmu.armv7a_cache.outer_cache = NULL;
    armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache = NULL;
    armv7a->armv7a_mmu.armv7a_cache.auto_cache_enabled = 1;
    return ERROR_OK;
}
int armv7a_arch_state(struct target *target)
{
    static const char *state[] = {
        "disabled", "enabled"
    };

    struct armv7a_common *armv7a = target_to_armv7a(target);
    struct arm *arm = &armv7a->arm;

    if (armv7a->common_magic != ARMV7_COMMON_MAGIC) {
        LOG_ERROR("BUG: called for a non-ARMv7A target");
        return ERROR_COMMAND_SYNTAX_ERROR;
    }

    arm_arch_state(target);

    if (armv7a->is_armv7r) {
        LOG_USER("D-Cache: %s, I-Cache: %s",
            state[armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled],
            state[armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled]);
    } else {
        LOG_USER("MMU: %s, D-Cache: %s, I-Cache: %s",
            state[armv7a->armv7a_mmu.mmu_enabled],
            state[armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled],
            state[armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled]);
    }

    if (arm->core_mode == ARM_MODE_ABT)
        armv7a_show_fault_registers(target);
    if (target->debug_reason == DBG_REASON_WATCHPOINT)
        LOG_USER("Watchpoint triggered at PC %#08x",
            (unsigned) armv7a->dpm.wp_pc);

    return ERROR_OK;
}
static const struct command_registration l2_cache_commands[] = {
    {
        .name = "l2x",
        .handler = handle_cache_l2x,
        .mode = COMMAND_EXEC,
        .help = "configure l2x cache",
        .usage = "[base_addr] [number_of_way]",
    },
    COMMAND_REGISTRATION_DONE
};

const struct command_registration l2x_cache_command_handlers[] = {
    {
        .name = "cache_config",
        .mode = COMMAND_EXEC,
        .help = "cache configuration for a target",
        .usage = "",
        .chain = l2_cache_commands,
    },
    COMMAND_REGISTRATION_DONE
};

const struct command_registration armv7a_command_handlers[] = {
    {
        .chain = dap_command_handlers,
    },
    {
        .chain = l2x_cache_command_handlers,
    },
    {
        .chain = arm7a_cache_command_handlers,
    },
    COMMAND_REGISTRATION_DONE
};