/***************************************************************************
 *   Copyright (C) 2009 by David Brownell                                  *
 *                                                                         *
 *   Copyright (C) ST-Ericsson SA 2011 michel.jaouen@stericsson.com        *
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify  *
 *   it under the terms of the GNU General Public License as published by  *
 *   the Free Software Foundation; either version 2 of the License, or     *
 *   (at your option) any later version.                                   *
 *                                                                         *
 *   This program is distributed in the hope that it will be useful,       *
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
 *   GNU General Public License for more details.                          *
 *                                                                         *
 *   You should have received a copy of the GNU General Public License     *
 *   along with this program; if not, write to the                         *
 *   Free Software Foundation, Inc.,                                       *
 *   51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.           *
 ***************************************************************************/

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <helper/replacements.h>

#include "armv7a.h"
#include "arm_disassembler.h"

#include <helper/binarybuffer.h>
#include <helper/command.h>

#include <stdlib.h>

#include "arm_opcodes.h"
#include "target.h"
#include "target_type.h"

static void armv7a_show_fault_registers(struct target *target)
{
	uint32_t dfsr, ifsr, dfar, ifar;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm_dpm *dpm = armv7a->arm.dpm;
	int retval;

	retval = dpm->prepare(dpm);
	if (retval != ERROR_OK)
		return;

	/* ARMV4_5_MRC(cpnum, op1, r0, CRn, CRm, op2) */

	/* c5/c0 - {data, instruction} fault status registers */
	retval = dpm->instr_read_data_r0(dpm,
			ARMV4_5_MRC(15, 0, 0, 5, 0, 0),
			&dfsr);
	if (retval != ERROR_OK)
		goto done;

	retval = dpm->instr_read_data_r0(dpm,
			ARMV4_5_MRC(15, 0, 0, 5, 0, 1),
			&ifsr);
	if (retval != ERROR_OK)
		goto done;

	/* c6/c0 - {data, instruction} fault address registers */
	retval = dpm->instr_read_data_r0(dpm,
			ARMV4_5_MRC(15, 0, 0, 6, 0, 0),
			&dfar);
	if (retval != ERROR_OK)
		goto done;

	retval = dpm->instr_read_data_r0(dpm,
			ARMV4_5_MRC(15, 0, 0, 6, 0, 2),
			&ifar);
	if (retval != ERROR_OK)
		goto done;

	LOG_USER("Data fault registers        DFSR: %8.8" PRIx32
		", DFAR: %8.8" PRIx32, dfsr, dfar);
	LOG_USER("Instruction fault registers IFSR: %8.8" PRIx32
		", IFAR: %8.8" PRIx32, ifsr, ifar);

done:
	/* (void) */ dpm->finish(dpm);
}

/* retrieve the main ID register */
static int armv7a_read_midr(struct target *target)
{
	int retval = ERROR_FAIL;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm_dpm *dpm = armv7a->arm.dpm;
	uint32_t midr;

	retval = dpm->prepare(dpm);
	if (retval != ERROR_OK)
		goto done;

	/* MRC p15, 0, <Rd>, c0, c0, 0; read main ID register */
	retval = dpm->instr_read_data_r0(dpm,
			ARMV4_5_MRC(15, 0, 0, 0, 0, 0),
			&midr);
	if (retval != ERROR_OK)
		goto done;

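	/* MIDR bit layout (ARMv7-A/R):
	 *   [31:24] implementer, [23:20] variant, [19:16] architecture,
	 *   [15:4] primary part number, [3:0] revision */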
	armv7a->rev = (midr & 0xf);
	armv7a->partnum = (midr >> 4) & 0xfff;
	armv7a->arch = (midr >> 16) & 0xf;
	armv7a->variant = (midr >> 20) & 0xf;
	armv7a->implementor = (midr >> 24) & 0xff;
	LOG_INFO("%s rev %" PRIx32 ", partnum %" PRIx32 ", arch %" PRIx32
		", variant %" PRIx32 ", implementor %" PRIx32,
		target_name(target),
		armv7a->rev,
		armv7a->partnum,
		armv7a->arch,
		armv7a->variant,
		armv7a->implementor);

done:
	dpm->finish(dpm);
	return retval;
}

static int armv7a_read_ttbcr(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm_dpm *dpm = armv7a->arm.dpm;
	uint32_t ttbcr;
	uint32_t ttbr0, ttbr1;
	int retval = dpm->prepare(dpm);
	if (retval != ERROR_OK)
		goto done;

	/* MRC p15, 0, <Rt>, c2, c0, 2; read Translation Table Base Control Register */
	retval = dpm->instr_read_data_r0(dpm,
			ARMV4_5_MRC(15, 0, 0, 2, 0, 2),
			&ttbcr);
	if (retval != ERROR_OK)
		goto done;

	retval = dpm->instr_read_data_r0(dpm,
			ARMV4_5_MRC(15, 0, 0, 2, 0, 0),
			&ttbr0);
	if (retval != ERROR_OK)
		goto done;

	retval = dpm->instr_read_data_r0(dpm,
			ARMV4_5_MRC(15, 0, 0, 2, 0, 1),
			&ttbr1);
	if (retval != ERROR_OK)
		goto done;

	LOG_INFO("ttbcr %" PRIx32 ", ttbr0 %" PRIx32 ", ttbr1 %" PRIx32,
		ttbcr, ttbr0, ttbr1);

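	/* TTBCR.N (bits [2:0]) selects the TTBR0/TTBR1 split: with N == 0 only
	 * TTBR0 is used; otherwise virtual addresses below 2^(32-N) are
	 * translated through TTBR0 and the rest through TTBR1 */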
	armv7a->armv7a_mmu.ttbr1_used = ((ttbcr & 0x7) != 0) ? 1 : 0;
	armv7a->armv7a_mmu.ttbr0_mask = 0;

	retval = armv7a_read_midr(target);
	if (retval != ERROR_OK)
		goto done;

	if (armv7a->partnum & 0xf) {
		/* ARM Architecture Reference Manual (ARMv7-A and ARMv7-R edition),
		 * document # ARM DDI 0406C */
		armv7a->armv7a_mmu.ttbr0_mask = 1 << (14 - (ttbcr & 0x7));
	} else {
		/* ARM DDI 0344H, ARM DDI 0407F */
		armv7a->armv7a_mmu.ttbr0_mask = 7 << (32 - (ttbcr & 0x7));
	}
	/* FIXME: the default is the hard-coded Linux kernel/user border */
	armv7a->armv7a_mmu.os_border = 0xc0000000;

	LOG_DEBUG("ttbr1 %s, ttbr0_mask %" PRIx32,
		armv7a->armv7a_mmu.ttbr1_used ? "used" : "not used",
		armv7a->armv7a_mmu.ttbr0_mask);

	if (armv7a->armv7a_mmu.ttbr1_used == 1) {
		LOG_INFO("SVC access above %" PRIx32,
			(0xffffffff & armv7a->armv7a_mmu.ttbr0_mask));
		armv7a->armv7a_mmu.os_border = 0xffffffff & armv7a->armv7a_mmu.ttbr0_mask;
	}

done:
	dpm->finish(dpm);
	return retval;
}

/* method adapted to Cortex-A: reuses the ARMv4/v5 table-walk code */
int armv7a_mmu_translate_va(struct target *target, uint32_t va, uint32_t *val)
{
	uint32_t first_lvl_descriptor = 0x0;
	uint32_t second_lvl_descriptor = 0x0;
	int retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm_dpm *dpm = armv7a->arm.dpm;
	uint32_t ttb = 0;	/* default ttb0 */

	if (armv7a->armv7a_mmu.ttbr1_used == -1)
		armv7a_read_ttbcr(target);
	if ((armv7a->armv7a_mmu.ttbr1_used) &&
		(va > (0xffffffff & armv7a->armv7a_mmu.ttbr0_mask))) {
		/* select ttb1 */
		ttb = 1;
	}
	retval = dpm->prepare(dpm);
	if (retval != ERROR_OK)
		goto done;

	/* MRC p15, 0, <Rt>, c2, c0, ttb */
	retval = dpm->instr_read_data_r0(dpm,
			ARMV4_5_MRC(15, 0, 0, 2, 0, ttb),
			&ttb);
	if (retval != ERROR_OK)
		return retval;

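	/* ARMv7 short-descriptor walk: the first-level table is indexed by
	 * VA[31:20]; descriptor bits [1:0] select fault (0), coarse page
	 * table (1) or section (2); the code below also handles the legacy
	 * fine-page-table encoding (3) */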
	retval = armv7a->armv7a_mmu.read_physical_memory(target,
			(ttb & 0xffffc000) | ((va & 0xfff00000) >> 18),
			4, 1, (uint8_t *)&first_lvl_descriptor);
	if (retval != ERROR_OK)
		return retval;
	first_lvl_descriptor = target_buffer_get_u32(target, (uint8_t *)
			&first_lvl_descriptor);
	/* reuse the armv4_5 code; armv7a-specific changes may come later */
	LOG_DEBUG("1st lvl desc: %8.8" PRIx32, first_lvl_descriptor);

	if ((first_lvl_descriptor & 0x3) == 0) {
		LOG_ERROR("Address translation failure");
		return ERROR_TARGET_TRANSLATION_FAULT;
	}

	if ((first_lvl_descriptor & 0x3) == 2) {
		/* section descriptor */
		*val = (first_lvl_descriptor & 0xfff00000) | (va & 0x000fffff);
		return ERROR_OK;
	}

	if ((first_lvl_descriptor & 0x3) == 1) {
		/* coarse page table */
		retval = armv7a->armv7a_mmu.read_physical_memory(target,
				(first_lvl_descriptor & 0xfffffc00) | ((va & 0x000ff000) >> 10),
				4, 1, (uint8_t *)&second_lvl_descriptor);
		if (retval != ERROR_OK)
			return retval;
	} else if ((first_lvl_descriptor & 0x3) == 3) {
		/* fine page table */
		retval = armv7a->armv7a_mmu.read_physical_memory(target,
				(first_lvl_descriptor & 0xfffff000) | ((va & 0x000ffc00) >> 8),
				4, 1, (uint8_t *)&second_lvl_descriptor);
		if (retval != ERROR_OK)
			return retval;
	}

	second_lvl_descriptor = target_buffer_get_u32(target, (uint8_t *)
			&second_lvl_descriptor);

	LOG_DEBUG("2nd lvl desc: %8.8" PRIx32, second_lvl_descriptor);

	if ((second_lvl_descriptor & 0x3) == 0) {
		LOG_ERROR("Address translation failure");
		return ERROR_TARGET_TRANSLATION_FAULT;
	}

	if ((second_lvl_descriptor & 0x3) == 1) {
		/* large page descriptor */
		*val = (second_lvl_descriptor & 0xffff0000) | (va & 0x0000ffff);
		return ERROR_OK;
	}

	if ((second_lvl_descriptor & 0x3) == 2) {
		/* small page descriptor */
		*val = (second_lvl_descriptor & 0xfffff000) | (va & 0x00000fff);
		return ERROR_OK;
	}

	if ((second_lvl_descriptor & 0x3) == 3) {
		/* tiny page descriptor */
		*val = (second_lvl_descriptor & 0xfffffc00) | (va & 0x000003ff);
		return ERROR_OK;
	}

	/* should not happen */
	LOG_ERROR("Address translation failure");
	return ERROR_TARGET_TRANSLATION_FAULT;

done:
	return retval;
}

/* ARMv7 method, VA to PA translation */
int armv7a_mmu_translate_va_pa(struct target *target, uint32_t va,
	uint32_t *val, int meminfo)
{
	int retval = ERROR_FAIL;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm_dpm *dpm = armv7a->arm.dpm;
	uint32_t virt = va & ~0xfff;
	uint32_t NOS, NS, INNER, OUTER;

	retval = dpm->prepare(dpm);
	if (retval != ERROR_OK)
		goto done;

	/* the MMU must be enabled in order to get a correct translation;
	 * use the CP15 VA-to-PA registers for the conversion */
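	/* ATS1CPR (MCR p15, 0, <Rt>, c7, c8, 0) performs a stage 1 privileged
	 * read translation; the result, including the memory attributes, is
	 * then read back from the PAR (MRC p15, 0, <Rt>, c7, c4, 0) */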
	retval = dpm->instr_write_data_r0(dpm,
			ARMV4_5_MCR(15, 0, 0, 7, 8, 0),
			virt);
	if (retval != ERROR_OK)
		goto done;
	retval = dpm->instr_read_data_r0(dpm,
			ARMV4_5_MRC(15, 0, 0, 7, 4, 0),
			val);
	/* decode the memory attributes */
	NOS = (*val >> 10) & 1;	/* Not Outer Shareable */
	NS = (*val >> 9) & 1;	/* Non-Secure */
	INNER = (*val >> 4) & 0x7;
	OUTER = (*val >> 2) & 0x3;

	if (retval != ERROR_OK)
		goto done;
	*val = (*val & ~0xfff) + (va & 0xfff);
	if (*val == va)
		LOG_WARNING("virt = phys: MMU is disabled");
	if (meminfo) {
		LOG_INFO("%" PRIx32 " : %" PRIx32 " %s outer shareable %s secured",
			va, *val,
			NOS == 1 ? "not" : " ",
			NS == 1 ? "not" : "");
		switch (OUTER) {
			case 0:
				LOG_INFO("outer: Non-Cacheable");
				break;
			case 1:
				LOG_INFO("outer: Write-Back, Write-Allocate");
				break;
			case 2:
				LOG_INFO("outer: Write-Through, No Write-Allocate");
				break;
			case 3:
				LOG_INFO("outer: Write-Back, no Write-Allocate");
				break;
		}
		switch (INNER) {
			case 0:
				LOG_INFO("inner: Non-Cacheable");
				break;
			case 1:
				LOG_INFO("inner: Strongly-ordered");
				break;
			case 3:
				LOG_INFO("inner: Device");
				break;
			case 5:
				LOG_INFO("inner: Write-Back, Write-Allocate");
				break;
			case 6:
				LOG_INFO("inner: Write-Through");
				break;
			case 7:
				LOG_INFO("inner: Write-Back, no Write-Allocate");
				break;
			default:
				LOG_INFO("inner: %" PRIx32 " ???", INNER);
		}
	}

done:
	dpm->finish(dpm);
	return retval;
}

static int armv7a_handle_inner_cache_info_command(struct command_context *cmd_ctx,
	struct armv7a_cache_common *armv7a_cache)
{
	if (armv7a_cache->ctype == -1) {
		command_print(cmd_ctx, "cache not yet identified");
		return ERROR_OK;
	}

	command_print(cmd_ctx,
		"D-Cache: linelen %" PRIi32 ", associativity %" PRIi32
		", nsets %" PRIi32 ", cachesize %" PRId32 " KBytes",
		armv7a_cache->d_u_size.linelen,
		armv7a_cache->d_u_size.associativity,
		armv7a_cache->d_u_size.nsets,
		armv7a_cache->d_u_size.cachesize);

	command_print(cmd_ctx,
		"I-Cache: linelen %" PRIi32 ", associativity %" PRIi32
		", nsets %" PRIi32 ", cachesize %" PRId32 " KBytes",
		armv7a_cache->i_size.linelen,
		armv7a_cache->i_size.associativity,
		armv7a_cache->i_size.nsets,
		armv7a_cache->i_size.cachesize);

	return ERROR_OK;
}

static int _armv7a_flush_all_data(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm_dpm *dpm = armv7a->arm.dpm;
	struct armv7a_cachesize *d_u_size =
		&(armv7a->armv7a_mmu.armv7a_cache.d_u_size);
	int32_t c_way, c_index = d_u_size->index;
	int retval;

	/* check that the data cache was enabled when the target halted */
	if (!armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled) {
		LOG_INFO("flush not performed: cache not enabled at target halt");
		return ERROR_OK;
	}
	retval = dpm->prepare(dpm);
	if (retval != ERROR_OK)
		goto done;

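	/* DCCISW (clean and invalidate data cache line by set/way, c7, c14, 2)
	 * expects the way number in the top bits and the set index shifted by
	 * log2(line length in bytes); index_shift and way_shift were computed
	 * for exactly that purpose at cache identification */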
	do {
		c_way = d_u_size->way;
		do {
			uint32_t value = (c_index << d_u_size->index_shift)
				| (c_way << d_u_size->way_shift);
			/* LOG_INFO("%d %d %x", c_way, c_index, value); */
			retval = dpm->instr_write_data_r0(dpm,
					ARMV4_5_MCR(15, 0, 0, 7, 14, 2),
					value);
			if (retval != ERROR_OK)
				goto done;
			c_way -= 1;
		} while (c_way >= 0);
		c_index -= 1;
	} while (c_index >= 0);
	dpm->finish(dpm);
	return retval;

done:
	LOG_ERROR("data cache flush failed");
	dpm->finish(dpm);
	return retval;
}

static int armv7a_flush_all_data(struct target *target)
{
	int retval = ERROR_FAIL;
	/* check that the cache has been correctly identified */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	if (armv7a->armv7a_mmu.armv7a_cache.ctype == -1) {
		LOG_ERROR("trying to flush an unidentified cache");
		return retval;
	}

	if (target->smp) {
		/* check that all the other targets have been flushed
		 * in order to flush level 2 */
		struct target_list *head;
		struct target *curr;
		head = target->head;
		while (head != (struct target_list *)NULL) {
			curr = head->target;
			if (curr->state == TARGET_HALTED) {
				LOG_INFO("waiting for L1 data flush on core %" PRId32, curr->coreid);
				retval = _armv7a_flush_all_data(curr);
			}
			head = head->next;
		}
	} else
		retval = _armv7a_flush_all_data(target);
	return retval;
}

/* L2 is not specific to armv7a; a separate file is needed */
static int armv7a_l2x_flush_all_data(struct target *target)
{

#define L2X0_CLEAN_INV_WAY		0x7FC
	int retval = ERROR_FAIL;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct armv7a_l2x_cache *l2x_cache = (struct armv7a_l2x_cache *)
		(armv7a->armv7a_mmu.armv7a_cache.l2_cache);
	uint32_t base = l2x_cache->base;
	uint32_t l2_way = l2x_cache->way;
	uint32_t l2_way_val = (1 << l2_way) - 1;

	retval = armv7a_flush_all_data(target);
	if (retval != ERROR_OK)
		return retval;

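	/* writing a bit mask of all ways to the L2C-310 "Clean and Invalidate
	 * by Way" register (offset 0x7FC) cleans and invalidates every
	 * selected way of the outer cache */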
	retval = target->type->write_phys_memory(target,
			(uint32_t)(base + (uint32_t)L2X0_CLEAN_INV_WAY),
			(uint32_t)4,
			(uint32_t)1,
			(uint8_t *)&l2_way_val);
	return retval;
}

static int armv7a_handle_l2x_cache_info_command(struct command_context *cmd_ctx,
	struct armv7a_cache_common *armv7a_cache)
{
	struct armv7a_l2x_cache *l2x_cache = (struct armv7a_l2x_cache *)
		(armv7a_cache->l2_cache);

	if (armv7a_cache->ctype == -1) {
		command_print(cmd_ctx, "cache not yet identified");
		return ERROR_OK;
	}

	command_print(cmd_ctx,
		"L1 D-Cache: linelen %" PRIi32 ", associativity %" PRIi32
		", nsets %" PRIi32 ", cachesize %" PRId32 " KBytes",
		armv7a_cache->d_u_size.linelen,
		armv7a_cache->d_u_size.associativity,
		armv7a_cache->d_u_size.nsets,
		armv7a_cache->d_u_size.cachesize);

	command_print(cmd_ctx,
		"L1 I-Cache: linelen %" PRIi32 ", associativity %" PRIi32
		", nsets %" PRIi32 ", cachesize %" PRId32 " KBytes",
		armv7a_cache->i_size.linelen,
		armv7a_cache->i_size.associativity,
		armv7a_cache->i_size.nsets,
		armv7a_cache->i_size.cachesize);

	command_print(cmd_ctx, "L2 unified cache Base Address 0x%" PRIx32 ", %" PRId32 " ways",
		l2x_cache->base, l2x_cache->way);

	return ERROR_OK;
}

static int armv7a_l2x_cache_init(struct target *target, uint32_t base, uint32_t way)
{
	struct armv7a_l2x_cache *l2x_cache;
	struct target_list *head = target->head;
	struct target *curr;

	struct armv7a_common *armv7a = target_to_armv7a(target);
	l2x_cache = calloc(1, sizeof(struct armv7a_l2x_cache));
	l2x_cache->base = base;
	l2x_cache->way = way;
	/* LOG_INFO("L2 cache initialized: base %x, way %d",
	 *	l2x_cache->base, l2x_cache->way); */
	if (armv7a->armv7a_mmu.armv7a_cache.l2_cache)
		LOG_INFO("L2 cache already initialized");
	armv7a->armv7a_mmu.armv7a_cache.l2_cache = l2x_cache;
	/* initialize the L1 / l2x cache functions */
	armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache
		= armv7a_l2x_flush_all_data;
	armv7a->armv7a_mmu.armv7a_cache.display_cache_info =
		armv7a_handle_l2x_cache_info_command;
	/* initialize all targets in this cluster (SMP target);
	 * the L2 cache must be configured after the smp declaration */
	while (head != (struct target_list *)NULL) {
		curr = head->target;
		if (curr != target) {
			armv7a = target_to_armv7a(curr);
			if (armv7a->armv7a_mmu.armv7a_cache.l2_cache)
				LOG_ERROR("SMP target: L2 cache already initialized");
			armv7a->armv7a_mmu.armv7a_cache.l2_cache = l2x_cache;
			armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache =
				armv7a_l2x_flush_all_data;
			armv7a->armv7a_mmu.armv7a_cache.display_cache_info =
				armv7a_handle_l2x_cache_info_command;
		}
		head = head->next;
	}
	return ERROR_OK;
}

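/* The handler below is reached through the "cache_config" command group
 * registered further down; for example (the base address and number of ways
 * are board specific, the values here are only illustrative):
 *   cache_config l2x 0x1e00a000 8 */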
COMMAND_HANDLER(handle_cache_l2x)
{
	struct target *target = get_current_target(CMD_CTX);
	uint32_t base, way;

	if (CMD_ARGC != 2)
		return ERROR_COMMAND_SYNTAX_ERROR;

	/* command_print(CMD_CTX, "%s %s", CMD_ARGV[0], CMD_ARGV[1]); */
	COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], base);
	COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], way);

	armv7a_l2x_cache_init(target, base, way);

	return ERROR_OK;
}

int armv7a_handle_cache_info_command(struct command_context *cmd_ctx,
	struct armv7a_cache_common *armv7a_cache)
{
	if (armv7a_cache->ctype == -1) {
		command_print(cmd_ctx, "cache not yet identified");
		return ERROR_OK;
	}

	if (armv7a_cache->display_cache_info)
		armv7a_cache->display_cache_info(cmd_ctx, armv7a_cache);
	return ERROR_OK;
}

/* retrieve the core id and cluster id */
static int armv7a_read_mpidr(struct target *target)
{
	int retval = ERROR_FAIL;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm_dpm *dpm = armv7a->arm.dpm;
	uint32_t mpidr;

	retval = dpm->prepare(dpm);
	if (retval != ERROR_OK)
		goto done;

	/* MRC p15, 0, <Rd>, c0, c0, 5; read Multiprocessor Affinity Register */
	retval = dpm->instr_read_data_r0(dpm,
			ARMV4_5_MRC(15, 0, 0, 0, 0, 5),
			&mpidr);
	if (retval != ERROR_OK)
		goto done;

	/* ARMv7-R uses a different format for MPIDR.
	 * When configured as uniprocessor (most R cores) it reads as 0.
	 * This will need to be implemented for multiprocessor ARMv7-R cores. */
	if (armv7a->is_armv7r) {
		if (mpidr)
			LOG_ERROR("MPIDR nonzero in ARMv7-R target");
		goto done;
	}

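	/* MPIDR multiprocessor format (bit [31] set): bit [30] is the U bit
	 * (uniprocessor), Aff1 [15:8] is used here as the cluster id and
	 * Aff0 [7:0] as the cpu id */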
	if (mpidr & (1 << 31)) {
		armv7a->multi_processor_system = (mpidr >> 30) & 1;
		armv7a->cluster_id = (mpidr >> 8) & 0xf;
		armv7a->cpu_id = mpidr & 0x3;
		LOG_INFO("%s cluster %x core %x %s", target_name(target),
			armv7a->cluster_id,
			armv7a->cpu_id,
			armv7a->multi_processor_system == 0 ? "multi core" : "mono core");
	} else
		LOG_ERROR("MPIDR not in multiprocessor format");

done:
	dpm->finish(dpm);
	return retval;
}

int armv7a_identify_cache(struct target *target)
{
	/* read the cache descriptors */
	int retval = ERROR_FAIL;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm_dpm *dpm = armv7a->arm.dpm;
	uint32_t cache_selected, clidr;
	uint32_t cache_i_reg, cache_d_reg;
	struct armv7a_cache_common *cache = &(armv7a->armv7a_mmu.armv7a_cache);

	if (!armv7a->is_armv7r)
		armv7a_read_ttbcr(target);
	retval = dpm->prepare(dpm);
	if (retval != ERROR_OK)
		goto done;

	/* retrieve CLIDR
	 * mrc p15, 1, r0, c0, c0, 1	@ read clidr */
	retval = dpm->instr_read_data_r0(dpm,
			ARMV4_5_MRC(15, 1, 0, 0, 0, 1),
			&clidr);
	if (retval != ERROR_OK)
		goto done;

	clidr = (clidr & 0x7000000) >> 23;
	LOG_INFO("number of cache levels %" PRIx32, (uint32_t)(clidr / 2));
	if ((clidr / 2) > 1) {
		/* FIXME: not supported; an L2 cache is present in Cortex-A8
		 * and later parts (e.g. Cortex-A7, A15) */
		LOG_ERROR("L2 cache present: not supported");
	}

	/* retrieve the selected cache
	 * MRC p15, 2, <Rd>, c0, c0, 0; read CSSELR */
	retval = dpm->instr_read_data_r0(dpm,
			ARMV4_5_MRC(15, 2, 0, 0, 0, 0),
			&cache_selected);
	if (retval != ERROR_OK)
		goto done;

	retval = armv7a->arm.mrc(target, 15,
			2, 0,	/* op1, op2 */
			0, 0,	/* CRn, CRm */
			&cache_selected);
	if (retval != ERROR_OK)
		goto done;

	/* select the instruction cache
	 * MCR p15, 2, <Rd>, c0, c0, 0; write CSSELR
	 * [0]: 1 selects the instruction cache, 0 the data cache */
	retval = dpm->instr_write_data_r0(dpm,
			ARMV4_5_MRC(15, 2, 0, 0, 0, 0),
			1);
	if (retval != ERROR_OK)
		goto done;

	/* read CCSIDR
	 * MRC p15, 1, <Rt>, c0, c0, 0; on Cortex-A9 read CCSIDR
	 * [2:0] line size: 001 means eight words per line
	 * [27:13] NumSets: 0x7f 16 KB, 0xff 32 KB, 0x1ff 64 KB */
	retval = dpm->instr_read_data_r0(dpm,
			ARMV4_5_MRC(15, 1, 0, 0, 0, 0),
			&cache_i_reg);
	if (retval != ERROR_OK)
		goto done;

	/* select the data cache */
	retval = dpm->instr_write_data_r0(dpm,
			ARMV4_5_MRC(15, 2, 0, 0, 0, 0),
			0);
	if (retval != ERROR_OK)
		goto done;

	retval = dpm->instr_read_data_r0(dpm,
			ARMV4_5_MRC(15, 1, 0, 0, 0, 0),
			&cache_d_reg);
	if (retval != ERROR_OK)
		goto done;

	/* restore the originally selected cache */
	dpm->instr_write_data_r0(dpm,
		ARMV4_5_MRC(15, 2, 0, 0, 0, 0),
		cache_selected);
	if (retval != ERROR_OK)
		goto done;

	dpm->finish(dpm);

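	/* CCSIDR fields: [2:0] LineSize = log2(words per line) - 2, so the
	 * line length in bytes is 16 << LineSize; [12:3] Associativity - 1;
	 * [27:13] NumSets - 1 */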
	cache->d_u_size.linelen = 16 << (cache_d_reg & 0x7);
	cache->d_u_size.cachesize = (((cache_d_reg >> 13) & 0x7fff) + 1) / 8;
	cache->d_u_size.nsets = (cache_d_reg >> 13) & 0x7fff;
	cache->d_u_size.associativity = ((cache_d_reg >> 3) & 0x3ff) + 1;
	/* compute the info needed for set/way operations on the cache */
	cache->d_u_size.index_shift = (cache_d_reg & 0x7) + 4;
	cache->d_u_size.index = (cache_d_reg >> 13) & 0x7fff;
	cache->d_u_size.way = (cache_d_reg >> 3) & 0x3ff;
	cache->d_u_size.way_shift = cache->d_u_size.way + 1;
	{
		int i = 0;
		while (((cache->d_u_size.way_shift >> i) & 1) != 1)
			i++;
		cache->d_u_size.way_shift = 32 - i;
	}

	LOG_INFO("data cache index %d << %d, way %d << %d",
		cache->d_u_size.index, cache->d_u_size.index_shift,
		cache->d_u_size.way,
		cache->d_u_size.way_shift);

	LOG_INFO("data cache %d bytes %d KBytes asso %d ways",
		cache->d_u_size.linelen,
		cache->d_u_size.cachesize,
		cache->d_u_size.associativity);

	cache->i_size.linelen = 16 << (cache_i_reg & 0x7);
	cache->i_size.associativity = ((cache_i_reg >> 3) & 0x3ff) + 1;
	cache->i_size.nsets = (cache_i_reg >> 13) & 0x7fff;
	cache->i_size.cachesize = (((cache_i_reg >> 13) & 0x7fff) + 1) / 8;
	/* compute the info needed for set/way operations on the cache */
	cache->i_size.index_shift = (cache_i_reg & 0x7) + 4;
	cache->i_size.index = (cache_i_reg >> 13) & 0x7fff;
	cache->i_size.way = (cache_i_reg >> 3) & 0x3ff;
	cache->i_size.way_shift = cache->i_size.way + 1;
	{
		int i = 0;
		while (((cache->i_size.way_shift >> i) & 1) != 1)
			i++;
		cache->i_size.way_shift = 32 - i;
	}

	LOG_INFO("instruction cache index %d << %d, way %d << %d",
		cache->i_size.index, cache->i_size.index_shift,
		cache->i_size.way, cache->i_size.way_shift);

	LOG_INFO("instruction cache %d bytes %d KBytes asso %d ways",
		cache->i_size.linelen,
		cache->i_size.cachesize,
		cache->i_size.associativity);

	/* if there is no L2 cache, initialize the L1 data cache flush function */
	if (armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache == NULL) {
		armv7a->armv7a_mmu.armv7a_cache.display_cache_info =
			armv7a_handle_inner_cache_info_command;
		armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache =
			armv7a_flush_all_data;
	}
	armv7a->armv7a_mmu.armv7a_cache.ctype = 0;

done:
	armv7a_read_mpidr(target);
	return retval;
}

int armv7a_init_arch_info(struct target *target, struct armv7a_common *armv7a)
{
	struct arm *arm = &armv7a->arm;
	arm->arch_info = armv7a;
	target->arch_info = &armv7a->arm;
	/* target is useful in all ARMv4/v5-compatible functions */
	armv7a->arm.target = target;
	armv7a->arm.common_magic = ARM_COMMON_MAGIC;
	armv7a->common_magic = ARMV7_COMMON_MAGIC;
	armv7a->armv7a_mmu.armv7a_cache.l2_cache = NULL;
	armv7a->armv7a_mmu.armv7a_cache.ctype = -1;
	armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache = NULL;
	armv7a->armv7a_mmu.armv7a_cache.display_cache_info = NULL;
	return ERROR_OK;
}

int armv7a_arch_state(struct target *target)
{
	static const char *state[] = {
		"disabled", "enabled"
	};

	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;

	if (armv7a->common_magic != ARMV7_COMMON_MAGIC) {
		LOG_ERROR("BUG: called for a non-ARMv7A target");
		return ERROR_COMMAND_SYNTAX_ERROR;
	}

	arm_arch_state(target);

	if (armv7a->is_armv7r) {
		LOG_USER("D-Cache: %s, I-Cache: %s",
			state[armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled],
			state[armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled]);
	} else {
		LOG_USER("MMU: %s, D-Cache: %s, I-Cache: %s",
			state[armv7a->armv7a_mmu.mmu_enabled],
			state[armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled],
			state[armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled]);
	}

	if (arm->core_mode == ARM_MODE_ABT)
		armv7a_show_fault_registers(target);
	if (target->debug_reason == DBG_REASON_WATCHPOINT)
		LOG_USER("Watchpoint triggered at PC %#08x",
			(unsigned) armv7a->dpm.wp_pc);

	return ERROR_OK;
}

static const struct command_registration l2_cache_commands[] = {
	{
		.name = "l2x",
		.handler = handle_cache_l2x,
		.mode = COMMAND_EXEC,
		.help = "configure l2x cache",
		.usage = "[base_addr] [number_of_way]",
	},
	COMMAND_REGISTRATION_DONE
};

const struct command_registration l2x_cache_command_handlers[] = {
	{
		.name = "cache_config",
		.mode = COMMAND_EXEC,
		.help = "cache configuration for a target",
		.usage = "",
		.chain = l2_cache_commands,
	},
	COMMAND_REGISTRATION_DONE
};

const struct command_registration armv7a_command_handlers[] = {
	{
		.chain = dap_command_handlers,
	},
	{
		.chain = l2x_cache_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};