1 /* Caching code for GDB, the GNU debugger.
3 Copyright (C) 1992, 1993, 1995, 1996, 1998, 1999, 2000, 2001, 2003, 2007,
4 2008, 2009, 2010 Free Software Foundation, Inc.
6 This file is part of GDB.
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "gdb_string.h"
28 #include "splay-tree.h"
30 /* The data cache could lead to incorrect results because it doesn't
31 know about volatile variables, thus making it impossible to debug
32 functions which use memory mapped I/O devices. Set the nocache
33 memory region attribute in those cases.
35 In general the dcache speeds up performance. Some speed improvement
36 comes from the actual caching mechanism, but the major gain is in
37 the reduction of the remote protocol overhead; instead of reading
38 or writing a large area of memory in 4 byte requests, the cache
39 bundles up the requests into LINE_SIZE chunks, reducing overhead
40 significantly. This is most useful when accessing a large amount
41 of data, such as when performing a backtrace.
43 The cache is a splay tree along with a linked list for replacement.
44 Each block caches a LINE_SIZE area of memory. Within each line we
45 remember the address of the line (which must be a multiple of
46 LINE_SIZE) and the actual data block.
48 Lines are only allocated as needed, so DCACHE_SIZE really specifies the
49 *maximum* number of lines in the cache.
51 At present, the cache is write-through rather than writeback: as soon
52 as data is written to the cache, it is also immediately written to
53 the target. Therefore, cache lines are never "dirty". Whether a given
54 line is valid or not depends on where it is stored in the dcache_struct;
55 there is no per-block valid flag. */
57 /* NOTE: Interaction of dcache and memory region attributes
59 As there is no requirement that memory region attributes be aligned
60 to or be a multiple of the dcache page size, dcache_read_line() and
61 dcache_write_line() must break up the page by memory region. If a
62 chunk does not have the cache attribute set, an invalid memory type
63 is set, etc., then the chunk is skipped. Those chunks are handled
64 in target_xfer_memory() (or target_xfer_memory_partial()).
This doesn't occur very often.  The most common occurrence is when
67 the last bit of the .text segment and the first bit of the .data
68 segment fall within the same dcache page with a ro/cacheable memory
69 region defined for the .text segment and a rw/non-cacheable memory
70 region defined for the .data segment. */
/* The maximum number of lines stored.  The total size of the cache is
   equal to DCACHE_SIZE times LINE_SIZE.  */
#define DCACHE_SIZE 4096

/* The size of a cache line.  Smaller values reduce the time taken to
   read a single byte and make the cache more granular, but increase
   overhead and reduce the effectiveness of the cache as a prefetcher.  */
#define LINE_SIZE_POWER 6
#define LINE_SIZE (1 << LINE_SIZE_POWER)

/* Each cache block holds LINE_SIZE bytes of data
   starting at a multiple-of-LINE_SIZE address.  */

#define LINE_SIZE_MASK  ((LINE_SIZE - 1))

/* Offset of address X within its cache line.  */
#define XFORM(x)        ((x) & LINE_SIZE_MASK)

/* Start address of the cache line containing X.  */
#define MASK(x)         ((x) & ~LINE_SIZE_MASK)
91 /* for least-recently-allocated and free lists */
92 struct dcache_block
*prev
;
93 struct dcache_block
*next
;
95 CORE_ADDR addr
; /* address of data */
96 gdb_byte data
[LINE_SIZE
]; /* bytes at given address */
97 int refs
; /* # hits */
103 struct dcache_block
*oldest
; /* least-recently-allocated list */
105 /* The free list is maintained identically to OLDEST to simplify
106 the code: we only need one set of accessors. */
107 struct dcache_block
*freelist
;
109 /* The number of in-use lines in the cache. */
112 /* The ptid of last inferior to use cache or null_ptid. */
116 typedef void (block_func
) (struct dcache_block
*block
, void *param
);
118 static struct dcache_block
*dcache_hit (DCACHE
*dcache
, CORE_ADDR addr
);
120 static int dcache_read_line (DCACHE
*dcache
, struct dcache_block
*db
);
122 static struct dcache_block
*dcache_alloc (DCACHE
*dcache
, CORE_ADDR addr
);
124 static void dcache_info (char *exp
, int tty
);
126 void _initialize_dcache (void);
/* Deprecated "remotecache" flag.  It no longer controls anything; it
   exists only so existing scripts that set it keep working.  */
static int dcache_enabled_p = 0; /* OBSOLETE */

/* "show remotecache" callback: print the deprecated flag's VALUE.
   FROM_TTY and C are unused.  */

static void
show_dcache_enabled_p (struct ui_file *file, int from_tty,
                       struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Deprecated remotecache flag is %s.\n"), value);
}
137 static DCACHE
*last_cache
; /* Used by info dcache */
139 /* Add BLOCK to circular block list BLIST, behind the block at *BLIST.
140 *BLIST is not updated (unless it was previously NULL of course).
141 This is for the least-recently-allocated list's sake:
142 BLIST points to the oldest block.
143 ??? This makes for poor cache usage of the free list,
144 but is it measurable? */
147 append_block (struct dcache_block
**blist
, struct dcache_block
*block
)
151 block
->next
= *blist
;
152 block
->prev
= (*blist
)->prev
;
153 block
->prev
->next
= block
;
154 (*blist
)->prev
= block
;
155 /* We don't update *BLIST here to maintain the invariant that for the
156 least-recently-allocated list *BLIST points to the oldest block. */
166 /* Remove BLOCK from circular block list BLIST. */
169 remove_block (struct dcache_block
**blist
, struct dcache_block
*block
)
171 if (block
->next
== block
)
177 block
->next
->prev
= block
->prev
;
178 block
->prev
->next
= block
->next
;
179 /* If we removed the block *BLIST points to, shift it to the next block
180 to maintain the invariant that for the least-recently-allocated list
181 *BLIST points to the oldest block. */
183 *blist
= block
->next
;
187 /* Iterate over all elements in BLIST, calling FUNC.
188 PARAM is passed to FUNC.
189 FUNC may remove the block it's passed, but only that block. */
192 for_each_block (struct dcache_block
**blist
, block_func
*func
, void *param
)
194 struct dcache_block
*db
;
202 struct dcache_block
*next
= db
->next
;
207 while (*blist
&& db
!= *blist
);
210 /* BLOCK_FUNC function for dcache_invalidate.
211 This doesn't remove the block from the oldest list on purpose.
212 dcache_invalidate will do it later. */
215 invalidate_block (struct dcache_block
*block
, void *param
)
217 DCACHE
*dcache
= (DCACHE
*) param
;
219 splay_tree_remove (dcache
->tree
, (splay_tree_key
) block
->addr
);
220 append_block (&dcache
->freelist
, block
);
223 /* Free all the data cache blocks, thus discarding all cached data. */
226 dcache_invalidate (DCACHE
*dcache
)
228 for_each_block (&dcache
->oldest
, invalidate_block
, dcache
);
230 dcache
->oldest
= NULL
;
232 dcache
->ptid
= null_ptid
;
235 /* Invalidate the line associated with ADDR. */
238 dcache_invalidate_line (DCACHE
*dcache
, CORE_ADDR addr
)
240 struct dcache_block
*db
= dcache_hit (dcache
, addr
);
244 splay_tree_remove (dcache
->tree
, (splay_tree_key
) db
->addr
);
245 remove_block (&dcache
->oldest
, db
);
246 append_block (&dcache
->freelist
, db
);
251 /* If addr is present in the dcache, return the address of the block
252 containing it. Otherwise return NULL. */
254 static struct dcache_block
*
255 dcache_hit (DCACHE
*dcache
, CORE_ADDR addr
)
257 struct dcache_block
*db
;
259 splay_tree_node node
= splay_tree_lookup (dcache
->tree
,
260 (splay_tree_key
) MASK (addr
));
265 db
= (struct dcache_block
*) node
->value
;
270 /* Fill a cache line from target memory.
271 The result is 1 for success, 0 if the (entire) cache line
275 dcache_read_line (DCACHE
*dcache
, struct dcache_block
*db
)
282 struct mem_region
*region
;
290 /* Don't overrun if this block is right at the end of the region. */
291 region
= lookup_mem_region (memaddr
);
292 if (region
->hi
== 0 || memaddr
+ len
< region
->hi
)
295 reg_len
= region
->hi
- memaddr
;
297 /* Skip non-readable regions. The cache attribute can be ignored,
298 since we may be loading this for a stack access. */
299 if (region
->attrib
.mode
== MEM_WO
)
307 res
= target_read (¤t_target
, TARGET_OBJECT_RAW_MEMORY
,
308 NULL
, myaddr
, memaddr
, reg_len
);
320 /* Get a free cache block, put or keep it on the valid list,
321 and return its address. */
323 static struct dcache_block
*
324 dcache_alloc (DCACHE
*dcache
, CORE_ADDR addr
)
326 struct dcache_block
*db
;
328 if (dcache
->size
>= DCACHE_SIZE
)
330 /* Evict the least recently allocated line. */
332 remove_block (&dcache
->oldest
, db
);
334 splay_tree_remove (dcache
->tree
, (splay_tree_key
) db
->addr
);
338 db
= dcache
->freelist
;
340 remove_block (&dcache
->freelist
, db
);
342 db
= xmalloc (sizeof (struct dcache_block
));
347 db
->addr
= MASK (addr
);
350 /* Put DB at the end of the list, it's the newest. */
351 append_block (&dcache
->oldest
, db
);
353 splay_tree_insert (dcache
->tree
, (splay_tree_key
) db
->addr
,
354 (splay_tree_value
) db
);
359 /* Using the data cache DCACHE, store in *PTR the contents of the byte at
360 address ADDR in the remote machine.
362 Returns 1 for success, 0 for error. */
365 dcache_peek_byte (DCACHE
*dcache
, CORE_ADDR addr
, gdb_byte
*ptr
)
367 struct dcache_block
*db
= dcache_hit (dcache
, addr
);
371 db
= dcache_alloc (dcache
, addr
);
373 if (!dcache_read_line (dcache
, db
))
377 *ptr
= db
->data
[XFORM (addr
)];
381 /* Write the byte at PTR into ADDR in the data cache.
383 The caller is responsible for also promptly writing the data
384 through to target memory.
386 If addr is not in cache, this function does nothing; writing to
387 an area of memory which wasn't present in the cache doesn't cause
390 Always return 1 (meaning success) to simplify dcache_xfer_memory. */
393 dcache_poke_byte (DCACHE
*dcache
, CORE_ADDR addr
, gdb_byte
*ptr
)
395 struct dcache_block
*db
= dcache_hit (dcache
, addr
);
398 db
->data
[XFORM (addr
)] = *ptr
;
404 dcache_splay_tree_compare (splay_tree_key a
, splay_tree_key b
)
414 /* Allocate and initialize a data cache. */
422 dcache
= (DCACHE
*) xmalloc (sizeof (*dcache
));
424 dcache
->tree
= splay_tree_new (dcache_splay_tree_compare
,
428 dcache
->oldest
= NULL
;
429 dcache
->freelist
= NULL
;
431 dcache
->ptid
= null_ptid
;
/* BLOCK_FUNC routine for dcache_free: release one block.
   PARAM is unused.  */

static void
free_block (struct dcache_block *block, void *param)
{
  xfree (block);
}
445 /* Free a data cache. */
448 dcache_free (DCACHE
*dcache
)
450 if (last_cache
== dcache
)
453 splay_tree_delete (dcache
->tree
);
454 for_each_block (&dcache
->oldest
, free_block
, NULL
);
455 for_each_block (&dcache
->freelist
, free_block
, NULL
);
459 /* Read or write LEN bytes from inferior memory at MEMADDR, transferring
460 to or from debugger address MYADDR. Write to inferior if SHOULD_WRITE is
463 Return the number of bytes actually transfered, or -1 if the
464 transfer is not supported or otherwise fails. Return of a non-negative
465 value less than LEN indicates that no further transfer is possible.
466 NOTE: This is different than the to_xfer_partial interface, in which
467 positive values less than LEN mean further transfers may be possible. */
470 dcache_xfer_memory (struct target_ops
*ops
, DCACHE
*dcache
,
471 CORE_ADDR memaddr
, gdb_byte
*myaddr
,
472 int len
, int should_write
)
476 int (*xfunc
) (DCACHE
*dcache
, CORE_ADDR addr
, gdb_byte
*ptr
);
477 xfunc
= should_write
? dcache_poke_byte
: dcache_peek_byte
;
479 /* If this is a different inferior from what we've recorded,
482 if (! ptid_equal (inferior_ptid
, dcache
->ptid
))
484 dcache_invalidate (dcache
);
485 dcache
->ptid
= inferior_ptid
;
488 /* Do write-through first, so that if it fails, we don't write to
493 res
= target_write (ops
, TARGET_OBJECT_RAW_MEMORY
,
494 NULL
, myaddr
, memaddr
, len
);
497 /* Update LEN to what was actually written. */
501 for (i
= 0; i
< len
; i
++)
503 if (!xfunc (dcache
, memaddr
+ i
, myaddr
+ i
))
505 /* That failed. Discard its cache line so we don't have a
506 partially read line. */
507 dcache_invalidate_line (dcache
, memaddr
+ i
);
508 /* If we're writing, we still wrote LEN bytes. */
519 /* FIXME: There would be some benefit to making the cache write-back and
520 moving the writeback operation to a higher layer, as it could occur
521 after a sequence of smaller writes have been completed (as when a stack
522 frame is constructed for an inferior function call). Note that only
523 moving it up one level to target_xfer_memory[_partial]() is not
524 sufficient since we want to coalesce memory transfers that are
525 "logically" connected but not actually a single call to one of the
526 memory transfer functions. */
528 /* Just update any cache lines which are already present. This is called
529 by memory_xfer_partial in cases where the access would otherwise not go
530 through the cache. */
533 dcache_update (DCACHE
*dcache
, CORE_ADDR memaddr
, gdb_byte
*myaddr
, int len
)
536 for (i
= 0; i
< len
; i
++)
537 dcache_poke_byte (dcache
, memaddr
+ i
, myaddr
+ i
);
541 dcache_print_line (int index
)
544 struct dcache_block
*db
;
549 printf_filtered (_("No data cache available.\n"));
553 n
= splay_tree_min (last_cache
->tree
);
555 for (i
= index
; i
> 0; --i
)
559 n
= splay_tree_successor (last_cache
->tree
, n
->key
);
564 printf_filtered (_("No such cache line exists.\n"));
568 db
= (struct dcache_block
*) n
->value
;
570 printf_filtered (_("Line %d: address %s [%d hits]\n"),
571 index
, paddress (target_gdbarch
, db
->addr
), db
->refs
);
573 for (j
= 0; j
< LINE_SIZE
; j
++)
575 printf_filtered ("%02x ", db
->data
[j
]);
577 /* Print a newline every 16 bytes (48 characters) */
578 if ((j
% 16 == 15) && (j
!= LINE_SIZE
- 1))
579 printf_filtered ("\n");
581 printf_filtered ("\n");
585 dcache_info (char *exp
, int tty
)
588 int i
, refcount
, lineno
;
593 i
= strtol (exp
, &linestart
, 10);
594 if (linestart
== exp
|| i
< 0)
596 printf_filtered (_("Usage: info dcache [linenumber]\n"));
600 dcache_print_line (i
);
604 printf_filtered (_("Dcache line width %d, maximum size %d\n"),
605 LINE_SIZE
, DCACHE_SIZE
);
607 if (!last_cache
|| ptid_equal (last_cache
->ptid
, null_ptid
))
609 printf_filtered (_("No data cache available.\n"));
613 printf_filtered (_("Contains data for %s\n"),
614 target_pid_to_str (last_cache
->ptid
));
618 n
= splay_tree_min (last_cache
->tree
);
623 struct dcache_block
*db
= (struct dcache_block
*) n
->value
;
625 printf_filtered (_("Line %d: address %s [%d hits]\n"),
626 i
, paddress (target_gdbarch
, db
->addr
), db
->refs
);
628 refcount
+= db
->refs
;
630 n
= splay_tree_successor (last_cache
->tree
, n
->key
);
633 printf_filtered (_("Cache state: %d active lines, %d hits\n"), i
, refcount
);
637 _initialize_dcache (void)
639 add_setshow_boolean_cmd ("remotecache", class_support
,
640 &dcache_enabled_p
, _("\
641 Set cache use for remote targets."), _("\
642 Show cache use for remote targets."), _("\
643 This used to enable the data cache for remote targets. The cache\n\
644 functionality is now controlled by the memory region system and the\n\
645 \"stack-cache\" flag; \"remotecache\" now does nothing and\n\
646 exists only for compatibility reasons."),
648 show_dcache_enabled_p
,
649 &setlist
, &showlist
);
651 add_info ("dcache", dcache_info
,
653 Print information on the dcache performance.\n\
654 With no arguments, this command prints the cache configuration and a\n\
655 summary of each line in the cache. Use \"info dcache <lineno> to dump\"\n\
656 the contents of a given line."));