/* frv cache model.
   Copyright (C) 1999-2024 Free Software Foundation, Inc.
   Contributed by Red Hat.

This file is part of the GNU simulators.

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

/* This must come before any other includes.  */
#include "defs.h"

#define WANT_CPU frvbf
#define WANT_CPU_FRVBF

#include "libiberty.h"
#include "sim-main.h"
#include "cache.h"
#include "bfd.h"
#include <stdlib.h>

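/* Initialize the cache, setting machine-specific defaults for any
   dimensions which were not configured and allocating the tag storage,
   data storage and pipeline status buffers.  */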
void
frv_cache_init (SIM_CPU *cpu, FRV_CACHE *cache)
{
  int elements;
  int i, j;
  SIM_DESC sd;

  /* Set defaults for fields which are not initialized.  */
  sd = CPU_STATE (cpu);
  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr400:
    case bfd_mach_fr450:
      if (cache->configured_sets == 0)
        cache->configured_sets = 512;
      if (cache->configured_ways == 0)
        cache->configured_ways = 2;
      if (cache->line_size == 0)
        cache->line_size = 32;
      if (cache->memory_latency == 0)
        cache->memory_latency = 20;
      break;
    case bfd_mach_fr550:
      if (cache->configured_sets == 0)
        cache->configured_sets = 128;
      if (cache->configured_ways == 0)
        cache->configured_ways = 4;
      if (cache->line_size == 0)
        cache->line_size = 64;
      if (cache->memory_latency == 0)
        cache->memory_latency = 20;
      break;
    default:
      if (cache->configured_sets == 0)
        cache->configured_sets = 64;
      if (cache->configured_ways == 0)
        cache->configured_ways = 4;
      if (cache->line_size == 0)
        cache->line_size = 64;
      if (cache->memory_latency == 0)
        cache->memory_latency = 20;
      break;
    }

  frv_cache_reconfigure (cpu, cache);

  /* First allocate the cache storage based on the given dimensions.  */
  elements = cache->sets * cache->ways;
  cache->tag_storage = (FRV_CACHE_TAG *)
    zalloc (elements * sizeof (*cache->tag_storage));
  cache->data_storage = (char *) xmalloc (elements * cache->line_size);

  /* Initialize the pipelines and status buffers.  */
  for (i = LS; i < FRV_CACHE_PIPELINES; ++i)
    {
      cache->pipeline[i].requests = NULL;
      cache->pipeline[i].status.flush.valid = 0;
      cache->pipeline[i].status.return_buffer.valid = 0;
      cache->pipeline[i].status.return_buffer.data
        = (char *) xmalloc (cache->line_size);
      for (j = FIRST_STAGE; j < FRV_CACHE_STAGES; ++j)
        cache->pipeline[i].stages[j].request = NULL;
    }
  cache->BARS.valid = 0;
  cache->NARS.valid = 0;

  /* Now set the cache state.  */
  cache->cpu = cpu;
  cache->statistics.accesses = 0;
  cache->statistics.hits = 0;
}

void
frv_cache_term (FRV_CACHE *cache)
{
  /* Free the cache storage.  */
  free (cache->tag_storage);
  free (cache->data_storage);
  free (cache->pipeline[LS].status.return_buffer.data);
  free (cache->pipeline[LD].status.return_buffer.data);
}

/* Reset the cache configuration based on registers in the cpu.  */
void
frv_cache_reconfigure (SIM_CPU *current_cpu, FRV_CACHE *cache)
{
  int ihsr8;
  int icdm;
  SIM_DESC sd;

  /* Set defaults for fields which are not initialized.  */
  sd = CPU_STATE (current_cpu);
  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr550:
      if (cache == CPU_INSN_CACHE (current_cpu))
        {
          ihsr8 = GET_IHSR8 ();
          icdm = GET_IHSR8_ICDM (ihsr8);
          /* If IHSR8.ICDM is set, then the cache becomes a one way cache.  */
          if (icdm)
            {
              cache->sets = cache->sets * cache->ways;
              cache->ways = 1;
              break;
            }
        }
      ATTRIBUTE_FALLTHROUGH;
    default:
      /* Set the cache to its original settings.  */
      cache->sets = cache->configured_sets;
      cache->ways = cache->configured_ways;
      break;
    }
}

/* Determine whether the given cache is enabled.  */
int
frv_cache_enabled (FRV_CACHE *cache)
{
  SIM_CPU *current_cpu = cache->cpu;
  int hsr0 = GET_HSR0 ();
  if (GET_HSR0_ICE (hsr0) && cache == CPU_INSN_CACHE (current_cpu))
    return 1;
  if (GET_HSR0_DCE (hsr0) && cache == CPU_DATA_CACHE (current_cpu))
    return 1;
  return 0;
}

/* Determine whether the given address is RAM access, assuming that HSR0.RME
   is set.  */
static int
ram_access (FRV_CACHE *cache, USI address)
{
  int ihsr8;
  int cwe;
  USI start, end, way_size;
  SIM_CPU *current_cpu = cache->cpu;
  SIM_DESC sd = CPU_STATE (current_cpu);

  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr550:
      /* IHSR8.DCWE or IHSR8.ICWE determines which ways get RAM access.  */
      ihsr8 = GET_IHSR8 ();
      if (cache == CPU_INSN_CACHE (current_cpu))
        {
          start = 0xfe000000;
          end = 0xfe008000;
          cwe = GET_IHSR8_ICWE (ihsr8);
        }
      else
        {
          start = 0xfe400000;
          end = 0xfe408000;
          cwe = GET_IHSR8_DCWE (ihsr8);
        }
      way_size = (end - start) / 4;
      end -= way_size * cwe;
      return address >= start && address < end;
    default:
      break;
    }

  return 1; /* RAM access */
}

/* Determine whether the given address should be accessed without using
   the cache.  */
static int
non_cache_access (FRV_CACHE *cache, USI address)
{
  int hsr0;
  SIM_DESC sd;
  SIM_CPU *current_cpu = cache->cpu;

  sd = CPU_STATE (current_cpu);
  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr400:
    case bfd_mach_fr450:
      if (address >= 0xff000000
          || (address >= 0xfe000000 && address <= 0xfeffffff))
        return 1; /* non-cache access */
      break;
    case bfd_mach_fr550:
      if (address >= 0xff000000
          || (address >= 0xfeff0000 && address <= 0xfeffffff))
        return 1; /* non-cache access */
      if (cache == CPU_INSN_CACHE (current_cpu))
        {
          if (address >= 0xfe000000 && address <= 0xfe007fff)
            return 1; /* non-cache access */
        }
      else if (address >= 0xfe400000 && address <= 0xfe407fff)
        return 1; /* non-cache access */
      break;
    default:
      if (address >= 0xff000000
          || (address >= 0xfeff0000 && address <= 0xfeffffff))
        return 1; /* non-cache access */
      if (cache == CPU_INSN_CACHE (current_cpu))
        {
          if (address >= 0xfe000000 && address <= 0xfe003fff)
            return 1; /* non-cache access */
        }
      else if (address >= 0xfe400000 && address <= 0xfe403fff)
        return 1; /* non-cache access */
      break;
    }

  hsr0 = GET_HSR0 ();
  if (GET_HSR0_RME (hsr0))
    return ram_access (cache, address);

  return 0; /* cache-access */
}

/* Find the cache line corresponding to the given address.
   If it is found then 'return_tag' is set to point to the tag for that line
   and 1 is returned.
   If it is not found, 'return_tag' is set to point to the tag for the least
   recently used line and 0 is returned.  */
static int
get_tag (FRV_CACHE *cache, SI address, FRV_CACHE_TAG **return_tag)
{
  int set;
  int way;
  int bits;
  USI tag;
  FRV_CACHE_TAG *found;
  FRV_CACHE_TAG *available;

  ++cache->statistics.accesses;

  /* First calculate which set this address will fall into.  Do this by
     shifting out the bits representing the offset within the line and
     then keeping enough bits to index the set.  */
  set = address & ~(cache->line_size - 1);
  for (bits = cache->line_size - 1; bits != 0; bits >>= 1)
    set >>= 1;
  set &= (cache->sets - 1);

  /* Now search the set for a valid tag which matches this address.  At the
     same time make note of the least recently used tag, which we will return
     if no match is found.  */
  available = NULL;
  tag = CACHE_ADDRESS_TAG (cache, address);
  for (way = 0; way < cache->ways; ++way)
    {
      found = CACHE_TAG (cache, set, way);
      /* This tag is available as the least recently used if it is the
         least recently used seen so far and it is not locked.  */
      if (! found->locked && (available == NULL || available->lru > found->lru))
        available = found;
      if (found->valid && found->tag == tag)
        {
          *return_tag = found;
          ++cache->statistics.hits;
          return 1; /* found it */
        }
    }

  *return_tag = available;
  return 0; /* not found */
}

/* Write the given data out to memory.  */
static void
write_data_to_memory (FRV_CACHE *cache, SI address, char *data, int length)
{
  SIM_CPU *cpu = cache->cpu;
  IADDR pc = CPU_PC_GET (cpu);
  int write_index = 0;

  switch (length)
    {
    case 1:
    default:
      PROFILE_COUNT_WRITE (cpu, address, MODE_QI);
      break;
    case 2:
      PROFILE_COUNT_WRITE (cpu, address, MODE_HI);
      break;
    case 4:
      PROFILE_COUNT_WRITE (cpu, address, MODE_SI);
      break;
    case 8:
      PROFILE_COUNT_WRITE (cpu, address, MODE_DI);
      break;
    }

  for (write_index = 0; write_index < length; ++write_index)
    {
      /* TODO: Better way to copy memory than a byte at a time?  */
      sim_core_write_unaligned_1 (cpu, pc, write_map, address + write_index,
                                  data[write_index]);
    }
}

/* Write a cache line out to memory.  */
static void
write_line_to_memory (FRV_CACHE *cache, FRV_CACHE_TAG *tag)
{
  SI address = tag->tag;
  int set = CACHE_TAG_SET_NUMBER (cache, tag);
  int bits;
  for (bits = cache->line_size - 1; bits != 0; bits >>= 1)
    set <<= 1;
  address |= set;
  write_data_to_memory (cache, address, tag->line, cache->line_size);
}

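/* Read the given number of bytes from memory into the given buffer.  */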
static void
read_data_from_memory (SIM_CPU *current_cpu, SI address, char *buffer,
                       int length)
{
  PCADDR pc = CPU_PC_GET (current_cpu);
  int i;
  PROFILE_COUNT_READ (current_cpu, address, MODE_QI);
  for (i = 0; i < length; ++i)
    {
      /* TODO: Better way to copy memory than a byte at a time?  */
      buffer[i] = sim_core_read_unaligned_1 (current_cpu, pc, read_map,
                                             address + i);
    }
}

/* Fill the given cache line from memory.  */
static void
fill_line_from_memory (FRV_CACHE *cache, FRV_CACHE_TAG *tag, SI address)
{
  int line_alignment;
  SI read_address;
  SIM_CPU *current_cpu = cache->cpu;

  /* If this line is already valid and the cache is in copy-back mode, then
     write this line to memory before refilling it.
     Check the dirty bit first, since it is less likely to be set.  */
  if (tag->dirty && tag->valid)
    {
      int hsr0 = GET_HSR0 ();
      if (GET_HSR0_CBM (hsr0))
        write_line_to_memory (cache, tag);
    }
  else if (tag->line == NULL)
    {
      int line_index = tag - cache->tag_storage;
      tag->line = cache->data_storage + (line_index * cache->line_size);
    }

  line_alignment = cache->line_size - 1;
  read_address = address & ~line_alignment;
  read_data_from_memory (current_cpu, read_address, tag->line,
                         cache->line_size);
  tag->tag = CACHE_ADDRESS_TAG (cache, address);
  tag->valid = 1;
}

/* Update the LRU information for the tags in the same set as the given tag.  */
static void
set_most_recently_used (FRV_CACHE *cache, FRV_CACHE_TAG *tag)
{
  /* All tags in the same set are contiguous, so find the beginning of the
     set by aligning to the size of a set.  */
  FRV_CACHE_TAG *item = cache->tag_storage + CACHE_TAG_SET_START (cache, tag);
  FRV_CACHE_TAG *limit = item + cache->ways;

  while (item < limit)
    {
      if (item->lru > tag->lru)
        --item->lru;
      ++item;
    }
  tag->lru = cache->ways; /* Mark as most recently used.  */
}

/* Update the LRU information for the tags in the same set as the given tag.  */
static void
set_least_recently_used (FRV_CACHE *cache, FRV_CACHE_TAG *tag)
{
  /* All tags in the same set are contiguous, so find the beginning of the
     set by aligning to the size of a set.  */
  FRV_CACHE_TAG *item = cache->tag_storage + CACHE_TAG_SET_START (cache, tag);
  FRV_CACHE_TAG *limit = item + cache->ways;

  while (item < limit)
    {
      if (item->lru != 0 && item->lru < tag->lru)
        ++item->lru;
      ++item;
    }
  tag->lru = 0; /* Mark as least recently used.  */
}

/* Find the line containing the given address and load it if it is not
   already loaded.
   Returns the tag of the requested line.  */
static FRV_CACHE_TAG *
find_or_retrieve_cache_line (FRV_CACHE *cache, SI address)
{
  /* See if this data is already in the cache.  */
  FRV_CACHE_TAG *tag;
  int found = get_tag (cache, address, &tag);

  /* Fill the line from memory, if it is not valid.  */
  if (! found)
    {
      /* The tag could be NULL if all ways in the set were used and locked.  */
      if (tag == NULL)
        return tag;

      fill_line_from_memory (cache, tag, address);
      tag->dirty = 0;
    }

  /* Update the LRU information for the tags in this set.  */
  set_most_recently_used (cache, tag);

  return tag;
}

static void
copy_line_to_return_buffer (FRV_CACHE *cache, int pipe, FRV_CACHE_TAG *tag,
                            SI address)
{
  /* A cache line was available for the data.
     Copy the data from the cache line to the output buffer.  */
  memcpy (cache->pipeline[pipe].status.return_buffer.data,
          tag->line, cache->line_size);
  cache->pipeline[pipe].status.return_buffer.address
    = address & ~(cache->line_size - 1);
  cache->pipeline[pipe].status.return_buffer.valid = 1;
}

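/* Copy a line of data directly from memory into the return buffer of the
   given pipeline.  */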
static void
copy_memory_to_return_buffer (FRV_CACHE *cache, int pipe, SI address)
{
  address &= ~(cache->line_size - 1);
  read_data_from_memory (cache->cpu, address,
                         cache->pipeline[pipe].status.return_buffer.data,
                         cache->line_size);
  cache->pipeline[pipe].status.return_buffer.address = address;
  cache->pipeline[pipe].status.return_buffer.valid = 1;
}

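/* Record which request the data in the return buffer belongs to.  */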
static void
set_return_buffer_reqno (FRV_CACHE *cache, int pipe, unsigned reqno)
{
  cache->pipeline[pipe].status.return_buffer.reqno = reqno;
}

/* Read data from the given cache.
   Returns the number of cycles required to obtain the data.  */
int
frv_cache_read (FRV_CACHE *cache, int pipe, SI address)
{
  FRV_CACHE_TAG *tag;

  if (non_cache_access (cache, address))
    {
      copy_memory_to_return_buffer (cache, pipe, address);
      return 1;
    }

  tag = find_or_retrieve_cache_line (cache, address);

  if (tag == NULL)
    return 0; /* Indicate non-cache-access.  */

  /* A cache line was available for the data.
     Copy the data from the cache line to the output buffer.  */
  copy_line_to_return_buffer (cache, pipe, tag, address);

  return 1; /* TODO - number of cycles unknown */
}

/* Writes data through the given cache.
   The data is assumed to be in target endian order.
   Returns the number of cycles required to write the data.  */
int
frv_cache_write (FRV_CACHE *cache, SI address, char *data, unsigned length)
{
  int copy_back;

  /* See if this data is already in the cache.  */
  SIM_CPU *current_cpu = cache->cpu;
  FRV_CACHE_TAG *tag;
  int found;

  if (non_cache_access (cache, address))
    {
      write_data_to_memory (cache, address, data, length);
      return 1;
    }

  found = get_tag (cache, address, &tag);

  /* Write the data to the cache line if one was available and if it is
     either a hit or a miss in copy-back mode.
     The tag may be NULL if all ways were in use and locked on a miss.  */
  copy_back = GET_HSR0_CBM (GET_HSR0 ());
  if (tag != NULL && (found || copy_back))
    {
      int line_offset;
      /* Load the line from memory first, if it was a miss.  */
      if (! found)
        fill_line_from_memory (cache, tag, address);
      line_offset = address & (cache->line_size - 1);
      memcpy (tag->line + line_offset, data, length);
      tag->dirty = 1;

      /* Update the LRU information for the tags in this set.  */
      set_most_recently_used (cache, tag);
    }

  /* Write the data to memory if there was no line available or we are in
     write-through (not copy-back mode).  */
  if (tag == NULL || ! copy_back)
    {
      write_data_to_memory (cache, address, data, length);
      if (tag != NULL)
        tag->dirty = 0;
    }

  return 1; /* TODO - number of cycles unknown */
}

/* Preload the cache line containing the given address.  Lock the
   data if requested.
   Returns the number of cycles required to write the data.  */
int
frv_cache_preload (FRV_CACHE *cache, SI address, USI length, int lock)
{
  int offset;
  int lines;

  if (non_cache_access (cache, address))
    return 1;

  /* preload at least 1 line.  */
  if (length == 0)
    length = 1;

  offset = address & (cache->line_size - 1);
  lines = 1 + (offset + length - 1) / cache->line_size;

  /* Careful with this loop -- length is unsigned.  */
  for (/**/; lines > 0; --lines)
    {
      FRV_CACHE_TAG *tag = find_or_retrieve_cache_line (cache, address);
      if (lock && tag != NULL)
        tag->locked = 1;
      address += cache->line_size;
    }

  return 1; /* TODO - number of cycles unknown */
}

/* Unlock the cache line containing the given address.
   Returns the number of cycles required to unlock the line.  */
int
frv_cache_unlock (FRV_CACHE *cache, SI address)
{
  FRV_CACHE_TAG *tag;
  int found;

  if (non_cache_access (cache, address))
    return 1;

  found = get_tag (cache, address, &tag);

  if (found)
    tag->locked = 0;

  return 1; /* TODO - number of cycles unknown */
}

static void
invalidate_return_buffer (FRV_CACHE *cache, SI address)
{
  /* If this address is in one of the return buffers, then invalidate that
     return buffer.  */
  address &= ~(cache->line_size - 1);
  if (address == cache->pipeline[LS].status.return_buffer.address)
    cache->pipeline[LS].status.return_buffer.valid = 0;
  if (address == cache->pipeline[LD].status.return_buffer.address)
    cache->pipeline[LD].status.return_buffer.valid = 0;
}

/* Invalidate the cache line containing the given address.  Flush the
   data if requested.
   Returns the number of cycles required to write the data.  */
int
frv_cache_invalidate (FRV_CACHE *cache, SI address, int flush)
{
  /* See if this data is already in the cache.  */
  FRV_CACHE_TAG *tag;
  int found;

  /* Check for non-cache access.  This operation is still performed even if
     the cache is not currently enabled.  */
  if (non_cache_access (cache, address))
    return 1;

  /* If the line is found, invalidate it.  If a flush is requested, then flush
     it if it is dirty.  */
  found = get_tag (cache, address, &tag);
  if (found)
    {
      SIM_CPU *cpu;
      /* If a flush is requested, then flush it if it is dirty.  */
      if (tag->dirty && flush)
        write_line_to_memory (cache, tag);
      set_least_recently_used (cache, tag);
      tag->valid = 0;
      tag->locked = 0;

      /* If this is the insn cache, then flush the cpu's scache as well.  */
      cpu = cache->cpu;
      if (cache == CPU_INSN_CACHE (cpu))
        scache_flush_cpu (cpu);
    }

  invalidate_return_buffer (cache, address);

  return 1; /* TODO - number of cycles unknown */
}

/* Invalidate the entire cache.  Flush the data if requested.  */
int
frv_cache_invalidate_all (FRV_CACHE *cache, int flush)
{
  int elements = cache->sets * cache->ways;
  FRV_CACHE_TAG *tag = cache->tag_storage;
  SIM_CPU *cpu;
  int i;

  for (i = 0; i < elements; ++i, ++tag)
    {
      /* If a flush is requested, then flush it if it is dirty.  */
      if (tag->valid && tag->dirty && flush)
        write_line_to_memory (cache, tag);
      tag->valid = 0;
      tag->locked = 0;
    }

  /* If this is the insn cache, then flush the cpu's scache as well.  */
  cpu = cache->cpu;
  if (cache == CPU_INSN_CACHE (cpu))
    scache_flush_cpu (cpu);

  /* Invalidate both return buffers.  */
  cache->pipeline[LS].status.return_buffer.valid = 0;
  cache->pipeline[LD].status.return_buffer.valid = 0;

  return 1; /* TODO - number of cycles unknown */
}

/* ---------------------------------------------------------------------------
   Functions for operating the cache in cycle accurate mode.
   -------------------------------------------------------------------------  */
/* Convert a VLIW slot to a cache pipeline index.  */
static int
convert_slot_to_index (int slot)
{
  switch (slot)
    {
    case UNIT_I0:
    case UNIT_C:
      return LS;
    case UNIT_I1:
      return LD;
    default:
      abort ();
    }
  return 0;
}

/* Allocate free chains of cache requests.  */
#define FREE_CHAIN_SIZE 16
static FRV_CACHE_REQUEST *frv_cache_request_free_chain = NULL;
static FRV_CACHE_REQUEST *frv_store_request_free_chain = NULL;

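/* Allocate a new block of cache requests and link them onto the free
   chain.  */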
static void
allocate_new_cache_requests (void)
{
  int i;
  frv_cache_request_free_chain = xmalloc (FREE_CHAIN_SIZE
                                          * sizeof (FRV_CACHE_REQUEST));
  for (i = 0; i < FREE_CHAIN_SIZE - 1; ++i)
    {
      frv_cache_request_free_chain[i].next
        = & frv_cache_request_free_chain[i + 1];
    }

  frv_cache_request_free_chain[FREE_CHAIN_SIZE - 1].next = NULL;
}

/* Return the next free request in the queue for the given cache pipeline.  */
static FRV_CACHE_REQUEST *
new_cache_request (void)
{
  FRV_CACHE_REQUEST *req;

  /* Allocate new elements for the free chain if necessary.  */
  if (frv_cache_request_free_chain == NULL)
    allocate_new_cache_requests ();

  req = frv_cache_request_free_chain;
  frv_cache_request_free_chain = req->next;

  return req;
}

/* Return the given cache request to the free chain.  */
static void
free_cache_request (FRV_CACHE_REQUEST *req)
{
  if (req->kind == req_store)
    {
      req->next = frv_store_request_free_chain;
      frv_store_request_free_chain = req;
    }
  else
    {
      req->next = frv_cache_request_free_chain;
      frv_cache_request_free_chain = req;
    }
}

/* Search the free chain for an existing store request with a buffer of the
   required size.  */
static FRV_CACHE_REQUEST *
new_store_request (int length)
{
  FRV_CACHE_REQUEST *prev = NULL;
  FRV_CACHE_REQUEST *req;
  for (req = frv_store_request_free_chain; req != NULL; req = req->next)
    {
      if (req->u.store.length == length)
        break;
      prev = req;
    }
  if (req != NULL)
    {
      if (prev == NULL)
        frv_store_request_free_chain = req->next;
      else
        prev->next = req->next;
      return req;
    }

  /* No existing request buffer was found, so make a new one.  */
  req = new_cache_request ();
  req->kind = req_store;
  req->u.store.data = xmalloc (length);
  req->u.store.length = length;
  return req;
}

/* Remove the given request from the given pipeline.  */
static void
pipeline_remove_request (FRV_CACHE_PIPELINE *p, FRV_CACHE_REQUEST *request)
{
  FRV_CACHE_REQUEST *next = request->next;
  FRV_CACHE_REQUEST *prev = request->prev;

  if (prev == NULL)
    p->requests = next;
  else
    prev->next = next;

  if (next != NULL)
    next->prev = prev;
}

/* Add the given request to the given pipeline.  */
static void
pipeline_add_request (FRV_CACHE_PIPELINE *p, FRV_CACHE_REQUEST *request)
{
  FRV_CACHE_REQUEST *prev = NULL;
  FRV_CACHE_REQUEST *item;

  /* Add the request in priority order.  0 is the highest priority.  */
  for (item = p->requests; item != NULL; item = item->next)
    {
      if (item->priority > request->priority)
        break;
      prev = item;
    }

  request->next = item;
  request->prev = prev;
  if (prev == NULL)
    p->requests = request;
  else
    prev->next = request;
  if (item != NULL)
    item->prev = request;
}

/* Requeue the request in the last stage of the given pipeline.  */
static void
pipeline_requeue_request (FRV_CACHE_PIPELINE *p)
{
  FRV_CACHE_STAGE *stage = & p->stages[LAST_STAGE];
  FRV_CACHE_REQUEST *req = stage->request;
  stage->request = NULL;
  pipeline_add_request (p, req);
}

/* Return the priority lower than the lowest one in this cache pipeline.
   0 is the highest priority.  */
static int
next_priority (FRV_CACHE *cache, FRV_CACHE_PIPELINE *pipeline)
{
  int i;
  int pipe;
  int lowest = 0;
  FRV_CACHE_REQUEST *req;

  /* Check the priorities of any queued items.  */
  for (req = pipeline->requests; req != NULL; req = req->next)
    if (req->priority > lowest)
      lowest = req->priority;

  /* Check the priorities of items in the pipeline stages.  */
  for (i = FIRST_STAGE; i < FRV_CACHE_STAGES; ++i)
    {
      FRV_CACHE_STAGE *stage = & pipeline->stages[i];
      if (stage->request != NULL && stage->request->priority > lowest)
        lowest = stage->request->priority;
    }

  /* Check the priorities of load requests waiting in WAR.  These are one
     higher than the request that spawned them.  */
  for (i = 0; i < NUM_WARS; ++i)
    {
      FRV_CACHE_WAR *war = & pipeline->WAR[i];
      if (war->valid && war->priority > lowest)
        lowest = war->priority + 1;
    }

  /* Check the priorities of any BARS or NARS associated with this pipeline.
     These are one higher than the request that spawned them.  */
  pipe = pipeline - cache->pipeline;
  if (cache->BARS.valid && cache->BARS.pipe == pipe
      && cache->BARS.priority > lowest)
    lowest = cache->BARS.priority + 1;
  if (cache->NARS.valid && cache->NARS.pipe == pipe
      && cache->NARS.priority > lowest)
    lowest = cache->NARS.priority + 1;

  /* Return a priority 2 lower than the lowest found.  This allows a WAR
     request to be generated with a priority greater than this but less than
     the next higher priority request.  */
  return lowest + 2;
}

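/* Generate a new request for the data now available in the given WAR
   register and queue it on the given pipeline.  */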
static void
add_WAR_request (FRV_CACHE_PIPELINE* pipeline, FRV_CACHE_WAR *war)
{
  /* Add the WAR request to the indexed pipeline.  */
  FRV_CACHE_REQUEST *req = new_cache_request ();
  req->kind = req_WAR;
  req->reqno = war->reqno;
  req->priority = war->priority;
  req->address = war->address;
  req->u.WAR.preload = war->preload;
  req->u.WAR.lock = war->lock;
  pipeline_add_request (pipeline, req);
}

/* Remove the next request from the given pipeline and return it.  */
static FRV_CACHE_REQUEST *
pipeline_next_request (FRV_CACHE_PIPELINE *p)
{
  FRV_CACHE_REQUEST *first = p->requests;
  if (first != NULL)
    pipeline_remove_request (p, first);
  return first;
}

/* Return the request which is at the given stage of the given pipeline.  */
static FRV_CACHE_REQUEST *
pipeline_stage_request (FRV_CACHE_PIPELINE *p, int stage)
{
  return p->stages[stage].request;
}

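/* Advance the pipelines by one stage: free the requests in the final
   stages, shuffle the remaining requests along and feed each pipeline from
   its request queue.  */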
static void
advance_pipelines (FRV_CACHE *cache)
{
  int stage;
  int pipe;
  FRV_CACHE_PIPELINE *pipelines = cache->pipeline;

  /* Free the final stage requests.  */
  for (pipe = 0; pipe < FRV_CACHE_PIPELINES; ++pipe)
    {
      FRV_CACHE_REQUEST *req = pipelines[pipe].stages[LAST_STAGE].request;
      if (req != NULL)
        free_cache_request (req);
    }

  /* Shuffle the requests along the pipeline.  */
  for (stage = LAST_STAGE; stage > FIRST_STAGE; --stage)
    {
      for (pipe = 0; pipe < FRV_CACHE_PIPELINES; ++pipe)
        pipelines[pipe].stages[stage] = pipelines[pipe].stages[stage - 1];
    }

  /* Add a new request to the pipeline.  */
  for (pipe = 0; pipe < FRV_CACHE_PIPELINES; ++pipe)
    pipelines[pipe].stages[FIRST_STAGE].request
      = pipeline_next_request (& pipelines[pipe]);
}

/* Handle a request for a load from the given address.  */
void
frv_cache_request_load (FRV_CACHE *cache, unsigned reqno, SI address, int slot)
{
  FRV_CACHE_REQUEST *req;

  /* slot is a UNIT_*.  Convert it to a cache pipeline index.  */
  int pipe = convert_slot_to_index (slot);
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Add the load request to the indexed pipeline.  */
  req = new_cache_request ();
  req->kind = req_load;
  req->reqno = reqno;
  req->priority = next_priority (cache, pipeline);
  req->address = address;

  pipeline_add_request (pipeline, req);
}

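/* Handle a request for a store to the given address.  */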
void
frv_cache_request_store (FRV_CACHE *cache, SI address,
                         int slot, char *data, unsigned length)
{
  FRV_CACHE_REQUEST *req;

  /* slot is a UNIT_*.  Convert it to a cache pipeline index.  */
  int pipe = convert_slot_to_index (slot);
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Add the store request to the indexed pipeline.  */
  req = new_store_request (length);
  req->kind = req_store;
  req->reqno = NO_REQNO;
  req->priority = next_priority (cache, pipeline);
  req->address = address;
  req->u.store.length = length;
  memcpy (req->u.store.data, data, length);

  pipeline_add_request (pipeline, req);
  invalidate_return_buffer (cache, address);
}

/* Handle a request to invalidate the cache line containing the given address.
   Flush the data if requested.  */
void
frv_cache_request_invalidate (FRV_CACHE *cache, unsigned reqno, SI address,
                              int slot, int all, int flush)
{
  FRV_CACHE_REQUEST *req;

  /* slot is a UNIT_*.  Convert it to a cache pipeline index.  */
  int pipe = convert_slot_to_index (slot);
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Add the invalidate request to the indexed pipeline.  */
  req = new_cache_request ();
  req->kind = req_invalidate;
  req->reqno = reqno;
  req->priority = next_priority (cache, pipeline);
  req->address = address;
  req->u.invalidate.all = all;
  req->u.invalidate.flush = flush;

  pipeline_add_request (pipeline, req);
}

/* Handle a request to preload the cache line containing the given address.  */
void
frv_cache_request_preload (FRV_CACHE *cache, SI address,
                           int slot, int length, int lock)
{
  FRV_CACHE_REQUEST *req;

  /* slot is a UNIT_*.  Convert it to a cache pipeline index.  */
  int pipe = convert_slot_to_index (slot);
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Add the preload request to the indexed pipeline.  */
  req = new_cache_request ();
  req->kind = req_preload;
  req->reqno = NO_REQNO;
  req->priority = next_priority (cache, pipeline);
  req->address = address;
  req->u.preload.length = length;
  req->u.preload.lock = lock;

  pipeline_add_request (pipeline, req);
  invalidate_return_buffer (cache, address);
}

/* Handle a request to unlock the cache line containing the given address.  */
void
frv_cache_request_unlock (FRV_CACHE *cache, SI address, int slot)
{
  FRV_CACHE_REQUEST *req;

  /* slot is a UNIT_*.  Convert it to a cache pipeline index.  */
  int pipe = convert_slot_to_index (slot);
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Add the unlock request to the indexed pipeline.  */
  req = new_cache_request ();
  req->kind = req_unlock;
  req->reqno = NO_REQNO;
  req->priority = next_priority (cache, pipeline);
  req->address = address;

  pipeline_add_request (pipeline, req);
}

/* Check whether this address interferes with a pending request of
   higher priority.  */
static int
address_interference (FRV_CACHE *cache, SI address, FRV_CACHE_REQUEST *req,
                      int pipe)
{
  int i, j;
  int line_mask = ~(cache->line_size - 1);
  int other_pipe;
  int priority = req->priority;
  FRV_CACHE_REQUEST *other_req;
  SI other_address;
  SI all_address;

  address &= line_mask;
  all_address = -1 & line_mask;

  /* Check for collisions in the queue for this pipeline.  */
  for (other_req = cache->pipeline[pipe].requests;
       other_req != NULL;
       other_req = other_req->next)
    {
      other_address = other_req->address & line_mask;
      if ((address == other_address || address == all_address)
          && priority > other_req->priority)
        return 1;
    }

  /* Check for a collision in the other pipeline.  */
  other_pipe = pipe ^ 1;
  other_req = cache->pipeline[other_pipe].stages[LAST_STAGE].request;
  if (other_req != NULL)
    {
      other_address = other_req->address & line_mask;
      if (address == other_address || address == all_address)
        return 1;
    }

  /* Check for a collision with load requests waiting in WAR.  */
  for (i = LS; i < FRV_CACHE_PIPELINES; ++i)
    {
      for (j = 0; j < NUM_WARS; ++j)
        {
          FRV_CACHE_WAR *war = & cache->pipeline[i].WAR[j];
          if (war->valid
              && (address == (war->address & line_mask)
                  || address == all_address)
              && priority > war->priority)
            return 1;
        }
      /* If this is not a WAR request, then yield to any WAR requests in
         either pipeline or to a higher priority request in the same
         pipeline.  */
      if (req->kind != req_WAR)
        {
          for (j = FIRST_STAGE; j < FRV_CACHE_STAGES; ++j)
            {
              other_req = cache->pipeline[i].stages[j].request;
              if (other_req != NULL)
                {
                  if (other_req->kind == req_WAR)
                    return 1;
                  if (i == pipe
                      && (address == (other_req->address & line_mask)
                          || address == all_address)
                      && priority > other_req->priority)
                    return 1;
                }
            }
        }
    }

  /* Check for a collision with load requests waiting in ARS.  */
  if (cache->BARS.valid
      && (address == (cache->BARS.address & line_mask)
          || address == all_address)
      && priority > cache->BARS.priority)
    return 1;
  if (cache->NARS.valid
      && (address == (cache->NARS.address & line_mask)
          || address == all_address)
      && priority > cache->NARS.priority)
    return 1;

  return 0;
}

/* Wait for a free WAR register in BARS or NARS.  */
static void
wait_for_WAR (FRV_CACHE* cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  if (! cache->BARS.valid)
    {
      cache->BARS.pipe = pipe;
      cache->BARS.reqno = req->reqno;
      cache->BARS.address = req->address;
      cache->BARS.priority = req->priority - 1;
      switch (req->kind)
        {
        case req_load:
          cache->BARS.preload = 0;
          cache->BARS.lock = 0;
          break;
        case req_store:
          cache->BARS.preload = 1;
          cache->BARS.lock = 0;
          break;
        case req_preload:
          cache->BARS.preload = 1;
          cache->BARS.lock = req->u.preload.lock;
          break;
        }
      cache->BARS.valid = 1;
      return;
    }
  if (! cache->NARS.valid)
    {
      cache->NARS.pipe = pipe;
      cache->NARS.reqno = req->reqno;
      cache->NARS.address = req->address;
      cache->NARS.priority = req->priority - 1;
      switch (req->kind)
        {
        case req_load:
          cache->NARS.preload = 0;
          cache->NARS.lock = 0;
          break;
        case req_store:
          cache->NARS.preload = 1;
          cache->NARS.lock = 0;
          break;
        case req_preload:
          cache->NARS.preload = 1;
          cache->NARS.lock = req->u.preload.lock;
          break;
        }
      cache->NARS.valid = 1;
      return;
    }
  /* All wait registers are busy, so resubmit this request.  */
  pipeline_requeue_request (pipeline);
}

/* Find a free WAR register and wait for memory to fetch the data.  */
static void
wait_in_WAR (FRV_CACHE* cache, int pipe, FRV_CACHE_REQUEST *req)
{
  int war;
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Find a free WAR to hold this request.  */
  for (war = 0; war < NUM_WARS; ++war)
    if (! pipeline->WAR[war].valid)
      break;
  if (war >= NUM_WARS)
    {
      wait_for_WAR (cache, pipe, req);
      return;
    }

  pipeline->WAR[war].address = req->address;
  pipeline->WAR[war].reqno = req->reqno;
  pipeline->WAR[war].priority = req->priority - 1;
  pipeline->WAR[war].latency = cache->memory_latency + 1;
  switch (req->kind)
    {
    case req_load:
      pipeline->WAR[war].preload = 0;
      pipeline->WAR[war].lock = 0;
      break;
    case req_store:
      pipeline->WAR[war].preload = 1;
      pipeline->WAR[war].lock = 0;
      break;
    case req_preload:
      pipeline->WAR[war].preload = 1;
      pipeline->WAR[war].lock = req->u.preload.lock;
      break;
    }
  pipeline->WAR[war].valid = 1;
}

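/* Handle a load request which has reached the last stage of its pipeline.  */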
static void
handle_req_load (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_TAG *tag;
  SI address = req->address;

  /* If this address interferes with an existing request, then requeue it.  */
  if (address_interference (cache, address, req, pipe))
    {
      pipeline_requeue_request (& cache->pipeline[pipe]);
      return;
    }

  if (frv_cache_enabled (cache) && ! non_cache_access (cache, address))
    {
      int found = get_tag (cache, address, &tag);

      /* If the data was found, return it to the caller.  */
      if (found)
        {
          set_most_recently_used (cache, tag);
          copy_line_to_return_buffer (cache, pipe, tag, address);
          set_return_buffer_reqno (cache, pipe, req->reqno);
          return;
        }
    }

  /* The data is not in the cache or this is a non-cache access.  We need to
     wait for the memory unit to fetch it.  Store this request in the WAR in
     the meantime.  */
  wait_in_WAR (cache, pipe, req);
}

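/* Handle a preload request which has reached the last stage of its
   pipeline.  */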
static void
handle_req_preload (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  int found;
  FRV_CACHE_TAG *tag;
  int length;
  int lock;
  int offset;
  int lines;
  int line;
  SI address = req->address;
  SI cur_address;

  if (! frv_cache_enabled (cache) || non_cache_access (cache, address))
    return;

  /* preload at least 1 line.  */
  length = req->u.preload.length;
  if (length == 0)
    length = 1;

  /* Make sure that this request does not interfere with a pending request.  */
  offset = address & (cache->line_size - 1);
  lines = 1 + (offset + length - 1) / cache->line_size;
  cur_address = address & ~(cache->line_size - 1);
  for (line = 0; line < lines; ++line)
    {
      /* If this address interferes with an existing request,
         then requeue it.  */
      if (address_interference (cache, cur_address, req, pipe))
        {
          pipeline_requeue_request (& cache->pipeline[pipe]);
          return;
        }
      cur_address += cache->line_size;
    }

  /* Now process each cache line.  */
  /* Careful with this loop -- length is unsigned.  */
  lock = req->u.preload.lock;
  cur_address = address & ~(cache->line_size - 1);
  for (line = 0; line < lines; ++line)
    {
      /* If the data was found, then lock it if requested.  */
      found = get_tag (cache, cur_address, &tag);
      if (found)
        {
          if (lock)
            tag->locked = 1;
        }
      else
        {
          /* The data is not in the cache.  We need to wait for the memory
             unit to fetch it.  Store this request in the WAR in the
             meantime.  */
          wait_in_WAR (cache, pipe, req);
        }
      cur_address += cache->line_size;
    }
}

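/* Handle a store request which has reached the last stage of its
   pipeline.  */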
static void
handle_req_store (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  SIM_CPU *current_cpu;
  FRV_CACHE_TAG *tag;
  int found;
  int copy_back;
  SI address = req->address;
  char *data = req->u.store.data;
  int length = req->u.store.length;

  /* If this address interferes with an existing request, then requeue it.  */
  if (address_interference (cache, address, req, pipe))
    {
      pipeline_requeue_request (& cache->pipeline[pipe]);
      return;
    }

  /* Non-cache access.  Write the data directly to memory.  */
  if (! frv_cache_enabled (cache) || non_cache_access (cache, address))
    {
      write_data_to_memory (cache, address, data, length);
      return;
    }

  /* See if the data is in the cache.  */
  found = get_tag (cache, address, &tag);

  /* Write the data to the cache line if one was available and if it is
     either a hit or a miss in copy-back mode.
     The tag may be NULL if all ways were in use and locked on a miss.  */
  current_cpu = cache->cpu;
  copy_back = GET_HSR0_CBM (GET_HSR0 ());
  if (tag != NULL && (found || copy_back))
    {
      int line_offset;
      /* Load the line from memory first, if it was a miss.  */
      if (! found)
        {
          /* We need to wait for the memory unit to fetch the data.
             Store this request in the WAR and requeue the store request.  */
          wait_in_WAR (cache, pipe, req);
          pipeline_requeue_request (& cache->pipeline[pipe]);
          /* Decrement the counts of accesses and hits because when the
             requeued request is processed again, it will appear to be a new
             access and a hit.  */
          --cache->statistics.accesses;
          --cache->statistics.hits;
          return;
        }
      line_offset = address & (cache->line_size - 1);
      memcpy (tag->line + line_offset, data, length);
      invalidate_return_buffer (cache, address);
      tag->dirty = 1;

      /* Update the LRU information for the tags in this set.  */
      set_most_recently_used (cache, tag);
    }

  /* Write the data to memory if there was no line available or we are in
     write-through (not copy-back mode).  */
  if (tag == NULL || ! copy_back)
    {
      write_data_to_memory (cache, address, data, length);
      if (tag != NULL)
        tag->dirty = 0;
    }
}

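/* Handle an invalidate request which has reached the last stage of its
   pipeline.  */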
static void
handle_req_invalidate (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];
  SI address = req->address;
  SI interfere_address = req->u.invalidate.all ? -1 : address;

  /* If this address interferes with an existing request, then requeue it.  */
  if (address_interference (cache, interfere_address, req, pipe))
    {
      pipeline_requeue_request (pipeline);
      return;
    }

  /* Invalidate the cache line now.  This function already checks for
     non-cache access.  */
  if (req->u.invalidate.all)
    frv_cache_invalidate_all (cache, req->u.invalidate.flush);
  else
    frv_cache_invalidate (cache, address, req->u.invalidate.flush);
  if (req->u.invalidate.flush)
    {
      pipeline->status.flush.reqno = req->reqno;
      pipeline->status.flush.address = address;
      pipeline->status.flush.valid = 1;
    }
}

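/* Handle an unlock request which has reached the last stage of its
   pipeline.  */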
static void
handle_req_unlock (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];
  SI address = req->address;

  /* If this address interferes with an existing request, then requeue it.  */
  if (address_interference (cache, address, req, pipe))
    {
      pipeline_requeue_request (pipeline);
      return;
    }

  /* Unlock the cache line.  This function checks for non-cache access.  */
  frv_cache_unlock (cache, address);
}

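/* Handle a WAR request which has reached the last stage of its pipeline.  */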
static void
handle_req_WAR (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_TAG *tag;
  SI address = req->address;

  if (frv_cache_enabled (cache) && ! non_cache_access (cache, address))
    {
      /* Look for the data in the cache.  The statistics of cache hit or
         miss have already been recorded, so save and restore the stats before
         and after obtaining the cache line.  */
      FRV_CACHE_STATISTICS save_stats = cache->statistics;
      tag = find_or_retrieve_cache_line (cache, address);
      cache->statistics = save_stats;
      if (tag != NULL)
        {
          if (! req->u.WAR.preload)
            {
              copy_line_to_return_buffer (cache, pipe, tag, address);
              set_return_buffer_reqno (cache, pipe, req->reqno);
            }
          else
            {
              invalidate_return_buffer (cache, address);
              if (req->u.WAR.lock)
                tag->locked = 1;
            }
          return;
        }
    }

  /* All cache lines in the set were locked, so just copy the data to the
     return buffer directly.  */
  if (! req->u.WAR.preload)
    {
      copy_memory_to_return_buffer (cache, pipe, address);
      set_return_buffer_reqno (cache, pipe, req->reqno);
    }
}

/* Resolve any conflicts and/or execute the given requests.  */
static void
arbitrate_requests (FRV_CACHE *cache)
{
  int pipe;
  /* Simply execute the requests in the final pipeline stages.  */
  for (pipe = LS; pipe < FRV_CACHE_PIPELINES; ++pipe)
    {
      FRV_CACHE_REQUEST *req
        = pipeline_stage_request (& cache->pipeline[pipe], LAST_STAGE);
      /* Make sure that there is a request to handle.  */
      if (req == NULL)
        continue;

      /* Handle the request.  */
      switch (req->kind)
        {
        case req_load:
          handle_req_load (cache, pipe, req);
          break;
        case req_store:
          handle_req_store (cache, pipe, req);
          break;
        case req_invalidate:
          handle_req_invalidate (cache, pipe, req);
          break;
        case req_preload:
          handle_req_preload (cache, pipe, req);
          break;
        case req_unlock:
          handle_req_unlock (cache, pipe, req);
          break;
        case req_WAR:
          handle_req_WAR (cache, pipe, req);
          break;
        default:
          abort ();
        }
    }
}

/* Move a waiting ARS register to a free WAR register.  */
static void
move_ARS_to_WAR (FRV_CACHE *cache, int pipe, FRV_CACHE_WAR *war)
{
  /* If BARS is valid for this pipe, then move it to the given WAR.  Move
     NARS to BARS if it is valid.  */
  if (cache->BARS.valid && cache->BARS.pipe == pipe)
    {
      war->address = cache->BARS.address;
      war->reqno = cache->BARS.reqno;
      war->priority = cache->BARS.priority;
      war->preload = cache->BARS.preload;
      war->lock = cache->BARS.lock;
      war->latency = cache->memory_latency + 1;
      war->valid = 1;
      if (cache->NARS.valid)
        {
          cache->BARS = cache->NARS;
          cache->NARS.valid = 0;
        }
      else
        cache->BARS.valid = 0;
      return;
    }
  /* If NARS is valid for this pipe, then move it to the given WAR.  */
  if (cache->NARS.valid && cache->NARS.pipe == pipe)
    {
      war->address = cache->NARS.address;
      war->reqno = cache->NARS.reqno;
      war->priority = cache->NARS.priority;
      war->preload = cache->NARS.preload;
      war->lock = cache->NARS.lock;
      war->latency = cache->memory_latency + 1;
      war->valid = 1;
      cache->NARS.valid = 0;
    }
}

/* Decrease the latencies of the various states in the cache.  */
static void
decrease_latencies (FRV_CACHE *cache)
{
  int pipe, j;
  /* Check the WAR registers.  */
  for (pipe = LS; pipe < FRV_CACHE_PIPELINES; ++pipe)
    {
      FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];
      for (j = 0; j < NUM_WARS; ++j)
        {
          FRV_CACHE_WAR *war = & pipeline->WAR[j];
          if (war->valid)
            {
              --war->latency;
              /* If the latency has expired, then submit a WAR request to the
                 pipeline.  */
              if (war->latency <= 0)
                {
                  add_WAR_request (pipeline, war);
                  war->valid = 0;
                  move_ARS_to_WAR (cache, pipe, war);
                }
            }
        }
    }
}

/* Run the cache for the given number of cycles.  */
void
frv_cache_run (FRV_CACHE *cache, int cycles)
{
  int i;
  for (i = 0; i < cycles; ++i)
    {
      advance_pipelines (cache);
      arbitrate_requests (cache);
      decrease_latencies (cache);
    }
}

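/* Attempt to read a word from the given cache without disturbing its state
   or statistics.  Returns non-zero if the data was found in the cache.  */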
int
frv_cache_read_passive_SI (FRV_CACHE *cache, SI address, SI *value)
{
  SI offset;
  FRV_CACHE_TAG *tag;

  if (non_cache_access (cache, address))
    return 0;

  {
    FRV_CACHE_STATISTICS save_stats = cache->statistics;
    int found = get_tag (cache, address, &tag);
    cache->statistics = save_stats;

    if (! found)
      return 0; /* Indicate non-cache-access.  */
  }

  /* A cache line was available for the data.
     Extract the target data from the line.  */
  offset = address & (cache->line_size - 1);
  *value = T2H_4 (*(SI *)(tag->line + offset));
  return 1;
}

/* Check the return buffers of the data cache to see if the requested data is
   available.  */
int
frv_cache_data_in_buffer (FRV_CACHE* cache, int pipe, SI address,
                          unsigned reqno)
{
  return cache->pipeline[pipe].status.return_buffer.valid
    && cache->pipeline[pipe].status.return_buffer.reqno == reqno
    && cache->pipeline[pipe].status.return_buffer.address <= address
    && cache->pipeline[pipe].status.return_buffer.address + cache->line_size
       > address;
}

/* Check to see if the requested data has been flushed.  */
int
frv_cache_data_flushed (FRV_CACHE* cache, int pipe, SI address, unsigned reqno)
{
  return cache->pipeline[pipe].status.flush.valid
    && cache->pipeline[pipe].status.flush.reqno == reqno
    && cache->pipeline[pipe].status.flush.address <= address
    && cache->pipeline[pipe].status.flush.address + cache->line_size
       > address;
}