/* frv cache model.
   Copyright (C) 1999-2023 Free Software Foundation, Inc.
   Contributed by Red Hat.

This file is part of the GNU simulators.

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
/* This must come before any other includes.  */
#include "defs.h"

#define WANT_CPU frvbf
#define WANT_CPU_FRVBF

#include "libiberty.h"
#include "sim-main.h"
#include "cache.h"
#include "bfd.h"
#include <stdlib.h>
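
/* This file provides two views of the cache: a functional model
   (frv_cache_read, frv_cache_write, frv_cache_preload, ...) used when
   timing is not being simulated, and a cycle accurate model (everything
   from frv_cache_request_load onward) in which requests flow through
   per-pipeline stages, driven by frv_cache_run.  */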
void
frv_cache_init (SIM_CPU *cpu, FRV_CACHE *cache)
{
  int elements;
  int i, j;
  SIM_DESC sd;

  /* Set defaults for fields which are not initialized.  */
  sd = CPU_STATE (cpu);
  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr400:
    case bfd_mach_fr450:
      if (cache->configured_sets == 0)
        cache->configured_sets = 512;
      if (cache->configured_ways == 0)
        cache->configured_ways = 2;
      if (cache->line_size == 0)
        cache->line_size = 32;
      if (cache->memory_latency == 0)
        cache->memory_latency = 20;
      break;
    case bfd_mach_fr550:
      if (cache->configured_sets == 0)
        cache->configured_sets = 128;
      if (cache->configured_ways == 0)
        cache->configured_ways = 4;
      if (cache->line_size == 0)
        cache->line_size = 64;
      if (cache->memory_latency == 0)
        cache->memory_latency = 20;
      break;
    default:
      if (cache->configured_sets == 0)
        cache->configured_sets = 64;
      if (cache->configured_ways == 0)
        cache->configured_ways = 4;
      if (cache->line_size == 0)
        cache->line_size = 64;
      if (cache->memory_latency == 0)
        cache->memory_latency = 20;
      break;
    }

  frv_cache_reconfigure (cpu, cache);

  /* First allocate the cache storage based on the given dimensions.  */
  elements = cache->sets * cache->ways;
  cache->tag_storage = (FRV_CACHE_TAG *)
    zalloc (elements * sizeof (*cache->tag_storage));
  cache->data_storage = (char *) xmalloc (elements * cache->line_size);

  /* Initialize the pipelines and status buffers.  */
  for (i = LS; i < FRV_CACHE_PIPELINES; ++i)
    {
      cache->pipeline[i].requests = NULL;
      cache->pipeline[i].status.flush.valid = 0;
      cache->pipeline[i].status.return_buffer.valid = 0;
      cache->pipeline[i].status.return_buffer.data
        = (char *) xmalloc (cache->line_size);
      for (j = FIRST_STAGE; j < FRV_CACHE_STAGES; ++j)
        cache->pipeline[i].stages[j].request = NULL;
    }
  cache->BARS.valid = 0;
  cache->NARS.valid = 0;

  /* Now set the cache state.  */
  cache->cpu = cpu;
  cache->statistics.accesses = 0;
  cache->statistics.hits = 0;
}
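
/* Note on the default geometries above: the fr400/fr450 (512 sets x 2 ways
   x 32-byte lines) and fr550 (128 sets x 4 ways x 64-byte lines) defaults
   both describe 32KB caches, while the generic default (64 sets x 4 ways
   x 64-byte lines) is 16KB.  */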
void
frv_cache_term (FRV_CACHE *cache)
{
  /* Free the cache storage.  */
  free (cache->tag_storage);
  free (cache->data_storage);
  free (cache->pipeline[LS].status.return_buffer.data);
  free (cache->pipeline[LD].status.return_buffer.data);
}
/* Reset the cache configuration based on registers in the cpu.  */
void
frv_cache_reconfigure (SIM_CPU *current_cpu, FRV_CACHE *cache)
{
  int ihsr8;
  int icdm;
  SIM_DESC sd;

  /* Set the cache dimensions according to the architecture.  */
  sd = CPU_STATE (current_cpu);
  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr550:
      if (cache == CPU_INSN_CACHE (current_cpu))
        {
          ihsr8 = GET_IHSR8 ();
          icdm = GET_IHSR8_ICDM (ihsr8);
          /* If IHSR8.ICDM is set, then the cache becomes a one way cache.  */
          if (icdm)
            {
              cache->sets = cache->sets * cache->ways;
              cache->ways = 1;
              break;
            }
        }
      /* fall through */
    default:
      /* Set the cache to its original settings.  */
      cache->sets = cache->configured_sets;
      cache->ways = cache->configured_ways;
      break;
    }
}
/* Determine whether the given cache is enabled.  */
int
frv_cache_enabled (FRV_CACHE *cache)
{
  SIM_CPU *current_cpu = cache->cpu;
  int hsr0 = GET_HSR0 ();
  if (GET_HSR0_ICE (hsr0) && cache == CPU_INSN_CACHE (current_cpu))
    return 1;
  if (GET_HSR0_DCE (hsr0) && cache == CPU_DATA_CACHE (current_cpu))
    return 1;
  return 0;
}
/* Determine whether the given address is RAM access, assuming that HSR0.RME
   is set.  */
static int
ram_access (FRV_CACHE *cache, USI address)
{
  int ihsr8;
  int cwe;
  USI start, end, way_size;
  SIM_CPU *current_cpu = cache->cpu;
  SIM_DESC sd = CPU_STATE (current_cpu);

  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr550:
      /* IHSR8.DCWE or IHSR8.ICWE determines which ways get RAM access.  */
      ihsr8 = GET_IHSR8 ();
      if (cache == CPU_INSN_CACHE (current_cpu))
        {
          start = 0xfe000000;
          end = 0xfe008000;
          cwe = GET_IHSR8_ICWE (ihsr8);
        }
      else
        {
          start = 0xfe400000;
          end = 0xfe408000;
          cwe = GET_IHSR8_DCWE (ihsr8);
        }
      way_size = (end - start) / 4;
      end -= way_size * cwe;
      return address >= start && address < end;
    default:
      break;
    }

  return 1; /* RAM access */
}
/* Determine whether the given address should be accessed without using
   the cache.  */
static int
non_cache_access (FRV_CACHE *cache, USI address)
{
  int hsr0;
  SIM_DESC sd;
  SIM_CPU *current_cpu = cache->cpu;

  sd = CPU_STATE (current_cpu);
  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr400:
    case bfd_mach_fr450:
      if (address >= 0xff000000
          || (address >= 0xfe000000 && address <= 0xfeffffff))
        return 1; /* non-cache access */
      break;
    case bfd_mach_fr550:
      if (address >= 0xff000000
          || (address >= 0xfeff0000 && address <= 0xfeffffff))
        return 1; /* non-cache access */
      if (cache == CPU_INSN_CACHE (current_cpu))
        {
          if (address >= 0xfe000000 && address <= 0xfe007fff)
            return 1; /* non-cache access */
        }
      else if (address >= 0xfe400000 && address <= 0xfe407fff)
        return 1; /* non-cache access */
      break;
    default:
      if (address >= 0xff000000
          || (address >= 0xfeff0000 && address <= 0xfeffffff))
        return 1; /* non-cache access */
      if (cache == CPU_INSN_CACHE (current_cpu))
        {
          if (address >= 0xfe000000 && address <= 0xfe003fff)
            return 1; /* non-cache access */
        }
      else if (address >= 0xfe400000 && address <= 0xfe403fff)
        return 1; /* non-cache access */
      break;
    }

  hsr0 = GET_HSR0 ();
  if (GET_HSR0_RME (hsr0))
    return ram_access (cache, address);

  return 0; /* cache-access */
}
/* Find the cache line corresponding to the given address.
   If it is found then 'return_tag' is set to point to the tag for that line
   and 1 is returned.
   If it is not found, 'return_tag' is set to point to the tag for the least
   recently used line and 0 is returned.
*/
static int
get_tag (FRV_CACHE *cache, SI address, FRV_CACHE_TAG **return_tag)
{
  int set;
  int way;
  int bits;
  USI tag;
  FRV_CACHE_TAG *found;
  FRV_CACHE_TAG *available;

  ++cache->statistics.accesses;

  /* First calculate which set this address will fall into.  Do this by
     shifting out the bits representing the offset within the line and
     then keeping enough bits to index the set.  */
  set = address & ~(cache->line_size - 1);
  for (bits = cache->line_size - 1; bits != 0; bits >>= 1)
    set >>= 1;
  set &= (cache->sets - 1);
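
  /* With the default 64-byte lines and 64 sets, for example, this reduces
     to (address >> 6) & 63, i.e. bits [11:6] of the address; the loop is
     needed because line_size is only known at run time.  */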
  /* Now search the set for a valid tag which matches this address.  At the
     same time make note of the least recently used tag, which we will return
     if no match is found.  */
  available = NULL;
  tag = CACHE_ADDRESS_TAG (cache, address);
  for (way = 0; way < cache->ways; ++way)
    {
      found = CACHE_TAG (cache, set, way);
      /* This tag is available as the least recently used if it is the
         least recently used seen so far and it is not locked.  */
      if (! found->locked && (available == NULL || available->lru > found->lru))
        available = found;
      if (found->valid && found->tag == tag)
        {
          *return_tag = found;
          ++cache->statistics.hits;
          return 1; /* found it */
        }
    }

  *return_tag = available;
  return 0; /* not found */
}
/* Write the given data out to memory.  */
static void
write_data_to_memory (FRV_CACHE *cache, SI address, char *data, int length)
{
  SIM_CPU *cpu = cache->cpu;
  IADDR pc = CPU_PC_GET (cpu);
  int write_index = 0;

  switch (length)
    {
    case 1:
    default:
      PROFILE_COUNT_WRITE (cpu, address, MODE_QI);
      break;
    case 2:
      PROFILE_COUNT_WRITE (cpu, address, MODE_HI);
      break;
    case 4:
      PROFILE_COUNT_WRITE (cpu, address, MODE_SI);
      break;
    case 8:
      PROFILE_COUNT_WRITE (cpu, address, MODE_DI);
      break;
    }

  for (write_index = 0; write_index < length; ++write_index)
    {
      /* TODO: Better way to copy memory than a byte at a time?  */
      sim_core_write_unaligned_1 (cpu, pc, write_map, address + write_index,
                                  data[write_index]);
    }
}
/* Write a cache line out to memory.  */
static void
write_line_to_memory (FRV_CACHE *cache, FRV_CACHE_TAG *tag)
{
  SI address = tag->tag;
  int set = CACHE_TAG_SET_NUMBER (cache, tag);
  int bits;
  for (bits = cache->line_size - 1; bits != 0; bits >>= 1)
    set <<= 1;
  address |= set;
  write_data_to_memory (cache, address, tag->line, cache->line_size);
}
static void
read_data_from_memory (SIM_CPU *current_cpu, SI address, char *buffer,
                       int length)
{
  PCADDR pc = CPU_PC_GET (current_cpu);
  int i;
  PROFILE_COUNT_READ (current_cpu, address, MODE_QI);
  for (i = 0; i < length; ++i)
    {
      /* TODO: Better way to copy memory than a byte at a time?  */
      buffer[i] = sim_core_read_unaligned_1 (current_cpu, pc, read_map,
                                             address + i);
    }
}
/* Fill the given cache line from memory.  */
static void
fill_line_from_memory (FRV_CACHE *cache, FRV_CACHE_TAG *tag, SI address)
{
  PCADDR pc;
  int line_alignment;
  SI read_address;
  SIM_CPU *current_cpu = cache->cpu;

  /* If this line is already valid and the cache is in copy-back mode, then
     write this line to memory before refilling it.
     Check the dirty bit first, since it is less likely to be set.  */
  if (tag->dirty && tag->valid)
    {
      int hsr0 = GET_HSR0 ();
      if (GET_HSR0_CBM (hsr0))
        write_line_to_memory (cache, tag);
    }
  else if (tag->line == NULL)
    {
      int line_index = tag - cache->tag_storage;
      tag->line = cache->data_storage + (line_index * cache->line_size);
    }

  pc = CPU_PC_GET (current_cpu);
  line_alignment = cache->line_size - 1;
  read_address = address & ~line_alignment;
  read_data_from_memory (current_cpu, read_address, tag->line,
                         cache->line_size);
  tag->tag = CACHE_ADDRESS_TAG (cache, address);
  tag->valid = 1;
}
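
/* LRU bookkeeping: each tag in a set carries an 'lru' counter in which a
   larger value means more recently used.  The two helpers below keep the
   counters consistent by demoting or promoting the other ways in a set
   whenever one tag is moved to either end of the ordering.  */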
/* Update the LRU information for the tags in the same set as the given tag.  */
static void
set_most_recently_used (FRV_CACHE *cache, FRV_CACHE_TAG *tag)
{
  /* All tags in the same set are contiguous, so find the beginning of the
     set by aligning to the size of a set.  */
  FRV_CACHE_TAG *item = cache->tag_storage + CACHE_TAG_SET_START (cache, tag);
  FRV_CACHE_TAG *limit = item + cache->ways;

  while (item < limit)
    {
      if (item->lru > tag->lru)
        --item->lru;
      ++item;
    }
  tag->lru = cache->ways; /* Mark as most recently used.  */
}
/* Update the LRU information for the tags in the same set as the given tag.  */
static void
set_least_recently_used (FRV_CACHE *cache, FRV_CACHE_TAG *tag)
{
  /* All tags in the same set are contiguous, so find the beginning of the
     set by aligning to the size of a set.  */
  FRV_CACHE_TAG *item = cache->tag_storage + CACHE_TAG_SET_START (cache, tag);
  FRV_CACHE_TAG *limit = item + cache->ways;

  while (item < limit)
    {
      if (item->lru != 0 && item->lru < tag->lru)
        ++item->lru;
      ++item;
    }
  tag->lru = 0; /* Mark as least recently used.  */
}
/* Find the line containing the given address and load it if it is not
   already loaded.
   Returns the tag of the requested line.  */
static FRV_CACHE_TAG *
find_or_retrieve_cache_line (FRV_CACHE *cache, SI address)
{
  /* See if this data is already in the cache.  */
  FRV_CACHE_TAG *tag;
  int found = get_tag (cache, address, &tag);

  /* Fill the line from memory, if it is not valid.  */
  if (! found)
    {
      /* The tag could be NULL if all ways in the set were used and locked.  */
      if (tag == NULL)
        return tag;

      fill_line_from_memory (cache, tag, address);
      tag->dirty = 0;
    }

  /* Update the LRU information for the tags in this set.  */
  set_most_recently_used (cache, tag);

  return tag;
}
static void
copy_line_to_return_buffer (FRV_CACHE *cache, int pipe, FRV_CACHE_TAG *tag,
                            SI address)
{
  /* A cache line was available for the data.
     Copy the data from the cache line to the output buffer.  */
  memcpy (cache->pipeline[pipe].status.return_buffer.data,
          tag->line, cache->line_size);
  cache->pipeline[pipe].status.return_buffer.address
    = address & ~(cache->line_size - 1);
  cache->pipeline[pipe].status.return_buffer.valid = 1;
}
static void
copy_memory_to_return_buffer (FRV_CACHE *cache, int pipe, SI address)
{
  address &= ~(cache->line_size - 1);
  read_data_from_memory (cache->cpu, address,
                         cache->pipeline[pipe].status.return_buffer.data,
                         cache->line_size);
  cache->pipeline[pipe].status.return_buffer.address = address;
  cache->pipeline[pipe].status.return_buffer.valid = 1;
}
static void
set_return_buffer_reqno (FRV_CACHE *cache, int pipe, unsigned reqno)
{
  cache->pipeline[pipe].status.return_buffer.reqno = reqno;
}
/* Read data from the given cache.
   Returns the number of cycles required to obtain the data.  */
int
frv_cache_read (FRV_CACHE *cache, int pipe, SI address)
{
  FRV_CACHE_TAG *tag;

  if (non_cache_access (cache, address))
    {
      copy_memory_to_return_buffer (cache, pipe, address);
      return 1;
    }

  tag = find_or_retrieve_cache_line (cache, address);

  if (tag == NULL)
    return 0; /* Indicate non-cache-access.  */

  /* A cache line was available for the data.
     Copy the data from the cache line to the output buffer.  */
  copy_line_to_return_buffer (cache, pipe, tag, address);

  return 1; /* TODO - number of cycles unknown */
}
/* Writes data through the given cache.
   The data is assumed to be in target endian order.
   Returns the number of cycles required to write the data.  */
int
frv_cache_write (FRV_CACHE *cache, SI address, char *data, unsigned length)
{
  int copy_back;

  /* See if this data is already in the cache.  */
  SIM_CPU *current_cpu = cache->cpu;
  USI hsr0 = GET_HSR0 ();
  FRV_CACHE_TAG *tag;
  int found;

  if (non_cache_access (cache, address))
    {
      write_data_to_memory (cache, address, data, length);
      return 1;
    }

  found = get_tag (cache, address, &tag);

  /* Write the data to the cache line if one was available and if it is
     either a hit or a miss in copy-back mode.
     The tag may be NULL if all ways were in use and locked on a miss.
  */
  copy_back = GET_HSR0_CBM (GET_HSR0 ());
  if (tag != NULL && (found || copy_back))
    {
      int line_offset;
      /* Load the line from memory first, if it was a miss.  */
      if (! found)
        fill_line_from_memory (cache, tag, address);
      line_offset = address & (cache->line_size - 1);
      memcpy (tag->line + line_offset, data, length);
      tag->dirty = 1;

      /* Update the LRU information for the tags in this set.  */
      set_most_recently_used (cache, tag);
    }

  /* Write the data to memory if there was no line available or we are in
     write-through (not copy-back mode).  */
  if (tag == NULL || ! copy_back)
    {
      write_data_to_memory (cache, address, data, length);
      if (tag != NULL)
        tag->dirty = 0;
    }

  return 1; /* TODO - number of cycles unknown */
}
/* Preload the cache line containing the given address.  Lock the
   data if requested.
   Returns the number of cycles required to write the data.  */
int
frv_cache_preload (FRV_CACHE *cache, SI address, USI length, int lock)
{
  int offset;
  int lines;

  if (non_cache_access (cache, address))
    return 1;

  /* preload at least 1 line.  */
  if (length == 0)
    length = 1;

  offset = address & (cache->line_size - 1);
  lines = 1 + (offset + length - 1) / cache->line_size;
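
  /* For example, a 3-byte preload starting at offset 62 of a 64-byte line
     gives lines = 1 + (62 + 3 - 1) / 64 = 2, since the region straddles
     a line boundary.  */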
  /* Careful with this loop -- length is unsigned.  */
  for (/**/; lines > 0; --lines)
    {
      FRV_CACHE_TAG *tag = find_or_retrieve_cache_line (cache, address);
      if (lock && tag != NULL)
        tag->locked = 1;
      address += cache->line_size;
    }

  return 1; /* TODO - number of cycles unknown */
}
/* Unlock the cache line containing the given address.
   Returns the number of cycles required to unlock the line.  */
int
frv_cache_unlock (FRV_CACHE *cache, SI address)
{
  FRV_CACHE_TAG *tag;
  int found;

  if (non_cache_access (cache, address))
    return 1;

  found = get_tag (cache, address, &tag);

  if (found)
    tag->locked = 0;

  return 1; /* TODO - number of cycles unknown */
}
static void
invalidate_return_buffer (FRV_CACHE *cache, SI address)
{
  /* If this address is in one of the return buffers, then invalidate that
     return buffer.  */
  address &= ~(cache->line_size - 1);
  if (address == cache->pipeline[LS].status.return_buffer.address)
    cache->pipeline[LS].status.return_buffer.valid = 0;
  if (address == cache->pipeline[LD].status.return_buffer.address)
    cache->pipeline[LD].status.return_buffer.valid = 0;
}
/* Invalidate the cache line containing the given address.  Flush the
   data if requested.
   Returns the number of cycles required to write the data.  */
int
frv_cache_invalidate (FRV_CACHE *cache, SI address, int flush)
{
  /* See if this data is already in the cache.  */
  FRV_CACHE_TAG *tag;
  int found;

  /* Check for non-cache access.  This operation is still performed even if
     the cache is not currently enabled.  */
  if (non_cache_access (cache, address))
    return 1;

  /* If the line is found, invalidate it.  If a flush is requested, then flush
     it if it is dirty.  */
  found = get_tag (cache, address, &tag);
  if (found)
    {
      SIM_CPU *cpu;
      /* If a flush is requested, then flush it if it is dirty.  */
      if (tag->dirty && flush)
        write_line_to_memory (cache, tag);
      set_least_recently_used (cache, tag);
      tag->valid = 0;
      tag->locked = 0;

      /* If this is the insn cache, then flush the cpu's scache as well.  */
      cpu = cache->cpu;
      if (cache == CPU_INSN_CACHE (cpu))
        scache_flush_cpu (cpu);
    }

  invalidate_return_buffer (cache, address);

  return 1; /* TODO - number of cycles unknown */
}
/* Invalidate the entire cache.  Flush the data if requested.  */
int
frv_cache_invalidate_all (FRV_CACHE *cache, int flush)
{
  int elements = cache->sets * cache->ways;
  FRV_CACHE_TAG *tag = cache->tag_storage;
  SIM_CPU *cpu;
  int i;

  for (i = 0; i < elements; ++i, ++tag)
    {
      /* If a flush is requested, then flush it if it is dirty.  */
      if (tag->valid && tag->dirty && flush)
        write_line_to_memory (cache, tag);
      tag->valid = 0;
      tag->locked = 0;
    }

  /* If this is the insn cache, then flush the cpu's scache as well.  */
  cpu = cache->cpu;
  if (cache == CPU_INSN_CACHE (cpu))
    scache_flush_cpu (cpu);

  /* Invalidate both return buffers.  */
  cache->pipeline[LS].status.return_buffer.valid = 0;
  cache->pipeline[LD].status.return_buffer.valid = 0;

  return 1; /* TODO - number of cycles unknown */
}
/* ---------------------------------------------------------------------------
   Functions for operating the cache in cycle accurate mode.
   ------------------------------------------------------------------------- */
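
/* In cycle accurate mode, requests are queued on one of two cache
   pipelines (LS for the I0 and C slots, LD for I1), advance one stage per
   cycle, and are executed by arbitrate_requests when they reach the last
   stage.  */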
/* Convert a VLIW slot to a cache pipeline index.  */
static int
convert_slot_to_index (int slot)
{
  switch (slot)
    {
    case UNIT_I0:
    case UNIT_C:
      return LS;
    case UNIT_I1:
      return LD;
    default:
      abort ();
    }
  return 0;
}
/* Allocate free chains of cache requests.  */
#define FREE_CHAIN_SIZE 16
static FRV_CACHE_REQUEST *frv_cache_request_free_chain = NULL;
static FRV_CACHE_REQUEST *frv_store_request_free_chain = NULL;

static void
allocate_new_cache_requests (void)
{
  int i;
  frv_cache_request_free_chain = xmalloc (FREE_CHAIN_SIZE
                                          * sizeof (FRV_CACHE_REQUEST));
  for (i = 0; i < FREE_CHAIN_SIZE - 1; ++i)
    {
      frv_cache_request_free_chain[i].next
        = & frv_cache_request_free_chain[i + 1];
    }

  frv_cache_request_free_chain[FREE_CHAIN_SIZE - 1].next = NULL;
}
/* Return the next free request in the queue for the given cache pipeline.  */
static FRV_CACHE_REQUEST *
new_cache_request (void)
{
  FRV_CACHE_REQUEST *req;

  /* Allocate new elements for the free chain if necessary.  */
  if (frv_cache_request_free_chain == NULL)
    allocate_new_cache_requests ();

  req = frv_cache_request_free_chain;
  frv_cache_request_free_chain = req->next;

  return req;
}
/* Return the given cache request to the free chain.  */
static void
free_cache_request (FRV_CACHE_REQUEST *req)
{
  if (req->kind == req_store)
    {
      req->next = frv_store_request_free_chain;
      frv_store_request_free_chain = req;
    }
  else
    {
      req->next = frv_cache_request_free_chain;
      frv_cache_request_free_chain = req;
    }
}
/* Search the free chain for an existing store request with a buffer of the
   right size.  */
static FRV_CACHE_REQUEST *
new_store_request (int length)
{
  FRV_CACHE_REQUEST *prev = NULL;
  FRV_CACHE_REQUEST *req;
  for (req = frv_store_request_free_chain; req != NULL; req = req->next)
    {
      if (req->u.store.length == length)
        break;
      prev = req;
    }
  if (req != NULL)
    {
      if (prev == NULL)
        frv_store_request_free_chain = req->next;
      else
        prev->next = req->next;
      return req;
    }

  /* No existing request buffer was found, so make a new one.  */
  req = new_cache_request ();
  req->kind = req_store;
  req->u.store.data = xmalloc (length);
  req->u.store.length = length;
  return req;
}
/* Remove the given request from the given pipeline.  */
static void
pipeline_remove_request (FRV_CACHE_PIPELINE *p, FRV_CACHE_REQUEST *request)
{
  FRV_CACHE_REQUEST *next = request->next;
  FRV_CACHE_REQUEST *prev = request->prev;

  if (prev == NULL)
    p->requests = next;
  else
    prev->next = next;

  if (next != NULL)
    next->prev = prev;
}
/* Add the given request to the given pipeline.  */
static void
pipeline_add_request (FRV_CACHE_PIPELINE *p, FRV_CACHE_REQUEST *request)
{
  FRV_CACHE_REQUEST *prev = NULL;
  FRV_CACHE_REQUEST *item;

  /* Add the request in priority order.  0 is the highest priority.  */
  for (item = p->requests; item != NULL; item = item->next)
    {
      if (item->priority > request->priority)
        break;
      prev = item;
    }

  request->next = item;
  request->prev = prev;
  if (prev == NULL)
    p->requests = request;
  else
    prev->next = request;
  if (item != NULL)
    item->prev = request;
}
/* Requeue the request from the last stage of the given pipeline.  */
static void
pipeline_requeue_request (FRV_CACHE_PIPELINE *p)
{
  FRV_CACHE_STAGE *stage = & p->stages[LAST_STAGE];
  FRV_CACHE_REQUEST *req = stage->request;
  stage->request = NULL;
  pipeline_add_request (p, req);
}
/* Return the priority lower than the lowest one in this cache pipeline.
   0 is the highest priority.  */
static int
next_priority (FRV_CACHE *cache, FRV_CACHE_PIPELINE *pipeline)
{
  int i, j;
  int pipe;
  int lowest = 0;
  FRV_CACHE_REQUEST *req;

  /* Check the priorities of any queued items.  */
  for (req = pipeline->requests; req != NULL; req = req->next)
    if (req->priority > lowest)
      lowest = req->priority;

  /* Check the priorities of items in the pipeline stages.  */
  for (i = FIRST_STAGE; i < FRV_CACHE_STAGES; ++i)
    {
      FRV_CACHE_STAGE *stage = & pipeline->stages[i];
      if (stage->request != NULL && stage->request->priority > lowest)
        lowest = stage->request->priority;
    }

  /* Check the priorities of load requests waiting in WAR.  These are one
     higher than the request that spawned them.  */
  for (i = 0; i < NUM_WARS; ++i)
    {
      FRV_CACHE_WAR *war = & pipeline->WAR[i];
      if (war->valid && war->priority > lowest)
        lowest = war->priority + 1;
    }

  /* Check the priorities of any BARS or NARS associated with this pipeline.
     These are one higher than the request that spawned them.  */
  pipe = pipeline - cache->pipeline;
  if (cache->BARS.valid && cache->BARS.pipe == pipe
      && cache->BARS.priority > lowest)
    lowest = cache->BARS.priority + 1;
  if (cache->NARS.valid && cache->NARS.pipe == pipe
      && cache->NARS.priority > lowest)
    lowest = cache->NARS.priority + 1;

  /* Return a priority 2 lower than the lowest found.  This allows a WAR
     request to be generated with a priority greater than this but less than
     the next higher priority request.  */
  return lowest + 2;
}
static void
add_WAR_request (FRV_CACHE_PIPELINE* pipeline, FRV_CACHE_WAR *war)
{
  /* Add the WAR request to the given pipeline.  */
  FRV_CACHE_REQUEST *req = new_cache_request ();
  req->kind = req_WAR;
  req->reqno = war->reqno;
  req->priority = war->priority;
  req->address = war->address;
  req->u.WAR.preload = war->preload;
  req->u.WAR.lock = war->lock;
  pipeline_add_request (pipeline, req);
}
/* Remove the next request from the given pipeline and return it.  */
static FRV_CACHE_REQUEST *
pipeline_next_request (FRV_CACHE_PIPELINE *p)
{
  FRV_CACHE_REQUEST *first = p->requests;
  if (first != NULL)
    pipeline_remove_request (p, first);
  return first;
}
/* Return the request which is at the given stage of the given pipeline.  */
static FRV_CACHE_REQUEST *
pipeline_stage_request (FRV_CACHE_PIPELINE *p, int stage)
{
  return p->stages[stage].request;
}
static void
advance_pipelines (FRV_CACHE *cache)
{
  int stage;
  int pipe;
  FRV_CACHE_PIPELINE *pipelines = cache->pipeline;

  /* Free the final stage requests.  */
  for (pipe = 0; pipe < FRV_CACHE_PIPELINES; ++pipe)
    {
      FRV_CACHE_REQUEST *req = pipelines[pipe].stages[LAST_STAGE].request;
      if (req != NULL)
        free_cache_request (req);
    }

  /* Shuffle the requests along the pipeline.  */
  for (stage = LAST_STAGE; stage > FIRST_STAGE; --stage)
    {
      for (pipe = 0; pipe < FRV_CACHE_PIPELINES; ++pipe)
        pipelines[pipe].stages[stage] = pipelines[pipe].stages[stage - 1];
    }

  /* Add a new request to the pipeline.  */
  for (pipe = 0; pipe < FRV_CACHE_PIPELINES; ++pipe)
    pipelines[pipe].stages[FIRST_STAGE].request
      = pipeline_next_request (& pipelines[pipe]);
}
/* Handle a request for a load from the given address.  */
void
frv_cache_request_load (FRV_CACHE *cache, unsigned reqno, SI address, int slot)
{
  FRV_CACHE_REQUEST *req;

  /* slot is a UNIT_*.  Convert it to a cache pipeline index.  */
  int pipe = convert_slot_to_index (slot);
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Add the load request to the indexed pipeline.  */
  req = new_cache_request ();
  req->kind = req_load;
  req->reqno = reqno;
  req->priority = next_priority (cache, pipeline);
  req->address = address;

  pipeline_add_request (pipeline, req);
}
/* Handle a request for a store to the given address.  */
void
frv_cache_request_store (FRV_CACHE *cache, SI address,
                         int slot, char *data, unsigned length)
{
  FRV_CACHE_REQUEST *req;

  /* slot is a UNIT_*.  Convert it to a cache pipeline index.  */
  int pipe = convert_slot_to_index (slot);
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Add the store request to the indexed pipeline.  */
  req = new_store_request (length);
  req->kind = req_store;
  req->reqno = NO_REQNO;
  req->priority = next_priority (cache, pipeline);
  req->address = address;
  req->u.store.length = length;
  memcpy (req->u.store.data, data, length);

  pipeline_add_request (pipeline, req);
  invalidate_return_buffer (cache, address);
}
/* Handle a request to invalidate the cache line containing the given address.
   Flush the data if requested.  */
void
frv_cache_request_invalidate (FRV_CACHE *cache, unsigned reqno, SI address,
                              int slot, int all, int flush)
{
  FRV_CACHE_REQUEST *req;

  /* slot is a UNIT_*.  Convert it to a cache pipeline index.  */
  int pipe = convert_slot_to_index (slot);
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Add the invalidate request to the indexed pipeline.  */
  req = new_cache_request ();
  req->kind = req_invalidate;
  req->reqno = reqno;
  req->priority = next_priority (cache, pipeline);
  req->address = address;
  req->u.invalidate.all = all;
  req->u.invalidate.flush = flush;

  pipeline_add_request (pipeline, req);
}
/* Handle a request to preload the cache line containing the given address.  */
void
frv_cache_request_preload (FRV_CACHE *cache, SI address,
                           int slot, int length, int lock)
{
  FRV_CACHE_REQUEST *req;

  /* slot is a UNIT_*.  Convert it to a cache pipeline index.  */
  int pipe = convert_slot_to_index (slot);
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Add the preload request to the indexed pipeline.  */
  req = new_cache_request ();
  req->kind = req_preload;
  req->reqno = NO_REQNO;
  req->priority = next_priority (cache, pipeline);
  req->address = address;
  req->u.preload.length = length;
  req->u.preload.lock = lock;

  pipeline_add_request (pipeline, req);
  invalidate_return_buffer (cache, address);
}
/* Handle a request to unlock the cache line containing the given address.  */
void
frv_cache_request_unlock (FRV_CACHE *cache, SI address, int slot)
{
  FRV_CACHE_REQUEST *req;

  /* slot is a UNIT_*.  Convert it to a cache pipeline index.  */
  int pipe = convert_slot_to_index (slot);
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Add the unlock request to the indexed pipeline.  */
  req = new_cache_request ();
  req->kind = req_unlock;
  req->reqno = NO_REQNO;
  req->priority = next_priority (cache, pipeline);
  req->address = address;

  pipeline_add_request (pipeline, req);
}
/* Check whether this address interferes with a pending request of
   higher priority.  */
static int
address_interference (FRV_CACHE *cache, SI address, FRV_CACHE_REQUEST *req,
                      int pipe)
{
  int i, j;
  int line_mask = ~(cache->line_size - 1);
  int other_pipe;
  int priority = req->priority;
  FRV_CACHE_REQUEST *other_req;
  SI other_address;
  SI all_address;

  address &= line_mask;
  all_address = -1 & line_mask;

  /* Check for collisions in the queue for this pipeline.  */
  for (other_req = cache->pipeline[pipe].requests;
       other_req != NULL;
       other_req = other_req->next)
    {
      other_address = other_req->address & line_mask;
      if ((address == other_address || address == all_address)
          && priority > other_req->priority)
        return 1;
    }

  /* Check for a collision in the other pipeline.  */
  other_pipe = pipe ^ 1;
  other_req = cache->pipeline[other_pipe].stages[LAST_STAGE].request;
  if (other_req != NULL)
    {
      other_address = other_req->address & line_mask;
      if (address == other_address || address == all_address)
        return 1;
    }

  /* Check for a collision with load requests waiting in WAR.  */
  for (i = LS; i < FRV_CACHE_PIPELINES; ++i)
    {
      for (j = 0; j < NUM_WARS; ++j)
        {
          FRV_CACHE_WAR *war = & cache->pipeline[i].WAR[j];
          if (war->valid
              && (address == (war->address & line_mask)
                  || address == all_address)
              && priority > war->priority)
            return 1;
        }
      /* If this is not a WAR request, then yield to any WAR requests in
         either pipeline or to a higher priority request in the same pipeline.
      */
      if (req->kind != req_WAR)
        {
          for (j = FIRST_STAGE; j < FRV_CACHE_STAGES; ++j)
            {
              other_req = cache->pipeline[i].stages[j].request;
              if (other_req != NULL)
                {
                  if (other_req->kind == req_WAR)
                    return 1;
                  if (i == pipe
                      && (address == (other_req->address & line_mask)
                          || address == all_address)
                      && priority > other_req->priority)
                    return 1;
                }
            }
        }
    }

  /* Check for a collision with load requests waiting in ARS.  */
  if (cache->BARS.valid
      && (address == (cache->BARS.address & line_mask)
          || address == all_address)
      && priority > cache->BARS.priority)
    return 1;
  if (cache->NARS.valid
      && (address == (cache->NARS.address & line_mask)
          || address == all_address)
      && priority > cache->NARS.priority)
    return 1;

  return 0;
}
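
/* A request which must wait for memory is parked in one of its pipeline's
   WAR registers for cache->memory_latency cycles (see decrease_latencies).
   When no WAR is free, the request falls back to the cache-wide BARS
   register, then NARS, and failing that is simply requeued.  */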
/* Wait for a free WAR register in BARS or NARS.  */
static void
wait_for_WAR (FRV_CACHE* cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_WAR war;
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  if (! cache->BARS.valid)
    {
      cache->BARS.pipe = pipe;
      cache->BARS.reqno = req->reqno;
      cache->BARS.address = req->address;
      cache->BARS.priority = req->priority - 1;
      switch (req->kind)
        {
        case req_load:
          cache->BARS.preload = 0;
          cache->BARS.lock = 0;
          break;
        case req_store:
          cache->BARS.preload = 1;
          cache->BARS.lock = 0;
          break;
        case req_preload:
          cache->BARS.preload = 1;
          cache->BARS.lock = req->u.preload.lock;
          break;
        }
      cache->BARS.valid = 1;
      return;
    }
  if (! cache->NARS.valid)
    {
      cache->NARS.pipe = pipe;
      cache->NARS.reqno = req->reqno;
      cache->NARS.address = req->address;
      cache->NARS.priority = req->priority - 1;
      switch (req->kind)
        {
        case req_load:
          cache->NARS.preload = 0;
          cache->NARS.lock = 0;
          break;
        case req_store:
          cache->NARS.preload = 1;
          cache->NARS.lock = 0;
          break;
        case req_preload:
          cache->NARS.preload = 1;
          cache->NARS.lock = req->u.preload.lock;
          break;
        }
      cache->NARS.valid = 1;
      return;
    }
  /* All wait registers are busy, so resubmit this request.  */
  pipeline_requeue_request (pipeline);
}
/* Find a free WAR register and wait for memory to fetch the data.  */
static void
wait_in_WAR (FRV_CACHE* cache, int pipe, FRV_CACHE_REQUEST *req)
{
  int war;
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Find a free WAR to hold this request.  */
  for (war = 0; war < NUM_WARS; ++war)
    if (! pipeline->WAR[war].valid)
      break;
  if (war >= NUM_WARS)
    {
      wait_for_WAR (cache, pipe, req);
      return;
    }

  pipeline->WAR[war].address = req->address;
  pipeline->WAR[war].reqno = req->reqno;
  pipeline->WAR[war].priority = req->priority - 1;
  pipeline->WAR[war].latency = cache->memory_latency + 1;
  switch (req->kind)
    {
    case req_load:
      pipeline->WAR[war].preload = 0;
      pipeline->WAR[war].lock = 0;
      break;
    case req_store:
      pipeline->WAR[war].preload = 1;
      pipeline->WAR[war].lock = 0;
      break;
    case req_preload:
      pipeline->WAR[war].preload = 1;
      pipeline->WAR[war].lock = req->u.preload.lock;
      break;
    }
  pipeline->WAR[war].valid = 1;
}
static void
handle_req_load (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_TAG *tag;
  SI address = req->address;

  /* If this address interferes with an existing request, then requeue it.  */
  if (address_interference (cache, address, req, pipe))
    {
      pipeline_requeue_request (& cache->pipeline[pipe]);
      return;
    }

  if (frv_cache_enabled (cache) && ! non_cache_access (cache, address))
    {
      int found = get_tag (cache, address, &tag);

      /* If the data was found, return it to the caller.  */
      if (found)
        {
          set_most_recently_used (cache, tag);
          copy_line_to_return_buffer (cache, pipe, tag, address);
          set_return_buffer_reqno (cache, pipe, req->reqno);
          return;
        }
    }

  /* The data is not in the cache or this is a non-cache access.  We need to
     wait for the memory unit to fetch it.  Store this request in the WAR in
     the meantime.  */
  wait_in_WAR (cache, pipe, req);
}
static void
handle_req_preload (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  int found;
  FRV_CACHE_WAR war;
  FRV_CACHE_TAG *tag;
  int length;
  int lock;
  int offset;
  int lines;
  int line;
  SI address = req->address;
  SI cur_address;

  if (! frv_cache_enabled (cache) || non_cache_access (cache, address))
    return;

  /* preload at least 1 line.  */
  length = req->u.preload.length;
  if (length == 0)
    length = 1;

  /* Make sure that this request does not interfere with a pending request.  */
  offset = address & (cache->line_size - 1);
  lines = 1 + (offset + length - 1) / cache->line_size;
  cur_address = address & ~(cache->line_size - 1);
  for (line = 0; line < lines; ++line)
    {
      /* If this address interferes with an existing request,
         then requeue it.  */
      if (address_interference (cache, cur_address, req, pipe))
        {
          pipeline_requeue_request (& cache->pipeline[pipe]);
          return;
        }
      cur_address += cache->line_size;
    }

  /* Now process each cache line.  */
  /* Careful with this loop -- length is unsigned.  */
  lock = req->u.preload.lock;
  cur_address = address & ~(cache->line_size - 1);
  for (line = 0; line < lines; ++line)
    {
      /* If the data was found, then lock it if requested.  */
      found = get_tag (cache, cur_address, &tag);
      if (found)
        {
          if (lock)
            tag->locked = 1;
        }
      else
        {
          /* The data is not in the cache.  We need to wait for the memory
             unit to fetch it.  Store this request in the WAR in the meantime.
          */
          wait_in_WAR (cache, pipe, req);
        }
      cur_address += cache->line_size;
    }
}
static void
handle_req_store (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  SIM_CPU *current_cpu;
  FRV_CACHE_TAG *tag;
  int found;
  int copy_back;
  SI address = req->address;
  char *data = req->u.store.data;
  int length = req->u.store.length;

  /* If this address interferes with an existing request, then requeue it.  */
  if (address_interference (cache, address, req, pipe))
    {
      pipeline_requeue_request (& cache->pipeline[pipe]);
      return;
    }

  /* Non-cache access.  Write the data directly to memory.  */
  if (! frv_cache_enabled (cache) || non_cache_access (cache, address))
    {
      write_data_to_memory (cache, address, data, length);
      return;
    }

  /* See if the data is in the cache.  */
  found = get_tag (cache, address, &tag);

  /* Write the data to the cache line if one was available and if it is
     either a hit or a miss in copy-back mode.
     The tag may be NULL if all ways were in use and locked on a miss.
  */
  current_cpu = cache->cpu;
  copy_back = GET_HSR0_CBM (GET_HSR0 ());
  if (tag != NULL && (found || copy_back))
    {
      int line_offset;
      /* Load the line from memory first, if it was a miss.  */
      if (! found)
        {
          /* We need to wait for the memory unit to fetch the data.
             Store this request in the WAR and requeue the store request.  */
          wait_in_WAR (cache, pipe, req);
          pipeline_requeue_request (& cache->pipeline[pipe]);
          /* Decrement the counts of accesses and hits because when the
             requeued request is processed again, it will appear to be a new
             access and a hit.  */
          --cache->statistics.accesses;
          --cache->statistics.hits;
          return;
        }
      line_offset = address & (cache->line_size - 1);
      memcpy (tag->line + line_offset, data, length);
      invalidate_return_buffer (cache, address);
      tag->dirty = 1;

      /* Update the LRU information for the tags in this set.  */
      set_most_recently_used (cache, tag);
    }

  /* Write the data to memory if there was no line available or we are in
     write-through (not copy-back mode).  */
  if (tag == NULL || ! copy_back)
    {
      write_data_to_memory (cache, address, data, length);
      if (tag != NULL)
        tag->dirty = 0;
    }
}
static void
handle_req_invalidate (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];
  SI address = req->address;
  SI interfere_address = req->u.invalidate.all ? -1 : address;

  /* If this address interferes with an existing request, then requeue it.  */
  if (address_interference (cache, interfere_address, req, pipe))
    {
      pipeline_requeue_request (pipeline);
      return;
    }

  /* Invalidate the cache line now.  This function already checks for
     non-cache access.  */
  if (req->u.invalidate.all)
    frv_cache_invalidate_all (cache, req->u.invalidate.flush);
  else
    frv_cache_invalidate (cache, address, req->u.invalidate.flush);
  if (req->u.invalidate.flush)
    {
      pipeline->status.flush.reqno = req->reqno;
      pipeline->status.flush.address = address;
      pipeline->status.flush.valid = 1;
    }
}
static void
handle_req_unlock (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];
  SI address = req->address;

  /* If this address interferes with an existing request, then requeue it.  */
  if (address_interference (cache, address, req, pipe))
    {
      pipeline_requeue_request (pipeline);
      return;
    }

  /* Unlock the cache line.  This function checks for non-cache access.  */
  frv_cache_unlock (cache, address);
}
static void
handle_req_WAR (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  char *buffer;
  FRV_CACHE_TAG *tag;
  SI address = req->address;

  if (frv_cache_enabled (cache) && ! non_cache_access (cache, address))
    {
      /* Look for the data in the cache.  The statistics of cache hit or
         miss have already been recorded, so save and restore the stats before
         and after obtaining the cache line.  */
      FRV_CACHE_STATISTICS save_stats = cache->statistics;
      tag = find_or_retrieve_cache_line (cache, address);
      cache->statistics = save_stats;
      if (tag != NULL)
        {
          if (! req->u.WAR.preload)
            {
              copy_line_to_return_buffer (cache, pipe, tag, address);
              set_return_buffer_reqno (cache, pipe, req->reqno);
            }
          else
            {
              invalidate_return_buffer (cache, address);
              if (req->u.WAR.lock)
                tag->locked = 1;
            }
          return;
        }
    }

  /* All cache lines in the set were locked, so just copy the data to the
     return buffer directly.  */
  if (! req->u.WAR.preload)
    {
      copy_memory_to_return_buffer (cache, pipe, address);
      set_return_buffer_reqno (cache, pipe, req->reqno);
    }
}
/* Resolve any conflicts and/or execute the given requests.  */
static void
arbitrate_requests (FRV_CACHE *cache)
{
  int pipe;
  /* Simply execute the requests in the final pipeline stages.  */
  for (pipe = LS; pipe < FRV_CACHE_PIPELINES; ++pipe)
    {
      FRV_CACHE_REQUEST *req
        = pipeline_stage_request (& cache->pipeline[pipe], LAST_STAGE);
      /* Make sure that there is a request to handle.  */
      if (req == NULL)
        continue;

      /* Handle the request.  */
      switch (req->kind)
        {
        case req_load:
          handle_req_load (cache, pipe, req);
          break;
        case req_store:
          handle_req_store (cache, pipe, req);
          break;
        case req_invalidate:
          handle_req_invalidate (cache, pipe, req);
          break;
        case req_preload:
          handle_req_preload (cache, pipe, req);
          break;
        case req_unlock:
          handle_req_unlock (cache, pipe, req);
          break;
        case req_WAR:
          handle_req_WAR (cache, pipe, req);
          break;
        default:
          abort ();
        }
    }
}
/* Move a waiting ARS register to a free WAR register.  */
static void
move_ARS_to_WAR (FRV_CACHE *cache, int pipe, FRV_CACHE_WAR *war)
{
  /* If BARS is valid for this pipe, then move it to the given WAR.  Move
     NARS to BARS if it is valid.  */
  if (cache->BARS.valid && cache->BARS.pipe == pipe)
    {
      war->address = cache->BARS.address;
      war->reqno = cache->BARS.reqno;
      war->priority = cache->BARS.priority;
      war->preload = cache->BARS.preload;
      war->lock = cache->BARS.lock;
      war->latency = cache->memory_latency + 1;
      war->valid = 1;
      if (cache->NARS.valid)
        {
          cache->BARS = cache->NARS;
          cache->NARS.valid = 0;
        }
      else
        cache->BARS.valid = 0;
      return;
    }
  /* If NARS is valid for this pipe, then move it to the given WAR.  */
  if (cache->NARS.valid && cache->NARS.pipe == pipe)
    {
      war->address = cache->NARS.address;
      war->reqno = cache->NARS.reqno;
      war->priority = cache->NARS.priority;
      war->preload = cache->NARS.preload;
      war->lock = cache->NARS.lock;
      war->latency = cache->memory_latency + 1;
      war->valid = 1;
      cache->NARS.valid = 0;
    }
}
/* Decrease the latencies of the various states in the cache.  */
static void
decrease_latencies (FRV_CACHE *cache)
{
  int pipe, j;
  /* Check the WAR registers.  */
  for (pipe = LS; pipe < FRV_CACHE_PIPELINES; ++pipe)
    {
      FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];
      for (j = 0; j < NUM_WARS; ++j)
        {
          FRV_CACHE_WAR *war = & pipeline->WAR[j];
          if (war->valid)
            {
              --war->latency;
              /* If the latency has expired, then submit a WAR request to the
                 pipeline.  */
              if (war->latency <= 0)
                {
                  add_WAR_request (pipeline, war);
                  war->valid = 0;
                  move_ARS_to_WAR (cache, pipe, war);
                }
            }
        }
    }
}
/* Run the cache for the given number of cycles.  */
void
frv_cache_run (FRV_CACHE *cache, int cycles)
{
  int i;
  for (i = 0; i < cycles; ++i)
    {
      advance_pipelines (cache);
      arbitrate_requests (cache);
      decrease_latencies (cache);
    }
}
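
/* A typical cycle accurate driver queues work with the frv_cache_request_*
   functions above, calls frv_cache_run once per simulated cycle, and then
   polls frv_cache_data_in_buffer or frv_cache_data_flushed below to see
   whether a given request has completed.  */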
/* Passively read a word from the given cache, without updating its LRU
   state or statistics.  Returns 1 and stores the word in *value on a hit;
   returns 0 otherwise.  */
int
frv_cache_read_passive_SI (FRV_CACHE *cache, SI address, SI *value)
{
  SI offset;
  FRV_CACHE_TAG *tag;

  if (non_cache_access (cache, address))
    return 0;

  {
    FRV_CACHE_STATISTICS save_stats = cache->statistics;
    int found = get_tag (cache, address, &tag);
    cache->statistics = save_stats;

    if (! found)
      return 0; /* Indicate non-cache-access.  */
  }

  /* A cache line was available for the data.
     Extract the target data from the line.  */
  offset = address & (cache->line_size - 1);
  *value = T2H_4 (*(SI *)(tag->line + offset));
  return 1;
}
/* Check the return buffers of the data cache to see if the requested data is
   available.  */
int
frv_cache_data_in_buffer (FRV_CACHE* cache, int pipe, SI address,
                          unsigned reqno)
{
  return cache->pipeline[pipe].status.return_buffer.valid
    && cache->pipeline[pipe].status.return_buffer.reqno == reqno
    && cache->pipeline[pipe].status.return_buffer.address <= address
    && cache->pipeline[pipe].status.return_buffer.address + cache->line_size
       > address;
}
/* Check to see if the requested data has been flushed.  */
int
frv_cache_data_flushed (FRV_CACHE* cache, int pipe, SI address, unsigned reqno)
{
  return cache->pipeline[pipe].status.flush.valid
    && cache->pipeline[pipe].status.flush.reqno == reqno
    && cache->pipeline[pipe].status.flush.address <= address
    && cache->pipeline[pipe].status.flush.address + cache->line_size
       > address;
}