/* frv cache model.
   Copyright (C) 1999-2023 Free Software Foundation, Inc.
   Contributed by Red Hat.

This file is part of the GNU simulators.

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
/* This must come before any other includes.  */
#include "defs.h"

#define WANT_CPU frvbf
#define WANT_CPU_FRVBF

#include "libiberty.h"

#include "sim-main.h"
#include "cache.h"
#include "bfd.h"
#include <stdlib.h>
void
frv_cache_init (SIM_CPU *cpu, FRV_CACHE *cache)
{
  int elements;
  int i, j;
  SIM_DESC sd;

  /* Set defaults for fields which are not initialized.  */
  sd = CPU_STATE (cpu);
  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr400:
    case bfd_mach_fr450:
      if (cache->configured_sets == 0)
        cache->configured_sets = 512;
      if (cache->configured_ways == 0)
        cache->configured_ways = 2;
      if (cache->line_size == 0)
        cache->line_size = 32;
      if (cache->memory_latency == 0)
        cache->memory_latency = 20;
      break;
    case bfd_mach_fr550:
      if (cache->configured_sets == 0)
        cache->configured_sets = 128;
      if (cache->configured_ways == 0)
        cache->configured_ways = 4;
      if (cache->line_size == 0)
        cache->line_size = 64;
      if (cache->memory_latency == 0)
        cache->memory_latency = 20;
      break;
    default:
      if (cache->configured_sets == 0)
        cache->configured_sets = 64;
      if (cache->configured_ways == 0)
        cache->configured_ways = 4;
      if (cache->line_size == 0)
        cache->line_size = 64;
      if (cache->memory_latency == 0)
        cache->memory_latency = 20;
      break;
    }

  frv_cache_reconfigure (cpu, cache);

  /* First allocate the cache storage based on the given dimensions.  */
  elements = cache->sets * cache->ways;
  cache->tag_storage = (FRV_CACHE_TAG *)
    zalloc (elements * sizeof (*cache->tag_storage));
  cache->data_storage = (char *) xmalloc (elements * cache->line_size);

  /* Initialize the pipelines and status buffers.  */
  for (i = LS; i < FRV_CACHE_PIPELINES; ++i)
    {
      cache->pipeline[i].requests = NULL;
      cache->pipeline[i].status.flush.valid = 0;
      cache->pipeline[i].status.return_buffer.valid = 0;
      cache->pipeline[i].status.return_buffer.data
        = (char *) xmalloc (cache->line_size);
      for (j = FIRST_STAGE; j < FRV_CACHE_STAGES; ++j)
        cache->pipeline[i].stages[j].request = NULL;
    }
  cache->BARS.valid = 0;
  cache->NARS.valid = 0;

  /* Now set the cache state.  */
  cache->cpu = cpu;
  cache->statistics.accesses = 0;
  cache->statistics.hits = 0;
}
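
/* For example, the fr550 defaults above describe a 32KB cache: 128 sets
   x 4 ways x 64-byte lines = 32768 bytes of data storage, with one
   FRV_CACHE_TAG per line (512 tags).  The fr400/fr450 defaults also give
   32KB (512 x 2 x 32), while the fallback geometry gives 16KB
   (64 x 4 x 64).  */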

void
frv_cache_term (FRV_CACHE *cache)
{
  /* Free the cache storage.  */
  free (cache->tag_storage);
  free (cache->data_storage);
  free (cache->pipeline[LS].status.return_buffer.data);
  free (cache->pipeline[LD].status.return_buffer.data);
}

/* Reset the cache configuration based on registers in the cpu.  */
void
frv_cache_reconfigure (SIM_CPU *current_cpu, FRV_CACHE *cache)
{
  int ihsr8;
  int icdm;
  SIM_DESC sd;

  /* Set defaults for fields which are not initialized.  */
  sd = CPU_STATE (current_cpu);
  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr550:
      if (cache == CPU_INSN_CACHE (current_cpu))
        {
          ihsr8 = GET_IHSR8 ();
          icdm = GET_IHSR8_ICDM (ihsr8);
          /* If IHSR8.ICDM is set, then the cache becomes a one way cache.  */
          if (icdm)
            {
              cache->sets = cache->sets * cache->ways;
              cache->ways = 1;
              break;
            }
        }
      /* fall through */
    default:
      /* Set the cache to its original settings.  */
      cache->sets = cache->configured_sets;
      cache->ways = cache->configured_ways;
      break;
    }
}

/* Determine whether the given cache is enabled.  */
int
frv_cache_enabled (FRV_CACHE *cache)
{
  SIM_CPU *current_cpu = cache->cpu;
  int hsr0 = GET_HSR0 ();
  if (GET_HSR0_ICE (hsr0) && cache == CPU_INSN_CACHE (current_cpu))
    return 1;
  if (GET_HSR0_DCE (hsr0) && cache == CPU_DATA_CACHE (current_cpu))
    return 1;
  return 0;
}

/* Determine whether the given address is RAM access, assuming that HSR0.RME
   is set.  */
static int
ram_access (FRV_CACHE *cache, USI address)
{
  int ihsr8;
  int cwe;
  USI start, end, way_size;
  SIM_CPU *current_cpu = cache->cpu;
  SIM_DESC sd = CPU_STATE (current_cpu);

  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr550:
      /* IHSR8.DCWE or IHSR8.ICWE determines which ways get RAM access.  */
      ihsr8 = GET_IHSR8 ();
      if (cache == CPU_INSN_CACHE (current_cpu))
        {
          start = 0xfe000000;
          end = 0xfe008000;
          cwe = GET_IHSR8_ICWE (ihsr8);
        }
      else
        {
          start = 0xfe400000;
          end = 0xfe408000;
          cwe = GET_IHSR8_DCWE (ihsr8);
        }
      way_size = (end - start) / 4;
      end -= way_size * cwe;
      return address >= start && address < end;
    default:
      break;
    }

  return 1; /* RAM access */
}

/* Determine whether the given address should be accessed without using
   the cache.  */
static int
non_cache_access (FRV_CACHE *cache, USI address)
{
  int hsr0;
  SIM_DESC sd;
  SIM_CPU *current_cpu = cache->cpu;

  sd = CPU_STATE (current_cpu);
  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr400:
    case bfd_mach_fr450:
      if (address >= 0xff000000
          || (address >= 0xfe000000 && address <= 0xfeffffff))
        return 1; /* non-cache access */
      break;
    case bfd_mach_fr550:
      if (address >= 0xff000000
          || (address >= 0xfeff0000 && address <= 0xfeffffff))
        return 1; /* non-cache access */
      if (cache == CPU_INSN_CACHE (current_cpu))
        {
          if (address >= 0xfe000000 && address <= 0xfe007fff)
            return 1; /* non-cache access */
        }
      else if (address >= 0xfe400000 && address <= 0xfe407fff)
        return 1; /* non-cache access */
      break;
    default:
      if (address >= 0xff000000
          || (address >= 0xfeff0000 && address <= 0xfeffffff))
        return 1; /* non-cache access */
      if (cache == CPU_INSN_CACHE (current_cpu))
        {
          if (address >= 0xfe000000 && address <= 0xfe003fff)
            return 1; /* non-cache access */
        }
      else if (address >= 0xfe400000 && address <= 0xfe403fff)
        return 1; /* non-cache access */
      break;
    }

  hsr0 = GET_HSR0 ();
  if (GET_HSR0_RME (hsr0))
    return ram_access (cache, address);

  return 0; /* cache-access */
}
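
/* The checks above encode the simulator's memory map: the 0xffxxxxxx I/O
   region is never cached on any machine; fr400/fr450 additionally bypass
   the cache for all of 0xfexxxxxx, while fr550 and the remaining machines
   bypass only 0xfeff0000-0xfeffffff plus the insn/data cache RAM windows
   at 0xfe000000/0xfe400000 (32KB windows on fr550, 16KB elsewhere).  */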

/* Find the cache line corresponding to the given address.
   If it is found then 'return_tag' is set to point to the tag for that line
   and 1 is returned.
   If it is not found, 'return_tag' is set to point to the tag for the least
   recently used line and 0 is returned.  */
static int
get_tag (FRV_CACHE *cache, SI address, FRV_CACHE_TAG **return_tag)
{
  int set;
  int way;
  int bits;
  USI tag;
  FRV_CACHE_TAG *found;
  FRV_CACHE_TAG *available;

  ++cache->statistics.accesses;

  /* First calculate which set this address will fall into. Do this by
     shifting out the bits representing the offset within the line and
     then keeping enough bits to index the set.  */
  set = address & ~(cache->line_size - 1);
  for (bits = cache->line_size - 1; bits != 0; bits >>= 1)
    set >>= 1;
  set &= (cache->sets - 1);

  /* Now search the set for a valid tag which matches this address. At the
     same time make note of the least recently used tag, which we will return
     if no match is found.  */
  available = NULL;
  tag = CACHE_ADDRESS_TAG (cache, address);
  for (way = 0; way < cache->ways; ++way)
    {
      found = CACHE_TAG (cache, set, way);
      /* This tag is available as the least recently used if it is the
         least recently used seen so far and it is not locked.  */
      if (! found->locked && (available == NULL || available->lru > found->lru))
        available = found;
      if (found->valid && found->tag == tag)
        {
          *return_tag = found;
          ++cache->statistics.hits;
          return 1; /* found it */
        }
    }

  *return_tag = available;
  return 0; /* not found */
}
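
/* Worked example of the set calculation above: with 64-byte lines the
   loop shifts the address right 6 bits, and with 128 sets the mask keeps
   the low 7 bits of the result, so for address 0x1234 the set is
   (0x1234 >> 6) & 127 = 0x48.  CACHE_ADDRESS_TAG then keeps the address
   bits above the set and offset fields for the match.  */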

/* Write the given data out to memory.  */
static void
write_data_to_memory (FRV_CACHE *cache, SI address, char *data, int length)
{
  SIM_CPU *cpu = cache->cpu;
  IADDR pc = CPU_PC_GET (cpu);
  int write_index = 0;

  switch (length)
    {
    case 1:
    default:
      PROFILE_COUNT_WRITE (cpu, address, MODE_QI);
      break;
    case 2:
      PROFILE_COUNT_WRITE (cpu, address, MODE_HI);
      break;
    case 4:
      PROFILE_COUNT_WRITE (cpu, address, MODE_SI);
      break;
    case 8:
      PROFILE_COUNT_WRITE (cpu, address, MODE_DI);
      break;
    }

  for (write_index = 0; write_index < length; ++write_index)
    {
      /* TODO: Better way to copy memory than a byte at a time?  */
      sim_core_write_unaligned_1 (cpu, pc, write_map, address + write_index,
                                  data[write_index]);
    }
}

/* Write a cache line out to memory.  */
static void
write_line_to_memory (FRV_CACHE *cache, FRV_CACHE_TAG *tag)
{
  SI address = tag->tag;
  int set = CACHE_TAG_SET_NUMBER (cache, tag);
  int bits;
  for (bits = cache->line_size - 1; bits != 0; bits >>= 1)
    set <<= 1;
  address |= set;
  write_data_to_memory (cache, address, tag->line, cache->line_size);
}
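
/* The address reconstruction above is the inverse of the set calculation
   in get_tag: tag->tag holds the address bits above the set and offset
   fields, so shifting the set number left by log2(line_size) and ORing
   it back in recovers the base address of the line.  */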

static void
read_data_from_memory (SIM_CPU *current_cpu, SI address, char *buffer,
                       int length)
{
  PCADDR pc = CPU_PC_GET (current_cpu);
  int i;
  PROFILE_COUNT_READ (current_cpu, address, MODE_QI);
  for (i = 0; i < length; ++i)
    {
      /* TODO: Better way to copy memory than a byte at a time?  */
      buffer[i] = sim_core_read_unaligned_1 (current_cpu, pc, read_map,
                                             address + i);
    }
}

/* Fill the given cache line from memory.  */
static void
fill_line_from_memory (FRV_CACHE *cache, FRV_CACHE_TAG *tag, SI address)
{
  PCADDR pc;
  int line_alignment;
  SI read_address;
  SIM_CPU *current_cpu = cache->cpu;

  /* If this line is already valid and the cache is in copy-back mode, then
     write this line to memory before refilling it.
     Check the dirty bit first, since it is less likely to be set.  */
  if (tag->dirty && tag->valid)
    {
      int hsr0 = GET_HSR0 ();
      if (GET_HSR0_CBM (hsr0))
        write_line_to_memory (cache, tag);
    }
  else if (tag->line == NULL)
    {
      int line_index = tag - cache->tag_storage;
      tag->line = cache->data_storage + (line_index * cache->line_size);
    }

  pc = CPU_PC_GET (current_cpu);
  line_alignment = cache->line_size - 1;
  read_address = address & ~line_alignment;
  read_data_from_memory (current_cpu, read_address, tag->line,
                         cache->line_size);
  tag->tag = CACHE_ADDRESS_TAG (cache, address);
  tag->valid = 1;
}

/* Update the LRU information for the tags in the same set as the given tag.  */
static void
set_most_recently_used (FRV_CACHE *cache, FRV_CACHE_TAG *tag)
{
  /* All tags in the same set are contiguous, so find the beginning of the
     set by aligning to the size of a set.  */
  FRV_CACHE_TAG *item = cache->tag_storage + CACHE_TAG_SET_START (cache, tag);
  FRV_CACHE_TAG *limit = item + cache->ways;

  while (item < limit)
    {
      if (item->lru > tag->lru)
        --item->lru;
      ++item;
    }
  tag->lru = cache->ways; /* Mark as most recently used.  */
}
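
/* Example with 4 ways: once a set is fully in use its lru values are a
   permutation of 1..4, highest meaning most recently used.  If they are
   A=4 B=3 C=2 D=1 and C is touched, A and B (lru > 2) are decremented
   and C becomes 4, giving A=3 B=2 C=4 D=1.  Freshly zeroed tags sort
   below everything else, so empty ways are preferred by get_tag.  */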

/* Update the LRU information for the tags in the same set as the given tag.  */
static void
set_least_recently_used (FRV_CACHE *cache, FRV_CACHE_TAG *tag)
{
  /* All tags in the same set are contiguous, so find the beginning of the
     set by aligning to the size of a set.  */
  FRV_CACHE_TAG *item = cache->tag_storage + CACHE_TAG_SET_START (cache, tag);
  FRV_CACHE_TAG *limit = item + cache->ways;

  while (item < limit)
    {
      if (item->lru != 0 && item->lru < tag->lru)
        ++item->lru;
      ++item;
    }
  tag->lru = 0; /* Mark as least recently used.  */
}

/* Find the line containing the given address and load it if it is not
   already loaded.
   Returns the tag of the requested line.  */
static FRV_CACHE_TAG *
find_or_retrieve_cache_line (FRV_CACHE *cache, SI address)
{
  /* See if this data is already in the cache.  */
  FRV_CACHE_TAG *tag;
  int found = get_tag (cache, address, &tag);

  /* Fill the line from memory, if it is not valid.  */
  if (! found)
    {
      /* The tag could be NULL if all ways in the set were used and locked.  */
      if (tag == NULL)
        return tag;

      fill_line_from_memory (cache, tag, address);
      tag->dirty = 0;
    }

  /* Update the LRU information for the tags in this set.  */
  set_most_recently_used (cache, tag);

  return tag;
}

static void
copy_line_to_return_buffer (FRV_CACHE *cache, int pipe, FRV_CACHE_TAG *tag,
                            SI address)
{
  /* A cache line was available for the data.
     Copy the data from the cache line to the output buffer.  */
  memcpy (cache->pipeline[pipe].status.return_buffer.data,
          tag->line, cache->line_size);
  cache->pipeline[pipe].status.return_buffer.address
    = address & ~(cache->line_size - 1);
  cache->pipeline[pipe].status.return_buffer.valid = 1;
}

static void
copy_memory_to_return_buffer (FRV_CACHE *cache, int pipe, SI address)
{
  address &= ~(cache->line_size - 1);
  read_data_from_memory (cache->cpu, address,
                         cache->pipeline[pipe].status.return_buffer.data,
                         cache->line_size);
  cache->pipeline[pipe].status.return_buffer.address = address;
  cache->pipeline[pipe].status.return_buffer.valid = 1;
}

static void
set_return_buffer_reqno (FRV_CACHE *cache, int pipe, unsigned reqno)
{
  cache->pipeline[pipe].status.return_buffer.reqno = reqno;
}

/* Read data from the given cache.
   Returns the number of cycles required to obtain the data.  */
int
frv_cache_read (FRV_CACHE *cache, int pipe, SI address)
{
  FRV_CACHE_TAG *tag;

  if (non_cache_access (cache, address))
    {
      copy_memory_to_return_buffer (cache, pipe, address);
      return 1;
    }

  tag = find_or_retrieve_cache_line (cache, address);

  if (tag == NULL)
    return 0; /* Indicate non-cache-access.  */

  /* A cache line was available for the data.
     Copy the data from the cache line to the output buffer.  */
  copy_line_to_return_buffer (cache, pipe, tag, address);

  return 1; /* TODO - number of cycles unknown */
}

/* Writes data through the given cache.
   The data is assumed to be in target endian order.
   Returns the number of cycles required to write the data.  */
int
frv_cache_write (FRV_CACHE *cache, SI address, char *data, unsigned length)
{
  int copy_back;
  int found;
  FRV_CACHE_TAG *tag;

  /* See if this data is already in the cache.  */
  SIM_CPU *current_cpu = cache->cpu;
  USI hsr0 = GET_HSR0 ();

  if (non_cache_access (cache, address))
    {
      write_data_to_memory (cache, address, data, length);
      return 1;
    }

  found = get_tag (cache, address, &tag);

  /* Write the data to the cache line if one was available and if it is
     either a hit or a miss in copy-back mode.
     The tag may be NULL if all ways were in use and locked on a miss.  */
  copy_back = GET_HSR0_CBM (GET_HSR0 ());
  if (tag != NULL && (found || copy_back))
    {
      int line_offset;
      /* Load the line from memory first, if it was a miss.  */
      if (! found)
        fill_line_from_memory (cache, tag, address);
      line_offset = address & (cache->line_size - 1);
      memcpy (tag->line + line_offset, data, length);
      tag->dirty = 1;

      /* Update the LRU information for the tags in this set.  */
      set_most_recently_used (cache, tag);
    }

  /* Write the data to memory if there was no line available or we are in
     write-through (not copy-back mode).  */
  if (tag == NULL || ! copy_back)
    {
      write_data_to_memory (cache, address, data, length);
      if (tag != NULL)
        tag->dirty = 0;
    }

  return 1; /* TODO - number of cycles unknown */
}

/* Preload the cache line containing the given address. Lock the
   line if requested.
   Returns the number of cycles required to write the data.  */
int
frv_cache_preload (FRV_CACHE *cache, SI address, USI length, int lock)
{
  int offset;
  int lines;

  if (non_cache_access (cache, address))
    return 1;

  /* preload at least 1 line.  */
  if (length == 0)
    length = 1;

  offset = address & (cache->line_size - 1);
  lines = 1 + (offset + length - 1) / cache->line_size;

  /* Careful with this loop -- length is unsigned.  */
  for (/**/; lines > 0; --lines)
    {
      FRV_CACHE_TAG *tag = find_or_retrieve_cache_line (cache, address);
      if (lock && tag != NULL)
        tag->locked = 1;
      address += cache->line_size;
    }

  return 1; /* TODO - number of cycles unknown */
}
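
/* Example of the line count above: preloading length 0x100 from address
   0x1010 with 64-byte lines gives offset 0x10 and
   lines = 1 + (0x10 + 0x100 - 1) / 64 = 5, since the transfer straddles
   five lines (base addresses 0x1000 through 0x1100).  */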

/* Unlock the cache line containing the given address.
   Returns the number of cycles required to unlock the line.  */
int
frv_cache_unlock (FRV_CACHE *cache, SI address)
{
  FRV_CACHE_TAG *tag;
  int found;

  if (non_cache_access (cache, address))
    return 1;

  found = get_tag (cache, address, &tag);

  if (found)
    tag->locked = 0;

  return 1; /* TODO - number of cycles unknown */
}

static void
invalidate_return_buffer (FRV_CACHE *cache, SI address)
{
  /* If this address is in one of the return buffers, then invalidate that
     return buffer.  */
  address &= ~(cache->line_size - 1);
  if (address == cache->pipeline[LS].status.return_buffer.address)
    cache->pipeline[LS].status.return_buffer.valid = 0;
  if (address == cache->pipeline[LD].status.return_buffer.address)
    cache->pipeline[LD].status.return_buffer.valid = 0;
}

/* Invalidate the cache line containing the given address. Flush the
   data if requested.
   Returns the number of cycles required to write the data.  */
int
frv_cache_invalidate (FRV_CACHE *cache, SI address, int flush)
{
  /* See if this data is already in the cache.  */
  FRV_CACHE_TAG *tag;
  int found;

  /* Check for non-cache access. This operation is still performed even if
     the cache is not currently enabled.  */
  if (non_cache_access (cache, address))
    return 1;

  /* If the line is found, invalidate it. If a flush is requested, then flush
     it if it is dirty.  */
  found = get_tag (cache, address, &tag);
  if (found)
    {
      SIM_CPU *cpu;
      /* If a flush is requested, then flush it if it is dirty.  */
      if (tag->dirty && flush)
        write_line_to_memory (cache, tag);
      set_least_recently_used (cache, tag);
      tag->valid = 0;
      tag->locked = 0;

      /* If this is the insn cache, then flush the cpu's scache as well.  */
      cpu = cache->cpu;
      if (cache == CPU_INSN_CACHE (cpu))
        scache_flush_cpu (cpu);
    }

  invalidate_return_buffer (cache, address);

  return 1; /* TODO - number of cycles unknown */
}

/* Invalidate the entire cache. Flush the data if requested.  */
int
frv_cache_invalidate_all (FRV_CACHE *cache, int flush)
{
  /* See if this data is already in the cache.  */
  int elements = cache->sets * cache->ways;
  FRV_CACHE_TAG *tag = cache->tag_storage;
  SIM_CPU *cpu;
  int i;

  for(i = 0; i < elements; ++i, ++tag)
    {
      /* If a flush is requested, then flush it if it is dirty.  */
      if (tag->valid && tag->dirty && flush)
        write_line_to_memory (cache, tag);
      tag->valid = 0;
      tag->locked = 0;
    }

  /* If this is the insn cache, then flush the cpu's scache as well.  */
  cpu = cache->cpu;
  if (cache == CPU_INSN_CACHE (cpu))
    scache_flush_cpu (cpu);

  /* Invalidate both return buffers.  */
  cache->pipeline[LS].status.return_buffer.valid = 0;
  cache->pipeline[LD].status.return_buffer.valid = 0;

  return 1; /* TODO - number of cycles unknown */
}

/* ---------------------------------------------------------------------------
   Functions for operating the cache in cycle accurate mode.
   -------------------------------------------------------------------------  */
/* Convert a VLIW slot to a cache pipeline index.  */
static int
convert_slot_to_index (int slot)
{
  switch (slot)
    {
    case UNIT_I0:
    case UNIT_C:
      return LS;
    case UNIT_I1:
      return LD;
    default:
      abort ();
    }
  return 0;
}

/* Allocate free chains of cache requests.  */
#define FREE_CHAIN_SIZE 16
static FRV_CACHE_REQUEST *frv_cache_request_free_chain = NULL;
static FRV_CACHE_REQUEST *frv_store_request_free_chain = NULL;

static void
allocate_new_cache_requests (void)
{
  int i;
  frv_cache_request_free_chain = xmalloc (FREE_CHAIN_SIZE
                                          * sizeof (FRV_CACHE_REQUEST));
  for (i = 0; i < FREE_CHAIN_SIZE - 1; ++i)
    {
      frv_cache_request_free_chain[i].next
        = & frv_cache_request_free_chain[i + 1];
    }

  frv_cache_request_free_chain[FREE_CHAIN_SIZE - 1].next = NULL;
}
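
/* Requests are recycled through these free chains rather than malloc'd
   per access: allocate_new_cache_requests grabs 16 at a time and links
   them, new_cache_request pops the head, and free_cache_request pushes
   finished requests back (store requests keep their data buffer on a
   separate chain so buffers can be reused by size).  */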

/* Return the next free request in the queue for the given cache pipeline.  */
static FRV_CACHE_REQUEST *
new_cache_request (void)
{
  FRV_CACHE_REQUEST *req;

  /* Allocate new elements for the free chain if necessary.  */
  if (frv_cache_request_free_chain == NULL)
    allocate_new_cache_requests ();

  req = frv_cache_request_free_chain;
  frv_cache_request_free_chain = req->next;

  return req;
}

/* Return the given cache request to the free chain.  */
static void
free_cache_request (FRV_CACHE_REQUEST *req)
{
  if (req->kind == req_store)
    {
      req->next = frv_store_request_free_chain;
      frv_store_request_free_chain = req;
    }
  else
    {
      req->next = frv_cache_request_free_chain;
      frv_cache_request_free_chain = req;
    }
}

/* Search the free chain for an existing store request with a buffer that's
   large enough.  */
static FRV_CACHE_REQUEST *
new_store_request (int length)
{
  FRV_CACHE_REQUEST *prev = NULL;
  FRV_CACHE_REQUEST *req;
  for (req = frv_store_request_free_chain; req != NULL; req = req->next)
    {
      if (req->u.store.length == length)
        break;
      prev = req;
    }
  if (req != NULL)
    {
      /* Unlink the found request from the free chain and reuse it.  */
      if (prev == NULL)
        frv_store_request_free_chain = req->next;
      else
        prev->next = req->next;
      return req;
    }

  /* No existing request buffer was found, so make a new one.  */
  req = new_cache_request ();
  req->kind = req_store;
  req->u.store.data = xmalloc (length);
  req->u.store.length = length;
  return req;
}

/* Remove the given request from the given pipeline.  */
static void
pipeline_remove_request (FRV_CACHE_PIPELINE *p, FRV_CACHE_REQUEST *request)
{
  FRV_CACHE_REQUEST *next = request->next;
  FRV_CACHE_REQUEST *prev = request->prev;

  if (prev == NULL)
    p->requests = next;
  else
    prev->next = next;

  if (next != NULL)
    next->prev = prev;
}

/* Add the given request to the given pipeline.  */
static void
pipeline_add_request (FRV_CACHE_PIPELINE *p, FRV_CACHE_REQUEST *request)
{
  FRV_CACHE_REQUEST *prev = NULL;
  FRV_CACHE_REQUEST *item;

  /* Add the request in priority order. 0 is the highest priority.  */
  for (item = p->requests; item != NULL; item = item->next)
    {
      if (item->priority > request->priority)
        break;
      prev = item;
    }

  request->next = item;
  request->prev = prev;
  if (prev == NULL)
    p->requests = request;
  else
    prev->next = request;
  if (item != NULL)
    item->prev = request;
}
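
/* Example: inserting a request with priority 3 into a queue holding
   priorities 1, 2 and 4 walks past 1 and 2, stops at 4, and links the
   new request between 2 and 4, so the queue stays sorted with the
   highest priority (lowest number) at the head.  */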

/* Requeue the given request from the last stage of the given pipeline.  */
static void
pipeline_requeue_request (FRV_CACHE_PIPELINE *p)
{
  FRV_CACHE_STAGE *stage = & p->stages[LAST_STAGE];
  FRV_CACHE_REQUEST *req = stage->request;
  stage->request = NULL;
  pipeline_add_request (p, req);
}

/* Return the priority lower than the lowest one in this cache pipeline.
   0 is the highest priority.  */
static int
next_priority (FRV_CACHE *cache, FRV_CACHE_PIPELINE *pipeline)
{
  int i;
  int pipe;
  int lowest = 0;
  FRV_CACHE_REQUEST *req;

  /* Check the priorities of any queued items.  */
  for (req = pipeline->requests; req != NULL; req = req->next)
    if (req->priority > lowest)
      lowest = req->priority;

  /* Check the priorities of items in the pipeline stages.  */
  for (i = FIRST_STAGE; i < FRV_CACHE_STAGES; ++i)
    {
      FRV_CACHE_STAGE *stage = & pipeline->stages[i];
      if (stage->request != NULL && stage->request->priority > lowest)
        lowest = stage->request->priority;
    }

  /* Check the priorities of load requests waiting in WAR. These are one
     higher than the request that spawned them.  */
  for (i = 0; i < NUM_WARS; ++i)
    {
      FRV_CACHE_WAR *war = & pipeline->WAR[i];
      if (war->valid && war->priority > lowest)
        lowest = war->priority + 1;
    }

  /* Check the priorities of any BARS or NARS associated with this pipeline.
     These are one higher than the request that spawned them.  */
  pipe = pipeline - cache->pipeline;
  if (cache->BARS.valid && cache->BARS.pipe == pipe
      && cache->BARS.priority > lowest)
    lowest = cache->BARS.priority + 1;
  if (cache->NARS.valid && cache->NARS.pipe == pipe
      && cache->NARS.priority > lowest)
    lowest = cache->NARS.priority + 1;

  /* Return a priority 2 lower than the lowest found. This allows a WAR
     request to be generated with a priority greater than this but less than
     the next higher priority request.  */
  return lowest + 2;
}
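
/* Example: if the pipeline queue holds priorities 2 and 4 and a valid
   WAR entry has priority 5, the lowest found becomes 5 + 1 = 6 and
   next_priority returns 8.  A WAR spawned later by that new request
   then takes priority 7 (see wait_in_WAR), which falls between the two
   without colliding with either neighbour.  */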

static void
add_WAR_request (FRV_CACHE_PIPELINE *pipeline, FRV_CACHE_WAR *war)
{
  /* Add the load request to the indexed pipeline.  */
  FRV_CACHE_REQUEST *req = new_cache_request ();
  req->kind = req_WAR;
  req->reqno = war->reqno;
  req->priority = war->priority;
  req->address = war->address;
  req->u.WAR.preload = war->preload;
  req->u.WAR.lock = war->lock;
  pipeline_add_request (pipeline, req);
}

/* Remove the next request from the given pipeline and return it.  */
static FRV_CACHE_REQUEST *
pipeline_next_request (FRV_CACHE_PIPELINE *p)
{
  FRV_CACHE_REQUEST *first = p->requests;
  if (first != NULL)
    pipeline_remove_request (p, first);
  return first;
}

/* Return the request which is at the given stage of the given pipeline.  */
static FRV_CACHE_REQUEST *
pipeline_stage_request (FRV_CACHE_PIPELINE *p, int stage)
{
  return p->stages[stage].request;
}

static void
advance_pipelines (FRV_CACHE *cache)
{
  int stage;
  int pipe;
  FRV_CACHE_PIPELINE *pipelines = cache->pipeline;

  /* Free the final stage requests.  */
  for (pipe = 0; pipe < FRV_CACHE_PIPELINES; ++pipe)
    {
      FRV_CACHE_REQUEST *req = pipelines[pipe].stages[LAST_STAGE].request;
      if (req != NULL)
        free_cache_request (req);
    }

  /* Shuffle the requests along the pipeline.  */
  for (stage = LAST_STAGE; stage > FIRST_STAGE; --stage)
    {
      for (pipe = 0; pipe < FRV_CACHE_PIPELINES; ++pipe)
        pipelines[pipe].stages[stage] = pipelines[pipe].stages[stage - 1];
    }

  /* Add a new request to the pipeline.  */
  for (pipe = 0; pipe < FRV_CACHE_PIPELINES; ++pipe)
    pipelines[pipe].stages[FIRST_STAGE].request
      = pipeline_next_request (& pipelines[pipe]);
}

/* Handle a request for a load from the given address.  */
void
frv_cache_request_load (FRV_CACHE *cache, unsigned reqno, SI address, int slot)
{
  FRV_CACHE_REQUEST *req;

  /* slot is a UNIT_*. Convert it to a cache pipeline index.  */
  int pipe = convert_slot_to_index (slot);
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Add the load request to the indexed pipeline.  */
  req = new_cache_request ();
  req->kind = req_load;
  req->reqno = reqno;
  req->priority = next_priority (cache, pipeline);
  req->address = address;

  pipeline_add_request (pipeline, req);
}

void
frv_cache_request_store (FRV_CACHE *cache, SI address,
                         int slot, char *data, unsigned length)
{
  FRV_CACHE_REQUEST *req;

  /* slot is a UNIT_*. Convert it to a cache pipeline index.  */
  int pipe = convert_slot_to_index (slot);
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Add the store request to the indexed pipeline.  */
  req = new_store_request (length);
  req->kind = req_store;
  req->reqno = NO_REQNO;
  req->priority = next_priority (cache, pipeline);
  req->address = address;
  req->u.store.length = length;
  memcpy (req->u.store.data, data, length);

  pipeline_add_request (pipeline, req);
  invalidate_return_buffer (cache, address);
}

/* Handle a request to invalidate the cache line containing the given address.
   Flush the data if requested.  */
void
frv_cache_request_invalidate (FRV_CACHE *cache, unsigned reqno, SI address,
                              int slot, int all, int flush)
{
  FRV_CACHE_REQUEST *req;

  /* slot is a UNIT_*. Convert it to a cache pipeline index.  */
  int pipe = convert_slot_to_index (slot);
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Add the invalidate request to the indexed pipeline.  */
  req = new_cache_request ();
  req->kind = req_invalidate;
  req->reqno = reqno;
  req->priority = next_priority (cache, pipeline);
  req->address = address;
  req->u.invalidate.all = all;
  req->u.invalidate.flush = flush;

  pipeline_add_request (pipeline, req);
}

/* Handle a request to preload the cache line containing the given address.  */
void
frv_cache_request_preload (FRV_CACHE *cache, SI address,
                           int slot, int length, int lock)
{
  FRV_CACHE_REQUEST *req;

  /* slot is a UNIT_*. Convert it to a cache pipeline index.  */
  int pipe = convert_slot_to_index (slot);
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Add the preload request to the indexed pipeline.  */
  req = new_cache_request ();
  req->kind = req_preload;
  req->reqno = NO_REQNO;
  req->priority = next_priority (cache, pipeline);
  req->address = address;
  req->u.preload.length = length;
  req->u.preload.lock = lock;

  pipeline_add_request (pipeline, req);
  invalidate_return_buffer (cache, address);
}

/* Handle a request to unlock the cache line containing the given address.  */
void
frv_cache_request_unlock (FRV_CACHE *cache, SI address, int slot)
{
  FRV_CACHE_REQUEST *req;

  /* slot is a UNIT_*. Convert it to a cache pipeline index.  */
  int pipe = convert_slot_to_index (slot);
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Add the unlock request to the indexed pipeline.  */
  req = new_cache_request ();
  req->kind = req_unlock;
  req->reqno = NO_REQNO;
  req->priority = next_priority (cache, pipeline);
  req->address = address;

  pipeline_add_request (pipeline, req);
}

/* Check whether this address interferes with a pending request of
   higher priority.  */
static int
address_interference (FRV_CACHE *cache, SI address, FRV_CACHE_REQUEST *req,
                      int pipe)
{
  int i, j;
  int line_mask = ~(cache->line_size - 1);
  int other_pipe;
  int priority = req->priority;
  FRV_CACHE_REQUEST *other_req;
  SI other_address;
  SI all_address;

  address &= line_mask;
  all_address = -1 & line_mask;

  /* Check for collisions in the queue for this pipeline.  */
  for (other_req = cache->pipeline[pipe].requests;
       other_req != NULL;
       other_req = other_req->next)
    {
      other_address = other_req->address & line_mask;
      if ((address == other_address || address == all_address)
          && priority > other_req->priority)
        return 1;
    }

  /* Check for a collision in the other pipeline.  */
  other_pipe = pipe ^ 1;
  other_req = cache->pipeline[other_pipe].stages[LAST_STAGE].request;
  if (other_req != NULL)
    {
      other_address = other_req->address & line_mask;
      if (address == other_address || address == all_address)
        return 1;
    }

  /* Check for a collision with load requests waiting in WAR.  */
  for (i = LS; i < FRV_CACHE_PIPELINES; ++i)
    {
      for (j = 0; j < NUM_WARS; ++j)
        {
          FRV_CACHE_WAR *war = & cache->pipeline[i].WAR[j];
          if (war->valid
              && (address == (war->address & line_mask)
                  || address == all_address)
              && priority > war->priority)
            return 1;
        }
      /* If this is not a WAR request, then yield to any WAR requests in
         either pipeline or to a higher priority request in the same pipeline.
      */
      if (req->kind != req_WAR)
        {
          for (j = FIRST_STAGE; j < FRV_CACHE_STAGES; ++j)
            {
              other_req = cache->pipeline[i].stages[j].request;
              if (other_req != NULL)
                {
                  if (other_req->kind == req_WAR)
                    return 1;
                  if (i == pipe
                      && (address == (other_req->address & line_mask)
                          || address == all_address)
                      && priority > other_req->priority)
                    return 1;
                }
            }
        }
    }

  /* Check for a collision with load requests waiting in ARS.  */
  if (cache->BARS.valid
      && (address == (cache->BARS.address & line_mask)
          || address == all_address)
      && priority > cache->BARS.priority)
    return 1;
  if (cache->NARS.valid
      && (address == (cache->NARS.address & line_mask)
          || address == all_address)
      && priority > cache->NARS.priority)
    return 1;

  return 0;
}

/* Wait for a free WAR register in BARS or NARS.  */
static void
wait_for_WAR (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  if (! cache->BARS.valid)
    {
      cache->BARS.pipe = pipe;
      cache->BARS.reqno = req->reqno;
      cache->BARS.address = req->address;
      cache->BARS.priority = req->priority - 1;
      switch (req->kind)
        {
        case req_load:
          cache->BARS.preload = 0;
          cache->BARS.lock = 0;
          break;
        case req_store:
          cache->BARS.preload = 1;
          cache->BARS.lock = 0;
          break;
        case req_preload:
          cache->BARS.preload = 1;
          cache->BARS.lock = req->u.preload.lock;
          break;
        }
      cache->BARS.valid = 1;
      return;
    }
  if (! cache->NARS.valid)
    {
      cache->NARS.pipe = pipe;
      cache->NARS.reqno = req->reqno;
      cache->NARS.address = req->address;
      cache->NARS.priority = req->priority - 1;
      switch (req->kind)
        {
        case req_load:
          cache->NARS.preload = 0;
          cache->NARS.lock = 0;
          break;
        case req_store:
          cache->NARS.preload = 1;
          cache->NARS.lock = 0;
          break;
        case req_preload:
          cache->NARS.preload = 1;
          cache->NARS.lock = req->u.preload.lock;
          break;
        }
      cache->NARS.valid = 1;
      return;
    }
  /* All wait registers are busy, so resubmit this request.  */
  pipeline_requeue_request (pipeline);
}

/* Find a free WAR register and wait for memory to fetch the data.  */
static void
wait_in_WAR (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  int war;
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Find a free WAR to hold this request.  */
  for (war = 0; war < NUM_WARS; ++war)
    if (! pipeline->WAR[war].valid)
      break;
  if (war >= NUM_WARS)
    {
      wait_for_WAR (cache, pipe, req);
      return;
    }

  pipeline->WAR[war].address = req->address;
  pipeline->WAR[war].reqno = req->reqno;
  pipeline->WAR[war].priority = req->priority - 1;
  pipeline->WAR[war].latency = cache->memory_latency + 1;
  switch (req->kind)
    {
    case req_load:
      pipeline->WAR[war].preload = 0;
      pipeline->WAR[war].lock = 0;
      break;
    case req_store:
      pipeline->WAR[war].preload = 1;
      pipeline->WAR[war].lock = 0;
      break;
    case req_preload:
      pipeline->WAR[war].preload = 1;
      pipeline->WAR[war].lock = req->u.preload.lock;
      break;
    }
  pipeline->WAR[war].valid = 1;
}
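
/* A request parked here stays in its WAR for memory_latency + 1 cycles
   (21 with the default latency of 20); decrease_latencies counts the
   latency down each cycle and then resubmits the access as a req_WAR
   via add_WAR_request.  */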

static void
handle_req_load (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_TAG *tag;
  SI address = req->address;

  /* If this address interferes with an existing request, then requeue it.  */
  if (address_interference (cache, address, req, pipe))
    {
      pipeline_requeue_request (& cache->pipeline[pipe]);
      return;
    }

  if (frv_cache_enabled (cache) && ! non_cache_access (cache, address))
    {
      int found = get_tag (cache, address, &tag);

      /* If the data was found, return it to the caller.  */
      if (found)
        {
          set_most_recently_used (cache, tag);
          copy_line_to_return_buffer (cache, pipe, tag, address);
          set_return_buffer_reqno (cache, pipe, req->reqno);
          return;
        }
    }

  /* The data is not in the cache or this is a non-cache access. We need to
     wait for the memory unit to fetch it. Store this request in the WAR in
     the meantime.  */
  wait_in_WAR (cache, pipe, req);
}

static void
handle_req_preload (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  int found;
  int lock;
  int length;
  int offset;
  int lines;
  int line;
  SI cur_address;
  FRV_CACHE_TAG *tag;
  SI address = req->address;

  if (! frv_cache_enabled (cache) || non_cache_access (cache, address))
    return;

  /* preload at least 1 line.  */
  length = req->u.preload.length;
  if (length == 0)
    length = 1;

  /* Make sure that this request does not interfere with a pending request.  */
  offset = address & (cache->line_size - 1);
  lines = 1 + (offset + length - 1) / cache->line_size;
  cur_address = address & ~(cache->line_size - 1);
  for (line = 0; line < lines; ++line)
    {
      /* If this address interferes with an existing request,
         then requeue it.  */
      if (address_interference (cache, cur_address, req, pipe))
        {
          pipeline_requeue_request (& cache->pipeline[pipe]);
          return;
        }
      cur_address += cache->line_size;
    }

  /* Now process each cache line.  */
  /* Careful with this loop -- length is unsigned.  */
  lock = req->u.preload.lock;
  cur_address = address & ~(cache->line_size - 1);
  for (line = 0; line < lines; ++line)
    {
      /* If the data was found, then lock it if requested.  */
      found = get_tag (cache, cur_address, &tag);
      if (found)
        {
          if (lock)
            tag->locked = 1;
        }
      else
        {
          /* The data is not in the cache. We need to wait for the memory
             unit to fetch it. Store this request in the WAR in the meantime.
          */
          wait_in_WAR (cache, pipe, req);
        }
      cur_address += cache->line_size;
    }
}

static void
handle_req_store (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  SIM_CPU *current_cpu;
  FRV_CACHE_TAG *tag;
  int found;
  int copy_back;
  SI address = req->address;
  char *data = req->u.store.data;
  int length = req->u.store.length;

  /* If this address interferes with an existing request, then requeue it.  */
  if (address_interference (cache, address, req, pipe))
    {
      pipeline_requeue_request (& cache->pipeline[pipe]);
      return;
    }

  /* Non-cache access. Write the data directly to memory.  */
  if (! frv_cache_enabled (cache) || non_cache_access (cache, address))
    {
      write_data_to_memory (cache, address, data, length);
      return;
    }

  /* See if the data is in the cache.  */
  found = get_tag (cache, address, &tag);

  /* Write the data to the cache line if one was available and if it is
     either a hit or a miss in copy-back mode.
     The tag may be NULL if all ways were in use and locked on a miss.  */
  current_cpu = cache->cpu;
  copy_back = GET_HSR0_CBM (GET_HSR0 ());
  if (tag != NULL && (found || copy_back))
    {
      int line_offset;
      /* Load the line from memory first, if it was a miss.  */
      if (! found)
        {
          /* We need to wait for the memory unit to fetch the data.
             Store this request in the WAR and requeue the store request.  */
          wait_in_WAR (cache, pipe, req);
          pipeline_requeue_request (& cache->pipeline[pipe]);
          /* Decrement the counts of accesses and hits because when the
             requeued request is processed again, it will appear to be a new
             access and hit.  */
          --cache->statistics.accesses;
          --cache->statistics.hits;
          return;
        }
      line_offset = address & (cache->line_size - 1);
      memcpy (tag->line + line_offset, data, length);
      invalidate_return_buffer (cache, address);
      tag->dirty = 1;

      /* Update the LRU information for the tags in this set.  */
      set_most_recently_used (cache, tag);
    }

  /* Write the data to memory if there was no line available or we are in
     write-through (not copy-back mode).  */
  if (tag == NULL || ! copy_back)
    {
      write_data_to_memory (cache, address, data, length);
      if (tag != NULL)
        tag->dirty = 0;
    }
}

static void
handle_req_invalidate (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];
  SI address = req->address;
  SI interfere_address = req->u.invalidate.all ? -1 : address;

  /* If this address interferes with an existing request, then requeue it.  */
  if (address_interference (cache, interfere_address, req, pipe))
    {
      pipeline_requeue_request (pipeline);
      return;
    }

  /* Invalidate the cache line now. This function already checks for
     non-cache access.  */
  if (req->u.invalidate.all)
    frv_cache_invalidate_all (cache, req->u.invalidate.flush);
  else
    frv_cache_invalidate (cache, address, req->u.invalidate.flush);
  if (req->u.invalidate.flush)
    {
      pipeline->status.flush.reqno = req->reqno;
      pipeline->status.flush.address = address;
      pipeline->status.flush.valid = 1;
    }
}

static void
handle_req_unlock (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];
  SI address = req->address;

  /* If this address interferes with an existing request, then requeue it.  */
  if (address_interference (cache, address, req, pipe))
    {
      pipeline_requeue_request (pipeline);
      return;
    }

  /* Unlock the cache line. This function checks for non-cache access.  */
  frv_cache_unlock (cache, address);
}

static void
handle_req_WAR (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_TAG *tag;
  SI address = req->address;

  if (frv_cache_enabled (cache) && ! non_cache_access (cache, address))
    {
      /* Look for the data in the cache. The statistics of cache hit or
         miss have already been recorded, so save and restore the stats before
         and after obtaining the cache line.  */
      FRV_CACHE_STATISTICS save_stats = cache->statistics;
      tag = find_or_retrieve_cache_line (cache, address);
      cache->statistics = save_stats;
      if (tag != NULL)
        {
          if (! req->u.WAR.preload)
            {
              copy_line_to_return_buffer (cache, pipe, tag, address);
              set_return_buffer_reqno (cache, pipe, req->reqno);
            }
          else
            {
              invalidate_return_buffer (cache, address);
              if (req->u.WAR.lock)
                tag->locked = 1;
            }
          return;
        }
    }

  /* All cache lines in the set were locked, so just copy the data to the
     return buffer directly.  */
  if (! req->u.WAR.preload)
    {
      copy_memory_to_return_buffer (cache, pipe, address);
      set_return_buffer_reqno (cache, pipe, req->reqno);
    }
}

/* Resolve any conflicts and/or execute the given requests.  */
static void
arbitrate_requests (FRV_CACHE *cache)
{
  int pipe;
  /* Simply execute the requests in the final pipeline stages.  */
  for (pipe = LS; pipe < FRV_CACHE_PIPELINES; ++pipe)
    {
      FRV_CACHE_REQUEST *req
        = pipeline_stage_request (& cache->pipeline[pipe], LAST_STAGE);
      /* Make sure that there is a request to handle.  */
      if (req == NULL)
        continue;

      /* Handle the request.  */
      switch (req->kind)
        {
        case req_load:
          handle_req_load (cache, pipe, req);
          break;
        case req_store:
          handle_req_store (cache, pipe, req);
          break;
        case req_invalidate:
          handle_req_invalidate (cache, pipe, req);
          break;
        case req_preload:
          handle_req_preload (cache, pipe, req);
          break;
        case req_unlock:
          handle_req_unlock (cache, pipe, req);
          break;
        case req_WAR:
          handle_req_WAR (cache, pipe, req);
          break;
        default:
          abort ();
        }
    }
}

/* Move a waiting ARS register to a free WAR register.  */
static void
move_ARS_to_WAR (FRV_CACHE *cache, int pipe, FRV_CACHE_WAR *war)
{
  /* If BARS is valid for this pipe, then move it to the given WAR. Move
     NARS to BARS if it is valid.  */
  if (cache->BARS.valid && cache->BARS.pipe == pipe)
    {
      war->address = cache->BARS.address;
      war->reqno = cache->BARS.reqno;
      war->priority = cache->BARS.priority;
      war->preload = cache->BARS.preload;
      war->lock = cache->BARS.lock;
      war->latency = cache->memory_latency + 1;
      war->valid = 1;
      if (cache->NARS.valid)
        {
          cache->BARS = cache->NARS;
          cache->NARS.valid = 0;
        }
      else
        cache->BARS.valid = 0;
      return;
    }
  /* If NARS is valid for this pipe, then move it to the given WAR.  */
  if (cache->NARS.valid && cache->NARS.pipe == pipe)
    {
      war->address = cache->NARS.address;
      war->reqno = cache->NARS.reqno;
      war->priority = cache->NARS.priority;
      war->preload = cache->NARS.preload;
      war->lock = cache->NARS.lock;
      war->latency = cache->memory_latency + 1;
      war->valid = 1;
      cache->NARS.valid = 0;
    }
}

/* Decrease the latencies of the various states in the cache.  */
static void
decrease_latencies (FRV_CACHE *cache)
{
  int pipe, j;
  /* Check the WAR registers.  */
  for (pipe = LS; pipe < FRV_CACHE_PIPELINES; ++pipe)
    {
      FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];
      for (j = 0; j < NUM_WARS; ++j)
        {
          FRV_CACHE_WAR *war = & pipeline->WAR[j];
          if (war->valid)
            {
              --war->latency;
              /* If the latency has expired, then submit a WAR request to the
                 pipeline.  */
              if (war->latency <= 0)
                {
                  add_WAR_request (pipeline, war);
                  war->valid = 0;
                  move_ARS_to_WAR (cache, pipe, war);
                }
            }
        }
    }
}

/* Run the cache for the given number of cycles.  */
void
frv_cache_run (FRV_CACHE *cache, int cycles)
{
  int i;
  for (i = 0; i < cycles; ++i)
    {
      advance_pipelines (cache);
      arbitrate_requests (cache);
      decrease_latencies (cache);
    }
}
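
/* Typical cycle-accurate use, sketched; reqno is caller-chosen, and
   UNIT_I0 is assumed to dispatch to the LS pipe as in
   convert_slot_to_index above:

     frv_cache_request_load (cache, reqno, address, UNIT_I0);
     while (! frv_cache_data_in_buffer (cache, LS, address, reqno))
       frv_cache_run (cache, 1);

   The data then sits in the LS pipe's return buffer, aligned to the
   start of its cache line.  */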

int
frv_cache_read_passive_SI (FRV_CACHE *cache, SI address, SI *value)
{
  SI offset;
  FRV_CACHE_TAG *tag;

  if (non_cache_access (cache, address))
    return 0;

  {
    FRV_CACHE_STATISTICS save_stats = cache->statistics;
    int found = get_tag (cache, address, &tag);
    cache->statistics = save_stats;

    if (! found)
      return 0; /* Indicate non-cache-access.  */
  }

  /* A cache line was available for the data.
     Extract the target data from the line.  */
  offset = address & (cache->line_size - 1);
  *value = T2H_4 (*(SI *)(tag->line + offset));
  return 1;
}

/* Check the return buffers of the data cache to see if the requested data is
   available.  */
int
frv_cache_data_in_buffer (FRV_CACHE *cache, int pipe, SI address,
                          unsigned reqno)
{
  return cache->pipeline[pipe].status.return_buffer.valid
    && cache->pipeline[pipe].status.return_buffer.reqno == reqno
    && cache->pipeline[pipe].status.return_buffer.address <= address
    && cache->pipeline[pipe].status.return_buffer.address + cache->line_size
       > address;
}
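
/* Example: with 64-byte lines, a return buffer loaded for address 0x1008
   has buffer.address == 0x1000, so any address in [0x1000, 0x1040) with
   a matching reqno reports its data as available.  */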

/* Check to see if the requested data has been flushed.  */
int
frv_cache_data_flushed (FRV_CACHE *cache, int pipe, SI address, unsigned reqno)
{
  return cache->pipeline[pipe].status.flush.valid
    && cache->pipeline[pipe].status.flush.reqno == reqno
    && cache->pipeline[pipe].status.flush.address <= address
    && cache->pipeline[pipe].status.flush.address + cache->line_size
       > address;
}