1 /* frv simulator machine independent profiling code.
3 Copyright (C) 1998-2023 Free Software Foundation, Inc.
6 This file is part of the GNU simulators.
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21 /* This must come before any other includes. */
25 #define WANT_CPU_FRVBF
31 #if WITH_PROFILE_MODEL_P
34 #include "profile-fr400.h"
35 #include "profile-fr500.h"
36 #include "profile-fr550.h"
39 reset_gr_flags (SIM_CPU
*cpu
, INT gr
)
41 SIM_DESC sd
= CPU_STATE (cpu
);
42 if (STATE_ARCHITECTURE (sd
)->mach
== bfd_mach_fr400
43 || STATE_ARCHITECTURE (sd
)->mach
== bfd_mach_fr450
)
44 fr400_reset_gr_flags (cpu
, gr
);
45 /* Other machines have no gr flags right now. */
49 reset_fr_flags (SIM_CPU
*cpu
, INT fr
)
51 SIM_DESC sd
= CPU_STATE (cpu
);
52 if (STATE_ARCHITECTURE (sd
)->mach
== bfd_mach_fr400
53 || STATE_ARCHITECTURE (sd
)->mach
== bfd_mach_fr450
)
54 fr400_reset_fr_flags (cpu
, fr
);
55 else if (STATE_ARCHITECTURE (sd
)->mach
== bfd_mach_fr500
)
56 fr500_reset_fr_flags (cpu
, fr
);
60 reset_acc_flags (SIM_CPU
*cpu
, INT acc
)
62 SIM_DESC sd
= CPU_STATE (cpu
);
63 if (STATE_ARCHITECTURE (sd
)->mach
== bfd_mach_fr400
64 || STATE_ARCHITECTURE (sd
)->mach
== bfd_mach_fr450
)
65 fr400_reset_acc_flags (cpu
, acc
);
66 /* Other machines have no acc flags right now. */
70 reset_cc_flags (SIM_CPU
*cpu
, INT cc
)
72 SIM_DESC sd
= CPU_STATE (cpu
);
73 if (STATE_ARCHITECTURE (sd
)->mach
== bfd_mach_fr500
)
74 fr500_reset_cc_flags (cpu
, cc
);
75 /* Other machines have no cc flags. */
79 set_use_is_gr_complex (SIM_CPU
*cpu
, INT gr
)
83 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
84 reset_gr_flags (cpu
, gr
);
85 ps
->cur_gr_complex
|= (((DI
)1) << gr
);
90 set_use_not_gr_complex (SIM_CPU
*cpu
, INT gr
)
94 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
95 ps
->cur_gr_complex
&= ~(((DI
)1) << gr
);
100 use_is_gr_complex (SIM_CPU
*cpu
, INT gr
)
104 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
105 return ps
->cur_gr_complex
& (((DI
)1) << gr
);
110 /* Globals flag indicates whether this insn is being modeled. */
111 enum FRV_INSN_MODELING model_insn
= FRV_INSN_NO_MODELING
;
113 /* static buffer for the name of the currently most restrictive hazard. */
114 static char hazard_name
[100] = "";
116 /* Print information about the wait applied to an entire VLIW insn. */
117 FRV_INSN_FETCH_BUFFER frv_insn_fetch_buffer
[]
119 {1, NO_REQNO
}, {1, NO_REQNO
} /* init with impossible address. */
131 /* A queue of load requests from the data cache. Use to keep track of loads
132 which are still pending. */
133 /* TODO -- some of these are mutually exclusive and can use a union. */
148 enum cache_request request
;
149 } CACHE_QUEUE_ELEMENT
;
151 #define CACHE_QUEUE_SIZE 64 /* TODO -- make queue dynamic */
156 CACHE_QUEUE_ELEMENT q
[CACHE_QUEUE_SIZE
];
157 } cache_queue
= {0, 0};
159 /* Queue a request for a load from the cache. The load will be queued as
160 'inactive' and will be requested after the given number
161 of cycles have passed from the point the load is activated. */
163 request_cache_load (SIM_CPU
*cpu
, INT regnum
, int regtype
, int cycles
)
165 CACHE_QUEUE_ELEMENT
*q
;
169 /* For a conditional load which was not executed, CPU_LOAD_LENGTH will be
171 if (CPU_LOAD_LENGTH (cpu
) == 0)
174 if (cache_queue
.ix
>= CACHE_QUEUE_SIZE
)
175 abort (); /* TODO: Make the queue dynamic */
177 q
= & cache_queue
.q
[cache_queue
.ix
];
180 q
->reqno
= cache_queue
.reqno
++;
181 q
->request
= cache_load
;
182 q
->cache
= CPU_DATA_CACHE (cpu
);
183 q
->address
= CPU_LOAD_ADDRESS (cpu
);
184 q
->length
= CPU_LOAD_LENGTH (cpu
);
185 q
->is_signed
= CPU_LOAD_SIGNED (cpu
);
187 q
->regtype
= regtype
;
191 vliw
= CPU_VLIW (cpu
);
192 slot
= vliw
->next_slot
- 1;
193 q
->slot
= (*vliw
->current_vliw
)[slot
];
195 CPU_LOAD_LENGTH (cpu
) = 0;
198 /* Queue a request to flush the cache. The request will be queued as
199 'inactive' and will be requested after the given number
200 of cycles have passed from the point the request is activated. */
202 request_cache_flush (SIM_CPU
*cpu
, FRV_CACHE
*cache
, int cycles
)
204 CACHE_QUEUE_ELEMENT
*q
;
208 if (cache_queue
.ix
>= CACHE_QUEUE_SIZE
)
209 abort (); /* TODO: Make the queue dynamic */
211 q
= & cache_queue
.q
[cache_queue
.ix
];
214 q
->reqno
= cache_queue
.reqno
++;
215 q
->request
= cache_flush
;
217 q
->address
= CPU_LOAD_ADDRESS (cpu
);
218 q
->all
= CPU_PROFILE_STATE (cpu
)->all_cache_entries
;
222 vliw
= CPU_VLIW (cpu
);
223 slot
= vliw
->next_slot
- 1;
224 q
->slot
= (*vliw
->current_vliw
)[slot
];
227 /* Queue a request to invalidate the cache. The request will be queued as
228 'inactive' and will be requested after the given number
229 of cycles have passed from the point the request is activated. */
231 request_cache_invalidate (SIM_CPU
*cpu
, FRV_CACHE
*cache
, int cycles
)
233 CACHE_QUEUE_ELEMENT
*q
;
237 if (cache_queue
.ix
>= CACHE_QUEUE_SIZE
)
238 abort (); /* TODO: Make the queue dynamic */
240 q
= & cache_queue
.q
[cache_queue
.ix
];
243 q
->reqno
= cache_queue
.reqno
++;
244 q
->request
= cache_invalidate
;
246 q
->address
= CPU_LOAD_ADDRESS (cpu
);
247 q
->all
= CPU_PROFILE_STATE (cpu
)->all_cache_entries
;
251 vliw
= CPU_VLIW (cpu
);
252 slot
= vliw
->next_slot
- 1;
253 q
->slot
= (*vliw
->current_vliw
)[slot
];
256 /* Queue a request to preload the cache. The request will be queued as
257 'inactive' and will be requested after the given number
258 of cycles have passed from the point the request is activated. */
260 request_cache_preload (SIM_CPU
*cpu
, FRV_CACHE
*cache
, int cycles
)
262 CACHE_QUEUE_ELEMENT
*q
;
266 if (cache_queue
.ix
>= CACHE_QUEUE_SIZE
)
267 abort (); /* TODO: Make the queue dynamic */
269 q
= & cache_queue
.q
[cache_queue
.ix
];
272 q
->reqno
= cache_queue
.reqno
++;
273 q
->request
= cache_preload
;
275 q
->address
= CPU_LOAD_ADDRESS (cpu
);
276 q
->length
= CPU_LOAD_LENGTH (cpu
);
277 q
->lock
= CPU_LOAD_LOCK (cpu
);
281 vliw
= CPU_VLIW (cpu
);
282 slot
= vliw
->next_slot
- 1;
283 q
->slot
= (*vliw
->current_vliw
)[slot
];
285 CPU_LOAD_LENGTH (cpu
) = 0;
288 /* Queue a request to unlock the cache. The request will be queued as
289 'inactive' and will be requested after the given number
290 of cycles have passed from the point the request is activated. */
292 request_cache_unlock (SIM_CPU
*cpu
, FRV_CACHE
*cache
, int cycles
)
294 CACHE_QUEUE_ELEMENT
*q
;
298 if (cache_queue
.ix
>= CACHE_QUEUE_SIZE
)
299 abort (); /* TODO: Make the queue dynamic */
301 q
= & cache_queue
.q
[cache_queue
.ix
];
304 q
->reqno
= cache_queue
.reqno
++;
305 q
->request
= cache_unlock
;
307 q
->address
= CPU_LOAD_ADDRESS (cpu
);
311 vliw
= CPU_VLIW (cpu
);
312 slot
= vliw
->next_slot
- 1;
313 q
->slot
= (*vliw
->current_vliw
)[slot
];
317 submit_cache_request (CACHE_QUEUE_ELEMENT
*q
)
322 frv_cache_request_load (q
->cache
, q
->reqno
, q
->address
, q
->slot
);
325 frv_cache_request_invalidate (q
->cache
, q
->reqno
, q
->address
, q
->slot
,
328 case cache_invalidate
:
329 frv_cache_request_invalidate (q
->cache
, q
->reqno
, q
->address
, q
->slot
,
333 frv_cache_request_preload (q
->cache
, q
->address
, q
->slot
,
337 frv_cache_request_unlock (q
->cache
, q
->address
, q
->slot
);
344 /* Activate all inactive load requests. */
346 activate_cache_requests (SIM_CPU
*cpu
)
349 for (i
= 0; i
< cache_queue
.ix
; ++i
)
351 CACHE_QUEUE_ELEMENT
*q
= & cache_queue
.q
[i
];
355 /* Submit the request now if the cycle count is zero. */
357 submit_cache_request (q
);
362 /* Check to see if a load is pending which affects the given register(s).
365 load_pending_for_register (SIM_CPU
*cpu
, int regnum
, int words
, int regtype
)
368 for (i
= 0; i
< cache_queue
.ix
; ++i
)
370 CACHE_QUEUE_ELEMENT
*q
= & cache_queue
.q
[i
];
372 /* Must be the same kind of register. */
373 if (! q
->active
|| q
->request
!= cache_load
|| q
->regtype
!= regtype
)
376 /* If the registers numbers are equal, then we have a match. */
377 if (q
->regnum
== regnum
)
378 return 1; /* load pending */
380 /* Check for overlap of a load with a multi-word register. */
381 if (regnum
< q
->regnum
)
383 if (regnum
+ words
> q
->regnum
)
386 /* Check for overlap of a multi-word load with the register. */
389 int data_words
= (q
->length
+ sizeof (SI
) - 1) / sizeof (SI
);
390 if (q
->regnum
+ data_words
> regnum
)
395 return 0; /* no load pending */
398 /* Check to see if a cache flush pending which affects the given address. */
400 flush_pending_for_address (SIM_CPU
*cpu
, SI address
)
402 int line_mask
= ~(CPU_DATA_CACHE (cpu
)->line_size
- 1);
404 for (i
= 0; i
< cache_queue
.ix
; ++i
)
406 CACHE_QUEUE_ELEMENT
*q
= & cache_queue
.q
[i
];
408 /* Must be the same kind of request and active. */
409 if (! q
->active
|| q
->request
!= cache_flush
)
412 /* If the addresses are equal, then we have a match. */
413 if ((q
->address
& line_mask
) == (address
& line_mask
))
414 return 1; /* flush pending */
417 return 0; /* no flush pending */
421 remove_cache_queue_element (SIM_CPU
*cpu
, int i
)
423 /* If we are removing the load of a FR register, then remember which one(s).
425 CACHE_QUEUE_ELEMENT q
= cache_queue
.q
[i
];
427 for (--cache_queue
.ix
; i
< cache_queue
.ix
; ++i
)
428 cache_queue
.q
[i
] = cache_queue
.q
[i
+ 1];
430 /* If we removed a load of a FR register, check to see if any other loads
431 of that register is still queued. If not, then apply the queued post
432 processing time of that register to its latency. Also apply
433 1 extra cycle of latency to the register since it was a floating point
435 if (q
.request
== cache_load
&& q
.regtype
!= REGTYPE_NONE
)
437 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
438 int data_words
= (q
.length
+ sizeof (SI
) - 1) / sizeof (SI
);
440 for (j
= 0; j
< data_words
; ++j
)
442 int regnum
= q
.regnum
+ j
;
443 if (! load_pending_for_register (cpu
, regnum
, 1, q
.regtype
))
445 if (q
.regtype
== REGTYPE_FR
)
447 int *fr
= ps
->fr_busy
;
448 fr
[regnum
] += 1 + ps
->fr_ptime
[regnum
];
449 ps
->fr_ptime
[regnum
] = 0;
456 /* Copy data from the cache buffer to the target register(s). */
458 copy_load_data (SIM_CPU
*current_cpu
, FRV_CACHE
*cache
, int slot
,
459 CACHE_QUEUE_ELEMENT
*q
)
464 if (q
->regtype
== REGTYPE_FR
)
468 QI value
= CACHE_RETURN_DATA (cache
, slot
, q
->address
, QI
, 1);
469 SET_H_FR (q
->regnum
, value
);
473 UQI value
= CACHE_RETURN_DATA (cache
, slot
, q
->address
, UQI
, 1);
474 SET_H_FR (q
->regnum
, value
);
481 QI value
= CACHE_RETURN_DATA (cache
, slot
, q
->address
, QI
, 1);
482 SET_H_GR (q
->regnum
, value
);
486 UQI value
= CACHE_RETURN_DATA (cache
, slot
, q
->address
, UQI
, 1);
487 SET_H_GR (q
->regnum
, value
);
492 if (q
->regtype
== REGTYPE_FR
)
496 HI value
= CACHE_RETURN_DATA (cache
, slot
, q
->address
, HI
, 2);
497 SET_H_FR (q
->regnum
, value
);
501 UHI value
= CACHE_RETURN_DATA (cache
, slot
, q
->address
, UHI
, 2);
502 SET_H_FR (q
->regnum
, value
);
509 HI value
= CACHE_RETURN_DATA (cache
, slot
, q
->address
, HI
, 2);
510 SET_H_GR (q
->regnum
, value
);
514 UHI value
= CACHE_RETURN_DATA (cache
, slot
, q
->address
, UHI
, 2);
515 SET_H_GR (q
->regnum
, value
);
520 if (q
->regtype
== REGTYPE_FR
)
523 CACHE_RETURN_DATA (cache
, slot
, q
->address
, SF
, 4));
528 CACHE_RETURN_DATA (cache
, slot
, q
->address
, SI
, 4));
532 if (q
->regtype
== REGTYPE_FR
)
534 SET_H_FR_DOUBLE (q
->regnum
,
535 CACHE_RETURN_DATA (cache
, slot
, q
->address
, DF
, 8));
539 SET_H_GR_DOUBLE (q
->regnum
,
540 CACHE_RETURN_DATA (cache
, slot
, q
->address
, DI
, 8));
544 if (q
->regtype
== REGTYPE_FR
)
545 frvbf_h_fr_quad_set_handler (current_cpu
, q
->regnum
,
546 CACHE_RETURN_DATA_ADDRESS (cache
, slot
,
550 frvbf_h_gr_quad_set_handler (current_cpu
, q
->regnum
,
551 CACHE_RETURN_DATA_ADDRESS (cache
, slot
,
561 request_complete (SIM_CPU
*cpu
, CACHE_QUEUE_ELEMENT
*q
)
564 if (! q
->active
|| q
->cycles
> 0)
567 cache
= CPU_DATA_CACHE (cpu
);
571 /* For loads, we must wait until the data is returned from the cache. */
572 if (frv_cache_data_in_buffer (cache
, 0, q
->address
, q
->reqno
))
574 copy_load_data (cpu
, cache
, 0, q
);
577 if (frv_cache_data_in_buffer (cache
, 1, q
->address
, q
->reqno
))
579 copy_load_data (cpu
, cache
, 1, q
);
585 /* We must wait until the data is flushed. */
586 if (frv_cache_data_flushed (cache
, 0, q
->address
, q
->reqno
))
588 if (frv_cache_data_flushed (cache
, 1, q
->address
, q
->reqno
))
593 /* All other requests are complete once they've been made. */
600 /* Run the insn and data caches through the given number of cycles, taking
601 note of load requests which are fullfilled as a result. */
603 run_caches (SIM_CPU
*cpu
, int cycles
)
605 FRV_CACHE
* data_cache
= CPU_DATA_CACHE (cpu
);
606 FRV_CACHE
* insn_cache
= CPU_INSN_CACHE (cpu
);
608 /* For each cycle, run the caches, noting which requests have been fullfilled
609 and submitting new requests on their designated cycles. */
610 for (i
= 0; i
< cycles
; ++i
)
613 /* Run the caches through 1 cycle. */
614 frv_cache_run (data_cache
, 1);
615 frv_cache_run (insn_cache
, 1);
617 /* Note whether prefetched insn data has been loaded yet. */
618 for (j
= LS
; j
< FRV_CACHE_PIPELINES
; ++j
)
620 if (frv_insn_fetch_buffer
[j
].reqno
!= NO_REQNO
621 && frv_cache_data_in_buffer (insn_cache
, j
,
622 frv_insn_fetch_buffer
[j
].address
,
623 frv_insn_fetch_buffer
[j
].reqno
))
624 frv_insn_fetch_buffer
[j
].reqno
= NO_REQNO
;
627 /* Check to see which requests have been satisfied and which should
629 for (j
= 0; j
< cache_queue
.ix
; ++j
)
631 CACHE_QUEUE_ELEMENT
*q
= & cache_queue
.q
[j
];
635 /* If a load has been satisfied, complete the operation and remove it
637 if (request_complete (cpu
, q
))
639 remove_cache_queue_element (cpu
, j
);
644 /* Decrease the cycle count of each queued request.
645 Submit a request for each queued request whose cycle count has
649 submit_cache_request (q
);
655 apply_latency_adjustments (SIM_CPU
*cpu
)
657 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
659 /* update the latencies of the registers. */
660 int *fr
= ps
->fr_busy
;
661 int *acc
= ps
->acc_busy
;
662 for (i
= 0; i
< 64; ++i
)
664 if (ps
->fr_busy_adjust
[i
] > 0)
665 *fr
-= ps
->fr_busy_adjust
[i
]; /* OK if it goes negative. */
666 if (ps
->acc_busy_adjust
[i
] > 0)
667 *acc
-= ps
->acc_busy_adjust
[i
]; /* OK if it goes negative. */
673 /* Account for the number of cycles which have just passed in the latency of
674 various system elements. Works for negative cycles too so that latency
675 can be extended in the case of insn fetch latency.
676 If negative or zero, then no adjustment is necessary. */
678 update_latencies (SIM_CPU
*cpu
, int cycles
)
680 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
682 /* update the latencies of the registers. */
689 int *gr
= ps
->gr_busy
;
690 int *fr
= ps
->fr_busy
;
691 int *acc
= ps
->acc_busy
;
693 /* This loop handles GR, FR and ACC registers. */
694 for (i
= 0; i
< 64; ++i
)
699 reset_gr_flags (cpu
, i
);
703 /* If the busy drops to 0, then mark the register as
707 int *fr_lat
= ps
->fr_latency
+ i
;
709 ps
->fr_busy_adjust
[i
] = 0;
710 /* Only clear flags if this register has no target latency. */
712 reset_fr_flags (cpu
, i
);
716 /* If the busy drops to 0, then mark the register as
720 int *acc_lat
= ps
->acc_latency
+ i
;
722 ps
->acc_busy_adjust
[i
] = 0;
723 /* Only clear flags if this register has no target latency. */
725 reset_acc_flags (cpu
, i
);
733 /* This loop handles CCR registers. */
735 for (i
= 0; i
< 8; ++i
)
740 reset_cc_flags (cpu
, i
);
746 /* This loop handles SPR registers. */
748 for (i
= 0; i
< 4096; ++i
)
756 /* This loop handles resources. */
757 idiv
= ps
->idiv_busy
;
758 fdiv
= ps
->fdiv_busy
;
759 fsqrt
= ps
->fsqrt_busy
;
760 for (i
= 0; i
< 2; ++i
)
762 *idiv
= (*idiv
<= cycles
) ? 0 : (*idiv
- cycles
);
763 *fdiv
= (*fdiv
<= cycles
) ? 0 : (*fdiv
- cycles
);
764 *fsqrt
= (*fsqrt
<= cycles
) ? 0 : (*fsqrt
- cycles
);
769 /* Float and media units can occur in 4 slots on some machines. */
770 flt
= ps
->float_busy
;
771 media
= ps
->media_busy
;
772 for (i
= 0; i
< 4; ++i
)
774 *flt
= (*flt
<= cycles
) ? 0 : (*flt
- cycles
);
775 *media
= (*media
<= cycles
) ? 0 : (*media
- cycles
);
781 /* Print information about the wait for the given number of cycles. */
783 frv_model_trace_wait_cycles (SIM_CPU
*cpu
, int cycles
, const char *hazard_name
)
785 if (TRACE_INSN_P (cpu
) && cycles
> 0)
787 SIM_DESC sd
= CPU_STATE (cpu
);
788 trace_printf (sd
, cpu
, "**** %s wait %d cycles ***\n",
789 hazard_name
, cycles
);
794 trace_vliw_wait_cycles (SIM_CPU
*cpu
)
796 if (TRACE_INSN_P (cpu
))
798 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
799 frv_model_trace_wait_cycles (cpu
, ps
->vliw_wait
, hazard_name
);
803 /* Wait for the given number of cycles. */
805 frv_model_advance_cycles (SIM_CPU
*cpu
, int cycles
)
807 PROFILE_DATA
*p
= CPU_PROFILE_DATA (cpu
);
808 update_latencies (cpu
, cycles
);
809 run_caches (cpu
, cycles
);
810 PROFILE_MODEL_TOTAL_CYCLES (p
) += cycles
;
814 handle_resource_wait (SIM_CPU
*cpu
)
816 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
817 if (ps
->vliw_wait
!= 0)
818 frv_model_advance_cycles (cpu
, ps
->vliw_wait
);
819 if (ps
->vliw_load_stall
> ps
->vliw_wait
)
820 ps
->vliw_load_stall
-= ps
->vliw_wait
;
822 ps
->vliw_load_stall
= 0;
825 /* Account for the number of cycles until these resources will be available
828 update_target_latencies (SIM_CPU
*cpu
)
830 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
832 /* update the latencies of the registers. */
834 int *gr_lat
= ps
->gr_latency
;
835 int *fr_lat
= ps
->fr_latency
;
836 int *acc_lat
= ps
->acc_latency
;
839 int *gr
= ps
->gr_busy
;
840 int *fr
= ps
->fr_busy
;
841 int *acc
= ps
->acc_busy
;
843 /* This loop handles GR, FR and ACC registers. */
844 for (i
= 0; i
< 64; ++i
)
865 /* This loop handles CCR registers. */
867 ccr_lat
= ps
->ccr_latency
;
868 for (i
= 0; i
< 8; ++i
)
877 /* This loop handles SPR registers. */
879 spr_lat
= ps
->spr_latency
;
880 for (i
= 0; i
< 4096; ++i
)
891 /* Run the caches until all pending cache flushes are complete. */
893 wait_for_flush (SIM_CPU
*cpu
)
895 SI address
= CPU_LOAD_ADDRESS (cpu
);
897 while (flush_pending_for_address (cpu
, address
))
899 frv_model_advance_cycles (cpu
, 1);
902 if (TRACE_INSN_P (cpu
) && wait
)
904 sprintf (hazard_name
, "Data cache flush address %x:", address
);
905 frv_model_trace_wait_cycles (cpu
, wait
, hazard_name
);
909 /* Initialize cycle counting for an insn.
910 FIRST_P is non-zero if this is the first insn in a set of parallel
913 frvbf_model_insn_before (SIM_CPU
*cpu
, int first_p
)
915 SIM_DESC sd
= CPU_STATE (cpu
);
916 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
920 memset (ps
->fr_busy_adjust
, 0, sizeof (ps
->fr_busy_adjust
));
921 memset (ps
->acc_busy_adjust
, 0, sizeof (ps
->acc_busy_adjust
));
927 ps
->vliw_branch_taken
= 0;
928 ps
->vliw_load_stall
= 0;
931 switch (STATE_ARCHITECTURE (sd
)->mach
)
935 fr400_model_insn_before (cpu
, first_p
);
938 fr500_model_insn_before (cpu
, first_p
);
941 fr550_model_insn_before (cpu
, first_p
);
948 wait_for_flush (cpu
);
951 /* Record the cycles computed for an insn.
952 LAST_P is non-zero if this is the last insn in a set of parallel insns,
953 and we update the total cycle count.
954 CYCLES is the cycle count of the insn. */
957 frvbf_model_insn_after (SIM_CPU
*cpu
, int last_p
, int cycles
)
959 PROFILE_DATA
*p
= CPU_PROFILE_DATA (cpu
);
960 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
961 SIM_DESC sd
= CPU_STATE (cpu
);
963 PROFILE_MODEL_CUR_INSN_CYCLES (p
) = cycles
;
965 /* The number of cycles for a VLIW insn is the maximum number of cycles
966 used by any individual insn within it. */
967 if (cycles
> ps
->vliw_cycles
)
968 ps
->vliw_cycles
= cycles
;
972 /* This is the last insn in a VLIW insn. */
973 struct frv_interrupt_timer
*timer
= & frv_interrupt_state
.timer
;
975 activate_cache_requests (cpu
); /* before advancing cycles. */
976 apply_latency_adjustments (cpu
); /* must go first. */
977 update_target_latencies (cpu
); /* must go next. */
978 frv_model_advance_cycles (cpu
, ps
->vliw_cycles
);
980 PROFILE_MODEL_LOAD_STALL_CYCLES (p
) += ps
->vliw_load_stall
;
982 /* Check the interrupt timer. cycles contains the total cycle count. */
985 cycles
= PROFILE_MODEL_TOTAL_CYCLES (p
);
986 if (timer
->current
% timer
->value
987 + (cycles
- timer
->current
) >= timer
->value
)
988 frv_queue_external_interrupt (cpu
, timer
->interrupt
);
989 timer
->current
= cycles
;
992 ps
->past_first_p
= 0; /* Next one will be the first in a new VLIW. */
993 ps
->branch_address
= -1;
996 ps
->past_first_p
= 1;
998 switch (STATE_ARCHITECTURE (sd
)->mach
)
1000 case bfd_mach_fr400
:
1001 case bfd_mach_fr450
:
1002 fr400_model_insn_after (cpu
, last_p
, cycles
);
1004 case bfd_mach_fr500
:
1005 fr500_model_insn_after (cpu
, last_p
, cycles
);
1007 case bfd_mach_fr550
:
1008 fr550_model_insn_after (cpu
, last_p
, cycles
);
1016 frvbf_model_branch (SIM_CPU
*current_cpu
, PCADDR target
, int hint
)
1018 /* Record the hint and branch address for use in profiling. */
1019 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (current_cpu
);
1020 ps
->branch_hint
= hint
;
1021 ps
->branch_address
= target
;
1024 /* Top up the latency of the given GR by the given number of cycles. */
1026 update_GR_latency (SIM_CPU
*cpu
, INT out_GR
, int cycles
)
1030 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1031 int *gr
= ps
->gr_latency
;
1032 if (gr
[out_GR
] < cycles
)
1033 gr
[out_GR
] = cycles
;
1038 decrease_GR_busy (SIM_CPU
*cpu
, INT in_GR
, int cycles
)
1042 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1043 int *gr
= ps
->gr_busy
;
1044 gr
[in_GR
] -= cycles
;
1048 /* Top up the latency of the given double GR by the number of cycles. */
1050 update_GRdouble_latency (SIM_CPU
*cpu
, INT out_GR
, int cycles
)
1054 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1055 int *gr
= ps
->gr_latency
;
1056 if (gr
[out_GR
] < cycles
)
1057 gr
[out_GR
] = cycles
;
1058 if (out_GR
< 63 && gr
[out_GR
+ 1] < cycles
)
1059 gr
[out_GR
+ 1] = cycles
;
1064 update_GR_latency_for_load (SIM_CPU
*cpu
, INT out_GR
, int cycles
)
1068 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1069 int *gr
= ps
->gr_latency
;
1071 /* The latency of the GR will be at least the number of cycles used
1073 if (gr
[out_GR
] < cycles
)
1074 gr
[out_GR
] = cycles
;
1076 /* The latency will also depend on how long it takes to retrieve the
1077 data from the cache or memory. Assume that the load is issued
1078 after the last cycle of the insn. */
1079 request_cache_load (cpu
, out_GR
, REGTYPE_NONE
, cycles
);
1084 update_GRdouble_latency_for_load (SIM_CPU
*cpu
, INT out_GR
, int cycles
)
1088 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1089 int *gr
= ps
->gr_latency
;
1091 /* The latency of the GR will be at least the number of cycles used
1093 if (gr
[out_GR
] < cycles
)
1094 gr
[out_GR
] = cycles
;
1095 if (out_GR
< 63 && gr
[out_GR
+ 1] < cycles
)
1096 gr
[out_GR
+ 1] = cycles
;
1098 /* The latency will also depend on how long it takes to retrieve the
1099 data from the cache or memory. Assume that the load is issued
1100 after the last cycle of the insn. */
1101 request_cache_load (cpu
, out_GR
, REGTYPE_NONE
, cycles
);
1106 update_GR_latency_for_swap (SIM_CPU
*cpu
, INT out_GR
, int cycles
)
1108 update_GR_latency_for_load (cpu
, out_GR
, cycles
);
1111 /* Top up the latency of the given FR by the given number of cycles. */
1113 update_FR_latency (SIM_CPU
*cpu
, INT out_FR
, int cycles
)
1117 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1118 int *fr
= ps
->fr_latency
;
1119 if (fr
[out_FR
] < cycles
)
1120 fr
[out_FR
] = cycles
;
1124 /* Top up the latency of the given double FR by the number of cycles. */
1126 update_FRdouble_latency (SIM_CPU
*cpu
, INT out_FR
, int cycles
)
1130 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1131 int *fr
= ps
->fr_latency
;
1132 if (fr
[out_FR
] < cycles
)
1133 fr
[out_FR
] = cycles
;
1134 if (out_FR
< 63 && fr
[out_FR
+ 1] < cycles
)
1135 fr
[out_FR
+ 1] = cycles
;
1140 update_FR_latency_for_load (SIM_CPU
*cpu
, INT out_FR
, int cycles
)
1144 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1145 int *fr
= ps
->fr_latency
;
1147 /* The latency of the FR will be at least the number of cycles used
1149 if (fr
[out_FR
] < cycles
)
1150 fr
[out_FR
] = cycles
;
1152 /* The latency will also depend on how long it takes to retrieve the
1153 data from the cache or memory. Assume that the load is issued
1154 after the last cycle of the insn. */
1155 request_cache_load (cpu
, out_FR
, REGTYPE_FR
, cycles
);
1160 update_FRdouble_latency_for_load (SIM_CPU
*cpu
, INT out_FR
, int cycles
)
1164 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1165 int *fr
= ps
->fr_latency
;
1167 /* The latency of the FR will be at least the number of cycles used
1169 if (fr
[out_FR
] < cycles
)
1170 fr
[out_FR
] = cycles
;
1171 if (out_FR
< 63 && fr
[out_FR
+ 1] < cycles
)
1172 fr
[out_FR
+ 1] = cycles
;
1174 /* The latency will also depend on how long it takes to retrieve the
1175 data from the cache or memory. Assume that the load is issued
1176 after the last cycle of the insn. */
1177 request_cache_load (cpu
, out_FR
, REGTYPE_FR
, cycles
);
1181 /* Top up the post-processing time of the given FR by the given number of
1184 update_FR_ptime (SIM_CPU
*cpu
, INT out_FR
, int cycles
)
1188 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1189 /* If a load is pending on this register, then add the cycles to
1190 the post processing time for this register. Otherwise apply it
1191 directly to the latency of the register. */
1192 if (! load_pending_for_register (cpu
, out_FR
, 1, REGTYPE_FR
))
1194 int *fr
= ps
->fr_latency
;
1195 fr
[out_FR
] += cycles
;
1198 ps
->fr_ptime
[out_FR
] += cycles
;
1203 update_FRdouble_ptime (SIM_CPU
*cpu
, INT out_FR
, int cycles
)
1207 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1208 /* If a load is pending on this register, then add the cycles to
1209 the post processing time for this register. Otherwise apply it
1210 directly to the latency of the register. */
1211 if (! load_pending_for_register (cpu
, out_FR
, 2, REGTYPE_FR
))
1213 int *fr
= ps
->fr_latency
;
1214 fr
[out_FR
] += cycles
;
1216 fr
[out_FR
+ 1] += cycles
;
1220 ps
->fr_ptime
[out_FR
] += cycles
;
1222 ps
->fr_ptime
[out_FR
+ 1] += cycles
;
1227 /* Top up the post-processing time of the given ACC by the given number of
1230 update_ACC_ptime (SIM_CPU
*cpu
, INT out_ACC
, int cycles
)
1234 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1235 /* No load can be pending on this register. Apply the cycles
1236 directly to the latency of the register. */
1237 int *acc
= ps
->acc_latency
;
1238 acc
[out_ACC
] += cycles
;
1242 /* Top up the post-processing time of the given SPR by the given number of
1245 update_SPR_ptime (SIM_CPU
*cpu
, INT out_SPR
, int cycles
)
1249 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1250 /* No load can be pending on this register. Apply the cycles
1251 directly to the latency of the register. */
1252 int *spr
= ps
->spr_latency
;
1253 spr
[out_SPR
] += cycles
;
1258 decrease_ACC_busy (SIM_CPU
*cpu
, INT out_ACC
, int cycles
)
1262 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1263 int *acc
= ps
->acc_busy
;
1264 acc
[out_ACC
] -= cycles
;
1265 if (ps
->acc_busy_adjust
[out_ACC
] >= 0
1266 && cycles
> ps
->acc_busy_adjust
[out_ACC
])
1267 ps
->acc_busy_adjust
[out_ACC
] = cycles
;
1272 increase_ACC_busy (SIM_CPU
*cpu
, INT out_ACC
, int cycles
)
1276 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1277 int *acc
= ps
->acc_busy
;
1278 acc
[out_ACC
] += cycles
;
1283 enforce_full_acc_latency (SIM_CPU
*cpu
, INT in_ACC
)
1285 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1286 ps
->acc_busy_adjust
[in_ACC
] = -1;
1290 decrease_FR_busy (SIM_CPU
*cpu
, INT out_FR
, int cycles
)
1294 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1295 int *fr
= ps
->fr_busy
;
1296 fr
[out_FR
] -= cycles
;
1297 if (ps
->fr_busy_adjust
[out_FR
] >= 0
1298 && cycles
> ps
->fr_busy_adjust
[out_FR
])
1299 ps
->fr_busy_adjust
[out_FR
] = cycles
;
1304 increase_FR_busy (SIM_CPU
*cpu
, INT out_FR
, int cycles
)
1308 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1309 int *fr
= ps
->fr_busy
;
1310 fr
[out_FR
] += cycles
;
1314 /* Top up the latency of the given ACC by the given number of cycles. */
1316 update_ACC_latency (SIM_CPU
*cpu
, INT out_ACC
, int cycles
)
1320 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1321 int *acc
= ps
->acc_latency
;
1322 if (acc
[out_ACC
] < cycles
)
1323 acc
[out_ACC
] = cycles
;
1327 /* Top up the latency of the given CCR by the given number of cycles. */
1329 update_CCR_latency (SIM_CPU
*cpu
, INT out_CCR
, int cycles
)
1333 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1334 int *ccr
= ps
->ccr_latency
;
1335 if (ccr
[out_CCR
] < cycles
)
1336 ccr
[out_CCR
] = cycles
;
1340 /* Top up the latency of the given SPR by the given number of cycles. */
1342 update_SPR_latency (SIM_CPU
*cpu
, INT out_SPR
, int cycles
)
1346 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1347 int *spr
= ps
->spr_latency
;
1348 if (spr
[out_SPR
] < cycles
)
1349 spr
[out_SPR
] = cycles
;
1353 /* Top up the latency of the given integer division resource by the given
1354 number of cycles. */
1356 update_idiv_resource_latency (SIM_CPU
*cpu
, INT in_resource
, int cycles
)
1358 /* operate directly on the busy cycles since each resource can only
1359 be used once in a VLIW insn. */
1360 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1361 int *r
= ps
->idiv_busy
;
1362 r
[in_resource
] = cycles
;
1365 /* Set the latency of the given resource to the given number of cycles. */
1367 update_fdiv_resource_latency (SIM_CPU
*cpu
, INT in_resource
, int cycles
)
1369 /* operate directly on the busy cycles since each resource can only
1370 be used once in a VLIW insn. */
1371 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1372 int *r
= ps
->fdiv_busy
;
1373 r
[in_resource
] = cycles
;
1376 /* Set the latency of the given resource to the given number of cycles. */
1378 update_fsqrt_resource_latency (SIM_CPU
*cpu
, INT in_resource
, int cycles
)
1380 /* operate directly on the busy cycles since each resource can only
1381 be used once in a VLIW insn. */
1382 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1383 int *r
= ps
->fsqrt_busy
;
1384 r
[in_resource
] = cycles
;
1387 /* Set the latency of the given resource to the given number of cycles. */
1389 update_float_resource_latency (SIM_CPU
*cpu
, INT in_resource
, int cycles
)
1391 /* operate directly on the busy cycles since each resource can only
1392 be used once in a VLIW insn. */
1393 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1394 int *r
= ps
->float_busy
;
1395 r
[in_resource
] = cycles
;
1399 update_media_resource_latency (SIM_CPU
*cpu
, INT in_resource
, int cycles
)
1401 /* operate directly on the busy cycles since each resource can only
1402 be used once in a VLIW insn. */
1403 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1404 int *r
= ps
->media_busy
;
1405 r
[in_resource
] = cycles
;
1408 /* Set the branch penalty to the given number of cycles. */
1410 update_branch_penalty (SIM_CPU
*cpu
, int cycles
)
1412 /* operate directly on the busy cycles since only one branch can occur
1414 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1415 ps
->branch_penalty
= cycles
;
1418 /* Check the availability of the given GR register and update the number
1419 of cycles the current VLIW insn must wait until it is available. */
1421 vliw_wait_for_GR (SIM_CPU
*cpu
, INT in_GR
)
1423 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1424 int *gr
= ps
->gr_busy
;
1425 /* If the latency of the register is greater than the current wait
1426 then update the current wait. */
1427 if (in_GR
>= 0 && gr
[in_GR
] > ps
->vliw_wait
)
1429 if (TRACE_INSN_P (cpu
))
1430 sprintf (hazard_name
, "Data hazard for gr%d:", in_GR
);
1431 ps
->vliw_wait
= gr
[in_GR
];
1435 /* Check the availability of the given GR register and update the number
1436 of cycles the current VLIW insn must wait until it is available. */
1438 vliw_wait_for_GRdouble (SIM_CPU
*cpu
, INT in_GR
)
1440 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1441 int *gr
= ps
->gr_busy
;
1442 /* If the latency of the register is greater than the current wait
1443 then update the current wait. */
1446 if (gr
[in_GR
] > ps
->vliw_wait
)
1448 if (TRACE_INSN_P (cpu
))
1449 sprintf (hazard_name
, "Data hazard for gr%d:", in_GR
);
1450 ps
->vliw_wait
= gr
[in_GR
];
1452 if (in_GR
< 63 && gr
[in_GR
+ 1] > ps
->vliw_wait
)
1454 if (TRACE_INSN_P (cpu
))
1455 sprintf (hazard_name
, "Data hazard for gr%d:", in_GR
+ 1);
1456 ps
->vliw_wait
= gr
[in_GR
+ 1];
1461 /* Check the availability of the given FR register and update the number
1462 of cycles the current VLIW insn must wait until it is available. */
1464 vliw_wait_for_FR (SIM_CPU
*cpu
, INT in_FR
)
1466 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1467 int *fr
= ps
->fr_busy
;
1468 /* If the latency of the register is greater than the current wait
1469 then update the current wait. */
1470 if (in_FR
>= 0 && fr
[in_FR
] > ps
->vliw_wait
)
1472 if (TRACE_INSN_P (cpu
))
1473 sprintf (hazard_name
, "Data hazard for fr%d:", in_FR
);
1474 ps
->vliw_wait
= fr
[in_FR
];
1478 /* Check the availability of the given GR register and update the number
1479 of cycles the current VLIW insn must wait until it is available. */
1481 vliw_wait_for_FRdouble (SIM_CPU
*cpu
, INT in_FR
)
1483 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1484 int *fr
= ps
->fr_busy
;
1485 /* If the latency of the register is greater than the current wait
1486 then update the current wait. */
1489 if (fr
[in_FR
] > ps
->vliw_wait
)
1491 if (TRACE_INSN_P (cpu
))
1492 sprintf (hazard_name
, "Data hazard for fr%d:", in_FR
);
1493 ps
->vliw_wait
= fr
[in_FR
];
1495 if (in_FR
< 63 && fr
[in_FR
+ 1] > ps
->vliw_wait
)
1497 if (TRACE_INSN_P (cpu
))
1498 sprintf (hazard_name
, "Data hazard for fr%d:", in_FR
+ 1);
1499 ps
->vliw_wait
= fr
[in_FR
+ 1];
1504 /* Check the availability of the given CCR register and update the number
1505 of cycles the current VLIW insn must wait until it is available. */
1507 vliw_wait_for_CCR (SIM_CPU
*cpu
, INT in_CCR
)
1509 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1510 int *ccr
= ps
->ccr_busy
;
1511 /* If the latency of the register is greater than the current wait
1512 then update the current wait. */
1513 if (in_CCR
>= 0 && ccr
[in_CCR
] > ps
->vliw_wait
)
1515 if (TRACE_INSN_P (cpu
))
1518 sprintf (hazard_name
, "Data hazard for icc%d:", in_CCR
-4);
1520 sprintf (hazard_name
, "Data hazard for fcc%d:", in_CCR
);
1522 ps
->vliw_wait
= ccr
[in_CCR
];
1526 /* Check the availability of the given ACC register and update the number
1527 of cycles the current VLIW insn must wait until it is available. */
1529 vliw_wait_for_ACC (SIM_CPU
*cpu
, INT in_ACC
)
1531 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1532 int *acc
= ps
->acc_busy
;
1533 /* If the latency of the register is greater than the current wait
1534 then update the current wait. */
1535 if (in_ACC
>= 0 && acc
[in_ACC
] > ps
->vliw_wait
)
1537 if (TRACE_INSN_P (cpu
))
1538 sprintf (hazard_name
, "Data hazard for acc%d:", in_ACC
);
1539 ps
->vliw_wait
= acc
[in_ACC
];
1543 /* Check the availability of the given SPR register and update the number
1544 of cycles the current VLIW insn must wait until it is available. */
1546 vliw_wait_for_SPR (SIM_CPU
*cpu
, INT in_SPR
)
1548 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1549 int *spr
= ps
->spr_busy
;
1550 /* If the latency of the register is greater than the current wait
1551 then update the current wait. */
1552 if (in_SPR
>= 0 && spr
[in_SPR
] > ps
->vliw_wait
)
1554 if (TRACE_INSN_P (cpu
))
1555 sprintf (hazard_name
, "Data hazard for spr %d:", in_SPR
);
1556 ps
->vliw_wait
= spr
[in_SPR
];
1560 /* Check the availability of the given integer division resource and update
1561 the number of cycles the current VLIW insn must wait until it is available.
1564 vliw_wait_for_idiv_resource (SIM_CPU
*cpu
, INT in_resource
)
1566 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1567 int *r
= ps
->idiv_busy
;
1568 /* If the latency of the resource is greater than the current wait
1569 then update the current wait. */
1570 if (r
[in_resource
] > ps
->vliw_wait
)
1572 if (TRACE_INSN_P (cpu
))
1574 sprintf (hazard_name
, "Resource hazard for integer division in slot I%d:", in_resource
);
1576 ps
->vliw_wait
= r
[in_resource
];
1580 /* Check the availability of the given float division resource and update
1581 the number of cycles the current VLIW insn must wait until it is available.
1584 vliw_wait_for_fdiv_resource (SIM_CPU
*cpu
, INT in_resource
)
1586 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1587 int *r
= ps
->fdiv_busy
;
1588 /* If the latency of the resource is greater than the current wait
1589 then update the current wait. */
1590 if (r
[in_resource
] > ps
->vliw_wait
)
1592 if (TRACE_INSN_P (cpu
))
1594 sprintf (hazard_name
, "Resource hazard for floating point division in slot F%d:", in_resource
);
1596 ps
->vliw_wait
= r
[in_resource
];
1600 /* Check the availability of the given float square root resource and update
1601 the number of cycles the current VLIW insn must wait until it is available.
1604 vliw_wait_for_fsqrt_resource (SIM_CPU
*cpu
, INT in_resource
)
1606 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1607 int *r
= ps
->fsqrt_busy
;
1608 /* If the latency of the resource is greater than the current wait
1609 then update the current wait. */
1610 if (r
[in_resource
] > ps
->vliw_wait
)
1612 if (TRACE_INSN_P (cpu
))
1614 sprintf (hazard_name
, "Resource hazard for square root in slot F%d:", in_resource
);
1616 ps
->vliw_wait
= r
[in_resource
];
1620 /* Check the availability of the given float unit resource and update
1621 the number of cycles the current VLIW insn must wait until it is available.
1624 vliw_wait_for_float_resource (SIM_CPU
*cpu
, INT in_resource
)
1626 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1627 int *r
= ps
->float_busy
;
1628 /* If the latency of the resource is greater than the current wait
1629 then update the current wait. */
1630 if (r
[in_resource
] > ps
->vliw_wait
)
1632 if (TRACE_INSN_P (cpu
))
1634 sprintf (hazard_name
, "Resource hazard for floating point unit in slot F%d:", in_resource
);
1636 ps
->vliw_wait
= r
[in_resource
];
1640 /* Check the availability of the given media unit resource and update
1641 the number of cycles the current VLIW insn must wait until it is available.
1644 vliw_wait_for_media_resource (SIM_CPU
*cpu
, INT in_resource
)
1646 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1647 int *r
= ps
->media_busy
;
1648 /* If the latency of the resource is greater than the current wait
1649 then update the current wait. */
1650 if (r
[in_resource
] > ps
->vliw_wait
)
1652 if (TRACE_INSN_P (cpu
))
1654 sprintf (hazard_name
, "Resource hazard for media unit in slot M%d:", in_resource
);
1656 ps
->vliw_wait
= r
[in_resource
];
1660 /* Run the caches until all requests for the given register(s) are satisfied. */
1662 load_wait_for_GR (SIM_CPU
*cpu
, INT in_GR
)
1667 while (load_pending_for_register (cpu
, in_GR
, 1/*words*/, REGTYPE_NONE
))
1669 frv_model_advance_cycles (cpu
, 1);
1674 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1675 ps
->vliw_wait
+= wait
;
1676 ps
->vliw_load_stall
+= wait
;
1677 if (TRACE_INSN_P (cpu
))
1678 sprintf (hazard_name
, "Data hazard for gr%d:", in_GR
);
1684 load_wait_for_FR (SIM_CPU
*cpu
, INT in_FR
)
1688 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1691 while (load_pending_for_register (cpu
, in_FR
, 1/*words*/, REGTYPE_FR
))
1693 frv_model_advance_cycles (cpu
, 1);
1696 /* Post processing time may have been added to the register's
1697 latency after the loads were processed. Account for that too.
1703 frv_model_advance_cycles (cpu
, fr
[in_FR
]);
1705 /* Update the vliw_wait with the number of cycles we waited for the
1706 load and any post-processing. */
1709 ps
->vliw_wait
+= wait
;
1710 ps
->vliw_load_stall
+= wait
;
1711 if (TRACE_INSN_P (cpu
))
1712 sprintf (hazard_name
, "Data hazard for fr%d:", in_FR
);
1718 load_wait_for_GRdouble (SIM_CPU
*cpu
, INT in_GR
)
1723 while (load_pending_for_register (cpu
, in_GR
, 2/*words*/, REGTYPE_NONE
))
1725 frv_model_advance_cycles (cpu
, 1);
1730 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1731 ps
->vliw_wait
+= wait
;
1732 ps
->vliw_load_stall
+= wait
;
1733 if (TRACE_INSN_P (cpu
))
1734 sprintf (hazard_name
, "Data hazard for gr%d:", in_GR
);
1740 load_wait_for_FRdouble (SIM_CPU
*cpu
, INT in_FR
)
1744 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1747 while (load_pending_for_register (cpu
, in_FR
, 2/*words*/, REGTYPE_FR
))
1749 frv_model_advance_cycles (cpu
, 1);
1752 /* Post processing time may have been added to the registers'
1753 latencies after the loads were processed. Account for that too.
1759 frv_model_advance_cycles (cpu
, fr
[in_FR
]);
1765 wait
+= fr
[in_FR
+ 1];
1766 frv_model_advance_cycles (cpu
, fr
[in_FR
+ 1]);
1769 /* Update the vliw_wait with the number of cycles we waited for the
1770 load and any post-processing. */
1773 ps
->vliw_wait
+= wait
;
1774 ps
->vliw_load_stall
+= wait
;
1775 if (TRACE_INSN_P (cpu
))
1776 sprintf (hazard_name
, "Data hazard for fr%d:", in_FR
);
1782 enforce_full_fr_latency (SIM_CPU
*cpu
, INT in_FR
)
1784 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1785 ps
->fr_busy_adjust
[in_FR
] = -1;
1788 /* Calculate how long the post processing for a floating point insn must
1789 wait for resources to become available. */
1791 post_wait_for_FR (SIM_CPU
*cpu
, INT in_FR
)
1793 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1794 int *fr
= ps
->fr_busy
;
1796 if (in_FR
>= 0 && fr
[in_FR
] > ps
->post_wait
)
1798 ps
->post_wait
= fr
[in_FR
];
1799 if (TRACE_INSN_P (cpu
))
1800 sprintf (hazard_name
, "Data hazard for fr%d:", in_FR
);
1804 /* Calculate how long the post processing for a floating point insn must
1805 wait for resources to become available. */
1807 post_wait_for_FRdouble (SIM_CPU
*cpu
, INT in_FR
)
1809 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1810 int *fr
= ps
->fr_busy
;
1814 if (fr
[in_FR
] > ps
->post_wait
)
1816 ps
->post_wait
= fr
[in_FR
];
1817 if (TRACE_INSN_P (cpu
))
1818 sprintf (hazard_name
, "Data hazard for fr%d:", in_FR
);
1820 if (in_FR
< 63 && fr
[in_FR
+ 1] > ps
->post_wait
)
1822 ps
->post_wait
= fr
[in_FR
+ 1];
1823 if (TRACE_INSN_P (cpu
))
1824 sprintf (hazard_name
, "Data hazard for fr%d:", in_FR
+ 1);
1830 post_wait_for_ACC (SIM_CPU
*cpu
, INT in_ACC
)
1832 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1833 int *acc
= ps
->acc_busy
;
1835 if (in_ACC
>= 0 && acc
[in_ACC
] > ps
->post_wait
)
1837 ps
->post_wait
= acc
[in_ACC
];
1838 if (TRACE_INSN_P (cpu
))
1839 sprintf (hazard_name
, "Data hazard for acc%d:", in_ACC
);
1844 post_wait_for_CCR (SIM_CPU
*cpu
, INT in_CCR
)
1846 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1847 int *ccr
= ps
->ccr_busy
;
1849 if (in_CCR
>= 0 && ccr
[in_CCR
] > ps
->post_wait
)
1851 ps
->post_wait
= ccr
[in_CCR
];
1852 if (TRACE_INSN_P (cpu
))
1855 sprintf (hazard_name
, "Data hazard for icc%d:", in_CCR
- 4);
1857 sprintf (hazard_name
, "Data hazard for fcc%d:", in_CCR
);
1863 post_wait_for_SPR (SIM_CPU
*cpu
, INT in_SPR
)
1865 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1866 int *spr
= ps
->spr_busy
;
1868 if (in_SPR
>= 0 && spr
[in_SPR
] > ps
->post_wait
)
1870 ps
->post_wait
= spr
[in_SPR
];
1871 if (TRACE_INSN_P (cpu
))
1872 sprintf (hazard_name
, "Data hazard for spr[%d]:", in_SPR
);
1877 post_wait_for_fdiv (SIM_CPU
*cpu
, INT slot
)
1879 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1880 int *fdiv
= ps
->fdiv_busy
;
1882 /* Multiple floating point divisions in the same slot need only wait 1
1884 if (fdiv
[slot
] > 0 && 1 > ps
->post_wait
)
1887 if (TRACE_INSN_P (cpu
))
1889 sprintf (hazard_name
, "Resource hazard for floating point division in slot F%d:", slot
);
1895 post_wait_for_fsqrt (SIM_CPU
*cpu
, INT slot
)
1897 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1898 int *fsqrt
= ps
->fsqrt_busy
;
1900 /* Multiple floating point square roots in the same slot need only wait 1
1902 if (fsqrt
[slot
] > 0 && 1 > ps
->post_wait
)
1905 if (TRACE_INSN_P (cpu
))
1907 sprintf (hazard_name
, "Resource hazard for square root in slot F%d:", slot
);
1913 post_wait_for_float (SIM_CPU
*cpu
, INT slot
)
1915 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1916 int *flt
= ps
->float_busy
;
1918 /* Multiple floating point square roots in the same slot need only wait 1
1920 if (flt
[slot
] > ps
->post_wait
)
1922 ps
->post_wait
= flt
[slot
];
1923 if (TRACE_INSN_P (cpu
))
1925 sprintf (hazard_name
, "Resource hazard for floating point unit in slot F%d:", slot
);
1931 post_wait_for_media (SIM_CPU
*cpu
, INT slot
)
1933 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1934 int *media
= ps
->media_busy
;
1936 /* Multiple floating point square roots in the same slot need only wait 1
1938 if (media
[slot
] > ps
->post_wait
)
1940 ps
->post_wait
= media
[slot
];
1941 if (TRACE_INSN_P (cpu
))
1943 sprintf (hazard_name
, "Resource hazard for media unit in slot M%d:", slot
);
1948 /* Print cpu-specific profile information. */
1949 #define COMMAS(n) sim_add_commas (comma_buf, sizeof (comma_buf), (n))
1952 print_cache (SIM_CPU
*cpu
, FRV_CACHE
*cache
, const char *cache_name
)
1954 SIM_DESC sd
= CPU_STATE (cpu
);
1961 sim_io_printf (sd
, " %s Cache\n\n", cache_name
);
1962 accesses
= cache
->statistics
.accesses
;
1963 sim_io_printf (sd
, " Total accesses: %s\n", COMMAS (accesses
));
1967 unsigned hits
= cache
->statistics
.hits
;
1968 sim_io_printf (sd
, " Hits: %s\n", COMMAS (hits
));
1969 rate
= (float)hits
/ accesses
;
1970 sim_io_printf (sd
, " Hit rate: %.2f%%\n", rate
* 100);
1974 sim_io_printf (sd
, " Model %s has no %s cache\n",
1975 MODEL_NAME (CPU_MODEL (cpu
)), cache_name
);
1977 sim_io_printf (sd
, "\n");
/* This table must correspond to the UNIT_ATTR table in
   opcodes/frv-desc.h.  Only the units up to UNIT_C need be
   listed since the others cannot occur after mapping.  */
static char *slot_names[] =
{
  "none",
  "I0", "I1", "I01", "I2", "I3", "IALL",
  "FM0", "FM1", "FM01", "FM2", "FM3", "FMALL", "FMLOW",
  "B0", "B1", "B01",
  "C"
};
1994 print_parallel (SIM_CPU
*cpu
, bool verbose
)
1996 SIM_DESC sd
= CPU_STATE (cpu
);
1997 PROFILE_DATA
*p
= CPU_PROFILE_DATA (cpu
);
1998 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1999 unsigned total
, vliw
;
2003 sim_io_printf (sd
, "Model %s Parallelization\n\n",
2004 MODEL_NAME (CPU_MODEL (cpu
)));
2006 total
= PROFILE_TOTAL_INSN_COUNT (p
);
2007 sim_io_printf (sd
, " Total instructions: %s\n", COMMAS (total
));
2008 vliw
= ps
->vliw_insns
;
2009 sim_io_printf (sd
, " VLIW instructions: %s\n", COMMAS (vliw
));
2010 average
= (float)total
/ vliw
;
2011 sim_io_printf (sd
, " Average VLIW length: %.2f\n", average
);
2012 average
= (float)PROFILE_MODEL_TOTAL_CYCLES (p
) / vliw
;
2013 sim_io_printf (sd
, " Cycles per VLIW instruction: %.2f\n", average
);
2014 average
= (float)total
/ PROFILE_MODEL_TOTAL_CYCLES (p
);
2015 sim_io_printf (sd
, " Instructions per cycle: %.2f\n", average
);
2021 int max_name_len
= 0;
2022 for (i
= UNIT_NIL
+ 1; i
< UNIT_NUM_UNITS
; ++i
)
2024 if (INSNS_IN_SLOT (i
))
2027 if (INSNS_IN_SLOT (i
) > max_val
)
2028 max_val
= INSNS_IN_SLOT (i
);
2029 len
= strlen (slot_names
[i
]);
2030 if (len
> max_name_len
)
2036 sim_io_printf (sd
, "\n");
2037 sim_io_printf (sd
, " Instructions per slot:\n");
2038 sim_io_printf (sd
, "\n");
2039 for (i
= UNIT_NIL
+ 1; i
< UNIT_NUM_UNITS
; ++i
)
2041 if (INSNS_IN_SLOT (i
) != 0)
2043 sim_io_printf (sd
, " %*s: %*s: ",
2044 max_name_len
, slot_names
[i
],
2045 max_val
< 10000 ? 5 : 10,
2046 COMMAS (INSNS_IN_SLOT (i
)));
2047 sim_profile_print_bar (sd
, cpu
, PROFILE_HISTOGRAM_WIDTH
,
2050 sim_io_printf (sd
, "\n");
2053 } /* details to print */
2056 sim_io_printf (sd
, "\n");
2060 frv_profile_info (SIM_CPU
*cpu
, bool verbose
)
2062 /* FIXME: Need to add smp support. */
2063 PROFILE_DATA
*p
= CPU_PROFILE_DATA (cpu
);
2065 #if WITH_PROFILE_PARALLEL_P
2066 if (PROFILE_FLAGS (p
) [PROFILE_PARALLEL_IDX
])
2067 print_parallel (cpu
, verbose
);
2070 #if WITH_PROFILE_CACHE_P
2071 if (PROFILE_FLAGS (p
) [PROFILE_CACHE_IDX
])
2073 SIM_DESC sd
= CPU_STATE (cpu
);
2074 sim_io_printf (sd
, "Model %s Cache Statistics\n\n",
2075 MODEL_NAME (CPU_MODEL (cpu
)));
2076 print_cache (cpu
, CPU_INSN_CACHE (cpu
), "Instruction");
2077 print_cache (cpu
, CPU_DATA_CACHE (cpu
), "Data");
2079 #endif /* WITH_PROFILE_CACHE_P */
2082 /* A hack to get registers referenced for profiling. */
2083 SI
frv_ref_SI (SI ref
) {return ref
;}
2084 #endif /* WITH_PROFILE_MODEL_P */