sim/ppc/events.c
/*  This file is part of the program psim.

    Copyright (C) 1994-1998, Andrew Cagney <cagney@highland.com.au>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 3 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, see <http://www.gnu.org/licenses/>.

    */
#ifndef _EVENTS_C_
#define _EVENTS_C_

#include "basics.h"
#include "events.h"

#include <signal.h>
#include <stdlib.h>

#if !defined (SIM_EVENTS_POLL_RATE)
#define SIM_EVENTS_POLL_RATE 0x1000
#endif
/* The event queue maintains a single absolute time using two
   variables.

   TIME_OF_EVENT: this holds the time at which the next event is meant
   to occur.  If there is no next event it will hold the time of the
   last event.

   TIME_FROM_EVENT: The current distance from TIME_OF_EVENT.  If an
   event is pending, this will be positive.  If no future event is
   pending this will be negative.  This variable is decremented once
   for each iteration of a clock cycle.

   Initially, the clock is started at time one (1) with TIME_OF_EVENT
   == 0 and TIME_FROM_EVENT == -1.

   Clearly there is a bug in that this code assumes that the absolute
   time counter will never become greater than 2^62. */
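/* Worked example: if the current time is 10 and the next event is due
   at time 15, then TIME_OF_EVENT == 15 and TIME_FROM_EVENT == 5; the
   current time is always recovered as TIME_OF_EVENT - TIME_FROM_EVENT
   (see event_queue_time() below). */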
typedef struct _event_entry event_entry;
struct _event_entry {
  void *data;
  event_handler *handler;
  int64_t time_of_event;
  event_entry *next;
};
struct _event_queue {
  int processing;
  event_entry *queue;
  event_entry *volatile held;
  event_entry *volatile *volatile held_end;
  int64_t time_of_event;
  int64_t time_from_event;
};
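/* The always-pending poll event: gives the host a chance to request
   that the simulation stop (via sim_io_poll_quit()) and then
   re-schedules itself. */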
STATIC_INLINE_EVENTS\
(void)
sim_events_poll (void *data)
{
  event_queue *queue = data;
  /* just re-schedule SIM_EVENTS_POLL_RATE ticks into the future */
  event_queue_schedule (queue, SIM_EVENTS_POLL_RATE, sim_events_poll, queue);
  sim_io_poll_quit ();
}
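/* Create a new, empty event queue.  ZALLOC zeroes the structure, so
   both time counters start at zero and the held (signal-side) list is
   empty. */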
INLINE_EVENTS\
(event_queue *)
event_queue_create(void)
{
  event_queue *new_event_queue = ZALLOC(event_queue);

  new_event_queue->processing = 0;
  new_event_queue->queue = NULL;
  new_event_queue->held = NULL;
  new_event_queue->held_end = &new_event_queue->held;

  /* both times are already zero */
  return new_event_queue;
}
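/* (Re)initialize QUEUE: free any held and pending events, wind the
   clock back to its initial state and schedule the first poll
   event. */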
INLINE_EVENTS\
(void)
event_queue_init(event_queue *queue)
{
  event_entry *event;

  /* drain the interrupt queue */
  {
#if defined(HAVE_SIGPROCMASK) && defined(SIG_SETMASK)
    sigset_t old_mask;
    sigset_t new_mask;
    sigfillset(&new_mask);
    /*-LOCK-*/ sigprocmask(SIG_SETMASK, &new_mask, &old_mask);
#endif
    event = queue->held;
    while (event != NULL) {
      event_entry *dead = event;
      event = event->next;
      free(dead);
    }
    queue->held = NULL;
    queue->held_end = &queue->held;
#if defined(HAVE_SIGPROCMASK) && defined(SIG_SETMASK)
    /*-UNLOCK-*/ sigprocmask(SIG_SETMASK, &old_mask, NULL);
#endif
  }

  /* drain the normal queue */
  event = queue->queue;
  while (event != NULL) {
    event_entry *dead = event;
    event = event->next;
    free(dead);
  }
  queue->queue = NULL;

  /* wind time back to one */
  queue->processing = 0;
  queue->time_of_event = 0;
  queue->time_from_event = -1;

  /* schedule our initial counter event */
  event_queue_schedule (queue, 0, sim_events_poll, queue);
}
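/* Return the current absolute simulation time, derived from the two
   counters described above. */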
INLINE_EVENTS\
(int64_t)
event_queue_time(event_queue *queue)
{
  return queue->time_of_event - queue->time_from_event;
}
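/* Recompute TIME_OF_EVENT and TIME_FROM_EVENT after the head of the
   queue has changed, keeping the current absolute time unchanged. */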
STATIC_INLINE_EVENTS\
(void)
update_time_from_event(event_queue *events)
{
  int64_t current_time = event_queue_time(events);
  if (events->queue != NULL) {
    events->time_from_event = (events->queue->time_of_event - current_time);
    events->time_of_event = events->queue->time_of_event;
  }
  else {
    events->time_of_event = current_time - 1;
    events->time_from_event = -1;
  }
  if (WITH_TRACE && ppc_trace[trace_events])
    {
      event_entry *event;
      int i;
      for (event = events->queue, i = 0;
           event != NULL;
           event = event->next, i++)
        {
          TRACE(trace_events, ("event time-from-event - time %" PRIi64 ", delta %" PRIi64 " - event %d, tag %p, time %" PRIi64 ", handler %p, data %p\n",
                               current_time,
                               events->time_from_event,
                               i,
                               event,
                               event->time_of_event,
                               event->handler,
                               event->data));
        }
    }
  ASSERT(current_time == event_queue_time(events));
}
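/* Insert NEW_EVENT into the time-ordered queue so that it fires DELTA
   ticks from the current time. */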
STATIC_INLINE_EVENTS\
(void)
insert_event_entry(event_queue *events,
                   event_entry *new_event,
                   int64_t delta)
{
  event_entry *curr;
  event_entry **prev;
  int64_t time_of_event;

  if (delta < 0)
    error("what is past is past!\n");

  /* compute when the event should occur */
  time_of_event = event_queue_time(events) + delta;

  /* find the queue insertion point - things are time ordered */
  prev = &events->queue;
  curr = events->queue;
  while (curr != NULL && time_of_event >= curr->time_of_event) {
    ASSERT(curr->next == NULL
           || curr->time_of_event <= curr->next->time_of_event);
    prev = &curr->next;
    curr = curr->next;
  }
  ASSERT(curr == NULL || time_of_event < curr->time_of_event);

  /* insert it */
  new_event->next = curr;
  *prev = new_event;
  new_event->time_of_event = time_of_event;

  /* adjust the time until the first event */
  update_time_from_event(events);
}
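/* Schedule HANDLER(DATA) to be called DELTA_TIME ticks from now.
   Returns a tag that can later be passed to event_queue_deschedule(). */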
INLINE_EVENTS\
(event_entry_tag)
event_queue_schedule(event_queue *events,
                     int64_t delta_time,
                     event_handler *handler,
                     void *data)
{
  event_entry *new_event = ZALLOC(event_entry);
  new_event->data = data;
  new_event->handler = handler;
  insert_event_entry(events, new_event, delta_time);
  TRACE(trace_events, ("event scheduled at %" PRIi64 " - tag %p - time %" PRIi64 ", handler %p, data %p\n",
                       event_queue_time(events),
                       new_event,
                       new_event->time_of_event,
                       new_event->handler,
                       new_event->data));
  return (event_entry_tag)new_event;
}
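/* As event_queue_schedule() but safe to call from a signal handler:
   the new event is appended to the HELD list (with signals blocked
   where sigprocmask is available) and only merged into the real queue
   by the next event_queue_tick(). */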
INLINE_EVENTS\
(event_entry_tag)
event_queue_schedule_after_signal(event_queue *events,
                                  int64_t delta_time,
                                  event_handler *handler,
                                  void *data)
{
  event_entry *new_event = ZALLOC(event_entry);

  new_event->data = data;
  new_event->handler = handler;
  new_event->time_of_event = delta_time; /* work it out later */
  new_event->next = NULL;

  {
#if defined(HAVE_SIGPROCMASK) && defined(SIG_SETMASK)
    sigset_t old_mask;
    sigset_t new_mask;
    sigfillset(&new_mask);
    /*-LOCK-*/ sigprocmask(SIG_SETMASK, &new_mask, &old_mask);
#endif
    if (events->held == NULL) {
      events->held = new_event;
    }
    else {
      *events->held_end = new_event;
    }
    events->held_end = &new_event->next;
#if defined(HAVE_SIGPROCMASK) && defined(SIG_SETMASK)
    /*-UNLOCK-*/ sigprocmask(SIG_SETMASK, &old_mask, NULL);
#endif
  }

  TRACE(trace_events, ("event scheduled at %" PRIi64 " - tag %p - time %" PRIi64 ", handler %p, data %p\n",
                       event_queue_time(events),
                       new_event,
                       new_event->time_of_event,
                       new_event->handler,
                       new_event->data));

  return (event_entry_tag)new_event;
}
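/* Remove a previously scheduled event from the queue.  A tag that is
   not found (for example because the event has already been
   delivered) is silently ignored, apart from a trace message. */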
INLINE_EVENTS\
(void)
event_queue_deschedule(event_queue *events,
                       event_entry_tag event_to_remove)
{
  event_entry *to_remove = (event_entry*)event_to_remove;
  ASSERT((events->time_from_event >= 0) == (events->queue != NULL));
  if (event_to_remove != NULL) {
    event_entry *current;
    event_entry **ptr_to_current;
    for (ptr_to_current = &events->queue, current = *ptr_to_current;
         current != NULL && current != to_remove;
         ptr_to_current = &current->next, current = *ptr_to_current);
    if (current == to_remove) {
      *ptr_to_current = current->next;
      TRACE(trace_events, ("event descheduled at %" PRIi64 " - tag %p - time %" PRIi64 ", handler %p, data %p\n",
                           event_queue_time(events),
                           event_to_remove,
                           current->time_of_event,
                           current->handler,
                           current->data));
      free(current);
      update_time_from_event(events);
    }
    else {
      TRACE(trace_events, ("event descheduled at %" PRIi64 " - tag %p - not found\n",
                           event_queue_time(events),
                           event_to_remove));
    }
  }
  ASSERT((events->time_from_event >= 0) == (events->queue != NULL));
}
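/* Advance the clock by one tick, first merging any signal-delivered
   (held) events into the real queue.  Returns non-zero when the time
   of the next event has arrived and event_queue_process() should be
   called. */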
INLINE_EVENTS\
(int)
event_queue_tick(event_queue *events)
{
  int64_t time_from_event;

  /* we should only be here when the previous tick has been fully processed */
  ASSERT(!events->processing);

  /* move any events that were queued by any signal handlers onto the
     real event queue.  BTW: When inlining, having this code here,
     instead of in event_queue_process() causes GCC to put greater
     weight on keeping the pointer EVENTS in a register.  This, in
     turn results in better code being output. */
  if (events->held != NULL) {
    event_entry *held_events;
    event_entry *curr_event;

    {
#if defined(HAVE_SIGPROCMASK) && defined(SIG_SETMASK)
      sigset_t old_mask;
      sigset_t new_mask;
      sigfillset(&new_mask);
      /*-LOCK-*/ sigprocmask(SIG_SETMASK, &new_mask, &old_mask);
#endif
      held_events = events->held;
      events->held = NULL;
      events->held_end = &events->held;
#if defined(HAVE_SIGPROCMASK) && defined(SIG_SETMASK)
      /*-UNLOCK-*/ sigprocmask(SIG_SETMASK, &old_mask, NULL);
#endif
    }

    do {
      curr_event = held_events;
      held_events = curr_event->next;
      insert_event_entry(events, curr_event, curr_event->time_of_event);
    } while (held_events != NULL);
  }

  /* advance time, checking to see if we've reached time zero which
     would indicate the time for the next event has arrived */
  time_from_event = events->time_from_event;
  events->time_from_event = time_from_event - 1;
  return time_from_event == 0;
}
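/* Deliver every event whose time has arrived.  The time to the next
   event is recomputed before each handler runs, so handlers are free
   to schedule further events.  The ever-present poll event guarantees
   that the queue never becomes empty. */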
INLINE_EVENTS\
(void)
event_queue_process(event_queue *events)
{
  int64_t event_time = event_queue_time(events);

  ASSERT((events->time_from_event == -1 && events->queue != NULL)
         || events->processing); /* something to do */

  /* consume all events for this or earlier times.  Be careful to
     allow a new event to appear under our feet */
  events->processing = 1;
  while (events->queue != NULL
         && events->queue->time_of_event <= event_time) {
    event_entry *to_do = events->queue;
    event_handler *handler = to_do->handler;
    void *data = to_do->data;
    events->queue = to_do->next;
    TRACE(trace_events, ("event issued at %" PRIi64 " - tag %p - time %" PRIi64 ", handler %p, data %p\n",
                         event_time,
                         to_do,
                         to_do->time_of_event,
                         handler,
                         data));
    free(to_do);
    /* Always re-compute the time to the next event so that HANDLER()
       can safely insert new events into the queue. */
    update_time_from_event(events);
    handler(data);
  }
  events->processing = 0;

  ASSERT(events->time_from_event > 0);
  ASSERT(events->queue != NULL); /* always poll event */
}
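/* A minimal sketch, not part of the original file, of how a simulator
   main loop might drive this queue.  The per-cycle step function
   run_one_cycle() and the flag keep_running are hypothetical:

     event_queue *events = event_queue_create();
     event_queue_init(events);
     while (keep_running) {
       run_one_cycle();
       if (event_queue_tick(events))
         event_queue_process(events);
     }
*/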
#endif /* _EVENTS_C_ */