A few fixes/additions:
kernel/timer.c (newos.git):

/*
** Copyright 2001, Travis Geiselbrecht. All rights reserved.
** Distributed under the terms of the NewOS License.
*/
#include <kernel/kernel.h>
#include <kernel/console.h>
#include <kernel/debug.h>
#include <kernel/thread.h>
#include <kernel/int.h>
#include <kernel/smp.h>
#include <kernel/vm.h>
#include <kernel/timer.h>
#include <sys/errors.h>
#include <boot/stage2.h>

#include <kernel/arch/cpu.h>
#include <kernel/arch/timer.h>
#include <kernel/arch/smp.h>
// one sorted queue of pending timer events per cpu, each protected by its own spinlock
static struct timer_event * volatile events[SMP_MAX_CPUS] = { NULL, };
static spinlock_t timer_spinlock[SMP_MAX_CPUS] = { 0, };

int timer_init(kernel_args *ka)
{
	dprintf("init_timer: entry\n");

	return arch_init_timer(ka);
}

// NOTE: expects interrupts to be off
static void add_event_to_list(struct timer_event *event, struct timer_event * volatile *list)
{
	struct timer_event *next;
	struct timer_event *last = NULL;

	// stick it in the event list
	next = *list;
	while(next != NULL && next->sched_time < event->sched_time) {
		last = next;
		next = next->next;
	}

	if(last != NULL) {
		event->next = last->next;
		last->next = event;
	} else {
		event->next = next;
		*list = event;
	}
}

int timer_interrupt()
{
	time_t curr_time = system_time();
	struct timer_event *event;
	spinlock_t *spinlock;
	int curr_cpu = smp_get_current_cpu();
	int rc = INT_NO_RESCHEDULE;

//	dprintf("timer_interrupt: time 0x%x 0x%x, cpu %d\n", system_time(), smp_get_current_cpu());

	spinlock = &timer_spinlock[curr_cpu];

	acquire_spinlock(spinlock);

restart_scan:
	event = events[curr_cpu];
	if(event != NULL && event->sched_time < curr_time) {
		// this event needs to happen
		int mode = event->mode;

		events[curr_cpu] = event->next;
		event->sched_time = 0; // mark it as not scheduled

		// drop the lock while the callback runs; the queue may change under us
		release_spinlock(spinlock);

		// call the callback
		// note: if the event is not periodic, it is ok
		// to delete the event structure inside the callback
		if(event->func != NULL) {
			if(event->func(event->data) == INT_RESCHEDULE)
				rc = INT_RESCHEDULE;
		}

		acquire_spinlock(spinlock);

		if(mode == TIMER_MODE_PERIODIC) {
			// we need to adjust it and add it back to the list
			event->sched_time = system_time() + event->periodic_time;
			if(event->sched_time == 0)
				event->sched_time = 1; // if we wrapped around and happen to hit zero,
				                       // set it to one, since zero represents not scheduled
			add_event_to_list(event, &events[curr_cpu]);
		}

		goto restart_scan; // the list may have changed
	}

	// set up the next hardware timer
	if(events[curr_cpu] != NULL)
		arch_timer_set_hardware_timer(events[curr_cpu]->sched_time - system_time());

	release_spinlock(spinlock);

	return rc;
}

void timer_setup_timer(timer_callback func, void *data, struct timer_event *event)
{
	event->func = func;
	event->data = data;
	event->sched_time = 0;
}

int timer_set_event(time_t relative_time, timer_mode mode, struct timer_event *event)
{
	int state;
	int curr_cpu;

	if(event == NULL)
		return ERR_INVALID_ARGS;

	if(event->sched_time != 0)
		panic("timer_set_event: event 0x%x in list already!\n", event);

	event->sched_time = system_time() + relative_time;
	if(event->sched_time == 0)
		event->sched_time = 1; // if we wrapped around and happen to hit zero,
		                       // set it to one, since zero represents not scheduled
	event->mode = mode;
	if(event->mode == TIMER_MODE_PERIODIC)
		event->periodic_time = relative_time;

	state = int_disable_interrupts();

	curr_cpu = smp_get_current_cpu();

	acquire_spinlock(&timer_spinlock[curr_cpu]);

	add_event_to_list(event, &events[curr_cpu]);

	// if the new event ended up at the head of the list, reprogram the hardware timer
	if(event == events[curr_cpu]) {
		arch_timer_set_hardware_timer(relative_time);
	}

	release_spinlock(&timer_spinlock[curr_cpu]);
	int_restore_interrupts(state);

	return 0;
}

int timer_cancel_event(struct timer_event *event)
{
	int state;
	struct timer_event *last = NULL;
	struct timer_event *e;
	bool reset_timer = false;
	bool foundit = false;
	int num_cpus = smp_get_num_cpus();
	int cpu;
	int curr_cpu;

	if(event->sched_time == 0)
		return 0; // it's not scheduled

	state = int_disable_interrupts();
	curr_cpu = smp_get_current_cpu();
	acquire_spinlock(&timer_spinlock[curr_cpu]);

	// walk through all of the cpus' timer queues
	for(cpu = 0; cpu < num_cpus; cpu++) {
		e = events[cpu];
		while(e != NULL) {
			if(e == event) {
				// we found it
				foundit = true;
				if(e == events[cpu]) {
					events[cpu] = e->next;
					// we'll need to reset the local timer if
					// this is in the local timer queue
					if(cpu == curr_cpu)
						reset_timer = true;
				} else {
					last->next = e->next;
				}
				e->next = NULL;
				// break out of the whole thing
				goto done;
			}
			last = e;
			e = e->next;
		}
	}

done:
	if(reset_timer == true) {
		if(events[cpu] == NULL) {
			arch_timer_clear_hardware_timer();
		} else {
			arch_timer_set_hardware_timer(events[cpu]->sched_time - system_time());
		}
	}

	release_spinlock(&timer_spinlock[curr_cpu]);
	int_restore_interrupts(state);

	return (foundit ? 0 : ERR_GENERAL);
}
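
For anyone poking at this, here is a minimal usage sketch (not part of timer.c) showing how the interface above fits together. It assumes the declarations in <kernel/timer.h> match what this file uses: timer_callback takes a void * and returns INT_RESCHEDULE or INT_NO_RESCHEDULE (inferred from the call site in timer_interrupt()), and relative times are in whatever units system_time() returns (microseconds, as far as I know). The callback name, tick counter, and one-second period are made up for illustration.

// usage sketch only -- not part of timer.c; names below are illustrative
#include <kernel/kernel.h>
#include <kernel/debug.h>
#include <kernel/int.h>
#include <kernel/timer.h>

static struct timer_event my_timer;
static int my_ticks = 0;

// signature inferred from the call site in timer_interrupt(): event->func(event->data)
static int my_tick_callback(void *data)
{
	int *count = (int *)data;

	(*count)++;
	dprintf("tick %d\n", *count);

	// return INT_RESCHEDULE instead if the handler should trigger a reschedule
	return INT_NO_RESCHEDULE;
}

static void my_timer_demo(void)
{
	// bind the callback and mark the event as not scheduled (sched_time == 0)
	timer_setup_timer(my_tick_callback, &my_ticks, &my_timer);

	// queue it on this cpu, firing every 1000000 time units (one second if usecs)
	timer_set_event(1000000, TIMER_MODE_PERIODIC, &my_timer);

	// ... later: pull it back out of whichever per-cpu queue it sits in
	timer_cancel_event(&my_timer);
}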