Import 2.1.115pre3
[davej-history.git] / arch / ppc / kernel / idle.c
/*
 * $Id: idle.c,v 1.48 1998/07/30 11:29:22 davem Exp $
 *
 * Idle daemon for PowerPC.  Idle daemon will handle any action
 * that needs to be taken when the system becomes idle.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#define __KERNEL_SYSCALLS__

#include <linux/config.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/malloc.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/cache.h>
#ifdef CONFIG_PMAC
#include <asm/mediabay.h>
#endif
void zero_paged(void);
void power_save(void);
void inline htab_reclaim(void);
unsigned long htab_reclaim_on = 0;
unsigned long zero_paged_on = 0;
unsigned long powersave_nap = 0;
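/*
 * Run-time switches for the idle-time work below: htab_reclaim_on and
 * zero_paged_on gate htab_reclaim() and zero_paged() respectively, and
 * powersave_nap selects NAP rather than DOZE mode in power_save().
 */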
int idled(void *unused)
{
        int ret = -EPERM;

        for (;;)
        {
                __sti();

                /* endless loop with no priority at all */
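                /*
                 * A negative priority and counter keep the scheduler's
                 * goodness() from ever choosing this task while any
                 * other runnable task exists.
                 */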
                current->priority = -100;
                current->counter = -100;

                check_pgt_cache();

                if ( !current->need_resched && zero_paged_on ) zero_paged();
                if ( !current->need_resched && htab_reclaim_on ) htab_reclaim();

                /*
                 * Only processor 1 may sleep now since processor 2 would
                 * never wake up.  Need to add timer code for processor 2
                 * then it can sleep. -- Cort
                 */
#ifndef __SMP__
                if ( !current->need_resched ) power_save();
#endif /* __SMP__ */
                schedule();
        }
        ret = 0;
        return ret;
}
#ifdef __SMP__
/*
 * SMP entry into the idle task - calls the same thing as the
 * non-smp versions. -- Cort
 */
int cpu_idle(void *unused)
{
        idled(unused);
        return 0;
}
#endif /* __SMP__ */
/*
 * Syscall entry into the idle task. -- Cort
 */
asmlinkage int sys_idle(void)
{
        extern int media_bay_task(void *);

        if(current->pid != 0)
                return -EPERM;

#ifdef CONFIG_PMAC
        if (media_bay_present)
                kernel_thread(media_bay_task, NULL, 0);
#endif

        idled(NULL);
        return 0; /* should never execute this but it makes gcc happy -- Cort */
}
/*
 * Mark 'zombie' pte's in the hash table as invalid.
 * This improves performance for the hash table reload code
 * a bit since we don't consider unused pages as valid.
 *  -- Cort
 */
PTE *reclaim_ptr = 0;
void inline htab_reclaim(void)
{
#ifndef CONFIG_8xx
#if 0
        PTE *ptr, *start;
        static int dir = 1;
#endif
        struct task_struct *p;
        unsigned long valid = 0;
        extern PTE *Hash, *Hash_end;
        extern unsigned long Hash_size;

        /* if we don't have a htab */
        if ( Hash_size == 0 )
                return;
        lock_dcache(1);
#if 0
        /* find a random place in the htab to start each time */
        start = &Hash[jiffies%(Hash_size/sizeof(PTE))];
        /* go a different direction each time */
        dir *= -1;
        for ( ptr = start;
              !current->need_resched && (ptr != Hash_end) && (ptr != Hash);
              ptr += dir)
        {
#else
        if ( !reclaim_ptr ) reclaim_ptr = Hash;
        while ( !current->need_resched )
        {
                reclaim_ptr++;
                if ( reclaim_ptr == Hash_end ) reclaim_ptr = Hash;
#endif
                if (!reclaim_ptr->v)
                        continue;
                valid = 0;
                for_each_task(p)
                {
                        if ( current->need_resched )
                                goto out;
                        /* if this vsid/context is in use */
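                        /*
                         * The upper bits of the hash-table VSID encode the
                         * owning mm's context (one VSID per 256MB segment,
                         * sixteen per context on this tree, hence the >> 4).
                         */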
                        if ( (reclaim_ptr->vsid >> 4) == p->mm->context )
                        {
                                valid = 1;
                                break;
                        }
                }
                if ( valid )
                        continue;
                /* this pte isn't used */
                reclaim_ptr->v = 0;
        }
out:
        if ( current->need_resched ) printk("need_resched: %lx\n", current->need_resched);
        unlock_dcache();
#endif /* CONFIG_8xx */
}
/*
 * Returns a pre-zero'd page from the list otherwise returns
 * NULL.
 */
unsigned long get_zero_page_fast(void)
{
        unsigned long page = 0;

        atomic_inc((atomic_t *)&quicklists.zeropage_calls);
        if ( zero_quicklist )
        {
                /* atomically remove this page from the list */
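                /*
                 * lwarx/stwcx. form a load-reserved/store-conditional
                 * pair: lwarx reads the list head and reserves the word,
                 * stwcx. stores the new head only if nothing else has
                 * touched it, and bne- retries the pop if the
                 * reservation was lost.
                 */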
185 asm ( "101:lwarx %1,0,%2\n" /* reserve zero_cache */
186 " lwz %0,0(%1)\n" /* get next -- new zero_cache */
187 " stwcx. %0,0,%2\n" /* update zero_cache */
188 " bne- 101b\n" /* if lost reservation try again */
189 : "=&r" (zero_quicklist), "=&r" (page)
190 : "r" (&zero_quicklist)
191 : "cc" );
192 #ifdef __SMP__
193 /* if another cpu beat us above this can happen -- Cort */
194 if ( page == 0 )
195 return 0;
196 #endif /* __SMP__ */
197 /* we can update zerocount after the fact since it is not
198 * used for anything but control of a loop which doesn't
199 * matter since it won't affect anything if it zero's one
200 * less page -- Cort
202 atomic_inc((atomic_t *)&quicklists.zeropage_hits);
203 atomic_dec((atomic_t *)&zero_cache_sz);
205 /* zero out the pointer to next in the page */
206 *(unsigned long *)page = 0;
207 return page;
209 return 0;
/*
 * Experimental stuff to zero out pages in the idle task
 * to speed up get_free_pages(). Zero's out pages until
 * we've reached the limit of zero'd pages.  We handle
 * reschedule()'s in here so when we return we know we've
 * zero'd all we need to for now.
 */
int zero_cache_water[2] = { 25, 96 }; /* low and high water marks for zero cache */
void zero_paged(void)
{
        unsigned long pageptr = 0;      /* current page being zero'd */
        unsigned long bytecount = 0;
        pte_t *pte;

        if ( zero_cache_sz >= zero_cache_water[0] )
                return;
        while ( (zero_cache_sz < zero_cache_water[1]) && (!current->need_resched) )
        {
                /*
                 * Mark a page as reserved so we can mess with it
                 * If we're interrupted we keep this page and our place in it
                 * since we validly hold it and it's reserved for us.
                 */
                pageptr = __get_free_pages(GFP_ATOMIC, 0);
                if ( !pageptr )
                        return;

                if ( current->need_resched )
                        schedule();
                /*
                 * Make the page no cache so we don't blow our cache with 0's
                 * We should just turn off the cache instead. -- Cort
                 */
                pte = find_pte(init_task.mm, pageptr);
                if ( !pte )
                {
                        printk("pte NULL in zero_paged()\n");
                        return;
                }

                pte_uncache(*pte);
                flush_tlb_page(find_vma(init_task.mm,pageptr),pageptr);

                /*
                 * Important here to not take time away from real processes.
                 */
                for ( bytecount = 0; bytecount < PAGE_SIZE ; bytecount += 4 )
                {
                        if ( current->need_resched )
                                schedule();
                        *(unsigned long *)(bytecount + pageptr) = 0;
                }
                /*
                 * If we finished zero-ing out a page add this page to
                 * the zero_cache atomically -- we can't use
                 * down/up since we can't sleep in idle.
                 * Disabling interrupts is also a bad idea since we would
                 * steal time away from real processes.
                 * We can also have several zero_paged's running
                 * on different processors so we can't interfere with them.
                 * So we update the list atomically without locking it.
                 * -- Cort
                 */

                /* turn cache on for this page */
                pte_cache(*pte);
                flush_tlb_page(find_vma(init_task.mm,pageptr),pageptr);

                /* atomically add this page to the list */
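                /*
                 * The first word of the new page is pointed at the old
                 * list head before stwcx. publishes the page as the new
                 * head; the sync on SMP makes that link visible to other
                 * processors before they can follow it.
                 */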
281 asm ( "101:lwarx %0,0,%1\n" /* reserve zero_cache */
282 " stw %0,0(%2)\n" /* update *pageptr */
283 #ifdef __SMP__
284 " sync\n" /* let store settle */
285 #endif
286 " mr %0,%2\n" /* update zero_cache in reg */
287 " stwcx. %2,0,%1\n" /* update zero_cache in mem */
288 " bne- 101b\n" /* if lost reservation try again */
289 : "=&r" (zero_quicklist)
290 : "r" (&zero_quicklist), "r" (pageptr)
291 : "cc" );
                /*
                 * This variable is used in the above loop and nowhere
                 * else so the worst that could happen is we would
                 * zero out one more or one less page than we want
                 * per processor on the machine.  This is because
                 * we could add our page to the list but not have
                 * zerocount updated yet when another processor
                 * reads it. -- Cort
                 */
                atomic_inc((atomic_t *)&zero_cache_sz);
                atomic_inc((atomic_t *)&quicklists.zerototal);
        }
}
void power_save(void)
{
        unsigned long msr, hid0;

        /* only sleep on the 603-family/750 processors */
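        /*
         * The upper half of the PVR identifies the processor family.
         * HID0 (SPR 1008) holds the DOZE/NAP/SLEEP mode selects;
         * setting MSR_POW then actually enters the selected
         * power-saving mode when the MSR is restored below.
         */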
        switch (_get_PVR() >> 16) {
        case 3:                 /* 603 */
        case 6:                 /* 603e */
        case 7:                 /* 603ev */
        case 8:                 /* 750 */
                save_flags(msr);
                cli();
                if (!current->need_resched) {
                        asm("mfspr %0,1008" : "=r" (hid0) :);
                        hid0 &= ~(HID0_NAP | HID0_SLEEP | HID0_DOZE);
                        hid0 |= (powersave_nap? HID0_NAP: HID0_DOZE) | HID0_DPM;
                        asm("mtspr 1008,%0" : : "r" (hid0));
                        msr |= MSR_POW;
                }
                restore_flags(msr);
        default:
                return;
        }
}