/*
 * $Id: idle.c,v 1.62 1999/05/24 05:43:18 cort Exp $
 *
 * Idle daemon for PowerPC.  Idle daemon will handle any action
 * that needs to be taken when the system becomes idle.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/malloc.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/cache.h>

void zero_paged(void);
void power_save(void);
void inline htab_reclaim(void);

unsigned long htab_reclaim_on = 0;
unsigned long zero_paged_on = 0;
unsigned long powersave_nap = 0;

unsigned long *zero_cache;	/* head of linked list of pre-zero'd pages */
unsigned long zero_sz;		/* # pages currently pre-zero'd */
unsigned long zeropage_hits;	/* # requests for a zero'd page we've satisfied */
unsigned long zeropage_calls;	/* # requests for a zero'd page that have been made */
unsigned long zerototal;	/* # pages zero'd over time */
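
/*
 * Editorial note: the functions below refer to these counters through
 * the names zero_quicklist, zero_cache_sz, zero_cache_hits,
 * zero_cache_calls and zero_cache_total.  In kernels of this vintage
 * those are #define aliases for the variables above, roughly as in
 * include/asm-ppc/pgtable.h:
 *
 *	#define zero_quicklist   (zero_cache)
 *	#define zero_cache_sz    (zero_sz)
 *	#define zero_cache_calls (zeropage_calls)
 *	#define zero_cache_hits  (zeropage_hits)
 *	#define zero_cache_total (zerototal)
 */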

int idled(void *unused)
{
	/* endless loop with no priority at all */
	current->priority = 0;
	current->counter = -100;
	init_idle();
	for (;;)
	{
		__sti();

		check_pgt_cache();

		if ( !current->need_resched && zero_paged_on ) zero_paged();
		if ( !current->need_resched && htab_reclaim_on ) htab_reclaim();
		if ( !current->need_resched ) power_save();

#ifdef __SMP__
		if (current->need_resched)
#endif
			schedule();
	}
	return 0;
}
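
/*
 * Editorial note: because of the #ifdef above, SMP kernels only call
 * schedule() from the idle loop when need_resched is set, while UP
 * kernels compile the test away and reschedule on every pass.
 */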

#ifdef __SMP__
/*
 * SMP entry into the idle task - calls the same thing as the
 * non-smp versions. -- Cort
 */
int cpu_idle(void *unused)
{
	idled(unused);
	return 0;
}
#endif /* __SMP__ */

/*
 * Syscall entry into the idle task. -- Cort
 */
asmlinkage int sys_idle(void)
{
	if(current->pid != 0)
		return -EPERM;

	idled(NULL);
	return 0; /* should never execute this but it makes gcc happy -- Cort */
}

/*
 * Mark 'zombie' pte's in the hash table as invalid.
 * This improves performance for the hash table reload code
 * a bit since we don't consider unused pages as valid.
 * -- Cort
 */
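
/*
 * Editorial sketch (not from this file): a hash-table PTE here is the
 * 8-byte HPTE from include/asm-ppc/mmu.h, whose first word looks
 * roughly like
 *
 *	typedef struct _PTE {
 *		unsigned long v:1;	// entry is valid
 *		unsigned long vsid:24;	// virtual segment id
 *		unsigned long h:1;	// hash function selector
 *		unsigned long api:6;	// abbreviated page index
 *		...
 *	} PTE;
 *
 * Each mm context owns a block of 16 VSIDs (one per 256MB segment), so
 * the loop below can test reclaim_ptr->v for validity and compare
 * reclaim_ptr->vsid >> 4 against p->mm->context to decide whether the
 * entry still belongs to a live context.
 */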
PTE *reclaim_ptr = 0;
void inline htab_reclaim(void)
{
#ifndef CONFIG_8xx
#if 0
	PTE *ptr, *start;
	static int dir = 1;
#endif
	struct task_struct *p;
	unsigned long valid = 0;
	extern PTE *Hash, *Hash_end;
	extern unsigned long Hash_size;

	/* if we don't have a htab */
	if ( Hash_size == 0 )
		return;
#if 0
	/* find a random place in the htab to start each time */
	start = &Hash[jiffies%(Hash_size/sizeof(PTE))];
	/* go a different direction each time */
	dir *= -1;
	for ( ptr = start;
	      !current->need_resched && (ptr != Hash_end) && (ptr != Hash);
	      ptr += dir)
	{
#else
	if ( !reclaim_ptr ) reclaim_ptr = Hash;
	while ( !current->need_resched )
	{
		reclaim_ptr++;
		if ( reclaim_ptr == Hash_end ) reclaim_ptr = Hash;
#endif
		if (!reclaim_ptr->v)
			continue;
		valid = 0;
		for_each_task(p)
		{
			if ( current->need_resched )
				goto out;
			/* if this vsid/context is in use */
			if ( (reclaim_ptr->vsid >> 4) == p->mm->context )
			{
				valid = 1;
				break;
			}
		}
		if ( valid )
			continue;
		/* this pte isn't used */
		reclaim_ptr->v = 0;
	}
out:
	if ( current->need_resched ) printk("need_resched: %lx\n", current->need_resched);
#endif /* CONFIG_8xx */
}

/*
 * Returns a pre-zero'd page from the list otherwise returns
 * NULL.
 */
unsigned long get_zero_page_fast(void)
{
	unsigned long page = 0;

	atomic_inc((atomic_t *)&zero_cache_calls);
	if ( zero_quicklist )
	{
		/* atomically remove this page from the list */
		asm ( "101:lwarx  %1,0,%2\n"  /* reserve zero_cache */
		      "    lwz    %0,0(%1)\n" /* get next -- new zero_cache */
		      "    stwcx. %0,0,%2\n"  /* update zero_cache */
		      "    bne-   101b\n"     /* if lost reservation try again */
		      : "=&r" (zero_quicklist), "=&r" (page)
		      : "r" (&zero_quicklist)
		      : "cc" );
#ifdef __SMP__
		/* if another cpu beat us above this can happen -- Cort */
		if ( page == 0 )
			return 0;
#endif /* __SMP__ */
		/* we can update zerocount after the fact since it is not
		 * used for anything but control of a loop which doesn't
		 * matter since it won't affect anything if it zeros one
		 * less page -- Cort
		 */
		atomic_inc((atomic_t *)&zero_cache_hits);
		atomic_dec((atomic_t *)&zero_cache_sz);

		/* zero out the pointer to next in the page */
		*(unsigned long *)page = 0;
		return page;
	}
	return 0;
}
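
/*
 * Editorial sketch (not from this file): the lwarx/stwcx. sequence in
 * get_zero_page_fast() is a lock-free pop of the list head.  With
 * hypothetical load_reserved()/store_conditional() helpers it reads
 * roughly as
 *
 *	do {
 *		page = load_reserved(&zero_quicklist);	// lwarx
 *		next = *(unsigned long *)page;		// lwz
 *	} while (!store_conditional(&zero_quicklist, next)); // stwcx./bne-
 *
 * The conditional store only succeeds if no other cpu has written
 * zero_quicklist since the lwarx took the reservation; otherwise the
 * loop retries from the new list head.
 */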

/*
 * Experimental stuff to zero out pages in the idle task
 * to speed up get_free_pages().  Zeroes out pages until
 * we've reached the limit of zero'd pages.  We handle
 * reschedule()'s in here so when we return we know we've
 * zero'd all we need to for now.
 */
int zero_cache_water[2] = { 25, 96 }; /* low and high water marks for zero cache */
void zero_paged(void)
{
	unsigned long pageptr = 0;	/* current page being zero'd */
	unsigned long bytecount = 0;
	pte_t *pte;

	/* don't bother refilling until we drop below the low water mark */
	if ( zero_cache_sz >= zero_cache_water[0] )
		return;
	while ( (zero_cache_sz < zero_cache_water[1]) && (!current->need_resched) )
	{
		/*
		 * Mark a page as reserved so we can mess with it
		 * If we're interrupted we keep this page and our place in it
		 * since we validly hold it and it's reserved for us.
		 */
		pageptr = __get_free_pages(GFP_ATOMIC, 0);
		if ( !pageptr )
			return;

		if ( current->need_resched )
			schedule();

		/*
		 * Make the page no cache so we don't blow our cache with 0's
		 */
		pte = find_pte(init_task.mm, pageptr);
		if ( !pte )
		{
			printk("pte NULL in zero_paged()\n");
			return;
		}

		pte_uncache(*pte);
		flush_tlb_page(find_vma(init_task.mm,pageptr),pageptr);
		/*
		 * Important here to not take time away from real processes.
		 */
		for ( bytecount = 0; bytecount < PAGE_SIZE ; bytecount += 4 )
		{
			if ( current->need_resched )
				schedule();
			*(unsigned long *)(bytecount + pageptr) = 0;
		}

		/*
		 * If we finished zero-ing out a page add this page to
		 * the zero_cache atomically -- we can't use
		 * down/up since we can't sleep in idle.
		 * Disabling interrupts is also a bad idea since we would
		 * steal time away from real processes.
		 * We can also have several zero_paged's running
		 * on different processors so we can't interfere with them.
		 * So we update the list atomically without locking it.
		 * -- Cort
		 */

		/* turn cache on for this page */
		pte_cache(*pte);
		flush_tlb_page(find_vma(init_task.mm,pageptr),pageptr);
		/* atomically add this page to the list */
		asm ( "101:lwarx  %0,0,%1\n"  /* reserve zero_cache */
		      "    stw    %0,0(%2)\n" /* update *pageptr */
#ifdef __SMP__
		      "    sync\n"            /* let store settle */
#endif
		      "    mr     %0,%2\n"    /* update zero_cache in reg */
		      "    stwcx. %2,0,%1\n"  /* update zero_cache in mem */
		      "    bne-   101b\n"     /* if lost reservation try again */
		      : "=&r" (zero_quicklist)
		      : "r" (&zero_quicklist), "r" (pageptr)
		      : "cc" );
		/*
		 * This variable is used in the above loop and nowhere
		 * else so the worst that could happen is we would
		 * zero out one more or one less page than we want
		 * per processor on the machine.  This is because
		 * we could add our page to the list but not have
		 * zerocount updated yet when another processor
		 * reads it. -- Cort
		 */
		atomic_inc((atomic_t *)&zero_cache_sz);
		atomic_inc((atomic_t *)&zero_cache_total);
	}
}
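
/*
 * Editorial sketch (not from this file): the list push in zero_paged()
 * mirrors the pop in get_zero_page_fast().  With the same hypothetical
 * helpers it is roughly
 *
 *	do {
 *		old = load_reserved(&zero_quicklist);	// lwarx
 *		*(unsigned long *)pageptr = old;	// stw: link page in
 *		wmb();					// sync on SMP
 *	} while (!store_conditional(&zero_quicklist, pageptr)); // stwcx.
 *
 * The sync matters on SMP: the new page's next pointer must be visible
 * to other cpus before the page itself is published as the list head,
 * or a racing pop could read a stale next pointer.
 */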

void power_save(void)
{
	unsigned long msr, hid0;

	/* only sleep on the 603-family/750 processors */
	switch (_get_PVR() >> 16) {
	case 3:			/* 603 */
	case 6:			/* 603e */
	case 7:			/* 603ev */
	case 8:			/* 750 */
		save_flags(msr);
		cli();
		if (!current->need_resched) {
			asm("mfspr %0,1008" : "=r" (hid0) :);
			hid0 &= ~(HID0_NAP | HID0_SLEEP | HID0_DOZE);
			hid0 |= (powersave_nap? HID0_NAP: HID0_DOZE) | HID0_DPM;
			asm("mtspr 1008,%0" : : "r" (hid0));

			/* set the POW bit in the MSR, and enable interrupts
			 * so we wake up sometime! */
			_nmask_and_or_msr(0, MSR_POW | MSR_EE);

			/* Disable interrupts again so restore_flags will
			 * work. */
			_nmask_and_or_msr(MSR_EE, 0);
		}
		restore_flags(msr);
	default:
		return;
	}
}
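
/*
 * Editorial note: HID0 is SPR 1008 on these cpus, hence the raw number
 * in the mfspr/mtspr above.  Roughly, HID0_DOZE keeps the PLL, time
 * base and bus snooping running, HID0_NAP also stops snooping, and
 * HID0_DPM enables dynamic power management; the cpu only enters the
 * selected state once MSR[POW] is set, which is what
 * _nmask_and_or_msr(0, MSR_POW | MSR_EE) does, and the next interrupt
 * (enabled by MSR_EE) is what wakes it up again.
 */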