/*
 * $Id: idle.c,v 1.66 1999/09/05 11:56:30 paulus Exp $
 *
 * Idle daemon for PowerPC. Idle daemon will handle any action
 * that needs to be taken when the system becomes idle.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
14 #include <linux/config.h>
15 #include <linux/errno.h>
16 #include <linux/sched.h>
17 #include <linux/kernel.h>
19 #include <linux/smp.h>
20 #include <linux/smp_lock.h>
21 #include <linux/stddef.h>
22 #include <linux/unistd.h>
23 #include <linux/ptrace.h>
24 #include <linux/malloc.h>
26 #include <asm/pgtable.h>
27 #include <asm/uaccess.h>
28 #include <asm/system.h>
30 #include <asm/processor.h>
32 #include <asm/cache.h>
/* Forward declarations for the idle-time workers defined below. */
void zero_paged(void);
void power_save(void);
void inline htab_reclaim(void);

/* Run-time enable flags for the optional idle-time work
 * (set elsewhere -- the setters are not visible in this file). */
unsigned long htab_reclaim_on = 0;	/* reclaim stale hash-table PTEs when idle */
unsigned long zero_paged_on = 0;	/* pre-zero free pages when idle */
unsigned long powersave_nap = 0;	/* power_save() uses NAP instead of DOZE */

/* State and statistics for the pre-zeroed page cache.
 * NOTE(review): the functions below refer to zero_quicklist,
 * zero_cache_sz, zero_cache_hits, zero_cache_calls and
 * zero_cache_total; the aliases mapping those names onto the
 * variables here are presumably defined on lines not visible in
 * this chunk -- confirm against the full source. */
unsigned long *zero_cache;	/* head linked list of pre-zero'd pages */
unsigned long zero_sz;		/* # currently pre-zero'd pages */
unsigned long zeropage_hits;	/* # zero'd pages request that we've done */
unsigned long zeropage_calls;	/* # zero'd pages request that've been made */
unsigned long zerototal;	/* # pages zero'd over time */
/*
 * NOTE(review): fragment of the idle task's main loop.  The enclosing
 * function definition, the loop construct itself and the schedule()
 * call are on lines not visible in this chunk -- do not treat this
 * text as complete.
 */
50 /* endless loop with no priority at all */
/* Demote ourselves so any runnable process preempts the idle task:
 * zero priority and a large negative scheduling counter. */
51 current
->priority
= 0;
52 current
->counter
= -100;
/* Do optional background work only while nothing wants the CPU,
 * re-checking need_resched before each piece of work. */
60 if ( !current
->need_resched
&& zero_paged_on
) zero_paged();
61 if ( !current
->need_resched
&& htab_reclaim_on
) htab_reclaim();
/* Nothing left to do: drop into the processor power-save state. */
62 if ( !current
->need_resched
) power_save();
/* Someone became runnable; the (missing) body presumably calls
 * schedule() -- confirm against the full source. */
65 if (current
->need_resched
)
/*
 * SMP entry into the idle task - calls the same thing as the
 * non-smp versions. -- Cort
 */
/*
 * htab_reclaim() - walk part of the PowerPC hash page table during
 * idle time and invalidate "zombie" entries whose vsid/context no
 * longer belongs to any live mm.
 *
 * NOTE(review): many interior lines of this function are missing from
 * this chunk (braces, the declarations of start/ptr/reclaim_ptr, the
 * loop headers, the task-list walk and the actual invalidation), and
 * the trailing #endif has no visible matching #ifndef CONFIG_8xx.
 * Everything below is a fragment -- confirm against the full source
 * before editing.
 */
83 * Mark 'zombie' pte's in the hash table as invalid.
84 * This improves performance for the hash table reload code
85 * a bit since we don't consider unused pages as valid.
89 void inline htab_reclaim(void)
/* p: task iterator used to test whether a hash entry's vsid/context
 * still belongs to some task's mm. */
96 struct task_struct
*p
;
97 unsigned long valid
= 0;
/* The hash table itself is defined elsewhere; only extern references
 * are used here. */
98 extern PTE
*Hash
, *Hash_end
;
99 extern unsigned long Hash_size
;
101 /* if we don't have a htab */
/* Hash-less configuration: bail out early (the early-return body is
 * on a missing line). */
102 if ( Hash_size
== 0 )
105 /* find a random place in the htab to start each time */
/* jiffies modulo the number of PTEs picks a pseudo-random starting
 * slot so successive idle passes cover different parts of the table. */
106 start
= &Hash
[jiffies
%(Hash_size
/sizeof(PTE
))];
107 /* go a different direction each time */
/* Fragment of a loop condition: stop as soon as a reschedule is
 * pending or the walk pointer reaches either end of the table. */
110 !current
->need_resched
&& (ptr
!= Hash_end
) && (ptr
!= Hash
);
/* Lazily initialise the persistent reclaim cursor to the table base. */
114 if ( !reclaim_ptr
) reclaim_ptr
= Hash
;
115 while ( !current
->need_resched
)
/* Wrap the cursor back to the start of the table. */
118 if ( reclaim_ptr
== Hash_end
) reclaim_ptr
= Hash
;
/* Abandon the scan as soon as real work shows up. */
125 if ( current
->need_resched
)
127 /* if this vsid/context is in use */
/* vsid >> 4 recovers the mm context number encoded in the hash PTE;
 * a match against some task's mm->context means the entry is live --
 * presumably checked inside a task-list loop on missing lines. */
128 if ( (reclaim_ptr
->vsid
>> 4) == p
->mm
->context
)
136 /* this pte isn't used */
/* Debug trace only -- does not change behaviour. */
140 if ( current
->need_resched
) printk("need_resched: %lx\n", current
->need_resched
);
141 #endif /* CONFIG_8xx */
/*
 * get_zero_page_fast() - pop one pre-zeroed page off the lock-free
 * zero-page list, or (per the header comment fragment) return 0 when
 * none is available.
 *
 * NOTE(review): several interior lines are missing from this chunk
 * (function braces, the clobber list / closing of the asm statement,
 * the empty-list check after the pop, and the return statements).
 * Fragment only -- confirm against the full source.
 */
145 * Returns a pre-zero'd page from the list otherwise returns
148 unsigned long get_zero_page_fast(void)
150 unsigned long page
= 0;
/* Count every request, hit or miss. */
152 atomic_inc((atomic_t
*)&zero_cache_calls
);
153 if ( zero_quicklist
)
155 /* atomically remove this page from the list */
/* lwarx/stwcx. reservation loop: atomically replace the list head
 * with the next-pointer stored in the head page's first word.
 * %1 = old head (the page we return), %0 = new head. */
156 asm ( "101:lwarx %1,0,%2\n" /* reserve zero_cache */
157 " lwz %0,0(%1)\n" /* get next -- new zero_cache */
158 " stwcx. %0,0,%2\n" /* update zero_cache */
159 " bne- 101b\n" /* if lost reservation try again */
160 : "=&r" (zero_quicklist
), "=&r" (page
)
161 : "r" (&zero_quicklist
)
164 /* if another cpu beat us above this can happen -- Cort */
168 /* we can update zerocount after the fact since it is not
169 * used for anything but control of a loop which doesn't
170 * matter since it won't affect anything if it zeros one
/* Bookkeeping: one more hit, one fewer cached page.  Deliberately
 * done after the pop; the comment fragment above explains why the
 * resulting skew is harmless. */
173 atomic_inc((atomic_t
*)&zero_cache_hits
);
174 atomic_dec((atomic_t
*)&zero_cache_sz
);
176 /* zero out the pointer to next in the page */
/* The first word of the page held the list link; clear it so the
 * returned page really is all zeroes. */
177 *(unsigned long *)page
= 0;
/*
 * Experimental stuff to zero out pages in the idle task
 * to speed up get_free_pages(). Zero's out pages until
 * we've reached the limit of zero'd pages. We handle
 * reschedule()'s in here so when we return we know we've
 * zero'd all we need to for now.
 */
/*
 * Watermarks for the pre-zeroed page cache.  zero_paged() appears to
 * stop early once the cache holds zero_cache_water[0] pages and
 * refills no further than zero_cache_water[1] -- confirm against the
 * full zero_paged() body.
 */
int zero_cache_water[2] = {
	25,	/* low water mark */
	96	/* high water mark */
};
/*
 * zero_paged() - idle-time worker that pre-zeroes free pages and
 * pushes them onto the lock-free zero-page list, until the cache
 * reaches its high-water mark or a reschedule is requested.
 *
 * NOTE(review): many interior lines are missing from this chunk
 * (function braces, the early return when the cache is already full,
 * the pte declaration and the cache-inhibit/cache-enable pte updates,
 * the bail-out bodies, the asm clobber list and several comment
 * continuations).  Fragment only -- confirm against the full source.
 */
191 void zero_paged(void)
193 unsigned long pageptr
= 0; /* current page being zero'd */
194 unsigned long bytecount
= 0;
/* Cache already at/above the low-water mark: the (missing) body
 * presumably returns early -- confirm. */
197 if ( zero_cache_sz
>= zero_cache_water
[0] )
/* Refill up to the high-water mark, yielding as soon as real work
 * appears. */
199 while ( (zero_cache_sz
< zero_cache_water
[1]) && (!current
->need_resched
) )
202 * Mark a page as reserved so we can mess with it
203 * If we're interrupted we keep this page and our place in it
204 * since we validly hold it and it's reserved for us.
/* GFP_ATOMIC: the idle task must never sleep on allocation. */
206 pageptr
= __get_free_pages(GFP_ATOMIC
, 0);
210 if ( current
->need_resched
)
214 * Make the page no cache so we don't blow our cache with 0's
/* Look up the kernel pte for the page so its caching attributes can
 * be changed (the actual pte update is on missing lines). */
216 pte
= find_pte(&init_mm
, pageptr
);
219 printk("pte NULL in zero_paged()\n");
/* Flush the stale translation so the new mapping takes effect. */
224 flush_tlb_page(find_vma(&init_mm
,pageptr
),pageptr
);
226 * Important here to not take time away from real processes.
/* Zero the page one 32-bit word at a time, checking for pending
 * reschedules inside the loop. */
228 for ( bytecount
= 0; bytecount
< PAGE_SIZE
; bytecount
+= 4 )
230 if ( current
->need_resched
)
232 *(unsigned long *)(bytecount
+ pageptr
) = 0;
236 * If we finished zero-ing out a page add this page to
237 * the zero_cache atomically -- we can't use
238 * down/up since we can't sleep in idle.
239 * Disabling interrupts is also a bad idea since we would
240 * steal time away from real processes.
241 * We can also have several zero_paged's running
242 * on different processors so we can't interfere with them.
243 * So we update the list atomically without locking it.
247 /* turn cache on for this page */
/* Re-enable caching (pte update on missing lines) and flush again. */
249 flush_tlb_page(find_vma(&init_mm
,pageptr
),pageptr
);
250 /* atomically add this page to the list */
/* lwarx/stwcx. reservation loop: store the old head into the new
 * page's first word (the list link), then make the new page the
 * list head. */
251 asm ( "101:lwarx %0,0,%1\n" /* reserve zero_cache */
252 " stw %0,0(%2)\n" /* update *pageptr */
254 " sync\n" /* let store settle */
256 " mr %0,%2\n" /* update zero_cache in reg */
257 " stwcx. %2,0,%1\n" /* update zero_cache in mem */
258 " bne- 101b\n" /* if lost reservation try again */
259 : "=&r" (zero_quicklist
)
260 : "r" (&zero_quicklist
), "r" (pageptr
)
263 * This variable is used in the above loop and nowhere
264 * else so the worst that could happen is we would
265 * zero out one more or one less page than we want
266 * per processor on the machine. This is because
267 * we could add our page to the list but not have
268 * zerocount updated yet when another processor
/* Counters updated after (not atomically with) the list push; the
 * comment fragment above explains why a small skew is harmless. */
271 atomic_inc((atomic_t
*)&zero_cache_sz
);
272 atomic_inc((atomic_t
*)&zero_cache_total
);
/*
 * power_save() - put the CPU into a low-power state (DOZE, or NAP if
 * powersave_nap is set) until the next interrupt.
 *
 * NOTE(review): the switch's case labels, part of a trailing comment
 * and the function's closing lines are missing; the function also
 * continues past the end of the visible source.  Fragment only --
 * confirm against the full source.  The local 'msr' is unused in the
 * visible lines; it is presumably used on missing ones.
 */
276 void power_save(void)
278 unsigned long msr
, hid0
;
280 /* only sleep on the 603-family/750 processors */
/* Dispatch on the processor version (high half of the PVR); the
 * matching case labels are on missing lines. */
281 switch (_get_PVR() >> 16) {
288 if (!current
->need_resched
) {
/* SPR 1008 is HID0 on these processors: clear all power-management
 * mode bits, then select NAP or DOZE plus dynamic power management
 * (HID0_DPM). */
289 asm("mfspr %0,1008" : "=r" (hid0
) :);
290 hid0
&= ~(HID0_NAP
| HID0_SLEEP
| HID0_DOZE
);
291 hid0
|= (powersave_nap
? HID0_NAP
: HID0_DOZE
) | HID0_DPM
;
292 asm("mtspr 1008,%0" : : "r" (hid0
));
294 /* set the POW bit in the MSR, and enable interrupts
295 * so we wake up sometime! */
/* MSR[POW] actually enters the low-power state; MSR[EE] must be set
 * so an interrupt can wake us. */
296 _nmask_and_or_msr(0, MSR_POW
| MSR_EE
);
298 /* Disable interrupts again so restore_flags will
/* Mask external interrupts again after waking. */
300 _nmask_and_or_msr(MSR_EE
, 0);