#include <linux/config.h>
#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/offsets.h>
 * Structure for storing CPU registers in the save area.
#define SL_SPRG0	0x10	/* 4 sprg's */
#define SL_R12		0x74	/* r12 to r31 */
#define SL_SIZE		(SL_R12 + 80)
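/* A note on the "+ 80": r12..r31 is 20 registers of 4 bytes each,
 * presumably moved in one go with stmw/lmw, hence 20 * 4 = 80 bytes.
 */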
_GLOBAL(swsusp_save_area)
_GLOBAL(swsusp_arch_suspend)
	lis	r11,swsusp_save_area@h
	ori	r11,r11,swsusp_save_area@l
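	/* lis/ori is the standard two-instruction way to materialize a
	 * full 32-bit address: lis sets the high 16 bits, ori fills in
	 * the low 16.
	 */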
	/* Get a stable timebase and save it */
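	/* The canonical way to read a coherent 64-bit timebase on 32-bit
	 * PowerPC (a sketch; the exact loop is in the elided code) is to
	 * re-read the upper half until it is stable across the read of
	 * the lower half:
	 *	1: mftbu r4; mftb r5; mftbu r3; cmpw r3,r4; bne 1b
	 */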
	stw	r4,SL_SPRG0+4(r11)
	stw	r4,SL_SPRG0+8(r11)
	stw	r4,SL_SPRG0+12(r11)
	stw	r4,SL_DBAT0+4(r11)
	stw	r4,SL_DBAT1+4(r11)
	stw	r4,SL_DBAT2+4(r11)
	stw	r4,SL_DBAT3+4(r11)
	stw	r4,SL_IBAT0+4(r11)
	stw	r4,SL_IBAT1+4(r11)
	stw	r4,SL_IBAT2+4(r11)
	stw	r4,SL_IBAT3+4(r11)
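	/* Each BAT is an upper/lower pair of SPRs (e.g. DBAT0U/DBAT0L),
	 * which is why each slot stores a second word at offset +4.
	 */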
	/* Back up various CPU configuration registers */
	/* Call the low-level suspend code (we should probably have made
	/* Restore LR from the save area */
	lis	r11,swsusp_save_area@h
	ori	r11,r11,swsusp_save_area@l
_GLOBAL(swsusp_arch_resume)
	/* Stop pending AltiVec streams and memory accesses */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
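	/* Code between BEGIN_FTR_SECTION and END_FTR_SECTION_IFSET(bit)
	 * is patched into nops at boot on CPUs that do not report the
	 * given feature, here CPU_FTR_ALTIVEC.
	 */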
	/* Disable MSR:DR to make sure we don't take a TLB or
	 * hash miss during the copy, as our hash table will
	 * be unusable for a while. For .text, we assume we are
	 * covered by a BAT. This works only for non-G5 at this
	 * point. G5 will need a better approach, possibly using
	 * a small temporary hash table filled with large mappings;
	 * disabling the MMU completely isn't a good option for
	 * performance reasons.
	 * (Note that 750s may have the same performance issue as
	 * the G5 in this case; we should investigate using moving
	 * BATs for these CPUs.)
	rlwinm	r0,r0,0,28,26		/* clear MSR_DR */
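	/* With MB(28) > ME(26), rlwinm generates a wrap-around mask that
	 * keeps every bit except bit 27, i.e. it clears only MSR_DR (0x10).
	 */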
	/* Load the pointer to the list of pages to copy */
	lis	r11,(pagedir_nosave - KERNELBASE)@h
	ori	r11,r11,pagedir_nosave@l
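	/* KERNELBASE is 0xc0000000, so subtracting it only changes the
	 * high halfword: pagedir_nosave@l is the same either way. The @h
	 * term must be physical because MSR:DR is off at this point.
	 */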
	/* Copy the pages. This is a very basic implementation, to
	 * be replaced by something more cache-efficient */
	lwz	r11,pbe_address(r3)		/* source */
	lwz	r10,pbe_orig_address(r3)	/* destination */
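	/* pbe_address/pbe_orig_address are generated offsets (via
	 * <asm/offsets.h>) into struct pbe, the swsusp entry pairing each
	 * saved copy of a page with the page's original address.
	 */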
	/* Do a very simple cache flush/invalidate of the L1 to ensure
	 * coherency of the icache
	/* Now flush those cache lines */
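	/* The fully architected icache sync is dcbst + sync per line
	 * followed by icbi + isync; the blanket L1 pass done here trades
	 * precision for simplicity, as the comment above admits.
	 */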
	/* OK, we are now running with the kernel data of the old
	 * kernel fully restored. We can get to the save area
	 * easily now. As for the rest of the code, it assumes the
	 * loader kernel and the booted one are exactly identical.
	lis	r11,swsusp_save_area@h
	ori	r11,r11,swsusp_save_area@l
	/* Restore various CPU configuration registers */
	bl	__restore_cpu_setup
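	/* __restore_cpu_setup re-applies CPU-specific initialization
	 * (HID registers and the like) normally done once at boot, since
	 * that state did not survive the power-off.
	 */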
	/* Restore the BATs and SDR1. Then we can turn on the MMU.
	 * This is a bit hairy as we are executing from those very BATs,
	 * but our code is probably in the icache, and we are writing
	 * the same values back to the BATs, so that should be fine,
	 * though a better solution will have to be found long-term
	lwz	r4,SL_SPRG0+4(r11)
	lwz	r4,SL_SPRG0+8(r11)
	lwz	r4,SL_SPRG0+12(r11)
	lwz	r4,SL_DBAT0+4(r11)
	lwz	r4,SL_DBAT1+4(r11)
	lwz	r4,SL_DBAT2+4(r11)
	lwz	r4,SL_DBAT3+4(r11)
	lwz	r4,SL_IBAT0+4(r11)
	lwz	r4,SL_IBAT1+4(r11)
	lwz	r4,SL_IBAT2+4(r11)
	lwz	r4,SL_IBAT3+4(r11)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_HIGH_BATS)
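	/* The save area only covers BAT0-3; CPUs flagged
	 * CPU_FTR_HAS_HIGH_BATS (e.g. 745x parts) also have
	 * IBAT4-7/DBAT4-7, which the feature section ending above puts
	 * into a known state on those CPUs only.
	 */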
1:	addic.	r4,r4,-0x1000
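	/* addic. both steps r4 down by one 4k page (0x1000) and sets CR0,
	 * letting the loop's conditional branch bound what looks like the
	 * classic flush-all-TLBs walk (a tlbie per page of the range).
	 */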
	/* Restore the MSR and turn on the MMU */
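	/* On 32-bit PowerPC the usual way to do this atomically (a sketch
	 * of the common technique, not necessarily the exact elided code)
	 * is to load the saved MSR into SRR1 and the return address into
	 * SRR0, then rfi: PC and MSR update together, so translation
	 * comes back on exactly at the target instruction.
	 */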
	/* Kick decrementer */
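	/* Writing a small value to DEC makes it underflow almost
	 * immediately, so a decrementer (timer) exception is raised as
	 * soon as interrupts are re-enabled.
	 */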
	/* Restore the callee-saved registers and return */
	// XXX Note: we don't really need to call swsusp_resume
	/* FIXME: This construct is actually not useful since we don't shut
	 * down the instruction MMU; we could just flip MSR:DR back on.