#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
/*
 * Structure for storing CPU registers in the save area.
 */
#define SL_SPRG0	0x10	/* 4 sprg's */
#define SL_R12		0x74	/* r12 to r31 */
#define SL_SIZE		(SL_R12 + 80)
_GLOBAL(swsusp_save_area)
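	/* A minimal sketch of the save area body, assuming it is nothing
	 * more than SL_SIZE bytes of reserved space in .data:
	 */
	.space	SL_SIZE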
_GLOBAL(swsusp_arch_suspend)
	lis	r11,swsusp_save_area@h
	ori	r11,r11,swsusp_save_area@l
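	/* A sketch of the register save that would come next, using the
	 * SL_R12 offset defined above plus hypothetical SL_LR and SL_SP
	 * slots (not among the offsets shown):
	 */
	mflr	r0
	stw	r0,SL_LR(r11)		/* hypothetical LR slot */
	stw	r1,SL_SP(r11)		/* hypothetical stack pointer slot */
	stmw	r12,SL_R12(r11)		/* r12..r31 in one store-multiple */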
	/* Get a stable timebase and save it */
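	/* A sketch of the classic 32-bit stable-timebase read, assuming a
	 * hypothetical SL_TB slot: re-read TBU until it is unchanged
	 * across the TBL read, so the 64-bit value is consistent.
	 */
1:	mftbu	r4
	stw	r4,SL_TB(r11)		/* hypothetical timebase slot */
	mftb	r4
	stw	r4,SL_TB+4(r11)
	mftbu	r3
	cmpw	r3,r4
	bne	1b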
	mfsprg	r4,1
	stw	r4,SL_SPRG0+4(r11)
	mfsprg	r4,2
	stw	r4,SL_SPRG0+8(r11)
	mfsprg	r4,3
	stw	r4,SL_SPRG0+12(r11)
	mfdbatl	r4,0
	stw	r4,SL_DBAT0+4(r11)
	mfdbatl	r4,1
	stw	r4,SL_DBAT1+4(r11)
	mfdbatl	r4,2
	stw	r4,SL_DBAT2+4(r11)
	mfdbatl	r4,3
	stw	r4,SL_DBAT3+4(r11)
	mfibatl	r4,0
	stw	r4,SL_IBAT0+4(r11)
	mfibatl	r4,1
	stw	r4,SL_IBAT1+4(r11)
	mfibatl	r4,2
	stw	r4,SL_IBAT2+4(r11)
	mfibatl	r4,3
	stw	r4,SL_IBAT3+4(r11)
	/* Back up various CPU configuration registers */
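	/* Presumably the counterpart of the __restore_cpu_setup call made
	 * on the resume path below; a sketch under that assumption:
	 */
	bl	__save_cpu_setup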
	/* Call the low level suspend stuff (we should probably have made
	 * a stackframe...)
	 */
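	/* Sketch: the generic snapshot entry point is swsusp_save(), whose
	 * return value comes back in r3:
	 */
	bl	swsusp_save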
	/* Restore LR from the save area */
	lis	r11,swsusp_save_area@h
	ori	r11,r11,swsusp_save_area@l
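	/* Sketch of the return path, assuming a hypothetical SL_LR slot
	 * holding the saved link register:
	 */
	lwz	r0,SL_LR(r11)
	mtlr	r0
	blr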
_GLOBAL(swsusp_arch_resume)
#ifdef CONFIG_ALTIVEC
	/* Stop pending AltiVec streams and memory accesses */
BEGIN_FTR_SECTION
	DSSALL
	sync
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	/* Disable MSR:DR to make sure we don't take a TLB or
	 * hash miss during the copy, as our hash table will be
	 * unusable for a while. For .text, we assume we are
	 * covered by a BAT. This works only for non-G5 at this
	 * point; G5 will need a better approach, possibly using
	 * a small temporary hash table filled with large mappings,
	 * since disabling the MMU completely isn't a good option
	 * for performance reasons.
	 * (Note that 750s may have the same performance issue as
	 * the G5 in this case; we should investigate using moving
	 * BATs for these CPUs.)
	 */
	rlwinm	r0,r0,0,28,26		/* clear MSR_DR */
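	/* A sketch of how the modified MSR would take effect, assuming r0
	 * was loaded with mfmsr just before the rlwinm above; the
	 * sync/isync pair orders storage accesses around the translation
	 * change:
	 */
	mtmsr	r0
	sync
	isync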
	/* Load the pointer to the list of pages to copy (restore_pblist) */
	lis	r11,(restore_pblist - KERNELBASE)@h
	ori	r11,r11,restore_pblist@l
	/* Copy the pages. This is a very basic implementation, to
	 * be replaced by something more cache-efficient. */
	lwz	r11,pbe_address(r3)		/* source */
	lwz	r10,pbe_orig_address(r3)	/* destination */
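	/* A minimal sketch of the copy body, assuming both pointers have
	 * been converted to physical addresses (e.g. with tophys()) and
	 * CTR preloaded with PAGE_SIZE/16 (256 for 4K pages); it moves 16
	 * bytes per iteration, and an outer loop would then walk the pbe
	 * list until the next pointer is NULL:
	 */
2:	lwz	r8,0(r11)
	lwz	r9,4(r11)
	lwz	r7,8(r11)
	lwz	r6,12(r11)
	addi	r11,r11,16
	stw	r8,0(r10)
	stw	r9,4(r10)
	stw	r7,8(r10)
	stw	r6,12(r10)
	addi	r10,r10,16
	bdnz	2b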
	/* Do a very simple cache flush/inval of the L1 to ensure
	 * coherency of the icache
	 */
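	/* Sketch: one simple approach is to touch a load in every 32-byte
	 * line of a window at least as large as the L1, displacing stale
	 * lines (assumes a 32-byte cache line size):
	 */
	lis	r3,0x0002		/* 0x20000 lines x 32 bytes = 4MB */
	mtctr	r3
	li	r3,0
1:	lwz	r0,0(r3)
	addi	r3,r3,0x0020
	bdnz	1b
	isync
	sync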
	/* Now flush those cache lines */
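	/* Sketch of the flush itself: dcbf each 32-byte line over the same
	 * window, then sync to push everything out:
	 */
	lis	r3,0x0002
	mtctr	r3
	li	r3,0
1:	dcbf	0,r3
	addi	r3,r3,0x0020
	bdnz	1b
	sync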
	/* Ok, we are now running with the kernel data of the old
	 * kernel fully restored. We can get to the save area
	 * easily now. As for the rest of the code, it assumes the
	 * loader kernel and the booted one are exactly identical.
	 */
	lis	r11,swsusp_save_area@h
	ori	r11,r11,swsusp_save_area@l
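	/* Sketch: data translation is still off at this point, so the
	 * virtual address would need converting to a physical one, e.g.:
	 */
	tophys(r11,r11)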
	/* Restore various CPU configuration registers */
	bl	__restore_cpu_setup
	/* Restore the BATs, and SDR1. Then we can turn on the MMU.
	 * This is a bit hairy as we are running out of those BATs,
	 * but our code is probably in the icache, and we are
	 * writing the same value to the BAT, so that should be fine,
	 * though a better solution will have to be found long-term.
	 */
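	/* Sketch of the SDR1 restore, assuming a hypothetical SL_SDR1 slot
	 * (not among the offsets shown above):
	 */
	lwz	r4,SL_SDR1(r11)
	mtsdr1	r4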
	lwz	r4,SL_SPRG0+4(r11)
	mtsprg	1,r4
	lwz	r4,SL_SPRG0+8(r11)
	mtsprg	2,r4
	lwz	r4,SL_SPRG0+12(r11)
	mtsprg	3,r4
	lwz	r4,SL_DBAT0+4(r11)
	mtdbatl	0,r4
	lwz	r4,SL_DBAT1+4(r11)
	mtdbatl	1,r4
	lwz	r4,SL_DBAT2+4(r11)
	mtdbatl	2,r4
	lwz	r4,SL_DBAT3+4(r11)
	mtdbatl	3,r4
	lwz	r4,SL_IBAT0+4(r11)
	mtibatl	0,r4
	lwz	r4,SL_IBAT1+4(r11)
	mtibatl	1,r4
	lwz	r4,SL_IBAT2+4(r11)
	mtibatl	2,r4
	lwz	r4,SL_IBAT3+4(r11)
	mtibatl	3,r4
BEGIN_MMU_FTR_SECTION
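	/* Hypothetical sketch: CPUs with the high-BAT feature would reload
	 * the upper BAT pairs here the same way, e.g. from an assumed
	 * SL_DBAT4 slot (not among the offsets shown above):
	 *
	 *	lwz	r4,SL_DBAT4(r11)
	 *	mtspr	SPRN_DBAT4U,r4
	 *	lwz	r4,SL_DBAT4+4(r11)
	 *	mtspr	SPRN_DBAT4L,r4
	 */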
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
1:	addic.	r4,r4,-0x1000
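	/* A sketch of how this flush loop would complete, assuming r4 was
	 * preloaded with lis r4,0x1000: tlbie each 4K page of the low
	 * 256MB of effective addresses, then sync:
	 */
	tlbie	r4
	bgt	1b
	sync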
	/* restore the MSR and turn on the MMU */
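	/* Sketch, assuming a hypothetical SL_MSR slot and the turn_on_mmu
	 * helper that the FIXME comment at the end of this file refers to:
	 */
	lwz	r3,SL_MSR(r11)
	bl	turn_on_mmu
	tovirt(r11,r11)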
	/* Kick decrementer */
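	/* Sketch: programming the decrementer to 1 makes it fire almost
	 * immediately once interrupts are re-enabled:
	 */
	li	r0,1
	mtdec	r0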
	/* Restore the callee-saved registers and return */
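	/* Sketch of the epilogue, using the real SL_R12 offset plus
	 * hypothetical SL_SP and SL_LR slots:
	 */
	lmw	r12,SL_R12(r11)		/* r12..r31 in one load-multiple */
	lwz	r1,SL_SP(r11)		/* hypothetical stack pointer slot */
	lwz	r0,SL_LR(r11)		/* hypothetical LR slot */
	mtlr	r0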
	// XXX Note: we don't really need to call swsusp_resume
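	/* Sketch: report success to the caller */
	li	r3,0
	blr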
/* FIXME: This construct is actually not useful since we don't shut
 * down the instruction MMU; we could just flip MSR:DR back on.
 */
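/* A sketch of the helper the FIXME refers to, assuming it takes the new
 * MSR value in r3 and returns through rfi to the address in LR:
 */
turn_on_mmu:
	mflr	r4
	mtsrr0	r4
	mtsrr1	r3
	sync
	isync
	rfi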