[linux-2.6.22.y-op.git] / arch/ppc64/mm/stab.c
blob 31491131d5e4ace3cdd06f2a350ddbf31780a608
/*
 * PowerPC64 Segment Translation Support.
 *
 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 *    Copyright (c) 2001 Dave Engebretsen
 *
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/paca.h>
#include <asm/cputable.h>

/* Both the segment table and SLB code use the following cache */
#define NR_STAB_CACHE_ENTRIES 8
DEFINE_PER_CPU(long, stab_cache_ptr);
DEFINE_PER_CPU(long, stab_cache[NR_STAB_CACHE_ENTRIES]);
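
/*
 * stab_cache records which user segment-table slots have been
 * populated since the last context switch, so switch_stab() can
 * invalidate just those slots instead of scanning the whole table.
 */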

/*
 * Create a segment table entry for the given esid/vsid pair.
 */
static int make_ste(unsigned long stab, unsigned long esid, unsigned long vsid)
{
        unsigned long esid_data, vsid_data;
        unsigned long entry, group, old_esid, castout_entry, i;
        unsigned int global_entry;
        struct stab_entry *ste, *castout_ste;
        unsigned long kernel_segment = (esid << SID_SHIFT) >= KERNELBASE;

        vsid_data = vsid << STE_VSID_SHIFT;
        esid_data = esid << SID_SHIFT | STE_ESID_KP | STE_ESID_V;
        if (!kernel_segment)
                esid_data |= STE_ESID_KS;
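
        /*
         * The segment table is one page of 16-byte entries, organised
         * as 32 groups of 8 (hence the 0x1f masks below).  The low
         * 5 ESID bits select the primary group; their complement
         * selects the secondary group.
         */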
        /* Search the primary group first. */
        global_entry = (esid & 0x1f) << 3;
        ste = (struct stab_entry *)(stab | ((esid & 0x1f) << 7));

        /* Find an empty entry, if one exists. */
        for (group = 0; group < 2; group++) {
                for (entry = 0; entry < 8; entry++, ste++) {
                        if (!(ste->esid_data & STE_ESID_V)) {
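                                /*
                                 * Set the VSID word before the ESID
                                 * word: the eieio between the two
                                 * stores keeps the entry from
                                 * appearing valid (STE_ESID_V set)
                                 * with a stale VSID.
                                 */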
                                ste->vsid_data = vsid_data;
                                asm volatile("eieio" : : : "memory");
                                ste->esid_data = esid_data;
                                return (global_entry | entry);
                        }
                }
                /* Now search the secondary group. */
                global_entry = ((~esid) & 0x1f) << 3;
                ste = (struct stab_entry *)(stab | (((~esid) & 0x1f) << 7));
        }

        /*
         * Could not find an empty entry; pick one using round-robin
         * selection, searching all entries in the two groups.
         */
        castout_entry = get_paca()->stab_rr;
        for (i = 0; i < 16; i++) {
                if (castout_entry < 8) {
                        global_entry = (esid & 0x1f) << 3;
                        ste = (struct stab_entry *)(stab | ((esid & 0x1f) << 7));
                        castout_ste = ste + castout_entry;
                } else {
                        global_entry = ((~esid) & 0x1f) << 3;
                        ste = (struct stab_entry *)(stab | (((~esid) & 0x1f) << 7));
                        castout_ste = ste + (castout_entry - 8);
                }

                /* Don't cast out the first kernel segment. */
                if ((castout_ste->esid_data & ESID_MASK) != KERNELBASE)
                        break;

                castout_entry = (castout_entry + 1) & 0xf;
        }

        get_paca()->stab_rr = (castout_entry + 1) & 0xf;

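        /*
         * Castout sequence: invalidate the victim entry, then write
         * the new VSID and ESID with barriers between each step, and
         * finally slbie the old ESID so no stale translation survives.
         */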
        /* Modify the old entry to the new value. */

        /* Force previous translations to complete. DRENG */
        asm volatile("isync" : : : "memory");

        old_esid = castout_ste->esid_data >> SID_SHIFT;
        castout_ste->esid_data = 0;             /* Invalidate old entry */

        asm volatile("sync" : : : "memory");    /* Order update */

        castout_ste->vsid_data = vsid_data;
        asm volatile("eieio" : : : "memory");   /* Order update */
        castout_ste->esid_data = esid_data;

        asm volatile("slbie %0" : : "r" (old_esid << SID_SHIFT));
        /* Ensure completion of slbie */
        asm volatile("sync" : : : "memory");

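        /* Return the table-wide slot index ((group << 3) | entry). */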
        return (global_entry | (castout_entry & 0x7));
}

/*
 * Allocate a segment table entry for the given ea and mm.
 */
static int __ste_allocate(unsigned long ea, struct mm_struct *mm)
{
        unsigned long vsid;
        unsigned char stab_entry;
        unsigned long offset;

        /* Kernel or user address? */
        if (ea >= KERNELBASE) {
                vsid = get_kernel_vsid(ea);
        } else {
                if ((ea >= TASK_SIZE_USER64) || !mm)
                        return 1;

                vsid = get_vsid(mm->context.id, ea);
        }

        stab_entry = make_ste(get_paca()->stab_addr, GET_ESID(ea), vsid);

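        /*
         * Remember user-segment slots in the per-CPU cache.  On
         * overflow the pointer is pushed past the array bounds
         * (NR_STAB_CACHE_ENTRIES + 1) so switch_stab() knows it must
         * scan the whole table instead.
         */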
        if (ea < KERNELBASE) {
                offset = __get_cpu_var(stab_cache_ptr);
                if (offset < NR_STAB_CACHE_ENTRIES)
                        __get_cpu_var(stab_cache[offset++]) = stab_entry;
                else
                        offset = NR_STAB_CACHE_ENTRIES + 1;
                __get_cpu_var(stab_cache_ptr) = offset;

                /* Order update */
                asm volatile("sync" : : : "memory");
        }

        return 0;
}

int ste_allocate(unsigned long ea)
{
        return __ste_allocate(ea, current->mm);
}

/*
 * Do the segment table work for a context switch: flush all user
 * entries from the table, then preload some probably useful entries
 * for the new task.
 */
void switch_stab(struct task_struct *tsk, struct mm_struct *mm)
{
        struct stab_entry *stab = (struct stab_entry *)get_paca()->stab_addr;
        struct stab_entry *ste;
        unsigned long offset = __get_cpu_var(stab_cache_ptr);
        unsigned long pc = KSTK_EIP(tsk);
        unsigned long stack = KSTK_ESP(tsk);
        unsigned long unmapped_base;

        /* Force previous translations to complete. DRENG */
        asm volatile("isync" : : : "memory");

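        /*
         * If the cache never overflowed, only the cached slots can
         * hold user entries; otherwise fall back to scanning the
         * entire table.
         */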
        if (offset <= NR_STAB_CACHE_ENTRIES) {
                int i;

                for (i = 0; i < offset; i++) {
                        ste = stab + __get_cpu_var(stab_cache[i]);
                        ste->esid_data = 0;     /* invalidate entry */
                }
        } else {
                unsigned long entry;

                /* Invalidate all entries. */
                ste = stab;

                /* Never flush the first entry: it maps the base kernel segment. */
                ste += 1;
                for (entry = 1;
                     entry < (PAGE_SIZE / sizeof(struct stab_entry));
                     entry++, ste++) {
                        unsigned long ea;
                        ea = ste->esid_data & ESID_MASK;
                        if (ea < KERNELBASE) {
                                ste->esid_data = 0;
                        }
                }
        }

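        /*
         * Flush any segment translations the processor may still have
         * cached; the surrounding syncs order the table updates
         * against the slbia.
         */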
        asm volatile("sync; slbia; sync" : : : "memory");

        __get_cpu_var(stab_cache_ptr) = 0;

        /* Now preload some entries for the new task */
        if (test_tsk_thread_flag(tsk, TIF_32BIT))
                unmapped_base = TASK_UNMAPPED_BASE_USER32;
        else
                unmapped_base = TASK_UNMAPPED_BASE_USER64;

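        /*
         * Touch the pc, stack and mmap-base segments, skipping any
         * that fall in a segment which has already been preloaded.
         */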
        __ste_allocate(pc, mm);

        if (GET_ESID(pc) == GET_ESID(stack))
                return;

        __ste_allocate(stack, mm);

        if ((GET_ESID(pc) == GET_ESID(unmapped_base))
            || (GET_ESID(stack) == GET_ESID(unmapped_base)))
                return;

        __ste_allocate(unmapped_base, mm);

        /* Order update */
        asm volatile("sync" : : : "memory");
}

extern void slb_initialize(void);

/*
 * Build an entry for the base kernel segment and put it into the
 * segment table or SLB.  All other segment table or SLB entries are
 * faulted in.
 */
void stab_initialize(unsigned long stab)
{
        unsigned long vsid = get_kernel_vsid(KERNELBASE);

        if (cpu_has_feature(CPU_FTR_SLB)) {
                slb_initialize();
        } else {
                asm volatile("isync; slbia; isync" : : : "memory");
                make_ste(stab, GET_ESID(KERNELBASE), vsid);

                /* Order update */
                asm volatile("sync" : : : "memory");
        }
}