allow coexistance of N build and AC build.
[tomato.git] / release / src-rt-6.x / linux / linux-2.6 / arch / s390 / kernel / early.c
blob50538e5456183194b08d60d81d73b84cfa448983
1 /*
2 * arch/s390/kernel/early.c
4 * Copyright IBM Corp. 2007
5 * Author(s): Hongjie Yang <hongjie@us.ibm.com>,
6 * Heiko Carstens <heiko.carstens@de.ibm.com>
7 */
9 #include <linux/init.h>
10 #include <linux/errno.h>
11 #include <linux/string.h>
12 #include <linux/ctype.h>
13 #include <linux/lockdep.h>
14 #include <linux/module.h>
15 #include <linux/pfn.h>
16 #include <linux/uaccess.h>
17 #include <asm/ipl.h>
18 #include <asm/lowcore.h>
19 #include <asm/processor.h>
20 #include <asm/sections.h>
21 #include <asm/setup.h>
22 #include <asm/cpcmd.h>
23 #include <asm/sclp.h>
/*
 * Create a Kernel NSS if the SAVESYS= parameter is defined
 */
28 #define DEFSYS_CMD_SIZE 96
29 #define SAVESYS_CMD_SIZE 32
31 char kernel_nss_name[NSS_NAME_SIZE + 1];
#ifdef CONFIG_SHARED_KERNEL
/*
 * Parse "SAVESYS=<name>" from the kernel command line and, when running
 * as a z/VM guest, define and save a kernel NSS (named saved system)
 * with the CP commands DEFSYS and SAVESYS.  On success the NSS name is
 * left in kernel_nss_name[] and IPL_NSS_VALID is set in ipl_flags;
 * on any failure the function silently returns and booting continues
 * without an NSS.
 */
static noinline __init void create_kernel_nss(void)
{
	unsigned int i, stext_pfn, eshared_pfn, end_pfn, min_size;
#ifdef CONFIG_BLK_DEV_INITRD
	unsigned int sinitrd_pfn, einitrd_pfn;
#endif
	int response;
	char *savesys_ptr;
	char upper_command_line[COMMAND_LINE_SIZE];
	char defsys_cmd[DEFSYS_CMD_SIZE];
	char savesys_cmd[SAVESYS_CMD_SIZE];

	/* Do nothing if we are not running under VM */
	if (!MACHINE_IS_VM)
		return;

	/* Convert COMMAND_LINE to upper case */
	for (i = 0; i < strlen(COMMAND_LINE); i++)
		upper_command_line[i] = toupper(COMMAND_LINE[i]);

	savesys_ptr = strstr(upper_command_line, "SAVESYS=");

	if (!savesys_ptr)
		return;

	savesys_ptr += 8;	/* Point to the beginning of the NSS name */
	for (i = 0; i < NSS_NAME_SIZE; i++) {
		if (savesys_ptr[i] == ' ' || savesys_ptr[i] == '\0')
			break;
		kernel_nss_name[i] = savesys_ptr[i];
	}
	/* kernel_nss_name lives in bss (cleared earlier in startup_init),
	 * so it is already NUL terminated after the copy above. */

	stext_pfn = PFN_DOWN(__pa(&_stext));
	eshared_pfn = PFN_DOWN(__pa(&_eshared));
	end_pfn = PFN_UP(__pa(&_end));
	min_size = end_pfn << 2;	/* pages -> KB (4KB pages) */

	sprintf(defsys_cmd, "DEFSYS %s 00000-%.5X EW %.5X-%.5X SR %.5X-%.5X",
		kernel_nss_name, stext_pfn - 1, stext_pfn, eshared_pfn - 1,
		eshared_pfn, end_pfn);

#ifdef CONFIG_BLK_DEV_INITRD
	if (INITRD_START && INITRD_SIZE) {
		sinitrd_pfn = PFN_DOWN(__pa(INITRD_START));
		einitrd_pfn = PFN_UP(__pa(INITRD_START + INITRD_SIZE));
		min_size = einitrd_pfn << 2;
		/* NOTE(review): appending via sprintf(buf, "%s...", buf, ...)
		 * overlaps source and destination, which is formally undefined;
		 * later kernels rewrote this to append at buf + strlen(buf). */
		sprintf(defsys_cmd, "%s EW %.5X-%.5X", defsys_cmd,
			sinitrd_pfn, einitrd_pfn);
	}
#endif

	sprintf(defsys_cmd, "%s EW MINSIZE=%.7iK", defsys_cmd, min_size);
	sprintf(savesys_cmd, "SAVESYS %s \n IPL %s",
		kernel_nss_name, kernel_nss_name);

	/* Define the NSS segment layout; any non-zero CP response aborts. */
	__cpcmd(defsys_cmd, NULL, 0, &response);

	if (response != 0)
		return;

	/* Save the NSS and re-IPL from it.  If the IPL from the NSS worked,
	 * CP echoes the command back, hence the strlen comparison. */
	__cpcmd(savesys_cmd, NULL, 0, &response);

	if (response != strlen(savesys_cmd))
		return;

	ipl_flags = IPL_NSS_VALID;
}

#else /* CONFIG_SHARED_KERNEL */

static inline void create_kernel_nss(void) { }

#endif /* CONFIG_SHARED_KERNEL */
109 * Clear bss memory
111 static noinline __init void clear_bss_section(void)
113 memset(__bss_start, 0, __bss_stop - __bss_start);
117 * Initialize storage key for kernel pages
119 static noinline __init void init_kernel_storage_key(void)
121 unsigned long end_pfn, init_pfn;
123 end_pfn = PFN_UP(__pa(&_end));
125 for (init_pfn = 0 ; init_pfn < end_pfn; init_pfn++)
126 page_set_storage_key(init_pfn << PAGE_SHIFT, PAGE_DEFAULT_KEY);
129 static noinline __init void detect_machine_type(void)
131 struct cpuinfo_S390 *cpuinfo = &S390_lowcore.cpu_data;
133 get_cpu_id(&S390_lowcore.cpu_data.cpu_id);
135 /* Running under z/VM ? */
136 if (cpuinfo->cpu_id.version == 0xff)
137 machine_flags |= 1;
139 /* Running on a P/390 ? */
140 if (cpuinfo->cpu_id.machine == 0x7490)
141 machine_flags |= 4;
144 #ifdef CONFIG_64BIT
145 static noinline __init int memory_fast_detect(void)
147 unsigned long val0 = 0;
148 unsigned long val1 = 0xc;
149 int ret = -ENOSYS;
151 if (ipl_flags & IPL_NSS_VALID)
152 return -ENOSYS;
154 asm volatile(
155 " diag %1,%2,0x260\n"
156 "0: lhi %0,0\n"
157 "1:\n"
158 EX_TABLE(0b,1b)
159 : "+d" (ret), "+d" (val0), "+d" (val1) : : "cc");
161 if (ret || val0 != val1)
162 return -ENOSYS;
164 memory_chunk[0].size = val0 + 1;
165 return 0;
167 #else
168 static inline int memory_fast_detect(void)
170 return -ENOSYS;
172 #endif
174 #define ADDR2G (1UL << 31)
176 static noinline __init unsigned long sclp_memory_detect(void)
178 struct sclp_readinfo_sccb *sccb;
179 unsigned long long memsize;
181 sccb = &s390_readinfo_sccb;
183 if (sccb->header.response_code != 0x10)
184 return 0;
186 if (sccb->rnsize)
187 memsize = sccb->rnsize << 20;
188 else
189 memsize = sccb->rnsize2 << 20;
190 if (sccb->rnmax)
191 memsize *= sccb->rnmax;
192 else
193 memsize *= sccb->rnmax2;
194 #ifndef CONFIG_64BIT
196 * Can't deal with more than 2G in 31 bit addressing mode, so
197 * limit the value in order to avoid strange side effects.
199 if (memsize > ADDR2G)
200 memsize = ADDR2G;
201 #endif
202 return (unsigned long) memsize;
205 static inline __init unsigned long __tprot(unsigned long addr)
207 int cc = -1;
209 asm volatile(
210 " tprot 0(%1),0\n"
211 "0: ipm %0\n"
212 " srl %0,28\n"
213 "1:\n"
214 EX_TABLE(0b,1b)
215 : "+d" (cc) : "a" (addr) : "cc");
216 return (unsigned long)cc;
219 /* Checking memory in 128KB increments. */
220 #define CHUNK_INCR (1UL << 17)
222 static noinline __init void find_memory_chunks(unsigned long memsize)
224 unsigned long addr = 0, old_addr = 0;
225 unsigned long old_cc = CHUNK_READ_WRITE;
226 unsigned long cc;
227 int chunk = 0;
229 while (chunk < MEMORY_CHUNKS) {
230 cc = __tprot(addr);
231 while (cc == old_cc) {
232 addr += CHUNK_INCR;
233 cc = __tprot(addr);
234 #ifndef CONFIG_64BIT
235 if (addr == ADDR2G)
236 break;
237 #endif
240 if (old_addr != addr &&
241 (old_cc == CHUNK_READ_WRITE || old_cc == CHUNK_READ_ONLY)) {
242 memory_chunk[chunk].addr = old_addr;
243 memory_chunk[chunk].size = addr - old_addr;
244 memory_chunk[chunk].type = old_cc;
245 chunk++;
248 old_addr = addr;
249 old_cc = cc;
251 #ifndef CONFIG_64BIT
252 if (addr == ADDR2G)
253 break;
254 #endif
256 * Finish memory detection at the first hole
257 * if storage size is unknown.
259 if (cc == -1UL && !memsize)
260 break;
261 if (memsize && addr >= memsize)
262 break;
266 static __init void early_pgm_check_handler(void)
268 unsigned long addr;
269 const struct exception_table_entry *fixup;
271 addr = S390_lowcore.program_old_psw.addr;
272 fixup = search_exception_tables(addr & PSW_ADDR_INSN);
273 if (!fixup)
274 disabled_wait(0);
275 S390_lowcore.program_old_psw.addr = fixup->fixup | PSW_ADDR_AMODE;
278 static noinline __init void setup_lowcore_early(void)
280 psw_t psw;
282 psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
283 psw.addr = PSW_ADDR_AMODE | (unsigned long) s390_base_ext_handler;
284 S390_lowcore.external_new_psw = psw;
285 psw.addr = PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler;
286 S390_lowcore.program_new_psw = psw;
287 s390_base_pgm_handler_fn = early_pgm_check_handler;
291 * Save ipl parameters, clear bss memory, initialize storage keys
292 * and create a kernel NSS at startup if the SAVESYS= parm is defined
294 void __init startup_init(void)
296 unsigned long memsize;
298 ipl_save_parameters();
299 clear_bss_section();
300 init_kernel_storage_key();
301 lockdep_init();
302 lockdep_off();
303 detect_machine_type();
304 create_kernel_nss();
305 sort_main_extable();
306 setup_lowcore_early();
307 sclp_readinfo_early();
308 memsize = sclp_memory_detect();
309 if (memory_fast_detect() < 0)
310 find_memory_chunks(memsize);
311 lockdep_on();