/*
 * Copyright IBM Corp. 2008
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/ipl.h>
#include <asm/sclp.h>
#include <asm/setup.h>
12 static int memory_fast_detect(struct mem_chunk
*chunk
)
14 unsigned long val0
= 0;
15 unsigned long val1
= 0xc;
18 if (ipl_flags
& IPL_NSS_VALID
)
25 : "+d" (rc
), "+d" (val0
), "+d" (val1
) : : "cc");
27 if (rc
|| val0
!= val1
)
29 chunk
->size
= val0
+ 1;
33 static inline int tprot(unsigned long addr
)
43 : "+d" (rc
) : "a" (addr
) : "cc");
47 #define ADDR2G (1ULL << 31)
49 static void find_memory_chunks(struct mem_chunk chunk
[])
51 unsigned long long memsize
, rnmax
, rzm
;
52 unsigned long addr
= 0, size
;
56 rnmax
= sclp_get_rnmax();
57 memsize
= rzm
* rnmax
;
60 if (sizeof(long) == 4) {
61 rzm
= min(ADDR2G
, rzm
);
62 memsize
= memsize
? min(ADDR2G
, memsize
) : ADDR2G
;
69 if (memsize
&& addr
+ size
>= memsize
)
71 } while (type
== tprot(addr
+ size
));
72 if (type
== CHUNK_READ_WRITE
|| type
== CHUNK_READ_ONLY
) {
79 } while (addr
< memsize
&& i
< MEMORY_CHUNKS
);
82 void detect_memory_layout(struct mem_chunk chunk
[])
84 unsigned long flags
, cr0
;
86 memset(chunk
, 0, MEMORY_CHUNKS
* sizeof(struct mem_chunk
));
87 if (memory_fast_detect(&chunk
[0]) == 0)
89 /* Disable IRQs, DAT and low address protection so tprot does the
90 * right thing and we don't get scheduled away with low address
91 * protection disabled.
93 flags
= __raw_local_irq_stnsm(0xf8);
94 __ctl_store(cr0
, 0, 0);
95 __ctl_clear_bit(0, 28);
96 find_memory_chunks(chunk
);
97 __ctl_load(cr0
, 0, 0);
98 __raw_local_irq_ssm(flags
);
100 EXPORT_SYMBOL(detect_memory_layout
);