3 #ifdef CONFIG_RANDOMIZE_BASE
5 #include <asm/archrandom.h>
8 #include <generated/compile.h>
9 #include <linux/module.h>
10 #include <linux/uts.h>
11 #include <linux/utsname.h>
12 #include <generated/utsrelease.h>
13 #include <linux/version.h>
15 /* Simplified build-specific string for starting entropy. */
16 static const char build_str
[] = UTS_RELEASE
" (" LINUX_COMPILE_BY
"@"
17 LINUX_COMPILE_HOST
") (" LINUX_COMPILER
") " UTS_VERSION
;
19 #define I8254_PORT_CONTROL 0x43
20 #define I8254_PORT_COUNTER0 0x40
21 #define I8254_CMD_READBACK 0xC0
22 #define I8254_SELECT_COUNTER0 0x02
23 #define I8254_STATUS_NOTREADY 0x40
24 static inline u16
i8254(void)
29 outb(I8254_PORT_CONTROL
,
30 I8254_CMD_READBACK
| I8254_SELECT_COUNTER0
);
31 status
= inb(I8254_PORT_COUNTER0
);
32 timer
= inb(I8254_PORT_COUNTER0
);
33 timer
|= inb(I8254_PORT_COUNTER0
) << 8;
34 } while (status
& I8254_STATUS_NOTREADY
);
/*
 * Fold the unsigned-long words of @area into @hash: rotate the running
 * hash by an odd number of bits (word width - 7) and XOR in each word.
 * Any trailing bytes smaller than sizeof(unsigned long) are ignored.
 */
static unsigned long rotate_xor(unsigned long hash, const void *area,
				size_t size)
{
	size_t i;
	unsigned long *ptr = (unsigned long *)area;

	for (i = 0; i < size / sizeof(hash); i++) {
		/* Rotate by odd number of bits and XOR. */
		hash = (hash << ((sizeof(hash) * 8) - 7)) | (hash >> 7);
		hash ^= ptr[i];
	}

	return hash;
}
54 /* Attempt to create a simple but unpredictable starting entropy. */
55 static unsigned long get_random_boot(void)
57 unsigned long hash
= 0;
59 hash
= rotate_xor(hash
, build_str
, sizeof(build_str
));
60 hash
= rotate_xor(hash
, real_mode
, sizeof(*real_mode
));
65 static unsigned long get_random_long(void)
68 const unsigned long mix_const
= 0x5d6008cbf3848dd3UL
;
70 const unsigned long mix_const
= 0x3f39e593UL
;
72 unsigned long raw
, random
= get_random_boot();
73 bool use_i8254
= true;
75 debug_putstr("KASLR using");
77 if (has_cpuflag(X86_FEATURE_RDRAND
)) {
78 debug_putstr(" RDRAND");
79 if (rdrand_long(&raw
)) {
85 if (has_cpuflag(X86_FEATURE_TSC
)) {
86 debug_putstr(" RDTSC");
94 debug_putstr(" i8254");
98 /* Circular multiply for better bit diffusion */
100 : "=a" (random
), "=d" (raw
)
101 : "a" (random
), "rm" (mix_const
));
104 debug_putstr("...\n");
114 #define MEM_AVOID_MAX 5
115 struct mem_vector mem_avoid
[MEM_AVOID_MAX
];
117 static bool mem_contains(struct mem_vector
*region
, struct mem_vector
*item
)
119 /* Item at least partially before region. */
120 if (item
->start
< region
->start
)
122 /* Item at least partially after region. */
123 if (item
->start
+ item
->size
> region
->start
+ region
->size
)
128 static bool mem_overlaps(struct mem_vector
*one
, struct mem_vector
*two
)
130 /* Item one is entirely before item two. */
131 if (one
->start
+ one
->size
<= two
->start
)
133 /* Item one is entirely after item two. */
134 if (one
->start
>= two
->start
+ two
->size
)
139 static void mem_avoid_init(unsigned long input
, unsigned long input_size
,
140 unsigned long output
, unsigned long output_size
)
142 u64 initrd_start
, initrd_size
;
143 u64 cmd_line
, cmd_line_size
;
144 unsigned long unsafe
, unsafe_len
;
148 * Avoid the region that is unsafe to overlap during
149 * decompression (see calculations at top of misc.c).
151 unsafe_len
= (output_size
>> 12) + 32768 + 18;
152 unsafe
= (unsigned long)input
+ input_size
- unsafe_len
;
153 mem_avoid
[0].start
= unsafe
;
154 mem_avoid
[0].size
= unsafe_len
;
157 initrd_start
= (u64
)real_mode
->ext_ramdisk_image
<< 32;
158 initrd_start
|= real_mode
->hdr
.ramdisk_image
;
159 initrd_size
= (u64
)real_mode
->ext_ramdisk_size
<< 32;
160 initrd_size
|= real_mode
->hdr
.ramdisk_size
;
161 mem_avoid
[1].start
= initrd_start
;
162 mem_avoid
[1].size
= initrd_size
;
164 /* Avoid kernel command line. */
165 cmd_line
= (u64
)real_mode
->ext_cmd_line_ptr
<< 32;
166 cmd_line
|= real_mode
->hdr
.cmd_line_ptr
;
167 /* Calculate size of cmd_line. */
168 ptr
= (char *)(unsigned long)cmd_line
;
169 for (cmd_line_size
= 0; ptr
[cmd_line_size
++]; )
171 mem_avoid
[2].start
= cmd_line
;
172 mem_avoid
[2].size
= cmd_line_size
;
174 /* Avoid heap memory. */
175 mem_avoid
[3].start
= (unsigned long)free_mem_ptr
;
176 mem_avoid
[3].size
= BOOT_HEAP_SIZE
;
178 /* Avoid stack memory. */
179 mem_avoid
[4].start
= (unsigned long)free_mem_end_ptr
;
180 mem_avoid
[4].size
= BOOT_STACK_SIZE
;
183 /* Does this memory vector overlap a known avoided area? */
184 bool mem_avoid_overlap(struct mem_vector
*img
)
188 for (i
= 0; i
< MEM_AVOID_MAX
; i
++) {
189 if (mem_overlaps(img
, &mem_avoid
[i
]))
196 unsigned long slots
[CONFIG_RANDOMIZE_BASE_MAX_OFFSET
/ CONFIG_PHYSICAL_ALIGN
];
197 unsigned long slot_max
= 0;
199 static void slots_append(unsigned long addr
)
201 /* Overflowing the slots list should be impossible. */
202 if (slot_max
>= CONFIG_RANDOMIZE_BASE_MAX_OFFSET
/
203 CONFIG_PHYSICAL_ALIGN
)
206 slots
[slot_max
++] = addr
;
209 static unsigned long slots_fetch_random(void)
211 /* Handle case of no slots stored. */
215 return slots
[get_random_long() % slot_max
];
218 static void process_e820_entry(struct e820entry
*entry
,
219 unsigned long minimum
,
220 unsigned long image_size
)
222 struct mem_vector region
, img
;
224 /* Skip non-RAM entries. */
225 if (entry
->type
!= E820_RAM
)
228 /* Ignore entries entirely above our maximum. */
229 if (entry
->addr
>= CONFIG_RANDOMIZE_BASE_MAX_OFFSET
)
232 /* Ignore entries entirely below our minimum. */
233 if (entry
->addr
+ entry
->size
< minimum
)
236 region
.start
= entry
->addr
;
237 region
.size
= entry
->size
;
239 /* Potentially raise address to minimum location. */
240 if (region
.start
< minimum
)
241 region
.start
= minimum
;
243 /* Potentially raise address to meet alignment requirements. */
244 region
.start
= ALIGN(region
.start
, CONFIG_PHYSICAL_ALIGN
);
246 /* Did we raise the address above the bounds of this e820 region? */
247 if (region
.start
> entry
->addr
+ entry
->size
)
250 /* Reduce size by any delta from the original address. */
251 region
.size
-= region
.start
- entry
->addr
;
253 /* Reduce maximum size to fit end of image within maximum limit. */
254 if (region
.start
+ region
.size
> CONFIG_RANDOMIZE_BASE_MAX_OFFSET
)
255 region
.size
= CONFIG_RANDOMIZE_BASE_MAX_OFFSET
- region
.start
;
257 /* Walk each aligned slot and check for avoided areas. */
258 for (img
.start
= region
.start
, img
.size
= image_size
;
259 mem_contains(®ion
, &img
) ;
260 img
.start
+= CONFIG_PHYSICAL_ALIGN
) {
261 if (mem_avoid_overlap(&img
))
263 slots_append(img
.start
);
267 static unsigned long find_random_addr(unsigned long minimum
,
273 /* Make sure minimum is aligned. */
274 minimum
= ALIGN(minimum
, CONFIG_PHYSICAL_ALIGN
);
276 /* Verify potential e820 positions, appending to slots list. */
277 for (i
= 0; i
< real_mode
->e820_entries
; i
++) {
278 process_e820_entry(&real_mode
->e820_map
[i
], minimum
, size
);
281 return slots_fetch_random();
/*
 * Choose the physical load address for the kernel image.  Returns the
 * original @output address when KASLR is disabled ("nokaslr"), when no
 * suitable e820 region is found, or when the random choice would fall
 * below the compile-time minimum; otherwise returns the random slot.
 */
unsigned char *choose_kernel_location(unsigned char *input,
				      unsigned long input_size,
				      unsigned char *output,
				      unsigned long output_size)
{
	unsigned long choice = (unsigned long)output;
	unsigned long random;

	if (cmdline_find_option_bool("nokaslr")) {
		debug_putstr("KASLR disabled...\n");
		goto out;
	}

	/* Record the various known unsafe memory ranges. */
	mem_avoid_init((unsigned long)input, input_size,
		       (unsigned long)output, output_size);

	/* Walk e820 and find a random address. */
	random = find_random_addr(choice, output_size);
	if (!random) {
		debug_putstr("KASLR could not find suitable E820 region...\n");
		goto out;
	}

	/* Always enforce the minimum. */
	if (random < choice)
		goto out;

	choice = random;
out:
	return (unsigned char *)choice;
}
317 #endif /* CONFIG_RANDOMIZE_BASE */