/*
 * QEMU low level functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "cpu.h"

#if defined(USE_KQEMU)
#include "vl.h"
#endif

#if defined(__i386__) && !defined(CONFIG_SOFTMMU) && !defined(CONFIG_USER_ONLY)

#include <sys/mman.h>

/* When not using the soft MMU, libc-independent functions are needed for
   the CPU core because it uses alternate stacks and settings that are
   incompatible with libc/threads. */

#include <linux/unistd.h>

#define QEMU_SYSCALL0(name) \
{ long __res; \
  __asm__ volatile ("int $0x80" \
        : "=a" (__res) \
        : "0" (__NR_##name)); \
  return __res; }

#define QEMU_SYSCALL1(name,arg1) \
{ long __res; \
  __asm__ volatile ("int $0x80" \
        : "=a" (__res) \
        : "0" (__NR_##name),"b" ((long)(arg1))); \
  return __res; }

#define QEMU_SYSCALL2(name,arg1,arg2) \
{ long __res; \
  __asm__ volatile ("int $0x80" \
        : "=a" (__res) \
        : "0" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2))); \
  return __res; }

#define QEMU_SYSCALL3(name,arg1,arg2,arg3) \
{ long __res; \
  __asm__ volatile ("int $0x80" \
        : "=a" (__res) \
        : "0" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2)), \
          "d" ((long)(arg3))); \
  return __res; }

#define QEMU_SYSCALL4(name,arg1,arg2,arg3,arg4) \
{ long __res; \
  __asm__ volatile ("int $0x80" \
        : "=a" (__res) \
        : "0" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2)), \
          "d" ((long)(arg3)),"S" ((long)(arg4))); \
  return __res; }

#define QEMU_SYSCALL5(name,arg1,arg2,arg3,arg4,arg5) \
{ long __res; \
  __asm__ volatile ("int $0x80" \
        : "=a" (__res) \
        : "0" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2)), \
          "d" ((long)(arg3)),"S" ((long)(arg4)),"D" ((long)(arg5))); \
  return __res; }

#define QEMU_SYSCALL6(name,arg1,arg2,arg3,arg4,arg5,arg6) \
{ long __res; \
  __asm__ volatile ("push %%ebp ; movl %%eax,%%ebp ; movl %1,%%eax ; int $0x80 ; pop %%ebp" \
        : "=a" (__res) \
        : "i" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2)), \
          "d" ((long)(arg3)),"S" ((long)(arg4)),"D" ((long)(arg5)), \
          "0" ((long)(arg6))); \
  return __res; }
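/* Usage note: each QEMU_SYSCALLn macro expands to a complete function body
   (braces and return included), so a libc-free wrapper is written by letting
   the macro follow the prototype. Illustrative sketch only; qemu_getpid is a
   hypothetical name not used elsewhere in this file: */
#if 0
static int qemu_getpid(void)
QEMU_SYSCALL0(getpid)
#endif
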
/****************************************************************/
/* shmat replacement */

int qemu_ipc(int call, unsigned long first,
             unsigned long second, unsigned long third,
             void *ptr, unsigned long fifth)
{
    QEMU_SYSCALL6(ipc, call, first, second, third, ptr, fifth);
}
#define SHMAT 21 /* SHMAT operation code of the ipc() multiplexer */

/* we must define shmat so that a specific address is used when
   mapping the X11 ximage */
void *shmat(int shmid, const void *shmaddr, int shmflg)
{
    void *ptr;
    int ret;

    /* we give an address in the right memory area */
    if (!shmaddr)
        shmaddr = get_mmap_addr(8192 * 1024);
    ret = qemu_ipc(SHMAT, shmid, shmflg, (unsigned long)&ptr, (void *)shmaddr, 0);
    if (ret < 0)
        return NULL;
    return ptr;
}
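/* For context, the usual XShm attach sequence that ends up in the shmat()
   override above looks roughly like this (hedged sketch; the variable names
   are illustrative and the X11 calls are not made from this file): */
#if 0
XShmSegmentInfo shminfo;

shminfo.shmid = shmget(IPC_PRIVATE, image_size, IPC_CREAT | 0777);
shminfo.shmaddr = shmat(shminfo.shmid, 0, 0); /* resolved to the override above */
XShmAttach(display, &shminfo);
#endif
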
/****************************************************************/
/* sigaction that bypasses the threads library */

static int kernel_sigaction(int signum, const struct qemu_sigaction *act,
                            struct qemu_sigaction *oldact, int sigsetsize)
{
    QEMU_SYSCALL4(rt_sigaction, signum, act, oldact, sigsetsize);
}

int qemu_sigaction(int signum, const struct qemu_sigaction *act,
                   struct qemu_sigaction *oldact)
{
    /* 8 is the size in bytes of the kernel sigset_t expected by rt_sigaction */
    return kernel_sigaction(signum, act, oldact, 8);
}
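/* Illustrative sketch (hypothetical names; the field layout of struct
   qemu_sigaction is assumed to mirror the kernel's struct sigaction): the
   CPU core would install its host SIGSEGV handler through this wrapper
   instead of the libc/pthread sigaction, along these lines: */
#if 0
static void host_segv_handler(int sig);

static void install_host_handlers(void)
{
    struct qemu_sigaction act;
    memset(&act, 0, sizeof(act));
    act.sa_handler = host_segv_handler; /* assumed field name */
    qemu_sigaction(SIGSEGV, &act, NULL);
}
#endif
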
/****************************************************************/
/* memory allocation */

//#define DEBUG_MALLOC

#define MALLOC_BASE       0xab000000
#define PHYS_RAM_BASE     0xac000000

#define MALLOC_ALIGN      16
#define BLOCK_HEADER_SIZE 16
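/* Example: with MALLOC_ALIGN = 16 and BLOCK_HEADER_SIZE = 16, a 30 byte
   request is rounded up to (30 + 16 + 16 - 1) & ~15 = 48 bytes, i.e. the
   16 byte header followed by 32 bytes of 16-byte-aligned payload. */
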
typedef struct MemoryBlock {
    struct MemoryBlock *next;
    unsigned long size; /* size of the block, including this header */
} MemoryBlock;

static MemoryBlock *first_free_block;
static unsigned long malloc_addr = MALLOC_BASE;
static void *malloc_get_space(size_t size)
{
    void *ptr;

    size = TARGET_PAGE_ALIGN(size);
    ptr = mmap((void *)malloc_addr, size,
               PROT_WRITE | PROT_READ,
               MAP_PRIVATE | MAP_FIXED | MAP_ANON, -1, 0);
    if (ptr == MAP_FAILED)
        return NULL;
    malloc_addr += size;
    return ptr;
}
void *qemu_malloc(size_t size)
{
    MemoryBlock *mb, *mb1, **pmb;
    void *ptr;
    size_t size1, area_size;

    if (size == 0)
        return NULL;

    /* round up to the allocation granularity, including the block header */
    size = (size + BLOCK_HEADER_SIZE + MALLOC_ALIGN - 1) & ~(MALLOC_ALIGN - 1);

    /* first-fit search of the free list */
    pmb = &first_free_block;
    for(;;) {
        mb = *pmb;
        if (mb == NULL)
            break;
        if (size <= mb->size)
            goto found;
        pmb = &mb->next;
    }

    /* no big enough blocks found: get new space */
    area_size = TARGET_PAGE_ALIGN(size);
    mb = malloc_get_space(area_size);
    if (!mb)
        return NULL;
    size1 = area_size - size;
    if (size1 > 0) {
        /* create a new free block with the remaining space */
        mb1 = (MemoryBlock *)((uint8_t *)mb + size);
        mb1->size = size1;
        mb1->next = first_free_block;
        first_free_block = mb1;
    }
    mb->size = size;
    goto the_end;

 found:
    /* a free block was found: use it */
    size1 = mb->size - size;
    if (size1 > 0) {
        /* create a new free block with the remaining space */
        mb1 = (MemoryBlock *)((uint8_t *)mb + size);
        mb1->size = size1;
        mb1->next = mb->next;
        *pmb = mb1;
    } else {
        /* suppress the first block */
        *pmb = mb->next;
    }
    mb->size = size;

 the_end:
    ptr = ((uint8_t *)mb + BLOCK_HEADER_SIZE);
#ifdef DEBUG_MALLOC
    qemu_printf("malloc: size=0x%x ptr=0x%lx\n", size, (unsigned long)ptr);
#endif
    return ptr;
}
void qemu_free(void *ptr)
{
    MemoryBlock *mb;

    if (!ptr)
        return;
    /* put the block back on the free list (no coalescing) */
    mb = (MemoryBlock *)((uint8_t *)ptr - BLOCK_HEADER_SIZE);
    mb->next = first_free_block;
    first_free_block = mb;
}
/****************************************************************/
/* virtual memory allocation */

unsigned long mmap_addr = PHYS_RAM_BASE;

void *get_mmap_addr(unsigned long size)
{
    unsigned long addr;

    addr = mmap_addr;
    mmap_addr += ((size + 4095) & ~4095) + 4096;
    return (void *)addr;
}
#else

void *get_mmap_addr(unsigned long size)
{
    return NULL;
}

void qemu_free(void *ptr)
{
    free(ptr);
}

void *qemu_malloc(size_t size)
{
    return malloc(size);
}

#endif

#ifdef _WIN32

void *qemu_vmalloc(size_t size)
{
    /* FIXME: this is not exactly an optimal solution since VirtualAlloc
       has 64 KB granularity, but at least it guarantees us that the
       memory is page aligned. */
    return VirtualAlloc(NULL, size, MEM_COMMIT, PAGE_READWRITE);
}

void qemu_vfree(void *ptr)
{
    VirtualFree(ptr, 0, MEM_RELEASE);
}
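/* Illustrative sketch (hypothetical helper; would also need <stdint.h> and
   <assert.h>): despite the 64 KB VirtualAlloc granularity, the returned
   pointer is always page aligned, which is all qemu_vmalloc guarantees: */
#if 0
static void check_vmalloc_alignment(void)
{
    void *p = qemu_vmalloc(12345);
    assert(((uintptr_t)p & 4095) == 0);
    qemu_vfree(p);
}
#endif
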
#else

#if defined(USE_KQEMU)

#include <sys/vfs.h>
#include <sys/mman.h>
#include <fcntl.h>

void *kqemu_vmalloc(size_t size)
{
    static int phys_ram_fd = -1;
    static int phys_ram_size = 0;
    const char *tmpdir;
    char phys_ram_file[1024];
    void *ptr;
    struct statfs stfs;

    if (phys_ram_fd < 0) {
        tmpdir = getenv("QEMU_TMPDIR");
        if (!tmpdir)
            tmpdir = "/dev/shm";
        if (statfs(tmpdir, &stfs) == 0) {
            int64_t free_space;
            int ram_mb;

            free_space = (int64_t)stfs.f_bavail * stfs.f_bsize;
            if ((ram_size + 8192 * 1024) >= free_space) {
                ram_mb = (ram_size / (1024 * 1024));
                fprintf(stderr,
                        "You do not have enough space in '%s' for the %d MB of QEMU virtual RAM.\n",
                        tmpdir, ram_mb);
                if (strcmp(tmpdir, "/dev/shm") == 0) {
                    fprintf(stderr, "To have more space available provided you have enough RAM and swap, do as root:\n"
                            "umount /dev/shm\n"
                            "mount -t tmpfs -o size=%dm none /dev/shm\n",
                            ram_mb + 16);
                } else {
                    fprintf(stderr,
                            "Use the '-m' option of QEMU to diminish the amount of virtual RAM or use the\n"
                            "QEMU_TMPDIR environment variable to set another directory where the QEMU\n"
                            "temporary RAM file will be opened.\n");
                }
                fprintf(stderr, "Or disable the accelerator module with -no-kqemu\n");
                exit(1);
            }
        }
        snprintf(phys_ram_file, sizeof(phys_ram_file), "%s/qemuXXXXXX",
                 tmpdir);
        if (mkstemp(phys_ram_file) < 0) {
            fprintf(stderr,
                    "warning: could not create temporary file in '%s'.\n"
                    "Use QEMU_TMPDIR to select a directory in a tmpfs filesystem.\n"
                    "Using '/tmp' as fallback.\n",
                    tmpdir);
            snprintf(phys_ram_file, sizeof(phys_ram_file), "%s/qemuXXXXXX",
                     "/tmp");
            if (mkstemp(phys_ram_file) < 0) {
                fprintf(stderr, "Could not create temporary memory file '%s'\n",
                        phys_ram_file);
                exit(1);
            }
        }
        phys_ram_fd = open(phys_ram_file, O_CREAT | O_TRUNC | O_RDWR, 0600);
        if (phys_ram_fd < 0) {
            fprintf(stderr, "Could not open temporary memory file '%s'\n",
                    phys_ram_file);
            exit(1);
        }
        unlink(phys_ram_file);
    }
    size = (size + 4095) & ~4095;
    ftruncate(phys_ram_fd, phys_ram_size + size);
    ptr = mmap(NULL, size,
               PROT_WRITE | PROT_READ, MAP_SHARED,
               phys_ram_fd, phys_ram_size);
    if (ptr == MAP_FAILED) {
        fprintf(stderr, "Could not map physical memory\n");
        exit(1);
    }
    phys_ram_size += size;
    return ptr;
}
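/* Example of the resulting layout (illustrative): two calls
   kqemu_vmalloc(0x800000) map offsets 0x000000 and 0x800000 of the same
   unlinked temporary file with MAP_SHARED, presumably so the kqemu kernel
   module can access the very same guest RAM pages. */
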
void kqemu_vfree(void *ptr)
{
    /* may be useful some day, but currently we do not need to free */
}

#endif
/* alloc shared memory pages */
void *qemu_vmalloc(size_t size)
{
#if defined(USE_KQEMU)
    if (kqemu_allowed)
        return kqemu_vmalloc(size);
#endif
#ifdef _BSD
    return valloc(size);
#else
    return memalign(4096, size);
#endif
}
void qemu_vfree(void *ptr)
{
#if defined(USE_KQEMU)
    if (kqemu_allowed) {
        kqemu_vfree(ptr);
        return;
    }
#endif
    free(ptr);
}

#endif
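/* Typical use (hedged sketch; the exact call site lives in vl.c and
   phys_ram_base/phys_ram_size are assumed to be the usual guest RAM
   globals): guest RAM is allocated once at startup through this wrapper: */
#if 0
phys_ram_base = qemu_vmalloc(phys_ram_size);
if (!phys_ram_base) {
    fprintf(stderr, "Could not allocate physical memory\n");
    exit(1);
}
#endif
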
void *qemu_mallocz(size_t size)
{
    void *ptr;

    ptr = qemu_malloc(size);
    if (!ptr)
        return NULL;
    memset(ptr, 0, size);
    return ptr;
}
char *qemu_strdup(const char *str)
{
    char *ptr;

    ptr = qemu_malloc(strlen(str) + 1);
    if (ptr)
        strcpy(ptr, str);
    return ptr;
}