/* Code for loading Linux executables. Mostly linux kernel code. */
#include "qemu/osdep.h"
#include "qemu.h"
#include "user-internals.h"
#include "loader.h"
#include "user-mmap.h"
#include "qapi/error.h"
/* ??? This should really be somewhere else. */
abi_long memcpy_to_target(abi_ulong dest, const void *src, unsigned long len)
{
    void *host_ptr;

    host_ptr = lock_user(VERIFY_WRITE, dest, len, 0);
    if (!host_ptr) {
        return -TARGET_EFAULT;
    }
    memcpy(host_ptr, src, len);
    unlock_user(host_ptr, dest, 1);
    return 0;
}
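
/*
 * Count the entries in a NULL-terminated vector of strings,
 * i.e. an argv/envp-style array; the terminating NULL is not counted.
 */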
static int count(char **vec)
{
    int i;

    for (i = 0; *vec; i++) {
        vec++;
    }
    return i;
}
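
/*
 * Sanity-check the executable and fill in the fields of *bprm that the
 * binary loaders rely on: the effective uid/gid (honouring set-uid and
 * set-gid bits on the file) and the first BPRM_BUF_SIZE bytes of the
 * file, which also serve as the ImageSource header cache.
 */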
static int prepare_binprm(struct linux_binprm *bprm)
{
    struct stat st;
    int mode;
    int retval;

    if (fstat(bprm->src.fd, &st) < 0) {
        return -errno;
    }

    mode = st.st_mode;
    if (!S_ISREG(mode)) {        /* Must be regular file */
        return -EACCES;
    }
    if (!(mode & 0111)) {        /* Must have at least one execute bit set */
        return -EACCES;
    }

    bprm->e_uid = geteuid();
    bprm->e_gid = getegid();

    /* Set-uid? */
    if (mode & S_ISUID) {
        bprm->e_uid = st.st_uid;
    }

    /*
     * Set-gid?
     * If setgid is set but no group execute bit then this
     * is a candidate for mandatory locking, not a setgid
     * executable.
     */
    if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
        bprm->e_gid = st.st_gid;
    }

    retval = read(bprm->src.fd, bprm->buf, BPRM_BUF_SIZE);
    if (retval < 0) {
        perror("prepare_binprm");
        exit(-1);
    }
    if (retval < BPRM_BUF_SIZE) {
        /* Make sure the rest of the loader won't read garbage. */
        memset(bprm->buf + retval, 0, BPRM_BUF_SIZE - retval);
    }

    bprm->src.cache = bprm->buf;
    bprm->src.cache_size = retval;

    return retval;
}
/* Construct the envp and argv tables on the target stack. */
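/*
 * On return, sp points at argc; above it sit (when push_ptr is set)
 * pointers to the argv and envp tables, then the NULL-terminated
 * argv[] and envp[] tables themselves, whose entries point into the
 * argument and environment strings previously copied to the stack
 * starting at stringp.
 */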
abi_ulong loader_build_argptr(int envc, int argc, abi_ulong sp,
                              abi_ulong stringp, int push_ptr)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    int n = sizeof(abi_ulong);
    abi_ulong envp;
    abi_ulong argv;

    sp -= (envc + 1) * n;
    envp = sp;
    sp -= (argc + 1) * n;
    argv = sp;
    ts->info->envp = envp;
    ts->info->envc = envc;
    ts->info->argv = argv;
    ts->info->argc = argc;

    if (push_ptr) {
        /* FIXME - handle put_user() failures */
        sp -= n;
        put_user_ual(envp, sp);
        sp -= n;
        put_user_ual(argv, sp);
    }

    sp -= n;
    /* FIXME - handle put_user() failures */
    put_user_ual(argc, sp);

    ts->info->arg_strings = stringp;
    while (argc-- > 0) {
        /* FIXME - handle put_user() failures */
        put_user_ual(stringp, argv);
        argv += n;
        stringp += target_strlen(stringp) + 1;
    }
    /* FIXME - handle put_user() failures */
    put_user_ual(0, argv);

    ts->info->env_strings = stringp;
    while (envc-- > 0) {
        /* FIXME - handle put_user() failures */
        put_user_ual(stringp, envp);
        envp += n;
        stringp += target_strlen(stringp) + 1;
    }
    /* FIXME - handle put_user() failures */
    put_user_ual(0, envp);

    return sp;
}
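
/*
 * Load the executable referenced by fdexec into the guest address space,
 * dispatching on the magic bytes gathered by prepare_binprm (ELF, or bFLT
 * when the target supports it), then initialise the initial thread's
 * registers.  Returns a negative error code on failure.
 */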
int loader_exec(int fdexec, const char *filename, char **argv, char **envp,
                struct target_pt_regs *regs, struct image_info *infop,
                struct linux_binprm *bprm)
{
    int retval;

    bprm->src.fd = fdexec;
    bprm->filename = (char *)filename;
    bprm->argc = count(argv);
    bprm->argv = argv;
    bprm->envc = count(envp);
    bprm->envp = envp;

    retval = prepare_binprm(bprm);

    if (retval < 4) {
        return -ENOEXEC;
    }
    if (bprm->buf[0] == 0x7f
        && bprm->buf[1] == 'E'
        && bprm->buf[2] == 'L'
        && bprm->buf[3] == 'F') {
        retval = load_elf_binary(bprm, infop);
#if defined(TARGET_HAS_BFLT)
    } else if (bprm->buf[0] == 'b'
               && bprm->buf[1] == 'F'
               && bprm->buf[2] == 'L'
               && bprm->buf[3] == 'T') {
        retval = load_flt_binary(bprm, infop);
#endif
    } else {
        return -ENOEXEC;
    }
    if (retval < 0) {
        return retval;
    }

    /* Success. Initialize important registers. */
    do_init_thread(regs, infop);
    return retval;
}
bool imgsrc_read(void *dst, off_t offset, size_t len,
                 const ImageSource *img, Error **errp)
{
    ssize_t ret;

    if (offset + len <= img->cache_size) {
        memcpy(dst, img->cache + offset, len);
        return true;
    }

    if (img->fd < 0) {
        error_setg(errp, "read past end of buffer");
        return false;
    }

    ret = pread(img->fd, dst, len, offset);
    if (ret == len) {
        return true;
    }
    if (ret < 0) {
        error_setg_errno(errp, errno, "Error reading file header");
    } else {
        error_setg(errp, "Incomplete read of file header");
    }
    return false;
}
void *imgsrc_read_alloc(off_t offset, size_t len,
                        const ImageSource *img, Error **errp)
{
    void *alloc = g_malloc(len);
    bool ok = imgsrc_read(alloc, offset, len, img, errp);

    if (!ok) {
        g_free(alloc);
        alloc = NULL;
    }
    return alloc;
}
abi_long imgsrc_mmap(abi_ulong start, abi_ulong len, int prot,
                     int flags, const ImageSource *src, abi_ulong offset)
{
    const int prot_write = PROT_READ | PROT_WRITE;
    abi_long ret;
    void *haddr;

    assert(flags == (MAP_PRIVATE | MAP_FIXED));

    if (src->fd >= 0) {
        return target_mmap(start, len, prot, flags, src->fd, offset);
    }

    /*
     * This case is for the vdso; we don't expect bad images.
     * The mmap may extend beyond the end of the image, especially
     * to the end of the page. Zero fill.
     */
    assert(offset < src->cache_size);

    ret = target_mmap(start, len, prot_write, flags | MAP_ANON, -1, 0);
    if (ret == -1) {
        return ret;
    }

    haddr = lock_user(VERIFY_WRITE, start, len, 0);
    assert(haddr != NULL);
    if (offset + len <= src->cache_size) {
        memcpy(haddr, src->cache + offset, len);
    } else {
        size_t rest = src->cache_size - offset;
        memcpy(haddr, src->cache + offset, rest);
        memset(haddr + rest, 0, len - rest);
    }
    unlock_user(haddr, start, len);

    if (prot != prot_write) {
        target_mprotect(start, len, prot);
    }

    return ret;
}