util/oslib-posix.c
/*
 * os-posix-lib.c
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (c) 2010 Red Hat, Inc.
 *
 * QEMU library functions on POSIX which are shared between QEMU and
 * the QEMU tools.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include <termios.h>

#include <glib/gprintf.h>

#include "qemu-common.h"
#include "sysemu/sysemu.h"
#include "trace.h"
#include "qapi/error.h"
#include "qemu/sockets.h"
#include "qemu/thread.h"
#include <libgen.h>
#include <sys/signal.h>
#include "qemu/cutils.h"

#ifdef CONFIG_LINUX
#include <sys/syscall.h>
#endif

#ifdef __FreeBSD__
#include <sys/sysctl.h>
#include <sys/user.h>
#include <libutil.h>
#endif

#ifdef __NetBSD__
#include <sys/sysctl.h>
#endif

#include "qemu/mmap-alloc.h"

#ifdef CONFIG_DEBUG_STACK_USAGE
#include "qemu/error-report.h"
#endif
#define MAX_MEM_PREALLOC_THREAD_COUNT 16

struct MemsetThread {
    char *addr;
    size_t numpages;
    size_t hpagesize;
    QemuThread pgthread;
    sigjmp_buf env;
};
typedef struct MemsetThread MemsetThread;

static MemsetThread *memset_thread;
static int memset_num_threads;
static bool memset_thread_failed;
int qemu_get_thread_id(void)
{
#if defined(__linux__)
    return syscall(SYS_gettid);
#else
    return getpid();
#endif
}
int qemu_daemon(int nochdir, int noclose)
{
    return daemon(nochdir, noclose);
}
bool qemu_write_pidfile(const char *path, Error **errp)
{
    int fd;
    char pidstr[32];

    while (1) {
        struct stat a, b;
        struct flock lock = {
            .l_type = F_WRLCK,
            .l_whence = SEEK_SET,
            .l_len = 0,
        };

        fd = qemu_open(path, O_CREAT | O_WRONLY, S_IRUSR | S_IWUSR);
        if (fd == -1) {
            error_setg_errno(errp, errno, "Cannot open pid file");
            return false;
        }

        if (fstat(fd, &b) < 0) {
            error_setg_errno(errp, errno, "Cannot stat file");
            goto fail_close;
        }

        if (fcntl(fd, F_SETLK, &lock)) {
            error_setg_errno(errp, errno, "Cannot lock pid file");
            goto fail_close;
        }

        /*
         * Now make sure the path we locked is the same one that now
         * exists on the filesystem.
         */
        if (stat(path, &a) < 0) {
            /*
             * PID file disappeared, someone else must be racing with
             * us, so try again.
             */
            close(fd);
            continue;
        }

        if (a.st_ino == b.st_ino) {
            break;
        }

        /*
         * PID file was recreated, someone else must be racing with
         * us, so try again.
         */
        close(fd);
    }

    if (ftruncate(fd, 0) < 0) {
        error_setg_errno(errp, errno, "Failed to truncate pid file");
        goto fail_unlink;
    }

    snprintf(pidstr, sizeof(pidstr), FMT_pid "\n", getpid());
    if (write(fd, pidstr, strlen(pidstr)) != strlen(pidstr)) {
        error_setg(errp, "Failed to write pid file");
        goto fail_unlink;
    }

    return true;

fail_unlink:
    unlink(path);
fail_close:
    close(fd);
    return false;
}
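
/*
 * Example (illustrative sketch; the pidfile path is hypothetical): a caller
 * typically writes the pidfile once at startup and treats failure as fatal.
 *
 *     Error *local_err = NULL;
 *     if (!qemu_write_pidfile("/run/my-vm.pid", &local_err)) {
 *         error_report_err(local_err);
 *         exit(EXIT_FAILURE);
 *     }
 */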
void *qemu_oom_check(void *ptr)
{
    if (ptr == NULL) {
        fprintf(stderr, "Failed to allocate memory: %s\n", strerror(errno));
        abort();
    }
    return ptr;
}
void *qemu_try_memalign(size_t alignment, size_t size)
{
    void *ptr;

    if (alignment < sizeof(void*)) {
        alignment = sizeof(void*);
    }

#if defined(CONFIG_POSIX_MEMALIGN)
    int ret;
    ret = posix_memalign(&ptr, alignment, size);
    if (ret != 0) {
        errno = ret;
        ptr = NULL;
    }
#elif defined(CONFIG_BSD)
    ptr = valloc(size);
#else
    ptr = memalign(alignment, size);
#endif
    trace_qemu_memalign(alignment, size, ptr);
    return ptr;
}
void *qemu_memalign(size_t alignment, size_t size)
{
    return qemu_oom_check(qemu_try_memalign(alignment, size));
}
/* alloc shared memory pages */
void *qemu_anon_ram_alloc(size_t size, uint64_t *alignment, bool shared)
{
    size_t align = QEMU_VMALLOC_ALIGN;
    void *ptr = qemu_ram_mmap(-1, size, align, shared, false);

    if (ptr == MAP_FAILED) {
        return NULL;
    }

    if (alignment) {
        *alignment = align;
    }

    trace_qemu_anon_ram_alloc(size, ptr);
    return ptr;
}
void qemu_vfree(void *ptr)
{
    trace_qemu_vfree(ptr);
    free(ptr);
}
void qemu_anon_ram_free(void *ptr, size_t size)
{
    trace_qemu_anon_ram_free(ptr, size);
    qemu_ram_munmap(-1, ptr, size);
}
void qemu_set_block(int fd)
{
    int f;
    f = fcntl(fd, F_GETFL);
    assert(f != -1);
    f = fcntl(fd, F_SETFL, f & ~O_NONBLOCK);
    assert(f != -1);
}
void qemu_set_nonblock(int fd)
{
    int f;
    f = fcntl(fd, F_GETFL);
    assert(f != -1);
    f = fcntl(fd, F_SETFL, f | O_NONBLOCK);
#ifdef __OpenBSD__
    if (f == -1) {
        /*
         * Prior to OpenBSD 6.3, fcntl(F_SETFL) is not permitted on
         * memory devices and sets errno to ENODEV.
         * It's OK if we fail to set O_NONBLOCK on devices like /dev/null,
         * because they will never block anyway.
         */
        assert(errno == ENODEV);
    }
#else
    assert(f != -1);
#endif
}
int socket_set_fast_reuse(int fd)
{
    int val = 1, ret;

    ret = setsockopt(fd, SOL_SOCKET, SO_REUSEADDR,
                     (const char *)&val, sizeof(val));

    assert(ret == 0);

    return ret;
}
void qemu_set_cloexec(int fd)
{
    int f;
    f = fcntl(fd, F_GETFD);
    assert(f != -1);
    f = fcntl(fd, F_SETFD, f | FD_CLOEXEC);
    assert(f != -1);
}
/*
 * Creates a pipe with FD_CLOEXEC set on both file descriptors
 */
int qemu_pipe(int pipefd[2])
{
    int ret;

#ifdef CONFIG_PIPE2
    ret = pipe2(pipefd, O_CLOEXEC);
    if (ret != -1 || errno != ENOSYS) {
        return ret;
    }
#endif
    ret = pipe(pipefd);
    if (ret == 0) {
        qemu_set_cloexec(pipefd[0]);
        qemu_set_cloexec(pipefd[1]);
    }

    return ret;
}
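
/*
 * Example (illustrative sketch): both ends come back with FD_CLOEXEC already
 * set, so they are not leaked across exec().
 *
 *     int fds[2];
 *     if (qemu_pipe(fds) < 0) {
 *         error_report("failed to create pipe: %s", strerror(errno));
 *     }
 */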
char *
qemu_get_local_state_pathname(const char *relative_pathname)
{
    return g_strdup_printf("%s/%s", CONFIG_QEMU_LOCALSTATEDIR,
                           relative_pathname);
}
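
/*
 * Example (illustrative): with CONFIG_QEMU_LOCALSTATEDIR configured as
 * "/var", a relative_pathname of "run/qemu-ga.pid" yields the string
 * "/var/run/qemu-ga.pid"; the caller releases it with g_free().
 */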
void qemu_set_tty_echo(int fd, bool echo)
{
    struct termios tty;

    tcgetattr(fd, &tty);

    if (echo) {
        tty.c_lflag |= ECHO | ECHONL | ICANON | IEXTEN;
    } else {
        tty.c_lflag &= ~(ECHO | ECHONL | ICANON | IEXTEN);
    }

    tcsetattr(fd, TCSANOW, &tty);
}
static char exec_dir[PATH_MAX];

void qemu_init_exec_dir(const char *argv0)
{
    char *dir;
    char *p = NULL;
    char buf[PATH_MAX];

    assert(!exec_dir[0]);

#if defined(__linux__)
    {
        int len;
        len = readlink("/proc/self/exe", buf, sizeof(buf) - 1);
        if (len > 0) {
            buf[len] = 0;
            p = buf;
        }
    }
#elif defined(__FreeBSD__) \
      || (defined(__NetBSD__) && defined(KERN_PROC_PATHNAME))
    {
#if defined(__FreeBSD__)
        static int mib[4] = {CTL_KERN, KERN_PROC, KERN_PROC_PATHNAME, -1};
#else
        static int mib[4] = {CTL_KERN, KERN_PROC_ARGS, -1, KERN_PROC_PATHNAME};
#endif
        size_t len = sizeof(buf) - 1;

        *buf = '\0';
        if (!sysctl(mib, ARRAY_SIZE(mib), buf, &len, NULL, 0) &&
            *buf) {
            buf[sizeof(buf) - 1] = '\0';
            p = buf;
        }
    }
#endif
    /* If we don't have any way of figuring out the actual executable
       location then try argv[0].  */
    if (!p) {
        if (!argv0) {
            return;
        }
        p = realpath(argv0, buf);
        if (!p) {
            return;
        }
    }
    dir = g_path_get_dirname(p);

    pstrcpy(exec_dir, sizeof(exec_dir), dir);

    g_free(dir);
}
char *qemu_get_exec_dir(void)
{
    return g_strdup(exec_dir);
}
static void sigbus_handler(int signal)
{
    int i;
    if (memset_thread) {
        for (i = 0; i < memset_num_threads; i++) {
            if (qemu_thread_is_self(&memset_thread[i].pgthread)) {
                siglongjmp(memset_thread[i].env, 1);
            }
        }
    }
}
static void *do_touch_pages(void *arg)
{
    MemsetThread *memset_args = (MemsetThread *)arg;
    sigset_t set, oldset;

    /* unblock SIGBUS */
    sigemptyset(&set);
    sigaddset(&set, SIGBUS);
    pthread_sigmask(SIG_UNBLOCK, &set, &oldset);

    if (sigsetjmp(memset_args->env, 1)) {
        memset_thread_failed = true;
    } else {
        char *addr = memset_args->addr;
        size_t numpages = memset_args->numpages;
        size_t hpagesize = memset_args->hpagesize;
        size_t i;
        for (i = 0; i < numpages; i++) {
            /*
             * Read & write back the same value, so we don't
             * corrupt existing user/app data that might be
             * stored.
             *
             * 'volatile' to stop compiler optimizing this away
             * to a no-op
             *
             * TODO: get a better solution from kernel so we
             * don't need to write at all so we don't cause
             * wear on the storage backing the region...
             */
            *(volatile char *)addr = *addr;
            addr += hpagesize;
        }
    }
    pthread_sigmask(SIG_SETMASK, &oldset, NULL);
    return NULL;
}
static inline int get_memset_num_threads(int smp_cpus)
{
    long host_procs = sysconf(_SC_NPROCESSORS_ONLN);
    int ret = 1;

    if (host_procs > 0) {
        ret = MIN(MIN(host_procs, MAX_MEM_PREALLOC_THREAD_COUNT), smp_cpus);
    }
    /* In case sysconf() fails, we fall back to single threaded */
    return ret;
}
static bool touch_all_pages(char *area, size_t hpagesize, size_t numpages,
                            int smp_cpus)
{
    size_t numpages_per_thread;
    size_t size_per_thread;
    char *addr = area;
    int i = 0;

    memset_thread_failed = false;
    memset_num_threads = get_memset_num_threads(smp_cpus);
    memset_thread = g_new0(MemsetThread, memset_num_threads);
    numpages_per_thread = (numpages / memset_num_threads);
    size_per_thread = (hpagesize * numpages_per_thread);
    for (i = 0; i < memset_num_threads; i++) {
        memset_thread[i].addr = addr;
        memset_thread[i].numpages = (i == (memset_num_threads - 1)) ?
                                    numpages : numpages_per_thread;
        memset_thread[i].hpagesize = hpagesize;
        qemu_thread_create(&memset_thread[i].pgthread, "touch_pages",
                           do_touch_pages, &memset_thread[i],
                           QEMU_THREAD_JOINABLE);
        addr += size_per_thread;
        numpages -= numpages_per_thread;
    }
    for (i = 0; i < memset_num_threads; i++) {
        qemu_thread_join(&memset_thread[i].pgthread);
    }
    g_free(memset_thread);
    memset_thread = NULL;

    return memset_thread_failed;
}
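
/*
 * Worked example (illustrative): preallocating 10 huge pages when
 * get_memset_num_threads() returns 4 gives numpages_per_thread = 2, so
 * threads 0-2 each touch 2 pages and the last thread picks up the remaining
 * 4 (the running "numpages" counter after three subtractions), covering all
 * pages without overlap.
 */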
void os_mem_prealloc(int fd, char *area, size_t memory, int smp_cpus,
                     Error **errp)
{
    int ret;
    struct sigaction act, oldact;
    size_t hpagesize = qemu_fd_getpagesize(fd);
    size_t numpages = DIV_ROUND_UP(memory, hpagesize);

    memset(&act, 0, sizeof(act));
    act.sa_handler = &sigbus_handler;
    act.sa_flags = 0;

    ret = sigaction(SIGBUS, &act, &oldact);
    if (ret) {
        error_setg_errno(errp, errno,
            "os_mem_prealloc: failed to install signal handler");
        return;
    }

    /* touch pages simultaneously */
    if (touch_all_pages(area, hpagesize, numpages, smp_cpus)) {
        error_setg(errp, "os_mem_prealloc: Insufficient free host memory "
            "pages available to allocate guest RAM");
    }

    ret = sigaction(SIGBUS, &oldact, NULL);
    if (ret) {
        /* Terminate QEMU since it can't recover from error */
        perror("os_mem_prealloc: failed to reinstall signal handler");
        exit(1);
    }
}
uint64_t qemu_get_pmem_size(const char *filename, Error **errp)
{
    struct stat st;

    if (stat(filename, &st) < 0) {
        error_setg(errp, "unable to stat pmem file \"%s\"", filename);
        return 0;
    }

#if defined(__linux__)
    /* Special handling for devdax character devices */
    if (S_ISCHR(st.st_mode)) {
        char *subsystem_path = NULL;
        char *subsystem = NULL;
        char *size_path = NULL;
        char *size_str = NULL;
        uint64_t ret = 0;

        subsystem_path = g_strdup_printf("/sys/dev/char/%d:%d/subsystem",
                                         major(st.st_rdev),
                                         minor(st.st_rdev));
        subsystem = g_file_read_link(subsystem_path, NULL);
        if (!subsystem) {
            error_setg(errp, "unable to read subsystem for pmem file \"%s\"",
                       filename);
            goto devdax_err;
        }

        if (!g_str_has_suffix(subsystem, "/dax")) {
            error_setg(errp, "pmem file \"%s\" is not a dax device", filename);
            goto devdax_err;
        }

        size_path = g_strdup_printf("/sys/dev/char/%d:%d/size",
                                    major(st.st_rdev), minor(st.st_rdev));
        if (!g_file_get_contents(size_path, &size_str, NULL, NULL)) {
            error_setg(errp, "unable to read size for pmem file \"%s\"",
                       size_path);
            goto devdax_err;
        }

        ret = g_ascii_strtoull(size_str, NULL, 0);

devdax_err:
        g_free(size_str);
        g_free(size_path);
        g_free(subsystem);
        g_free(subsystem_path);
        return ret;
    }
#endif /* defined(__linux__) */

    return st.st_size;
}
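
/*
 * Example (illustrative; the device numbers are hypothetical): for a devdax
 * character device such as /dev/dax0.0 with major:minor 252:0, the function
 * above reads the size from /sys/dev/char/252:0/size after verifying that
 * /sys/dev/char/252:0/subsystem links into a ".../dax" directory; for a
 * regular file it simply returns st_size from stat().
 */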
char *qemu_get_pid_name(pid_t pid)
{
    char *name = NULL;

#if defined(__FreeBSD__)
    /* BSDs don't have /proc, but they provide a nice substitute */
    struct kinfo_proc *proc = kinfo_getproc(pid);

    if (proc) {
        name = g_strdup(proc->ki_comm);
        free(proc);
    }
#else
    /* Assume a system with reasonable procfs */
    char *pid_path;
    size_t len;

    pid_path = g_strdup_printf("/proc/%d/cmdline", pid);
    g_file_get_contents(pid_path, &name, &len, NULL);
    g_free(pid_path);
#endif

    return name;
}
pid_t qemu_fork(Error **errp)
{
    sigset_t oldmask, newmask;
    struct sigaction sig_action;
    int saved_errno;
    pid_t pid;

    /*
     * Need to block signals now, so that child process can safely
     * kill off caller's signal handlers without a race.
     */
    sigfillset(&newmask);
    if (pthread_sigmask(SIG_SETMASK, &newmask, &oldmask) != 0) {
        error_setg_errno(errp, errno,
                         "cannot block signals");
        return -1;
    }

    pid = fork();
    saved_errno = errno;

    if (pid < 0) {
        /* attempt to restore signal mask, but ignore failure, to
         * avoid obscuring the fork failure */
        (void)pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
        error_setg_errno(errp, saved_errno,
                         "cannot fork child process");
        errno = saved_errno;
        return -1;
    } else if (pid) {
        /* parent process */

        /* Restore our original signal mask now that the child is
         * safely running. Only documented failures are EFAULT (not
         * possible, since we are using just-grabbed mask) or EINVAL
         * (not possible, since we are using correct arguments). */
        (void)pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
    } else {
        /* child process */
        size_t i;

        /* Clear out all signal handlers from parent so nothing
         * unexpected can happen in our child once we unblock
         * signals */
        sig_action.sa_handler = SIG_DFL;
        sig_action.sa_flags = 0;
        sigemptyset(&sig_action.sa_mask);

        for (i = 1; i < NSIG; i++) {
            /* Only possible errors are EFAULT or EINVAL. The former
             * won't happen, the latter we expect, so no need to check
             * the return value */
            (void)sigaction(i, &sig_action, NULL);
        }

        /* Unmask all signals in child, since we've no idea what the
         * caller's done with their signal mask and don't want to
         * propagate that to children */
        sigemptyset(&newmask);
        if (pthread_sigmask(SIG_SETMASK, &newmask, NULL) != 0) {
            Error *local_err = NULL;
            error_setg_errno(&local_err, errno,
                             "cannot unblock signals");
            error_report_err(local_err);
            _exit(1);
        }
    }
    return pid;
}
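
/*
 * Example (illustrative sketch): the return value follows fork()
 * conventions, so a caller handles the three cases explicitly;
 * do_child_work() is a hypothetical helper.
 *
 *     Error *local_err = NULL;
 *     pid_t pid = qemu_fork(&local_err);
 *     if (pid < 0) {
 *         error_report_err(local_err);      // fork failed
 *     } else if (pid == 0) {
 *         _exit(do_child_work());           // child
 *     } else {
 *         waitpid(pid, NULL, 0);            // parent
 *     }
 */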
void *qemu_alloc_stack(size_t *sz)
{
    void *ptr, *guardpage;
    int flags;
#ifdef CONFIG_DEBUG_STACK_USAGE
    void *ptr2;
#endif
    size_t pagesz = getpagesize();
#ifdef _SC_THREAD_STACK_MIN
    /* avoid stacks smaller than _SC_THREAD_STACK_MIN */
    long min_stack_sz = sysconf(_SC_THREAD_STACK_MIN);
    *sz = MAX(MAX(min_stack_sz, 0), *sz);
#endif
    /* adjust stack size to a multiple of the page size */
    *sz = ROUND_UP(*sz, pagesz);
    /* allocate one extra page for the guard page */
    *sz += pagesz;

    flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(MAP_STACK) && defined(__OpenBSD__)
    /* Only enable MAP_STACK on OpenBSD. Other OS's such as
     * Linux/FreeBSD/NetBSD have a flag with the same name
     * but have differing functionality. OpenBSD will SEGV
     * if it spots execution with a stack pointer pointing
     * at memory that was not allocated with MAP_STACK.
     */
    flags |= MAP_STACK;
#endif

    ptr = mmap(NULL, *sz, PROT_READ | PROT_WRITE, flags, -1, 0);
    if (ptr == MAP_FAILED) {
        perror("failed to allocate memory for stack");
        abort();
    }

#if defined(HOST_IA64)
    /* separate register stack */
    guardpage = ptr + (((*sz - pagesz) / 2) & ~pagesz);
#elif defined(HOST_HPPA)
    /* stack grows up */
    guardpage = ptr + *sz - pagesz;
#else
    /* stack grows down */
    guardpage = ptr;
#endif
    if (mprotect(guardpage, pagesz, PROT_NONE) != 0) {
        perror("failed to set up stack guard page");
        abort();
    }

#ifdef CONFIG_DEBUG_STACK_USAGE
    for (ptr2 = ptr + pagesz; ptr2 < ptr + *sz; ptr2 += sizeof(uint32_t)) {
        *(uint32_t *)ptr2 = 0xdeadbeaf;
    }
#endif

    return ptr;
}
#ifdef CONFIG_DEBUG_STACK_USAGE
static __thread unsigned int max_stack_usage;
#endif
void qemu_free_stack(void *stack, size_t sz)
{
#ifdef CONFIG_DEBUG_STACK_USAGE
    unsigned int usage;
    void *ptr;

    for (ptr = stack + getpagesize(); ptr < stack + sz;
         ptr += sizeof(uint32_t)) {
        if (*(uint32_t *)ptr != 0xdeadbeaf) {
            break;
        }
    }
    usage = sz - (uintptr_t) (ptr - stack);
    if (usage > max_stack_usage) {
        error_report("thread %d max stack usage increased from %u to %u",
                     qemu_get_thread_id(), max_stack_usage, usage);
        max_stack_usage = usage;
    }
#endif

    munmap(stack, sz);
}
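
/*
 * Example (illustrative sketch): qemu_alloc_stack() rounds the requested
 * size up and adds one guard page, updating *sz to the real mapping size,
 * so the same (pointer, size) pair must be handed back to qemu_free_stack().
 * MiB here is the constant from "qemu/units.h".
 *
 *     size_t stack_size = 1 * MiB;
 *     void *stack = qemu_alloc_stack(&stack_size);  // stack_size is updated
 *     ...
 *     qemu_free_stack(stack, stack_size);
 */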
void sigaction_invoke(struct sigaction *action,
                      struct qemu_signalfd_siginfo *info)
{
    siginfo_t si = {};
    si.si_signo = info->ssi_signo;
    si.si_errno = info->ssi_errno;
    si.si_code = info->ssi_code;

    /* Convert the minimal set of fields defined by POSIX.
     * Positive si_code values are reserved for kernel-generated
     * signals, where the valid siginfo fields are determined by
     * the signal number. But according to POSIX, it is unspecified
     * whether SI_USER and SI_QUEUE have values less than or equal to
     * zero.
     */
    if (info->ssi_code == SI_USER || info->ssi_code == SI_QUEUE ||
        info->ssi_code <= 0) {
        /* SIGTERM, etc. */
        si.si_pid = info->ssi_pid;
        si.si_uid = info->ssi_uid;
    } else if (info->ssi_signo == SIGILL || info->ssi_signo == SIGFPE ||
               info->ssi_signo == SIGSEGV || info->ssi_signo == SIGBUS) {
        si.si_addr = (void *)(uintptr_t)info->ssi_addr;
    } else if (info->ssi_signo == SIGCHLD) {
        si.si_pid = info->ssi_pid;
        si.si_status = info->ssi_status;
        si.si_uid = info->ssi_uid;
    }
    action->sa_sigaction(info->ssi_signo, &si, NULL);
}