fuzz: Expect the cmdline in a freeable GString
[qemu/ar7.git] / util / oslib-posix.c
blob 36bf8593f8c1ff13582a2f79a4ea75fe16cbf23a
/*
 * os-posix-lib.c
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (c) 2010 Red Hat, Inc.
 *
 * QEMU library functions on POSIX which are shared between QEMU and
 * the QEMU tools.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include <termios.h>

#include <glib/gprintf.h>

#include "qemu-common.h"
#include "sysemu/sysemu.h"
#include "trace.h"
#include "qapi/error.h"
#include "qemu/sockets.h"
#include "qemu/thread.h"
#include <libgen.h>
#include "qemu/cutils.h"

#ifdef CONFIG_LINUX
#include <sys/syscall.h>
#endif

#ifdef __FreeBSD__
#include <sys/sysctl.h>
#include <sys/user.h>
#include <sys/thr.h>
#include <libutil.h>
#endif

#ifdef __NetBSD__
#include <sys/sysctl.h>
#include <lwp.h>
#endif

#ifdef __APPLE__
#include <mach-o/dyld.h>
#endif

#ifdef __HAIKU__
#include <kernel/image.h>
#endif

#include "qemu/mmap-alloc.h"

#ifdef CONFIG_DEBUG_STACK_USAGE
#include "qemu/error-report.h"
#endif
#define MAX_MEM_PREALLOC_THREAD_COUNT 16

struct MemsetThread {
    char *addr;
    size_t numpages;
    size_t hpagesize;
    QemuThread pgthread;
    sigjmp_buf env;
};
typedef struct MemsetThread MemsetThread;

static MemsetThread *memset_thread;
static int memset_num_threads;
static bool memset_thread_failed;

static QemuMutex page_mutex;
static QemuCond page_cond;
static bool threads_created_flag;
int qemu_get_thread_id(void)
{
#if defined(__linux__)
    return syscall(SYS_gettid);
#elif defined(__FreeBSD__)
    /* thread id is up to INT_MAX */
    long tid;
    thr_self(&tid);
    return (int)tid;
#elif defined(__NetBSD__)
    return _lwp_self();
#else
    return getpid();
#endif
}
int qemu_daemon(int nochdir, int noclose)
{
    return daemon(nochdir, noclose);
}
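/*
 * Write the current process ID to the pid file at @path.
 *
 * The file is opened, fstat()ed and write-locked, then stat()ed again by
 * path; if the file vanished or its inode changed in between, another
 * process is racing with us and we retry, so the lock always covers the
 * file that is actually visible on disk.  Returns true on success; on
 * failure sets @errp and returns false.
 */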
bool qemu_write_pidfile(const char *path, Error **errp)
{
    int fd;
    char pidstr[32];

    while (1) {
        struct stat a, b;
        struct flock lock = {
            .l_type = F_WRLCK,
            .l_whence = SEEK_SET,
            .l_len = 0,
        };

        fd = qemu_open(path, O_CREAT | O_WRONLY, S_IRUSR | S_IWUSR);
        if (fd == -1) {
            error_setg_errno(errp, errno, "Cannot open pid file");
            return false;
        }

        if (fstat(fd, &b) < 0) {
            error_setg_errno(errp, errno, "Cannot stat file");
            goto fail_close;
        }

        if (fcntl(fd, F_SETLK, &lock)) {
            error_setg_errno(errp, errno, "Cannot lock pid file");
            goto fail_close;
        }

        /*
         * Now make sure the path we locked is the same one that now
         * exists on the filesystem.
         */
        if (stat(path, &a) < 0) {
            /*
             * PID file disappeared, someone else must be racing with
             * us, so try again.
             */
            close(fd);
            continue;
        }

        if (a.st_ino == b.st_ino) {
            break;
        }

        /*
         * PID file was recreated, someone else must be racing with
         * us, so try again.
         */
        close(fd);
    }

    if (ftruncate(fd, 0) < 0) {
        error_setg_errno(errp, errno, "Failed to truncate pid file");
        goto fail_unlink;
    }

    snprintf(pidstr, sizeof(pidstr), FMT_pid "\n", getpid());
    if (write(fd, pidstr, strlen(pidstr)) != strlen(pidstr)) {
        error_setg(errp, "Failed to write pid file");
        goto fail_unlink;
    }

    return true;

fail_unlink:
    unlink(path);
fail_close:
    close(fd);
    return false;
}
void *qemu_oom_check(void *ptr)
{
    if (ptr == NULL) {
        fprintf(stderr, "Failed to allocate memory: %s\n", strerror(errno));
        abort();
    }
    return ptr;
}
void *qemu_try_memalign(size_t alignment, size_t size)
{
    void *ptr;

    if (alignment < sizeof(void*)) {
        alignment = sizeof(void*);
    }

#if defined(CONFIG_POSIX_MEMALIGN)
    int ret;
    ret = posix_memalign(&ptr, alignment, size);
    if (ret != 0) {
        errno = ret;
        ptr = NULL;
    }
#elif defined(CONFIG_BSD)
    ptr = valloc(size);
#else
    ptr = memalign(alignment, size);
#endif
    trace_qemu_memalign(alignment, size, ptr);
    return ptr;
}

void *qemu_memalign(size_t alignment, size_t size)
{
    return qemu_oom_check(qemu_try_memalign(alignment, size));
}
/* alloc shared memory pages */
void *qemu_anon_ram_alloc(size_t size, uint64_t *alignment, bool shared)
{
    size_t align = QEMU_VMALLOC_ALIGN;
    void *ptr = qemu_ram_mmap(-1, size, align, shared, false);

    if (ptr == MAP_FAILED) {
        return NULL;
    }

    if (alignment) {
        *alignment = align;
    }

    trace_qemu_anon_ram_alloc(size, ptr);
    return ptr;
}

void qemu_vfree(void *ptr)
{
    trace_qemu_vfree(ptr);
    free(ptr);
}

void qemu_anon_ram_free(void *ptr, size_t size)
{
    trace_qemu_anon_ram_free(ptr, size);
    qemu_ram_munmap(-1, ptr, size);
}
void qemu_set_block(int fd)
{
    int f;
    f = fcntl(fd, F_GETFL);
    assert(f != -1);
    f = fcntl(fd, F_SETFL, f & ~O_NONBLOCK);
    assert(f != -1);
}

int qemu_try_set_nonblock(int fd)
{
    int f;
    f = fcntl(fd, F_GETFL);
    if (f == -1) {
        return -errno;
    }
    if (fcntl(fd, F_SETFL, f | O_NONBLOCK) == -1) {
#ifdef __OpenBSD__
        /*
         * Previous to OpenBSD 6.3, fcntl(F_SETFL) is not permitted on
         * memory devices and sets errno to ENODEV.
         * It's OK if we fail to set O_NONBLOCK on devices like /dev/null,
         * because they will never block anyway.
         */
        if (errno == ENODEV) {
            return 0;
        }
#endif
        return -errno;
    }
    return 0;
}

void qemu_set_nonblock(int fd)
{
    int f;
    f = qemu_try_set_nonblock(fd);
    assert(f == 0);
}
int socket_set_fast_reuse(int fd)
{
    int val = 1, ret;

    ret = setsockopt(fd, SOL_SOCKET, SO_REUSEADDR,
                     (const char *)&val, sizeof(val));

    assert(ret == 0);

    return ret;
}

void qemu_set_cloexec(int fd)
{
    int f;
    f = fcntl(fd, F_GETFD);
    assert(f != -1);
    f = fcntl(fd, F_SETFD, f | FD_CLOEXEC);
    assert(f != -1);
}
/*
 * Creates a pipe with FD_CLOEXEC set on both file descriptors
 */
int qemu_pipe(int pipefd[2])
{
    int ret;

#ifdef CONFIG_PIPE2
    ret = pipe2(pipefd, O_CLOEXEC);
    if (ret != -1 || errno != ENOSYS) {
        return ret;
    }
#endif
    ret = pipe(pipefd);
    if (ret == 0) {
        qemu_set_cloexec(pipefd[0]);
        qemu_set_cloexec(pipefd[1]);
    }

    return ret;
}

char *
qemu_get_local_state_pathname(const char *relative_pathname)
{
    return g_strdup_printf("%s/%s", CONFIG_QEMU_LOCALSTATEDIR,
                           relative_pathname);
}
void qemu_set_tty_echo(int fd, bool echo)
{
    struct termios tty;

    tcgetattr(fd, &tty);

    if (echo) {
        tty.c_lflag |= ECHO | ECHONL | ICANON | IEXTEN;
    } else {
        tty.c_lflag &= ~(ECHO | ECHONL | ICANON | IEXTEN);
    }

    tcsetattr(fd, TCSANOW, &tty);
}
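/*
 * exec_dir caches the directory containing the running QEMU binary.
 * qemu_init_exec_dir() fills it once, preferring an OS-specific query
 * (/proc/self/exe, sysctl KERN_PROC_PATHNAME, _NSGetExecutablePath, or
 * the Haiku image list) and falling back to resolving argv[0].
 */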
static char exec_dir[PATH_MAX];

void qemu_init_exec_dir(const char *argv0)
{
    char *dir;
    char *p = NULL;
    char buf[PATH_MAX];

    assert(!exec_dir[0]);

#if defined(__linux__)
    {
        int len;
        len = readlink("/proc/self/exe", buf, sizeof(buf) - 1);
        if (len > 0) {
            buf[len] = 0;
            p = buf;
        }
    }
#elif defined(__FreeBSD__) \
      || (defined(__NetBSD__) && defined(KERN_PROC_PATHNAME))
    {
#if defined(__FreeBSD__)
        static int mib[4] = {CTL_KERN, KERN_PROC, KERN_PROC_PATHNAME, -1};
#else
        static int mib[4] = {CTL_KERN, KERN_PROC_ARGS, -1, KERN_PROC_PATHNAME};
#endif
        size_t len = sizeof(buf) - 1;

        *buf = '\0';
        if (!sysctl(mib, ARRAY_SIZE(mib), buf, &len, NULL, 0) &&
            *buf) {
            buf[sizeof(buf) - 1] = '\0';
            p = buf;
        }
    }
#elif defined(__APPLE__)
    {
        char fpath[PATH_MAX];
        uint32_t len = sizeof(fpath);
        if (_NSGetExecutablePath(fpath, &len) == 0) {
            p = realpath(fpath, buf);
            if (!p) {
                return;
            }
        }
    }
#elif defined(__HAIKU__)
    {
        image_info ii;
        int32_t c = 0;

        *buf = '\0';
        while (get_next_image_info(0, &c, &ii) == B_OK) {
            if (ii.type == B_APP_IMAGE) {
                strncpy(buf, ii.name, sizeof(buf));
                buf[sizeof(buf) - 1] = 0;
                p = buf;
                break;
            }
        }
    }
#endif
    /* If we don't have any way of figuring out the actual executable
       location then try argv[0].  */
    if (!p) {
        if (!argv0) {
            return;
        }
        p = realpath(argv0, buf);
        if (!p) {
            return;
        }
    }
    dir = g_path_get_dirname(p);

    pstrcpy(exec_dir, sizeof(exec_dir), dir);

    g_free(dir);
}
char *qemu_get_exec_dir(void)
{
    return g_strdup(exec_dir);
}
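/*
 * SIGBUS handler for the preallocation threads: jump back into the
 * faulting thread's sigsetjmp() context so the failure can be recorded
 * instead of killing the process.
 */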
static void sigbus_handler(int signal)
{
    int i;
    if (memset_thread) {
        for (i = 0; i < memset_num_threads; i++) {
            if (qemu_thread_is_self(&memset_thread[i].pgthread)) {
                siglongjmp(memset_thread[i].env, 1);
            }
        }
    }
}
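/*
 * Preallocation worker: wait until all workers have been created, then
 * touch each page in its assigned range (read and write back the same
 * byte) so the host backs it; a SIGBUS during the loop marks the
 * preallocation as failed.
 */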
static void *do_touch_pages(void *arg)
{
    MemsetThread *memset_args = (MemsetThread *)arg;
    sigset_t set, oldset;

    /*
     * On Linux, the page faults from the loop below can cause mmap_sem
     * contention with allocation of the thread stacks.  Do not start
     * clearing until all threads have been created.
     */
    qemu_mutex_lock(&page_mutex);
    while (!threads_created_flag) {
        qemu_cond_wait(&page_cond, &page_mutex);
    }
    qemu_mutex_unlock(&page_mutex);

    /* unblock SIGBUS */
    sigemptyset(&set);
    sigaddset(&set, SIGBUS);
    pthread_sigmask(SIG_UNBLOCK, &set, &oldset);

    if (sigsetjmp(memset_args->env, 1)) {
        memset_thread_failed = true;
    } else {
        char *addr = memset_args->addr;
        size_t numpages = memset_args->numpages;
        size_t hpagesize = memset_args->hpagesize;
        size_t i;
        for (i = 0; i < numpages; i++) {
            /*
             * Read & write back the same value, so we don't
             * corrupt existing user/app data that might be
             * stored.
             *
             * 'volatile' to stop compiler optimizing this away
             * to a no-op
             *
             * TODO: get a better solution from kernel so we
             * don't need to write at all so we don't cause
             * wear on the storage backing the region...
             */
            *(volatile char *)addr = *addr;
            addr += hpagesize;
        }
    }
    pthread_sigmask(SIG_SETMASK, &oldset, NULL);
    return NULL;
}
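/*
 * Number of preallocation threads: the smallest of the online host CPUs,
 * MAX_MEM_PREALLOC_THREAD_COUNT and smp_cpus.
 */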
static inline int get_memset_num_threads(int smp_cpus)
{
    long host_procs = sysconf(_SC_NPROCESSORS_ONLN);
    int ret = 1;

    if (host_procs > 0) {
        ret = MIN(MIN(host_procs, MAX_MEM_PREALLOC_THREAD_COUNT), smp_cpus);
    }
    /* In case sysconf() fails, we fall back to single threaded */
    return ret;
}
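/*
 * Split the region into roughly equal chunks, spawn one touching thread
 * per chunk, then release them all at once and wait for completion.
 * Returns true if any thread hit SIGBUS while touching its pages.
 */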
static bool touch_all_pages(char *area, size_t hpagesize, size_t numpages,
                            int smp_cpus)
{
    static gsize initialized = 0;
    size_t numpages_per_thread, leftover;
    char *addr = area;
    int i = 0;

    if (g_once_init_enter(&initialized)) {
        qemu_mutex_init(&page_mutex);
        qemu_cond_init(&page_cond);
        g_once_init_leave(&initialized, 1);
    }

    memset_thread_failed = false;
    threads_created_flag = false;
    memset_num_threads = get_memset_num_threads(smp_cpus);
    memset_thread = g_new0(MemsetThread, memset_num_threads);
    numpages_per_thread = numpages / memset_num_threads;
    leftover = numpages % memset_num_threads;
    for (i = 0; i < memset_num_threads; i++) {
        memset_thread[i].addr = addr;
        memset_thread[i].numpages = numpages_per_thread + (i < leftover);
        memset_thread[i].hpagesize = hpagesize;
        qemu_thread_create(&memset_thread[i].pgthread, "touch_pages",
                           do_touch_pages, &memset_thread[i],
                           QEMU_THREAD_JOINABLE);
        addr += memset_thread[i].numpages * hpagesize;
    }

    qemu_mutex_lock(&page_mutex);
    threads_created_flag = true;
    qemu_cond_broadcast(&page_cond);
    qemu_mutex_unlock(&page_mutex);

    for (i = 0; i < memset_num_threads; i++) {
        qemu_thread_join(&memset_thread[i].pgthread);
    }
    g_free(memset_thread);
    memset_thread = NULL;

    return memset_thread_failed;
}
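/*
 * Preallocate (touch) all pages of the @memory-byte region at @area, with
 * the page size taken from @fd.  A temporary SIGBUS handler is installed
 * so that failure to back a page is reported via @errp instead of
 * crashing QEMU.
 */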
void os_mem_prealloc(int fd, char *area, size_t memory, int smp_cpus,
                     Error **errp)
{
    int ret;
    struct sigaction act, oldact;
    size_t hpagesize = qemu_fd_getpagesize(fd);
    size_t numpages = DIV_ROUND_UP(memory, hpagesize);

    memset(&act, 0, sizeof(act));
    act.sa_handler = &sigbus_handler;
    act.sa_flags = 0;

    ret = sigaction(SIGBUS, &act, &oldact);
    if (ret) {
        error_setg_errno(errp, errno,
            "os_mem_prealloc: failed to install signal handler");
        return;
    }

    /* touch pages simultaneously */
    if (touch_all_pages(area, hpagesize, numpages, smp_cpus)) {
        error_setg(errp, "os_mem_prealloc: Insufficient free host memory "
            "pages available to allocate guest RAM");
    }

    ret = sigaction(SIGBUS, &oldact, NULL);
    if (ret) {
        /* Terminate QEMU since it can't recover from error */
        perror("os_mem_prealloc: failed to reinstall signal handler");
        exit(1);
    }
}
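/*
 * Return the command name of process @pid as a newly allocated string,
 * or NULL if it cannot be determined.  The caller must g_free() it.
 */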
char *qemu_get_pid_name(pid_t pid)
{
    char *name = NULL;

#if defined(__FreeBSD__)
    /* BSDs don't have /proc, but they provide a nice substitute */
    struct kinfo_proc *proc = kinfo_getproc(pid);

    if (proc) {
        name = g_strdup(proc->ki_comm);
        free(proc);
    }
#else
    /* Assume a system with reasonable procfs */
    char *pid_path;
    size_t len;

    pid_path = g_strdup_printf("/proc/%d/cmdline", pid);
    g_file_get_contents(pid_path, &name, &len, NULL);
    g_free(pid_path);
#endif

    return name;
}
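/*
 * fork() wrapper that blocks all signals around the fork and, in the
 * child, resets every signal handler to SIG_DFL and clears the signal
 * mask, so the child does not inherit the caller's signal setup.
 * Returns the fork() result; on error sets @errp and returns -1.
 */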
pid_t qemu_fork(Error **errp)
{
    sigset_t oldmask, newmask;
    struct sigaction sig_action;
    int saved_errno;
    pid_t pid;

    /*
     * Need to block signals now, so that child process can safely
     * kill off caller's signal handlers without a race.
     */
    sigfillset(&newmask);
    if (pthread_sigmask(SIG_SETMASK, &newmask, &oldmask) != 0) {
        error_setg_errno(errp, errno,
                         "cannot block signals");
        return -1;
    }

    pid = fork();
    saved_errno = errno;

    if (pid < 0) {
        /* attempt to restore signal mask, but ignore failure, to
         * avoid obscuring the fork failure */
        (void)pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
        error_setg_errno(errp, saved_errno,
                         "cannot fork child process");
        errno = saved_errno;
        return -1;
    } else if (pid) {
        /* parent process */

        /* Restore our original signal mask now that the child is
         * safely running. Only documented failures are EFAULT (not
         * possible, since we are using just-grabbed mask) or EINVAL
         * (not possible, since we are using correct arguments). */
        (void)pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
    } else {
        /* child process */
        size_t i;

        /* Clear out all signal handlers from parent so nothing
         * unexpected can happen in our child once we unblock
         * signals */
        sig_action.sa_handler = SIG_DFL;
        sig_action.sa_flags = 0;
        sigemptyset(&sig_action.sa_mask);

        for (i = 1; i < NSIG; i++) {
            /* Only possible errors are EFAULT or EINVAL. The former
             * won't happen, the latter we expect, so no need to check
             * return value */
            (void)sigaction(i, &sig_action, NULL);
        }

        /* Unmask all signals in child, since we've no idea what the
         * caller's done with their signal mask and don't want to
         * propagate that to children */
        sigemptyset(&newmask);
        if (pthread_sigmask(SIG_SETMASK, &newmask, NULL) != 0) {
            Error *local_err = NULL;
            error_setg_errno(&local_err, errno,
                             "cannot unblock signals");
            error_report_err(local_err);
            _exit(1);
        }
    }
    return pid;
}
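/*
 * mmap() a thread stack of at least *sz bytes, rounded up to the page
 * size, plus one extra page that is mprotect()ed PROT_NONE as a guard
 * page (its position depends on the host's stack growth direction).
 * *sz is updated to the mapped size; release with qemu_free_stack().
 */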
void *qemu_alloc_stack(size_t *sz)
{
    void *ptr, *guardpage;
    int flags;
#ifdef CONFIG_DEBUG_STACK_USAGE
    void *ptr2;
#endif
    size_t pagesz = qemu_real_host_page_size;
#ifdef _SC_THREAD_STACK_MIN
    /* avoid stacks smaller than _SC_THREAD_STACK_MIN */
    long min_stack_sz = sysconf(_SC_THREAD_STACK_MIN);
    *sz = MAX(MAX(min_stack_sz, 0), *sz);
#endif
    /* adjust stack size to a multiple of the page size */
    *sz = ROUND_UP(*sz, pagesz);
    /* allocate one extra page for the guard page */
    *sz += pagesz;

    flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(MAP_STACK) && defined(__OpenBSD__)
    /* Only enable MAP_STACK on OpenBSD. Other OS's such as
     * Linux/FreeBSD/NetBSD have a flag with the same name
     * but have differing functionality. OpenBSD will SEGV
     * if it spots execution with a stack pointer pointing
     * at memory that was not allocated with MAP_STACK.
     */
    flags |= MAP_STACK;
#endif

    ptr = mmap(NULL, *sz, PROT_READ | PROT_WRITE, flags, -1, 0);
    if (ptr == MAP_FAILED) {
        perror("failed to allocate memory for stack");
        abort();
    }

#if defined(HOST_IA64)
    /* separate register stack */
    guardpage = ptr + (((*sz - pagesz) / 2) & ~pagesz);
#elif defined(HOST_HPPA)
    /* stack grows up */
    guardpage = ptr + *sz - pagesz;
#else
    /* stack grows down */
    guardpage = ptr;
#endif
    if (mprotect(guardpage, pagesz, PROT_NONE) != 0) {
        perror("failed to set up stack guard page");
        abort();
    }

#ifdef CONFIG_DEBUG_STACK_USAGE
    for (ptr2 = ptr + pagesz; ptr2 < ptr + *sz; ptr2 += sizeof(uint32_t)) {
        *(uint32_t *)ptr2 = 0xdeadbeaf;
    }
#endif

    return ptr;
}
#ifdef CONFIG_DEBUG_STACK_USAGE
static __thread unsigned int max_stack_usage;
#endif
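/*
 * Unmap a stack obtained from qemu_alloc_stack().  With
 * CONFIG_DEBUG_STACK_USAGE, first scan the 0xdeadbeaf fill pattern for
 * the high-water mark and report new per-thread maxima.
 */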
void qemu_free_stack(void *stack, size_t sz)
{
#ifdef CONFIG_DEBUG_STACK_USAGE
    unsigned int usage;
    void *ptr;

    for (ptr = stack + qemu_real_host_page_size; ptr < stack + sz;
         ptr += sizeof(uint32_t)) {
        if (*(uint32_t *)ptr != 0xdeadbeaf) {
            break;
        }
    }
    usage = sz - (uintptr_t) (ptr - stack);
    if (usage > max_stack_usage) {
        error_report("thread %d max stack usage increased from %u to %u",
                     qemu_get_thread_id(), max_stack_usage, usage);
        max_stack_usage = usage;
    }
#endif

    munmap(stack, sz);
}
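/*
 * Re-deliver a signal read from a signalfd to its registered handler,
 * rebuilding a siginfo_t from the qemu_signalfd_siginfo fields.
 */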
void sigaction_invoke(struct sigaction *action,
                      struct qemu_signalfd_siginfo *info)
{
    siginfo_t si = {};
    si.si_signo = info->ssi_signo;
    si.si_errno = info->ssi_errno;
    si.si_code = info->ssi_code;

    /* Convert the minimal set of fields defined by POSIX.
     * Positive si_code values are reserved for kernel-generated
     * signals, where the valid siginfo fields are determined by
     * the signal number. But according to POSIX, it is unspecified
     * whether SI_USER and SI_QUEUE have values less than or equal to
     * zero.
     */
    if (info->ssi_code == SI_USER || info->ssi_code == SI_QUEUE ||
        info->ssi_code <= 0) {
        /* SIGTERM, etc. */
        si.si_pid = info->ssi_pid;
        si.si_uid = info->ssi_uid;
    } else if (info->ssi_signo == SIGILL || info->ssi_signo == SIGFPE ||
               info->ssi_signo == SIGSEGV || info->ssi_signo == SIGBUS) {
        si.si_addr = (void *)(uintptr_t)info->ssi_addr;
    } else if (info->ssi_signo == SIGCHLD) {
        si.si_pid = info->ssi_pid;
        si.si_status = info->ssi_status;
        si.si_uid = info->ssi_uid;
    }
    action->sa_sigaction(info->ssi_signo, &si, NULL);
}
#ifndef HOST_NAME_MAX
# ifdef _POSIX_HOST_NAME_MAX
#  define HOST_NAME_MAX _POSIX_HOST_NAME_MAX
# else
#  define HOST_NAME_MAX 255
# endif
#endif
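/*
 * Return the host name as a newly allocated, NUL-terminated string,
 * or NULL (with @errp set) on failure.  The caller must g_free() it.
 */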
char *qemu_get_host_name(Error **errp)
{
    long len = -1;
    g_autofree char *hostname = NULL;

#ifdef _SC_HOST_NAME_MAX
    len = sysconf(_SC_HOST_NAME_MAX);
#endif /* _SC_HOST_NAME_MAX */

    if (len < 0) {
        len = HOST_NAME_MAX;
    }

    /* Unfortunately, gethostname() below does not guarantee a
     * NULL terminated string. Therefore, allocate one byte more
     * to be sure. */
    hostname = g_new0(char, len + 1);

    if (gethostname(hostname, len) < 0) {
        error_setg_errno(errp, errno,
                         "cannot get hostname");
        return NULL;
    }

    return g_steal_pointer(&hostname);
}