/*
 * Postcopy migration for RAM
 *
 * Copyright 2013-2015 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Dave Gilbert <dgilbert@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

/*
 * Postcopy is a migration technique where the execution flips from the
 * source to the destination before all the data has been copied.
 */

#include "qemu/osdep.h"

#include "qemu-common.h"
#include "migration/migration.h"
#include "migration/postcopy-ram.h"
#include "sysemu/sysemu.h"
#include "sysemu/balloon.h"
#include "qemu/error-report.h"
#include "trace.h"

/* Arbitrary limit on size of each discard command,
 * keeps them around ~200 bytes
 */
#define MAX_DISCARDS_PER_COMMAND 12

struct PostcopyDiscardState {
    const char *ramblock_name;
    uint64_t offset; /* Bitmap entry for the 1st bit of this RAMBlock */
    uint16_t cur_entry;
    /*
     * Start and length of a discard range (bytes)
     */
    uint64_t start_list[MAX_DISCARDS_PER_COMMAND];
    uint64_t length_list[MAX_DISCARDS_PER_COMMAND];
    unsigned int nsentwords;
    unsigned int nsentcmds;
};
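
/*
 * A PostcopyDiscardState batches up to MAX_DISCARDS_PER_COMMAND
 * start/length pairs for one RAMBlock and ships them to the destination
 * as a single discard command; see postcopy_discard_send_range() below.
 */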

/*
 * Postcopy needs to detect accesses to pages that haven't yet been copied
 * across, and to map new pages in efficiently; the techniques for doing
 * this are target OS specific.
 */
#if defined(__linux__)

#include <poll.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <asm/types.h> /* for __u64 */
#endif

#if defined(__linux__) && defined(__NR_userfaultfd) && defined(CONFIG_EVENTFD)
#include <sys/eventfd.h>
#include <linux/userfaultfd.h>
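
/*
 * Check that the userfaultfd API version and the set of ioctls the
 * kernel offers cover what postcopy needs; reports an error and
 * returns false if anything is missing.
 */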
static bool ufd_version_check(int ufd)
{
    struct uffdio_api api_struct;
    uint64_t ioctl_mask;

    api_struct.api = UFFD_API;
    api_struct.features = 0;
    if (ioctl(ufd, UFFDIO_API, &api_struct)) {
        error_report("postcopy_ram_supported_by_host: UFFDIO_API failed: %s",
                     strerror(errno));
        return false;
    }

    ioctl_mask = (__u64)1 << _UFFDIO_REGISTER |
                 (__u64)1 << _UFFDIO_UNREGISTER;
    if ((api_struct.ioctls & ioctl_mask) != ioctl_mask) {
        error_report("Missing userfault features: %" PRIx64,
                     (uint64_t)(~api_struct.ioctls & ioctl_mask));
        return false;
    }

    if (getpagesize() != ram_pagesize_summary()) {
        bool have_hp = false;
        /* We've got a huge page */
#ifdef UFFD_FEATURE_MISSING_HUGETLBFS
        have_hp = api_struct.features & UFFD_FEATURE_MISSING_HUGETLBFS;
#endif
        if (!have_hp) {
            error_report("Userfault on this host does not support huge pages");
            return false;
        }
    }

    return true;
}
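
/*
 * Note: UFFDIO_API must be issued once on each new userfaultfd before
 * the other ioctls will work on it, which is why
 * postcopy_ram_enable_notify() runs this check again on the fd it opens.
 */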

/*
 * Callback from postcopy_ram_supported_by_host block iterator.
 */
static int test_range_shared(const char *block_name, void *host_addr,
                             ram_addr_t offset, ram_addr_t length, void *opaque)
{
    if (qemu_ram_is_shared(qemu_ram_block_by_name(block_name))) {
        error_report("Postcopy on shared RAM (%s) is not yet supported",
                     block_name);
        return 1;
    }

    return 0;
}

/*
 * Note: this has the side effect of munlock'ing all of RAM; that's
 * normally fine, since if the postcopy succeeds mlock is re-enabled at
 * the end.
 */
bool postcopy_ram_supported_by_host(void)
{
    long pagesize = getpagesize();
    int ufd = -1;
    bool ret = false; /* Error unless we change it */
    void *testarea = NULL;
    struct uffdio_register reg_struct;
    struct uffdio_range range_struct;
    uint64_t feature_mask;

    if ((1ul << qemu_target_page_bits()) > pagesize) {
        error_report("Target page size bigger than host page size");
        goto out;
    }

    ufd = syscall(__NR_userfaultfd, O_CLOEXEC);
    if (ufd == -1) {
        error_report("%s: userfaultfd not available: %s", __func__,
                     strerror(errno));
        goto out;
    }

    /* Version and features check */
    if (!ufd_version_check(ufd)) {
        goto out;
    }

    /* We don't support postcopy with shared RAM yet */
    if (qemu_ram_foreach_block(test_range_shared, NULL)) {
        goto out;
    }

    /*
     * userfault and mlock don't go together; we'll put it back later if
     * it was enabled.
     */
    if (munlockall()) {
        error_report("%s: munlockall: %s", __func__, strerror(errno));
        goto out; /* not "return -1": this function returns bool */
    }

    /*
     * We need to check that the ops we need are supported on anon memory.
     * To do that we need to register a chunk and see the flags that
     * are returned.
     */
    testarea = mmap(NULL, pagesize, PROT_READ | PROT_WRITE, MAP_PRIVATE |
                                    MAP_ANONYMOUS, -1, 0);
    if (testarea == MAP_FAILED) {
        error_report("%s: Failed to map test area: %s", __func__,
                     strerror(errno));
        goto out;
    }
    g_assert(((size_t)testarea & (pagesize - 1)) == 0);

    reg_struct.range.start = (uintptr_t)testarea;
    reg_struct.range.len = pagesize;
    reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;

    if (ioctl(ufd, UFFDIO_REGISTER, &reg_struct)) {
        error_report("%s userfault register: %s", __func__, strerror(errno));
        goto out;
    }

    range_struct.start = (uintptr_t)testarea;
    range_struct.len = pagesize;
    if (ioctl(ufd, UFFDIO_UNREGISTER, &range_struct)) {
        error_report("%s userfault unregister: %s", __func__, strerror(errno));
        goto out;
    }

    feature_mask = (__u64)1 << _UFFDIO_WAKE |
                   (__u64)1 << _UFFDIO_COPY |
                   (__u64)1 << _UFFDIO_ZEROPAGE;
    if ((reg_struct.ioctls & feature_mask) != feature_mask) {
        error_report("Missing userfault map features: %" PRIx64,
                     (uint64_t)(~reg_struct.ioctls & feature_mask));
        goto out;
    }

    /* Success! */
    ret = true;
out:
    if (testarea) {
        munmap(testarea, pagesize);
    }
    if (ufd != -1) {
        close(ufd);
    }
    return ret;
}
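
/*
 * Note: at the time of writing this is what the capability code in
 * migration.c consults when the user tries to enable the postcopy-ram
 * capability, so an unsupported host is rejected up front.
 */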

/*
 * Set up an area of RAM so that it *can* be used for postcopy later; this
 * must be done right at the start, prior to pre-copy.
 * opaque should be the MIS.
 */
static int init_range(const char *block_name, void *host_addr,
                      ram_addr_t offset, ram_addr_t length, void *opaque)
{
    MigrationIncomingState *mis = opaque;

    trace_postcopy_init_range(block_name, host_addr, offset, length);

    /*
     * We need the whole of RAM to be truly empty for postcopy, so things
     * like ROMs and any data tables built during init must be zero'd
     * - we're going to get the copy from the source anyway.
     * (Precopy will just overwrite this data, so doesn't need the discard)
     */
    if (ram_discard_range(mis, block_name, 0, length)) {
        return -1;
    }

    return 0;
}
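
/*
 * (ram_discard_range() lives in migration/ram.c; on Linux it drops the
 * backing pages, typically via madvise(MADV_DONTNEED) or the equivalent
 * hole-punching for file-backed/hugetlbfs memory, so the next access
 * faults and can be serviced by userfault.)
 */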

/*
 * At the end of migration, undo the effects of init_range.
 * opaque should be the MIS.
 */
static int cleanup_range(const char *block_name, void *host_addr,
                         ram_addr_t offset, ram_addr_t length, void *opaque)
{
    MigrationIncomingState *mis = opaque;
    struct uffdio_range range_struct;
    trace_postcopy_cleanup_range(block_name, host_addr, offset, length);

    /*
     * We turned off hugepage for the precopy stage with postcopy enabled;
     * we can turn it back on now.
     */
    qemu_madvise(host_addr, length, QEMU_MADV_HUGEPAGE);

    /*
     * We can also turn off userfault now since we should have all the
     * pages. It can be useful to leave it on to debug postcopy
     * if you're not sure it's always getting every page.
     */
    range_struct.start = (uintptr_t)host_addr;
    range_struct.len = length;

    if (ioctl(mis->userfault_fd, UFFDIO_UNREGISTER, &range_struct)) {
        error_report("%s: userfault unregister %s", __func__, strerror(errno));

        return -1;
    }

    return 0;
}

/*
 * Initialise postcopy-ram, setting the RAM to a state where we can go into
 * postcopy later; must be called prior to any precopy.
 * Called from arch_init's similarly named ram_postcopy_incoming_init.
 */
int postcopy_ram_incoming_init(MigrationIncomingState *mis, size_t ram_pages)
{
    if (qemu_ram_foreach_block(init_range, mis)) {
        return -1;
    }

    return 0;
}

/*
 * At the end of a migration where postcopy_ram_incoming_init was called.
 */
int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis)
{
    trace_postcopy_ram_incoming_cleanup_entry();

    if (mis->have_fault_thread) {
        uint64_t tmp64;

        if (qemu_ram_foreach_block(cleanup_range, mis)) {
            return -1;
        }

        /*
         * Tell the fault_thread to exit: it's an eventfd that should
         * currently be at 0; we're going to increment it to 1.
         */
        tmp64 = 1;
        if (write(mis->userfault_quit_fd, &tmp64, 8) == 8) {
            trace_postcopy_ram_incoming_cleanup_join();
            qemu_thread_join(&mis->fault_thread);
        } else {
            /* Not much we can do here, but may as well report it */
            error_report("%s: incrementing userfault_quit_fd: %s", __func__,
                         strerror(errno));
        }
        trace_postcopy_ram_incoming_cleanup_closeuf();
        close(mis->userfault_fd);
        close(mis->userfault_quit_fd);
        mis->have_fault_thread = false;
    }

    qemu_balloon_inhibit(false);

    if (enable_mlock) {
        if (os_mlock() < 0) {
            error_report("mlock: %s", strerror(errno));
            /*
             * It doesn't feel right to fail at this point; we have a valid
             * VM state.
             */
        }
    }

    postcopy_state_set(POSTCOPY_INCOMING_END);
    migrate_send_rp_shut(mis, qemu_file_get_error(mis->from_src_file) != 0);

    if (mis->postcopy_tmp_page) {
        munmap(mis->postcopy_tmp_page, mis->largest_page_size);
        mis->postcopy_tmp_page = NULL;
    }
    if (mis->postcopy_tmp_zero_page) {
        munmap(mis->postcopy_tmp_zero_page, mis->largest_page_size);
        mis->postcopy_tmp_zero_page = NULL;
    }
    trace_postcopy_ram_incoming_cleanup_exit();
    return 0;
}

/*
 * Disable huge pages on an area
 */
static int nhp_range(const char *block_name, void *host_addr,
                     ram_addr_t offset, ram_addr_t length, void *opaque)
{
    trace_postcopy_nhp_range(block_name, host_addr, offset, length);

    /*
     * Before we do discards we need to ensure those discards really
     * do delete areas of the page, even if THP thinks a hugepage would
     * be a good idea, so force hugepages off.
     */
    qemu_madvise(host_addr, length, QEMU_MADV_NOHUGEPAGE);

    return 0;
}

/*
 * Userfault requires us to mark RAM as NOHUGEPAGE prior to discard;
 * however, leaving it until after precopy means that most of the precopy
 * data is still THP'd.
 */
int postcopy_ram_prepare_discard(MigrationIncomingState *mis)
{
    if (qemu_ram_foreach_block(nhp_range, mis)) {
        return -1;
    }

    postcopy_state_set(POSTCOPY_INCOMING_DISCARD);

    return 0;
}

/*
 * Mark the given area of RAM as requiring notification when unwritten
 * pages are accessed.
 * Used as a callback on qemu_ram_foreach_block.
 *   host_addr: Base of area to mark
 *   offset: Offset in the whole ram arena
 *   length: Length of the section
 *   opaque: MigrationIncomingState pointer
 * Returns 0 on success
 */
static int ram_block_enable_notify(const char *block_name, void *host_addr,
                                   ram_addr_t offset, ram_addr_t length,
                                   void *opaque)
{
    MigrationIncomingState *mis = opaque;
    struct uffdio_register reg_struct;

    reg_struct.range.start = (uintptr_t)host_addr;
    reg_struct.range.len = length;
    reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;

    /* Now tell our userfault_fd that it's responsible for this area */
    if (ioctl(mis->userfault_fd, UFFDIO_REGISTER, &reg_struct)) {
        error_report("%s userfault register: %s", __func__, strerror(errno));
        return -1;
    }
    if (!(reg_struct.ioctls & ((__u64)1 << _UFFDIO_COPY))) {
        error_report("%s userfault: Region doesn't support COPY", __func__);
        return -1;
    }

    return 0;
}

/*
 * Handle faults detected by the USERFAULT markings
 */
static void *postcopy_ram_fault_thread(void *opaque)
{
    MigrationIncomingState *mis = opaque;
    struct uffd_msg msg;
    int ret;
    RAMBlock *rb = NULL;
    RAMBlock *last_rb = NULL; /* last RAMBlock we sent part of */

    trace_postcopy_ram_fault_thread_entry();
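    /*
     * Let postcopy_ram_enable_notify() know the thread is running; it
     * waits on fault_thread_sem before registering the RAMBlocks.
     */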
    qemu_sem_post(&mis->fault_thread_sem);

    while (true) {
        ram_addr_t rb_offset;
        struct pollfd pfd[2];

        /*
         * We're mainly waiting for the kernel to give us a faulting HVA;
         * however we can be told to quit via userfault_quit_fd, which is
         * an eventfd.
         */
        pfd[0].fd = mis->userfault_fd;
        pfd[0].events = POLLIN;
        pfd[0].revents = 0;
        pfd[1].fd = mis->userfault_quit_fd;
        pfd[1].events = POLLIN; /* Waiting for eventfd to go positive */
        pfd[1].revents = 0;

        if (poll(pfd, 2, -1 /* Wait forever */) == -1) {
            error_report("%s: userfault poll: %s", __func__, strerror(errno));
            break;
        }

        if (pfd[1].revents) {
            trace_postcopy_ram_fault_thread_quit();
            break;
        }

        ret = read(mis->userfault_fd, &msg, sizeof(msg));
        if (ret != sizeof(msg)) {
            if (errno == EAGAIN) {
                /*
                 * If a wake up happens on the other thread just after
                 * the poll, there is nothing to read.
                 */
                continue;
            }
            if (ret < 0) {
                error_report("%s: Failed to read full userfault message: %s",
                             __func__, strerror(errno));
                break;
            } else {
                error_report("%s: Read %d bytes from userfaultfd expected %zu",
                             __func__, ret, sizeof(msg));
                break; /* Lost alignment, don't know what we'd read next */
            }
        }
        if (msg.event != UFFD_EVENT_PAGEFAULT) {
            error_report("%s: Read unexpected event %u from userfaultfd",
                         __func__, msg.event);
            continue; /* It's not a page fault; shouldn't happen */
        }

        rb = qemu_ram_block_from_host(
                 (void *)(uintptr_t)msg.arg.pagefault.address,
                 true, &rb_offset);
        if (!rb) {
            error_report("postcopy_ram_fault_thread: Fault outside guest: %"
                         PRIx64, (uint64_t)msg.arg.pagefault.address);
            break;
        }

        rb_offset &= ~(qemu_ram_pagesize(rb) - 1);
        trace_postcopy_ram_fault_thread_request(msg.arg.pagefault.address,
                                                qemu_ram_get_idstr(rb),
                                                rb_offset);

        /*
         * Send the request to the source - we want to request one
         * of our host page sizes (which is >= TPS).
         */
        if (rb != last_rb) {
            last_rb = rb;
            migrate_send_rp_req_pages(mis, qemu_ram_get_idstr(rb),
                                      rb_offset, qemu_ram_pagesize(rb));
        } else {
            /* Save some space: a NULL name means "same block as last time" */
            migrate_send_rp_req_pages(mis, NULL,
                                      rb_offset, qemu_ram_pagesize(rb));
        }
    }
    trace_postcopy_ram_fault_thread_exit();
    return NULL;
}
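
/*
 * Prepare the incoming side to receive userfaults: open the
 * userfaultfd, start the fault thread, and register all of RAM with the
 * fd so that accesses to not-yet-received pages are reported to us.
 */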
int postcopy_ram_enable_notify(MigrationIncomingState *mis)
{
    /* Open the fd for the kernel to give us userfaults */
    mis->userfault_fd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
    if (mis->userfault_fd == -1) {
        error_report("%s: Failed to open userfault fd: %s", __func__,
                     strerror(errno));
        return -1;
    }

    /*
     * Although the host check already tested the API, we need to
     * do the check again as an ABI handshake on the new fd.
     */
    if (!ufd_version_check(mis->userfault_fd)) {
        return -1;
    }

    /* Now an eventfd we use to tell the fault-thread to quit */
    mis->userfault_quit_fd = eventfd(0, EFD_CLOEXEC);
    if (mis->userfault_quit_fd == -1) {
        error_report("%s: Opening userfault_quit_fd: %s", __func__,
                     strerror(errno));
        close(mis->userfault_fd);
        return -1;
    }

    qemu_sem_init(&mis->fault_thread_sem, 0);
    qemu_thread_create(&mis->fault_thread, "postcopy/fault",
                       postcopy_ram_fault_thread, mis, QEMU_THREAD_JOINABLE);
    qemu_sem_wait(&mis->fault_thread_sem);
    qemu_sem_destroy(&mis->fault_thread_sem);
    mis->have_fault_thread = true;

    /* Mark so that we get notified of accesses to unwritten areas */
    if (qemu_ram_foreach_block(ram_block_enable_notify, mis)) {
        return -1;
    }

    /*
     * Ballooning can mark pages as absent while we're postcopying,
     * which would cause false userfaults.
     */
    qemu_balloon_inhibit(true);

    trace_postcopy_ram_enable_notify();

    return 0;
}

/*
 * Place a host page (from) at (host) atomically
 * returns 0 on success
 */
int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from,
                        size_t pagesize)
{
    struct uffdio_copy copy_struct;

    copy_struct.dst = (uint64_t)(uintptr_t)host;
    copy_struct.src = (uint64_t)(uintptr_t)from;
    copy_struct.len = pagesize;
    copy_struct.mode = 0;

    /*
     * The copy also acks to the kernel, waking the stalled thread up.
     * TODO: We can inhibit that ack and only do it if it was requested,
     * which would be slightly cheaper, but we'd have to be careful
     * of the order of updating our page state.
     */
    if (ioctl(mis->userfault_fd, UFFDIO_COPY, &copy_struct)) {
        int e = errno;
        error_report("%s: %s copy host: %p from: %p (size: %zu)",
                     __func__, strerror(e), host, from, pagesize);

        return -e;
    }

    trace_postcopy_place_page(host);
    return 0;
}

/*
 * Place a zero page at (host) atomically
 * returns 0 on success
 */
int postcopy_place_page_zero(MigrationIncomingState *mis, void *host,
                             size_t pagesize)
{
    trace_postcopy_place_page_zero(host);

    if (pagesize == getpagesize()) {
        struct uffdio_zeropage zero_struct;
        zero_struct.range.start = (uint64_t)(uintptr_t)host;
        zero_struct.range.len = getpagesize();
        zero_struct.mode = 0;

        if (ioctl(mis->userfault_fd, UFFDIO_ZEROPAGE, &zero_struct)) {
            int e = errno;
            error_report("%s: %s zero host: %p",
                         __func__, strerror(e), host);

            return -e;
        }
    } else {
        /* The kernel can't use UFFDIO_ZEROPAGE for hugepages */
        if (!mis->postcopy_tmp_zero_page) {
            mis->postcopy_tmp_zero_page = mmap(NULL, mis->largest_page_size,
                                               PROT_READ | PROT_WRITE,
                                               MAP_PRIVATE | MAP_ANONYMOUS,
                                               -1, 0);
            if (mis->postcopy_tmp_zero_page == MAP_FAILED) {
                int e = errno;
                mis->postcopy_tmp_zero_page = NULL;
                error_report("%s: %s mapping large zero page",
                             __func__, strerror(e));
                return -e;
            }
            memset(mis->postcopy_tmp_zero_page, '\0', mis->largest_page_size);
        }
        return postcopy_place_page(mis, host, mis->postcopy_tmp_zero_page,
                                   pagesize);
    }

    return 0;
}

/*
 * Returns a target page of memory that can be mapped at a later point in
 * time using postcopy_place_page.
 * The same address is used repeatedly; postcopy_place_page just takes the
 * backing page away.
 * Returns: Pointer to allocated page
 */
void *postcopy_get_tmp_page(MigrationIncomingState *mis)
{
    if (!mis->postcopy_tmp_page) {
        mis->postcopy_tmp_page = mmap(NULL, mis->largest_page_size,
                                      PROT_READ | PROT_WRITE, MAP_PRIVATE |
                                      MAP_ANONYMOUS, -1, 0);
        if (mis->postcopy_tmp_page == MAP_FAILED) {
            mis->postcopy_tmp_page = NULL;
            error_report("%s: %s", __func__, strerror(errno));
            return NULL;
        }
    }

    return mis->postcopy_tmp_page;
}

#else
/* No target OS support, stubs just fail */
bool postcopy_ram_supported_by_host(void)
{
    error_report("%s: No OS support", __func__);
    return false;
}

int postcopy_ram_incoming_init(MigrationIncomingState *mis, size_t ram_pages)
{
    error_report("postcopy_ram_incoming_init: No OS support");
    return -1;
}

int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis)
{
    assert(0);
    return -1;
}

int postcopy_ram_prepare_discard(MigrationIncomingState *mis)
{
    assert(0);
    return -1;
}

int postcopy_ram_enable_notify(MigrationIncomingState *mis)
{
    assert(0);
    return -1;
}

int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from,
                        size_t pagesize)
{
    assert(0);
    return -1;
}

int postcopy_place_page_zero(MigrationIncomingState *mis, void *host,
                             size_t pagesize)
{
    assert(0);
    return -1;
}

void *postcopy_get_tmp_page(MigrationIncomingState *mis)
{
    assert(0);
    return NULL;
}

#endif

/* ------------------------------------------------------------------------- */

/**
 * postcopy_discard_send_init: Called at the start of each RAMBlock before
 *   asking to discard individual ranges.
 *
 * @ms: The current migration state.
 * @offset: the bitmap offset of the named RAMBlock in the migration
 *   bitmap.
 * @name: RAMBlock that discards will operate on.
 *
 * returns: a new PDS.
 */
PostcopyDiscardState *postcopy_discard_send_init(MigrationState *ms,
                                                 unsigned long offset,
                                                 const char *name)
{
    /* g_malloc0() aborts on failure, so no NULL check is needed */
    PostcopyDiscardState *res = g_malloc0(sizeof(PostcopyDiscardState));

    res->ramblock_name = name;
    res->offset = offset;

    return res;
}
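
/*
 * Typical use, a sketch (the real caller is the dirty-bitmap walk on the
 * source side):
 *
 *   PostcopyDiscardState *pds =
 *       postcopy_discard_send_init(ms, first_bit, block_name);
 *   postcopy_discard_send_range(ms, pds, start_page, num_pages);
 *   ... more ranges ...
 *   postcopy_discard_send_finish(ms, pds);
 *
 * first_bit, start_page and num_pages are illustrative names, not code
 * from the caller.
 */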

/**
 * postcopy_discard_send_range: Called by the bitmap code for each chunk to
 *   discard. May send a discard message, may just leave it queued to
 *   be sent later.
 *
 * @ms: Current migration state.
 * @pds: Structure initialised by postcopy_discard_send_init().
 * @start,@length: a range of pages in the migration bitmap in the
 *   RAM block passed to postcopy_discard_send_init() (length=1 is one page)
 */
void postcopy_discard_send_range(MigrationState *ms, PostcopyDiscardState *pds,
                                 unsigned long start, unsigned long length)
{
    size_t tp_bits = qemu_target_page_bits();
    /* Convert to byte offsets within the RAM block */
    pds->start_list[pds->cur_entry] = (start - pds->offset) << tp_bits;
    pds->length_list[pds->cur_entry] = length << tp_bits;
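    /*
     * E.g. with 4KiB target pages (tp_bits == 12), start - offset == 3
     * and length == 2 queue an 8KiB discard starting 12KiB into the
     * RAMBlock.
     */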
    trace_postcopy_discard_send_range(pds->ramblock_name, start, length);
    pds->cur_entry++;
    pds->nsentwords++;

    if (pds->cur_entry == MAX_DISCARDS_PER_COMMAND) {
        /* Full set, ship it! */
        qemu_savevm_send_postcopy_ram_discard(ms->to_dst_file,
                                              pds->ramblock_name,
                                              pds->cur_entry,
                                              pds->start_list,
                                              pds->length_list);
        pds->nsentcmds++;
        pds->cur_entry = 0;
    }
}

/**
 * postcopy_discard_send_finish: Called at the end of each RAMBlock by the
 *   bitmap code. Sends any outstanding discard messages, frees the PDS.
 *
 * @ms: Current migration state.
 * @pds: Structure initialised by postcopy_discard_send_init().
 */
void postcopy_discard_send_finish(MigrationState *ms, PostcopyDiscardState *pds)
{
    /* Anything unsent? */
    if (pds->cur_entry) {
        qemu_savevm_send_postcopy_ram_discard(ms->to_dst_file,
                                              pds->ramblock_name,
                                              pds->cur_entry,
                                              pds->start_list,
                                              pds->length_list);
        pds->nsentcmds++;
    }

    trace_postcopy_discard_send_finish(pds->ramblock_name, pds->nsentwords,
                                       pds->nsentcmds);

    g_free(pds);
}