/*
 * Virtio Balloon Device
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */
#include "qemu-common.h"
#include "virtio.h"
#include "pc.h"
#include "sysemu.h"
#include "cpu.h"
#include "monitor.h"
#include "balloon.h"
#include "virtio-balloon.h"
#include "kvm.h"
#include "qlist.h"
#include "qint.h"
#include "qstring.h"

#if defined(__linux__)
#include <sys/mman.h>
#endif
typedef struct VirtIOBalloon
{
    VirtIODevice vdev;
    VirtQueue *ivq, *dvq, *svq;   /* inflate, deflate and stats queues */
    uint32_t num_pages;           /* target balloon size, in balloon pages */
    uint32_t actual;              /* current size acknowledged by the guest */
    uint64_t stats[VIRTIO_BALLOON_S_NR];
    VirtQueueElement stats_vq_elem;
    size_t stats_vq_offset;
    MonitorCompletion *stats_callback;
    void *stats_opaque_callback_data;
} VirtIOBalloon;
static VirtIOBalloon *to_virtio_balloon(VirtIODevice *vdev)
{
    return (VirtIOBalloon *)vdev;
}

static void balloon_page(void *addr, int deflate)
{
#if defined(__linux__)
    if (!kvm_enabled() || kvm_has_sync_mmu())
        madvise(addr, TARGET_PAGE_SIZE,
                deflate ? MADV_WILLNEED : MADV_DONTNEED);
#endif
}
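
/* Inflating the balloon (deflate == 0) hints the host kernel with
 * MADV_DONTNEED so the backing page can be reclaimed; deflating hints
 * MADV_WILLNEED before the guest touches the page again.  The madvise()
 * is skipped when KVM is enabled without a synchronous MMU, because
 * without MMU notifiers the kernel cannot keep KVM's guest mappings in
 * sync with the host page tables. */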
/*
 * reset_stats - Mark all items in the stats array as unset
 *
 * This function needs to be called at device initialization and before
 * updating to a set of newly-generated stats.  This will ensure that no
 * stale values stick around in case the guest reports a subset of the
 * supported statistics.
 */
static inline void reset_stats(VirtIOBalloon *dev)
{
    int i;
    for (i = 0; i < VIRTIO_BALLOON_S_NR; dev->stats[i++] = -1);
}
static void stat_put(QDict *dict, const char *label, uint64_t val)
{
    if (val != -1)
        qdict_put(dict, label, qint_from_int(val));
}

static QObject *get_stats_qobject(VirtIOBalloon *dev)
{
    QDict *dict = qdict_new();
    uint64_t actual = ram_size - ((uint64_t) dev->actual <<
                                  VIRTIO_BALLOON_PFN_SHIFT);

    stat_put(dict, "actual", actual);
    stat_put(dict, "mem_swapped_in", dev->stats[VIRTIO_BALLOON_S_SWAP_IN]);
    stat_put(dict, "mem_swapped_out", dev->stats[VIRTIO_BALLOON_S_SWAP_OUT]);
    stat_put(dict, "major_page_faults", dev->stats[VIRTIO_BALLOON_S_MAJFLT]);
    stat_put(dict, "minor_page_faults", dev->stats[VIRTIO_BALLOON_S_MINFLT]);
    stat_put(dict, "free_mem", dev->stats[VIRTIO_BALLOON_S_MEMFREE]);
    stat_put(dict, "total_mem", dev->stats[VIRTIO_BALLOON_S_MEMTOT]);

    return QOBJECT(dict);
}
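
/* Example (illustrative, hypothetical values) of the dictionary this builds,
 * as consumed by the monitor's balloon query:
 *
 *     { "actual": 1073741824, "free_mem": 262144000, "total_mem": 1020054528 }
 *
 * Stats that are still -1 (unset) are skipped by stat_put(), so a guest that
 * reports only a subset of VIRTIO_BALLOON_S_* simply yields a smaller dict. */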
/* FIXME: once we do a virtio refactoring, this will get subsumed into common
 * code */
static size_t memcpy_from_iovector(void *data, size_t offset, size_t size,
                                   struct iovec *iov, int iovlen)
{
    int i;
    uint8_t *ptr = data;
    size_t iov_off = 0;
    size_t data_off = 0;

    for (i = 0; i < iovlen && size; i++) {
        if (offset < (iov_off + iov[i].iov_len)) {
            size_t len = MIN((iov_off + iov[i].iov_len) - offset, size);

            memcpy(ptr + data_off, iov[i].iov_base + (offset - iov_off), len);

            data_off += len;
            offset += len;
            size -= len;
        }

        iov_off += iov[i].iov_len;
    }

    return data_off;
}
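
/* Typical usage, mirroring the callers below: pull fixed-size records out of
 * a request's out_sg list, advancing 'offset' by the record size and stopping
 * on a short copy:
 *
 *     uint32_t pfn;
 *     while (memcpy_from_iovector(&pfn, offset, 4,
 *                                 elem.out_sg, elem.out_num) == 4) {
 *         offset += 4;
 *         ...
 *     }
 */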
static void virtio_balloon_handle_output(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOBalloon *s = to_virtio_balloon(vdev);
    VirtQueueElement elem;

    while (virtqueue_pop(vq, &elem)) {
        size_t offset = 0;
        uint32_t pfn;

        while (memcpy_from_iovector(&pfn, offset, 4,
                                    elem.out_sg, elem.out_num) == 4) {
            ram_addr_t pa;
            ram_addr_t addr;

            pa = (ram_addr_t)ldl_p(&pfn) << VIRTIO_BALLOON_PFN_SHIFT;
            offset += 4;

            addr = cpu_get_physical_page_desc(pa);
            if ((addr & ~TARGET_PAGE_MASK) != IO_MEM_RAM)
                continue;

            /* Using qemu_get_ram_ptr is bending the rules a bit, but
               should be OK because we only want a single page.  */
            balloon_page(qemu_get_ram_ptr(addr), !!(vq == s->dvq));
        }

        virtqueue_push(vq, &elem, offset);
        virtio_notify(vdev, vq);
    }
}
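
/* Both the inflate queue (ivq) and the deflate queue (dvq) are wired to the
 * handler above; each request carries an array of 32-bit guest page frame
 * numbers, and comparing vq against s->dvq decides whether a page is being
 * handed back to the host (inflate) or about to be reused by the guest
 * (deflate). */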
static void complete_stats_request(VirtIOBalloon *vb)
{
    QObject *stats;

    if (!vb->stats_opaque_callback_data)
        return;

    stats = get_stats_qobject(vb);
    vb->stats_callback(vb->stats_opaque_callback_data, stats);
    qobject_decref(stats);
    vb->stats_opaque_callback_data = NULL;
    vb->stats_callback = NULL;
}
static void virtio_balloon_receive_stats(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOBalloon *s = DO_UPCAST(VirtIOBalloon, vdev, vdev);
    VirtQueueElement *elem = &s->stats_vq_elem;
    VirtIOBalloonStat stat;
    size_t offset = 0;

    if (!virtqueue_pop(vq, elem)) {
        return;
    }

    /* Initialize the stats to get rid of any stale values.  This is only
     * needed to handle the case where a guest supports fewer stats than it
     * used to (i.e. it has booted into an old kernel).
     */
    reset_stats(s);

    while (memcpy_from_iovector(&stat, offset, sizeof(stat), elem->out_sg,
                                elem->out_num) == sizeof(stat)) {
        uint16_t tag = tswap16(stat.tag);
        uint64_t val = tswap64(stat.val);

        offset += sizeof(stat);
        if (tag < VIRTIO_BALLOON_S_NR)
            s->stats[tag] = val;
    }
    s->stats_vq_offset = offset;

    complete_stats_request(s);
}
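
/* Stats virtqueue flow: the guest keeps a single buffer of VirtIOBalloonStat
 * {tag, val} pairs outstanding on svq.  The element popped above is
 * intentionally not pushed back right away; it is parked in s->stats_vq_elem
 * and only returned to the guest from virtio_balloon_to_target() when the
 * host next requests statistics, which is what prompts the guest to refill
 * the buffer. */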
static void virtio_balloon_get_config(VirtIODevice *vdev, uint8_t *config_data)
{
    VirtIOBalloon *dev = to_virtio_balloon(vdev);
    struct virtio_balloon_config config;

    config.num_pages = cpu_to_le32(dev->num_pages);
    config.actual = cpu_to_le32(dev->actual);

    memcpy(config_data, &config, 8);
}
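
/* The config space is 8 bytes: two little-endian 32-bit fields, num_pages
 * (the target size requested by the host) followed by actual (the size the
 * guest has acknowledged).  This matches struct virtio_balloon_config and
 * the config length passed to virtio_common_init() below. */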
static void virtio_balloon_set_config(VirtIODevice *vdev,
                                      const uint8_t *config_data)
{
    VirtIOBalloon *dev = to_virtio_balloon(vdev);
    struct virtio_balloon_config config;
    memcpy(&config, config_data, 8);
    /* The config fields are little-endian on the wire; convert back,
     * mirroring the cpu_to_le32() in virtio_balloon_get_config(). */
    dev->actual = le32_to_cpu(config.actual);
}
static uint32_t virtio_balloon_get_features(VirtIODevice *vdev, uint32_t f)
{
    f |= (1 << VIRTIO_BALLOON_F_STATS_VQ);
    return f;
}
static void virtio_balloon_to_target(void *opaque, ram_addr_t target,
                                     MonitorCompletion cb, void *cb_data)
{
    VirtIOBalloon *dev = opaque;

    if (target > ram_size)
        target = ram_size;

    if (target) {
        dev->num_pages = (ram_size - target) >> VIRTIO_BALLOON_PFN_SHIFT;
        virtio_notify_config(&dev->vdev);
    } else {
        /* For now, only allow one request at a time.  This restriction can be
         * removed later by queueing callback and data pairs.
         */
        if (dev->stats_callback != NULL) {
            return;
        }
        dev->stats_callback = cb;
        dev->stats_opaque_callback_data = cb_data;
        if (dev->vdev.guest_features & (1 << VIRTIO_BALLOON_F_STATS_VQ)) {
            virtqueue_push(dev->svq, &dev->stats_vq_elem, dev->stats_vq_offset);
            virtio_notify(&dev->vdev, dev->svq);
        } else {
            /* Stats are not supported.  Clear out any stale values that might
             * have been set by a more featureful guest kernel.
             */
            reset_stats(dev);
            complete_stats_request(dev);
        }
    }
}
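
/* This is the qemu_add_balloon_handler() callback and it plays two roles: a
 * non-zero 'target' (in bytes) resizes the balloon by recomputing num_pages
 * and kicking the guest with a config change, while a zero 'target' is the
 * monitor's way of asking for statistics, answered through the
 * MonitorCompletion callback (immediately, with just "actual", if the guest
 * did not negotiate VIRTIO_BALLOON_F_STATS_VQ). */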
static void virtio_balloon_save(QEMUFile *f, void *opaque)
{
    VirtIOBalloon *s = opaque;

    virtio_save(&s->vdev, f);

    qemu_put_be32(f, s->num_pages);
    qemu_put_be32(f, s->actual);
}
static int virtio_balloon_load(QEMUFile *f, void *opaque, int version_id)
{
    VirtIOBalloon *s = opaque;

    if (version_id != 1)
        return -EINVAL;

    virtio_load(&s->vdev, f);

    s->num_pages = qemu_get_be32(f);
    s->actual = qemu_get_be32(f);
    return 0;
}
VirtIODevice *virtio_balloon_init(DeviceState *dev)
{
    VirtIOBalloon *s;

    s = (VirtIOBalloon *)virtio_common_init("virtio-balloon",
                                            VIRTIO_ID_BALLOON,
                                            8, sizeof(VirtIOBalloon));

    s->vdev.get_config = virtio_balloon_get_config;
    s->vdev.set_config = virtio_balloon_set_config;
    s->vdev.get_features = virtio_balloon_get_features;

    s->ivq = virtio_add_queue(&s->vdev, 128, virtio_balloon_handle_output);
    s->dvq = virtio_add_queue(&s->vdev, 128, virtio_balloon_handle_output);
    s->svq = virtio_add_queue(&s->vdev, 128, virtio_balloon_receive_stats);

    reset_stats(s);
    qemu_add_balloon_handler(virtio_balloon_to_target, s);

    register_savevm("virtio-balloon", -1, 1,
                    virtio_balloon_save, virtio_balloon_load, s);

    return &s->vdev;
}
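
/* Note (assumption about the surrounding tree, not visible in this file):
 * virtio_balloon_init() is expected to be called from the virtio transport
 * glue, e.g. a "virtio-balloon-pci" device created by "-balloon virtio",
 * which exposes the three virtqueues added above to the guest. */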