virtio-balloon: don't hardcode config size value
[qemu/ar7.git] / qapi / opts-visitor.c
blob96ed85899d596e81306c8213ac4b0f147fe4ec5d
1 /*
2 * Options Visitor
4 * Copyright Red Hat, Inc. 2012, 2013
6 * Author: Laszlo Ersek <lersek@redhat.com>
8 * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
9 * See the COPYING.LIB file in the top-level directory.
13 #include "qemu-common.h"
14 #include "qapi/qmp/qerror.h"
15 #include "qapi/opts-visitor.h"
16 #include "qemu/queue.h"
17 #include "qemu/option_int.h"
18 #include "qapi/visitor-impl.h"
/* State machine for traversing a repeated option ("a list" at the QAPI
 * level) and for expanding integer intervals of the form "a-b" into
 * individual list elements. */
enum ListMode
{
    LM_NONE,             /* not traversing a list of repeated options */
    LM_STARTED,          /* opts_start_list() succeeded */

    LM_IN_PROGRESS,      /* opts_next_list() has been called.
                          *
                          * Generating the next list link will consume the most
                          * recently parsed QemuOpt instance of the repeated
                          * option.
                          *
                          * Parsing a value into the list link will examine the
                          * next QemuOpt instance of the repeated option, and
                          * possibly enter LM_SIGNED_INTERVAL or
                          * LM_UNSIGNED_INTERVAL.
                          */

    LM_SIGNED_INTERVAL,  /* opts_next_list() has been called.
                          *
                          * Generating the next list link will consume the most
                          * recently stored element from the signed interval,
                          * parsed from the most recent QemuOpt instance of the
                          * repeated option. This may consume QemuOpt itself
                          * and return to LM_IN_PROGRESS.
                          *
                          * Parsing a value into the list link will store the
                          * next element of the signed interval.
                          */

    LM_UNSIGNED_INTERVAL /* Same as above, only for an unsigned interval. */
};

typedef enum ListMode ListMode;
/* Visitor implementation that reads values out of a flat QemuOpts
 * collection; see opts_visitor_new() for construction. */
struct OptsVisitor
{
    Visitor visitor;

    /* Ownership remains with opts_visitor_new()'s caller. */
    const QemuOpts *opts_root;

    /* Nesting level of opts_start_struct() calls; the option hash table is
     * built only for the outermost struct. */
    unsigned depth;

    /* Non-null iff depth is positive. Each key is a QemuOpt name. Each value
     * is a non-empty GQueue, enumerating all QemuOpt occurrences with that
     * name. */
    GHashTable *unprocessed_opts;

    /* The list currently being traversed with opts_start_list() /
     * opts_next_list(). The list must have a struct element type in the
     * schema, with a single mandatory scalar member. */
    ListMode list_mode;
    GQueue *repeated_opts;

    /* When parsing a list of repeating options as integers, values of the form
     * "a-b", representing a closed interval, are allowed. Elements in the
     * range are generated individually.
     */
    union {
        int64_t s;
        uint64_t u;
    } range_next, range_limit;

    /* If "opts_root->id" is set, reinstantiate it as a fake QemuOpt for
     * uniformity. Only its "name" and "str" fields are set. "fake_id_opt" does
     * not survive or escape the OptsVisitor object.
     */
    QemuOpt *fake_id_opt;
};
92 static void
93 destroy_list(gpointer list)
95 g_queue_free(list);
99 static void
100 opts_visitor_insert(GHashTable *unprocessed_opts, const QemuOpt *opt)
102 GQueue *list;
104 list = g_hash_table_lookup(unprocessed_opts, opt->name);
105 if (list == NULL) {
106 list = g_queue_new();
108 /* GHashTable will never try to free the keys -- we supply NULL as
109 * "key_destroy_func" in opts_start_struct(). Thus cast away key
110 * const-ness in order to suppress gcc's warning.
112 g_hash_table_insert(unprocessed_opts, (gpointer)opt->name, list);
115 /* Similarly, destroy_list() doesn't call g_queue_free_full(). */
116 g_queue_push_tail(list, (gpointer)opt);
/* Visitor callback: allocate the output struct and, on the outermost
 * invocation only, index every QemuOpt of "opts_root" by name so that
 * subsequent scalar/list lookups can find and consume them. */
static void
opts_start_struct(Visitor *v, void **obj, const char *kind,
                  const char *name, size_t size, Error **errp)
{
    OptsVisitor *ov = DO_UPCAST(OptsVisitor, visitor, v);
    const QemuOpt *opt;

    /* guarantee a unique, freeable allocation even for size == 0 */
    *obj = g_malloc0(size > 0 ? size : 1);
    if (ov->depth++ > 0) {
        /* nested struct: the option table was already built */
        return;
    }

    ov->unprocessed_opts = g_hash_table_new_full(&g_str_hash, &g_str_equal,
                                                 NULL, &destroy_list);
    QTAILQ_FOREACH(opt, &ov->opts_root->head, next) {
        /* ensured by qemu-option.c::opts_do_parse() */
        assert(strcmp(opt->name, "id") != 0);
        opts_visitor_insert(ov->unprocessed_opts, opt);
    }

    if (ov->opts_root->id != NULL) {
        /* expose the QemuOpts id as a fake "id" option for uniform lookup */
        ov->fake_id_opt = g_malloc0(sizeof *ov->fake_id_opt);

        ov->fake_id_opt->name = "id";
        ov->fake_id_opt->str = ov->opts_root->id;
        opts_visitor_insert(ov->unprocessed_opts, ov->fake_id_opt);
    }
}
151 static gboolean
152 ghr_true(gpointer ign_key, gpointer ign_value, gpointer ign_user_data)
154 return TRUE;
158 static void
159 opts_end_struct(Visitor *v, Error **errp)
161 OptsVisitor *ov = DO_UPCAST(OptsVisitor, visitor, v);
162 GQueue *any;
164 if (--ov->depth > 0) {
165 return;
168 /* we should have processed all (distinct) QemuOpt instances */
169 any = g_hash_table_find(ov->unprocessed_opts, &ghr_true, NULL);
170 if (any) {
171 const QemuOpt *first;
173 first = g_queue_peek_head(any);
174 error_set(errp, QERR_INVALID_PARAMETER, first->name);
176 g_hash_table_destroy(ov->unprocessed_opts);
177 ov->unprocessed_opts = NULL;
178 g_free(ov->fake_id_opt);
179 ov->fake_id_opt = NULL;
183 static GQueue *
184 lookup_distinct(const OptsVisitor *ov, const char *name, Error **errp)
186 GQueue *list;
188 list = g_hash_table_lookup(ov->unprocessed_opts, name);
189 if (!list) {
190 error_set(errp, QERR_MISSING_PARAMETER, name);
192 return list;
196 static void
197 opts_start_list(Visitor *v, const char *name, Error **errp)
199 OptsVisitor *ov = DO_UPCAST(OptsVisitor, visitor, v);
201 /* we can't traverse a list in a list */
202 assert(ov->list_mode == LM_NONE);
203 ov->repeated_opts = lookup_distinct(ov, name, errp);
204 if (ov->repeated_opts != NULL) {
205 ov->list_mode = LM_STARTED;
/* Visitor callback: advance the list traversal. Returns a freshly allocated
 * link chained after "*list" (or "*list" itself on the first call), or NULL
 * when the repeated option -- including any in-progress interval -- has been
 * exhausted. Relies on the ListMode state machine described above. */
static GenericList *
opts_next_list(Visitor *v, GenericList **list, Error **errp)
{
    OptsVisitor *ov = DO_UPCAST(OptsVisitor, visitor, v);
    GenericList **link;

    switch (ov->list_mode) {
    case LM_STARTED:
        ov->list_mode = LM_IN_PROGRESS;
        link = list;
        break;

    case LM_SIGNED_INTERVAL:
    case LM_UNSIGNED_INTERVAL:
        link = &(*list)->next;

        /* advance within the interval if elements remain */
        if (ov->list_mode == LM_SIGNED_INTERVAL) {
            if (ov->range_next.s < ov->range_limit.s) {
                ++ov->range_next.s;
                break;
            }
        } else if (ov->range_next.u < ov->range_limit.u) {
            ++ov->range_next.u;
            break;
        }
        ov->list_mode = LM_IN_PROGRESS;
        /* range has been completed, fall through in order to pop option */

    case LM_IN_PROGRESS: {
        const QemuOpt *opt;

        /* consume the most recently parsed occurrence */
        opt = g_queue_pop_head(ov->repeated_opts);
        if (g_queue_is_empty(ov->repeated_opts)) {
            /* destroys the queue via destroy_list(); ends the traversal */
            g_hash_table_remove(ov->unprocessed_opts, opt->name);
            return NULL;
        }
        link = &(*list)->next;
        break;
    }

    default:
        abort();
    }

    *link = g_malloc0(sizeof **link);
    return *link;
}
259 static void
260 opts_end_list(Visitor *v, Error **errp)
262 OptsVisitor *ov = DO_UPCAST(OptsVisitor, visitor, v);
264 assert(ov->list_mode == LM_STARTED ||
265 ov->list_mode == LM_IN_PROGRESS ||
266 ov->list_mode == LM_SIGNED_INTERVAL ||
267 ov->list_mode == LM_UNSIGNED_INTERVAL);
268 ov->repeated_opts = NULL;
269 ov->list_mode = LM_NONE;
273 static const QemuOpt *
274 lookup_scalar(const OptsVisitor *ov, const char *name, Error **errp)
276 if (ov->list_mode == LM_NONE) {
277 GQueue *list;
279 /* the last occurrence of any QemuOpt takes effect when queried by name
281 list = lookup_distinct(ov, name, errp);
282 return list ? g_queue_peek_tail(list) : NULL;
284 assert(ov->list_mode == LM_IN_PROGRESS);
285 return g_queue_peek_head(ov->repeated_opts);
289 static void
290 processed(OptsVisitor *ov, const char *name)
292 if (ov->list_mode == LM_NONE) {
293 g_hash_table_remove(ov->unprocessed_opts, name);
294 return;
296 assert(ov->list_mode == LM_IN_PROGRESS);
297 /* do nothing */
301 static void
302 opts_type_str(Visitor *v, char **obj, const char *name, Error **errp)
304 OptsVisitor *ov = DO_UPCAST(OptsVisitor, visitor, v);
305 const QemuOpt *opt;
307 opt = lookup_scalar(ov, name, errp);
308 if (!opt) {
309 return;
311 *obj = g_strdup(opt->str ? opt->str : "");
312 processed(ov, name);
316 /* mimics qemu-option.c::parse_option_bool() */
317 static void
318 opts_type_bool(Visitor *v, bool *obj, const char *name, Error **errp)
320 OptsVisitor *ov = DO_UPCAST(OptsVisitor, visitor, v);
321 const QemuOpt *opt;
323 opt = lookup_scalar(ov, name, errp);
324 if (!opt) {
325 return;
328 if (opt->str) {
329 if (strcmp(opt->str, "on") == 0 ||
330 strcmp(opt->str, "yes") == 0 ||
331 strcmp(opt->str, "y") == 0) {
332 *obj = true;
333 } else if (strcmp(opt->str, "off") == 0 ||
334 strcmp(opt->str, "no") == 0 ||
335 strcmp(opt->str, "n") == 0) {
336 *obj = false;
337 } else {
338 error_set(errp, QERR_INVALID_PARAMETER_VALUE, opt->name,
339 "on|yes|y|off|no|n");
340 return;
342 } else {
343 *obj = true;
346 processed(ov, name);
/* Visitor callback: parse option "name" as an int64. Inside a repeated
 * option, a value of the form "a-b" (a <= b, span bounded by
 * OPTS_VISITOR_RANGE_MAX to cap memory use) opens a signed interval whose
 * elements are then emitted one per opts_next_list() step. */
static void
opts_type_int(Visitor *v, int64_t *obj, const char *name, Error **errp)
{
    OptsVisitor *ov = DO_UPCAST(OptsVisitor, visitor, v);
    const QemuOpt *opt;
    const char *str;
    long long val;
    char *endptr;

    if (ov->list_mode == LM_SIGNED_INTERVAL) {
        /* emit the current interval element; advanced in opts_next_list() */
        *obj = ov->range_next.s;
        return;
    }

    opt = lookup_scalar(ov, name, errp);
    if (!opt) {
        return;
    }
    /* treat a valueless option as the empty string (parse error below) */
    str = opt->str ? opt->str : "";

    /* we've gotten past lookup_scalar() */
    assert(ov->list_mode == LM_NONE || ov->list_mode == LM_IN_PROGRESS);

    errno = 0;
    val = strtoll(str, &endptr, 0);
    /* the INT64 bound checks matter where long long is wider than int64_t */
    if (errno == 0 && endptr > str && INT64_MIN <= val && val <= INT64_MAX) {
        if (*endptr == '\0') {
            *obj = val;
            processed(ov, name);
            return;
        }
        if (*endptr == '-' && ov->list_mode == LM_IN_PROGRESS) {
            long long val2;

            str = endptr + 1;
            val2 = strtoll(str, &endptr, 0);
            if (errno == 0 && endptr > str && *endptr == '\0' &&
                INT64_MIN <= val2 && val2 <= INT64_MAX && val <= val2 &&
                (val > INT64_MAX - OPTS_VISITOR_RANGE_MAX ||
                 val2 < val + OPTS_VISITOR_RANGE_MAX)) {
                ov->range_next.s = val;
                ov->range_limit.s = val2;
                ov->list_mode = LM_SIGNED_INTERVAL;

                /* as if entering on the top */
                *obj = ov->range_next.s;
                return;
            }
        }
    }
    error_set(errp, QERR_INVALID_PARAMETER_VALUE, opt->name,
              (ov->list_mode == LM_NONE) ? "an int64 value" :
                                           "an int64 value or range");
}
406 static void
407 opts_type_uint64(Visitor *v, uint64_t *obj, const char *name, Error **errp)
409 OptsVisitor *ov = DO_UPCAST(OptsVisitor, visitor, v);
410 const QemuOpt *opt;
411 const char *str;
412 unsigned long long val;
413 char *endptr;
415 if (ov->list_mode == LM_UNSIGNED_INTERVAL) {
416 *obj = ov->range_next.u;
417 return;
420 opt = lookup_scalar(ov, name, errp);
421 if (!opt) {
422 return;
424 str = opt->str;
426 /* we've gotten past lookup_scalar() */
427 assert(ov->list_mode == LM_NONE || ov->list_mode == LM_IN_PROGRESS);
429 if (parse_uint(str, &val, &endptr, 0) == 0 && val <= UINT64_MAX) {
430 if (*endptr == '\0') {
431 *obj = val;
432 processed(ov, name);
433 return;
435 if (*endptr == '-' && ov->list_mode == LM_IN_PROGRESS) {
436 unsigned long long val2;
438 str = endptr + 1;
439 if (parse_uint_full(str, &val2, 0) == 0 &&
440 val2 <= UINT64_MAX && val <= val2 &&
441 val2 - val < OPTS_VISITOR_RANGE_MAX) {
442 ov->range_next.u = val;
443 ov->range_limit.u = val2;
444 ov->list_mode = LM_UNSIGNED_INTERVAL;
446 /* as if entering on the top */
447 *obj = ov->range_next.u;
448 return;
452 error_set(errp, QERR_INVALID_PARAMETER_VALUE, opt->name,
453 (ov->list_mode == LM_NONE) ? "a uint64 value" :
454 "a uint64 value or range");
458 static void
459 opts_type_size(Visitor *v, uint64_t *obj, const char *name, Error **errp)
461 OptsVisitor *ov = DO_UPCAST(OptsVisitor, visitor, v);
462 const QemuOpt *opt;
463 int64_t val;
464 char *endptr;
466 opt = lookup_scalar(ov, name, errp);
467 if (!opt) {
468 return;
471 val = strtosz_suffix(opt->str ? opt->str : "", &endptr,
472 STRTOSZ_DEFSUFFIX_B);
473 if (val != -1 && *endptr == '\0') {
474 *obj = val;
475 processed(ov, name);
476 return;
478 error_set(errp, QERR_INVALID_PARAMETER_VALUE, opt->name,
479 "a size value representible as a non-negative int64");
483 static void
484 opts_start_optional(Visitor *v, bool *present, const char *name,
485 Error **errp)
487 OptsVisitor *ov = DO_UPCAST(OptsVisitor, visitor, v);
489 /* we only support a single mandatory scalar field in a list node */
490 assert(ov->list_mode == LM_NONE);
491 *present = (lookup_distinct(ov, name, NULL) != NULL);
495 OptsVisitor *
496 opts_visitor_new(const QemuOpts *opts)
498 OptsVisitor *ov;
500 ov = g_malloc0(sizeof *ov);
502 ov->visitor.start_struct = &opts_start_struct;
503 ov->visitor.end_struct = &opts_end_struct;
505 ov->visitor.start_list = &opts_start_list;
506 ov->visitor.next_list = &opts_next_list;
507 ov->visitor.end_list = &opts_end_list;
509 /* input_type_enum() covers both "normal" enums and union discriminators.
510 * The union discriminator field is always generated as "type"; it should
511 * match the "type" QemuOpt child of any QemuOpts.
513 * input_type_enum() will remove the looked-up key from the
514 * "unprocessed_opts" hash even if the lookup fails, because the removal is
515 * done earlier in opts_type_str(). This should be harmless.
517 ov->visitor.type_enum = &input_type_enum;
519 ov->visitor.type_int = &opts_type_int;
520 ov->visitor.type_uint64 = &opts_type_uint64;
521 ov->visitor.type_size = &opts_type_size;
522 ov->visitor.type_bool = &opts_type_bool;
523 ov->visitor.type_str = &opts_type_str;
525 /* type_number() is not filled in, but this is not the first visitor to
526 * skip some mandatory methods... */
528 ov->visitor.start_optional = &opts_start_optional;
530 ov->opts_root = opts;
532 return ov;
536 void
537 opts_visitor_cleanup(OptsVisitor *ov)
539 if (ov->unprocessed_opts != NULL) {
540 g_hash_table_destroy(ov->unprocessed_opts);
542 g_free(ov->fake_id_opt);
543 g_free(ov);
547 Visitor *
548 opts_get_visitor(OptsVisitor *ov)
550 return &ov->visitor;