[qemu.git] qapi/opts-visitor.c

/*
 * Options Visitor
 *
 * Copyright Red Hat, Inc. 2012, 2013
 *
 * Author: Laszlo Ersek <lersek@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
 * See the COPYING.LIB file in the top-level directory.
 */

#include "qemu-common.h"
#include "qapi/qmp/qerror.h"
#include "qapi/opts-visitor.h"
#include "qemu/queue.h"
#include "qemu/option_int.h"
#include "qapi/visitor-impl.h"
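
/*
 * Typical lifecycle, shown only as a rough sketch (it is not part of this
 * file): "Foo" and visit_type_Foo() stand for any QAPI-generated type and
 * its generated visit function, and "opts" for a QemuOpts object obtained
 * from the usual command-line option parsing code.
 *
 *     Foo *foo = NULL;
 *     Error *err = NULL;
 *     OptsVisitor *ov = opts_visitor_new(opts);
 *
 *     visit_type_Foo(opts_get_visitor(ov), &foo, NULL, &err);
 *     opts_visitor_cleanup(ov);
 *
 * The visitor walks the schema-defined layout of Foo and looks up each
 * member by name among the QemuOpt instances contained in "opts".
 */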

enum ListMode
{
    LM_NONE,             /* not traversing a list of repeated options */
    LM_STARTED,          /* opts_start_list() succeeded */

    LM_IN_PROGRESS,      /* opts_next_list() has been called.
                          *
                          * Generating the next list link will consume the most
                          * recently parsed QemuOpt instance of the repeated
                          * option.
                          *
                          * Parsing a value into the list link will examine the
                          * next QemuOpt instance of the repeated option, and
                          * possibly enter LM_SIGNED_INTERVAL or
                          * LM_UNSIGNED_INTERVAL.
                          */

    LM_SIGNED_INTERVAL,  /* opts_next_list() has been called.
                          *
                          * Generating the next list link will consume the most
                          * recently stored element from the signed interval,
                          * parsed from the most recent QemuOpt instance of the
                          * repeated option. This may consume the QemuOpt itself
                          * and return to LM_IN_PROGRESS.
                          *
                          * Parsing a value into the list link will store the
                          * next element of the signed interval.
                          */

    LM_UNSIGNED_INTERVAL /* Same as above, only for an unsigned interval. */
};

typedef enum ListMode ListMode;
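
/*
 * Illustration (not taken from the original source): given a repeated option
 * such as "cpus=1-3,cpus=7", traversing the "cpus" list emits 1, 2 and 3
 * while in LM_SIGNED_INTERVAL (or LM_UNSIGNED_INTERVAL for unsigned members),
 * then drops back to LM_IN_PROGRESS, consumes the next QemuOpt, and emits 7.
 */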

struct OptsVisitor
{
    Visitor visitor;

    /* Ownership remains with opts_visitor_new()'s caller. */
    const QemuOpts *opts_root;

    unsigned depth;

    /* Non-null iff depth is positive. Each key is a QemuOpt name. Each value
     * is a non-empty GQueue, enumerating all QemuOpt occurrences with that
     * name. */
    GHashTable *unprocessed_opts;

    /* The list currently being traversed with opts_start_list() /
     * opts_next_list(). The list must have a struct element type in the
     * schema, with a single mandatory scalar member. */
    ListMode list_mode;
    GQueue *repeated_opts;

    /* When parsing a list of repeating options as integers, values of the form
     * "a-b", representing a closed interval, are allowed. Elements in the
     * range are generated individually.
     */
    union {
        int64_t s;
        uint64_t u;
    } range_next, range_limit;

    /* If "opts_root->id" is set, reinstantiate it as a fake QemuOpt for
     * uniformity. Only its "name" and "str" fields are set. "fake_id_opt" does
     * not survive or escape the OptsVisitor object.
     */
    QemuOpt *fake_id_opt;
};

static void
destroy_list(gpointer list)
{
    g_queue_free(list);
}
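
/* Add "opt" to the name -> GQueue-of-occurrences multimap. Repeated options
 * keep their command-line order because each new occurrence is appended at
 * the tail of its name's queue.
 */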

static void
opts_visitor_insert(GHashTable *unprocessed_opts, const QemuOpt *opt)
{
    GQueue *list;

    list = g_hash_table_lookup(unprocessed_opts, opt->name);
    if (list == NULL) {
        list = g_queue_new();

        /* GHashTable will never try to free the keys -- we supply NULL as
         * "key_destroy_func" in opts_start_struct(). Thus cast away key
         * const-ness in order to suppress gcc's warning.
         */
        g_hash_table_insert(unprocessed_opts, (gpointer)opt->name, list);
    }

    /* Similarly, destroy_list() doesn't call g_queue_free_full(). */
    g_queue_push_tail(list, (gpointer)opt);
}
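
/* Descriptive note: only the outermost start_struct (depth 0 -> 1) builds the
 * "unprocessed_opts" hash from the QemuOpts tree; nested structs merely
 * allocate their output object. The QemuOpts "id", if present, is re-exposed
 * as an ordinary "id" option via "fake_id_opt" so the schema can consume it
 * like any other member.
 */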

static void
opts_start_struct(Visitor *v, void **obj, const char *kind,
                  const char *name, size_t size, Error **errp)
{
    OptsVisitor *ov = DO_UPCAST(OptsVisitor, visitor, v);
    const QemuOpt *opt;

    if (obj) {
        *obj = g_malloc0(size > 0 ? size : 1);
    }
    if (ov->depth++ > 0) {
        return;
    }

    ov->unprocessed_opts = g_hash_table_new_full(&g_str_hash, &g_str_equal,
                                                 NULL, &destroy_list);
    QTAILQ_FOREACH(opt, &ov->opts_root->head, next) {
        /* ensured by qemu-option.c::opts_do_parse() */
        assert(strcmp(opt->name, "id") != 0);

        opts_visitor_insert(ov->unprocessed_opts, opt);
    }

    if (ov->opts_root->id != NULL) {
        ov->fake_id_opt = g_malloc0(sizeof *ov->fake_id_opt);

        ov->fake_id_opt->name = g_strdup("id");
        ov->fake_id_opt->str = g_strdup(ov->opts_root->id);
        opts_visitor_insert(ov->unprocessed_opts, ov->fake_id_opt);
    }
}

static gboolean
ghr_true(gpointer ign_key, gpointer ign_value, gpointer ign_user_data)
{
    return TRUE;
}
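
/* When the outermost struct is closed, every QemuOpt name should have been
 * consumed by the schema walk; any leftover entry in "unprocessed_opts" is
 * reported as an invalid parameter (only the first one found is named).
 */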

static void
opts_end_struct(Visitor *v, Error **errp)
{
    OptsVisitor *ov = DO_UPCAST(OptsVisitor, visitor, v);
    GQueue *any;

    if (--ov->depth > 0) {
        return;
    }

    /* we should have processed all (distinct) QemuOpt instances */
    any = g_hash_table_find(ov->unprocessed_opts, &ghr_true, NULL);
    if (any) {
        const QemuOpt *first;

        first = g_queue_peek_head(any);
        error_setg(errp, QERR_INVALID_PARAMETER, first->name);
    }
    g_hash_table_destroy(ov->unprocessed_opts);
    ov->unprocessed_opts = NULL;
    if (ov->fake_id_opt) {
        g_free(ov->fake_id_opt->name);
        g_free(ov->fake_id_opt->str);
        g_free(ov->fake_id_opt);
    }
    ov->fake_id_opt = NULL;
}

static GQueue *
lookup_distinct(const OptsVisitor *ov, const char *name, Error **errp)
{
    GQueue *list;

    list = g_hash_table_lookup(ov->unprocessed_opts, name);
    if (!list) {
        error_setg(errp, QERR_MISSING_PARAMETER, name);
    }
    return list;
}

static void
opts_start_list(Visitor *v, const char *name, Error **errp)
{
    OptsVisitor *ov = DO_UPCAST(OptsVisitor, visitor, v);

    /* we can't traverse a list in a list */
    assert(ov->list_mode == LM_NONE);
    ov->repeated_opts = lookup_distinct(ov, name, errp);
    if (ov->repeated_opts != NULL) {
        ov->list_mode = LM_STARTED;
    }
}

static GenericList *
opts_next_list(Visitor *v, GenericList **list, Error **errp)
{
    OptsVisitor *ov = DO_UPCAST(OptsVisitor, visitor, v);
    GenericList **link;

    switch (ov->list_mode) {
    case LM_STARTED:
        ov->list_mode = LM_IN_PROGRESS;
        link = list;
        break;

    case LM_SIGNED_INTERVAL:
    case LM_UNSIGNED_INTERVAL:
        link = &(*list)->next;

        if (ov->list_mode == LM_SIGNED_INTERVAL) {
            if (ov->range_next.s < ov->range_limit.s) {
                ++ov->range_next.s;
                break;
            }
        } else if (ov->range_next.u < ov->range_limit.u) {
            ++ov->range_next.u;
            break;
        }
        ov->list_mode = LM_IN_PROGRESS;
        /* range has been completed, fall through in order to pop option */

    case LM_IN_PROGRESS: {
        const QemuOpt *opt;

        opt = g_queue_pop_head(ov->repeated_opts);
        if (g_queue_is_empty(ov->repeated_opts)) {
            g_hash_table_remove(ov->unprocessed_opts, opt->name);
            return NULL;
        }
        link = &(*list)->next;
        break;
    }

    default:
        abort();
    }

    *link = g_malloc0(sizeof **link);
    return *link;
}

static void
opts_end_list(Visitor *v, Error **errp)
{
    OptsVisitor *ov = DO_UPCAST(OptsVisitor, visitor, v);

    assert(ov->list_mode == LM_STARTED ||
           ov->list_mode == LM_IN_PROGRESS ||
           ov->list_mode == LM_SIGNED_INTERVAL ||
           ov->list_mode == LM_UNSIGNED_INTERVAL);
    ov->repeated_opts = NULL;
    ov->list_mode = LM_NONE;
}
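
/* Descriptive note: outside of list traversal the *last* occurrence of a
 * repeated scalar option wins (peek at the tail of the per-name queue);
 * during list traversal each occurrence is visited in turn (peek at the
 * head, which is popped later by opts_next_list()).
 */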

static const QemuOpt *
lookup_scalar(const OptsVisitor *ov, const char *name, Error **errp)
{
    if (ov->list_mode == LM_NONE) {
        GQueue *list;

        /* the last occurrence of any QemuOpt takes effect when queried by name
         */
        list = lookup_distinct(ov, name, errp);
        return list ? g_queue_peek_tail(list) : NULL;
    }
    assert(ov->list_mode == LM_IN_PROGRESS);
    return g_queue_peek_head(ov->repeated_opts);
}

static void
processed(OptsVisitor *ov, const char *name)
{
    if (ov->list_mode == LM_NONE) {
        g_hash_table_remove(ov->unprocessed_opts, name);
        return;
    }
    assert(ov->list_mode == LM_IN_PROGRESS);
    /* do nothing */
}

static void
opts_type_str(Visitor *v, char **obj, const char *name, Error **errp)
{
    OptsVisitor *ov = DO_UPCAST(OptsVisitor, visitor, v);
    const QemuOpt *opt;

    opt = lookup_scalar(ov, name, errp);
    if (!opt) {
        return;
    }
    *obj = g_strdup(opt->str ? opt->str : "");
    processed(ov, name);
}
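
/* Illustrative spellings (option names below are made up): "share=on",
 * "share=yes" and "share=y" parse as true; "share=off", "share=no" and
 * "share=n" parse as false; a bare "share" with no value also counts as true.
 */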
/* mimics qemu-option.c::parse_option_bool() */
static void
opts_type_bool(Visitor *v, bool *obj, const char *name, Error **errp)
{
    OptsVisitor *ov = DO_UPCAST(OptsVisitor, visitor, v);
    const QemuOpt *opt;

    opt = lookup_scalar(ov, name, errp);
    if (!opt) {
        return;
    }

    if (opt->str) {
        if (strcmp(opt->str, "on") == 0 ||
            strcmp(opt->str, "yes") == 0 ||
            strcmp(opt->str, "y") == 0) {
            *obj = true;
        } else if (strcmp(opt->str, "off") == 0 ||
                   strcmp(opt->str, "no") == 0 ||
                   strcmp(opt->str, "n") == 0) {
            *obj = false;
        } else {
            error_setg(errp, QERR_INVALID_PARAMETER_VALUE, opt->name,
                       "on|yes|y|off|no|n");
            return;
        }
    } else {
        *obj = true;
    }

    processed(ov, name);
}
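
/* Besides plain int64 values, a list member may be given as a closed
 * interval "a-b"; the interval is expanded element by element, and its size
 * is capped at OPTS_VISITOR_RANGE_MAX elements so that a huge range cannot
 * blow up the resulting list.
 */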

static void
opts_type_int(Visitor *v, int64_t *obj, const char *name, Error **errp)
{
    OptsVisitor *ov = DO_UPCAST(OptsVisitor, visitor, v);
    const QemuOpt *opt;
    const char *str;
    long long val;
    char *endptr;

    if (ov->list_mode == LM_SIGNED_INTERVAL) {
        *obj = ov->range_next.s;
        return;
    }

    opt = lookup_scalar(ov, name, errp);
    if (!opt) {
        return;
    }
    str = opt->str ? opt->str : "";

    /* we've gotten past lookup_scalar() */
    assert(ov->list_mode == LM_NONE || ov->list_mode == LM_IN_PROGRESS);

    errno = 0;
    val = strtoll(str, &endptr, 0);
    if (errno == 0 && endptr > str && INT64_MIN <= val && val <= INT64_MAX) {
        if (*endptr == '\0') {
            *obj = val;
            processed(ov, name);
            return;
        }
        if (*endptr == '-' && ov->list_mode == LM_IN_PROGRESS) {
            long long val2;

            str = endptr + 1;
            val2 = strtoll(str, &endptr, 0);
            if (errno == 0 && endptr > str && *endptr == '\0' &&
                INT64_MIN <= val2 && val2 <= INT64_MAX && val <= val2 &&
                (val > INT64_MAX - OPTS_VISITOR_RANGE_MAX ||
                 val2 < val + OPTS_VISITOR_RANGE_MAX)) {
                ov->range_next.s = val;
                ov->range_limit.s = val2;
                ov->list_mode = LM_SIGNED_INTERVAL;

                /* as if entering on the top */
                *obj = ov->range_next.s;
                return;
            }
        }
    }
    error_setg(errp, QERR_INVALID_PARAMETER_VALUE, opt->name,
               (ov->list_mode == LM_NONE) ? "an int64 value" :
                                            "an int64 value or range");
}

static void
opts_type_uint64(Visitor *v, uint64_t *obj, const char *name, Error **errp)
{
    OptsVisitor *ov = DO_UPCAST(OptsVisitor, visitor, v);
    const QemuOpt *opt;
    const char *str;
    unsigned long long val;
    char *endptr;

    if (ov->list_mode == LM_UNSIGNED_INTERVAL) {
        *obj = ov->range_next.u;
        return;
    }

    opt = lookup_scalar(ov, name, errp);
    if (!opt) {
        return;
    }
    str = opt->str;

    /* we've gotten past lookup_scalar() */
    assert(ov->list_mode == LM_NONE || ov->list_mode == LM_IN_PROGRESS);

    if (parse_uint(str, &val, &endptr, 0) == 0 && val <= UINT64_MAX) {
        if (*endptr == '\0') {
            *obj = val;
            processed(ov, name);
            return;
        }
        if (*endptr == '-' && ov->list_mode == LM_IN_PROGRESS) {
            unsigned long long val2;

            str = endptr + 1;
            if (parse_uint_full(str, &val2, 0) == 0 &&
                val2 <= UINT64_MAX && val <= val2 &&
                val2 - val < OPTS_VISITOR_RANGE_MAX) {
                ov->range_next.u = val;
                ov->range_limit.u = val2;
                ov->list_mode = LM_UNSIGNED_INTERVAL;

                /* as if entering on the top */
                *obj = ov->range_next.u;
                return;
            }
        }
    }
    error_setg(errp, QERR_INVALID_PARAMETER_VALUE, opt->name,
               (ov->list_mode == LM_NONE) ? "a uint64 value" :
                                            "a uint64 value or range");
}

static void
opts_type_size(Visitor *v, uint64_t *obj, const char *name, Error **errp)
{
    OptsVisitor *ov = DO_UPCAST(OptsVisitor, visitor, v);
    const QemuOpt *opt;
    int64_t val;
    char *endptr;

    opt = lookup_scalar(ov, name, errp);
    if (!opt) {
        return;
    }

    val = qemu_strtosz_suffix(opt->str ? opt->str : "", &endptr,
                              QEMU_STRTOSZ_DEFSUFFIX_B);
    if (val < 0 || *endptr) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, opt->name,
                   "a size value representable as a non-negative int64");
        return;
    }

    *obj = val;
    processed(ov, name);
}
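
/* Probe for an optional member: the lookup passes a NULL errp on purpose, so
 * a missing optional option is simply reported as absent rather than raised
 * as a missing-parameter error.
 */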

static void
opts_optional(Visitor *v, bool *present, const char *name, Error **errp)
{
    OptsVisitor *ov = DO_UPCAST(OptsVisitor, visitor, v);

    /* we only support a single mandatory scalar field in a list node */
    assert(ov->list_mode == LM_NONE);
    *present = (lookup_distinct(ov, name, NULL) != NULL);
}

OptsVisitor *
opts_visitor_new(const QemuOpts *opts)
{
    OptsVisitor *ov;

    ov = g_malloc0(sizeof *ov);

    ov->visitor.start_struct = &opts_start_struct;
    ov->visitor.end_struct = &opts_end_struct;

    ov->visitor.start_list = &opts_start_list;
    ov->visitor.next_list = &opts_next_list;
    ov->visitor.end_list = &opts_end_list;

    /* input_type_enum() covers both "normal" enums and union discriminators.
     * The union discriminator field is always generated as "type"; it should
     * match the "type" QemuOpt child of any QemuOpts.
     *
     * input_type_enum() will remove the looked-up key from the
     * "unprocessed_opts" hash even if the lookup fails, because the removal is
     * done earlier in opts_type_str(). This should be harmless.
     */
    ov->visitor.type_enum = &input_type_enum;

    ov->visitor.type_int = &opts_type_int;
    ov->visitor.type_uint64 = &opts_type_uint64;
    ov->visitor.type_size = &opts_type_size;
    ov->visitor.type_bool = &opts_type_bool;
    ov->visitor.type_str = &opts_type_str;

    /* type_number() is not filled in, but this is not the first visitor to
     * skip some mandatory methods... */

    ov->visitor.optional = &opts_optional;

    ov->opts_root = opts;

    return ov;
}

void
opts_visitor_cleanup(OptsVisitor *ov)
{
    if (ov->unprocessed_opts != NULL) {
        g_hash_table_destroy(ov->unprocessed_opts);
    }
    g_free(ov->fake_id_opt);
    g_free(ov);
}

Visitor *
opts_get_visitor(OptsVisitor *ov)
{
    return &ov->visitor;
}