[qemu/ar7.git] / qapi / opts-visitor.c
/*
 * Options Visitor
 *
 * Copyright Red Hat, Inc. 2012-2016
 *
 * Author: Laszlo Ersek <lersek@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
 * See the COPYING.LIB file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qapi/qmp/qerror.h"
#include "qapi/opts-visitor.h"
#include "qemu/queue.h"
#include "qemu/option_int.h"
#include "qapi/visitor-impl.h"

enum ListMode
{
    LM_NONE,             /* not traversing a list of repeated options */
    LM_STARTED,          /* opts_start_list() succeeded */

    LM_IN_PROGRESS,      /* opts_next_list() has been called.
                          *
                          * Generating the next list link will consume the most
                          * recently parsed QemuOpt instance of the repeated
                          * option.
                          *
                          * Parsing a value into the list link will examine the
                          * next QemuOpt instance of the repeated option, and
                          * possibly enter LM_SIGNED_INTERVAL or
                          * LM_UNSIGNED_INTERVAL.
                          */

    LM_SIGNED_INTERVAL,  /* opts_next_list() has been called.
                          *
                          * Generating the next list link will consume the most
                          * recently stored element from the signed interval,
                          * parsed from the most recent QemuOpt instance of the
                          * repeated option. This may consume QemuOpt itself
                          * and return to LM_IN_PROGRESS.
                          *
                          * Parsing a value into the list link will store the
                          * next element of the signed interval.
                          */

    LM_UNSIGNED_INTERVAL /* Same as above, only for an unsigned interval. */
};

typedef enum ListMode ListMode;

struct OptsVisitor
{
    Visitor visitor;

    /* Ownership remains with opts_visitor_new()'s caller. */
    const QemuOpts *opts_root;

    unsigned depth;

    /* Non-null iff depth is positive. Each key is a QemuOpt name. Each value
     * is a non-empty GQueue, enumerating all QemuOpt occurrences with that
     * name. */
    GHashTable *unprocessed_opts;

    /* The list currently being traversed with opts_start_list() /
     * opts_next_list(). The list must have a struct element type in the
     * schema, with a single mandatory scalar member. */
    ListMode list_mode;
    GQueue *repeated_opts;

    /* When parsing a list of repeating options as integers, values of the form
     * "a-b", representing a closed interval, are allowed. Elements in the
     * range are generated individually.
     */
    union {
        int64_t s;
        uint64_t u;
    } range_next, range_limit;
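
    /* Illustration (with a hypothetical option name, not one used by this
     * file): a repeated option given as "port=1000-1002" generates the three
     * elements 1000, 1001 and 1002. "range_next" holds the value to be
     * emitted next; "range_limit" holds the inclusive upper bound. */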

    /* If "opts_root->id" is set, reinstantiate it as a fake QemuOpt for
     * uniformity. Only its "name" and "str" fields are set. "fake_id_opt" does
     * not survive or escape the OptsVisitor object.
     */
    QemuOpt *fake_id_opt;
};

static OptsVisitor *to_ov(Visitor *v)
{
    return container_of(v, OptsVisitor, visitor);
}

static void
destroy_list(gpointer list)
{
    g_queue_free(list);
}

static void
opts_visitor_insert(GHashTable *unprocessed_opts, const QemuOpt *opt)
{
    GQueue *list;

    list = g_hash_table_lookup(unprocessed_opts, opt->name);
    if (list == NULL) {
        list = g_queue_new();

        /* GHashTable will never try to free the keys -- we supply NULL as
         * "key_destroy_func" in opts_start_struct(). Thus cast away key
         * const-ness in order to suppress gcc's warning.
         */
        g_hash_table_insert(unprocessed_opts, (gpointer)opt->name, list);
    }

    /* Similarly, destroy_list() doesn't call g_queue_free_full(). */
    g_queue_push_tail(list, (gpointer)opt);
}

static void
opts_start_struct(Visitor *v, const char *name, void **obj,
                  size_t size, Error **errp)
{
    OptsVisitor *ov = to_ov(v);
    const QemuOpt *opt;

    if (obj) {
        *obj = g_malloc0(size > 0 ? size : 1);
    }
    if (ov->depth++ > 0) {
        return;
    }

    ov->unprocessed_opts = g_hash_table_new_full(&g_str_hash, &g_str_equal,
                                                 NULL, &destroy_list);
    QTAILQ_FOREACH(opt, &ov->opts_root->head, next) {
        /* ensured by qemu-option.c::opts_do_parse() */
        assert(strcmp(opt->name, "id") != 0);
        opts_visitor_insert(ov->unprocessed_opts, opt);
    }

    if (ov->opts_root->id != NULL) {
        ov->fake_id_opt = g_malloc0(sizeof *ov->fake_id_opt);

        ov->fake_id_opt->name = g_strdup("id");
        ov->fake_id_opt->str = g_strdup(ov->opts_root->id);
        opts_visitor_insert(ov->unprocessed_opts, ov->fake_id_opt);
    }
}

static void
opts_end_struct(Visitor *v, Error **errp)
{
    OptsVisitor *ov = to_ov(v);
    GHashTableIter iter;
    GQueue *any;

    if (--ov->depth > 0) {
        return;
    }

    /* we should have processed all (distinct) QemuOpt instances */
    g_hash_table_iter_init(&iter, ov->unprocessed_opts);
    if (g_hash_table_iter_next(&iter, NULL, (void **)&any)) {
        const QemuOpt *first;

        first = g_queue_peek_head(any);
        error_setg(errp, QERR_INVALID_PARAMETER, first->name);
    }
    g_hash_table_destroy(ov->unprocessed_opts);
    ov->unprocessed_opts = NULL;
    if (ov->fake_id_opt) {
        g_free(ov->fake_id_opt->name);
        g_free(ov->fake_id_opt->str);
        g_free(ov->fake_id_opt);
    }
    ov->fake_id_opt = NULL;
}

static GQueue *
lookup_distinct(const OptsVisitor *ov, const char *name, Error **errp)
{
    GQueue *list;

    list = g_hash_table_lookup(ov->unprocessed_opts, name);
    if (!list) {
        error_setg(errp, QERR_MISSING_PARAMETER, name);
    }
    return list;
}

static void
opts_start_list(Visitor *v, const char *name, Error **errp)
{
    OptsVisitor *ov = to_ov(v);

    /* we can't traverse a list in a list */
    assert(ov->list_mode == LM_NONE);
    ov->repeated_opts = lookup_distinct(ov, name, errp);
    if (ov->repeated_opts != NULL) {
        ov->list_mode = LM_STARTED;
    }
}

static GenericList *
opts_next_list(Visitor *v, GenericList **list, size_t size)
{
    OptsVisitor *ov = to_ov(v);
    GenericList **link;

    switch (ov->list_mode) {
    case LM_STARTED:
        ov->list_mode = LM_IN_PROGRESS;
        link = list;
        break;

    case LM_SIGNED_INTERVAL:
    case LM_UNSIGNED_INTERVAL:
        link = &(*list)->next;

        if (ov->list_mode == LM_SIGNED_INTERVAL) {
            if (ov->range_next.s < ov->range_limit.s) {
                ++ov->range_next.s;
                break;
            }
        } else if (ov->range_next.u < ov->range_limit.u) {
            ++ov->range_next.u;
            break;
        }
        ov->list_mode = LM_IN_PROGRESS;
        /* range has been completed, fall through in order to pop option */

    case LM_IN_PROGRESS: {
        const QemuOpt *opt;

        opt = g_queue_pop_head(ov->repeated_opts);
        if (g_queue_is_empty(ov->repeated_opts)) {
            g_hash_table_remove(ov->unprocessed_opts, opt->name);
            return NULL;
        }
        link = &(*list)->next;
        break;
    }

    default:
        abort();
    }

    *link = g_malloc0(size);
    return *link;
}
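
/* Summary of the traversal states handled by opts_next_list():
 *  - LM_STARTED: the first link is generated and the mode switches to
 *    LM_IN_PROGRESS.
 *  - LM_SIGNED_INTERVAL / LM_UNSIGNED_INTERVAL: links keep being generated
 *    while range_next has not reached range_limit; once the interval is
 *    exhausted, the mode falls back to LM_IN_PROGRESS so that the QemuOpt
 *    which carried the range gets popped.
 *  - LM_IN_PROGRESS: the QemuOpt just consumed is popped; if it was the last
 *    occurrence of the repeated option, the list ends (NULL is returned).
 */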

static void
opts_end_list(Visitor *v)
{
    OptsVisitor *ov = to_ov(v);

    assert(ov->list_mode == LM_STARTED ||
           ov->list_mode == LM_IN_PROGRESS ||
           ov->list_mode == LM_SIGNED_INTERVAL ||
           ov->list_mode == LM_UNSIGNED_INTERVAL);
    ov->repeated_opts = NULL;
    ov->list_mode = LM_NONE;
}

static const QemuOpt *
lookup_scalar(const OptsVisitor *ov, const char *name, Error **errp)
{
    if (ov->list_mode == LM_NONE) {
        GQueue *list;

        /* the last occurrence of any QemuOpt takes effect when queried by name
         */
        list = lookup_distinct(ov, name, errp);
        return list ? g_queue_peek_tail(list) : NULL;
    }
    assert(ov->list_mode == LM_IN_PROGRESS);
    return g_queue_peek_head(ov->repeated_opts);
}

static void
processed(OptsVisitor *ov, const char *name)
{
    if (ov->list_mode == LM_NONE) {
        g_hash_table_remove(ov->unprocessed_opts, name);
        return;
    }
    assert(ov->list_mode == LM_IN_PROGRESS);
    /* do nothing */
}

static void
opts_type_str(Visitor *v, const char *name, char **obj, Error **errp)
{
    OptsVisitor *ov = to_ov(v);
    const QemuOpt *opt;

    opt = lookup_scalar(ov, name, errp);
    if (!opt) {
        return;
    }
    *obj = g_strdup(opt->str ? opt->str : "");
    processed(ov, name);
}

/* mimics qemu-option.c::parse_option_bool() */
static void
opts_type_bool(Visitor *v, const char *name, bool *obj, Error **errp)
{
    OptsVisitor *ov = to_ov(v);
    const QemuOpt *opt;

    opt = lookup_scalar(ov, name, errp);
    if (!opt) {
        return;
    }

    if (opt->str) {
        if (strcmp(opt->str, "on") == 0 ||
            strcmp(opt->str, "yes") == 0 ||
            strcmp(opt->str, "y") == 0) {
            *obj = true;
        } else if (strcmp(opt->str, "off") == 0 ||
                   strcmp(opt->str, "no") == 0 ||
                   strcmp(opt->str, "n") == 0) {
            *obj = false;
        } else {
            error_setg(errp, QERR_INVALID_PARAMETER_VALUE, opt->name,
                       "on|yes|y|off|no|n");
            return;
        }
    } else {
        *obj = true;
    }

    processed(ov, name);
}

static void
opts_type_int64(Visitor *v, const char *name, int64_t *obj, Error **errp)
{
    OptsVisitor *ov = to_ov(v);
    const QemuOpt *opt;
    const char *str;
    long long val;
    char *endptr;

    if (ov->list_mode == LM_SIGNED_INTERVAL) {
        *obj = ov->range_next.s;
        return;
    }

    opt = lookup_scalar(ov, name, errp);
    if (!opt) {
        return;
    }
    str = opt->str ? opt->str : "";

    /* we've gotten past lookup_scalar() */
    assert(ov->list_mode == LM_NONE || ov->list_mode == LM_IN_PROGRESS);

    errno = 0;
    val = strtoll(str, &endptr, 0);
    if (errno == 0 && endptr > str && INT64_MIN <= val && val <= INT64_MAX) {
        if (*endptr == '\0') {
            *obj = val;
            processed(ov, name);
            return;
        }
        if (*endptr == '-' && ov->list_mode == LM_IN_PROGRESS) {
            long long val2;

            str = endptr + 1;
            val2 = strtoll(str, &endptr, 0);
            if (errno == 0 && endptr > str && *endptr == '\0' &&
                INT64_MIN <= val2 && val2 <= INT64_MAX && val <= val2 &&
                (val > INT64_MAX - OPTS_VISITOR_RANGE_MAX ||
                 val2 < val + OPTS_VISITOR_RANGE_MAX)) {
                ov->range_next.s = val;
                ov->range_limit.s = val2;
                ov->list_mode = LM_SIGNED_INTERVAL;

                /* as if entering on the top */
                *obj = ov->range_next.s;
                return;
            }
        }
    }
    error_setg(errp, QERR_INVALID_PARAMETER_VALUE, opt->name,
               (ov->list_mode == LM_NONE) ? "an int64 value" :
                                            "an int64 value or range");
}
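
/* Note on the range guard above: the interval "a-b" is accepted only when it
 * contains at most OPTS_VISITOR_RANGE_MAX elements (the constant is defined in
 * qapi/opts-visitor.h). The disjunct "val > INT64_MAX - OPTS_VISITOR_RANGE_MAX"
 * covers the case where "val + OPTS_VISITOR_RANGE_MAX" would overflow; there,
 * any val2 <= INT64_MAX is necessarily within the allowed length, so the
 * overflowing sum never needs to be computed. opts_type_uint64() below can use
 * the simpler "val2 - val < OPTS_VISITOR_RANGE_MAX" form because the unsigned
 * subtraction cannot overflow once "val <= val2" has been established.
 */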

static void
opts_type_uint64(Visitor *v, const char *name, uint64_t *obj, Error **errp)
{
    OptsVisitor *ov = to_ov(v);
    const QemuOpt *opt;
    const char *str;
    unsigned long long val;
    char *endptr;

    if (ov->list_mode == LM_UNSIGNED_INTERVAL) {
        *obj = ov->range_next.u;
        return;
    }

    opt = lookup_scalar(ov, name, errp);
    if (!opt) {
        return;
    }
    str = opt->str;

    /* we've gotten past lookup_scalar() */
    assert(ov->list_mode == LM_NONE || ov->list_mode == LM_IN_PROGRESS);

    if (parse_uint(str, &val, &endptr, 0) == 0 && val <= UINT64_MAX) {
        if (*endptr == '\0') {
            *obj = val;
            processed(ov, name);
            return;
        }
        if (*endptr == '-' && ov->list_mode == LM_IN_PROGRESS) {
            unsigned long long val2;

            str = endptr + 1;
            if (parse_uint_full(str, &val2, 0) == 0 &&
                val2 <= UINT64_MAX && val <= val2 &&
                val2 - val < OPTS_VISITOR_RANGE_MAX) {
                ov->range_next.u = val;
                ov->range_limit.u = val2;
                ov->list_mode = LM_UNSIGNED_INTERVAL;

                /* as if entering on the top */
                *obj = ov->range_next.u;
                return;
            }
        }
    }
    error_setg(errp, QERR_INVALID_PARAMETER_VALUE, opt->name,
               (ov->list_mode == LM_NONE) ? "a uint64 value" :
                                            "a uint64 value or range");
}

static void
opts_type_size(Visitor *v, const char *name, uint64_t *obj, Error **errp)
{
    OptsVisitor *ov = to_ov(v);
    const QemuOpt *opt;
    int64_t val;
    char *endptr;

    opt = lookup_scalar(ov, name, errp);
    if (!opt) {
        return;
    }

    val = qemu_strtosz_suffix(opt->str ? opt->str : "", &endptr,
                              QEMU_STRTOSZ_DEFSUFFIX_B);
    if (val < 0 || *endptr) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, opt->name,
                   "a size value representable as a non-negative int64");
        return;
    }

    *obj = val;
    processed(ov, name);
}

static void
opts_optional(Visitor *v, const char *name, bool *present)
{
    OptsVisitor *ov = to_ov(v);

    /* we only support a single mandatory scalar field in a list node */
    assert(ov->list_mode == LM_NONE);
    *present = (lookup_distinct(ov, name, NULL) != NULL);
}

OptsVisitor *
opts_visitor_new(const QemuOpts *opts)
{
    OptsVisitor *ov;

    ov = g_malloc0(sizeof *ov);

    ov->visitor.start_struct = &opts_start_struct;
    ov->visitor.end_struct = &opts_end_struct;

    ov->visitor.start_list = &opts_start_list;
    ov->visitor.next_list = &opts_next_list;
    ov->visitor.end_list = &opts_end_list;

    /* input_type_enum() covers both "normal" enums and union discriminators.
     * The union discriminator field is always generated as "type"; it should
     * match the "type" QemuOpt child of any QemuOpts.
     *
     * input_type_enum() will remove the looked-up key from the
     * "unprocessed_opts" hash even if the lookup fails, because the removal is
     * done earlier in opts_type_str(). This should be harmless.
     */
    ov->visitor.type_enum = &input_type_enum;

    ov->visitor.type_int64 = &opts_type_int64;
    ov->visitor.type_uint64 = &opts_type_uint64;
    ov->visitor.type_size = &opts_type_size;
    ov->visitor.type_bool = &opts_type_bool;
    ov->visitor.type_str = &opts_type_str;

    /* type_number() is not filled in, but this is not the first visitor to
     * skip some mandatory methods... */

    ov->visitor.optional = &opts_optional;

    ov->opts_root = opts;

    return ov;
}

void
opts_visitor_cleanup(OptsVisitor *ov)
{
    if (ov->unprocessed_opts != NULL) {
        g_hash_table_destroy(ov->unprocessed_opts);
    }
    g_free(ov->fake_id_opt);
    g_free(ov);
}

Visitor *
opts_get_visitor(OptsVisitor *ov)
{
    return &ov->visitor;
}
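
/*
 * Minimal usage sketch (not part of this file): "Foo" and visit_type_Foo()
 * stand in for some QAPI-generated struct and its generated visit function;
 * the real call sites live in the code that consumes a given QemuOpts group.
 *
 *     Error *err = NULL;
 *     OptsVisitor *ov = opts_visitor_new(opts);
 *     Foo *foo = NULL;
 *
 *     visit_type_Foo(opts_get_visitor(ov), NULL, &foo, &err);
 *     opts_visitor_cleanup(ov);
 *     if (err) {
 *         error_report_err(err);
 *     }
 */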