/*
 * Options Visitor
 *
 * Copyright Red Hat, Inc. 2012, 2013
 *
 * Author: Laszlo Ersek <lersek@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
 * See the COPYING.LIB file in the top-level directory.
 */

#include "qemu-common.h"
#include "qapi/qmp/qerror.h"
#include "qapi/opts-visitor.h"
#include "qemu/queue.h"
#include "qemu/option_int.h"
#include "qapi/visitor-impl.h"

enum ListMode
{
    LM_NONE,             /* not traversing a list of repeated options */
    LM_STARTED,          /* opts_start_list() succeeded */

    LM_IN_PROGRESS,      /* opts_next_list() has been called.
                          *
                          * Generating the next list link will consume the most
                          * recently parsed QemuOpt instance of the repeated
                          * option.
                          *
                          * Parsing a value into the list link will examine the
                          * next QemuOpt instance of the repeated option, and
                          * possibly enter LM_SIGNED_INTERVAL or
                          * LM_UNSIGNED_INTERVAL.
                          */

    LM_SIGNED_INTERVAL,  /* opts_next_list() has been called.
                          *
                          * Generating the next list link will consume the most
                          * recently stored element from the signed interval,
                          * parsed from the most recent QemuOpt instance of the
                          * repeated option. This may consume QemuOpt itself
                          * and return to LM_IN_PROGRESS.
                          *
                          * Parsing a value into the list link will store the
                          * next element of the signed interval.
                          */

    LM_UNSIGNED_INTERVAL /* Same as above, only for an unsigned interval. */
};

typedef enum ListMode ListMode;

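/*
 * Illustrative example (hypothetical option string, parsed as unsigned
 * integers): for a repeated option given as "cpus=0-1,cpus=9", the visitor
 * moves through the modes above roughly as follows:
 *
 *     opts_start_list("cpus")          LM_NONE              -> LM_STARTED
 *     opts_next_list()                 LM_STARTED           -> LM_IN_PROGRESS
 *     opts_type_uint64() parses "0-1"  LM_IN_PROGRESS       -> LM_UNSIGNED_INTERVAL,
 *                                      yields 0
 *     opts_next_list()/type_uint64()   stays in LM_UNSIGNED_INTERVAL, yields 1
 *     opts_next_list()                 LM_UNSIGNED_INTERVAL -> LM_IN_PROGRESS,
 *                                      pops the "0-1" QemuOpt
 *     opts_type_uint64() parses "9"    yields 9
 *     opts_next_list()                 pops "9", ends the list
 *     opts_end_list()                  -> LM_NONE
 */
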
struct OptsVisitor
{
    Visitor visitor;

    /* Ownership remains with opts_visitor_new()'s caller. */
    const QemuOpts *opts_root;

    unsigned depth;

    /* Non-null iff depth is positive. Each key is a QemuOpt name. Each value
     * is a non-empty GQueue, enumerating all QemuOpt occurrences with that
     * name. */
    GHashTable *unprocessed_opts;

    /* The list currently being traversed with opts_start_list() /
     * opts_next_list(). The list must have a struct element type in the
     * schema, with a single mandatory scalar member. */
    ListMode list_mode;
    GQueue *repeated_opts;

    /* When parsing a list of repeating options as integers, values of the
     * form "a-b", representing a closed interval, are allowed. Elements in
     * the range are generated individually.
     */
    union {
        int64_t s;
        uint64_t u;
    } range_next, range_limit;

    /* If "opts_root->id" is set, reinstantiate it as a fake QemuOpt for
     * uniformity. Only its "name" and "str" fields are set. "fake_id_opt"
     * does not survive or escape the OptsVisitor object.
     */
    QemuOpt *fake_id_opt;
};

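/*
 * Illustration (hypothetical values): if the caller's QemuOpts were built
 * from a string such as "driver=foo,id=bar", then "id" is stored in
 * opts_root->id rather than in opts_root->head, so opts_start_struct()
 * re-materializes it as
 *
 *     fake_id_opt->name = "id";    fake_id_opt->str = "bar";
 *
 * and inserts it into "unprocessed_opts" alongside the real options.
 */
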
static void
destroy_list(gpointer list)
{
    g_queue_free(list);
}


static void
opts_visitor_insert(GHashTable *unprocessed_opts, const QemuOpt *opt)
{
    GQueue *list;

    list = g_hash_table_lookup(unprocessed_opts, opt->name);
    if (list == NULL) {
        list = g_queue_new();

        /* GHashTable will never try to free the keys -- we supply NULL as
         * "key_destroy_func" in opts_start_struct(). Thus cast away key
         * const-ness in order to suppress gcc's warning.
         */
        g_hash_table_insert(unprocessed_opts, (gpointer)opt->name, list);
    }

    /* Similarly, destroy_list() doesn't call g_queue_free_full(). */
    g_queue_push_tail(list, (gpointer)opt);
}

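/*
 * Resulting table shape, for a hypothetical input "cpus=1,cpus=5-6,mem=128":
 *
 *     "cpus" -> GQueue [ QemuOpt{name="cpus", str="1"},
 *                        QemuOpt{name="cpus", str="5-6"} ]
 *     "mem"  -> GQueue [ QemuOpt{name="mem",  str="128"} ]
 *
 * Repeated occurrences of a name queue up in parse order; this is what
 * opts_start_list() / opts_next_list() traverse later.
 */
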
static void
opts_start_struct(Visitor *v, void **obj, const char *kind,
                  const char *name, size_t size, Error **errp)
{
    OptsVisitor *ov = DO_UPCAST(OptsVisitor, visitor, v);
    const QemuOpt *opt;

    if (obj) {
        *obj = g_malloc0(size > 0 ? size : 1);
    }
    if (ov->depth++ > 0) {
        return;
    }

    ov->unprocessed_opts = g_hash_table_new_full(&g_str_hash, &g_str_equal,
                                                 NULL, &destroy_list);
    QTAILQ_FOREACH(opt, &ov->opts_root->head, next) {
        /* ensured by qemu-option.c::opts_do_parse() */
        assert(strcmp(opt->name, "id") != 0);

        opts_visitor_insert(ov->unprocessed_opts, opt);
    }

    if (ov->opts_root->id != NULL) {
        ov->fake_id_opt = g_malloc0(sizeof *ov->fake_id_opt);

        ov->fake_id_opt->name = g_strdup("id");
        ov->fake_id_opt->str = g_strdup(ov->opts_root->id);
        opts_visitor_insert(ov->unprocessed_opts, ov->fake_id_opt);
    }
}

static gboolean
ghr_true(gpointer ign_key, gpointer ign_value, gpointer ign_user_data)
{
    return TRUE;
}

static void
opts_end_struct(Visitor *v, Error **errp)
{
    OptsVisitor *ov = DO_UPCAST(OptsVisitor, visitor, v);
    GQueue *any;

    if (--ov->depth > 0) {
        return;
    }

    /* we should have processed all (distinct) QemuOpt instances */
    any = g_hash_table_find(ov->unprocessed_opts, &ghr_true, NULL);
    if (any) {
        const QemuOpt *first;

        first = g_queue_peek_head(any);
        error_setg(errp, QERR_INVALID_PARAMETER, first->name);
    }
    g_hash_table_destroy(ov->unprocessed_opts);
    ov->unprocessed_opts = NULL;
    if (ov->fake_id_opt) {
        g_free(ov->fake_id_opt->name);
        g_free(ov->fake_id_opt->str);
        g_free(ov->fake_id_opt);
    }
    ov->fake_id_opt = NULL;
}

static GQueue *
lookup_distinct(const OptsVisitor *ov, const char *name, Error **errp)
{
    GQueue *list;

    list = g_hash_table_lookup(ov->unprocessed_opts, name);
    if (!list) {
        error_setg(errp, QERR_MISSING_PARAMETER, name);
    }
    return list;
}

static void
opts_start_list(Visitor *v, const char *name, Error **errp)
{
    OptsVisitor *ov = DO_UPCAST(OptsVisitor, visitor, v);

    /* we can't traverse a list in a list */
    assert(ov->list_mode == LM_NONE);
    ov->repeated_opts = lookup_distinct(ov, name, errp);
    if (ov->repeated_opts != NULL) {
        ov->list_mode = LM_STARTED;
    }
}

static GenericList *
opts_next_list(Visitor *v, GenericList **list, Error **errp)
{
    OptsVisitor *ov = DO_UPCAST(OptsVisitor, visitor, v);
    GenericList **link;

    switch (ov->list_mode) {
    case LM_STARTED:
        ov->list_mode = LM_IN_PROGRESS;
        link = list;
        break;

    case LM_SIGNED_INTERVAL:
    case LM_UNSIGNED_INTERVAL:
        link = &(*list)->next;

        if (ov->list_mode == LM_SIGNED_INTERVAL) {
            if (ov->range_next.s < ov->range_limit.s) {
                ++ov->range_next.s;
                break;
            }
        } else if (ov->range_next.u < ov->range_limit.u) {
            ++ov->range_next.u;
            break;
        }
        ov->list_mode = LM_IN_PROGRESS;
        /* range has been completed, fall through in order to pop option */

    case LM_IN_PROGRESS: {
        const QemuOpt *opt;

        opt = g_queue_pop_head(ov->repeated_opts);
        if (g_queue_is_empty(ov->repeated_opts)) {
            g_hash_table_remove(ov->unprocessed_opts, opt->name);
            return NULL;
        }
        link = &(*list)->next;
        break;
    }

    default:
        abort();
    }

    *link = g_malloc0(sizeof **link);
    return *link;
}

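/*
 * For orientation, a sketch of how a QAPI-generated list visit of this era
 * drives the callbacks above (the concrete element type, here uint16List, is
 * only an example):
 *
 *     GenericList *i, **prev;
 *
 *     visit_start_list(v, name, errp);
 *     for (prev = (GenericList **)obj;
 *          (i = visit_next_list(v, prev, errp)) != NULL;
 *          prev = &i) {
 *         uint16List *native_i = (uint16List *)i;
 *         visit_type_uint16(v, &native_i->value, NULL, errp);
 *     }
 *     visit_end_list(v, errp);
 *
 * Each visit_next_list() call lands in opts_next_list(); each element visit
 * lands in one of the opts_type_*() callbacks below.
 */
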
static void
opts_end_list(Visitor *v, Error **errp)
{
    OptsVisitor *ov = DO_UPCAST(OptsVisitor, visitor, v);

    assert(ov->list_mode == LM_STARTED ||
           ov->list_mode == LM_IN_PROGRESS ||
           ov->list_mode == LM_SIGNED_INTERVAL ||
           ov->list_mode == LM_UNSIGNED_INTERVAL);
    ov->repeated_opts = NULL;
    ov->list_mode = LM_NONE;
}

static const QemuOpt *
lookup_scalar(const OptsVisitor *ov, const char *name, Error **errp)
{
    if (ov->list_mode == LM_NONE) {
        GQueue *list;

        /* the last occurrence of any QemuOpt takes effect when queried by
         * name */
        list = lookup_distinct(ov, name, errp);
        return list ? g_queue_peek_tail(list) : NULL;
    }
    assert(ov->list_mode == LM_IN_PROGRESS);
    return g_queue_peek_head(ov->repeated_opts);
}

static void
processed(OptsVisitor *ov, const char *name)
{
    if (ov->list_mode == LM_NONE) {
        g_hash_table_remove(ov->unprocessed_opts, name);
        return;
    }
    assert(ov->list_mode == LM_IN_PROGRESS);
    /* do nothing */
}

static void
opts_type_str(Visitor *v, char **obj, const char *name, Error **errp)
{
    OptsVisitor *ov = DO_UPCAST(OptsVisitor, visitor, v);
    const QemuOpt *opt;

    opt = lookup_scalar(ov, name, errp);
    if (!opt) {
        return;
    }
    *obj = g_strdup(opt->str ? opt->str : "");
    processed(ov, name);
}

/* mimics qemu-option.c::parse_option_bool() */
static void
opts_type_bool(Visitor *v, bool *obj, const char *name, Error **errp)
{
    OptsVisitor *ov = DO_UPCAST(OptsVisitor, visitor, v);
    const QemuOpt *opt;

    opt = lookup_scalar(ov, name, errp);
    if (!opt) {
        return;
    }

    if (opt->str) {
        if (strcmp(opt->str, "on") == 0 ||
            strcmp(opt->str, "yes") == 0 ||
            strcmp(opt->str, "y") == 0) {
            *obj = true;
        } else if (strcmp(opt->str, "off") == 0 ||
                   strcmp(opt->str, "no") == 0 ||
                   strcmp(opt->str, "n") == 0) {
            *obj = false;
        } else {
            error_setg(errp, QERR_INVALID_PARAMETER_VALUE, opt->name,
                       "on|yes|y|off|no|n");
            return;
        }
    } else {
        /* a bare option name with no value counts as "on" */
        *obj = true;
    }

    processed(ov, name);
}

static void
opts_type_int(Visitor *v, int64_t *obj, const char *name, Error **errp)
{
    OptsVisitor *ov = DO_UPCAST(OptsVisitor, visitor, v);
    const QemuOpt *opt;
    const char *str;
    long long val;
    char *endptr;

    if (ov->list_mode == LM_SIGNED_INTERVAL) {
        *obj = ov->range_next.s;
        return;
    }

    opt = lookup_scalar(ov, name, errp);
    if (!opt) {
        return;
    }
    str = opt->str ? opt->str : "";

    /* we've gotten past lookup_scalar() */
    assert(ov->list_mode == LM_NONE || ov->list_mode == LM_IN_PROGRESS);

    errno = 0;
    val = strtoll(str, &endptr, 0);
    if (errno == 0 && endptr > str && INT64_MIN <= val && val <= INT64_MAX) {
        if (*endptr == '\0') {
            *obj = val;
            processed(ov, name);
            return;
        }
        if (*endptr == '-' && ov->list_mode == LM_IN_PROGRESS) {
            long long val2;

            str = endptr + 1;
            val2 = strtoll(str, &endptr, 0);
            if (errno == 0 && endptr > str && *endptr == '\0' &&
                INT64_MIN <= val2 && val2 <= INT64_MAX && val <= val2 &&
                (val > INT64_MAX - OPTS_VISITOR_RANGE_MAX ||
                 val2 < val + OPTS_VISITOR_RANGE_MAX)) {
                ov->range_next.s = val;
                ov->range_limit.s = val2;
                ov->list_mode = LM_SIGNED_INTERVAL;

                /* as if entering on the top */
                *obj = ov->range_next.s;
                return;
            }
        }
    }
    error_setg(errp, QERR_INVALID_PARAMETER_VALUE, opt->name,
               (ov->list_mode == LM_NONE) ? "an int64 value" :
                                            "an int64 value or range");
}

static void
opts_type_uint64(Visitor *v, uint64_t *obj, const char *name, Error **errp)
{
    OptsVisitor *ov = DO_UPCAST(OptsVisitor, visitor, v);
    const QemuOpt *opt;
    const char *str;
    unsigned long long val;
    char *endptr;

    if (ov->list_mode == LM_UNSIGNED_INTERVAL) {
        *obj = ov->range_next.u;
        return;
    }

    opt = lookup_scalar(ov, name, errp);
    if (!opt) {
        return;
    }
    str = opt->str;

    /* we've gotten past lookup_scalar() */
    assert(ov->list_mode == LM_NONE || ov->list_mode == LM_IN_PROGRESS);

    if (parse_uint(str, &val, &endptr, 0) == 0 && val <= UINT64_MAX) {
        if (*endptr == '\0') {
            *obj = val;
            processed(ov, name);
            return;
        }
        if (*endptr == '-' && ov->list_mode == LM_IN_PROGRESS) {
            unsigned long long val2;

            str = endptr + 1;
            if (parse_uint_full(str, &val2, 0) == 0 &&
                val2 <= UINT64_MAX && val <= val2 &&
                val2 - val < OPTS_VISITOR_RANGE_MAX) {
                ov->range_next.u = val;
                ov->range_limit.u = val2;
                ov->list_mode = LM_UNSIGNED_INTERVAL;

                /* as if entering on the top */
                *obj = ov->range_next.u;
                return;
            }
        }
    }
    error_setg(errp, QERR_INVALID_PARAMETER_VALUE, opt->name,
               (ov->list_mode == LM_NONE) ? "a uint64 value" :
                                            "a uint64 value or range");
}

static void
opts_type_size(Visitor *v, uint64_t *obj, const char *name, Error **errp)
{
    OptsVisitor *ov = DO_UPCAST(OptsVisitor, visitor, v);
    const QemuOpt *opt;
    int64_t val;
    char *endptr;

    opt = lookup_scalar(ov, name, errp);
    if (!opt) {
        return;
    }

    val = qemu_strtosz_suffix(opt->str ? opt->str : "", &endptr,
                              QEMU_STRTOSZ_DEFSUFFIX_B);
    if (val < 0 || *endptr) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, opt->name,
                   "a size value representable as a non-negative int64");
        return;
    }

    *obj = val;
    processed(ov, name);
}

static void
opts_optional(Visitor *v, bool *present, const char *name, Error **errp)
{
    OptsVisitor *ov = DO_UPCAST(OptsVisitor, visitor, v);

    /* we only support a single mandatory scalar field in a list node */
    assert(ov->list_mode == LM_NONE);
    *present = (lookup_distinct(ov, name, NULL) != NULL);
}

OptsVisitor *
opts_visitor_new(const QemuOpts *opts)
{
    OptsVisitor *ov;

    ov = g_malloc0(sizeof *ov);

    ov->visitor.start_struct = &opts_start_struct;
    ov->visitor.end_struct   = &opts_end_struct;

    ov->visitor.start_list = &opts_start_list;
    ov->visitor.next_list  = &opts_next_list;
    ov->visitor.end_list   = &opts_end_list;

    /* input_type_enum() covers both "normal" enums and union discriminators.
     * The union discriminator field is always generated as "type"; it should
     * match the "type" QemuOpt child of any QemuOpts.
     *
     * input_type_enum() will remove the looked-up key from the
     * "unprocessed_opts" hash even if the lookup fails, because the removal
     * is done earlier in opts_type_str(). This should be harmless.
     */
    ov->visitor.type_enum = &input_type_enum;

    ov->visitor.type_int    = &opts_type_int;
    ov->visitor.type_uint64 = &opts_type_uint64;
    ov->visitor.type_size   = &opts_type_size;
    ov->visitor.type_bool   = &opts_type_bool;
    ov->visitor.type_str    = &opts_type_str;

    /* type_number() is not filled in, but this is not the first visitor to
     * skip some mandatory methods... */

    ov->visitor.optional = &opts_optional;

    ov->opts_root = opts;

    return ov;
}

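/*
 * Typical caller-side use, sketched under the assumption of a QAPI-generated
 * type "Foo" with its generated visit_type_Foo() (neither is defined here):
 *
 *     Error *err = NULL;
 *     Foo *foo = NULL;
 *     OptsVisitor *ov = opts_visitor_new(opts);
 *
 *     visit_type_Foo(opts_get_visitor(ov), &foo, NULL, &err);
 *     opts_visitor_cleanup(ov);
 *     if (err) {
 *         ... report and free the error ...
 *     }
 */
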
void
opts_visitor_cleanup(OptsVisitor *ov)
{
    if (ov->unprocessed_opts != NULL) {
        g_hash_table_destroy(ov->unprocessed_opts);
    }
    g_free(ov->fake_id_opt);
    g_free(ov);
}

Visitor *
opts_get_visitor(OptsVisitor *ov)
{
    return &ov->visitor;
}