/*
 * Copyright Red Hat, Inc. 2012-2016
 *
 * Author: Laszlo Ersek <lersek@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
 * See the COPYING.LIB file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/cutils.h"
#include "qapi/qmp/qerror.h"
#include "qapi/opts-visitor.h"
#include "qemu/queue.h"
#include "qemu/option_int.h"
#include "qapi/visitor-impl.h"
/*
 * Traversal state for a list of repeated options.  LM_NONE when no list
 * visit is in progress; the interval modes are entered when an element of
 * the form "a-b" is parsed (see opts_type_int64()/opts_type_uint64()).
 */
enum ListMode
{
    LM_NONE,             /* not traversing a list of repeated options */

    LM_IN_PROGRESS,      /*
                          * opts_next_list() ready to be called.
                          *
                          * Generating the next list link will consume the most
                          * recently parsed QemuOpt instance of the repeated
                          * option.
                          *
                          * Parsing a value into the list link will examine the
                          * next QemuOpt instance of the repeated option, and
                          * possibly enter LM_SIGNED_INTERVAL or
                          * LM_UNSIGNED_INTERVAL.
                          */

    LM_SIGNED_INTERVAL,  /*
                          * opts_next_list() has been called.
                          *
                          * Generating the next list link will consume the most
                          * recently stored element from the signed interval,
                          * parsed from the most recent QemuOpt instance of the
                          * repeated option. This may consume QemuOpt itself
                          * and return to LM_IN_PROGRESS.
                          *
                          * Parsing a value into the list link will store the
                          * next element of the signed interval.
                          */

    LM_UNSIGNED_INTERVAL, /* Same as above, only for an unsigned interval. */

    LM_TRAVERSED          /*
                           * opts_next_list() has been called.
                           *
                           * No more QemuOpt instance in the list.
                           * The traversal has been completed.
                           */
};

typedef enum ListMode ListMode;
69 /* Ownership remains with opts_visitor_new()'s caller. */
70 const QemuOpts
*opts_root
;
74 /* Non-null iff depth is positive. Each key is a QemuOpt name. Each value
75 * is a non-empty GQueue, enumerating all QemuOpt occurrences with that
77 GHashTable
*unprocessed_opts
;
79 /* The list currently being traversed with opts_start_list() /
80 * opts_next_list(). The list must have a struct element type in the
81 * schema, with a single mandatory scalar member. */
83 GQueue
*repeated_opts
;
85 /* When parsing a list of repeating options as integers, values of the form
86 * "a-b", representing a closed interval, are allowed. Elements in the
87 * range are generated individually.
92 } range_next
, range_limit
;
94 /* If "opts_root->id" is set, reinstantiate it as a fake QemuOpt for
95 * uniformity. Only its "name" and "str" fields are set. "fake_id_opt" does
96 * not survive or escape the OptsVisitor object.
102 static OptsVisitor
*to_ov(Visitor
*v
)
104 return container_of(v
, OptsVisitor
, visitor
);
109 destroy_list(gpointer list
)
116 opts_visitor_insert(GHashTable
*unprocessed_opts
, const QemuOpt
*opt
)
120 list
= g_hash_table_lookup(unprocessed_opts
, opt
->name
);
122 list
= g_queue_new();
124 /* GHashTable will never try to free the keys -- we supply NULL as
125 * "key_destroy_func" in opts_start_struct(). Thus cast away key
126 * const-ness in order to suppress gcc's warning.
128 g_hash_table_insert(unprocessed_opts
, (gpointer
)opt
->name
, list
);
131 /* Similarly, destroy_list() doesn't call g_queue_free_full(). */
132 g_queue_push_tail(list
, (gpointer
)opt
);
137 opts_start_struct(Visitor
*v
, const char *name
, void **obj
,
138 size_t size
, Error
**errp
)
140 OptsVisitor
*ov
= to_ov(v
);
144 *obj
= g_malloc0(size
);
146 if (ov
->depth
++ > 0) {
150 ov
->unprocessed_opts
= g_hash_table_new_full(&g_str_hash
, &g_str_equal
,
151 NULL
, &destroy_list
);
152 QTAILQ_FOREACH(opt
, &ov
->opts_root
->head
, next
) {
153 /* ensured by qemu-option.c::opts_do_parse() */
154 assert(strcmp(opt
->name
, "id") != 0);
156 opts_visitor_insert(ov
->unprocessed_opts
, opt
);
159 if (ov
->opts_root
->id
!= NULL
) {
160 ov
->fake_id_opt
= g_malloc0(sizeof *ov
->fake_id_opt
);
162 ov
->fake_id_opt
->name
= g_strdup("id");
163 ov
->fake_id_opt
->str
= g_strdup(ov
->opts_root
->id
);
164 opts_visitor_insert(ov
->unprocessed_opts
, ov
->fake_id_opt
);
171 opts_check_struct(Visitor
*v
, Error
**errp
)
173 OptsVisitor
*ov
= to_ov(v
);
181 /* we should have processed all (distinct) QemuOpt instances */
182 g_hash_table_iter_init(&iter
, ov
->unprocessed_opts
);
183 if (g_hash_table_iter_next(&iter
, NULL
, (void **)&any
)) {
184 const QemuOpt
*first
;
186 first
= g_queue_peek_head(any
);
187 error_setg(errp
, QERR_INVALID_PARAMETER
, first
->name
);
195 opts_end_struct(Visitor
*v
, void **obj
)
197 OptsVisitor
*ov
= to_ov(v
);
199 if (--ov
->depth
> 0) {
203 g_hash_table_destroy(ov
->unprocessed_opts
);
204 ov
->unprocessed_opts
= NULL
;
205 if (ov
->fake_id_opt
) {
206 g_free(ov
->fake_id_opt
->name
);
207 g_free(ov
->fake_id_opt
->str
);
208 g_free(ov
->fake_id_opt
);
210 ov
->fake_id_opt
= NULL
;
215 lookup_distinct(const OptsVisitor
*ov
, const char *name
, Error
**errp
)
219 list
= g_hash_table_lookup(ov
->unprocessed_opts
, name
);
221 error_setg(errp
, QERR_MISSING_PARAMETER
, name
);
228 opts_start_list(Visitor
*v
, const char *name
, GenericList
**list
, size_t size
,
231 OptsVisitor
*ov
= to_ov(v
);
233 /* we can't traverse a list in a list */
234 assert(ov
->list_mode
== LM_NONE
);
235 /* we don't support visits without a list */
237 ov
->repeated_opts
= lookup_distinct(ov
, name
, errp
);
238 if (!ov
->repeated_opts
) {
242 ov
->list_mode
= LM_IN_PROGRESS
;
243 *list
= g_malloc0(size
);
249 opts_next_list(Visitor
*v
, GenericList
*tail
, size_t size
)
251 OptsVisitor
*ov
= to_ov(v
);
253 switch (ov
->list_mode
) {
256 case LM_SIGNED_INTERVAL
:
257 case LM_UNSIGNED_INTERVAL
:
258 if (ov
->list_mode
== LM_SIGNED_INTERVAL
) {
259 if (ov
->range_next
.s
< ov
->range_limit
.s
) {
263 } else if (ov
->range_next
.u
< ov
->range_limit
.u
) {
267 ov
->list_mode
= LM_IN_PROGRESS
;
268 /* range has been completed, fall through in order to pop option */
270 case LM_IN_PROGRESS
: {
273 opt
= g_queue_pop_head(ov
->repeated_opts
);
274 if (g_queue_is_empty(ov
->repeated_opts
)) {
275 g_hash_table_remove(ov
->unprocessed_opts
, opt
->name
);
276 ov
->repeated_opts
= NULL
;
277 ov
->list_mode
= LM_TRAVERSED
;
287 tail
->next
= g_malloc0(size
);
293 opts_check_list(Visitor
*v
, Error
**errp
)
296 * Unvisited list elements will be reported later when checking
297 * whether unvisited struct members remain.
304 opts_end_list(Visitor
*v
, void **obj
)
306 OptsVisitor
*ov
= to_ov(v
);
308 assert(ov
->list_mode
== LM_IN_PROGRESS
||
309 ov
->list_mode
== LM_SIGNED_INTERVAL
||
310 ov
->list_mode
== LM_UNSIGNED_INTERVAL
||
311 ov
->list_mode
== LM_TRAVERSED
);
312 ov
->repeated_opts
= NULL
;
313 ov
->list_mode
= LM_NONE
;
317 static const QemuOpt
*
318 lookup_scalar(const OptsVisitor
*ov
, const char *name
, Error
**errp
)
320 if (ov
->list_mode
== LM_NONE
) {
323 /* the last occurrence of any QemuOpt takes effect when queried by name
325 list
= lookup_distinct(ov
, name
, errp
);
326 return list
? g_queue_peek_tail(list
) : NULL
;
328 if (ov
->list_mode
== LM_TRAVERSED
) {
329 error_setg(errp
, "Fewer list elements than expected");
332 assert(ov
->list_mode
== LM_IN_PROGRESS
);
333 return g_queue_peek_head(ov
->repeated_opts
);
338 processed(OptsVisitor
*ov
, const char *name
)
340 if (ov
->list_mode
== LM_NONE
) {
341 g_hash_table_remove(ov
->unprocessed_opts
, name
);
344 assert(ov
->list_mode
== LM_IN_PROGRESS
);
350 opts_type_str(Visitor
*v
, const char *name
, char **obj
, Error
**errp
)
352 OptsVisitor
*ov
= to_ov(v
);
355 opt
= lookup_scalar(ov
, name
, errp
);
360 *obj
= g_strdup(opt
->str
? opt
->str
: "");
361 /* Note that we consume a string even if this is called as part of
362 * an enum visit that later fails because the string is not a
363 * valid enum value; this is harmless because tracking what gets
364 * consumed only matters to visit_end_struct() as the final error
365 * check if there were no other failures during the visit. */
372 opts_type_bool(Visitor
*v
, const char *name
, bool *obj
, Error
**errp
)
374 OptsVisitor
*ov
= to_ov(v
);
377 opt
= lookup_scalar(ov
, name
, errp
);
382 if (!qapi_bool_parse(opt
->name
, opt
->str
, obj
, errp
)) {
395 opts_type_int64(Visitor
*v
, const char *name
, int64_t *obj
, Error
**errp
)
397 OptsVisitor
*ov
= to_ov(v
);
403 if (ov
->list_mode
== LM_SIGNED_INTERVAL
) {
404 *obj
= ov
->range_next
.s
;
408 opt
= lookup_scalar(ov
, name
, errp
);
412 str
= opt
->str
? opt
->str
: "";
414 /* we've gotten past lookup_scalar() */
415 assert(ov
->list_mode
== LM_NONE
|| ov
->list_mode
== LM_IN_PROGRESS
);
418 val
= strtoll(str
, &endptr
, 0);
419 if (errno
== 0 && endptr
> str
&& INT64_MIN
<= val
&& val
<= INT64_MAX
) {
420 if (*endptr
== '\0') {
425 if (*endptr
== '-' && ov
->list_mode
== LM_IN_PROGRESS
) {
429 val2
= strtoll(str
, &endptr
, 0);
430 if (errno
== 0 && endptr
> str
&& *endptr
== '\0' &&
431 INT64_MIN
<= val2
&& val2
<= INT64_MAX
&& val
<= val2
&&
432 (val
> INT64_MAX
- OPTS_VISITOR_RANGE_MAX
||
433 val2
< val
+ OPTS_VISITOR_RANGE_MAX
)) {
434 ov
->range_next
.s
= val
;
435 ov
->range_limit
.s
= val2
;
436 ov
->list_mode
= LM_SIGNED_INTERVAL
;
438 /* as if entering on the top */
439 *obj
= ov
->range_next
.s
;
444 error_setg(errp
, QERR_INVALID_PARAMETER_VALUE
, opt
->name
,
445 (ov
->list_mode
== LM_NONE
) ? "an int64 value" :
446 "an int64 value or range");
452 opts_type_uint64(Visitor
*v
, const char *name
, uint64_t *obj
, Error
**errp
)
454 OptsVisitor
*ov
= to_ov(v
);
457 unsigned long long val
;
460 if (ov
->list_mode
== LM_UNSIGNED_INTERVAL
) {
461 *obj
= ov
->range_next
.u
;
465 opt
= lookup_scalar(ov
, name
, errp
);
471 /* we've gotten past lookup_scalar() */
472 assert(ov
->list_mode
== LM_NONE
|| ov
->list_mode
== LM_IN_PROGRESS
);
474 if (parse_uint(str
, &val
, &endptr
, 0) == 0 && val
<= UINT64_MAX
) {
475 if (*endptr
== '\0') {
480 if (*endptr
== '-' && ov
->list_mode
== LM_IN_PROGRESS
) {
481 unsigned long long val2
;
484 if (parse_uint_full(str
, &val2
, 0) == 0 &&
485 val2
<= UINT64_MAX
&& val
<= val2
&&
486 val2
- val
< OPTS_VISITOR_RANGE_MAX
) {
487 ov
->range_next
.u
= val
;
488 ov
->range_limit
.u
= val2
;
489 ov
->list_mode
= LM_UNSIGNED_INTERVAL
;
491 /* as if entering on the top */
492 *obj
= ov
->range_next
.u
;
497 error_setg(errp
, QERR_INVALID_PARAMETER_VALUE
, opt
->name
,
498 (ov
->list_mode
== LM_NONE
) ? "a uint64 value" :
499 "a uint64 value or range");
505 opts_type_size(Visitor
*v
, const char *name
, uint64_t *obj
, Error
**errp
)
507 OptsVisitor
*ov
= to_ov(v
);
511 opt
= lookup_scalar(ov
, name
, errp
);
516 err
= qemu_strtosz(opt
->str
? opt
->str
: "", NULL
, obj
);
518 error_setg(errp
, QERR_INVALID_PARAMETER_VALUE
, opt
->name
,
529 opts_optional(Visitor
*v
, const char *name
, bool *present
)
531 OptsVisitor
*ov
= to_ov(v
);
533 /* we only support a single mandatory scalar field in a list node */
534 assert(ov
->list_mode
== LM_NONE
);
535 *present
= (lookup_distinct(ov
, name
, NULL
) != NULL
);
540 opts_free(Visitor
*v
)
542 OptsVisitor
*ov
= to_ov(v
);
544 if (ov
->unprocessed_opts
!= NULL
) {
545 g_hash_table_destroy(ov
->unprocessed_opts
);
547 g_free(ov
->fake_id_opt
);
553 opts_visitor_new(const QemuOpts
*opts
)
558 ov
= g_malloc0(sizeof *ov
);
560 ov
->visitor
.type
= VISITOR_INPUT
;
562 ov
->visitor
.start_struct
= &opts_start_struct
;
563 ov
->visitor
.check_struct
= &opts_check_struct
;
564 ov
->visitor
.end_struct
= &opts_end_struct
;
566 ov
->visitor
.start_list
= &opts_start_list
;
567 ov
->visitor
.next_list
= &opts_next_list
;
568 ov
->visitor
.check_list
= &opts_check_list
;
569 ov
->visitor
.end_list
= &opts_end_list
;
571 ov
->visitor
.type_int64
= &opts_type_int64
;
572 ov
->visitor
.type_uint64
= &opts_type_uint64
;
573 ov
->visitor
.type_size
= &opts_type_size
;
574 ov
->visitor
.type_bool
= &opts_type_bool
;
575 ov
->visitor
.type_str
= &opts_type_str
;
577 /* type_number() is not filled in, but this is not the first visitor to
578 * skip some mandatory methods... */
580 ov
->visitor
.optional
= &opts_optional
;
581 ov
->visitor
.free
= opts_free
;
583 ov
->opts_root
= opts
;