qapi/opts-visitor.c
/*
 * Options Visitor
 *
 * Copyright Red Hat, Inc. 2012-2016
 *
 * Author: Laszlo Ersek <lersek@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
 * See the COPYING.LIB file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qapi/qmp/qerror.h"
#include "qapi/opts-visitor.h"
#include "qemu/queue.h"
#include "qemu/option_int.h"
#include "qapi/visitor-impl.h"

enum ListMode
{
    LM_NONE,             /* not traversing a list of repeated options */

    LM_STARTED,          /* opts_start_list() succeeded */

    LM_IN_PROGRESS,      /* opts_next_list() has been called.
                          *
                          * Generating the next list link will consume the most
                          * recently parsed QemuOpt instance of the repeated
                          * option.
                          *
                          * Parsing a value into the list link will examine the
                          * next QemuOpt instance of the repeated option, and
                          * possibly enter LM_SIGNED_INTERVAL or
                          * LM_UNSIGNED_INTERVAL.
                          */

    LM_SIGNED_INTERVAL,  /* opts_next_list() has been called.
                          *
                          * Generating the next list link will consume the most
                          * recently stored element from the signed interval,
                          * parsed from the most recent QemuOpt instance of the
                          * repeated option. This may consume QemuOpt itself
                          * and return to LM_IN_PROGRESS.
                          *
                          * Parsing a value into the list link will store the
                          * next element of the signed interval.
                          */

    LM_UNSIGNED_INTERVAL /* Same as above, only for an unsigned interval. */
};

typedef enum ListMode ListMode;
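
/* Illustration (the option name and values below are made up, not taken from
 * this file): for a repeated signed-integer option given as "cpus=0-2,cpus=7",
 * traversal moves through the modes roughly as follows:
 *
 *   LM_NONE    -> LM_STARTED              opts_start_list("cpus")
 *   LM_STARTED -> LM_IN_PROGRESS          first opts_next_list() call
 *   LM_IN_PROGRESS -> LM_SIGNED_INTERVAL  opts_type_int64() parses "0-2",
 *                                         emits 0
 *   (stays in LM_SIGNED_INTERVAL while emitting 1 and 2)
 *   LM_SIGNED_INTERVAL -> LM_IN_PROGRESS  range exhausted; "0-2" is popped,
 *                                         then "7" is parsed and emitted
 *   LM_IN_PROGRESS -> LM_NONE             opts_end_list() after "7" is popped
 */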

struct OptsVisitor
{
    Visitor visitor;

    /* Ownership remains with opts_visitor_new()'s caller. */
    const QemuOpts *opts_root;

    unsigned depth;

    /* Non-null iff depth is positive. Each key is a QemuOpt name. Each value
     * is a non-empty GQueue, enumerating all QemuOpt occurrences with that
     * name. */
    GHashTable *unprocessed_opts;

    /* The list currently being traversed with opts_start_list() /
     * opts_next_list(). The list must have a struct element type in the
     * schema, with a single mandatory scalar member. */
    ListMode list_mode;
    GQueue *repeated_opts;

    /* When parsing a list of repeating options as integers, values of the form
     * "a-b", representing a closed interval, are allowed. Elements in the
     * range are generated individually.
     */
    union {
        int64_t s;
        uint64_t u;
    } range_next, range_limit;

    /* If "opts_root->id" is set, reinstantiate it as a fake QemuOpt for
     * uniformity. Only its "name" and "str" fields are set. "fake_id_opt" does
     * not survive or escape the OptsVisitor object.
     */
    QemuOpt *fake_id_opt;
};
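
/* Illustration (the option string below is made up): for a QemuOpts parsed
 * from "id=node0,mem=1G,cpus=0,cpus=2-3", opts_start_struct() fills
 * unprocessed_opts with three keys:
 *
 *   "mem"  -> [ QemuOpt("1G") ]
 *   "cpus" -> [ QemuOpt("0"), QemuOpt("2-3") ]
 *   "id"   -> [ fake_id_opt("node0") ]
 *
 * ("id" never appears as a regular QemuOpt child, hence fake_id_opt.)
 * Options are removed from the hash as they are consumed, and
 * opts_end_struct() reports any key still present as an invalid parameter.
 */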

static OptsVisitor *to_ov(Visitor *v)
{
    return container_of(v, OptsVisitor, visitor);
}


static void
destroy_list(gpointer list)
{
    g_queue_free(list);
}


static void
opts_visitor_insert(GHashTable *unprocessed_opts, const QemuOpt *opt)
{
    GQueue *list;

    list = g_hash_table_lookup(unprocessed_opts, opt->name);
    if (list == NULL) {
        list = g_queue_new();

        /* GHashTable will never try to free the keys -- we supply NULL as
         * "key_destroy_func" in opts_start_struct(). Thus cast away key
         * const-ness in order to suppress gcc's warning.
         */
        g_hash_table_insert(unprocessed_opts, (gpointer)opt->name, list);
    }

    /* Similarly, destroy_list() doesn't call g_queue_free_full(). */
    g_queue_push_tail(list, (gpointer)opt);
}

static void
opts_start_struct(Visitor *v, const char *name, void **obj,
                  size_t size, Error **errp)
{
    OptsVisitor *ov = to_ov(v);
    const QemuOpt *opt;

    if (obj) {
        *obj = g_malloc0(size > 0 ? size : 1);
    }
    if (ov->depth++ > 0) {
        return;
    }

    ov->unprocessed_opts = g_hash_table_new_full(&g_str_hash, &g_str_equal,
                                                 NULL, &destroy_list);
    QTAILQ_FOREACH(opt, &ov->opts_root->head, next) {
        /* ensured by qemu-option.c::opts_do_parse() */
        assert(strcmp(opt->name, "id") != 0);
        opts_visitor_insert(ov->unprocessed_opts, opt);
    }

    if (ov->opts_root->id != NULL) {
        ov->fake_id_opt = g_malloc0(sizeof *ov->fake_id_opt);

        ov->fake_id_opt->name = g_strdup("id");
        ov->fake_id_opt->str = g_strdup(ov->opts_root->id);
        opts_visitor_insert(ov->unprocessed_opts, ov->fake_id_opt);
    }
}

static gboolean
ghr_true(gpointer ign_key, gpointer ign_value, gpointer ign_user_data)
{
    return TRUE;
}


static void
opts_end_struct(Visitor *v, Error **errp)
{
    OptsVisitor *ov = to_ov(v);
    GQueue *any;

    if (--ov->depth > 0) {
        return;
    }

    /* we should have processed all (distinct) QemuOpt instances */
    any = g_hash_table_find(ov->unprocessed_opts, &ghr_true, NULL);
    if (any) {
        const QemuOpt *first;

        first = g_queue_peek_head(any);
        error_setg(errp, QERR_INVALID_PARAMETER, first->name);
    }
    g_hash_table_destroy(ov->unprocessed_opts);
    ov->unprocessed_opts = NULL;
    if (ov->fake_id_opt) {
        g_free(ov->fake_id_opt->name);
        g_free(ov->fake_id_opt->str);
        g_free(ov->fake_id_opt);
    }
    ov->fake_id_opt = NULL;
}

static GQueue *
lookup_distinct(const OptsVisitor *ov, const char *name, Error **errp)
{
    GQueue *list;

    list = g_hash_table_lookup(ov->unprocessed_opts, name);
    if (!list) {
        error_setg(errp, QERR_MISSING_PARAMETER, name);
    }
    return list;
}

static void
opts_start_list(Visitor *v, const char *name, Error **errp)
{
    OptsVisitor *ov = to_ov(v);

    /* we can't traverse a list in a list */
    assert(ov->list_mode == LM_NONE);
    ov->repeated_opts = lookup_distinct(ov, name, errp);
    if (ov->repeated_opts != NULL) {
        ov->list_mode = LM_STARTED;
    }
}

static GenericList *
opts_next_list(Visitor *v, GenericList **list)
{
    OptsVisitor *ov = to_ov(v);
    GenericList **link;

    switch (ov->list_mode) {
    case LM_STARTED:
        ov->list_mode = LM_IN_PROGRESS;
        link = list;
        break;

    case LM_SIGNED_INTERVAL:
    case LM_UNSIGNED_INTERVAL:
        link = &(*list)->next;

        if (ov->list_mode == LM_SIGNED_INTERVAL) {
            if (ov->range_next.s < ov->range_limit.s) {
                ++ov->range_next.s;
                break;
            }
        } else if (ov->range_next.u < ov->range_limit.u) {
            ++ov->range_next.u;
            break;
        }
        ov->list_mode = LM_IN_PROGRESS;
        /* range has been completed, fall through in order to pop option */

    case LM_IN_PROGRESS: {
        const QemuOpt *opt;

        opt = g_queue_pop_head(ov->repeated_opts);
        if (g_queue_is_empty(ov->repeated_opts)) {
            g_hash_table_remove(ov->unprocessed_opts, opt->name);
            return NULL;
        }
        link = &(*list)->next;
        break;
    }

    default:
        abort();
    }

    *link = g_malloc0(sizeof **link);
    return *link;
}

static void
opts_end_list(Visitor *v)
{
    OptsVisitor *ov = to_ov(v);

    assert(ov->list_mode == LM_STARTED ||
           ov->list_mode == LM_IN_PROGRESS ||
           ov->list_mode == LM_SIGNED_INTERVAL ||
           ov->list_mode == LM_UNSIGNED_INTERVAL);
    ov->repeated_opts = NULL;
    ov->list_mode = LM_NONE;
}

static const QemuOpt *
lookup_scalar(const OptsVisitor *ov, const char *name, Error **errp)
{
    if (ov->list_mode == LM_NONE) {
        GQueue *list;

        /* the last occurrence of any QemuOpt takes effect when queried by name
         */
        list = lookup_distinct(ov, name, errp);
        return list ? g_queue_peek_tail(list) : NULL;
    }
    assert(ov->list_mode == LM_IN_PROGRESS);
    return g_queue_peek_head(ov->repeated_opts);
}
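
/* Example (the option string is illustrative): outside of list traversal,
 * "size=1,size=2" makes a scalar query for "size" return the QemuOpt holding
 * "2" -- the tail of the per-name queue -- which is what the comment above
 * means by the last occurrence taking effect. */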

static void
processed(OptsVisitor *ov, const char *name)
{
    if (ov->list_mode == LM_NONE) {
        g_hash_table_remove(ov->unprocessed_opts, name);
        return;
    }
    assert(ov->list_mode == LM_IN_PROGRESS);
    /* do nothing */
}

static void
opts_type_str(Visitor *v, const char *name, char **obj, Error **errp)
{
    OptsVisitor *ov = to_ov(v);
    const QemuOpt *opt;

    opt = lookup_scalar(ov, name, errp);
    if (!opt) {
        return;
    }
    *obj = g_strdup(opt->str ? opt->str : "");
    processed(ov, name);
}

/* mimics qemu-option.c::parse_option_bool() */
static void
opts_type_bool(Visitor *v, const char *name, bool *obj, Error **errp)
{
    OptsVisitor *ov = to_ov(v);
    const QemuOpt *opt;

    opt = lookup_scalar(ov, name, errp);
    if (!opt) {
        return;
    }

    if (opt->str) {
        if (strcmp(opt->str, "on") == 0 ||
            strcmp(opt->str, "yes") == 0 ||
            strcmp(opt->str, "y") == 0) {
            *obj = true;
        } else if (strcmp(opt->str, "off") == 0 ||
                   strcmp(opt->str, "no") == 0 ||
                   strcmp(opt->str, "n") == 0) {
            *obj = false;
        } else {
            error_setg(errp, QERR_INVALID_PARAMETER_VALUE, opt->name,
                       "on|yes|y|off|no|n");
            return;
        }
    } else {
        *obj = true;
    }

    processed(ov, name);
}

static void
opts_type_int64(Visitor *v, const char *name, int64_t *obj, Error **errp)
{
    OptsVisitor *ov = to_ov(v);
    const QemuOpt *opt;
    const char *str;
    long long val;
    char *endptr;

    if (ov->list_mode == LM_SIGNED_INTERVAL) {
        *obj = ov->range_next.s;
        return;
    }

    opt = lookup_scalar(ov, name, errp);
    if (!opt) {
        return;
    }
    str = opt->str ? opt->str : "";

    /* we've gotten past lookup_scalar() */
    assert(ov->list_mode == LM_NONE || ov->list_mode == LM_IN_PROGRESS);

    errno = 0;
    val = strtoll(str, &endptr, 0);
    if (errno == 0 && endptr > str && INT64_MIN <= val && val <= INT64_MAX) {
        if (*endptr == '\0') {
            *obj = val;
            processed(ov, name);
            return;
        }
        if (*endptr == '-' && ov->list_mode == LM_IN_PROGRESS) {
            long long val2;

            str = endptr + 1;
            val2 = strtoll(str, &endptr, 0);
            if (errno == 0 && endptr > str && *endptr == '\0' &&
                INT64_MIN <= val2 && val2 <= INT64_MAX && val <= val2 &&
                (val > INT64_MAX - OPTS_VISITOR_RANGE_MAX ||
                 val2 < val + OPTS_VISITOR_RANGE_MAX)) {
                ov->range_next.s = val;
                ov->range_limit.s = val2;
                ov->list_mode = LM_SIGNED_INTERVAL;

                /* as if entering on the top */
                *obj = ov->range_next.s;
                return;
            }
        }
    }
    error_setg(errp, QERR_INVALID_PARAMETER_VALUE, opt->name,
               (ov->list_mode == LM_NONE) ? "an int64 value" :
               "an int64 value or range");
}

static void
opts_type_uint64(Visitor *v, const char *name, uint64_t *obj, Error **errp)
{
    OptsVisitor *ov = to_ov(v);
    const QemuOpt *opt;
    const char *str;
    unsigned long long val;
    char *endptr;

    if (ov->list_mode == LM_UNSIGNED_INTERVAL) {
        *obj = ov->range_next.u;
        return;
    }

    opt = lookup_scalar(ov, name, errp);
    if (!opt) {
        return;
    }
    str = opt->str;

    /* we've gotten past lookup_scalar() */
    assert(ov->list_mode == LM_NONE || ov->list_mode == LM_IN_PROGRESS);

    if (parse_uint(str, &val, &endptr, 0) == 0 && val <= UINT64_MAX) {
        if (*endptr == '\0') {
            *obj = val;
            processed(ov, name);
            return;
        }
        if (*endptr == '-' && ov->list_mode == LM_IN_PROGRESS) {
            unsigned long long val2;

            str = endptr + 1;
            if (parse_uint_full(str, &val2, 0) == 0 &&
                val2 <= UINT64_MAX && val <= val2 &&
                val2 - val < OPTS_VISITOR_RANGE_MAX) {
                ov->range_next.u = val;
                ov->range_limit.u = val2;
                ov->list_mode = LM_UNSIGNED_INTERVAL;

                /* as if entering on the top */
                *obj = ov->range_next.u;
                return;
            }
        }
    }
    error_setg(errp, QERR_INVALID_PARAMETER_VALUE, opt->name,
               (ov->list_mode == LM_NONE) ? "a uint64 value" :
               "a uint64 value or range");
}

static void
opts_type_size(Visitor *v, const char *name, uint64_t *obj, Error **errp)
{
    OptsVisitor *ov = to_ov(v);
    const QemuOpt *opt;
    int64_t val;
    char *endptr;

    opt = lookup_scalar(ov, name, errp);
    if (!opt) {
        return;
    }

    val = qemu_strtosz_suffix(opt->str ? opt->str : "", &endptr,
                              QEMU_STRTOSZ_DEFSUFFIX_B);
    if (val < 0 || *endptr) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, opt->name,
                   "a size value representable as a non-negative int64");
        return;
    }

    *obj = val;
    processed(ov, name);
}
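
/* Example (the option string is illustrative): "mem=512M" is parsed with a
 * default suffix of bytes, yielding 536870912 (512 * 1024 * 1024). A string
 * that fails to parse (negative return value) or that leaves trailing
 * characters, such as "512Mx", is rejected with the error above. */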

static void
opts_optional(Visitor *v, const char *name, bool *present)
{
    OptsVisitor *ov = to_ov(v);

    /* we only support a single mandatory scalar field in a list node */
    assert(ov->list_mode == LM_NONE);
    *present = (lookup_distinct(ov, name, NULL) != NULL);
}

OptsVisitor *
opts_visitor_new(const QemuOpts *opts)
{
    OptsVisitor *ov;

    ov = g_malloc0(sizeof *ov);

    ov->visitor.start_struct = &opts_start_struct;
    ov->visitor.end_struct   = &opts_end_struct;

    ov->visitor.start_list = &opts_start_list;
    ov->visitor.next_list  = &opts_next_list;
    ov->visitor.end_list   = &opts_end_list;

    /* input_type_enum() covers both "normal" enums and union discriminators.
     * The union discriminator field is always generated as "type"; it should
     * match the "type" QemuOpt child of any QemuOpts.
     *
     * input_type_enum() will remove the looked-up key from the
     * "unprocessed_opts" hash even if the lookup fails, because the removal is
     * done earlier in opts_type_str(). This should be harmless.
     */
    ov->visitor.type_enum = &input_type_enum;

    ov->visitor.type_int64  = &opts_type_int64;
    ov->visitor.type_uint64 = &opts_type_uint64;
    ov->visitor.type_size   = &opts_type_size;
    ov->visitor.type_bool   = &opts_type_bool;
    ov->visitor.type_str    = &opts_type_str;

    /* type_number() is not filled in, but this is not the first visitor to
     * skip some mandatory methods... */

    ov->visitor.optional = &opts_optional;

    ov->opts_root = opts;

    return ov;
}
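
/* Usage sketch (assumed caller pattern, not part of this file; the
 * QAPI-generated function name and its exact prototype are illustrative --
 * check the generated visit_type_*() declaration before copying):
 *
 *   OptsVisitor *ov = opts_visitor_new(opts);
 *   NumaOptions *object = NULL;
 *   Error *err = NULL;
 *
 *   visit_type_NumaOptions(opts_get_visitor(ov), NULL, &object, &err);
 *   opts_visitor_cleanup(ov);
 *
 * On success, "object" reflects the parsed options; on failure, "err" is set
 * and must be handled by the caller.
 */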

void
opts_visitor_cleanup(OptsVisitor *ov)
{
    if (ov->unprocessed_opts != NULL) {
        g_hash_table_destroy(ov->unprocessed_opts);
    }
    g_free(ov->fake_id_opt);
    g_free(ov);
}

Visitor *
opts_get_visitor(OptsVisitor *ov)
{
    return &ov->visitor;
}