/* Copyright (C) 2019-2023 Free Software Foundation, Inc.

   This file is part of libctf.

   libctf is free software; you can redistribute it and/or modify it under
   the terms of the GNU General Public License as published by the Free
   Software Foundation; either version 3, or (at your option) any later
   version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not see
   <http://www.gnu.org/licenses/>.  */
#include <ctf-impl.h>
#include <sys/param.h>
#include <string.h>

#ifndef EOVERFLOW
#define EOVERFLOW ERANGE
#endif

#ifndef roundup
#define roundup(x, y)  ((((x) + ((y) - 1)) / (y)) * (y))
#endif
/* The initial size of a dynamic type's vlen in members.  Arbitrary: the bigger
   this is, the less allocation needs to be done for small structure
   initialization, and the more memory is wasted for small structures during
   CTF construction.  No effect on generated CTF or ctf_open()ed CTF.  */
#define INITIAL_VLEN 16
/* Make sure the ptrtab has enough space for at least one more type.

   We start with 4KiB of ptrtab, enough for a thousand types, then grow it 25%
   at a time.  */
static int
ctf_grow_ptrtab (ctf_dict_t *fp)
{
  size_t new_ptrtab_len = fp->ctf_ptrtab_len;

  /* We allocate one more ptrtab entry than we need, for the initial zero,
     plus one because the caller will probably allocate a new type.  */

  if (fp->ctf_ptrtab == NULL)
    new_ptrtab_len = 1024;
  else if ((fp->ctf_typemax + 2) > fp->ctf_ptrtab_len)
    new_ptrtab_len = fp->ctf_ptrtab_len * 1.25;

  if (new_ptrtab_len != fp->ctf_ptrtab_len)
    {
      uint32_t *new_ptrtab;

      if ((new_ptrtab = realloc (fp->ctf_ptrtab,
                                 new_ptrtab_len * sizeof (uint32_t))) == NULL)
        return (ctf_set_errno (fp, ENOMEM));

      fp->ctf_ptrtab = new_ptrtab;
      memset (fp->ctf_ptrtab + fp->ctf_ptrtab_len, 0,
              (new_ptrtab_len - fp->ctf_ptrtab_len) * sizeof (uint32_t));
      fp->ctf_ptrtab_len = new_ptrtab_len;
    }

  return 0;
}
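
/* Worked example (not part of the original source): a fresh dict gets a
   1024-entry ptrtab (4KiB of uint32_t).  Once ctf_typemax + 2 exceeds the
   current length, each growth multiplies the length by 1.25, so the sizes run
   roughly 1024 -> 1280 -> 1600 -> 2000 -> ... entries, always leaving room
   for the initial zero entry plus the type about to be added.  */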
/* Make sure a vlen has enough space: expand it otherwise.  Unlike the ptrtab,
   which grows quite slowly, the vlen grows in big jumps because it is quite
   expensive to expand: the caller has to scan the old vlen for string refs
   first and remove them, then re-add them afterwards.  The initial size is
   more or less arbitrary.  */
static int
ctf_grow_vlen (ctf_dict_t *fp, ctf_dtdef_t *dtd, size_t vlen)
{
  unsigned char *old = dtd->dtd_vlen;

  if (dtd->dtd_vlen_alloc > vlen)
    return 0;

  if ((dtd->dtd_vlen = realloc (dtd->dtd_vlen,
                                dtd->dtd_vlen_alloc * 2)) == NULL)
    {
      dtd->dtd_vlen = old;
      return (ctf_set_errno (fp, ENOMEM));
    }
  memset (dtd->dtd_vlen + dtd->dtd_vlen_alloc, 0, dtd->dtd_vlen_alloc);
  dtd->dtd_vlen_alloc *= 2;

  return 0;
}
/* To create an empty CTF dict, we just declare a zeroed header and call
   ctf_bufopen() on it.  If ctf_bufopen succeeds, we mark the new dict r/w and
   initialize the dynamic members.  We start assigning type IDs at 1 because
   type ID 0 is used as a sentinel and a not-found indicator.  */
ctf_dict_t *
ctf_create (int *errp)
{
  static const ctf_header_t hdr = { .cth_preamble = { CTF_MAGIC, CTF_VERSION, 0 } };

  ctf_dynhash_t *dthash;
  ctf_dynhash_t *dvhash;
  ctf_dynhash_t *structs = NULL, *unions = NULL, *enums = NULL, *names = NULL;
  ctf_dynhash_t *objthash = NULL, *funchash = NULL;
  ctf_sect_t cts;
  ctf_dict_t *fp;

  dthash = ctf_dynhash_create (ctf_hash_integer, ctf_hash_eq_integer,
                               NULL, NULL);
  if (dthash == NULL)
    {
      ctf_set_open_errno (errp, EAGAIN);
      goto err;
    }

  dvhash = ctf_dynhash_create (ctf_hash_string, ctf_hash_eq_string,
                               NULL, NULL);
  if (dvhash == NULL)
    {
      ctf_set_open_errno (errp, EAGAIN);
      goto err_dt;
    }

  structs = ctf_dynhash_create (ctf_hash_string, ctf_hash_eq_string,
                                NULL, NULL);
  unions = ctf_dynhash_create (ctf_hash_string, ctf_hash_eq_string,
                               NULL, NULL);
  enums = ctf_dynhash_create (ctf_hash_string, ctf_hash_eq_string,
                              NULL, NULL);
  names = ctf_dynhash_create (ctf_hash_string, ctf_hash_eq_string,
                              NULL, NULL);
  objthash = ctf_dynhash_create (ctf_hash_string, ctf_hash_eq_string,
                                 free, NULL);
  funchash = ctf_dynhash_create (ctf_hash_string, ctf_hash_eq_string,
                                 free, NULL);
  if (!structs || !unions || !enums || !names)
    {
      ctf_set_open_errno (errp, EAGAIN);
      goto err_dv;
    }

  cts.cts_name = _CTF_SECTION;
  cts.cts_data = &hdr;
  cts.cts_size = sizeof (hdr);
  cts.cts_entsize = 1;

  if ((fp = ctf_bufopen_internal (&cts, NULL, NULL, NULL, 1, errp)) == NULL)
    goto err_dv;

  fp->ctf_structs.ctn_writable = structs;
  fp->ctf_unions.ctn_writable = unions;
  fp->ctf_enums.ctn_writable = enums;
  fp->ctf_names.ctn_writable = names;
  fp->ctf_objthash = objthash;
  fp->ctf_funchash = funchash;
  fp->ctf_dthash = dthash;
  fp->ctf_dvhash = dvhash;

  fp->ctf_snapshots = 1;
  fp->ctf_snapshot_lu = 0;
  fp->ctf_flags |= LCTF_DIRTY;

  ctf_set_ctl_hashes (fp);
  ctf_setmodel (fp, CTF_MODEL_NATIVE);
  if (ctf_grow_ptrtab (fp) < 0)
    {
      ctf_set_open_errno (errp, ctf_errno (fp));
      ctf_dict_close (fp);
      return NULL;
    }

  return fp;

 err_dv:
  ctf_dynhash_destroy (structs);
  ctf_dynhash_destroy (unions);
  ctf_dynhash_destroy (enums);
  ctf_dynhash_destroy (names);
  ctf_dynhash_destroy (objthash);
  ctf_dynhash_destroy (funchash);
  ctf_dynhash_destroy (dvhash);
 err_dt:
  ctf_dynhash_destroy (dthash);
 err:
  return NULL;
}
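
/* Illustrative usage sketch (not part of the original source; uses only the
   public libctf API):

     int err;
     ctf_dict_t *fp = ctf_create (&err);

     if (fp == NULL)
       fprintf (stderr, "cannot create dict: %s\n", ctf_errmsg (err));
     ...
     ctf_dict_close (fp);  */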
/* Compatibility: just update the threshold for ctf_discard.  */
int
ctf_update (ctf_dict_t *fp)
{
  if (!(fp->ctf_flags & LCTF_RDWR))
    return (ctf_set_errno (fp, ECTF_RDONLY));

  fp->ctf_dtoldid = fp->ctf_typemax;
  return 0;
}
ctf_names_t *
ctf_name_table (ctf_dict_t *fp, int kind)
{
  switch (kind)
    {
    case CTF_K_STRUCT:
      return &fp->ctf_structs;
    case CTF_K_UNION:
      return &fp->ctf_unions;
    case CTF_K_ENUM:
      return &fp->ctf_enums;
    default:
      return &fp->ctf_names;
    }
}
int
ctf_dtd_insert (ctf_dict_t *fp, ctf_dtdef_t *dtd, int flag, int kind)
{
  const char *name;

  if (ctf_dynhash_insert (fp->ctf_dthash, (void *) (uintptr_t) dtd->dtd_type,
                          dtd) < 0)
    {
      ctf_set_errno (fp, ENOMEM);
      return -1;
    }

  if (flag == CTF_ADD_ROOT && dtd->dtd_data.ctt_name
      && (name = ctf_strraw (fp, dtd->dtd_data.ctt_name)) != NULL)
    {
      if (ctf_dynhash_insert (ctf_name_table (fp, kind)->ctn_writable,
                              (char *) name, (void *) (uintptr_t)
                              dtd->dtd_type) < 0)
        {
          ctf_dynhash_remove (fp->ctf_dthash, (void *) (uintptr_t)
                              dtd->dtd_type);
          ctf_set_errno (fp, ENOMEM);
          return -1;
        }
    }
  ctf_list_append (&fp->ctf_dtdefs, dtd);
  return 0;
}
void
ctf_dtd_delete (ctf_dict_t *fp, ctf_dtdef_t *dtd)
{
  int kind = LCTF_INFO_KIND (fp, dtd->dtd_data.ctt_info);
  size_t vlen = LCTF_INFO_VLEN (fp, dtd->dtd_data.ctt_info);
  int name_kind = kind;
  const char *name;

  ctf_dynhash_remove (fp->ctf_dthash, (void *) (uintptr_t) dtd->dtd_type);

  switch (kind)
    {
    case CTF_K_STRUCT:
    case CTF_K_UNION:
      {
        ctf_lmember_t *memb = (ctf_lmember_t *) dtd->dtd_vlen;
        size_t i;

        for (i = 0; i < vlen; i++)
          ctf_str_remove_ref (fp, ctf_strraw (fp, memb[i].ctlm_name),
                              &memb[i].ctlm_name);
      }
      break;
    case CTF_K_ENUM:
      {
        ctf_enum_t *en = (ctf_enum_t *) dtd->dtd_vlen;
        size_t i;

        for (i = 0; i < vlen; i++)
          ctf_str_remove_ref (fp, ctf_strraw (fp, en[i].cte_name),
                              &en[i].cte_name);
      }
      break;
    case CTF_K_FORWARD:
      name_kind = dtd->dtd_data.ctt_type;
      break;
    }
  free (dtd->dtd_vlen);
  dtd->dtd_vlen_alloc = 0;

  if (dtd->dtd_data.ctt_name
      && (name = ctf_strraw (fp, dtd->dtd_data.ctt_name)) != NULL
      && LCTF_INFO_ISROOT (fp, dtd->dtd_data.ctt_info))
    {
      ctf_dynhash_remove (ctf_name_table (fp, name_kind)->ctn_writable,
                          name);
      ctf_str_remove_ref (fp, name, &dtd->dtd_data.ctt_name);
    }

  ctf_list_delete (&fp->ctf_dtdefs, dtd);
  free (dtd);
}
ctf_dtdef_t *
ctf_dtd_lookup (const ctf_dict_t *fp, ctf_id_t type)
{
  return (ctf_dtdef_t *)
    ctf_dynhash_lookup (fp->ctf_dthash, (void *) (uintptr_t) type);
}
ctf_dtdef_t *
ctf_dynamic_type (const ctf_dict_t *fp, ctf_id_t id)
{
  ctf_id_t idx;

  if (!(fp->ctf_flags & LCTF_RDWR))
    return NULL;

  if ((fp->ctf_flags & LCTF_CHILD) && LCTF_TYPE_ISPARENT (fp, id))
    return NULL;

  idx = LCTF_TYPE_TO_INDEX(fp, id);

  if ((unsigned long) idx <= fp->ctf_typemax)
    return ctf_dtd_lookup (fp, id);
  return NULL;
}
int
ctf_dvd_insert (ctf_dict_t *fp, ctf_dvdef_t *dvd)
{
  if (ctf_dynhash_insert (fp->ctf_dvhash, dvd->dvd_name, dvd) < 0)
    {
      ctf_set_errno (fp, ENOMEM);
      return -1;
    }
  ctf_list_append (&fp->ctf_dvdefs, dvd);
  return 0;
}
void
ctf_dvd_delete (ctf_dict_t *fp, ctf_dvdef_t *dvd)
{
  ctf_dynhash_remove (fp->ctf_dvhash, dvd->dvd_name);
  free (dvd->dvd_name);

  ctf_list_delete (&fp->ctf_dvdefs, dvd);
  free (dvd);
}
ctf_dvdef_t *
ctf_dvd_lookup (const ctf_dict_t *fp, const char *name)
{
  return (ctf_dvdef_t *) ctf_dynhash_lookup (fp->ctf_dvhash, name);
}
/* Discard all of the dynamic type definitions and variable definitions that
   have been added to the dict since the last call to ctf_update().  We locate
   such types by scanning the dtd list and deleting elements that have type IDs
   greater than ctf_dtoldid, which is set by ctf_update(), above, and by
   scanning the variable list and deleting elements that have update IDs equal
   to the current value of the last-update snapshot count (indicating that they
   were added after the most recent call to ctf_update()).  */
int
ctf_discard (ctf_dict_t *fp)
{
  ctf_snapshot_id_t last_update =
    { fp->ctf_dtoldid,
      fp->ctf_snapshot_lu + 1 };

  /* Update required?  */
  if (!(fp->ctf_flags & LCTF_DIRTY))
    return 0;

  return (ctf_rollback (fp, last_update));
}
ctf_snapshot_id_t
ctf_snapshot (ctf_dict_t *fp)
{
  ctf_snapshot_id_t snapid;
  snapid.dtd_id = fp->ctf_typemax;
  snapid.snapshot_id = fp->ctf_snapshots++;
  return snapid;
}
/* Like ctf_discard(), only discards everything after a particular ID.  */
int
ctf_rollback (ctf_dict_t *fp, ctf_snapshot_id_t id)
{
  ctf_dtdef_t *dtd, *ntd;
  ctf_dvdef_t *dvd, *nvd;

  if (!(fp->ctf_flags & LCTF_RDWR))
    return (ctf_set_errno (fp, ECTF_RDONLY));

  if (fp->ctf_snapshot_lu >= id.snapshot_id)
    return (ctf_set_errno (fp, ECTF_OVERROLLBACK));

  for (dtd = ctf_list_next (&fp->ctf_dtdefs); dtd != NULL; dtd = ntd)
    {
      int kind;
      const char *name;

      ntd = ctf_list_next (dtd);

      if (LCTF_TYPE_TO_INDEX (fp, dtd->dtd_type) <= id.dtd_id)
        continue;

      kind = LCTF_INFO_KIND (fp, dtd->dtd_data.ctt_info);
      if (kind == CTF_K_FORWARD)
        kind = dtd->dtd_data.ctt_type;

      if (dtd->dtd_data.ctt_name
          && (name = ctf_strraw (fp, dtd->dtd_data.ctt_name)) != NULL
          && LCTF_INFO_ISROOT (fp, dtd->dtd_data.ctt_info))
        {
          ctf_dynhash_remove (ctf_name_table (fp, kind)->ctn_writable,
                              name);
          ctf_str_remove_ref (fp, name, &dtd->dtd_data.ctt_name);
        }

      ctf_dynhash_remove (fp->ctf_dthash, (void *) (uintptr_t) dtd->dtd_type);
      ctf_dtd_delete (fp, dtd);
    }

  for (dvd = ctf_list_next (&fp->ctf_dvdefs); dvd != NULL; dvd = nvd)
    {
      nvd = ctf_list_next (dvd);

      if (dvd->dvd_snapshots <= id.snapshot_id)
        continue;

      ctf_dvd_delete (fp, dvd);
    }

  fp->ctf_typemax = id.dtd_id;
  fp->ctf_snapshots = id.snapshot_id;

  if (fp->ctf_snapshots == fp->ctf_snapshot_lu)
    fp->ctf_flags &= ~LCTF_DIRTY;

  return 0;
}
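
/* Illustrative usage sketch (not part of the original source): snapshots give
   callers a cheap way to undo speculative additions.  "failed" below is a
   hypothetical condition in the caller:

     ctf_snapshot_id_t snap = ctf_snapshot (fp);
     ctf_id_t tmp_struct = ctf_add_struct (fp, CTF_ADD_ROOT, "tmp");

     if (failed)
       ctf_rollback (fp, snap);

   After the rollback, everything added since the snapshot (here, "tmp") is
   discarded again.  */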
/* Note: vlen is the amount of space *allocated* for the vlen.  It may well not
   be the amount of space used (yet): the space used is declared in per-kind
   fashion in the dtd_data's info word.  */
static ctf_id_t
ctf_add_generic (ctf_dict_t *fp, uint32_t flag, const char *name, int kind,
                 size_t vlen, ctf_dtdef_t **rp)
{
  ctf_dtdef_t *dtd;
  ctf_id_t type;

  if (flag != CTF_ADD_NONROOT && flag != CTF_ADD_ROOT)
    return (ctf_set_errno (fp, EINVAL));

  if (!(fp->ctf_flags & LCTF_RDWR))
    return (ctf_set_errno (fp, ECTF_RDONLY));

  if (LCTF_INDEX_TO_TYPE (fp, fp->ctf_typemax, 1) >= CTF_MAX_TYPE)
    return (ctf_set_errno (fp, ECTF_FULL));

  if (LCTF_INDEX_TO_TYPE (fp, fp->ctf_typemax, 1) == (CTF_MAX_PTYPE - 1))
    return (ctf_set_errno (fp, ECTF_FULL));

  /* Make sure ptrtab always grows to be big enough for all types.  */
  if (ctf_grow_ptrtab (fp) < 0)
    return CTF_ERR;		/* errno is set for us.  */

  if ((dtd = calloc (1, sizeof (ctf_dtdef_t))) == NULL)
    return (ctf_set_errno (fp, EAGAIN));

  dtd->dtd_vlen_alloc = vlen;
  if (vlen > 0)
    {
      if ((dtd->dtd_vlen = calloc (1, vlen)) == NULL)
        goto oom;
    }
  else
    dtd->dtd_vlen = NULL;

  type = ++fp->ctf_typemax;
  type = LCTF_INDEX_TO_TYPE (fp, type, (fp->ctf_flags & LCTF_CHILD));

  dtd->dtd_data.ctt_name = ctf_str_add_pending (fp, name,
                                                &dtd->dtd_data.ctt_name);
  dtd->dtd_type = type;

  if (dtd->dtd_data.ctt_name == 0 && name != NULL && name[0] != '\0')
    goto oom;

  if (ctf_dtd_insert (fp, dtd, flag, kind) < 0)
    goto err;			/* errno is set for us.  */

  fp->ctf_flags |= LCTF_DIRTY;

  *rp = dtd;
  return type;

 oom:
  ctf_set_errno (fp, EAGAIN);
 err:
  free (dtd->dtd_vlen);
  free (dtd);
  return CTF_ERR;
}
/* When encoding integer sizes, we want to convert a byte count in the range
   1-8 to the closest power of 2 (e.g. 3->4, 5->8, etc).  The clp2() function
   is a clever implementation from "Hacker's Delight" by Henry Warren, Jr.  */
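
/* The body of clp2() is not reproduced above; what follows is a sketch of the
   conventional Hacker's-Delight round-up-to-power-of-two bit-twiddling that
   the comment describes and that ctf_add_encoded() below relies on.  */
static size_t
clp2 (size_t x)
{
  x--;

  x |= (x >> 1);
  x |= (x >> 2);
  x |= (x >> 4);
  x |= (x >> 8);
  x |= (x >> 16);

  return (x + 1);
}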
static ctf_id_t
ctf_add_encoded (ctf_dict_t *fp, uint32_t flag,
                 const char *name, const ctf_encoding_t *ep, uint32_t kind)
{
  ctf_dtdef_t *dtd;
  ctf_id_t type;
  uint32_t encoding;

  if (ep == NULL)
    return (ctf_set_errno (fp, EINVAL));

  if (name == NULL || name[0] == '\0')
    return (ctf_set_errno (fp, ECTF_NONAME));

  if (!ctf_assert (fp, kind == CTF_K_INTEGER || kind == CTF_K_FLOAT))
    return -1;			/* errno is set for us.  */

  if ((type = ctf_add_generic (fp, flag, name, kind, sizeof (uint32_t),
                               &dtd)) == CTF_ERR)
    return CTF_ERR;		/* errno is set for us.  */

  dtd->dtd_data.ctt_info = CTF_TYPE_INFO (kind, flag, 0);
  dtd->dtd_data.ctt_size = clp2 (P2ROUNDUP (ep->cte_bits, CHAR_BIT)
                                 / CHAR_BIT);
  switch (kind)
    {
    case CTF_K_INTEGER:
      encoding = CTF_INT_DATA (ep->cte_format, ep->cte_offset, ep->cte_bits);
      break;
    case CTF_K_FLOAT:
      encoding = CTF_FP_DATA (ep->cte_format, ep->cte_offset, ep->cte_bits);
      break;
    }
  memcpy (dtd->dtd_vlen, &encoding, sizeof (encoding));

  return type;
}
ctf_id_t
ctf_add_reftype (ctf_dict_t *fp, uint32_t flag, ctf_id_t ref, uint32_t kind)
{
  ctf_dtdef_t *dtd;
  ctf_id_t type;
  ctf_dict_t *tmp = fp;
  int child = fp->ctf_flags & LCTF_CHILD;

  if (ref == CTF_ERR || ref > CTF_MAX_TYPE)
    return (ctf_set_errno (fp, EINVAL));

  if (ref != 0 && ctf_lookup_by_id (&tmp, ref) == NULL)
    return CTF_ERR;		/* errno is set for us.  */

  if ((type = ctf_add_generic (fp, flag, NULL, kind, 0, &dtd)) == CTF_ERR)
    return CTF_ERR;		/* errno is set for us.  */

  dtd->dtd_data.ctt_info = CTF_TYPE_INFO (kind, flag, 0);
  dtd->dtd_data.ctt_type = (uint32_t) ref;

  if (kind != CTF_K_POINTER)
    return type;

  /* If we are adding a pointer, update the ptrtab, pointing at this type from
     the type it points to.  Note that ctf_typemax is at this point one higher
     than we want to check against, because it's just been incremented for the
     addition of this type.  The pptrtab is lazily-updated as needed, so is not
     touched here.  */

  uint32_t type_idx = LCTF_TYPE_TO_INDEX (fp, type);
  uint32_t ref_idx = LCTF_TYPE_TO_INDEX (fp, ref);

  if (LCTF_TYPE_ISCHILD (fp, ref) == child
      && ref_idx < fp->ctf_typemax)
    fp->ctf_ptrtab[ref_idx] = type_idx;

  return type;
}
ctf_id_t
ctf_add_slice (ctf_dict_t *fp, uint32_t flag, ctf_id_t ref,
               const ctf_encoding_t *ep)
{
  ctf_dtdef_t *dtd;
  ctf_slice_t slice;
  ctf_id_t resolved_ref = ref;
  ctf_id_t type;
  int kind;
  const ctf_type_t *tp;
  ctf_dict_t *tmp = fp;

  if (ep == NULL)
    return (ctf_set_errno (fp, EINVAL));

  if ((ep->cte_bits > 255) || (ep->cte_offset > 255))
    return (ctf_set_errno (fp, ECTF_SLICEOVERFLOW));

  if (ref == CTF_ERR || ref > CTF_MAX_TYPE)
    return (ctf_set_errno (fp, EINVAL));

  if (ref != 0 && ((tp = ctf_lookup_by_id (&tmp, ref)) == NULL))
    return CTF_ERR;		/* errno is set for us.  */

  /* Make sure we ultimately point to an integral type.  We also allow slices to
     point to the unimplemented type, for now, because the compiler can emit
     such slices, though they're not very much use.  */

  resolved_ref = ctf_type_resolve_unsliced (fp, ref);
  kind = ctf_type_kind_unsliced (fp, resolved_ref);

  if ((kind != CTF_K_INTEGER) && (kind != CTF_K_FLOAT) &&
      (kind != CTF_K_ENUM)
      && (ref != 0))
    return (ctf_set_errno (fp, ECTF_NOTINTFP));

  if ((type = ctf_add_generic (fp, flag, NULL, CTF_K_SLICE,
                               sizeof (ctf_slice_t), &dtd)) == CTF_ERR)
    return CTF_ERR;		/* errno is set for us.  */

  memset (&slice, 0, sizeof (ctf_slice_t));

  dtd->dtd_data.ctt_info = CTF_TYPE_INFO (CTF_K_SLICE, flag, 0);
  dtd->dtd_data.ctt_size = clp2 (P2ROUNDUP (ep->cte_bits, CHAR_BIT)
                                 / CHAR_BIT);
  slice.cts_type = (uint32_t) ref;
  slice.cts_bits = ep->cte_bits;
  slice.cts_offset = ep->cte_offset;
  memcpy (dtd->dtd_vlen, &slice, sizeof (ctf_slice_t));

  return type;
}
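
/* Illustrative usage sketch (not part of the original source): a slice is how
   bitfields are represented.  Assuming "int_type" is an existing integral
   type, a signed 3-bit field starting at bit offset 4 could be described as:

     ctf_encoding_t bits = { CTF_INT_SIGNED, 4, 3 };
     ctf_id_t field = ctf_add_slice (fp, CTF_ADD_NONROOT, int_type, &bits);  */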
ctf_id_t
ctf_add_integer (ctf_dict_t *fp, uint32_t flag,
                 const char *name, const ctf_encoding_t *ep)
{
  return (ctf_add_encoded (fp, flag, name, ep, CTF_K_INTEGER));
}

ctf_id_t
ctf_add_float (ctf_dict_t *fp, uint32_t flag,
               const char *name, const ctf_encoding_t *ep)
{
  return (ctf_add_encoded (fp, flag, name, ep, CTF_K_FLOAT));
}

ctf_id_t
ctf_add_pointer (ctf_dict_t *fp, uint32_t flag, ctf_id_t ref)
{
  return (ctf_add_reftype (fp, flag, ref, CTF_K_POINTER));
}
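
/* Illustrative usage sketch (not part of the original source): adding a basic
   signed 32-bit "int" and a pointer to it:

     ctf_encoding_t int_enc = { CTF_INT_SIGNED, 0, 32 };
     ctf_id_t int_type = ctf_add_integer (fp, CTF_ADD_ROOT, "int", &int_enc);
     ctf_id_t ptr_type = ctf_add_pointer (fp, CTF_ADD_ROOT, int_type);  */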
ctf_id_t
ctf_add_array (ctf_dict_t *fp, uint32_t flag, const ctf_arinfo_t *arp)
{
  ctf_dtdef_t *dtd;
  ctf_array_t cta;
  ctf_id_t type;
  ctf_dict_t *tmp = fp;

  if (arp == NULL)
    return (ctf_set_errno (fp, EINVAL));

  if (arp->ctr_contents != 0
      && ctf_lookup_by_id (&tmp, arp->ctr_contents) == NULL)
    return CTF_ERR;		/* errno is set for us.  */

  tmp = fp;
  if (ctf_lookup_by_id (&tmp, arp->ctr_index) == NULL)
    return CTF_ERR;		/* errno is set for us.  */

  if (ctf_type_kind (fp, arp->ctr_index) == CTF_K_FORWARD)
    {
      ctf_err_warn (fp, 1, ECTF_INCOMPLETE,
                    _("ctf_add_array: index type %lx is incomplete"),
                    arp->ctr_contents);
      return (ctf_set_errno (fp, ECTF_INCOMPLETE));
    }

  if ((type = ctf_add_generic (fp, flag, NULL, CTF_K_ARRAY,
                               sizeof (ctf_array_t), &dtd)) == CTF_ERR)
    return CTF_ERR;		/* errno is set for us.  */

  memset (&cta, 0, sizeof (ctf_array_t));

  dtd->dtd_data.ctt_info = CTF_TYPE_INFO (CTF_K_ARRAY, flag, 0);
  dtd->dtd_data.ctt_size = 0;
  cta.cta_contents = (uint32_t) arp->ctr_contents;
  cta.cta_index = (uint32_t) arp->ctr_index;
  cta.cta_nelems = arp->ctr_nelems;
  memcpy (dtd->dtd_vlen, &cta, sizeof (ctf_array_t));

  return type;
}
int
ctf_set_array (ctf_dict_t *fp, ctf_id_t type, const ctf_arinfo_t *arp)
{
  ctf_dtdef_t *dtd = ctf_dtd_lookup (fp, type);
  ctf_array_t *vlen;

  if (!(fp->ctf_flags & LCTF_RDWR))
    return (ctf_set_errno (fp, ECTF_RDONLY));

  if (dtd == NULL
      || LCTF_INFO_KIND (fp, dtd->dtd_data.ctt_info) != CTF_K_ARRAY)
    return (ctf_set_errno (fp, ECTF_BADID));

  vlen = (ctf_array_t *) dtd->dtd_vlen;
  fp->ctf_flags |= LCTF_DIRTY;
  vlen->cta_contents = (uint32_t) arp->ctr_contents;
  vlen->cta_index = (uint32_t) arp->ctr_index;
  vlen->cta_nelems = arp->ctr_nelems;

  return 0;
}
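
/* Illustrative usage sketch (not part of the original source): "int [10]",
   reusing "int_type" from the earlier example as both element and index type:

     ctf_arinfo_t ar;

     ar.ctr_contents = int_type;
     ar.ctr_index = int_type;
     ar.ctr_nelems = 10;
     ctf_id_t arr_type = ctf_add_array (fp, CTF_ADD_ROOT, &ar);  */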
ctf_id_t
ctf_add_function (ctf_dict_t *fp, uint32_t flag,
                  const ctf_funcinfo_t *ctc, const ctf_id_t *argv)
{
  ctf_dtdef_t *dtd;
  ctf_id_t type;
  uint32_t vlen;
  uint32_t *vdat;
  ctf_dict_t *tmp = fp;
  size_t initial_vlen;
  size_t i;

  if (!(fp->ctf_flags & LCTF_RDWR))
    return (ctf_set_errno (fp, ECTF_RDONLY));

  if (ctc == NULL || (ctc->ctc_flags & ~CTF_FUNC_VARARG) != 0
      || (ctc->ctc_argc != 0 && argv == NULL))
    return (ctf_set_errno (fp, EINVAL));

  vlen = ctc->ctc_argc;
  if (ctc->ctc_flags & CTF_FUNC_VARARG)
    vlen++;	      /* Add trailing zero to indicate varargs (see below).  */

  if (ctc->ctc_return != 0
      && ctf_lookup_by_id (&tmp, ctc->ctc_return) == NULL)
    return CTF_ERR;		/* errno is set for us.  */

  if (vlen > CTF_MAX_VLEN)
    return (ctf_set_errno (fp, EOVERFLOW));

  /* One word extra allocated for padding for 4-byte alignment if need be.
     Not reflected in vlen: we don't want to copy anything into it, and
     it's in addition to (e.g.) the trailing 0 indicating varargs.  */

  initial_vlen = (sizeof (uint32_t) * (vlen + (vlen & 1)));
  if ((type = ctf_add_generic (fp, flag, NULL, CTF_K_FUNCTION,
                               initial_vlen, &dtd)) == CTF_ERR)
    return CTF_ERR;		/* errno is set for us.  */

  vdat = (uint32_t *) dtd->dtd_vlen;

  for (i = 0; i < ctc->ctc_argc; i++)
    {
      tmp = fp;
      if (argv[i] != 0 && ctf_lookup_by_id (&tmp, argv[i]) == NULL)
        return CTF_ERR;		/* errno is set for us.  */
      vdat[i] = (uint32_t) argv[i];
    }

  dtd->dtd_data.ctt_info = CTF_TYPE_INFO (CTF_K_FUNCTION, flag, vlen);
  dtd->dtd_data.ctt_type = (uint32_t) ctc->ctc_return;

  if (ctc->ctc_flags & CTF_FUNC_VARARG)
    vdat[vlen - 1] = 0;		/* Add trailing zero to indicate varargs.  */

  return type;
}
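
/* Illustrative usage sketch (not part of the original source): a function
   type for "int f (int, int)":

     ctf_funcinfo_t fi;
     ctf_id_t args[2] = { int_type, int_type };

     fi.ctc_return = int_type;
     fi.ctc_argc = 2;
     fi.ctc_flags = 0;
     ctf_id_t func_type = ctf_add_function (fp, CTF_ADD_ROOT, &fi, args);  */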
ctf_id_t
ctf_add_struct_sized (ctf_dict_t *fp, uint32_t flag, const char *name,
                      size_t size)
{
  ctf_dtdef_t *dtd;
  ctf_id_t type = 0;
  size_t initial_vlen = sizeof (ctf_lmember_t) * INITIAL_VLEN;

  /* Promote root-visible forwards to structs.  */
  if (name != NULL)
    type = ctf_lookup_by_rawname (fp, CTF_K_STRUCT, name);

  if (type != 0 && ctf_type_kind (fp, type) == CTF_K_FORWARD)
    dtd = ctf_dtd_lookup (fp, type);
  else if ((type = ctf_add_generic (fp, flag, name, CTF_K_STRUCT,
                                    initial_vlen, &dtd)) == CTF_ERR)
    return CTF_ERR;		/* errno is set for us.  */

  /* Forwards won't have any vlen yet.  */
  if (dtd->dtd_vlen_alloc == 0)
    {
      if ((dtd->dtd_vlen = calloc (1, initial_vlen)) == NULL)
        return (ctf_set_errno (fp, ENOMEM));
      dtd->dtd_vlen_alloc = initial_vlen;
    }

  dtd->dtd_data.ctt_info = CTF_TYPE_INFO (CTF_K_STRUCT, flag, 0);
  dtd->dtd_data.ctt_size = CTF_LSIZE_SENT;
  dtd->dtd_data.ctt_lsizehi = CTF_SIZE_TO_LSIZE_HI (size);
  dtd->dtd_data.ctt_lsizelo = CTF_SIZE_TO_LSIZE_LO (size);

  return type;
}

ctf_id_t
ctf_add_struct (ctf_dict_t *fp, uint32_t flag, const char *name)
{
  return (ctf_add_struct_sized (fp, flag, name, 0));
}
ctf_id_t
ctf_add_union_sized (ctf_dict_t *fp, uint32_t flag, const char *name,
                     size_t size)
{
  ctf_dtdef_t *dtd;
  ctf_id_t type = 0;
  size_t initial_vlen = sizeof (ctf_lmember_t) * INITIAL_VLEN;

  /* Promote root-visible forwards to unions.  */
  if (name != NULL)
    type = ctf_lookup_by_rawname (fp, CTF_K_UNION, name);

  if (type != 0 && ctf_type_kind (fp, type) == CTF_K_FORWARD)
    dtd = ctf_dtd_lookup (fp, type);
  else if ((type = ctf_add_generic (fp, flag, name, CTF_K_UNION,
                                    initial_vlen, &dtd)) == CTF_ERR)
    return CTF_ERR;		/* errno is set for us */

  /* Forwards won't have any vlen yet.  */
  if (dtd->dtd_vlen_alloc == 0)
    {
      if ((dtd->dtd_vlen = calloc (1, initial_vlen)) == NULL)
        return (ctf_set_errno (fp, ENOMEM));
      dtd->dtd_vlen_alloc = initial_vlen;
    }

  dtd->dtd_data.ctt_info = CTF_TYPE_INFO (CTF_K_UNION, flag, 0);
  dtd->dtd_data.ctt_size = CTF_LSIZE_SENT;
  dtd->dtd_data.ctt_lsizehi = CTF_SIZE_TO_LSIZE_HI (size);
  dtd->dtd_data.ctt_lsizelo = CTF_SIZE_TO_LSIZE_LO (size);

  return type;
}

ctf_id_t
ctf_add_union (ctf_dict_t *fp, uint32_t flag, const char *name)
{
  return (ctf_add_union_sized (fp, flag, name, 0));
}
ctf_id_t
ctf_add_enum (ctf_dict_t *fp, uint32_t flag, const char *name)
{
  ctf_dtdef_t *dtd;
  ctf_id_t type = 0;
  size_t initial_vlen = sizeof (ctf_enum_t) * INITIAL_VLEN;

  /* Promote root-visible forwards to enums.  */
  if (name != NULL)
    type = ctf_lookup_by_rawname (fp, CTF_K_ENUM, name);

  if (type != 0 && ctf_type_kind (fp, type) == CTF_K_FORWARD)
    dtd = ctf_dtd_lookup (fp, type);
  else if ((type = ctf_add_generic (fp, flag, name, CTF_K_ENUM,
                                    initial_vlen, &dtd)) == CTF_ERR)
    return CTF_ERR;		/* errno is set for us.  */

  /* Forwards won't have any vlen yet.  */
  if (dtd->dtd_vlen_alloc == 0)
    {
      if ((dtd->dtd_vlen = calloc (1, initial_vlen)) == NULL)
        return (ctf_set_errno (fp, ENOMEM));
      dtd->dtd_vlen_alloc = initial_vlen;
    }

  dtd->dtd_data.ctt_info = CTF_TYPE_INFO (CTF_K_ENUM, flag, 0);
  dtd->dtd_data.ctt_size = fp->ctf_dmodel->ctd_int;

  return type;
}
ctf_id_t
ctf_add_enum_encoded (ctf_dict_t *fp, uint32_t flag, const char *name,
                      const ctf_encoding_t *ep)
{
  ctf_id_t type = 0;

  /* First, create the enum if need be, using most of the same machinery as
     ctf_add_enum(), to ensure that we do not allow things past that are not
     enums or forwards to them.  (This includes other slices: you cannot slice a
     slice, which would be a useless thing to do anyway.)  */

  if (name != NULL)
    type = ctf_lookup_by_rawname (fp, CTF_K_ENUM, name);

  if (type != 0)
    {
      if ((ctf_type_kind (fp, type) != CTF_K_FORWARD) &&
          (ctf_type_kind_unsliced (fp, type) != CTF_K_ENUM))
        return (ctf_set_errno (fp, ECTF_NOTINTFP));
    }
  else if ((type = ctf_add_enum (fp, flag, name)) == CTF_ERR)
    return CTF_ERR;		/* errno is set for us.  */

  /* Now attach a suitable slice to it.  */

  return ctf_add_slice (fp, flag, type, ep);
}
ctf_id_t
ctf_add_forward (ctf_dict_t *fp, uint32_t flag, const char *name,
                 uint32_t kind)
{
  ctf_dtdef_t *dtd;
  ctf_id_t type = 0;

  if (!ctf_forwardable_kind (kind))
    return (ctf_set_errno (fp, ECTF_NOTSUE));

  if (name == NULL || name[0] == '\0')
    return (ctf_set_errno (fp, ECTF_NONAME));

  /* If the type is already defined or exists as a forward tag, just
     return the ctf_id_t of the existing definition.  */

  type = ctf_lookup_by_rawname (fp, kind, name);

  if (type)
    return type;

  if ((type = ctf_add_generic (fp, flag, name, kind, 0, &dtd)) == CTF_ERR)
    return CTF_ERR;		/* errno is set for us.  */

  dtd->dtd_data.ctt_info = CTF_TYPE_INFO (CTF_K_FORWARD, flag, 0);
  dtd->dtd_data.ctt_type = kind;

  return type;
}
ctf_id_t
ctf_add_unknown (ctf_dict_t *fp, uint32_t flag, const char *name)
{
  ctf_dtdef_t *dtd;
  ctf_id_t type = 0;

  /* If a type is already defined with this name, error (if not CTF_K_UNKNOWN)
     or just return it.  */

  if (name != NULL && name[0] != '\0' && flag == CTF_ADD_ROOT
      && (type = ctf_lookup_by_rawname (fp, CTF_K_UNKNOWN, name)))
    {
      if (ctf_type_kind (fp, type) == CTF_K_UNKNOWN)
        return type;
      else
        {
          ctf_err_warn (fp, 1, ECTF_CONFLICT,
                        _("ctf_add_unknown: cannot add unknown type "
                          "named %s: type of this name already defined"),
                        name ? name : _("(unnamed type)"));
          return (ctf_set_errno (fp, ECTF_CONFLICT));
        }
    }

  if ((type = ctf_add_generic (fp, flag, name, CTF_K_UNKNOWN, 0, &dtd)) == CTF_ERR)
    return CTF_ERR;		/* errno is set for us.  */

  dtd->dtd_data.ctt_info = CTF_TYPE_INFO (CTF_K_UNKNOWN, flag, 0);
  dtd->dtd_data.ctt_type = 0;

  return type;
}
ctf_id_t
ctf_add_typedef (ctf_dict_t *fp, uint32_t flag, const char *name,
                 ctf_id_t ref)
{
  ctf_dtdef_t *dtd;
  ctf_id_t type;
  ctf_dict_t *tmp = fp;

  if (ref == CTF_ERR || ref > CTF_MAX_TYPE)
    return (ctf_set_errno (fp, EINVAL));

  if (name == NULL || name[0] == '\0')
    return (ctf_set_errno (fp, ECTF_NONAME));

  if (ref != 0 && ctf_lookup_by_id (&tmp, ref) == NULL)
    return CTF_ERR;		/* errno is set for us.  */

  if ((type = ctf_add_generic (fp, flag, name, CTF_K_TYPEDEF, 0,
                               &dtd)) == CTF_ERR)
    return CTF_ERR;		/* errno is set for us.  */

  dtd->dtd_data.ctt_info = CTF_TYPE_INFO (CTF_K_TYPEDEF, flag, 0);
  dtd->dtd_data.ctt_type = (uint32_t) ref;

  return type;
}
ctf_id_t
ctf_add_volatile (ctf_dict_t *fp, uint32_t flag, ctf_id_t ref)
{
  return (ctf_add_reftype (fp, flag, ref, CTF_K_VOLATILE));
}

ctf_id_t
ctf_add_const (ctf_dict_t *fp, uint32_t flag, ctf_id_t ref)
{
  return (ctf_add_reftype (fp, flag, ref, CTF_K_CONST));
}

ctf_id_t
ctf_add_restrict (ctf_dict_t *fp, uint32_t flag, ctf_id_t ref)
{
  return (ctf_add_reftype (fp, flag, ref, CTF_K_RESTRICT));
}
int
ctf_add_enumerator (ctf_dict_t *fp, ctf_id_t enid, const char *name,
                    int value)
{
  ctf_dtdef_t *dtd = ctf_dtd_lookup (fp, enid);
  unsigned char *old_vlen;
  ctf_enum_t *en;
  size_t i;

  uint32_t kind, vlen, root;

  if (name == NULL)
    return (ctf_set_errno (fp, EINVAL));

  if (!(fp->ctf_flags & LCTF_RDWR))
    return (ctf_set_errno (fp, ECTF_RDONLY));

  if (dtd == NULL)
    return (ctf_set_errno (fp, ECTF_BADID));

  kind = LCTF_INFO_KIND (fp, dtd->dtd_data.ctt_info);
  root = LCTF_INFO_ISROOT (fp, dtd->dtd_data.ctt_info);
  vlen = LCTF_INFO_VLEN (fp, dtd->dtd_data.ctt_info);

  if (kind != CTF_K_ENUM)
    return (ctf_set_errno (fp, ECTF_NOTENUM));

  if (vlen == CTF_MAX_VLEN)
    return (ctf_set_errno (fp, ECTF_DTFULL));

  old_vlen = dtd->dtd_vlen;
  if (ctf_grow_vlen (fp, dtd, sizeof (ctf_enum_t) * (vlen + 1)) < 0)
    return -1;			/* errno is set for us.  */
  en = (ctf_enum_t *) dtd->dtd_vlen;

  if (dtd->dtd_vlen != old_vlen)
    {
      ptrdiff_t move = (signed char *) dtd->dtd_vlen - (signed char *) old_vlen;

      /* Remove pending refs in the old vlen region and reapply them.  */

      for (i = 0; i < vlen; i++)
        ctf_str_move_pending (fp, &en[i].cte_name, move);
    }

  for (i = 0; i < vlen; i++)
    if (strcmp (ctf_strptr (fp, en[i].cte_name), name) == 0)
      return (ctf_set_errno (fp, ECTF_DUPLICATE));

  en[i].cte_name = ctf_str_add_pending (fp, name, &en[i].cte_name);
  en[i].cte_value = value;

  if (en[i].cte_name == 0 && name != NULL && name[0] != '\0')
    return -1;			/* errno is set for us.  */

  dtd->dtd_data.ctt_info = CTF_TYPE_INFO (kind, root, vlen + 1);

  fp->ctf_flags |= LCTF_DIRTY;

  return 0;
}
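
/* Illustrative usage sketch (not part of the original source):

     ctf_id_t e = ctf_add_enum (fp, CTF_ADD_ROOT, "colour");

     ctf_add_enumerator (fp, e, "RED", 0);
     ctf_add_enumerator (fp, e, "GREEN", 1);
     ctf_add_enumerator (fp, e, "BLUE", 2);  */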
int
ctf_add_member_offset (ctf_dict_t *fp, ctf_id_t souid, const char *name,
                       ctf_id_t type, unsigned long bit_offset)
{
  ctf_dtdef_t *dtd = ctf_dtd_lookup (fp, souid);

  ssize_t msize, malign, ssize;
  uint32_t kind, vlen, root;
  size_t i;
  int is_incomplete = 0;
  unsigned char *old_vlen;
  ctf_lmember_t *memb;

  if (!(fp->ctf_flags & LCTF_RDWR))
    return (ctf_set_errno (fp, ECTF_RDONLY));

  if (dtd == NULL)
    return (ctf_set_errno (fp, ECTF_BADID));

  if (name != NULL && name[0] == '\0')
    name = NULL;

  kind = LCTF_INFO_KIND (fp, dtd->dtd_data.ctt_info);
  root = LCTF_INFO_ISROOT (fp, dtd->dtd_data.ctt_info);
  vlen = LCTF_INFO_VLEN (fp, dtd->dtd_data.ctt_info);

  if (kind != CTF_K_STRUCT && kind != CTF_K_UNION)
    return (ctf_set_errno (fp, ECTF_NOTSOU));

  if (vlen == CTF_MAX_VLEN)
    return (ctf_set_errno (fp, ECTF_DTFULL));

  old_vlen = dtd->dtd_vlen;
  if (ctf_grow_vlen (fp, dtd, sizeof (ctf_lmember_t) * (vlen + 1)) < 0)
    return -1;			/* errno is set for us.  */
  memb = (ctf_lmember_t *) dtd->dtd_vlen;

  if (dtd->dtd_vlen != old_vlen)
    {
      ptrdiff_t move = (signed char *) dtd->dtd_vlen - (signed char *) old_vlen;

      /* Remove pending refs in the old vlen region and reapply them.  */

      for (i = 0; i < vlen; i++)
        ctf_str_move_pending (fp, &memb[i].ctlm_name, move);
    }

  if (name != NULL)
    {
      for (i = 0; i < vlen; i++)
        if (strcmp (ctf_strptr (fp, memb[i].ctlm_name), name) == 0)
          return (ctf_set_errno (fp, ECTF_DUPLICATE));
    }

  if ((msize = ctf_type_size (fp, type)) < 0 ||
      (malign = ctf_type_align (fp, type)) < 0)
    {
      /* The unimplemented type, and any type that resolves to it, has no size
         and no alignment: it can correspond to any number of compiler-inserted
         types.  We allow incomplete types through since they are routinely
         added to the ends of structures, and can even be added elsewhere in
         structures by the deduplicator.  They are assumed to be zero-size with
         no alignment: this is often wrong, but problems can be avoided in this
         case by explicitly specifying the size of the structure via the _sized
         functions.  The deduplicator always does this.  */

      msize = 0;
      malign = 0;
      if (ctf_errno (fp) == ECTF_NONREPRESENTABLE)
        ctf_set_errno (fp, 0);
      else if (ctf_errno (fp) == ECTF_INCOMPLETE)
        is_incomplete = 1;
      else
        return -1;		/* errno is set for us.  */
    }

  memb[vlen].ctlm_name = ctf_str_add_pending (fp, name, &memb[vlen].ctlm_name);
  memb[vlen].ctlm_type = type;
  if (memb[vlen].ctlm_name == 0 && name != NULL && name[0] != '\0')
    return -1;			/* errno is set for us.  */

  if (kind == CTF_K_STRUCT && vlen != 0)
    {
      if (bit_offset == (unsigned long) - 1)
        {
          /* Natural alignment.  */

          ctf_id_t ltype = ctf_type_resolve (fp, memb[vlen - 1].ctlm_type);
          size_t off = CTF_LMEM_OFFSET(&memb[vlen - 1]);

          ctf_encoding_t linfo;
          ssize_t lsize;

          /* Propagate any error from ctf_type_resolve.  If the last member was
             of unimplemented type, this may be -ECTF_NONREPRESENTABLE: we
             cannot insert right after such a member without explicit offset
             specification, because its alignment and size is not known.  */
          if (ltype == CTF_ERR)
            return -1;		/* errno is set for us.  */

          if (is_incomplete)
            {
              ctf_err_warn (fp, 1, ECTF_INCOMPLETE,
                            _("ctf_add_member_offset: cannot add member %s of "
                              "incomplete type %lx to struct %lx without "
                              "specifying explicit offset\n"),
                            name ? name : _("(unnamed member)"), type, souid);
              return (ctf_set_errno (fp, ECTF_INCOMPLETE));
            }

          if (ctf_type_encoding (fp, ltype, &linfo) == 0)
            off += linfo.cte_bits;
          else if ((lsize = ctf_type_size (fp, ltype)) > 0)
            off += lsize * CHAR_BIT;
          else if (lsize == -1 && ctf_errno (fp) == ECTF_INCOMPLETE)
            {
              const char *lname = ctf_strraw (fp, memb[vlen - 1].ctlm_name);

              ctf_err_warn (fp, 1, ECTF_INCOMPLETE,
                            _("ctf_add_member_offset: cannot add member %s of "
                              "type %lx to struct %lx without specifying "
                              "explicit offset after member %s of type %lx, "
                              "which is an incomplete type\n"),
                            name ? name : _("(unnamed member)"), type, souid,
                            lname ? lname : _("(unnamed member)"), ltype);
              return -1;	/* errno is set for us.  */
            }

          /* Round up the offset of the end of the last member to
             the next byte boundary, convert 'off' to bytes, and
             then round it up again to the next multiple of the
             alignment required by the new member.  Finally,
             convert back to bits and store the result in
             dmd_offset.  Technically we could do more efficient
             packing if the new member is a bit-field, but we're
             the "compiler" and ANSI says we can do as we choose.  */

          off = roundup (off, CHAR_BIT) / CHAR_BIT;
          off = roundup (off, MAX (malign, 1));
          memb[vlen].ctlm_offsethi = CTF_OFFSET_TO_LMEMHI (off * CHAR_BIT);
          memb[vlen].ctlm_offsetlo = CTF_OFFSET_TO_LMEMLO (off * CHAR_BIT);
          ssize = off + msize;
        }
      else
        {
          /* Specified offset in bits.  */

          memb[vlen].ctlm_offsethi = CTF_OFFSET_TO_LMEMHI (bit_offset);
          memb[vlen].ctlm_offsetlo = CTF_OFFSET_TO_LMEMLO (bit_offset);
          ssize = ctf_get_ctt_size (fp, &dtd->dtd_data, NULL, NULL);
          ssize = MAX (ssize, ((signed) bit_offset / CHAR_BIT) + msize);
        }
    }
  else
    {
      memb[vlen].ctlm_offsethi = 0;
      memb[vlen].ctlm_offsetlo = 0;
      ssize = ctf_get_ctt_size (fp, &dtd->dtd_data, NULL, NULL);
      ssize = MAX (ssize, msize);
    }

  dtd->dtd_data.ctt_size = CTF_LSIZE_SENT;
  dtd->dtd_data.ctt_lsizehi = CTF_SIZE_TO_LSIZE_HI (ssize);
  dtd->dtd_data.ctt_lsizelo = CTF_SIZE_TO_LSIZE_LO (ssize);
  dtd->dtd_data.ctt_info = CTF_TYPE_INFO (kind, root, vlen + 1);

  fp->ctf_flags |= LCTF_DIRTY;

  return 0;
}
int
ctf_add_member_encoded (ctf_dict_t *fp, ctf_id_t souid, const char *name,
                        ctf_id_t type, unsigned long bit_offset,
                        const ctf_encoding_t encoding)
{
  ctf_dtdef_t *dtd = ctf_dtd_lookup (fp, type);
  int kind = LCTF_INFO_KIND (fp, dtd->dtd_data.ctt_info);
  const ctf_id_t otype = type;

  if ((kind != CTF_K_INTEGER) && (kind != CTF_K_FLOAT) && (kind != CTF_K_ENUM))
    return (ctf_set_errno (fp, ECTF_NOTINTFP));

  if ((type = ctf_add_slice (fp, CTF_ADD_NONROOT, otype, &encoding)) == CTF_ERR)
    return -1;			/* errno is set for us.  */

  return ctf_add_member_offset (fp, souid, name, type, bit_offset);
}
int
ctf_add_member (ctf_dict_t *fp, ctf_id_t souid, const char *name,
                ctf_id_t type)
{
  return ctf_add_member_offset (fp, souid, name, type, (unsigned long) - 1);
}
int
ctf_add_variable (ctf_dict_t *fp, const char *name, ctf_id_t ref)
{
  ctf_dvdef_t *dvd;
  ctf_dict_t *tmp = fp;

  if (!(fp->ctf_flags & LCTF_RDWR))
    return (ctf_set_errno (fp, ECTF_RDONLY));

  if (ctf_dvd_lookup (fp, name) != NULL)
    return (ctf_set_errno (fp, ECTF_DUPLICATE));

  if (ctf_lookup_by_id (&tmp, ref) == NULL)
    return -1;			/* errno is set for us.  */

  /* Make sure this type is representable.  */
  if ((ctf_type_resolve (fp, ref) == CTF_ERR)
      && (ctf_errno (fp) == ECTF_NONREPRESENTABLE))
    return -1;

  if ((dvd = malloc (sizeof (ctf_dvdef_t))) == NULL)
    return (ctf_set_errno (fp, EAGAIN));

  if (name != NULL && (dvd->dvd_name = strdup (name)) == NULL)
    {
      free (dvd);
      return (ctf_set_errno (fp, EAGAIN));
    }
  dvd->dvd_type = ref;
  dvd->dvd_snapshots = fp->ctf_snapshots;

  if (ctf_dvd_insert (fp, dvd) < 0)
    {
      free (dvd->dvd_name);
      free (dvd);
      return -1;		/* errno is set for us.  */
    }

  fp->ctf_flags |= LCTF_DIRTY;
  return 0;
}
int
ctf_add_funcobjt_sym (ctf_dict_t *fp, int is_function, const char *name, ctf_id_t id)
{
  ctf_dict_t *tmp = fp;
  char *dupname;
  ctf_dynhash_t *h = is_function ? fp->ctf_funchash : fp->ctf_objthash;

  if (!(fp->ctf_flags & LCTF_RDWR))
    return (ctf_set_errno (fp, ECTF_RDONLY));

  if (ctf_dynhash_lookup (fp->ctf_objthash, name) != NULL ||
      ctf_dynhash_lookup (fp->ctf_funchash, name) != NULL)
    return (ctf_set_errno (fp, ECTF_DUPLICATE));

  if (ctf_lookup_by_id (&tmp, id) == NULL)
    return -1;			/* errno is set for us.  */

  if (is_function && ctf_type_kind (fp, id) != CTF_K_FUNCTION)
    return (ctf_set_errno (fp, ECTF_NOTFUNC));

  if ((dupname = strdup (name)) == NULL)
    return (ctf_set_errno (fp, ENOMEM));

  if (ctf_dynhash_insert (h, dupname, (void *) (uintptr_t) id) < 0)
    {
      free (dupname);
      return (ctf_set_errno (fp, ENOMEM));
    }
  return 0;
}
int
ctf_add_objt_sym (ctf_dict_t *fp, const char *name, ctf_id_t id)
{
  return (ctf_add_funcobjt_sym (fp, 0, name, id));
}

int
ctf_add_func_sym (ctf_dict_t *fp, const char *name, ctf_id_t id)
{
  return (ctf_add_funcobjt_sym (fp, 1, name, id));
}
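
/* Illustrative usage sketch (not part of the original source): associating
   types with a named variable and with data/function symbols:

     ctf_add_variable (fp, "my_global", int_type);
     ctf_add_objt_sym (fp, "my_global", int_type);
     ctf_add_func_sym (fp, "my_func", func_type);  */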
typedef struct ctf_bundle
{
  ctf_dict_t *ctb_dict;		/* CTF dict handle.  */
  ctf_id_t ctb_type;		/* CTF type identifier.  */
  ctf_dtdef_t *ctb_dtd;		/* CTF dynamic type definition (if any).  */
} ctf_bundle_t;
static int
enumcmp (const char *name, int value, void *arg)
{
  ctf_bundle_t *ctb = arg;
  int bvalue;

  if (ctf_enum_value (ctb->ctb_dict, ctb->ctb_type, name, &bvalue) < 0)
    {
      ctf_err_warn (ctb->ctb_dict, 0, 0,
                    _("conflict due to enum %s iteration error"), name);
      return 1;
    }
  if (value != bvalue)
    {
      ctf_err_warn (ctb->ctb_dict, 1, ECTF_CONFLICT,
                    _("conflict due to enum value change: %i versus %i"),
                    value, bvalue);
      return 1;
    }
  return 0;
}
static int
enumadd (const char *name, int value, void *arg)
{
  ctf_bundle_t *ctb = arg;

  return (ctf_add_enumerator (ctb->ctb_dict, ctb->ctb_type,
                              name, value) < 0);
}
static int
membcmp (const char *name, ctf_id_t type _libctf_unused_, unsigned long offset,
         void *arg)
{
  ctf_bundle_t *ctb = arg;
  ctf_membinfo_t ctm;

  /* Don't check nameless members (e.g. anonymous structs/unions) against each
     other.  */
  if (name[0] == 0)
    return 0;

  if (ctf_member_info (ctb->ctb_dict, ctb->ctb_type, name, &ctm) < 0)
    {
      ctf_err_warn (ctb->ctb_dict, 0, 0,
                    _("conflict due to struct member %s iteration error"),
                    name);
      return 1;
    }
  if (ctm.ctm_offset != offset)
    {
      ctf_err_warn (ctb->ctb_dict, 1, ECTF_CONFLICT,
                    _("conflict due to struct member %s offset change: "
                      "%lx versus %lx"),
                    name, ctm.ctm_offset, offset);
      return 1;
    }
  return 0;
}
/* Record the correspondence between a source and ctf_add_type()-added
   destination type: both types are translated into parent type IDs if need be,
   so they relate to the actual dictionary they are in.  Outside controlled
   circumstances (like linking) it is probably not useful to do more than
   compare these pointers, since there is nothing stopping the user closing the
   source dict whenever they want to.

   Our OOM handling here is just to not do anything, because this is called deep
   enough in the call stack that doing anything useful is painfully difficult:
   the worst consequence if we do OOM is a bit of type duplication anyway.  */
void
ctf_add_type_mapping (ctf_dict_t *src_fp, ctf_id_t src_type,
                      ctf_dict_t *dst_fp, ctf_id_t dst_type)
{
  if (LCTF_TYPE_ISPARENT (src_fp, src_type) && src_fp->ctf_parent)
    src_fp = src_fp->ctf_parent;

  src_type = LCTF_TYPE_TO_INDEX(src_fp, src_type);

  if (LCTF_TYPE_ISPARENT (dst_fp, dst_type) && dst_fp->ctf_parent)
    dst_fp = dst_fp->ctf_parent;

  dst_type = LCTF_TYPE_TO_INDEX(dst_fp, dst_type);

  if (dst_fp->ctf_link_type_mapping == NULL)
    {
      ctf_hash_fun f = ctf_hash_type_key;
      ctf_hash_eq_fun e = ctf_hash_eq_type_key;

      if ((dst_fp->ctf_link_type_mapping = ctf_dynhash_create (f, e, free,
                                                               NULL)) == NULL)
        return;
    }

  ctf_link_type_key_t *key;
  key = calloc (1, sizeof (struct ctf_link_type_key));
  if (!key)
    return;

  key->cltk_fp = src_fp;
  key->cltk_idx = src_type;

  /* No OOM checking needed, because if this doesn't work the worst we'll do is
     add a few more duplicate types (which will probably run out of memory
     anyway).  */
  ctf_dynhash_insert (dst_fp->ctf_link_type_mapping, key,
                      (void *) (uintptr_t) dst_type);
}
/* Look up a type mapping: return 0 if none.  The DST_FP is modified to point to
   the parent if need be.  The ID returned is from the dst_fp's perspective.  */
ctf_id_t
ctf_type_mapping (ctf_dict_t *src_fp, ctf_id_t src_type, ctf_dict_t **dst_fp)
{
  ctf_link_type_key_t key;
  ctf_dict_t *target_fp = *dst_fp;
  ctf_id_t dst_type = 0;

  if (LCTF_TYPE_ISPARENT (src_fp, src_type) && src_fp->ctf_parent)
    src_fp = src_fp->ctf_parent;

  src_type = LCTF_TYPE_TO_INDEX(src_fp, src_type);
  key.cltk_fp = src_fp;
  key.cltk_idx = src_type;

  if (target_fp->ctf_link_type_mapping)
    dst_type = (uintptr_t) ctf_dynhash_lookup (target_fp->ctf_link_type_mapping,
                                               &key);

  if (dst_type != 0)
    {
      dst_type = LCTF_INDEX_TO_TYPE (target_fp, dst_type,
                                     target_fp->ctf_parent != NULL);
      *dst_fp = target_fp;
      return dst_type;
    }

  if (target_fp->ctf_parent)
    target_fp = target_fp->ctf_parent;
  else
    return 0;

  if (target_fp->ctf_link_type_mapping)
    dst_type = (uintptr_t) ctf_dynhash_lookup (target_fp->ctf_link_type_mapping,
                                               &key);

  if (dst_type)
    dst_type = LCTF_INDEX_TO_TYPE (target_fp, dst_type,
                                   target_fp->ctf_parent != NULL);

  *dst_fp = target_fp;
  return dst_type;
}
/* The ctf_add_type routine is used to copy a type from a source CTF dictionary
   to a dynamic destination dictionary.  This routine operates recursively by
   following the source type's links and embedded member types.  If the
   destination dict already contains a named type which has the same attributes,
   then we succeed and return this type but no changes occur.  */
static ctf_id_t
ctf_add_type_internal (ctf_dict_t *dst_fp, ctf_dict_t *src_fp, ctf_id_t src_type,
                       ctf_dict_t *proc_tracking_fp)
{
  ctf_id_t dst_type = CTF_ERR;
  uint32_t dst_kind = CTF_K_UNKNOWN;
  ctf_dict_t *tmp_fp = dst_fp;
  ctf_id_t tmp;

  const char *name;
  uint32_t kind, forward_kind, flag, vlen;

  const ctf_type_t *src_tp, *dst_tp;
  ctf_bundle_t src, dst;
  ctf_encoding_t src_en, dst_en;
  ctf_arinfo_t src_ar, dst_ar;

  ctf_funcinfo_t ctc;

  ctf_id_t orig_src_type = src_type;

  if (!(dst_fp->ctf_flags & LCTF_RDWR))
    return (ctf_set_errno (dst_fp, ECTF_RDONLY));

  if ((src_tp = ctf_lookup_by_id (&src_fp, src_type)) == NULL)
    return (ctf_set_errno (dst_fp, ctf_errno (src_fp)));

  if ((ctf_type_resolve (src_fp, src_type) == CTF_ERR)
      && (ctf_errno (src_fp) == ECTF_NONREPRESENTABLE))
    return (ctf_set_errno (dst_fp, ECTF_NONREPRESENTABLE));

  name = ctf_strptr (src_fp, src_tp->ctt_name);
  kind = LCTF_INFO_KIND (src_fp, src_tp->ctt_info);
  flag = LCTF_INFO_ISROOT (src_fp, src_tp->ctt_info);
  vlen = LCTF_INFO_VLEN (src_fp, src_tp->ctt_info);

  /* If this is a type we are currently in the middle of adding, hand it
     straight back.  (This lets us handle self-referential structures without
     considering forwards and empty structures the same as their completed
     forms.)  */

  tmp = ctf_type_mapping (src_fp, src_type, &tmp_fp);

  if (tmp != 0)
    {
      if (ctf_dynhash_lookup (proc_tracking_fp->ctf_add_processing,
                              (void *) (uintptr_t) src_type))
        return tmp;

      /* If this type has already been added from this dictionary, and is the
         same kind and (if a struct or union) has the same number of members,
         hand it straight back.  */

      if (ctf_type_kind_unsliced (tmp_fp, tmp) == (int) kind)
        {
          if (kind == CTF_K_STRUCT || kind == CTF_K_UNION
              || kind == CTF_K_ENUM)
            {
              if ((dst_tp = ctf_lookup_by_id (&tmp_fp, dst_type)) != NULL)
                if (vlen == LCTF_INFO_VLEN (tmp_fp, dst_tp->ctt_info))
                  return tmp;
            }
          else
            return tmp;
        }
    }

  forward_kind = kind;
  if (kind == CTF_K_FORWARD)
    forward_kind = src_tp->ctt_type;

  /* If the source type has a name and is a root type (visible at the top-level
     scope), lookup the name in the destination dictionary and verify that it is
     of the same kind before we do anything else.  */

  if ((flag & CTF_ADD_ROOT) && name[0] != '\0'
      && (tmp = ctf_lookup_by_rawname (dst_fp, forward_kind, name)) != 0)
    {
      dst_type = tmp;
      dst_kind = ctf_type_kind_unsliced (dst_fp, dst_type);
    }

  /* If an identically named dst_type exists, fail with ECTF_CONFLICT
     unless dst_type is a forward declaration and src_type is a struct,
     union, or enum (i.e. the definition of the previous forward decl).

     We also allow addition in the opposite order (addition of a forward when a
     struct, union, or enum already exists), which is a NOP and returns the
     already-present struct, union, or enum.  */

  if (dst_type != CTF_ERR && dst_kind != kind)
    {
      if (kind == CTF_K_FORWARD
          && (dst_kind == CTF_K_ENUM || dst_kind == CTF_K_STRUCT
              || dst_kind == CTF_K_UNION))
        {
          ctf_add_type_mapping (src_fp, src_type, dst_fp, dst_type);
          return dst_type;
        }

      if (dst_kind != CTF_K_FORWARD
          || (kind != CTF_K_ENUM && kind != CTF_K_STRUCT
              && kind != CTF_K_UNION))
        {
          ctf_err_warn (dst_fp, 1, ECTF_CONFLICT,
                        _("ctf_add_type: conflict for type %s: "
                          "kinds differ, new: %i; old (ID %lx): %i"),
                        name, kind, dst_type, dst_kind);
          return (ctf_set_errno (dst_fp, ECTF_CONFLICT));
        }
    }

  /* We take special action for an integer, float, or slice since it is
     described not only by its name but also its encoding.  For integers,
     bit-fields exploit this degeneracy.  */

  if (kind == CTF_K_INTEGER || kind == CTF_K_FLOAT || kind == CTF_K_SLICE)
    {
      if (ctf_type_encoding (src_fp, src_type, &src_en) != 0)
        return (ctf_set_errno (dst_fp, ctf_errno (src_fp)));

      if (dst_type != CTF_ERR)
        {
          ctf_dict_t *fp = dst_fp;

          if ((dst_tp = ctf_lookup_by_id (&fp, dst_type)) == NULL)
            return CTF_ERR;

          if (ctf_type_encoding (dst_fp, dst_type, &dst_en) != 0)
            return CTF_ERR;			/* errno set for us.  */

          if (LCTF_INFO_ISROOT (fp, dst_tp->ctt_info) & CTF_ADD_ROOT)
            {
              /* The type that we found in the hash is also root-visible.  If
                 the two types match then use the existing one; otherwise,
                 declare a conflict.  Note: slices are not certain to match
                 even if there is no conflict: we must check the contained type
                 too.  */

              if (memcmp (&src_en, &dst_en, sizeof (ctf_encoding_t)) == 0)
                {
                  if (kind != CTF_K_SLICE)
                    {
                      ctf_add_type_mapping (src_fp, src_type, dst_fp, dst_type);
                      return dst_type;
                    }
                }
              else
                return (ctf_set_errno (dst_fp, ECTF_CONFLICT));
            }
          else
            {
              /* We found a non-root-visible type in the hash.  If its encoding
                 is the same, we can reuse it, unless it is a slice.  */

              if (memcmp (&src_en, &dst_en, sizeof (ctf_encoding_t)) == 0)
                {
                  if (kind != CTF_K_SLICE)
                    {
                      ctf_add_type_mapping (src_fp, src_type, dst_fp, dst_type);
                      return dst_type;
                    }
                }
            }
        }
    }

  src.ctb_dict = src_fp;
  src.ctb_type = src_type;
  src.ctb_dtd = NULL;

  dst.ctb_dict = dst_fp;
  dst.ctb_type = dst_type;
  dst.ctb_dtd = NULL;

  /* Now perform kind-specific processing.  If dst_type is CTF_ERR, then we add
     a new type with the same properties as src_type to dst_fp.  If dst_type is
     not CTF_ERR, then we verify that dst_type has the same attributes as
     src_type.  We recurse for embedded references.  Before we start, we note
     that we are processing this type, to prevent infinite recursion: we do not
     re-process any type that appears in this list.  The list is emptied
     wholesale at the end of processing everything in this recursive stack.  */

  if (ctf_dynhash_insert (proc_tracking_fp->ctf_add_processing,
                          (void *) (uintptr_t) src_type, (void *) 1) < 0)
    return ctf_set_errno (dst_fp, ENOMEM);
  switch (kind)
    {
    case CTF_K_INTEGER:
      /* If we found a match we will have either returned it or declared a
         conflict.  */
      dst_type = ctf_add_integer (dst_fp, flag, name, &src_en);
      break;

    case CTF_K_FLOAT:
      /* If we found a match we will have either returned it or declared a
         conflict.  */
      dst_type = ctf_add_float (dst_fp, flag, name, &src_en);
      break;

    case CTF_K_SLICE:
      /* We have checked for conflicting encodings: now try to add the
         contained type.  */
      src_type = ctf_type_reference (src_fp, src_type);
      src_type = ctf_add_type_internal (dst_fp, src_fp, src_type,
                                        proc_tracking_fp);

      if (src_type == CTF_ERR)
        return CTF_ERR;				/* errno is set for us.  */

      dst_type = ctf_add_slice (dst_fp, flag, src_type, &src_en);
      break;

    case CTF_K_POINTER:
    case CTF_K_VOLATILE:
    case CTF_K_CONST:
    case CTF_K_RESTRICT:
      src_type = ctf_type_reference (src_fp, src_type);
      src_type = ctf_add_type_internal (dst_fp, src_fp, src_type,
                                        proc_tracking_fp);

      if (src_type == CTF_ERR)
        return CTF_ERR;				/* errno is set for us.  */

      dst_type = ctf_add_reftype (dst_fp, flag, src_type, kind);
      break;

    case CTF_K_ARRAY:
      if (ctf_array_info (src_fp, src_type, &src_ar) != 0)
        return (ctf_set_errno (dst_fp, ctf_errno (src_fp)));

      src_ar.ctr_contents =
        ctf_add_type_internal (dst_fp, src_fp, src_ar.ctr_contents,
                               proc_tracking_fp);
      src_ar.ctr_index = ctf_add_type_internal (dst_fp, src_fp,
                                                src_ar.ctr_index,
                                                proc_tracking_fp);
      src_ar.ctr_nelems = src_ar.ctr_nelems;

      if (src_ar.ctr_contents == CTF_ERR || src_ar.ctr_index == CTF_ERR)
        return CTF_ERR;				/* errno is set for us.  */

      if (dst_type != CTF_ERR)
        {
          if (ctf_array_info (dst_fp, dst_type, &dst_ar) != 0)
            return CTF_ERR;			/* errno is set for us.  */

          if (memcmp (&src_ar, &dst_ar, sizeof (ctf_arinfo_t)))
            {
              ctf_err_warn (dst_fp, 1, ECTF_CONFLICT,
                            _("conflict for type %s against ID %lx: array info "
                              "differs, old %lx/%lx/%x; new: %lx/%lx/%x"),
                            name, dst_type, src_ar.ctr_contents,
                            src_ar.ctr_index, src_ar.ctr_nelems,
                            dst_ar.ctr_contents, dst_ar.ctr_index,
                            dst_ar.ctr_nelems);
              return (ctf_set_errno (dst_fp, ECTF_CONFLICT));
            }
        }
      else
        dst_type = ctf_add_array (dst_fp, flag, &src_ar);
      break;

    case CTF_K_FUNCTION:
      ctc.ctc_return = ctf_add_type_internal (dst_fp, src_fp,
                                              src_tp->ctt_type,
                                              proc_tracking_fp);
      ctc.ctc_argc = 0;
      ctc.ctc_flags = 0;

      if (ctc.ctc_return == CTF_ERR)
        return CTF_ERR;				/* errno is set for us.  */

      dst_type = ctf_add_function (dst_fp, flag, &ctc, NULL);
      break;

    case CTF_K_STRUCT:
    case CTF_K_UNION:
      {
        ctf_next_t *i = NULL;
        ssize_t offset;
        const char *membname;
        ctf_id_t src_membtype;

        /* Technically to match a struct or union we need to check both
           ways (src members vs. dst, dst members vs. src) but we make
           this more optimal by only checking src vs. dst and comparing
           the total size of the structure (which we must do anyway)
           which covers the possibility of dst members not in src.
           This optimization can be defeated for unions, but is so
           pathological as to render it irrelevant for our purposes.  */

        if (dst_type != CTF_ERR && kind != CTF_K_FORWARD
            && dst_kind != CTF_K_FORWARD)
          {
            if (ctf_type_size (src_fp, src_type) !=
                ctf_type_size (dst_fp, dst_type))
              {
                ctf_err_warn (dst_fp, 1, ECTF_CONFLICT,
                              _("conflict for type %s against ID %lx: union "
                                "size differs, old %li, new %li"), name,
                              dst_type, (long) ctf_type_size (src_fp, src_type),
                              (long) ctf_type_size (dst_fp, dst_type));
                return (ctf_set_errno (dst_fp, ECTF_CONFLICT));
              }

            if (ctf_member_iter (src_fp, src_type, membcmp, &dst))
              {
                ctf_err_warn (dst_fp, 1, ECTF_CONFLICT,
                              _("conflict for type %s against ID %lx: members "
                                "differ, see above"), name, dst_type);
                return (ctf_set_errno (dst_fp, ECTF_CONFLICT));
              }

            break;
          }

        dst_type = ctf_add_struct_sized (dst_fp, flag, name,
                                         ctf_type_size (src_fp, src_type));
        if (dst_type == CTF_ERR)
          return CTF_ERR;			/* errno is set for us.  */

        /* Pre-emptively add this struct to the type mapping so that
           structures that refer to themselves work.  */
        ctf_add_type_mapping (src_fp, src_type, dst_fp, dst_type);

        while ((offset = ctf_member_next (src_fp, src_type, &i, &membname,
                                          &src_membtype, 0)) >= 0)
          {
            ctf_dict_t *dst = dst_fp;
            ctf_id_t dst_membtype = ctf_type_mapping (src_fp, src_membtype, &dst);

            if (dst_membtype == 0)
              {
                dst_membtype = ctf_add_type_internal (dst_fp, src_fp,
                                                      src_membtype,
                                                      proc_tracking_fp);
                if (dst_membtype == CTF_ERR)
                  {
                    if (ctf_errno (dst_fp) != ECTF_NONREPRESENTABLE)
                      {
                        ctf_next_destroy (i);
                        break;
                      }
                  }
              }

            if (ctf_add_member_offset (dst_fp, dst_type, membname,
                                       dst_membtype, offset) < 0)
              {
                ctf_next_destroy (i);
                break;
              }
          }
        if (ctf_errno (src_fp) != ECTF_NEXT_END)
          return CTF_ERR;			/* errno is set for us.  */
        break;
      }

    case CTF_K_ENUM:
      if (dst_type != CTF_ERR && kind != CTF_K_FORWARD
          && dst_kind != CTF_K_FORWARD)
        {
          if (ctf_enum_iter (src_fp, src_type, enumcmp, &dst)
              || ctf_enum_iter (dst_fp, dst_type, enumcmp, &src))
            {
              ctf_err_warn (dst_fp, 1, ECTF_CONFLICT,
                            _("conflict for enum %s against ID %lx: members "
                              "differ, see above"), name, dst_type);
              return (ctf_set_errno (dst_fp, ECTF_CONFLICT));
            }
        }
      else
        {
          dst_type = ctf_add_enum (dst_fp, flag, name);
          if ((dst.ctb_type = dst_type) == CTF_ERR
              || ctf_enum_iter (src_fp, src_type, enumadd, &dst))
            return CTF_ERR;			/* errno is set for us */
        }
      break;

    case CTF_K_FORWARD:
      if (dst_type == CTF_ERR)
        dst_type = ctf_add_forward (dst_fp, flag, name, forward_kind);
      break;

    case CTF_K_TYPEDEF:
      src_type = ctf_type_reference (src_fp, src_type);
      src_type = ctf_add_type_internal (dst_fp, src_fp, src_type,
                                        proc_tracking_fp);

      if (src_type == CTF_ERR)
        return CTF_ERR;				/* errno is set for us.  */

      /* If dst_type is not CTF_ERR at this point, we should check if
         ctf_type_reference(dst_fp, dst_type) != src_type and if so fail with
         ECTF_CONFLICT.  However, this causes problems with bitness typedefs
         that vary based on things like if 32-bit then pid_t is int otherwise
         long.  We therefore omit this check and assume that if the identically
         named typedef already exists in dst_fp, it is correct or
         equivalent.  */

      if (dst_type == CTF_ERR)
        dst_type = ctf_add_typedef (dst_fp, flag, name, src_type);

      break;

    default:
      return (ctf_set_errno (dst_fp, ECTF_CORRUPT));
    }

  if (dst_type != CTF_ERR)
    ctf_add_type_mapping (src_fp, orig_src_type, dst_fp, dst_type);
  return dst_type;
}
ctf_id_t
ctf_add_type (ctf_dict_t *dst_fp, ctf_dict_t *src_fp, ctf_id_t src_type)
{
  ctf_id_t id;

  if (!src_fp->ctf_add_processing)
    src_fp->ctf_add_processing = ctf_dynhash_create (ctf_hash_integer,
                                                     ctf_hash_eq_integer,
                                                     NULL, NULL);

  /* We store the hash on the source, because it contains only source type IDs:
     but callers will invariably expect errors to appear on the dest.  */
  if (!src_fp->ctf_add_processing)
    return (ctf_set_errno (dst_fp, ENOMEM));

  id = ctf_add_type_internal (dst_fp, src_fp, src_type, src_fp);
  ctf_dynhash_empty (src_fp->ctf_add_processing);

  return id;
}
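
/* Illustrative usage sketch (not part of the original source): copying one
   type (and, recursively, everything it references) from a source dict into
   a writable destination dict:

     ctf_id_t copied = ctf_add_type (dst, src, src_type);

     if (copied == CTF_ERR)
       fprintf (stderr, "copy failed: %s\n",
                ctf_errmsg (ctf_errno (dst)));  */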