1 /* Low level packing and unpacking of values for GDB, the GNU Debugger.
3 Copyright (C) 1986-2023 Free Software Foundation, Inc.
5 This file is part of GDB.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21 #include "arch-utils.h"
33 #include "target-float.h"
36 #include "cli/cli-decode.h"
37 #include "extension.h"
39 #include "tracepoint.h"
41 #include "user-regs.h"
47 #include "completer.h"
48 #include "gdbsupport/selftest.h"
49 #include "gdbsupport/array-view.h"
50 #include "cli/cli-style.h"
55 /* Definition of a user function. */
56 struct internal_function
58 /* The name of the function. It is a bit odd to have this in the
59 function itself -- the user might use a differently-named
60 convenience variable to hold the function. */
64 internal_function_fn handler
;
66 /* User data for the handler. */
70 /* Returns true if the ranges defined by [offset1, offset1+len1) and
71 [offset2, offset2+len2) overlap. */
74 ranges_overlap (LONGEST offset1
, ULONGEST len1
,
75 LONGEST offset2
, ULONGEST len2
)
79 l
= std::max (offset1
, offset2
);
80 h
= std::min (offset1
+ len1
, offset2
+ len2
);
84 /* Returns true if RANGES contains any range that overlaps [OFFSET,
88 ranges_contain (const std::vector
<range
> &ranges
, LONGEST offset
,
96 /* We keep ranges sorted by offset and coalesce overlapping and
97 contiguous ranges, so to check if a range list contains a given
98 range, we can do a binary search for the position the given range
99 would be inserted if we only considered the starting OFFSET of
100 ranges. We call that position I. Since we also have LENGTH to
101 care for (this is a range afterall), we need to check if the
102 _previous_ range overlaps the I range. E.g.,
106 |---| |---| |------| ... |--|
111 In the case above, the binary search would return `I=1', meaning,
112 this OFFSET should be inserted at position 1, and the current
113 position 1 should be pushed further (and before 2). But, `0'
116 Then we need to check if the I range overlaps the I range itself.
121 |---| |---| |-------| ... |--|
128 auto i
= std::lower_bound (ranges
.begin (), ranges
.end (), what
);
130 if (i
> ranges
.begin ())
132 const struct range
&bef
= *(i
- 1);
134 if (ranges_overlap (bef
.offset
, bef
.length
, offset
, length
))
138 if (i
< ranges
.end ())
140 const struct range
&r
= *i
;
142 if (ranges_overlap (r
.offset
, r
.length
, offset
, length
))
/* Command list for internal-function related commands.  */
static struct cmd_list_element *functionlist;
153 if (this->lval () == lval_computed
)
155 const struct lval_funcs
*funcs
= m_location
.computed
.funcs
;
157 if (funcs
->free_closure
)
158 funcs
->free_closure (this);
160 else if (this->lval () == lval_xcallable
)
161 delete m_location
.xm_worker
;
169 return type ()->arch ();
173 value::bits_available (LONGEST offset
, ULONGEST length
) const
175 gdb_assert (!m_lazy
);
177 /* Don't pretend we have anything available there in the history beyond
178 the boundaries of the value recorded. It's not like inferior memory
179 where there is actual stuff underneath. */
180 ULONGEST val_len
= TARGET_CHAR_BIT
* enclosing_type ()->length ();
181 return !((m_in_history
182 && (offset
< 0 || offset
+ length
> val_len
))
183 || ranges_contain (m_unavailable
, offset
, length
));
187 value::bytes_available (LONGEST offset
, ULONGEST length
) const
189 ULONGEST sign
= (1ULL << (sizeof (ULONGEST
) * 8 - 1)) / TARGET_CHAR_BIT
;
190 ULONGEST mask
= (sign
<< 1) - 1;
192 if (offset
!= ((offset
& mask
) ^ sign
) - sign
193 || length
!= ((length
& mask
) ^ sign
) - sign
194 || (length
> 0 && (~offset
& (offset
+ length
- 1) & sign
) != 0))
195 error (_("Integer overflow in data location calculation"));
197 return bits_available (offset
* TARGET_CHAR_BIT
, length
* TARGET_CHAR_BIT
);
201 value::bits_any_optimized_out (int bit_offset
, int bit_length
) const
203 gdb_assert (!m_lazy
);
205 return ranges_contain (m_optimized_out
, bit_offset
, bit_length
);
209 value::entirely_available ()
211 /* We can only tell whether the whole value is available when we try
216 if (m_unavailable
.empty ())
224 value::entirely_covered_by_range_vector (const std::vector
<range
> &ranges
)
226 /* We can only tell whether the whole value is optimized out /
227 unavailable when we try to read it. */
231 if (ranges
.size () == 1)
233 const struct range
&t
= ranges
[0];
236 && t
.length
== TARGET_CHAR_BIT
* enclosing_type ()->length ())
243 /* Insert into the vector pointed to by VECTORP the bit range starting of
244 OFFSET bits, and extending for the next LENGTH bits. */
247 insert_into_bit_range_vector (std::vector
<range
> *vectorp
,
248 LONGEST offset
, ULONGEST length
)
252 /* Insert the range sorted. If there's overlap or the new range
253 would be contiguous with an existing range, merge. */
255 newr
.offset
= offset
;
256 newr
.length
= length
;
258 /* Do a binary search for the position the given range would be
259 inserted if we only considered the starting OFFSET of ranges.
260 Call that position I. Since we also have LENGTH to care for
261 (this is a range afterall), we need to check if the _previous_
262 range overlaps the I range. E.g., calling R the new range:
264 #1 - overlaps with previous
268 |---| |---| |------| ... |--|
273 In the case #1 above, the binary search would return `I=1',
274 meaning, this OFFSET should be inserted at position 1, and the
275 current position 1 should be pushed further (and become 2). But,
276 note that `0' overlaps with R, so we want to merge them.
278 A similar consideration needs to be taken if the new range would
279 be contiguous with the previous range:
281 #2 - contiguous with previous
285 |--| |---| |------| ... |--|
290 If there's no overlap with the previous range, as in:
292 #3 - not overlapping and not contiguous
296 |--| |---| |------| ... |--|
303 #4 - R is the range with lowest offset
307 |--| |---| |------| ... |--|
312 ... we just push the new range to I.
314 All the 4 cases above need to consider that the new range may
315 also overlap several of the ranges that follow, or that R may be
316 contiguous with the following range, and merge. E.g.,
318 #5 - overlapping following ranges
321 |------------------------|
322 |--| |---| |------| ... |--|
331 |--| |---| |------| ... |--|
338 auto i
= std::lower_bound (vectorp
->begin (), vectorp
->end (), newr
);
339 if (i
> vectorp
->begin ())
341 struct range
&bef
= *(i
- 1);
343 if (ranges_overlap (bef
.offset
, bef
.length
, offset
, length
))
346 LONGEST l
= std::min (bef
.offset
, offset
);
347 LONGEST h
= std::max (bef
.offset
+ bef
.length
, offset
+ length
);
353 else if (offset
== bef
.offset
+ bef
.length
)
356 bef
.length
+= length
;
362 i
= vectorp
->insert (i
, newr
);
368 i
= vectorp
->insert (i
, newr
);
371 /* Check whether the ranges following the one we've just added or
372 touched can be folded in (#5 above). */
373 if (i
!= vectorp
->end () && i
+ 1 < vectorp
->end ())
378 /* Get the range we just touched. */
379 struct range
&t
= *i
;
383 for (; i
< vectorp
->end (); i
++)
385 struct range
&r
= *i
;
386 if (r
.offset
<= t
.offset
+ t
.length
)
390 l
= std::min (t
.offset
, r
.offset
);
391 h
= std::max (t
.offset
+ t
.length
, r
.offset
+ r
.length
);
400 /* If we couldn't merge this one, we won't be able to
401 merge following ones either, since the ranges are
402 always sorted by OFFSET. */
408 vectorp
->erase (next
, next
+ removed
);
413 value::mark_bits_unavailable (LONGEST offset
, ULONGEST length
)
415 insert_into_bit_range_vector (&m_unavailable
, offset
, length
);
419 value::mark_bytes_unavailable (LONGEST offset
, ULONGEST length
)
421 mark_bits_unavailable (offset
* TARGET_CHAR_BIT
,
422 length
* TARGET_CHAR_BIT
);
425 /* Find the first range in RANGES that overlaps the range defined by
426 OFFSET and LENGTH, starting at element POS in the RANGES vector,
427 Returns the index into RANGES where such overlapping range was
428 found, or -1 if none was found. */
431 find_first_range_overlap (const std::vector
<range
> *ranges
, int pos
,
432 LONGEST offset
, LONGEST length
)
436 for (i
= pos
; i
< ranges
->size (); i
++)
438 const range
&r
= (*ranges
)[i
];
439 if (ranges_overlap (r
.offset
, r
.length
, offset
, length
))
446 /* Compare LENGTH_BITS of memory at PTR1 + OFFSET1_BITS with the memory at
447 PTR2 + OFFSET2_BITS. Return 0 if the memory is the same, otherwise
450 It must always be the case that:
451 OFFSET1_BITS % TARGET_CHAR_BIT == OFFSET2_BITS % TARGET_CHAR_BIT
453 It is assumed that memory can be accessed from:
454 PTR + (OFFSET_BITS / TARGET_CHAR_BIT)
456 PTR + ((OFFSET_BITS + LENGTH_BITS + TARGET_CHAR_BIT - 1)
457 / TARGET_CHAR_BIT) */
459 memcmp_with_bit_offsets (const gdb_byte
*ptr1
, size_t offset1_bits
,
460 const gdb_byte
*ptr2
, size_t offset2_bits
,
463 gdb_assert (offset1_bits
% TARGET_CHAR_BIT
464 == offset2_bits
% TARGET_CHAR_BIT
);
466 if (offset1_bits
% TARGET_CHAR_BIT
!= 0)
469 gdb_byte mask
, b1
, b2
;
471 /* The offset from the base pointers PTR1 and PTR2 is not a complete
472 number of bytes. A number of bits up to either the next exact
473 byte boundary, or LENGTH_BITS (which ever is sooner) will be
475 bits
= TARGET_CHAR_BIT
- offset1_bits
% TARGET_CHAR_BIT
;
476 gdb_assert (bits
< sizeof (mask
) * TARGET_CHAR_BIT
);
477 mask
= (1 << bits
) - 1;
479 if (length_bits
< bits
)
481 mask
&= ~(gdb_byte
) ((1 << (bits
- length_bits
)) - 1);
485 /* Now load the two bytes and mask off the bits we care about. */
486 b1
= *(ptr1
+ offset1_bits
/ TARGET_CHAR_BIT
) & mask
;
487 b2
= *(ptr2
+ offset2_bits
/ TARGET_CHAR_BIT
) & mask
;
492 /* Now update the length and offsets to take account of the bits
493 we've just compared. */
495 offset1_bits
+= bits
;
496 offset2_bits
+= bits
;
499 if (length_bits
% TARGET_CHAR_BIT
!= 0)
503 gdb_byte mask
, b1
, b2
;
505 /* The length is not an exact number of bytes. After the previous
506 IF.. block then the offsets are byte aligned, or the
507 length is zero (in which case this code is not reached). Compare
508 a number of bits at the end of the region, starting from an exact
510 bits
= length_bits
% TARGET_CHAR_BIT
;
511 o1
= offset1_bits
+ length_bits
- bits
;
512 o2
= offset2_bits
+ length_bits
- bits
;
514 gdb_assert (bits
< sizeof (mask
) * TARGET_CHAR_BIT
);
515 mask
= ((1 << bits
) - 1) << (TARGET_CHAR_BIT
- bits
);
517 gdb_assert (o1
% TARGET_CHAR_BIT
== 0);
518 gdb_assert (o2
% TARGET_CHAR_BIT
== 0);
520 b1
= *(ptr1
+ o1
/ TARGET_CHAR_BIT
) & mask
;
521 b2
= *(ptr2
+ o2
/ TARGET_CHAR_BIT
) & mask
;
531 /* We've now taken care of any stray "bits" at the start, or end of
532 the region to compare, the remainder can be covered with a simple
534 gdb_assert (offset1_bits
% TARGET_CHAR_BIT
== 0);
535 gdb_assert (offset2_bits
% TARGET_CHAR_BIT
== 0);
536 gdb_assert (length_bits
% TARGET_CHAR_BIT
== 0);
538 return memcmp (ptr1
+ offset1_bits
/ TARGET_CHAR_BIT
,
539 ptr2
+ offset2_bits
/ TARGET_CHAR_BIT
,
540 length_bits
/ TARGET_CHAR_BIT
);
543 /* Length is zero, regions match. */
547 /* Helper struct for find_first_range_overlap_and_match and
548 value_contents_bits_eq. Keep track of which slot of a given ranges
549 vector have we last looked at. */
551 struct ranges_and_idx
554 const std::vector
<range
> *ranges
;
556 /* The range we've last found in RANGES. Given ranges are sorted,
557 we can start the next lookup here. */
561 /* Helper function for value_contents_bits_eq. Compare LENGTH bits of
562 RP1's ranges starting at OFFSET1 bits with LENGTH bits of RP2's
563 ranges starting at OFFSET2 bits. Return true if the ranges match
564 and fill in *L and *H with the overlapping window relative to
565 (both) OFFSET1 or OFFSET2. */
568 find_first_range_overlap_and_match (struct ranges_and_idx
*rp1
,
569 struct ranges_and_idx
*rp2
,
570 LONGEST offset1
, LONGEST offset2
,
571 ULONGEST length
, ULONGEST
*l
, ULONGEST
*h
)
573 rp1
->idx
= find_first_range_overlap (rp1
->ranges
, rp1
->idx
,
575 rp2
->idx
= find_first_range_overlap (rp2
->ranges
, rp2
->idx
,
578 if (rp1
->idx
== -1 && rp2
->idx
== -1)
584 else if (rp1
->idx
== -1 || rp2
->idx
== -1)
588 const range
*r1
, *r2
;
592 r1
= &(*rp1
->ranges
)[rp1
->idx
];
593 r2
= &(*rp2
->ranges
)[rp2
->idx
];
595 /* Get the unavailable windows intersected by the incoming
596 ranges. The first and last ranges that overlap the argument
597 range may be wider than said incoming arguments ranges. */
598 l1
= std::max (offset1
, r1
->offset
);
599 h1
= std::min (offset1
+ length
, r1
->offset
+ r1
->length
);
601 l2
= std::max (offset2
, r2
->offset
);
602 h2
= std::min (offset2
+ length
, offset2
+ r2
->length
);
604 /* Make them relative to the respective start offsets, so we can
605 compare them for equality. */
612 /* Different ranges, no match. */
613 if (l1
!= l2
|| h1
!= h2
)
622 /* Helper function for value_contents_eq. The only difference is that
623 this function is bit rather than byte based.
625 Compare LENGTH bits of VAL1's contents starting at OFFSET1 bits
626 with LENGTH bits of VAL2's contents starting at OFFSET2 bits.
627 Return true if the available bits match. */
630 value::contents_bits_eq (int offset1
, const struct value
*val2
, int offset2
,
633 /* Each array element corresponds to a ranges source (unavailable,
634 optimized out). '1' is for VAL1, '2' for VAL2. */
635 struct ranges_and_idx rp1
[2], rp2
[2];
637 /* See function description in value.h. */
638 gdb_assert (!m_lazy
&& !val2
->m_lazy
);
640 /* We shouldn't be trying to compare past the end of the values. */
641 gdb_assert (offset1
+ length
642 <= m_enclosing_type
->length () * TARGET_CHAR_BIT
);
643 gdb_assert (offset2
+ length
644 <= val2
->m_enclosing_type
->length () * TARGET_CHAR_BIT
);
646 memset (&rp1
, 0, sizeof (rp1
));
647 memset (&rp2
, 0, sizeof (rp2
));
648 rp1
[0].ranges
= &m_unavailable
;
649 rp2
[0].ranges
= &val2
->m_unavailable
;
650 rp1
[1].ranges
= &m_optimized_out
;
651 rp2
[1].ranges
= &val2
->m_optimized_out
;
655 ULONGEST l
= 0, h
= 0; /* init for gcc -Wall */
658 for (i
= 0; i
< 2; i
++)
660 ULONGEST l_tmp
, h_tmp
;
662 /* The contents only match equal if the invalid/unavailable
663 contents ranges match as well. */
664 if (!find_first_range_overlap_and_match (&rp1
[i
], &rp2
[i
],
665 offset1
, offset2
, length
,
669 /* We're interested in the lowest/first range found. */
670 if (i
== 0 || l_tmp
< l
)
677 /* Compare the available/valid contents. */
678 if (memcmp_with_bit_offsets (m_contents
.get (), offset1
,
679 val2
->m_contents
.get (), offset2
, l
) != 0)
693 value::contents_eq (LONGEST offset1
,
694 const struct value
*val2
, LONGEST offset2
,
695 LONGEST length
) const
697 return contents_bits_eq (offset1
* TARGET_CHAR_BIT
,
698 val2
, offset2
* TARGET_CHAR_BIT
,
699 length
* TARGET_CHAR_BIT
);
705 value::contents_eq (const struct value
*val2
) const
707 ULONGEST len1
= check_typedef (enclosing_type ())->length ();
708 ULONGEST len2
= check_typedef (val2
->enclosing_type ())->length ();
711 return contents_eq (0, val2
, 0, len1
);
714 /* The value-history records all the values printed by print commands
715 during this session. */
717 static std::vector
<value_ref_ptr
> value_history
;
720 /* List of all value objects currently allocated
721 (except for those released by calls to release_value)
722 This is so they can be freed after each command. */
724 static std::vector
<value_ref_ptr
> all_values
;
729 value::allocate_lazy (struct type
*type
)
733 /* Call check_typedef on our type to make sure that, if TYPE
734 is a TYPE_CODE_TYPEDEF, its length is set to the length
735 of the target type instead of zero. However, we do not
736 replace the typedef type by the target type, because we want
737 to keep the typedef in order to be able to set the VAL's type
738 description correctly. */
739 check_typedef (type
);
741 val
= new struct value (type
);
743 /* Values start out on the all_values chain. */
744 all_values
.emplace_back (val
);
/* The maximum size, in bytes, that GDB will try to allocate for a value.
   The initial value of 64k was not selected for any specific reason, it is
   just a reasonable starting point.  */

static int max_value_size = 65536; /* 64k bytes */
755 /* It is critical that the MAX_VALUE_SIZE is at least as big as the size of
756 LONGEST, otherwise GDB will not be able to parse integer values from the
757 CLI; for example if the MAX_VALUE_SIZE could be set to 1 then GDB would
758 be unable to parse "set max-value-size 2".
760 As we want a consistent GDB experience across hosts with different sizes
761 of LONGEST, this arbitrary minimum value was selected, so long as this
762 is bigger than LONGEST on all GDB supported hosts we're fine. */
764 #define MIN_VALUE_FOR_MAX_VALUE_SIZE 16
765 static_assert (sizeof (LONGEST
) <= MIN_VALUE_FOR_MAX_VALUE_SIZE
);
767 /* Implement the "set max-value-size" command. */
770 set_max_value_size (const char *args
, int from_tty
,
771 struct cmd_list_element
*c
)
773 gdb_assert (max_value_size
== -1 || max_value_size
>= 0);
775 if (max_value_size
> -1 && max_value_size
< MIN_VALUE_FOR_MAX_VALUE_SIZE
)
777 max_value_size
= MIN_VALUE_FOR_MAX_VALUE_SIZE
;
778 error (_("max-value-size set too low, increasing to %d bytes"),
783 /* Implement the "show max-value-size" command. */
786 show_max_value_size (struct ui_file
*file
, int from_tty
,
787 struct cmd_list_element
*c
, const char *value
)
789 if (max_value_size
== -1)
790 gdb_printf (file
, _("Maximum value size is unlimited.\n"));
792 gdb_printf (file
, _("Maximum value size is %d bytes.\n"),
796 /* Called before we attempt to allocate or reallocate a buffer for the
797 contents of a value. TYPE is the type of the value for which we are
798 allocating the buffer. If the buffer is too large (based on the user
799 controllable setting) then throw an error. If this function returns
800 then we should attempt to allocate the buffer. */
803 check_type_length_before_alloc (const struct type
*type
)
805 ULONGEST length
= type
->length ();
807 if (exceeds_max_value_size (length
))
809 if (type
->name () != NULL
)
810 error (_("value of type `%s' requires %s bytes, which is more "
811 "than max-value-size"), type
->name (), pulongest (length
));
813 error (_("value requires %s bytes, which is more than "
814 "max-value-size"), pulongest (length
));
821 exceeds_max_value_size (ULONGEST length
)
823 return max_value_size
> -1 && length
> max_value_size
;
/* When this has a value, it is used to limit the number of array elements
   of an array that are loaded into memory when an array value is made
   non-lazy.  */

static std::optional<int> array_length_limiting_element_count;
832 scoped_array_length_limiting::scoped_array_length_limiting (int elements
)
834 m_old_value
= array_length_limiting_element_count
;
835 array_length_limiting_element_count
.emplace (elements
);
839 scoped_array_length_limiting::~scoped_array_length_limiting ()
841 array_length_limiting_element_count
= m_old_value
;
844 /* Find the inner element type for ARRAY_TYPE. */
847 find_array_element_type (struct type
*array_type
)
849 array_type
= check_typedef (array_type
);
850 gdb_assert (array_type
->code () == TYPE_CODE_ARRAY
);
852 if (current_language
->la_language
== language_fortran
)
853 while (array_type
->code () == TYPE_CODE_ARRAY
)
855 array_type
= array_type
->target_type ();
856 array_type
= check_typedef (array_type
);
860 array_type
= array_type
->target_type ();
861 array_type
= check_typedef (array_type
);
867 /* Return the limited length of ARRAY_TYPE, which must be of
868 TYPE_CODE_ARRAY. This function can only be called when the global
869 ARRAY_LENGTH_LIMITING_ELEMENT_COUNT has a value.
871 The limited length of an array is the smallest of either (1) the total
872 size of the array type, or (2) the array target type multiplies by the
873 array_length_limiting_element_count. */
876 calculate_limited_array_length (struct type
*array_type
)
878 gdb_assert (array_length_limiting_element_count
.has_value ());
880 array_type
= check_typedef (array_type
);
881 gdb_assert (array_type
->code () == TYPE_CODE_ARRAY
);
883 struct type
*elm_type
= find_array_element_type (array_type
);
884 ULONGEST len
= (elm_type
->length ()
885 * (*array_length_limiting_element_count
));
886 len
= std::min (len
, array_type
->length ());
894 value::set_limited_array_length ()
896 ULONGEST limit
= m_limited_length
;
897 ULONGEST len
= type ()->length ();
899 if (array_length_limiting_element_count
.has_value ())
900 len
= calculate_limited_array_length (type ());
902 if (limit
!= 0 && len
> limit
)
904 if (len
> max_value_size
)
907 m_limited_length
= max_value_size
;
914 value::allocate_contents (bool check_size
)
918 struct type
*enc_type
= enclosing_type ();
919 ULONGEST len
= enc_type
->length ();
923 /* If we are allocating the contents of an array, which
924 is greater in size than max_value_size, and there is
925 an element limit in effect, then we can possibly try
926 to load only a sub-set of the array contents into
928 if (type () == enc_type
929 && type ()->code () == TYPE_CODE_ARRAY
930 && len
> max_value_size
931 && set_limited_array_length ())
932 len
= m_limited_length
;
934 check_type_length_before_alloc (enc_type
);
937 m_contents
.reset ((gdb_byte
*) xzalloc (len
));
941 /* Allocate a value and its contents for type TYPE. If CHECK_SIZE is true,
942 then apply the usual max-value-size checks. */
945 value::allocate (struct type
*type
, bool check_size
)
947 struct value
*val
= value::allocate_lazy (type
);
949 val
->allocate_contents (check_size
);
954 /* Allocate a value and its contents for type TYPE. */
957 value::allocate (struct type
*type
)
959 return allocate (type
, true);
962 /* Allocate a value that has the correct length
963 for COUNT repetitions of type TYPE. */
966 allocate_repeat_value (struct type
*type
, int count
)
968 /* Despite the fact that we are really creating an array of TYPE here, we
969 use the string lower bound as the array lower bound. This seems to
970 work fine for now. */
971 int low_bound
= current_language
->string_lower_bound ();
972 /* FIXME-type-allocation: need a way to free this type when we are
974 struct type
*array_type
975 = lookup_array_range_type (type
, low_bound
, count
+ low_bound
- 1);
977 return value::allocate (array_type
);
981 value::allocate_computed (struct type
*type
,
982 const struct lval_funcs
*funcs
,
985 struct value
*v
= value::allocate_lazy (type
);
987 v
->set_lval (lval_computed
);
988 v
->m_location
.computed
.funcs
= funcs
;
989 v
->m_location
.computed
.closure
= closure
;
997 value::allocate_optimized_out (struct type
*type
)
999 struct value
*retval
= value::allocate_lazy (type
);
1001 retval
->mark_bytes_optimized_out (0, type
->length ());
1002 retval
->set_lazy (false);
1006 /* Accessor methods. */
1008 gdb::array_view
<gdb_byte
>
1009 value::contents_raw ()
1011 int unit_size
= gdbarch_addressable_memory_unit_size (arch ());
1013 allocate_contents (true);
1015 ULONGEST length
= type ()->length ();
1016 return gdb::make_array_view
1017 (m_contents
.get () + m_embedded_offset
* unit_size
, length
);
1020 gdb::array_view
<gdb_byte
>
1021 value::contents_all_raw ()
1023 allocate_contents (true);
1025 ULONGEST length
= enclosing_type ()->length ();
1026 return gdb::make_array_view (m_contents
.get (), length
);
1029 /* Look at value.h for description. */
1032 value_actual_type (struct value
*value
, int resolve_simple_types
,
1033 int *real_type_found
)
1035 struct value_print_options opts
;
1036 struct type
*result
;
1038 get_user_print_options (&opts
);
1040 if (real_type_found
)
1041 *real_type_found
= 0;
1042 result
= value
->type ();
1043 if (opts
.objectprint
)
1045 /* If result's target type is TYPE_CODE_STRUCT, proceed to
1046 fetch its rtti type. */
1047 if (result
->is_pointer_or_reference ()
1048 && (check_typedef (result
->target_type ())->code ()
1049 == TYPE_CODE_STRUCT
)
1050 && !value
->optimized_out ())
1052 struct type
*real_type
;
1054 real_type
= value_rtti_indirect_type (value
, NULL
, NULL
, NULL
);
1057 if (real_type_found
)
1058 *real_type_found
= 1;
1062 else if (resolve_simple_types
)
1064 if (real_type_found
)
1065 *real_type_found
= 1;
1066 result
= value
->enclosing_type ();
1074 error_value_optimized_out (void)
1076 throw_error (OPTIMIZED_OUT_ERROR
, _("value has been optimized out"));
1080 value::require_not_optimized_out () const
1082 if (!m_optimized_out
.empty ())
1084 if (m_lval
== lval_register
)
1085 throw_error (OPTIMIZED_OUT_ERROR
,
1086 _("register has not been saved in frame"));
1088 error_value_optimized_out ();
1093 value::require_available () const
1095 if (!m_unavailable
.empty ())
1096 throw_error (NOT_AVAILABLE_ERROR
, _("value is not available"));
1099 gdb::array_view
<const gdb_byte
>
1100 value::contents_for_printing ()
1105 ULONGEST length
= enclosing_type ()->length ();
1106 return gdb::make_array_view (m_contents
.get (), length
);
1109 gdb::array_view
<const gdb_byte
>
1110 value::contents_for_printing () const
1112 gdb_assert (!m_lazy
);
1114 ULONGEST length
= enclosing_type ()->length ();
1115 return gdb::make_array_view (m_contents
.get (), length
);
1118 gdb::array_view
<const gdb_byte
>
1119 value::contents_all ()
1121 gdb::array_view
<const gdb_byte
> result
= contents_for_printing ();
1122 require_not_optimized_out ();
1123 require_available ();
1127 /* Copy ranges in SRC_RANGE that overlap [SRC_BIT_OFFSET,
1128 SRC_BIT_OFFSET+BIT_LENGTH) ranges into *DST_RANGE, adjusted. */
1131 ranges_copy_adjusted (std::vector
<range
> *dst_range
, int dst_bit_offset
,
1132 const std::vector
<range
> &src_range
, int src_bit_offset
,
1133 unsigned int bit_length
)
1135 for (const range
&r
: src_range
)
1139 l
= std::max (r
.offset
, (LONGEST
) src_bit_offset
);
1140 h
= std::min ((LONGEST
) (r
.offset
+ r
.length
),
1141 (LONGEST
) src_bit_offset
+ bit_length
);
1144 insert_into_bit_range_vector (dst_range
,
1145 dst_bit_offset
+ (l
- src_bit_offset
),
1153 value::ranges_copy_adjusted (struct value
*dst
, int dst_bit_offset
,
1154 int src_bit_offset
, int bit_length
) const
1156 ::ranges_copy_adjusted (&dst
->m_unavailable
, dst_bit_offset
,
1157 m_unavailable
, src_bit_offset
,
1159 ::ranges_copy_adjusted (&dst
->m_optimized_out
, dst_bit_offset
,
1160 m_optimized_out
, src_bit_offset
,
1167 value::contents_copy_raw (struct value
*dst
, LONGEST dst_offset
,
1168 LONGEST src_offset
, LONGEST length
)
1170 LONGEST src_bit_offset
, dst_bit_offset
, bit_length
;
1171 int unit_size
= gdbarch_addressable_memory_unit_size (arch ());
1173 /* A lazy DST would make that this copy operation useless, since as
1174 soon as DST's contents were un-lazied (by a later value_contents
1175 call, say), the contents would be overwritten. A lazy SRC would
1176 mean we'd be copying garbage. */
1177 gdb_assert (!dst
->m_lazy
&& !m_lazy
);
1179 ULONGEST copy_length
= length
;
1180 ULONGEST limit
= m_limited_length
;
1181 if (limit
> 0 && src_offset
+ length
> limit
)
1182 copy_length
= src_offset
> limit
? 0 : limit
- src_offset
;
1184 /* The overwritten DST range gets unavailability ORed in, not
1185 replaced. Make sure to remember to implement replacing if it
1186 turns out actually necessary. */
1187 gdb_assert (dst
->bytes_available (dst_offset
, length
));
1188 gdb_assert (!dst
->bits_any_optimized_out (TARGET_CHAR_BIT
* dst_offset
,
1189 TARGET_CHAR_BIT
* length
));
1191 /* Copy the data. */
1192 gdb::array_view
<gdb_byte
> dst_contents
1193 = dst
->contents_all_raw ().slice (dst_offset
* unit_size
,
1194 copy_length
* unit_size
);
1195 gdb::array_view
<const gdb_byte
> src_contents
1196 = contents_all_raw ().slice (src_offset
* unit_size
,
1197 copy_length
* unit_size
);
1198 gdb::copy (src_contents
, dst_contents
);
1200 /* Copy the meta-data, adjusted. */
1201 src_bit_offset
= src_offset
* unit_size
* HOST_CHAR_BIT
;
1202 dst_bit_offset
= dst_offset
* unit_size
* HOST_CHAR_BIT
;
1203 bit_length
= length
* unit_size
* HOST_CHAR_BIT
;
1205 ranges_copy_adjusted (dst
, dst_bit_offset
,
1206 src_bit_offset
, bit_length
);
1212 value::contents_copy_raw_bitwise (struct value
*dst
, LONGEST dst_bit_offset
,
1213 LONGEST src_bit_offset
,
1216 /* A lazy DST would make that this copy operation useless, since as
1217 soon as DST's contents were un-lazied (by a later value_contents
1218 call, say), the contents would be overwritten. A lazy SRC would
1219 mean we'd be copying garbage. */
1220 gdb_assert (!dst
->m_lazy
&& !m_lazy
);
1222 ULONGEST copy_bit_length
= bit_length
;
1223 ULONGEST bit_limit
= m_limited_length
* TARGET_CHAR_BIT
;
1224 if (bit_limit
> 0 && src_bit_offset
+ bit_length
> bit_limit
)
1225 copy_bit_length
= (src_bit_offset
> bit_limit
? 0
1226 : bit_limit
- src_bit_offset
);
1228 /* The overwritten DST range gets unavailability ORed in, not
1229 replaced. Make sure to remember to implement replacing if it
1230 turns out actually necessary. */
1231 LONGEST dst_offset
= dst_bit_offset
/ TARGET_CHAR_BIT
;
1232 LONGEST length
= bit_length
/ TARGET_CHAR_BIT
;
1233 gdb_assert (dst
->bytes_available (dst_offset
, length
));
1234 gdb_assert (!dst
->bits_any_optimized_out (dst_bit_offset
,
1237 /* Copy the data. */
1238 gdb::array_view
<gdb_byte
> dst_contents
= dst
->contents_all_raw ();
1239 gdb::array_view
<const gdb_byte
> src_contents
= contents_all_raw ();
1240 copy_bitwise (dst_contents
.data (), dst_bit_offset
,
1241 src_contents
.data (), src_bit_offset
,
1243 type_byte_order (type ()) == BFD_ENDIAN_BIG
);
1245 /* Copy the meta-data. */
1246 ranges_copy_adjusted (dst
, dst_bit_offset
, src_bit_offset
, bit_length
);
1252 value::contents_copy (struct value
*dst
, LONGEST dst_offset
,
1253 LONGEST src_offset
, LONGEST length
)
1258 contents_copy_raw (dst
, dst_offset
, src_offset
, length
);
1261 gdb::array_view
<const gdb_byte
>
1264 gdb::array_view
<const gdb_byte
> result
= contents_writeable ();
1265 require_not_optimized_out ();
1266 require_available ();
1270 gdb::array_view
<gdb_byte
>
1271 value::contents_writeable ()
1275 return contents_raw ();
1279 value::optimized_out ()
1283 /* See if we can compute the result without fetching the
1285 if (this->lval () == lval_memory
)
1287 else if (this->lval () == lval_computed
)
1289 const struct lval_funcs
*funcs
= m_location
.computed
.funcs
;
1291 if (funcs
->is_optimized_out
!= nullptr)
1292 return funcs
->is_optimized_out (this);
1295 /* Fall back to fetching. */
1300 catch (const gdb_exception_error
&ex
)
1305 case OPTIMIZED_OUT_ERROR
:
1306 case NOT_AVAILABLE_ERROR
:
1307 /* These can normally happen when we try to access an
1308 optimized out or unavailable register, either in a
1309 physical register or spilled to memory. */
1317 return !m_optimized_out
.empty ();
1320 /* Mark contents of VALUE as optimized out, starting at OFFSET bytes, and
1321 the following LENGTH bytes. */
1324 value::mark_bytes_optimized_out (int offset
, int length
)
1326 mark_bits_optimized_out (offset
* TARGET_CHAR_BIT
,
1327 length
* TARGET_CHAR_BIT
);
1333 value::mark_bits_optimized_out (LONGEST offset
, LONGEST length
)
1335 insert_into_bit_range_vector (&m_optimized_out
, offset
, length
);
1339 value::bits_synthetic_pointer (LONGEST offset
, LONGEST length
) const
1341 if (m_lval
!= lval_computed
1342 || !m_location
.computed
.funcs
->check_synthetic_pointer
)
1344 return m_location
.computed
.funcs
->check_synthetic_pointer (this, offset
,
1348 const struct lval_funcs
*
1349 value::computed_funcs () const
1351 gdb_assert (m_lval
== lval_computed
);
1353 return m_location
.computed
.funcs
;
1357 value::computed_closure () const
1359 gdb_assert (m_lval
== lval_computed
);
1361 return m_location
.computed
.closure
;
1365 value::address () const
1367 if (m_lval
!= lval_memory
)
1369 if (m_parent
!= NULL
)
1370 return m_parent
->address () + m_offset
;
1371 if (NULL
!= TYPE_DATA_LOCATION (type ()))
1373 gdb_assert (TYPE_DATA_LOCATION (type ())->is_constant ());
1374 return TYPE_DATA_LOCATION_ADDR (type ());
1377 return m_location
.address
+ m_offset
;
1381 value::raw_address () const
1383 if (m_lval
!= lval_memory
)
1385 return m_location
.address
;
1389 value::set_address (CORE_ADDR addr
)
1391 gdb_assert (m_lval
== lval_memory
);
1392 m_location
.address
= addr
;
1396 value::deprecated_next_frame_id_hack ()
1398 gdb_assert (m_lval
== lval_register
);
1399 return &m_location
.reg
.next_frame_id
;
1403 value::deprecated_regnum_hack ()
1405 gdb_assert (m_lval
== lval_register
);
1406 return &m_location
.reg
.regnum
;
1410 /* Return a mark in the value chain. All values allocated after the
1411 mark is obtained (except for those released) are subject to being freed
1412 if a subsequent value_free_to_mark is passed the mark. */
1416 if (all_values
.empty ())
1418 return all_values
.back ().get ();
1421 /* Release a reference to VAL, which was acquired with value_incref.
1422 This function is also called to deallocate values from the value
1428 gdb_assert (m_reference_count
> 0);
1429 m_reference_count
--;
1430 if (m_reference_count
== 0)
1434 /* Free all values allocated since MARK was obtained by value_mark
1435 (except for those released). */
1437 value_free_to_mark (const struct value
*mark
)
1439 auto iter
= std::find (all_values
.begin (), all_values
.end (), mark
);
1440 if (iter
== all_values
.end ())
1441 all_values
.clear ();
1443 all_values
.erase (iter
+ 1, all_values
.end ());
1446 /* Remove VAL from the chain all_values
1447 so it will not be freed automatically. */
1450 release_value (struct value
*val
)
1453 return value_ref_ptr ();
1455 std::vector
<value_ref_ptr
>::reverse_iterator iter
;
1456 for (iter
= all_values
.rbegin (); iter
!= all_values
.rend (); ++iter
)
1460 value_ref_ptr result
= *iter
;
1461 all_values
.erase (iter
.base () - 1);
1466 /* We must always return an owned reference. Normally this happens
1467 because we transfer the reference from the value chain, but in
1468 this case the value was not on the chain. */
1469 return value_ref_ptr::new_reference (val
);
1474 std::vector
<value_ref_ptr
>
1475 value_release_to_mark (const struct value
*mark
)
1477 std::vector
<value_ref_ptr
> result
;
1479 auto iter
= std::find (all_values
.begin (), all_values
.end (), mark
);
1480 if (iter
== all_values
.end ())
1481 std::swap (result
, all_values
);
1484 std::move (iter
+ 1, all_values
.end (), std::back_inserter (result
));
1485 all_values
.erase (iter
+ 1, all_values
.end ());
1487 std::reverse (result
.begin (), result
.end ());
1494 value::copy () const
1496 struct type
*encl_type
= enclosing_type ();
1499 val
= value::allocate_lazy (encl_type
);
1500 val
->m_type
= m_type
;
1501 val
->set_lval (m_lval
);
1502 val
->m_location
= m_location
;
1503 val
->m_offset
= m_offset
;
1504 val
->m_bitpos
= m_bitpos
;
1505 val
->m_bitsize
= m_bitsize
;
1506 val
->m_lazy
= m_lazy
;
1507 val
->m_embedded_offset
= embedded_offset ();
1508 val
->m_pointed_to_offset
= m_pointed_to_offset
;
1509 val
->m_modifiable
= m_modifiable
;
1510 val
->m_stack
= m_stack
;
1511 val
->m_is_zero
= m_is_zero
;
1512 val
->m_in_history
= m_in_history
;
1513 val
->m_initialized
= m_initialized
;
1514 val
->m_unavailable
= m_unavailable
;
1515 val
->m_optimized_out
= m_optimized_out
;
1516 val
->m_parent
= m_parent
;
1517 val
->m_limited_length
= m_limited_length
;
1520 && !(val
->entirely_optimized_out ()
1521 || val
->entirely_unavailable ()))
1523 ULONGEST length
= val
->m_limited_length
;
1525 length
= val
->enclosing_type ()->length ();
1527 gdb_assert (m_contents
!= nullptr);
1528 const auto &arg_view
1529 = gdb::make_array_view (m_contents
.get (), length
);
1531 val
->allocate_contents (false);
1532 gdb::array_view
<gdb_byte
> val_contents
1533 = val
->contents_all_raw ().slice (0, length
);
1535 gdb::copy (arg_view
, val_contents
);
1538 if (val
->lval () == lval_computed
)
1540 const struct lval_funcs
*funcs
= val
->m_location
.computed
.funcs
;
1542 if (funcs
->copy_closure
)
1543 val
->m_location
.computed
.closure
= funcs
->copy_closure (val
);
1548 /* Return a "const" and/or "volatile" qualified version of the value V.
1549 If CNST is true, then the returned value will be qualified with
1551 if VOLTL is true, then the returned value will be qualified with
1555 make_cv_value (int cnst
, int voltl
, struct value
*v
)
1557 struct type
*val_type
= v
->type ();
1558 struct type
*m_enclosing_type
= v
->enclosing_type ();
1559 struct value
*cv_val
= v
->copy ();
1561 cv_val
->deprecated_set_type (make_cv_type (cnst
, voltl
, val_type
, NULL
));
1562 cv_val
->set_enclosing_type (make_cv_type (cnst
, voltl
, m_enclosing_type
, NULL
));
1572 if (this->lval () != not_lval
)
1574 struct type
*enc_type
= enclosing_type ();
1575 struct value
*val
= value::allocate (enc_type
);
1577 gdb::copy (contents_all (), val
->contents_all_raw ());
1578 val
->m_type
= m_type
;
1579 val
->set_embedded_offset (embedded_offset ());
1580 val
->set_pointed_to_offset (pointed_to_offset ());
1589 value::force_lval (CORE_ADDR addr
)
1591 gdb_assert (this->lval () == not_lval
);
1593 write_memory (addr
, contents_raw ().data (), type ()->length ());
1594 m_lval
= lval_memory
;
1595 m_location
.address
= addr
;
1599 value::set_component_location (const struct value
*whole
)
1603 gdb_assert (whole
->m_lval
!= lval_xcallable
);
1605 if (whole
->m_lval
== lval_internalvar
)
1606 m_lval
= lval_internalvar_component
;
1608 m_lval
= whole
->m_lval
;
1610 m_location
= whole
->m_location
;
1611 if (whole
->m_lval
== lval_computed
)
1613 const struct lval_funcs
*funcs
= whole
->m_location
.computed
.funcs
;
1615 if (funcs
->copy_closure
)
1616 m_location
.computed
.closure
= funcs
->copy_closure (whole
);
1619 /* If the WHOLE value has a dynamically resolved location property then
1620 update the address of the COMPONENT. */
1621 type
= whole
->type ();
1622 if (NULL
!= TYPE_DATA_LOCATION (type
)
1623 && TYPE_DATA_LOCATION (type
)->is_constant ())
1624 set_address (TYPE_DATA_LOCATION_ADDR (type
));
1626 /* Similarly, if the COMPONENT value has a dynamically resolved location
1627 property then update its address. */
1628 type
= this->type ();
1629 if (NULL
!= TYPE_DATA_LOCATION (type
)
1630 && TYPE_DATA_LOCATION (type
)->is_constant ())
1632 /* If the COMPONENT has a dynamic location, and is an
1633 lval_internalvar_component, then we change it to a lval_memory.
1635 Usually a component of an internalvar is created non-lazy, and has
1636 its content immediately copied from the parent internalvar.
1637 However, for components with a dynamic location, the content of
1638 the component is not contained within the parent, but is instead
1639 accessed indirectly. Further, the component will be created as a
1642 By changing the type of the component to lval_memory we ensure
1643 that value_fetch_lazy can successfully load the component.
1645 This solution isn't ideal, but a real fix would require values to
1646 carry around both the parent value contents, and the contents of
1647 any dynamic fields within the parent. This is a substantial
1648 change to how values work in GDB. */
1649 if (this->lval () == lval_internalvar_component
)
1651 gdb_assert (lazy ());
1652 m_lval
= lval_memory
;
1655 gdb_assert (this->lval () == lval_memory
);
1656 set_address (TYPE_DATA_LOCATION_ADDR (type
));
1660 /* Access to the value history. */
1662 /* Record a new value in the value history.
1663 Returns the absolute history index of the entry. */
1666 value::record_latest ()
1668 /* We don't want this value to have anything to do with the inferior anymore.
1669 In particular, "set $1 = 50" should not affect the variable from which
1670 the value was taken, and fast watchpoints should be able to assume that
1671 a value on the value history never changes. */
1674 /* We know that this is a _huge_ array, any attempt to fetch this
1675 is going to cause GDB to throw an error. However, to allow
1676 the array to still be displayed we fetch its contents up to
1677 `max_value_size' and mark anything beyond "unavailable" in
1679 if (m_type
->code () == TYPE_CODE_ARRAY
1680 && m_type
->length () > max_value_size
1681 && array_length_limiting_element_count
.has_value ()
1682 && m_enclosing_type
== m_type
1683 && calculate_limited_array_length (m_type
) <= max_value_size
)
1684 m_limited_length
= max_value_size
;
1689 ULONGEST limit
= m_limited_length
;
1691 mark_bytes_unavailable (limit
, m_enclosing_type
->length () - limit
);
1693 /* Mark the value as recorded in the history for the availability check. */
1694 m_in_history
= true;
1696 /* We preserve VALUE_LVAL so that the user can find out where it was fetched
1697 from. This is a bit dubious, because then *&$1 does not just return $1
1698 but the current contents of that location. c'est la vie... */
1699 set_modifiable (false);
1701 value_history
.push_back (release_value (this));
1703 return value_history
.size ();
1706 /* Return a copy of the value in the history with sequence number NUM. */
1709 access_value_history (int num
)
1714 absnum
+= value_history
.size ();
1719 error (_("The history is empty."));
1721 error (_("There is only one value in the history."));
1723 error (_("History does not go back to $$%d."), -num
);
1725 if (absnum
> value_history
.size ())
1726 error (_("History has not yet reached $%d."), absnum
);
1730 return value_history
[absnum
]->copy ();
1736 value_history_count ()
1738 return value_history
.size ();
1742 show_values (const char *num_exp
, int from_tty
)
1750 /* "show values +" should print from the stored position.
1751 "show values <exp>" should print around value number <exp>. */
1752 if (num_exp
[0] != '+' || num_exp
[1] != '\0')
1753 num
= parse_and_eval_long (num_exp
) - 5;
1757 /* "show values" means print the last 10 values. */
1758 num
= value_history
.size () - 9;
1764 for (i
= num
; i
< num
+ 10 && i
<= value_history
.size (); i
++)
1766 struct value_print_options opts
;
1768 val
= access_value_history (i
);
1769 gdb_printf (("$%d = "), i
);
1770 get_user_print_options (&opts
);
1771 value_print (val
, gdb_stdout
, &opts
);
1772 gdb_printf (("\n"));
1775 /* The next "show values +" should start after what we just printed. */
1778 /* Hitting just return after this command should do the same thing as
1779 "show values +". If num_exp is null, this is unnecessary, since
1780 "show values +" is not useful after "show values". */
1781 if (from_tty
&& num_exp
)
1782 set_repeat_arguments ("+");
1785 enum internalvar_kind
1787 /* The internal variable is empty. */
1790 /* The value of the internal variable is provided directly as
1791 a GDB value object. */
1794 /* A fresh value is computed via a call-back routine on every
1795 access to the internal variable. */
1796 INTERNALVAR_MAKE_VALUE
,
1798 /* The internal variable holds a GDB internal convenience function. */
1799 INTERNALVAR_FUNCTION
,
1801 /* The variable holds an integer value. */
1802 INTERNALVAR_INTEGER
,
1804 /* The variable holds a GDB-provided string. */
1808 union internalvar_data
1810 /* A value object used with INTERNALVAR_VALUE. */
1811 struct value
*value
;
1813 /* The call-back routine used with INTERNALVAR_MAKE_VALUE. */
1816 /* The functions to call. */
1817 const struct internalvar_funcs
*functions
;
1819 /* The function's user-data. */
1823 /* The internal function used with INTERNALVAR_FUNCTION. */
1826 struct internal_function
*function
;
1827 /* True if this is the canonical name for the function. */
1831 /* An integer value used with INTERNALVAR_INTEGER. */
1834 /* If type is non-NULL, it will be used as the type to generate
1835 a value for this internal variable. If type is NULL, a default
1836 integer type for the architecture is used. */
1841 /* A string value used with INTERNALVAR_STRING. */
1845 /* Internal variables. These are variables within the debugger
1846 that hold values assigned by debugger commands.
1847 The user refers to them with a '$' prefix
1848 that does not appear in the variable names stored internally. */
1852 internalvar (std::string name
)
1853 : name (std::move (name
))
1858 /* We support various different kinds of content of an internal variable.
1859 enum internalvar_kind specifies the kind, and union internalvar_data
1860 provides the data associated with this particular kind. */
1862 enum internalvar_kind kind
= INTERNALVAR_VOID
;
1864 union internalvar_data u
{};
1867 /* Use std::map, a sorted container, to make the order of iteration (and
1868 therefore the output of "show convenience") stable. */
1870 static std::map
<std::string
, internalvar
> internalvars
;
1872 /* If the variable does not already exist create it and give it the
1873 value given. If no value is given then the default is zero. */
1875 init_if_undefined_command (const char* args
, int from_tty
)
1877 struct internalvar
*intvar
= nullptr;
1879 /* Parse the expression - this is taken from set_command(). */
1880 expression_up expr
= parse_expression (args
);
1882 /* Validate the expression.
1883 Was the expression an assignment?
1884 Or even an expression at all? */
1885 if (expr
->first_opcode () != BINOP_ASSIGN
)
1886 error (_("Init-if-undefined requires an assignment expression."));
1888 /* Extract the variable from the parsed expression. */
1889 expr::assign_operation
*assign
1890 = dynamic_cast<expr::assign_operation
*> (expr
->op
.get ());
1891 if (assign
!= nullptr)
1893 expr::operation
*lhs
= assign
->get_lhs ();
1894 expr::internalvar_operation
*ivarop
1895 = dynamic_cast<expr::internalvar_operation
*> (lhs
);
1896 if (ivarop
!= nullptr)
1897 intvar
= ivarop
->get_internalvar ();
1900 if (intvar
== nullptr)
1901 error (_("The first parameter to init-if-undefined "
1902 "should be a GDB variable."));
1904 /* Only evaluate the expression if the lvalue is void.
1905 This may still fail if the expression is invalid. */
1906 if (intvar
->kind
== INTERNALVAR_VOID
)
1911 /* Look up an internal variable with name NAME. NAME should not
1912 normally include a dollar sign.
1914 If the specified internal variable does not exist,
1915 the return value is NULL. */
1917 struct internalvar
*
1918 lookup_only_internalvar (const char *name
)
1920 auto it
= internalvars
.find (name
);
1921 if (it
== internalvars
.end ())
1927 /* Complete NAME by comparing it to the names of internal
1931 complete_internalvar (completion_tracker
&tracker
, const char *name
)
1933 int len
= strlen (name
);
1935 for (auto &pair
: internalvars
)
1937 const internalvar
&var
= pair
.second
;
1939 if (var
.name
.compare (0, len
, name
) == 0)
1940 tracker
.add_completion (make_unique_xstrdup (var
.name
.c_str ()));
1944 /* Create an internal variable with name NAME and with a void value.
1945 NAME should not normally include a dollar sign.
1947 An internal variable with that name must not exist already. */
1949 struct internalvar
*
1950 create_internalvar (const char *name
)
1952 auto pair
= internalvars
.emplace (std::make_pair (name
, internalvar (name
)));
1953 gdb_assert (pair
.second
);
1955 return &pair
.first
->second
;
1958 /* Create an internal variable with name NAME and register FUN as the
1959 function that value_of_internalvar uses to create a value whenever
1960 this variable is referenced. NAME should not normally include a
1961 dollar sign. DATA is passed uninterpreted to FUN when it is
1962 called. CLEANUP, if not NULL, is called when the internal variable
1963 is destroyed. It is passed DATA as its only argument. */
1965 struct internalvar
*
1966 create_internalvar_type_lazy (const char *name
,
1967 const struct internalvar_funcs
*funcs
,
1970 struct internalvar
*var
= create_internalvar (name
);
1972 var
->kind
= INTERNALVAR_MAKE_VALUE
;
1973 var
->u
.make_value
.functions
= funcs
;
1974 var
->u
.make_value
.data
= data
;
1978 /* See documentation in value.h. */
1981 compile_internalvar_to_ax (struct internalvar
*var
,
1982 struct agent_expr
*expr
,
1983 struct axs_value
*value
)
1985 if (var
->kind
!= INTERNALVAR_MAKE_VALUE
1986 || var
->u
.make_value
.functions
->compile_to_ax
== NULL
)
1989 var
->u
.make_value
.functions
->compile_to_ax (var
, expr
, value
,
1990 var
->u
.make_value
.data
);
1994 /* Look up an internal variable with name NAME. NAME should not
1995 normally include a dollar sign.
1997 If the specified internal variable does not exist,
1998 one is created, with a void value. */
2000 struct internalvar
*
2001 lookup_internalvar (const char *name
)
2003 struct internalvar
*var
;
2005 var
= lookup_only_internalvar (name
);
2009 return create_internalvar (name
);
2012 /* Return current value of internal variable VAR. For variables that
2013 are not inherently typed, use a value type appropriate for GDBARCH. */
2016 value_of_internalvar (struct gdbarch
*gdbarch
, struct internalvar
*var
)
2019 struct trace_state_variable
*tsv
;
2021 /* If there is a trace state variable of the same name, assume that
2022 is what we really want to see. */
2023 tsv
= find_trace_state_variable (var
->name
.c_str ());
2026 tsv
->value_known
= target_get_trace_state_variable_value (tsv
->number
,
2028 if (tsv
->value_known
)
2029 val
= value_from_longest (builtin_type (gdbarch
)->builtin_int64
,
2032 val
= value::allocate (builtin_type (gdbarch
)->builtin_void
);
2038 case INTERNALVAR_VOID
:
2039 val
= value::allocate (builtin_type (gdbarch
)->builtin_void
);
2042 case INTERNALVAR_FUNCTION
:
2043 val
= value::allocate (builtin_type (gdbarch
)->internal_fn
);
2046 case INTERNALVAR_INTEGER
:
2047 if (!var
->u
.integer
.type
)
2048 val
= value_from_longest (builtin_type (gdbarch
)->builtin_int
,
2049 var
->u
.integer
.val
);
2051 val
= value_from_longest (var
->u
.integer
.type
, var
->u
.integer
.val
);
2054 case INTERNALVAR_STRING
:
2055 val
= current_language
->value_string (gdbarch
,
2057 strlen (var
->u
.string
));
2060 case INTERNALVAR_VALUE
:
2061 val
= var
->u
.value
->copy ();
2066 case INTERNALVAR_MAKE_VALUE
:
2067 val
= (*var
->u
.make_value
.functions
->make_value
) (gdbarch
, var
,
2068 var
->u
.make_value
.data
);
2072 internal_error (_("bad kind"));
2075 /* Change the VALUE_LVAL to lval_internalvar so that future operations
2076 on this value go back to affect the original internal variable.
2078 Do not do this for INTERNALVAR_MAKE_VALUE variables, as those have
2079 no underlying modifiable state in the internal variable.
2081 Likewise, if the variable's value is a computed lvalue, we want
2082 references to it to produce another computed lvalue, where
2083 references and assignments actually operate through the
2084 computed value's functions.
2086 This means that internal variables with computed values
2087 behave a little differently from other internal variables:
2088 assignments to them don't just replace the previous value
2089 altogether. At the moment, this seems like the behavior we
2092 if (var
->kind
!= INTERNALVAR_MAKE_VALUE
2093 && val
->lval () != lval_computed
)
2095 val
->set_lval (lval_internalvar
);
2096 VALUE_INTERNALVAR (val
) = var
;
2103 get_internalvar_integer (struct internalvar
*var
, LONGEST
*result
)
2105 if (var
->kind
== INTERNALVAR_INTEGER
)
2107 *result
= var
->u
.integer
.val
;
2111 if (var
->kind
== INTERNALVAR_VALUE
)
2113 struct type
*type
= check_typedef (var
->u
.value
->type ());
2115 if (type
->code () == TYPE_CODE_INT
)
2117 *result
= value_as_long (var
->u
.value
);
2126 get_internalvar_function (struct internalvar
*var
,
2127 struct internal_function
**result
)
2131 case INTERNALVAR_FUNCTION
:
2132 *result
= var
->u
.fn
.function
;
2141 set_internalvar_component (struct internalvar
*var
,
2142 LONGEST offset
, LONGEST bitpos
,
2143 LONGEST bitsize
, struct value
*newval
)
2146 struct gdbarch
*gdbarch
;
2151 case INTERNALVAR_VALUE
:
2152 addr
= var
->u
.value
->contents_writeable ().data ();
2153 gdbarch
= var
->u
.value
->arch ();
2154 unit_size
= gdbarch_addressable_memory_unit_size (gdbarch
);
2157 modify_field (var
->u
.value
->type (), addr
+ offset
,
2158 value_as_long (newval
), bitpos
, bitsize
);
2160 memcpy (addr
+ offset
* unit_size
, newval
->contents ().data (),
2161 newval
->type ()->length ());
2165 /* We can never get a component of any other kind. */
2166 internal_error (_("set_internalvar_component"));
2171 set_internalvar (struct internalvar
*var
, struct value
*val
)
2173 enum internalvar_kind new_kind
;
2174 union internalvar_data new_data
= { 0 };
2176 if (var
->kind
== INTERNALVAR_FUNCTION
&& var
->u
.fn
.canonical
)
2177 error (_("Cannot overwrite convenience function %s"), var
->name
.c_str ());
2179 /* Prepare new contents. */
2180 switch (check_typedef (val
->type ())->code ())
2182 case TYPE_CODE_VOID
:
2183 new_kind
= INTERNALVAR_VOID
;
2186 case TYPE_CODE_INTERNAL_FUNCTION
:
2187 gdb_assert (val
->lval () == lval_internalvar
);
2188 new_kind
= INTERNALVAR_FUNCTION
;
2189 get_internalvar_function (VALUE_INTERNALVAR (val
),
2190 &new_data
.fn
.function
);
2191 /* Copies created here are never canonical. */
2195 new_kind
= INTERNALVAR_VALUE
;
2196 struct value
*copy
= val
->copy ();
2197 copy
->set_modifiable (true);
2199 /* Force the value to be fetched from the target now, to avoid problems
2200 later when this internalvar is referenced and the target is gone or
2203 copy
->fetch_lazy ();
2205 /* Release the value from the value chain to prevent it from being
2206 deleted by free_all_values. From here on this function should not
2207 call error () until new_data is installed into the var->u to avoid
2209 new_data
.value
= release_value (copy
).release ();
2211 /* Internal variables which are created from values with a dynamic
2212 location don't need the location property of the origin anymore.
2213 The resolved dynamic location is used prior then any other address
2214 when accessing the value.
2215 If we keep it, we would still refer to the origin value.
2216 Remove the location property in case it exist. */
2217 new_data
.value
->type ()->remove_dyn_prop (DYN_PROP_DATA_LOCATION
);
2222 /* Clean up old contents. */
2223 clear_internalvar (var
);
2226 var
->kind
= new_kind
;
2228 /* End code which must not call error(). */
2232 set_internalvar_integer (struct internalvar
*var
, LONGEST l
)
2234 /* Clean up old contents. */
2235 clear_internalvar (var
);
2237 var
->kind
= INTERNALVAR_INTEGER
;
2238 var
->u
.integer
.type
= NULL
;
2239 var
->u
.integer
.val
= l
;
2243 set_internalvar_string (struct internalvar
*var
, const char *string
)
2245 /* Clean up old contents. */
2246 clear_internalvar (var
);
2248 var
->kind
= INTERNALVAR_STRING
;
2249 var
->u
.string
= xstrdup (string
);
2253 set_internalvar_function (struct internalvar
*var
, struct internal_function
*f
)
2255 /* Clean up old contents. */
2256 clear_internalvar (var
);
2258 var
->kind
= INTERNALVAR_FUNCTION
;
2259 var
->u
.fn
.function
= f
;
2260 var
->u
.fn
.canonical
= 1;
2261 /* Variables installed here are always the canonical version. */
2265 clear_internalvar (struct internalvar
*var
)
2267 /* Clean up old contents. */
2270 case INTERNALVAR_VALUE
:
2271 var
->u
.value
->decref ();
2274 case INTERNALVAR_STRING
:
2275 xfree (var
->u
.string
);
2282 /* Reset to void kind. */
2283 var
->kind
= INTERNALVAR_VOID
;
2287 internalvar_name (const struct internalvar
*var
)
2289 return var
->name
.c_str ();
2292 static struct internal_function
*
2293 create_internal_function (const char *name
,
2294 internal_function_fn handler
, void *cookie
)
2296 struct internal_function
*ifn
= XNEW (struct internal_function
);
2298 ifn
->name
= xstrdup (name
);
2299 ifn
->handler
= handler
;
2300 ifn
->cookie
= cookie
;
2305 value_internal_function_name (struct value
*val
)
2307 struct internal_function
*ifn
;
2310 gdb_assert (val
->lval () == lval_internalvar
);
2311 result
= get_internalvar_function (VALUE_INTERNALVAR (val
), &ifn
);
2312 gdb_assert (result
);
2318 call_internal_function (struct gdbarch
*gdbarch
,
2319 const struct language_defn
*language
,
2320 struct value
*func
, int argc
, struct value
**argv
)
2322 struct internal_function
*ifn
;
2325 gdb_assert (func
->lval () == lval_internalvar
);
2326 result
= get_internalvar_function (VALUE_INTERNALVAR (func
), &ifn
);
2327 gdb_assert (result
);
2329 return (*ifn
->handler
) (gdbarch
, language
, ifn
->cookie
, argc
, argv
);
2332 /* The 'function' command. This does nothing -- it is just a
2333 placeholder to let "help function NAME" work. This is also used as
2334 the implementation of the sub-command that is created when
2335 registering an internal function. */
2337 function_command (const char *command
, int from_tty
)
2342 /* Helper function that does the work for add_internal_function. */
2344 static struct cmd_list_element
*
2345 do_add_internal_function (const char *name
, const char *doc
,
2346 internal_function_fn handler
, void *cookie
)
2348 struct internal_function
*ifn
;
2349 struct internalvar
*var
= lookup_internalvar (name
);
2351 ifn
= create_internal_function (name
, handler
, cookie
);
2352 set_internalvar_function (var
, ifn
);
2354 return add_cmd (name
, no_class
, function_command
, doc
, &functionlist
);
2360 add_internal_function (const char *name
, const char *doc
,
2361 internal_function_fn handler
, void *cookie
)
2363 do_add_internal_function (name
, doc
, handler
, cookie
);
2369 add_internal_function (gdb::unique_xmalloc_ptr
<char> &&name
,
2370 gdb::unique_xmalloc_ptr
<char> &&doc
,
2371 internal_function_fn handler
, void *cookie
)
2373 struct cmd_list_element
*cmd
2374 = do_add_internal_function (name
.get (), doc
.get (), handler
, cookie
);
2376 /* Manually transfer the ownership of the doc and name strings to CMD by
2377 setting the appropriate flags. */
2378 (void) doc
.release ();
2379 cmd
->doc_allocated
= 1;
2380 (void) name
.release ();
2381 cmd
->name_allocated
= 1;
2385 value::preserve (struct objfile
*objfile
, htab_t copied_types
)
2387 if (m_type
->objfile_owner () == objfile
)
2388 m_type
= copy_type_recursive (m_type
, copied_types
);
2390 if (m_enclosing_type
->objfile_owner () == objfile
)
2391 m_enclosing_type
= copy_type_recursive (m_enclosing_type
, copied_types
);
2394 /* Likewise for internal variable VAR. */
2397 preserve_one_internalvar (struct internalvar
*var
, struct objfile
*objfile
,
2398 htab_t copied_types
)
2402 case INTERNALVAR_INTEGER
:
2403 if (var
->u
.integer
.type
2404 && var
->u
.integer
.type
->objfile_owner () == objfile
)
2406 = copy_type_recursive (var
->u
.integer
.type
, copied_types
);
2409 case INTERNALVAR_VALUE
:
2410 var
->u
.value
->preserve (objfile
, copied_types
);
2415 /* Make sure that all types and values referenced by VAROBJ are updated before
2416 OBJFILE is discarded. COPIED_TYPES is used to prevent cycles and
2420 preserve_one_varobj (struct varobj
*varobj
, struct objfile
*objfile
,
2421 htab_t copied_types
)
2423 if (varobj
->type
->is_objfile_owned ()
2424 && varobj
->type
->objfile_owner () == objfile
)
2427 = copy_type_recursive (varobj
->type
, copied_types
);
2430 if (varobj
->value
!= nullptr)
2431 varobj
->value
->preserve (objfile
, copied_types
);
2434 /* Update the internal variables and value history when OBJFILE is
2435 discarded; we must copy the types out of the objfile. New global types
2436 will be created for every convenience variable which currently points to
2437 this objfile's types, and the convenience variables will be adjusted to
2438 use the new global types. */
2441 preserve_values (struct objfile
*objfile
)
2443 /* Create the hash table. We allocate on the objfile's obstack, since
2444 it is soon to be deleted. */
2445 htab_up copied_types
= create_copied_types_hash ();
2447 for (const value_ref_ptr
&item
: value_history
)
2448 item
->preserve (objfile
, copied_types
.get ());
2450 for (auto &pair
: internalvars
)
2451 preserve_one_internalvar (&pair
.second
, objfile
, copied_types
.get ());
2453 /* For the remaining varobj, check that none has type owned by OBJFILE. */
2454 all_root_varobjs ([&copied_types
, objfile
] (struct varobj
*varobj
)
2456 preserve_one_varobj (varobj
, objfile
,
2457 copied_types
.get ());
2460 preserve_ext_lang_values (objfile
, copied_types
.get ());
2464 show_convenience (const char *ignore
, int from_tty
)
2466 struct gdbarch
*gdbarch
= get_current_arch ();
2468 struct value_print_options opts
;
2470 get_user_print_options (&opts
);
2471 for (auto &pair
: internalvars
)
2473 internalvar
&var
= pair
.second
;
2479 gdb_printf (("$%s = "), var
.name
.c_str ());
2485 val
= value_of_internalvar (gdbarch
, &var
);
2486 value_print (val
, gdb_stdout
, &opts
);
2488 catch (const gdb_exception_error
&ex
)
2490 fprintf_styled (gdb_stdout
, metadata_style
.style (),
2491 _("<error: %s>"), ex
.what ());
2494 gdb_printf (("\n"));
2498 /* This text does not mention convenience functions on purpose.
2499 The user can't create them except via Python, and if Python support
2500 is installed this message will never be printed ($_streq will
2502 gdb_printf (_("No debugger convenience variables now defined.\n"
2503 "Convenience variables have "
2504 "names starting with \"$\";\n"
2505 "use \"set\" as in \"set "
2506 "$foo = 5\" to define them.\n"));
2514 value::from_xmethod (xmethod_worker_up
&&worker
)
2518 v
= value::allocate (builtin_type (current_inferior ()->arch ())->xmethod
);
2519 v
->m_lval
= lval_xcallable
;
2520 v
->m_location
.xm_worker
= worker
.release ();
2521 v
->m_modifiable
= false;
2529 value::result_type_of_xmethod (gdb::array_view
<value
*> argv
)
2531 gdb_assert (type ()->code () == TYPE_CODE_XMETHOD
2532 && m_lval
== lval_xcallable
&& !argv
.empty ());
2534 return m_location
.xm_worker
->get_result_type (argv
[0], argv
.slice (1));
2540 value::call_xmethod (gdb::array_view
<value
*> argv
)
2542 gdb_assert (type ()->code () == TYPE_CODE_XMETHOD
2543 && m_lval
== lval_xcallable
&& !argv
.empty ());
2545 return m_location
.xm_worker
->invoke (argv
[0], argv
.slice (1));
2548 /* Extract a value as a C number (either long or double).
2549 Knows how to convert fixed values to double, or
2550 floating values to long.
2551 Does not deallocate the value. */
2554 value_as_long (struct value
*val
)
2556 /* This coerces arrays and functions, which is necessary (e.g.
2557 in disassemble_command). It also dereferences references, which
2558 I suspect is the most logical thing to do. */
2559 val
= coerce_array (val
);
2560 return unpack_long (val
->type (), val
->contents ().data ());
2566 value_as_mpz (struct value
*val
)
2568 val
= coerce_array (val
);
2569 struct type
*type
= check_typedef (val
->type ());
2571 switch (type
->code ())
2573 case TYPE_CODE_ENUM
:
2574 case TYPE_CODE_BOOL
:
2576 case TYPE_CODE_CHAR
:
2577 case TYPE_CODE_RANGE
:
2581 return gdb_mpz (value_as_long (val
));
2586 gdb::array_view
<const gdb_byte
> valbytes
= val
->contents ();
2587 enum bfd_endian byte_order
= type_byte_order (type
);
2589 /* Handle integers that are either not a multiple of the word size,
2590 or that are stored at some bit offset. */
2591 unsigned bit_off
= 0, bit_size
= 0;
2592 if (type
->bit_size_differs_p ())
2594 bit_size
= type
->bit_size ();
2597 /* We can just handle this immediately. */
2601 bit_off
= type
->bit_offset ();
2603 unsigned n_bytes
= ((bit_off
% 8) + bit_size
+ 7) / 8;
2604 valbytes
= valbytes
.slice (bit_off
/ 8, n_bytes
);
2606 if (byte_order
== BFD_ENDIAN_BIG
)
2607 bit_off
= (n_bytes
* 8 - bit_off
% 8 - bit_size
);
2612 result
.read (val
->contents (), byte_order
, type
->is_unsigned ());
2614 /* Shift off any low bits, if needed. */
2618 /* Mask off any high bits, if needed. */
2620 result
.mask (bit_size
);
2622 /* Now handle any range bias. */
2623 if (type
->code () == TYPE_CODE_RANGE
&& type
->bounds ()->bias
!= 0)
2625 /* Unfortunately we have to box here, because LONGEST is
2626 probably wider than long. */
2627 result
+= gdb_mpz (type
->bounds ()->bias
);
2633 /* Extract a value as a C pointer. */
2636 value_as_address (struct value
*val
)
2638 struct gdbarch
*gdbarch
= val
->type ()->arch ();
2640 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
2641 whether we want this to be true eventually. */
2643 /* gdbarch_addr_bits_remove is wrong if we are being called for a
2644 non-address (e.g. argument to "signal", "info break", etc.), or
2645 for pointers to char, in which the low bits *are* significant. */
2646 return gdbarch_addr_bits_remove (gdbarch
, value_as_long (val
));
2649 /* There are several targets (IA-64, PowerPC, and others) which
2650 don't represent pointers to functions as simply the address of
2651 the function's entry point. For example, on the IA-64, a
2652 function pointer points to a two-word descriptor, generated by
2653 the linker, which contains the function's entry point, and the
2654 value the IA-64 "global pointer" register should have --- to
2655 support position-independent code. The linker generates
2656 descriptors only for those functions whose addresses are taken.
2658 On such targets, it's difficult for GDB to convert an arbitrary
2659 function address into a function pointer; it has to either find
2660 an existing descriptor for that function, or call malloc and
2661 build its own. On some targets, it is impossible for GDB to
2662 build a descriptor at all: the descriptor must contain a jump
2663 instruction; data memory cannot be executed; and code memory
2666 Upon entry to this function, if VAL is a value of type `function'
2667 (that is, TYPE_CODE (val->type ()) == TYPE_CODE_FUNC), then
2668 val->address () is the address of the function. This is what
2669 you'll get if you evaluate an expression like `main'. The call
2670 to COERCE_ARRAY below actually does all the usual unary
2671 conversions, which includes converting values of type `function'
2672 to `pointer to function'. This is the challenging conversion
2673 discussed above. Then, `unpack_pointer' will convert that pointer
2674 back into an address.
2676 So, suppose the user types `disassemble foo' on an architecture
2677 with a strange function pointer representation, on which GDB
2678 cannot build its own descriptors, and suppose further that `foo'
2679 has no linker-built descriptor. The address->pointer conversion
2680 will signal an error and prevent the command from running, even
2681 though the next step would have been to convert the pointer
2682 directly back into the same address.
2684 The following shortcut avoids this whole mess. If VAL is a
2685 function, just return its address directly. */
2686 if (val
->type ()->code () == TYPE_CODE_FUNC
2687 || val
->type ()->code () == TYPE_CODE_METHOD
)
2688 return val
->address ();
2690 val
= coerce_array (val
);
2692 /* Some architectures (e.g. Harvard), map instruction and data
2693 addresses onto a single large unified address space. For
2694 instance: An architecture may consider a large integer in the
2695 range 0x10000000 .. 0x1000ffff to already represent a data
2696 addresses (hence not need a pointer to address conversion) while
2697 a small integer would still need to be converted integer to
2698 pointer to address. Just assume such architectures handle all
2699 integer conversions in a single function. */
2703 I think INTEGER_TO_ADDRESS is a good idea as proposed --- but we
2704 must admonish GDB hackers to make sure its behavior matches the
2705 compiler's, whenever possible.
2707 In general, I think GDB should evaluate expressions the same way
2708 the compiler does. When the user copies an expression out of
2709 their source code and hands it to a `print' command, they should
2710 get the same value the compiler would have computed. Any
2711 deviation from this rule can cause major confusion and annoyance,
2712 and needs to be justified carefully. In other words, GDB doesn't
2713 really have the freedom to do these conversions in clever and
2716 AndrewC pointed out that users aren't complaining about how GDB
2717 casts integers to pointers; they are complaining that they can't
2718 take an address from a disassembly listing and give it to `x/i'.
2719 This is certainly important.
2721 Adding an architecture method like integer_to_address() certainly
2722 makes it possible for GDB to "get it right" in all circumstances
2723 --- the target has complete control over how things get done, so
2724 people can Do The Right Thing for their target without breaking
2725 anyone else. The standard doesn't specify how integers get
2726 converted to pointers; usually, the ABI doesn't either, but
2727 ABI-specific code is a more reasonable place to handle it. */
2729 if (!val
->type ()->is_pointer_or_reference ()
2730 && gdbarch_integer_to_address_p (gdbarch
))
2731 return gdbarch_integer_to_address (gdbarch
, val
->type (),
2732 val
->contents ().data ());
2734 return unpack_pointer (val
->type (), val
->contents ().data ());
2738 /* Unpack raw data (copied from debugee, target byte order) at VALADDR
2739 as a long, or as a double, assuming the raw data is described
2740 by type TYPE. Knows how to convert different sizes of values
2741 and can convert between fixed and floating point. We don't assume
2742 any alignment for the raw data. Return value is in host byte order.
2744 If you want functions and arrays to be coerced to pointers, and
2745 references to be dereferenced, call value_as_long() instead.
2747 C++: It is assumed that the front-end has taken care of
2748 all matters concerning pointers to members. A pointer
2749 to member which reaches here is considered to be equivalent
2750 to an INT (or some size). After all, it is only an offset. */
2753 unpack_long (struct type
*type
, const gdb_byte
*valaddr
)
2755 if (is_fixed_point_type (type
))
2756 type
= type
->fixed_point_type_base_type ();
2758 enum bfd_endian byte_order
= type_byte_order (type
);
2759 enum type_code code
= type
->code ();
2760 int len
= type
->length ();
2761 int nosign
= type
->is_unsigned ();
2765 case TYPE_CODE_TYPEDEF
:
2766 return unpack_long (check_typedef (type
), valaddr
);
2767 case TYPE_CODE_ENUM
:
2768 case TYPE_CODE_FLAGS
:
2769 case TYPE_CODE_BOOL
:
2771 case TYPE_CODE_CHAR
:
2772 case TYPE_CODE_RANGE
:
2773 case TYPE_CODE_MEMBERPTR
:
2777 if (type
->bit_size_differs_p ())
2779 unsigned bit_off
= type
->bit_offset ();
2780 unsigned bit_size
= type
->bit_size ();
2783 /* unpack_bits_as_long doesn't handle this case the
2784 way we'd like, so handle it here. */
2788 result
= unpack_bits_as_long (type
, valaddr
, bit_off
, bit_size
);
2793 result
= extract_unsigned_integer (valaddr
, len
, byte_order
);
2795 result
= extract_signed_integer (valaddr
, len
, byte_order
);
2797 if (code
== TYPE_CODE_RANGE
)
2798 result
+= type
->bounds ()->bias
;
2803 case TYPE_CODE_DECFLOAT
:
2804 return target_float_to_longest (valaddr
, type
);
2806 case TYPE_CODE_FIXED_POINT
:
2809 vq
.read_fixed_point (gdb::make_array_view (valaddr
, len
),
2811 type
->fixed_point_scaling_factor ());
2813 gdb_mpz vz
= vq
.as_integer ();
2814 return vz
.as_integer
<LONGEST
> ();
2819 case TYPE_CODE_RVALUE_REF
:
2820 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
2821 whether we want this to be true eventually. */
2822 return extract_typed_address (valaddr
, type
);
2825 error (_("Value can't be converted to integer."));
2829 /* Unpack raw data (copied from debugee, target byte order) at VALADDR
2830 as a CORE_ADDR, assuming the raw data is described by type TYPE.
2831 We don't assume any alignment for the raw data. Return value is in
2834 If you want functions and arrays to be coerced to pointers, and
2835 references to be dereferenced, call value_as_address() instead.
2837 C++: It is assumed that the front-end has taken care of
2838 all matters concerning pointers to members. A pointer
2839 to member which reaches here is considered to be equivalent
2840 to an INT (or some size). After all, it is only an offset. */
2843 unpack_pointer (struct type
*type
, const gdb_byte
*valaddr
)
2845 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
2846 whether we want this to be true eventually. */
2847 return unpack_long (type
, valaddr
);
2851 is_floating_value (struct value
*val
)
2853 struct type
*type
= check_typedef (val
->type ());
2855 if (is_floating_type (type
))
2857 if (!target_float_is_valid (val
->contents ().data (), type
))
2858 error (_("Invalid floating value found in program."));
2866 /* Get the value of the FIELDNO'th field (which must be static) of
2870 value_static_field (struct type
*type
, int fieldno
)
2872 struct value
*retval
;
2874 switch (type
->field (fieldno
).loc_kind ())
2876 case FIELD_LOC_KIND_PHYSADDR
:
2877 retval
= value_at_lazy (type
->field (fieldno
).type (),
2878 type
->field (fieldno
).loc_physaddr ());
2880 case FIELD_LOC_KIND_PHYSNAME
:
2882 const char *phys_name
= type
->field (fieldno
).loc_physname ();
2883 /* type->field (fieldno).name (); */
2884 struct block_symbol sym
= lookup_symbol (phys_name
, 0, VAR_DOMAIN
, 0);
2886 if (sym
.symbol
== NULL
)
2888 /* With some compilers, e.g. HP aCC, static data members are
2889 reported as non-debuggable symbols. */
2890 struct bound_minimal_symbol msym
2891 = lookup_minimal_symbol (phys_name
, NULL
, NULL
);
2892 struct type
*field_type
= type
->field (fieldno
).type ();
2895 retval
= value::allocate_optimized_out (field_type
);
2897 retval
= value_at_lazy (field_type
, msym
.value_address ());
2900 retval
= value_of_variable (sym
.symbol
, sym
.block
);
2904 gdb_assert_not_reached ("unexpected field location kind");
2910 /* Change the enclosing type of a value object VAL to NEW_ENCL_TYPE.
2911 You have to be careful here, since the size of the data area for the value
2912 is set by the length of the enclosing type. So if NEW_ENCL_TYPE is bigger
2913 than the old enclosing type, you have to allocate more space for the
2917 value::set_enclosing_type (struct type
*new_encl_type
)
2919 if (new_encl_type
->length () > enclosing_type ()->length ())
2921 check_type_length_before_alloc (new_encl_type
);
2922 m_contents
.reset ((gdb_byte
*) xrealloc (m_contents
.release (),
2923 new_encl_type
->length ()));
2926 m_enclosing_type
= new_encl_type
;
2932 value::primitive_field (LONGEST offset
, int fieldno
, struct type
*arg_type
)
2936 int unit_size
= gdbarch_addressable_memory_unit_size (arch ());
2938 arg_type
= check_typedef (arg_type
);
2939 type
= arg_type
->field (fieldno
).type ();
2941 /* Call check_typedef on our type to make sure that, if TYPE
2942 is a TYPE_CODE_TYPEDEF, its length is set to the length
2943 of the target type instead of zero. However, we do not
2944 replace the typedef type by the target type, because we want
2945 to keep the typedef in order to be able to print the type
2946 description correctly. */
2947 check_typedef (type
);
2949 if (arg_type
->field (fieldno
).bitsize ())
2951 /* Handle packed fields.
2953 Create a new value for the bitfield, with bitpos and bitsize
2954 set. If possible, arrange offset and bitpos so that we can
2955 do a single aligned read of the size of the containing type.
2956 Otherwise, adjust offset to the byte containing the first
2957 bit. Assume that the address, offset, and embedded offset
2958 are sufficiently aligned. */
2960 LONGEST bitpos
= arg_type
->field (fieldno
).loc_bitpos ();
2961 LONGEST container_bitsize
= type
->length () * 8;
2963 v
= value::allocate_lazy (type
);
2964 v
->set_bitsize (arg_type
->field (fieldno
).bitsize ());
2965 if ((bitpos
% container_bitsize
) + v
->bitsize () <= container_bitsize
2966 && type
->length () <= (int) sizeof (LONGEST
))
2967 v
->set_bitpos (bitpos
% container_bitsize
);
2969 v
->set_bitpos (bitpos
% 8);
2970 v
->set_offset ((embedded_offset ()
2972 + (bitpos
- v
->bitpos ()) / 8));
2973 v
->set_parent (this);
2977 else if (fieldno
< TYPE_N_BASECLASSES (arg_type
))
2979 /* This field is actually a base subobject, so preserve the
2980 entire object's contents for later references to virtual
2984 /* Lazy register values with offsets are not supported. */
2985 if (this->lval () == lval_register
&& lazy ())
2988 /* We special case virtual inheritance here because this
2989 requires access to the contents, which we would rather avoid
2990 for references to ordinary fields of unavailable values. */
2991 if (BASETYPE_VIA_VIRTUAL (arg_type
, fieldno
))
2992 boffset
= baseclass_offset (arg_type
, fieldno
,
2993 contents ().data (),
2998 boffset
= arg_type
->field (fieldno
).loc_bitpos () / 8;
3001 v
= value::allocate_lazy (enclosing_type ());
3004 v
= value::allocate (enclosing_type ());
3005 contents_copy_raw (v
, 0, 0, enclosing_type ()->length ());
3007 v
->deprecated_set_type (type
);
3008 v
->set_offset (this->offset ());
3009 v
->set_embedded_offset (offset
+ embedded_offset () + boffset
);
3011 else if (NULL
!= TYPE_DATA_LOCATION (type
))
3013 /* Field is a dynamic data member. */
3015 gdb_assert (0 == offset
);
3016 /* We expect an already resolved data location. */
3017 gdb_assert (TYPE_DATA_LOCATION (type
)->is_constant ());
3018 /* For dynamic data types defer memory allocation
3019 until we actual access the value. */
3020 v
= value::allocate_lazy (type
);
3024 /* Plain old data member */
3025 offset
+= (arg_type
->field (fieldno
).loc_bitpos ()
3026 / (HOST_CHAR_BIT
* unit_size
));
3028 /* Lazy register values with offsets are not supported. */
3029 if (this->lval () == lval_register
&& lazy ())
3033 v
= value::allocate_lazy (type
);
3036 v
= value::allocate (type
);
3037 contents_copy_raw (v
, v
->embedded_offset (),
3038 embedded_offset () + offset
,
3039 type_length_units (type
));
3041 v
->set_offset (this->offset () + offset
+ embedded_offset ());
3043 v
->set_component_location (this);
3047 /* Given a value ARG1 of a struct or union type,
3048 extract and return the value of one of its (non-static) fields.
3049 FIELDNO says which field. */
3052 value_field (struct value
*arg1
, int fieldno
)
3054 return arg1
->primitive_field (0, fieldno
, arg1
->type ());
3057 /* Return a non-virtual function as a value.
3058 F is the list of member functions which contains the desired method.
3059 J is an index into F which provides the desired method.
3061 We only use the symbol for its address, so be happy with either a
3062 full symbol or a minimal symbol. */
3065 value_fn_field (struct value
**arg1p
, struct fn_field
*f
,
3066 int j
, struct type
*type
,
3070 struct type
*ftype
= TYPE_FN_FIELD_TYPE (f
, j
);
3071 const char *physname
= TYPE_FN_FIELD_PHYSNAME (f
, j
);
3073 struct bound_minimal_symbol msym
;
3075 sym
= lookup_symbol (physname
, 0, VAR_DOMAIN
, 0).symbol
;
3078 msym
= lookup_bound_minimal_symbol (physname
);
3079 if (msym
.minsym
== NULL
)
3083 v
= value::allocate (ftype
);
3084 v
->set_lval (lval_memory
);
3087 v
->set_address (sym
->value_block ()->entry_pc ());
3091 /* The minimal symbol might point to a function descriptor;
3092 resolve it to the actual code address instead. */
3093 struct objfile
*objfile
= msym
.objfile
;
3094 struct gdbarch
*gdbarch
= objfile
->arch ();
3096 v
->set_address (gdbarch_convert_from_func_ptr_addr
3097 (gdbarch
, msym
.value_address (),
3098 current_inferior ()->top_target ()));
3103 if (type
!= (*arg1p
)->type ())
3104 *arg1p
= value_ind (value_cast (lookup_pointer_type (type
),
3105 value_addr (*arg1p
)));
3107 /* Move the `this' pointer according to the offset.
3108 (*arg1p)->offset () += offset; */
3119 unpack_bits_as_long (struct type
*field_type
, const gdb_byte
*valaddr
,
3120 LONGEST bitpos
, LONGEST bitsize
)
3122 enum bfd_endian byte_order
= type_byte_order (field_type
);
3127 LONGEST read_offset
;
3129 /* Read the minimum number of bytes required; there may not be
3130 enough bytes to read an entire ULONGEST. */
3131 field_type
= check_typedef (field_type
);
3133 bytes_read
= ((bitpos
% 8) + bitsize
+ 7) / 8;
3136 bytes_read
= field_type
->length ();
3137 bitsize
= 8 * bytes_read
;
3140 read_offset
= bitpos
/ 8;
3142 val
= extract_unsigned_integer (valaddr
+ read_offset
,
3143 bytes_read
, byte_order
);
3145 /* Extract bits. See comment above. */
3147 if (byte_order
== BFD_ENDIAN_BIG
)
3148 lsbcount
= (bytes_read
* 8 - bitpos
% 8 - bitsize
);
3150 lsbcount
= (bitpos
% 8);
3153 /* If the field does not entirely fill a LONGEST, then zero the sign bits.
3154 If the field is signed, and is negative, then sign extend. */
3156 if (bitsize
< 8 * (int) sizeof (val
))
3158 valmask
= (((ULONGEST
) 1) << bitsize
) - 1;
3160 if (!field_type
->is_unsigned ())
3162 if (val
& (valmask
^ (valmask
>> 1)))
3172 /* Unpack a field FIELDNO of the specified TYPE, from the object at
3173 VALADDR + EMBEDDED_OFFSET. VALADDR points to the contents of
3174 ORIGINAL_VALUE, which must not be NULL. See
3175 unpack_value_bits_as_long for more details. */
3178 unpack_value_field_as_long (struct type
*type
, const gdb_byte
*valaddr
,
3179 LONGEST embedded_offset
, int fieldno
,
3180 const struct value
*val
, LONGEST
*result
)
3182 int bitpos
= type
->field (fieldno
).loc_bitpos ();
3183 int bitsize
= type
->field (fieldno
).bitsize ();
3184 struct type
*field_type
= type
->field (fieldno
).type ();
3187 gdb_assert (val
!= NULL
);
3189 bit_offset
= embedded_offset
* TARGET_CHAR_BIT
+ bitpos
;
3190 if (val
->bits_any_optimized_out (bit_offset
, bitsize
)
3191 || !val
->bits_available (bit_offset
, bitsize
))
3194 *result
= unpack_bits_as_long (field_type
, valaddr
+ embedded_offset
,
3199 /* Unpack a field FIELDNO of the specified TYPE, from the anonymous
3200 object at VALADDR. See unpack_bits_as_long for more details. */
3203 unpack_field_as_long (struct type
*type
, const gdb_byte
*valaddr
, int fieldno
)
3205 int bitpos
= type
->field (fieldno
).loc_bitpos ();
3206 int bitsize
= type
->field (fieldno
).bitsize ();
3207 struct type
*field_type
= type
->field (fieldno
).type ();
3209 return unpack_bits_as_long (field_type
, valaddr
, bitpos
, bitsize
);
3215 value::unpack_bitfield (struct value
*dest_val
,
3216 LONGEST bitpos
, LONGEST bitsize
,
3217 const gdb_byte
*valaddr
, LONGEST embedded_offset
)
3220 enum bfd_endian byte_order
;
3223 struct type
*field_type
= dest_val
->type ();
3225 byte_order
= type_byte_order (field_type
);
3227 /* First, unpack and sign extend the bitfield as if it was wholly
3228 valid. Optimized out/unavailable bits are read as zero, but
3229 that's OK, as they'll end up marked below. If the VAL is
3230 wholly-invalid we may have skipped allocating its contents,
3231 though. See value::allocate_optimized_out. */
3232 if (valaddr
!= NULL
)
3236 num
= unpack_bits_as_long (field_type
, valaddr
+ embedded_offset
,
3238 store_signed_integer (dest_val
->contents_raw ().data (),
3239 field_type
->length (), byte_order
, num
);
3242 /* Now copy the optimized out / unavailability ranges to the right
3244 src_bit_offset
= embedded_offset
* TARGET_CHAR_BIT
+ bitpos
;
3245 if (byte_order
== BFD_ENDIAN_BIG
)
3246 dst_bit_offset
= field_type
->length () * TARGET_CHAR_BIT
- bitsize
;
3249 ranges_copy_adjusted (dest_val
, dst_bit_offset
, src_bit_offset
, bitsize
);
3252 /* Return a new value with type TYPE, which is FIELDNO field of the
3253 object at VALADDR + EMBEDDEDOFFSET. VALADDR points to the contents
3254 of VAL. If the VAL's contents required to extract the bitfield
3255 from are unavailable/optimized out, the new value is
3256 correspondingly marked unavailable/optimized out. */
3259 value_field_bitfield (struct type
*type
, int fieldno
,
3260 const gdb_byte
*valaddr
,
3261 LONGEST embedded_offset
, const struct value
*val
)
3263 int bitpos
= type
->field (fieldno
).loc_bitpos ();
3264 int bitsize
= type
->field (fieldno
).bitsize ();
3265 struct value
*res_val
= value::allocate (type
->field (fieldno
).type ());
3267 val
->unpack_bitfield (res_val
, bitpos
, bitsize
, valaddr
, embedded_offset
);
3272 /* Modify the value of a bitfield. ADDR points to a block of memory in
3273 target byte order; the bitfield starts in the byte pointed to. FIELDVAL
3274 is the desired value of the field, in host byte order. BITPOS and BITSIZE
3275 indicate which bits (in target bit order) comprise the bitfield.
3276 Requires 0 < BITSIZE <= lbits, 0 <= BITPOS % 8 + BITSIZE <= lbits, and
3277 0 <= BITPOS, where lbits is the size of a LONGEST in bits. */
3280 modify_field (struct type
*type
, gdb_byte
*addr
,
3281 LONGEST fieldval
, LONGEST bitpos
, LONGEST bitsize
)
3283 enum bfd_endian byte_order
= type_byte_order (type
);
3285 ULONGEST mask
= (ULONGEST
) -1 >> (8 * sizeof (ULONGEST
) - bitsize
);
3288 /* Normalize BITPOS. */
3292 /* If a negative fieldval fits in the field in question, chop
3293 off the sign extension bits. */
3294 if ((~fieldval
& ~(mask
>> 1)) == 0)
3297 /* Warn if value is too big to fit in the field in question. */
3298 if (0 != (fieldval
& ~mask
))
3300 /* FIXME: would like to include fieldval in the message, but
3301 we don't have a sprintf_longest. */
3302 warning (_("Value does not fit in %s bits."), plongest (bitsize
));
3304 /* Truncate it, otherwise adjoining fields may be corrupted. */
3308 /* Ensure no bytes outside of the modified ones get accessed as it may cause
3309 false valgrind reports. */
3311 bytesize
= (bitpos
+ bitsize
+ 7) / 8;
3312 oword
= extract_unsigned_integer (addr
, bytesize
, byte_order
);
3314 /* Shifting for bit field depends on endianness of the target machine. */
3315 if (byte_order
== BFD_ENDIAN_BIG
)
3316 bitpos
= bytesize
* 8 - bitpos
- bitsize
;
3318 oword
&= ~(mask
<< bitpos
);
3319 oword
|= fieldval
<< bitpos
;
3321 store_unsigned_integer (addr
, bytesize
, byte_order
, oword
);
3324 /* Pack NUM into BUF using a target format of TYPE. */
3327 pack_long (gdb_byte
*buf
, struct type
*type
, LONGEST num
)
3329 enum bfd_endian byte_order
= type_byte_order (type
);
3332 type
= check_typedef (type
);
3333 len
= type
->length ();
3335 switch (type
->code ())
3337 case TYPE_CODE_RANGE
:
3338 num
-= type
->bounds ()->bias
;
3341 case TYPE_CODE_CHAR
:
3342 case TYPE_CODE_ENUM
:
3343 case TYPE_CODE_FLAGS
:
3344 case TYPE_CODE_BOOL
:
3345 case TYPE_CODE_MEMBERPTR
:
3346 if (type
->bit_size_differs_p ())
3348 unsigned bit_off
= type
->bit_offset ();
3349 unsigned bit_size
= type
->bit_size ();
3350 num
&= ((ULONGEST
) 1 << bit_size
) - 1;
3353 store_signed_integer (buf
, len
, byte_order
, num
);
3357 case TYPE_CODE_RVALUE_REF
:
3359 store_typed_address (buf
, type
, (CORE_ADDR
) num
);
3363 case TYPE_CODE_DECFLOAT
:
3364 target_float_from_longest (buf
, type
, num
);
3368 error (_("Unexpected type (%d) encountered for integer constant."),
3374 /* Pack NUM into BUF using a target format of TYPE. */
3377 pack_unsigned_long (gdb_byte
*buf
, struct type
*type
, ULONGEST num
)
3380 enum bfd_endian byte_order
;
3382 type
= check_typedef (type
);
3383 len
= type
->length ();
3384 byte_order
= type_byte_order (type
);
3386 switch (type
->code ())
3389 case TYPE_CODE_CHAR
:
3390 case TYPE_CODE_ENUM
:
3391 case TYPE_CODE_FLAGS
:
3392 case TYPE_CODE_BOOL
:
3393 case TYPE_CODE_RANGE
:
3394 case TYPE_CODE_MEMBERPTR
:
3395 if (type
->bit_size_differs_p ())
3397 unsigned bit_off
= type
->bit_offset ();
3398 unsigned bit_size
= type
->bit_size ();
3399 num
&= ((ULONGEST
) 1 << bit_size
) - 1;
3402 store_unsigned_integer (buf
, len
, byte_order
, num
);
3406 case TYPE_CODE_RVALUE_REF
:
3408 store_typed_address (buf
, type
, (CORE_ADDR
) num
);
3412 case TYPE_CODE_DECFLOAT
:
3413 target_float_from_ulongest (buf
, type
, num
);
3417 error (_("Unexpected type (%d) encountered "
3418 "for unsigned integer constant."),
3426 value::zero (struct type
*type
, enum lval_type lv
)
3428 struct value
*val
= value::allocate_lazy (type
);
3430 val
->set_lval (lv
== lval_computed
? not_lval
: lv
);
3431 val
->m_is_zero
= true;
3435 /* Convert C numbers into newly allocated values. */
3438 value_from_longest (struct type
*type
, LONGEST num
)
3440 struct value
*val
= value::allocate (type
);
3442 pack_long (val
->contents_raw ().data (), type
, num
);
3447 /* Convert C unsigned numbers into newly allocated values. */
3450 value_from_ulongest (struct type
*type
, ULONGEST num
)
3452 struct value
*val
= value::allocate (type
);
3454 pack_unsigned_long (val
->contents_raw ().data (), type
, num
);
3462 value_from_mpz (struct type
*type
, const gdb_mpz
&v
)
3464 struct type
*real_type
= check_typedef (type
);
3466 const gdb_mpz
*val
= &v
;
3468 if (real_type
->code () == TYPE_CODE_RANGE
&& type
->bounds ()->bias
!= 0)
3472 storage
-= type
->bounds ()->bias
;
3475 if (type
->bit_size_differs_p ())
3477 unsigned bit_off
= type
->bit_offset ();
3478 unsigned bit_size
= type
->bit_size ();
3480 if (val
!= &storage
)
3486 storage
.mask (bit_size
);
3487 storage
<<= bit_off
;
3490 struct value
*result
= value::allocate (type
);
3491 val
->truncate (result
->contents_raw (), type_byte_order (type
),
3492 type
->is_unsigned ());
3496 /* Create a value representing a pointer of type TYPE to the address
3500 value_from_pointer (struct type
*type
, CORE_ADDR addr
)
3502 struct value
*val
= value::allocate (type
);
3504 store_typed_address (val
->contents_raw ().data (),
3505 check_typedef (type
), addr
);
3509 /* Create and return a value object of TYPE containing the value D. The
3510 TYPE must be of TYPE_CODE_FLT, and must be large enough to hold D once
3511 it is converted to target format. */
3514 value_from_host_double (struct type
*type
, double d
)
3516 struct value
*value
= value::allocate (type
);
3517 gdb_assert (type
->code () == TYPE_CODE_FLT
);
3518 target_float_from_host_double (value
->contents_raw ().data (),
3523 /* Create a value of type TYPE whose contents come from VALADDR, if it
3524 is non-null, and whose memory address (in the inferior) is
3525 ADDRESS. The type of the created value may differ from the passed
3526 type TYPE. Make sure to retrieve values new type after this call.
3527 Note that TYPE is not passed through resolve_dynamic_type; this is
3528 a special API intended for use only by Ada. */
3531 value_from_contents_and_address_unresolved (struct type
*type
,
3532 const gdb_byte
*valaddr
,
3537 if (valaddr
== NULL
)
3538 v
= value::allocate_lazy (type
);
3540 v
= value_from_contents (type
, valaddr
);
3541 v
->set_lval (lval_memory
);
3542 v
->set_address (address
);
3546 /* Create a value of type TYPE whose contents come from VALADDR, if it
3547 is non-null, and whose memory address (in the inferior) is
3548 ADDRESS. The type of the created value may differ from the passed
3549 type TYPE. Make sure to retrieve values new type after this call. */
3552 value_from_contents_and_address (struct type
*type
,
3553 const gdb_byte
*valaddr
,
3555 frame_info_ptr frame
)
3557 gdb::array_view
<const gdb_byte
> view
;
3558 if (valaddr
!= nullptr)
3559 view
= gdb::make_array_view (valaddr
, type
->length ());
3560 struct type
*resolved_type
= resolve_dynamic_type (type
, view
, address
,
3562 struct type
*resolved_type_no_typedef
= check_typedef (resolved_type
);
3565 if (valaddr
== NULL
)
3566 v
= value::allocate_lazy (resolved_type
);
3568 v
= value_from_contents (resolved_type
, valaddr
);
3569 if (TYPE_DATA_LOCATION (resolved_type_no_typedef
) != NULL
3570 && TYPE_DATA_LOCATION (resolved_type_no_typedef
)->is_constant ())
3571 address
= TYPE_DATA_LOCATION_ADDR (resolved_type_no_typedef
);
3572 v
->set_lval (lval_memory
);
3573 v
->set_address (address
);
3577 /* Create a value of type TYPE holding the contents CONTENTS.
3578 The new value is `not_lval'. */
3581 value_from_contents (struct type
*type
, const gdb_byte
*contents
)
3583 struct value
*result
;
3585 result
= value::allocate (type
);
3586 memcpy (result
->contents_raw ().data (), contents
, type
->length ());
3590 /* Extract a value from the history file. Input will be of the form
3591 $digits or $$digits. See block comment above 'write_dollar_variable'
3595 value_from_history_ref (const char *h
, const char **endp
)
3607 /* Find length of numeral string. */
3608 for (; isdigit (h
[len
]); len
++)
3611 /* Make sure numeral string is not part of an identifier. */
3612 if (h
[len
] == '_' || isalpha (h
[len
]))
3615 /* Now collect the index value. */
3620 /* For some bizarre reason, "$$" is equivalent to "$$1",
3621 rather than to "$$0" as it ought to be! */
3629 index
= -strtol (&h
[2], &local_end
, 10);
3637 /* "$" is equivalent to "$0". */
3645 index
= strtol (&h
[1], &local_end
, 10);
3650 return access_value_history (index
);
3653 /* Get the component value (offset by OFFSET bytes) of a struct or
3654 union WHOLE. Component's type is TYPE. */
3657 value_from_component (struct value
*whole
, struct type
*type
, LONGEST offset
)
3661 if (whole
->lval () == lval_memory
&& whole
->lazy ())
3662 v
= value::allocate_lazy (type
);
3665 v
= value::allocate (type
);
3666 whole
->contents_copy (v
, v
->embedded_offset (),
3667 whole
->embedded_offset () + offset
,
3668 type_length_units (type
));
3670 v
->set_offset (whole
->offset () + offset
+ whole
->embedded_offset ());
3671 v
->set_component_location (whole
);
3679 value::from_component_bitsize (struct type
*type
,
3680 LONGEST bit_offset
, LONGEST bit_length
)
3682 gdb_assert (!lazy ());
3684 /* Preserve lvalue-ness if possible. This is needed to avoid
3685 array-printing failures (including crashes) when printing Ada
3686 arrays in programs compiled with -fgnat-encodings=all. */
3687 if ((bit_offset
% TARGET_CHAR_BIT
) == 0
3688 && (bit_length
% TARGET_CHAR_BIT
) == 0
3689 && bit_length
== TARGET_CHAR_BIT
* type
->length ())
3690 return value_from_component (this, type
, bit_offset
/ TARGET_CHAR_BIT
);
3692 struct value
*v
= value::allocate (type
);
3694 LONGEST dst_offset
= TARGET_CHAR_BIT
* v
->embedded_offset ();
3695 if (is_scalar_type (type
) && type_byte_order (type
) == BFD_ENDIAN_BIG
)
3696 dst_offset
+= TARGET_CHAR_BIT
* type
->length () - bit_length
;
3698 contents_copy_raw_bitwise (v
, dst_offset
,
3700 * embedded_offset ()
3707 coerce_ref_if_computed (const struct value
*arg
)
3709 const struct lval_funcs
*funcs
;
3711 if (!TYPE_IS_REFERENCE (check_typedef (arg
->type ())))
3714 if (arg
->lval () != lval_computed
)
3717 funcs
= arg
->computed_funcs ();
3718 if (funcs
->coerce_ref
== NULL
)
3721 return funcs
->coerce_ref (arg
);
3724 /* Look at value.h for description. */
3727 readjust_indirect_value_type (struct value
*value
, struct type
*enc_type
,
3728 const struct type
*original_type
,
3729 struct value
*original_value
,
3730 CORE_ADDR original_value_address
)
3732 gdb_assert (original_type
->is_pointer_or_reference ());
3734 struct type
*original_target_type
= original_type
->target_type ();
3735 gdb::array_view
<const gdb_byte
> view
;
3736 struct type
*resolved_original_target_type
3737 = resolve_dynamic_type (original_target_type
, view
,
3738 original_value_address
);
3740 /* Re-adjust type. */
3741 value
->deprecated_set_type (resolved_original_target_type
);
3743 /* Add embedding info. */
3744 value
->set_enclosing_type (enc_type
);
3745 value
->set_embedded_offset (original_value
->pointed_to_offset ());
3747 /* We may be pointing to an object of some derived type. */
3748 return value_full_object (value
, NULL
, 0, 0, 0);
3752 coerce_ref (struct value
*arg
)
3754 struct type
*value_type_arg_tmp
= check_typedef (arg
->type ());
3755 struct value
*retval
;
3756 struct type
*enc_type
;
3758 retval
= coerce_ref_if_computed (arg
);
3762 if (!TYPE_IS_REFERENCE (value_type_arg_tmp
))
3765 enc_type
= check_typedef (arg
->enclosing_type ());
3766 enc_type
= enc_type
->target_type ();
3768 CORE_ADDR addr
= unpack_pointer (arg
->type (), arg
->contents ().data ());
3769 retval
= value_at_lazy (enc_type
, addr
);
3770 enc_type
= retval
->type ();
3771 return readjust_indirect_value_type (retval
, enc_type
, value_type_arg_tmp
,
/* Apply the language's decay rules to ARG: strip references, then
   decay arrays (in C-style-array languages) and functions to
   pointers.  */
3776 coerce_array (struct value
*arg
)
3780 arg
= coerce_ref (arg
);
3781 type
= check_typedef (arg
->type ());
3783 switch (type
->code ())
3785 case TYPE_CODE_ARRAY
:
/* Vectors never decay; plain arrays decay to a pointer to the
   first element when the current language uses C-style arrays.  */
3786 if (!type
->is_vector () && current_language
->c_style_arrays_p ())
3787 arg
= value_coerce_array (arg
);
3789 case TYPE_CODE_FUNC
:
/* A function value decays to a pointer to the function.  */
3790 arg
= value_coerce_function (arg
);
3797 /* Return the return value convention that will be used for the
specified type.  */
/* Determine how a value of VALUE_TYPE is returned from FUNCTION by
   asking the architecture (GDBARCH).  Errors out if the return type
   is unknown.  */
3800 enum return_value_convention
3801 struct_return_convention (struct gdbarch
*gdbarch
,
3802 struct value
*function
, struct type
*value_type
)
3804 enum type_code code
= value_type
->code ();
/* No convention can be chosen for an erroneous type.  */
3806 if (code
== TYPE_CODE_ERROR
)
3807 error (_("Function return type unknown."));
3809 /* Probe the architecture for the return-value convention. */
3810 return gdbarch_return_value_as_value (gdbarch
, function
, value_type
,
3814 /* Return true if the function returning the specified type is using
3815 the convention of returning structures in memory (passing in the
3816 address as a hidden first parameter). */
3819 using_struct_return (struct gdbarch
*gdbarch
,
3820 struct value
*function
, struct type
*value_type
)
3822 if (value_type
->code () == TYPE_CODE_VOID
)
3823 /* A void return value is never in memory. See also corresponding
3824 code in "print_return_value". */
/* NOTE(review): the early return for the void case appears to have
   been lost in extraction here.  */
/* Anything not returned in a register is returned in memory.  */
3827 return (struct_return_convention (gdbarch
, function
, value_type
)
3828 != RETURN_VALUE_REGISTER_CONVENTION
);
/* Fetch a lazy bitfield value by fetching its whole parent value and
   unpacking the relevant bits out of it.  */
3834 value::fetch_lazy_bitfield ()
3836 gdb_assert (bitsize () != 0);
3838 /* To read a lazy bitfield, read the entire enclosing value. This
3839 prevents reading the same block of (possibly volatile) memory once
3840 per bitfield. It would be even better to read only the containing
3841 word, but we have no way to record that just specific bits of a
3842 value have been fetched. */
3843 struct value
*parent
= this->parent ();
3845 if (parent
->lazy ())
3846 parent
->fetch_lazy ();
/* Extract this bitfield's bits from the (now fetched) parent.  */
3848 parent
->unpack_bitfield (this, bitpos (), bitsize (),
3849 parent
->contents_for_printing ().data (),
/* Fetch a lazy lval_memory value by reading its bytes from the
   target at the value's address.  */
3856 value::fetch_lazy_memory ()
3858 gdb_assert (m_lval
== lval_memory
);
3860 CORE_ADDR addr
= address ();
3861 struct type
*type
= check_typedef (enclosing_type ());
3863 /* Figure out how much we should copy from memory. Usually, this is just
3864 the size of the type, but, for arrays, we might only be loading a
3865 small part of the array (this is only done for very large arrays). */
3867 if (m_limited_length
> 0)
3869 gdb_assert (this->type ()->code () == TYPE_CODE_ARRAY
);
3870 len
= m_limited_length
;
3872 else if (type
->length () > 0)
3873 len
= type_length_units (type
);
3875 gdb_assert (len
>= 0);
/* Do the actual read; offset 0 means the whole enclosing value.  */
3878 read_value_memory (this, 0, stack (), addr
,
3879 contents_all_raw ().data (), len
);
/* Fetch a lazy lval_register value.  Walk the chain of lazy register
   values (unwinding one frame per step) until a non-lazy or
   non-register value is reached, then copy its contents and
   availability/optimized-out metadata into this value.  The trailing
   section builds a debug dump of the fetch (presumably emitted only
   when frame debugging is enabled -- the guarding condition appears
   to have been lost in extraction of this chunk).  */
3885 value::fetch_lazy_register ()
3887 frame_info_ptr next_frame
;
3889 struct type
*type
= check_typedef (this->type ());
3890 struct value
*new_val
= this;
/* Release any intermediate values created below when we return.  */
3892 scoped_value_mark mark
;
3894 /* Offsets are not supported here; lazy register values must
3895 refer to the entire register. */
3896 gdb_assert (offset () == 0);
3898 while (new_val
->lval () == lval_register
&& new_val
->lazy ())
3900 struct frame_id next_frame_id
= VALUE_NEXT_FRAME_ID (new_val
);
3902 next_frame
= frame_find_by_id (next_frame_id
);
3903 regnum
= VALUE_REGNUM (new_val
);
3905 gdb_assert (next_frame
!= NULL
);
3907 /* Convertible register routines are used for multi-register
3908 values and for interpretation in different types
3909 (e.g. float or int from a double register). Lazy
3910 register values should have the register's natural type,
3911 so they do not apply. */
3912 gdb_assert (!gdbarch_convert_register_p (get_frame_arch (next_frame
),
3915 /* FRAME was obtained, above, via VALUE_NEXT_FRAME_ID.
3916 Since a "->next" operation was performed when setting
3917 this field, we do not need to perform a "next" operation
3918 again when unwinding the register. That's why
3919 frame_unwind_register_value() is called here instead of
3920 get_frame_register_value(). */
3921 new_val
= frame_unwind_register_value (next_frame
, regnum
);
3923 /* If we get another lazy lval_register value, it means the
3924 register is found by reading it from NEXT_FRAME's next frame.
3925 frame_unwind_register_value should never return a value with
3926 the frame id pointing to NEXT_FRAME. If it does, it means we
3927 either have two consecutive frames with the same frame id
3928 in the frame chain, or some code is trying to unwind
3929 behind get_prev_frame's back (e.g., a frame unwind
3930 sniffer trying to unwind), bypassing its validations. In
3931 any case, it should always be an internal error to end up
3932 in this situation. */
3933 if (new_val
->lval () == lval_register
3935 && VALUE_NEXT_FRAME_ID (new_val
) == next_frame_id
)
3936 internal_error (_("infinite loop while fetching a register"));
3939 /* If it's still lazy (for instance, a saved register on the
3940 stack), fetch it. */
3941 if (new_val
->lazy ())
3942 new_val
->fetch_lazy ();
3944 /* Copy the contents and the unavailability/optimized-out
3945 meta-data from NEW_VAL to VAL. */
3947 new_val
->contents_copy (this, embedded_offset (),
3948 new_val
->embedded_offset (),
3949 type_length_units (type
));
/* Debug-dump section: describe where the register value ultimately
   came from (register, memory, or computed) and its raw bytes.  */
3953 struct gdbarch
*gdbarch
;
3954 frame_info_ptr frame
;
3955 frame
= frame_find_by_id (VALUE_NEXT_FRAME_ID (this));
3956 frame
= get_prev_frame_always (frame
);
3957 regnum
= VALUE_REGNUM (this);
3958 gdbarch
= get_frame_arch (frame
);
3960 string_file debug_file
;
3961 gdb_printf (&debug_file
,
3962 "(frame=%d, regnum=%d(%s), ...) ",
3963 frame_relative_level (frame
), regnum
,
3964 user_reg_map_regnum_to_name (gdbarch
, regnum
));
3966 gdb_printf (&debug_file
, "->");
3967 if (new_val
->optimized_out ())
3969 gdb_printf (&debug_file
, " ");
3970 val_print_optimized_out (new_val
, &debug_file
);
/* Not optimized out: report the location kind and the bytes.  */
3975 gdb::array_view
<const gdb_byte
> buf
= new_val
->contents ();
3977 if (new_val
->lval () == lval_register
)
3978 gdb_printf (&debug_file
, " register=%d",
3979 VALUE_REGNUM (new_val
));
3980 else if (new_val
->lval () == lval_memory
)
3981 gdb_printf (&debug_file
, " address=%s",
3983 new_val
->address ()));
3985 gdb_printf (&debug_file
, " computed");
3987 gdb_printf (&debug_file
, " bytes=");
3988 gdb_printf (&debug_file
, "[");
3989 for (i
= 0; i
< register_size (gdbarch
, regnum
); i
++)
3990 gdb_printf (&debug_file
, "%02x", buf
[i
]);
3991 gdb_printf (&debug_file
, "]");
3994 frame_debug_printf ("%s", debug_file
.c_str ());
/* Fetch this lazy value, dispatching on its location kind (bitfield,
   memory, register, or computed).  Any other lazy kind is an
   internal error.  */
4001 value::fetch_lazy ()
4003 gdb_assert (lazy ());
4004 allocate_contents (true);
4005 /* A value is either lazy, or fully fetched. The
4006 availability/validity is only established as we try to fetch a
4008 gdb_assert (m_optimized_out
.empty ());
4009 gdb_assert (m_unavailable
.empty ());
/* Dispatch to the kind-specific fetch routine.  NOTE(review): the
   first branch of this if/else chain appears to have been lost in
   extraction of this chunk.  */
4014 else if (bitsize ())
4015 fetch_lazy_bitfield ();
4016 else if (this->lval () == lval_memory
)
4017 fetch_lazy_memory ();
4018 else if (this->lval () == lval_register
)
4019 fetch_lazy_register ();
4020 else if (this->lval () == lval_computed
4021 && computed_funcs ()->read
!= NULL
)
4022 computed_funcs ()->read (this);
4024 internal_error (_("Unexpected lazy value type."));
4029 /* Implementation of the convenience function $_isvoid. */
/* Return an int value: 1 if the single argument has void type,
   0 otherwise.  Errors out unless exactly one argument is given
   (NOTE(review): the ARGC check line itself appears to have been
   lost in extraction).  */
4031 static struct value
*
4032 isvoid_internal_fn (struct gdbarch
*gdbarch
,
4033 const struct language_defn
*language
,
4034 void *cookie
, int argc
, struct value
**argv
)
4039 error (_("You must provide one argument for $_isvoid."));
4041 ret
= argv
[0]->type ()->code () == TYPE_CODE_VOID
;
4043 return value_from_longest (builtin_type (gdbarch
)->builtin_int
, ret
);
4046 /* Implementation of the convenience function $_creal. Extracts the
4047 real part from a complex number. */
/* Errors out unless the single argument is of complex type.  */
4049 static struct value
*
4050 creal_internal_fn (struct gdbarch
*gdbarch
,
4051 const struct language_defn
*language
,
4052 void *cookie
, int argc
, struct value
**argv
)
4055 error (_("You must provide one argument for $_creal."));
4057 value
*cval
= argv
[0];
4058 type
*ctype
= check_typedef (cval
->type ());
4059 if (ctype
->code () != TYPE_CODE_COMPLEX
)
4060 error (_("expected a complex number"));
4061 return value_real_part (cval
);
4064 /* Implementation of the convenience function $_cimag. Extracts the
4065 imaginary part from a complex number. */
/* Errors out unless the single argument is of complex type.  */
4067 static struct value
*
4068 cimag_internal_fn (struct gdbarch
*gdbarch
,
4069 const struct language_defn
*language
,
4070 void *cookie
, int argc
,
4071 struct value
**argv
)
4074 error (_("You must provide one argument for $_cimag."));
4076 value
*cval
= argv
[0];
4077 type
*ctype
= check_typedef (cval
->type ());
4078 if (ctype
->code () != TYPE_CODE_COMPLEX
)
4079 error (_("expected a complex number"));
4080 return value_imaginary_part (cval
);
4087 /* Test the ranges_contain function. */
/* Builds a ranges vector (the pushed range literals appear to have
   been lost in extraction; the checks below imply ranges roughly
   [10,14] and [20,24]) and exercises overlap queries at, before,
   after, and straddling the range boundaries.  */
4090 test_ranges_contain ()
4092 std::vector
<range
> ranges
;
4098 ranges
.push_back (r
);
4103 ranges
.push_back (r
);
/* Queries entirely before the first range must not match.  */
4106 SELF_CHECK (!ranges_contain (ranges
, 2, 5));
4108 SELF_CHECK (ranges_contain (ranges
, 9, 5));
4110 SELF_CHECK (ranges_contain (ranges
, 10, 2));
4112 SELF_CHECK (ranges_contain (ranges
, 10, 5));
4114 SELF_CHECK (ranges_contain (ranges
, 13, 6));
4116 SELF_CHECK (ranges_contain (ranges
, 14, 5));
/* Queries falling in the gap between the ranges must not match.  */
4118 SELF_CHECK (!ranges_contain (ranges
, 15, 4));
4120 SELF_CHECK (!ranges_contain (ranges
, 16, 4));
4122 SELF_CHECK (ranges_contain (ranges
, 16, 6));
4124 SELF_CHECK (ranges_contain (ranges
, 21, 1));
4126 SELF_CHECK (ranges_contain (ranges
, 21, 5));
/* Queries entirely past the last range must not match.  */
4128 SELF_CHECK (!ranges_contain (ranges
, 26, 3));
4131 /* Check that RANGES contains the same ranges as EXPECTED. */
/* Returns the result of element-wise comparison of the two views.  */
4134 check_ranges_vector (gdb::array_view
<const range
> ranges
,
4135 gdb::array_view
<const range
> expected
)
4137 return ranges
== expected
;
4140 /* Test the insert_into_bit_range_vector function. */
/* Inserts successive bit ranges and checks after each step that the
   vector stays sorted and coalesced.  NOTE(review): the expected-
   array initializer lines appear to have been lost in extraction;
   the interleaved comments record the intended contents.  */
4143 test_insert_into_bit_range_vector ()
4145 std::vector
<range
> ranges
;
/* Insert [10, 14].  */
4149 insert_into_bit_range_vector (&ranges
, 10, 5);
4150 static const range expected
[] = {
4153 SELF_CHECK (check_ranges_vector (ranges
, expected
));
/* Inserting a sub-range of an existing range changes nothing.  */
4158 insert_into_bit_range_vector (&ranges
, 11, 4);
4159 static const range expected
= {10, 5};
4160 SELF_CHECK (check_ranges_vector (ranges
, expected
));
4163 /* [10, 14] [20, 24] */
4165 insert_into_bit_range_vector (&ranges
, 20, 5);
4166 static const range expected
[] = {
4170 SELF_CHECK (check_ranges_vector (ranges
, expected
));
4173 /* [10, 14] [17, 24] */
4175 insert_into_bit_range_vector (&ranges
, 17, 5);
4176 static const range expected
[] = {
4180 SELF_CHECK (check_ranges_vector (ranges
, expected
));
4183 /* [2, 8] [10, 14] [17, 24] */
4185 insert_into_bit_range_vector (&ranges
, 2, 7);
4186 static const range expected
[] = {
4191 SELF_CHECK (check_ranges_vector (ranges
, expected
));
4194 /* [2, 14] [17, 24] */
4196 insert_into_bit_range_vector (&ranges
, 9, 1);
4197 static const range expected
[] = {
4201 SELF_CHECK (check_ranges_vector (ranges
, expected
));
4204 /* [2, 14] [17, 24] */
4206 insert_into_bit_range_vector (&ranges
, 9, 1);
4207 static const range expected
[] = {
4211 SELF_CHECK (check_ranges_vector (ranges
, expected
));
/* A large insert coalesces everything into one range.  */
4216 insert_into_bit_range_vector (&ranges
, 4, 30);
4217 static const range expected
= {2, 32};
4218 SELF_CHECK (check_ranges_vector (ranges
, expected
));
/* Body of the value_copy selftest (registered below as
   selftests::test_value_copy); its header line appears to have been
   lost in extraction of this chunk.  */
4225 type
*type
= builtin_type (current_inferior ()->arch ())->builtin_int
;
4227 /* Verify that we can copy an entirely optimized out value, that may not have
4228 its contents allocated. */
4229 value_ref_ptr val
= release_value (value::allocate_optimized_out (type
));
4230 value_ref_ptr copy
= release_value (val
->copy ());
/* Both the original and the copy must report fully optimized out.  */
4232 SELF_CHECK (val
->entirely_optimized_out ());
4233 SELF_CHECK (copy
->entirely_optimized_out ());
4236 } /* namespace selftests */
4237 #endif /* GDB_SELF_TEST */
/* Module initializer: register the convenience-variable/function
   commands, the "_isvoid"/"_creal"/"_cimag" internal functions, the
   max-value-size settings, and (under GDB_SELF_TEST) the selftests
   defined above.  */
4239 void _initialize_values ();
4241 _initialize_values ()
/* "show convenience" lists convenience variables and functions.  */
4243 cmd_list_element
*show_convenience_cmd
4244 = add_cmd ("convenience", no_class
, show_convenience
, _("\
4245 Debugger convenience (\"$foo\") variables and functions.\n\
4246 Convenience variables are created when you assign them values;\n\
4247 thus, \"set $foo=1\" gives \"$foo\" the value 1. Values may be any type.\n\
4249 A few convenience variables are given values automatically:\n\
4250 \"$_\"holds the last address examined with \"x\" or \"info lines\",\n\
4251 \"$__\" holds the contents of the last address examined with \"x\"."
4254 Convenience functions are defined via the Python API."
/* "show conv" is a convenience alias for "show convenience".  */
4257 add_alias_cmd ("conv", show_convenience_cmd
, no_class
, 1, &showlist
);
/* "show values" prints the value history.  */
4259 add_cmd ("values", no_set_class
, show_values
, _("\
4260 Elements of value history around item number IDX (or last ten)."),
4263 add_com ("init-if-undefined", class_vars
, init_if_undefined_command
, _("\
4264 Initialize a convenience variable if necessary.\n\
4265 init-if-undefined VARIABLE = EXPRESSION\n\
4266 Set an internal VARIABLE to the result of the EXPRESSION if it does not\n\
4267 exist or does not contain a value. The EXPRESSION is not evaluated if the\n\
4268 VARIABLE is already initialized."));
/* "function" is a help-only prefix for convenience functions.  */
4270 add_prefix_cmd ("function", no_class
, function_command
, _("\
4271 Placeholder command for showing help on convenience functions."),
4272 &functionlist
, 0, &cmdlist
);
/* Register the built-in $_isvoid/$_creal/$_cimag convenience
   functions implemented above.  */
4274 add_internal_function ("_isvoid", _("\
4275 Check whether an expression is void.\n\
4276 Usage: $_isvoid (expression)\n\
4277 Return 1 if the expression is void, zero otherwise."),
4278 isvoid_internal_fn
, NULL
);
4280 add_internal_function ("_creal", _("\
4281 Extract the real part of a complex number.\n\
4282 Usage: $_creal (expression)\n\
4283 Return the real part of a complex number, the type depends on the\n\
4284 type of a complex number."),
4285 creal_internal_fn
, NULL
);
4287 add_internal_function ("_cimag", _("\
4288 Extract the imaginary part of a complex number.\n\
4289 Usage: $_cimag (expression)\n\
4290 Return the imaginary part of a complex number, the type depends on the\n\
4291 type of a complex number."),
4292 cimag_internal_fn
, NULL
);
4294 add_setshow_zuinteger_unlimited_cmd ("max-value-size",
4295 class_support
, &max_value_size
, _("\
4296 Set maximum sized value gdb will load from the inferior."), _("\
4297 Show maximum sized value gdb will load from the inferior."), _("\
4298 Use this to control the maximum size, in bytes, of a value that gdb\n\
4299 will load from the inferior. Setting this value to 'unlimited'\n\
4300 disables checking.\n\
4301 Setting this does not invalidate already allocated values, it only\n\
4302 prevents future values, larger than this size, from being allocated."),
4304 show_max_value_size
,
4305 &setlist
, &showlist
);
/* "varsize-limit" is a deprecated spelling of "max-value-size"; both
   control the same variable.  */
4306 set_show_commands vsize_limit
4307 = add_setshow_zuinteger_unlimited_cmd ("varsize-limit", class_support
,
4308 &max_value_size
, _("\
4309 Set the maximum number of bytes allowed in a variable-size object."), _("\
4310 Show the maximum number of bytes allowed in a variable-size object."), _("\
4311 Attempts to access an object whose size is not a compile-time constant\n\
4312 and exceeds this limit will cause an error."),
4313 NULL
, NULL
, &setlist
, &showlist
);
4314 deprecate_cmd (vsize_limit
.set
, "set max-value-size");
/* Register the unit tests defined in the selftests section above.  */
4317 selftests::register_test ("ranges_contain", selftests::test_ranges_contain
);
4318 selftests::register_test ("insert_into_bit_range_vector",
4319 selftests::test_insert_into_bit_range_vector
);
4320 selftests::register_test ("value_copy", selftests::test_value_copy
);
/* NOTE(review): the header of the enclosing definition is missing
   from this extraction; this statement empties the all_values
   container (the record of allocated values).  */
4329 all_values
.clear ();