1 /* Low level packing and unpacking of values for GDB, the GNU Debugger.
3 Copyright (C) 1986-2024 Free Software Foundation, Inc.
5 This file is part of GDB.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20 #include "arch-utils.h"
21 #include "extract-store-integer.h"
27 #include "cli/cli-cmds.h"
33 #include "target-float.h"
36 #include "cli/cli-decode.h"
37 #include "extension.h"
39 #include "tracepoint.h"
41 #include "user-regs.h"
47 #include "completer.h"
48 #include "gdbsupport/selftest.h"
49 #include "gdbsupport/array-view.h"
50 #include "cli/cli-style.h"
55 /* Definition of a user function. */
56 struct internal_function
58 /* The name of the function. It is a bit odd to have this in the
59 function itself -- the user might use a differently-named
60 convenience variable to hold the function. */
64 internal_function_fn handler
;
66 /* User data for the handler. */
70 /* Returns true if the ranges defined by [offset1, offset1+len1) and
71 [offset2, offset2+len2) overlap. */
74 ranges_overlap (LONGEST offset1
, ULONGEST len1
,
75 LONGEST offset2
, ULONGEST len2
)
79 l
= std::max (offset1
, offset2
);
80 h
= std::min (offset1
+ len1
, offset2
+ len2
);
84 /* Returns true if RANGES contains any range that overlaps [OFFSET,
88 ranges_contain (const std::vector
<range
> &ranges
, LONGEST offset
,
96 /* We keep ranges sorted by offset and coalesce overlapping and
97 contiguous ranges, so to check if a range list contains a given
98 range, we can do a binary search for the position the given range
99 would be inserted if we only considered the starting OFFSET of
100 ranges. We call that position I. Since we also have LENGTH to
101 care for (this is a range afterall), we need to check if the
102 _previous_ range overlaps the I range. E.g.,
106 |---| |---| |------| ... |--|
111 In the case above, the binary search would return `I=1', meaning,
112 this OFFSET should be inserted at position 1, and the current
113 position 1 should be pushed further (and before 2). But, `0'
116 Then we need to check if the I range overlaps the I range itself.
121 |---| |---| |-------| ... |--|
128 auto i
= std::lower_bound (ranges
.begin (), ranges
.end (), what
);
130 if (i
> ranges
.begin ())
132 const struct range
&bef
= *(i
- 1);
134 if (ranges_overlap (bef
.offset
, bef
.length
, offset
, length
))
138 if (i
< ranges
.end ())
140 const struct range
&r
= *i
;
142 if (ranges_overlap (r
.offset
, r
.length
, offset
, length
))
149 static struct cmd_list_element
*functionlist
;
153 if (this->lval () == lval_computed
)
155 const struct lval_funcs
*funcs
= m_location
.computed
.funcs
;
157 if (funcs
->free_closure
)
158 funcs
->free_closure (this);
160 else if (this->lval () == lval_xcallable
)
161 delete m_location
.xm_worker
;
169 return type ()->arch ();
173 value::bits_available (LONGEST offset
, ULONGEST length
) const
175 gdb_assert (!m_lazy
);
177 /* Don't pretend we have anything available there in the history beyond
178 the boundaries of the value recorded. It's not like inferior memory
179 where there is actual stuff underneath. */
180 ULONGEST val_len
= TARGET_CHAR_BIT
* enclosing_type ()->length ();
181 return !((m_in_history
182 && (offset
< 0 || offset
+ length
> val_len
))
183 || ranges_contain (m_unavailable
, offset
, length
));
187 value::bytes_available (LONGEST offset
, ULONGEST length
) const
189 ULONGEST sign
= (1ULL << (sizeof (ULONGEST
) * 8 - 1)) / TARGET_CHAR_BIT
;
190 ULONGEST mask
= (sign
<< 1) - 1;
192 if (offset
!= ((offset
& mask
) ^ sign
) - sign
193 || length
!= ((length
& mask
) ^ sign
) - sign
194 || (length
> 0 && (~offset
& (offset
+ length
- 1) & sign
) != 0))
195 error (_("Integer overflow in data location calculation"));
197 return bits_available (offset
* TARGET_CHAR_BIT
, length
* TARGET_CHAR_BIT
);
201 value::bits_any_optimized_out (int bit_offset
, int bit_length
) const
203 gdb_assert (!m_lazy
);
205 return ranges_contain (m_optimized_out
, bit_offset
, bit_length
);
209 value::entirely_available ()
211 /* We can only tell whether the whole value is available when we try
216 if (m_unavailable
.empty ())
224 value::entirely_covered_by_range_vector (const std::vector
<range
> &ranges
)
226 /* We can only tell whether the whole value is optimized out /
227 unavailable when we try to read it. */
231 if (ranges
.size () == 1)
233 const struct range
&t
= ranges
[0];
236 && t
.length
== TARGET_CHAR_BIT
* enclosing_type ()->length ())
243 /* Insert into the vector pointed to by VECTORP the bit range starting of
244 OFFSET bits, and extending for the next LENGTH bits. */
247 insert_into_bit_range_vector (std::vector
<range
> *vectorp
,
248 LONGEST offset
, ULONGEST length
)
252 /* Insert the range sorted. If there's overlap or the new range
253 would be contiguous with an existing range, merge. */
255 newr
.offset
= offset
;
256 newr
.length
= length
;
258 /* Do a binary search for the position the given range would be
259 inserted if we only considered the starting OFFSET of ranges.
260 Call that position I. Since we also have LENGTH to care for
261 (this is a range afterall), we need to check if the _previous_
262 range overlaps the I range. E.g., calling R the new range:
264 #1 - overlaps with previous
268 |---| |---| |------| ... |--|
273 In the case #1 above, the binary search would return `I=1',
274 meaning, this OFFSET should be inserted at position 1, and the
275 current position 1 should be pushed further (and become 2). But,
276 note that `0' overlaps with R, so we want to merge them.
278 A similar consideration needs to be taken if the new range would
279 be contiguous with the previous range:
281 #2 - contiguous with previous
285 |--| |---| |------| ... |--|
290 If there's no overlap with the previous range, as in:
292 #3 - not overlapping and not contiguous
296 |--| |---| |------| ... |--|
303 #4 - R is the range with lowest offset
307 |--| |---| |------| ... |--|
312 ... we just push the new range to I.
314 All the 4 cases above need to consider that the new range may
315 also overlap several of the ranges that follow, or that R may be
316 contiguous with the following range, and merge. E.g.,
318 #5 - overlapping following ranges
321 |------------------------|
322 |--| |---| |------| ... |--|
331 |--| |---| |------| ... |--|
338 auto i
= std::lower_bound (vectorp
->begin (), vectorp
->end (), newr
);
339 if (i
> vectorp
->begin ())
341 struct range
&bef
= *(i
- 1);
343 if (ranges_overlap (bef
.offset
, bef
.length
, offset
, length
))
346 LONGEST l
= std::min (bef
.offset
, offset
);
347 LONGEST h
= std::max (bef
.offset
+ bef
.length
, offset
+ length
);
353 else if (offset
== bef
.offset
+ bef
.length
)
356 bef
.length
+= length
;
362 i
= vectorp
->insert (i
, newr
);
368 i
= vectorp
->insert (i
, newr
);
371 /* Check whether the ranges following the one we've just added or
372 touched can be folded in (#5 above). */
373 if (i
!= vectorp
->end () && i
+ 1 < vectorp
->end ())
378 /* Get the range we just touched. */
379 struct range
&t
= *i
;
383 for (; i
< vectorp
->end (); i
++)
385 struct range
&r
= *i
;
386 if (r
.offset
<= t
.offset
+ t
.length
)
390 l
= std::min (t
.offset
, r
.offset
);
391 h
= std::max (t
.offset
+ t
.length
, r
.offset
+ r
.length
);
400 /* If we couldn't merge this one, we won't be able to
401 merge following ones either, since the ranges are
402 always sorted by OFFSET. */
408 vectorp
->erase (next
, next
+ removed
);
413 value::mark_bits_unavailable (LONGEST offset
, ULONGEST length
)
415 insert_into_bit_range_vector (&m_unavailable
, offset
, length
);
419 value::mark_bytes_unavailable (LONGEST offset
, ULONGEST length
)
421 mark_bits_unavailable (offset
* TARGET_CHAR_BIT
,
422 length
* TARGET_CHAR_BIT
);
425 /* Find the first range in RANGES that overlaps the range defined by
426 OFFSET and LENGTH, starting at element POS in the RANGES vector,
427 Returns the index into RANGES where such overlapping range was
428 found, or -1 if none was found. */
431 find_first_range_overlap (const std::vector
<range
> *ranges
, int pos
,
432 LONGEST offset
, LONGEST length
)
436 for (i
= pos
; i
< ranges
->size (); i
++)
438 const range
&r
= (*ranges
)[i
];
439 if (ranges_overlap (r
.offset
, r
.length
, offset
, length
))
446 /* Compare LENGTH_BITS of memory at PTR1 + OFFSET1_BITS with the memory at
447 PTR2 + OFFSET2_BITS. Return 0 if the memory is the same, otherwise
450 It must always be the case that:
451 OFFSET1_BITS % TARGET_CHAR_BIT == OFFSET2_BITS % TARGET_CHAR_BIT
453 It is assumed that memory can be accessed from:
454 PTR + (OFFSET_BITS / TARGET_CHAR_BIT)
456 PTR + ((OFFSET_BITS + LENGTH_BITS + TARGET_CHAR_BIT - 1)
457 / TARGET_CHAR_BIT) */
459 memcmp_with_bit_offsets (const gdb_byte
*ptr1
, size_t offset1_bits
,
460 const gdb_byte
*ptr2
, size_t offset2_bits
,
463 gdb_assert (offset1_bits
% TARGET_CHAR_BIT
464 == offset2_bits
% TARGET_CHAR_BIT
);
466 if (offset1_bits
% TARGET_CHAR_BIT
!= 0)
469 gdb_byte mask
, b1
, b2
;
471 /* The offset from the base pointers PTR1 and PTR2 is not a complete
472 number of bytes. A number of bits up to either the next exact
473 byte boundary, or LENGTH_BITS (which ever is sooner) will be
475 bits
= TARGET_CHAR_BIT
- offset1_bits
% TARGET_CHAR_BIT
;
476 gdb_assert (bits
< sizeof (mask
) * TARGET_CHAR_BIT
);
477 mask
= (1 << bits
) - 1;
479 if (length_bits
< bits
)
481 mask
&= ~(gdb_byte
) ((1 << (bits
- length_bits
)) - 1);
485 /* Now load the two bytes and mask off the bits we care about. */
486 b1
= *(ptr1
+ offset1_bits
/ TARGET_CHAR_BIT
) & mask
;
487 b2
= *(ptr2
+ offset2_bits
/ TARGET_CHAR_BIT
) & mask
;
492 /* Now update the length and offsets to take account of the bits
493 we've just compared. */
495 offset1_bits
+= bits
;
496 offset2_bits
+= bits
;
499 if (length_bits
% TARGET_CHAR_BIT
!= 0)
503 gdb_byte mask
, b1
, b2
;
505 /* The length is not an exact number of bytes. After the previous
506 IF.. block then the offsets are byte aligned, or the
507 length is zero (in which case this code is not reached). Compare
508 a number of bits at the end of the region, starting from an exact
510 bits
= length_bits
% TARGET_CHAR_BIT
;
511 o1
= offset1_bits
+ length_bits
- bits
;
512 o2
= offset2_bits
+ length_bits
- bits
;
514 gdb_assert (bits
< sizeof (mask
) * TARGET_CHAR_BIT
);
515 mask
= ((1 << bits
) - 1) << (TARGET_CHAR_BIT
- bits
);
517 gdb_assert (o1
% TARGET_CHAR_BIT
== 0);
518 gdb_assert (o2
% TARGET_CHAR_BIT
== 0);
520 b1
= *(ptr1
+ o1
/ TARGET_CHAR_BIT
) & mask
;
521 b2
= *(ptr2
+ o2
/ TARGET_CHAR_BIT
) & mask
;
531 /* We've now taken care of any stray "bits" at the start, or end of
532 the region to compare, the remainder can be covered with a simple
534 gdb_assert (offset1_bits
% TARGET_CHAR_BIT
== 0);
535 gdb_assert (offset2_bits
% TARGET_CHAR_BIT
== 0);
536 gdb_assert (length_bits
% TARGET_CHAR_BIT
== 0);
538 return memcmp (ptr1
+ offset1_bits
/ TARGET_CHAR_BIT
,
539 ptr2
+ offset2_bits
/ TARGET_CHAR_BIT
,
540 length_bits
/ TARGET_CHAR_BIT
);
543 /* Length is zero, regions match. */
547 /* Helper struct for find_first_range_overlap_and_match and
548 value_contents_bits_eq. Keep track of which slot of a given ranges
549 vector have we last looked at. */
551 struct ranges_and_idx
554 const std::vector
<range
> *ranges
;
556 /* The range we've last found in RANGES. Given ranges are sorted,
557 we can start the next lookup here. */
561 /* Helper function for value_contents_bits_eq. Compare LENGTH bits of
562 RP1's ranges starting at OFFSET1 bits with LENGTH bits of RP2's
563 ranges starting at OFFSET2 bits. Return true if the ranges match
564 and fill in *L and *H with the overlapping window relative to
565 (both) OFFSET1 or OFFSET2. */
568 find_first_range_overlap_and_match (struct ranges_and_idx
*rp1
,
569 struct ranges_and_idx
*rp2
,
570 LONGEST offset1
, LONGEST offset2
,
571 ULONGEST length
, ULONGEST
*l
, ULONGEST
*h
)
573 rp1
->idx
= find_first_range_overlap (rp1
->ranges
, rp1
->idx
,
575 rp2
->idx
= find_first_range_overlap (rp2
->ranges
, rp2
->idx
,
578 if (rp1
->idx
== -1 && rp2
->idx
== -1)
584 else if (rp1
->idx
== -1 || rp2
->idx
== -1)
588 const range
*r1
, *r2
;
592 r1
= &(*rp1
->ranges
)[rp1
->idx
];
593 r2
= &(*rp2
->ranges
)[rp2
->idx
];
595 /* Get the unavailable windows intersected by the incoming
596 ranges. The first and last ranges that overlap the argument
597 range may be wider than said incoming arguments ranges. */
598 l1
= std::max (offset1
, r1
->offset
);
599 h1
= std::min (offset1
+ length
, r1
->offset
+ r1
->length
);
601 l2
= std::max (offset2
, r2
->offset
);
602 h2
= std::min (offset2
+ length
, offset2
+ r2
->length
);
604 /* Make them relative to the respective start offsets, so we can
605 compare them for equality. */
612 /* Different ranges, no match. */
613 if (l1
!= l2
|| h1
!= h2
)
622 /* Helper function for value_contents_eq. The only difference is that
623 this function is bit rather than byte based.
625 Compare LENGTH bits of VAL1's contents starting at OFFSET1 bits
626 with LENGTH bits of VAL2's contents starting at OFFSET2 bits.
627 Return true if the available bits match. */
630 value::contents_bits_eq (int offset1
, const struct value
*val2
, int offset2
,
633 /* Each array element corresponds to a ranges source (unavailable,
634 optimized out). '1' is for VAL1, '2' for VAL2. */
635 struct ranges_and_idx rp1
[2], rp2
[2];
637 /* See function description in value.h. */
638 gdb_assert (!m_lazy
&& !val2
->m_lazy
);
640 /* We shouldn't be trying to compare past the end of the values. */
641 gdb_assert (offset1
+ length
642 <= m_enclosing_type
->length () * TARGET_CHAR_BIT
);
643 gdb_assert (offset2
+ length
644 <= val2
->m_enclosing_type
->length () * TARGET_CHAR_BIT
);
646 memset (&rp1
, 0, sizeof (rp1
));
647 memset (&rp2
, 0, sizeof (rp2
));
648 rp1
[0].ranges
= &m_unavailable
;
649 rp2
[0].ranges
= &val2
->m_unavailable
;
650 rp1
[1].ranges
= &m_optimized_out
;
651 rp2
[1].ranges
= &val2
->m_optimized_out
;
655 ULONGEST l
= 0, h
= 0; /* init for gcc -Wall */
658 for (i
= 0; i
< 2; i
++)
660 ULONGEST l_tmp
, h_tmp
;
662 /* The contents only match equal if the invalid/unavailable
663 contents ranges match as well. */
664 if (!find_first_range_overlap_and_match (&rp1
[i
], &rp2
[i
],
665 offset1
, offset2
, length
,
669 /* We're interested in the lowest/first range found. */
670 if (i
== 0 || l_tmp
< l
)
677 /* Compare the available/valid contents. */
678 if (memcmp_with_bit_offsets (m_contents
.get (), offset1
,
679 val2
->m_contents
.get (), offset2
, l
) != 0)
693 value::contents_eq (LONGEST offset1
,
694 const struct value
*val2
, LONGEST offset2
,
695 LONGEST length
) const
697 return contents_bits_eq (offset1
* TARGET_CHAR_BIT
,
698 val2
, offset2
* TARGET_CHAR_BIT
,
699 length
* TARGET_CHAR_BIT
);
705 value::contents_eq (const struct value
*val2
) const
707 ULONGEST len1
= check_typedef (enclosing_type ())->length ();
708 ULONGEST len2
= check_typedef (val2
->enclosing_type ())->length ();
711 return contents_eq (0, val2
, 0, len1
);
714 /* The value-history records all the values printed by print commands
715 during this session. */
717 static std::vector
<value_ref_ptr
> value_history
;
720 /* List of all value objects currently allocated
721 (except for those released by calls to release_value)
722 This is so they can be freed after each command. */
724 static std::vector
<value_ref_ptr
> all_values
;
729 value::allocate_lazy (struct type
*type
)
733 /* Call check_typedef on our type to make sure that, if TYPE
734 is a TYPE_CODE_TYPEDEF, its length is set to the length
735 of the target type instead of zero. However, we do not
736 replace the typedef type by the target type, because we want
737 to keep the typedef in order to be able to set the VAL's type
738 description correctly. */
739 check_typedef (type
);
741 val
= new struct value (type
);
743 /* Values start out on the all_values chain. */
744 all_values
.emplace_back (val
);
749 /* The maximum size, in bytes, that GDB will try to allocate for a value.
750 The initial value of 64k was not selected for any specific reason, it is
751 just a reasonable starting point. */
753 static int max_value_size
= 65536; /* 64k bytes */
755 /* It is critical that the MAX_VALUE_SIZE is at least as big as the size of
756 LONGEST, otherwise GDB will not be able to parse integer values from the
757 CLI; for example if the MAX_VALUE_SIZE could be set to 1 then GDB would
758 be unable to parse "set max-value-size 2".
760 As we want a consistent GDB experience across hosts with different sizes
761 of LONGEST, this arbitrary minimum value was selected, so long as this
762 is bigger than LONGEST on all GDB supported hosts we're fine. */
764 #define MIN_VALUE_FOR_MAX_VALUE_SIZE 16
765 static_assert (sizeof (LONGEST
) <= MIN_VALUE_FOR_MAX_VALUE_SIZE
);
767 /* Implement the "set max-value-size" command. */
770 set_max_value_size (const char *args
, int from_tty
,
771 struct cmd_list_element
*c
)
773 gdb_assert (max_value_size
== -1 || max_value_size
>= 0);
775 if (max_value_size
> -1 && max_value_size
< MIN_VALUE_FOR_MAX_VALUE_SIZE
)
777 max_value_size
= MIN_VALUE_FOR_MAX_VALUE_SIZE
;
778 error (_("max-value-size set too low, increasing to %d bytes"),
783 /* Implement the "show max-value-size" command. */
786 show_max_value_size (struct ui_file
*file
, int from_tty
,
787 struct cmd_list_element
*c
, const char *value
)
789 if (max_value_size
== -1)
790 gdb_printf (file
, _("Maximum value size is unlimited.\n"));
792 gdb_printf (file
, _("Maximum value size is %d bytes.\n"),
796 /* Called before we attempt to allocate or reallocate a buffer for the
797 contents of a value. TYPE is the type of the value for which we are
798 allocating the buffer. If the buffer is too large (based on the user
799 controllable setting) then throw an error. If this function returns
800 then we should attempt to allocate the buffer. */
803 check_type_length_before_alloc (const struct type
*type
)
805 ULONGEST length
= type
->length ();
807 if (exceeds_max_value_size (length
))
809 if (type
->name () != NULL
)
810 error (_("value of type `%s' requires %s bytes, which is more "
811 "than max-value-size"), type
->name (), pulongest (length
));
813 error (_("value requires %s bytes, which is more than "
814 "max-value-size"), pulongest (length
));
821 exceeds_max_value_size (ULONGEST length
)
823 return max_value_size
> -1 && length
> max_value_size
;
826 /* When this has a value, it is used to limit the number of array elements
827 of an array that are loaded into memory when an array value is made
829 static std::optional
<int> array_length_limiting_element_count
;
832 scoped_array_length_limiting::scoped_array_length_limiting (int elements
)
834 m_old_value
= array_length_limiting_element_count
;
835 array_length_limiting_element_count
.emplace (elements
);
839 scoped_array_length_limiting::~scoped_array_length_limiting ()
841 array_length_limiting_element_count
= m_old_value
;
844 /* Find the inner element type for ARRAY_TYPE. */
847 find_array_element_type (struct type
*array_type
)
849 array_type
= check_typedef (array_type
);
850 gdb_assert (array_type
->code () == TYPE_CODE_ARRAY
);
852 if (current_language
->la_language
== language_fortran
)
853 while (array_type
->code () == TYPE_CODE_ARRAY
)
855 array_type
= array_type
->target_type ();
856 array_type
= check_typedef (array_type
);
860 array_type
= array_type
->target_type ();
861 array_type
= check_typedef (array_type
);
867 /* Return the limited length of ARRAY_TYPE, which must be of
868 TYPE_CODE_ARRAY. This function can only be called when the global
869 ARRAY_LENGTH_LIMITING_ELEMENT_COUNT has a value.
871 The limited length of an array is the smallest of either (1) the total
872 size of the array type, or (2) the array target type multiplies by the
873 array_length_limiting_element_count. */
876 calculate_limited_array_length (struct type
*array_type
)
878 gdb_assert (array_length_limiting_element_count
.has_value ());
880 array_type
= check_typedef (array_type
);
881 gdb_assert (array_type
->code () == TYPE_CODE_ARRAY
);
883 struct type
*elm_type
= find_array_element_type (array_type
);
884 ULONGEST len
= (elm_type
->length ()
885 * (*array_length_limiting_element_count
));
886 len
= std::min (len
, array_type
->length ());
894 value::set_limited_array_length ()
896 ULONGEST limit
= m_limited_length
;
897 ULONGEST len
= type ()->length ();
899 if (array_length_limiting_element_count
.has_value ())
900 len
= calculate_limited_array_length (type ());
902 if (limit
!= 0 && len
> limit
)
904 if (len
> max_value_size
)
907 m_limited_length
= max_value_size
;
914 value::allocate_contents (bool check_size
)
918 struct type
*enc_type
= enclosing_type ();
919 ULONGEST len
= enc_type
->length ();
923 /* If we are allocating the contents of an array, which
924 is greater in size than max_value_size, and there is
925 an element limit in effect, then we can possibly try
926 to load only a sub-set of the array contents into
928 if (type () == enc_type
929 && type ()->code () == TYPE_CODE_ARRAY
930 && len
> max_value_size
931 && set_limited_array_length ())
932 len
= m_limited_length
;
934 check_type_length_before_alloc (enc_type
);
937 m_contents
.reset ((gdb_byte
*) xzalloc (len
));
941 /* Allocate a value and its contents for type TYPE. If CHECK_SIZE is true,
942 then apply the usual max-value-size checks. */
945 value::allocate (struct type
*type
, bool check_size
)
947 struct value
*val
= value::allocate_lazy (type
);
949 val
->allocate_contents (check_size
);
954 /* Allocate a value and its contents for type TYPE. */
957 value::allocate (struct type
*type
)
959 return allocate (type
, true);
965 value::allocate_register_lazy (const frame_info_ptr
&initial_next_frame
,
966 int regnum
, struct type
*type
)
969 type
= register_type (frame_unwind_arch (initial_next_frame
), regnum
);
971 value
*result
= value::allocate_lazy (type
);
973 result
->set_lval (lval_register
);
974 result
->m_location
.reg
.regnum
= regnum
;
976 /* If this register value is created during unwind (while computing a frame
977 id), and NEXT_FRAME is a frame inlined in the frame being unwound, then
978 NEXT_FRAME will not have a valid frame id yet. Find the next non-inline
979 frame (possibly the sentinel frame). This is where registers are unwound
981 frame_info_ptr next_frame
= initial_next_frame
;
982 while (get_frame_type (next_frame
) == INLINE_FRAME
)
983 next_frame
= get_next_frame_sentinel_okay (next_frame
);
985 result
->m_location
.reg
.next_frame_id
= get_frame_id (next_frame
);
987 /* We should have a next frame with a valid id. */
988 gdb_assert (frame_id_p (result
->m_location
.reg
.next_frame_id
));
996 value::allocate_register (const frame_info_ptr
&next_frame
, int regnum
,
999 value
*result
= value::allocate_register_lazy (next_frame
, regnum
, type
);
1000 result
->set_lazy (false);
1004 /* Allocate a value that has the correct length
1005 for COUNT repetitions of type TYPE. */
1008 allocate_repeat_value (struct type
*type
, int count
)
1010 /* Despite the fact that we are really creating an array of TYPE here, we
1011 use the string lower bound as the array lower bound. This seems to
1012 work fine for now. */
1013 int low_bound
= current_language
->string_lower_bound ();
1014 /* FIXME-type-allocation: need a way to free this type when we are
1016 struct type
*array_type
1017 = lookup_array_range_type (type
, low_bound
, count
+ low_bound
- 1);
1019 return value::allocate (array_type
);
1023 value::allocate_computed (struct type
*type
,
1024 const struct lval_funcs
*funcs
,
1027 struct value
*v
= value::allocate_lazy (type
);
1029 v
->set_lval (lval_computed
);
1030 v
->m_location
.computed
.funcs
= funcs
;
1031 v
->m_location
.computed
.closure
= closure
;
1039 value::allocate_optimized_out (struct type
*type
)
1041 struct value
*retval
= value::allocate_lazy (type
);
1043 retval
->mark_bytes_optimized_out (0, type
->length ());
1044 retval
->set_lazy (false);
1048 /* Accessor methods. */
1050 gdb::array_view
<gdb_byte
>
1051 value::contents_raw ()
1053 int unit_size
= gdbarch_addressable_memory_unit_size (arch ());
1055 allocate_contents (true);
1057 ULONGEST length
= type ()->length ();
1058 return gdb::make_array_view
1059 (m_contents
.get () + m_embedded_offset
* unit_size
, length
);
1062 gdb::array_view
<gdb_byte
>
1063 value::contents_all_raw ()
1065 allocate_contents (true);
1067 ULONGEST length
= enclosing_type ()->length ();
1068 return gdb::make_array_view (m_contents
.get (), length
);
1071 /* Look at value.h for description. */
1074 value_actual_type (struct value
*value
, int resolve_simple_types
,
1075 int *real_type_found
)
1077 struct value_print_options opts
;
1078 struct type
*result
;
1080 get_user_print_options (&opts
);
1082 if (real_type_found
)
1083 *real_type_found
= 0;
1084 result
= value
->type ();
1085 if (opts
.objectprint
)
1087 /* If result's target type is TYPE_CODE_STRUCT, proceed to
1088 fetch its rtti type. */
1089 if (result
->is_pointer_or_reference ()
1090 && (check_typedef (result
->target_type ())->code ()
1091 == TYPE_CODE_STRUCT
)
1092 && !value
->optimized_out ())
1094 struct type
*real_type
;
1096 real_type
= value_rtti_indirect_type (value
, NULL
, NULL
, NULL
);
1099 if (real_type_found
)
1100 *real_type_found
= 1;
1104 else if (resolve_simple_types
)
1106 if (real_type_found
)
1107 *real_type_found
= 1;
1108 result
= value
->enclosing_type ();
1116 error_value_optimized_out (void)
1118 throw_error (OPTIMIZED_OUT_ERROR
, _("value has been optimized out"));
1122 value::require_not_optimized_out () const
1124 if (!m_optimized_out
.empty ())
1126 if (m_lval
== lval_register
)
1127 throw_error (OPTIMIZED_OUT_ERROR
,
1128 _("register has not been saved in frame"));
1130 error_value_optimized_out ();
1135 value::require_available () const
1137 if (!m_unavailable
.empty ())
1138 throw_error (NOT_AVAILABLE_ERROR
, _("value is not available"));
1141 gdb::array_view
<const gdb_byte
>
1142 value::contents_for_printing ()
1147 ULONGEST length
= enclosing_type ()->length ();
1148 return gdb::make_array_view (m_contents
.get (), length
);
1151 gdb::array_view
<const gdb_byte
>
1152 value::contents_for_printing () const
1154 gdb_assert (!m_lazy
);
1156 ULONGEST length
= enclosing_type ()->length ();
1157 return gdb::make_array_view (m_contents
.get (), length
);
1160 gdb::array_view
<const gdb_byte
>
1161 value::contents_all ()
1163 gdb::array_view
<const gdb_byte
> result
= contents_for_printing ();
1164 require_not_optimized_out ();
1165 require_available ();
1169 /* Copy ranges in SRC_RANGE that overlap [SRC_BIT_OFFSET,
1170 SRC_BIT_OFFSET+BIT_LENGTH) ranges into *DST_RANGE, adjusted. */
1173 ranges_copy_adjusted (std::vector
<range
> *dst_range
, int dst_bit_offset
,
1174 const std::vector
<range
> &src_range
, int src_bit_offset
,
1175 unsigned int bit_length
)
1177 for (const range
&r
: src_range
)
1181 l
= std::max (r
.offset
, (LONGEST
) src_bit_offset
);
1182 h
= std::min ((LONGEST
) (r
.offset
+ r
.length
),
1183 (LONGEST
) src_bit_offset
+ bit_length
);
1186 insert_into_bit_range_vector (dst_range
,
1187 dst_bit_offset
+ (l
- src_bit_offset
),
1195 value::ranges_copy_adjusted (struct value
*dst
, int dst_bit_offset
,
1196 int src_bit_offset
, int bit_length
) const
1198 ::ranges_copy_adjusted (&dst
->m_unavailable
, dst_bit_offset
,
1199 m_unavailable
, src_bit_offset
,
1201 ::ranges_copy_adjusted (&dst
->m_optimized_out
, dst_bit_offset
,
1202 m_optimized_out
, src_bit_offset
,
1209 value::contents_copy_raw (struct value
*dst
, LONGEST dst_offset
,
1210 LONGEST src_offset
, LONGEST length
)
1212 LONGEST src_bit_offset
, dst_bit_offset
, bit_length
;
1213 int unit_size
= gdbarch_addressable_memory_unit_size (arch ());
1215 /* A lazy DST would make that this copy operation useless, since as
1216 soon as DST's contents were un-lazied (by a later value_contents
1217 call, say), the contents would be overwritten. A lazy SRC would
1218 mean we'd be copying garbage. */
1219 gdb_assert (!dst
->m_lazy
&& !m_lazy
);
1221 ULONGEST copy_length
= length
;
1222 ULONGEST limit
= m_limited_length
;
1223 if (limit
> 0 && src_offset
+ length
> limit
)
1224 copy_length
= src_offset
> limit
? 0 : limit
- src_offset
;
1226 /* The overwritten DST range gets unavailability ORed in, not
1227 replaced. Make sure to remember to implement replacing if it
1228 turns out actually necessary. */
1229 gdb_assert (dst
->bytes_available (dst_offset
, length
));
1230 gdb_assert (!dst
->bits_any_optimized_out (TARGET_CHAR_BIT
* dst_offset
,
1231 TARGET_CHAR_BIT
* length
));
1233 if ((src_offset
+ copy_length
) * unit_size
> enclosing_type ()-> length ())
1234 error (_("access outside bounds of object"));
1236 /* Copy the data. */
1237 gdb::array_view
<gdb_byte
> dst_contents
1238 = dst
->contents_all_raw ().slice (dst_offset
* unit_size
,
1239 copy_length
* unit_size
);
1240 gdb::array_view
<const gdb_byte
> src_contents
1241 = contents_all_raw ().slice (src_offset
* unit_size
,
1242 copy_length
* unit_size
);
1243 gdb::copy (src_contents
, dst_contents
);
1245 /* Copy the meta-data, adjusted. */
1246 src_bit_offset
= src_offset
* unit_size
* HOST_CHAR_BIT
;
1247 dst_bit_offset
= dst_offset
* unit_size
* HOST_CHAR_BIT
;
1248 bit_length
= length
* unit_size
* HOST_CHAR_BIT
;
1250 ranges_copy_adjusted (dst
, dst_bit_offset
,
1251 src_bit_offset
, bit_length
);
1257 value::contents_copy_raw_bitwise (struct value
*dst
, LONGEST dst_bit_offset
,
1258 LONGEST src_bit_offset
,
1261 /* A lazy DST would make that this copy operation useless, since as
1262 soon as DST's contents were un-lazied (by a later value_contents
1263 call, say), the contents would be overwritten. A lazy SRC would
1264 mean we'd be copying garbage. */
1265 gdb_assert (!dst
->m_lazy
&& !m_lazy
);
1267 ULONGEST copy_bit_length
= bit_length
;
1268 ULONGEST bit_limit
= m_limited_length
* TARGET_CHAR_BIT
;
1269 if (bit_limit
> 0 && src_bit_offset
+ bit_length
> bit_limit
)
1270 copy_bit_length
= (src_bit_offset
> bit_limit
? 0
1271 : bit_limit
- src_bit_offset
);
1273 /* The overwritten DST range gets unavailability ORed in, not
1274 replaced. Make sure to remember to implement replacing if it
1275 turns out actually necessary. */
1276 LONGEST dst_offset
= dst_bit_offset
/ TARGET_CHAR_BIT
;
1277 LONGEST length
= bit_length
/ TARGET_CHAR_BIT
;
1278 gdb_assert (dst
->bytes_available (dst_offset
, length
));
1279 gdb_assert (!dst
->bits_any_optimized_out (dst_bit_offset
,
1282 /* Copy the data. */
1283 gdb::array_view
<gdb_byte
> dst_contents
= dst
->contents_all_raw ();
1284 gdb::array_view
<const gdb_byte
> src_contents
= contents_all_raw ();
1285 copy_bitwise (dst_contents
.data (), dst_bit_offset
,
1286 src_contents
.data (), src_bit_offset
,
1288 type_byte_order (type ()) == BFD_ENDIAN_BIG
);
1290 /* Copy the meta-data. */
1291 ranges_copy_adjusted (dst
, dst_bit_offset
, src_bit_offset
, bit_length
);
1297 value::contents_copy (struct value
*dst
, LONGEST dst_offset
,
1298 LONGEST src_offset
, LONGEST length
)
1303 contents_copy_raw (dst
, dst_offset
, src_offset
, length
);
1306 gdb::array_view
<const gdb_byte
>
1309 gdb::array_view
<const gdb_byte
> result
= contents_writeable ();
1310 require_not_optimized_out ();
1311 require_available ();
1315 gdb::array_view
<gdb_byte
>
1316 value::contents_writeable ()
1320 return contents_raw ();
1324 value::optimized_out ()
1328 /* See if we can compute the result without fetching the
1330 if (this->lval () == lval_memory
)
1332 else if (this->lval () == lval_computed
)
1334 const struct lval_funcs
*funcs
= m_location
.computed
.funcs
;
1336 if (funcs
->is_optimized_out
!= nullptr)
1337 return funcs
->is_optimized_out (this);
1340 /* Fall back to fetching. */
1345 catch (const gdb_exception_error
&ex
)
1350 case OPTIMIZED_OUT_ERROR
:
1351 case NOT_AVAILABLE_ERROR
:
1352 /* These can normally happen when we try to access an
1353 optimized out or unavailable register, either in a
1354 physical register or spilled to memory. */
1362 return !m_optimized_out
.empty ();
1365 /* Mark contents of VALUE as optimized out, starting at OFFSET bytes, and
1366 the following LENGTH bytes. */
1369 value::mark_bytes_optimized_out (int offset
, int length
)
1371 mark_bits_optimized_out (offset
* TARGET_CHAR_BIT
,
1372 length
* TARGET_CHAR_BIT
);
1378 value::mark_bits_optimized_out (LONGEST offset
, LONGEST length
)
1380 insert_into_bit_range_vector (&m_optimized_out
, offset
, length
);
1384 value::bits_synthetic_pointer (LONGEST offset
, LONGEST length
) const
1386 if (m_lval
!= lval_computed
1387 || !m_location
.computed
.funcs
->check_synthetic_pointer
)
1389 return m_location
.computed
.funcs
->check_synthetic_pointer (this, offset
,
1393 const struct lval_funcs
*
1394 value::computed_funcs () const
1396 gdb_assert (m_lval
== lval_computed
);
1398 return m_location
.computed
.funcs
;
1402 value::computed_closure () const
1404 gdb_assert (m_lval
== lval_computed
);
1406 return m_location
.computed
.closure
;
1410 value::address () const
1412 if (m_lval
!= lval_memory
)
1414 if (m_parent
!= NULL
)
1415 return m_parent
->address () + m_offset
;
1416 if (NULL
!= TYPE_DATA_LOCATION (type ()))
1418 gdb_assert (TYPE_DATA_LOCATION (type ())->is_constant ());
1419 return TYPE_DATA_LOCATION_ADDR (type ());
1422 return m_location
.address
+ m_offset
;
1426 value::raw_address () const
1428 if (m_lval
!= lval_memory
)
1430 return m_location
.address
;
1434 value::set_address (CORE_ADDR addr
)
1436 gdb_assert (m_lval
== lval_memory
);
1437 m_location
.address
= addr
;
1440 /* Return a mark in the value chain. All values allocated after the
1441 mark is obtained (except for those released) are subject to being freed
1442 if a subsequent value_free_to_mark is passed the mark. */
1446 if (all_values
.empty ())
1448 return all_values
.back ().get ();
1451 /* Release a reference to VAL, which was acquired with value_incref.
1452 This function is also called to deallocate values from the value
1458 gdb_assert (m_reference_count
> 0);
1459 m_reference_count
--;
1460 if (m_reference_count
== 0)
1464 /* Free all values allocated since MARK was obtained by value_mark
1465 (except for those released). */
1467 value_free_to_mark (const struct value
*mark
)
1469 auto iter
= std::find (all_values
.begin (), all_values
.end (), mark
);
1470 if (iter
== all_values
.end ())
1471 all_values
.clear ();
1473 all_values
.erase (iter
+ 1, all_values
.end ());
1476 /* Remove VAL from the chain all_values
1477 so it will not be freed automatically. */
1480 release_value (struct value
*val
)
1483 return value_ref_ptr ();
1485 std::vector
<value_ref_ptr
>::reverse_iterator iter
;
1486 for (iter
= all_values
.rbegin (); iter
!= all_values
.rend (); ++iter
)
1490 value_ref_ptr result
= *iter
;
1491 all_values
.erase (iter
.base () - 1);
1496 /* We must always return an owned reference. Normally this happens
1497 because we transfer the reference from the value chain, but in
1498 this case the value was not on the chain. */
1499 return value_ref_ptr::new_reference (val
);
1504 std::vector
<value_ref_ptr
>
1505 value_release_to_mark (const struct value
*mark
)
1507 std::vector
<value_ref_ptr
> result
;
1509 auto iter
= std::find (all_values
.begin (), all_values
.end (), mark
);
1510 if (iter
== all_values
.end ())
1511 std::swap (result
, all_values
);
1514 std::move (iter
+ 1, all_values
.end (), std::back_inserter (result
));
1515 all_values
.erase (iter
+ 1, all_values
.end ());
1517 std::reverse (result
.begin (), result
.end ());
1524 value::copy () const
1526 struct type
*encl_type
= enclosing_type ();
1529 val
= value::allocate_lazy (encl_type
);
1530 val
->m_type
= m_type
;
1531 val
->set_lval (m_lval
);
1532 val
->m_location
= m_location
;
1533 val
->m_offset
= m_offset
;
1534 val
->m_bitpos
= m_bitpos
;
1535 val
->m_bitsize
= m_bitsize
;
1536 val
->m_lazy
= m_lazy
;
1537 val
->m_embedded_offset
= embedded_offset ();
1538 val
->m_pointed_to_offset
= m_pointed_to_offset
;
1539 val
->m_modifiable
= m_modifiable
;
1540 val
->m_stack
= m_stack
;
1541 val
->m_is_zero
= m_is_zero
;
1542 val
->m_in_history
= m_in_history
;
1543 val
->m_initialized
= m_initialized
;
1544 val
->m_unavailable
= m_unavailable
;
1545 val
->m_optimized_out
= m_optimized_out
;
1546 val
->m_parent
= m_parent
;
1547 val
->m_limited_length
= m_limited_length
;
1550 && !(val
->entirely_optimized_out ()
1551 || val
->entirely_unavailable ()))
1553 ULONGEST length
= val
->m_limited_length
;
1555 length
= val
->enclosing_type ()->length ();
1557 gdb_assert (m_contents
!= nullptr);
1558 const auto &arg_view
1559 = gdb::make_array_view (m_contents
.get (), length
);
1561 val
->allocate_contents (false);
1562 gdb::array_view
<gdb_byte
> val_contents
1563 = val
->contents_all_raw ().slice (0, length
);
1565 gdb::copy (arg_view
, val_contents
);
1568 if (val
->lval () == lval_computed
)
1570 const struct lval_funcs
*funcs
= val
->m_location
.computed
.funcs
;
1572 if (funcs
->copy_closure
)
1573 val
->m_location
.computed
.closure
= funcs
->copy_closure (val
);
1578 /* Return a "const" and/or "volatile" qualified version of the value V.
1579 If CNST is true, then the returned value will be qualified with
1581 if VOLTL is true, then the returned value will be qualified with
1585 make_cv_value (int cnst
, int voltl
, struct value
*v
)
1587 struct type
*val_type
= v
->type ();
1588 struct type
*m_enclosing_type
= v
->enclosing_type ();
1589 struct value
*cv_val
= v
->copy ();
1591 cv_val
->deprecated_set_type (make_cv_type (cnst
, voltl
, val_type
, NULL
));
1592 cv_val
->set_enclosing_type (make_cv_type (cnst
, voltl
, m_enclosing_type
, NULL
));
1602 if (this->lval () != not_lval
)
1604 struct type
*enc_type
= enclosing_type ();
1605 struct value
*val
= value::allocate (enc_type
);
1607 gdb::copy (contents_all (), val
->contents_all_raw ());
1608 val
->m_type
= m_type
;
1609 val
->set_embedded_offset (embedded_offset ());
1610 val
->set_pointed_to_offset (pointed_to_offset ());
1619 value::force_lval (CORE_ADDR addr
)
1621 gdb_assert (this->lval () == not_lval
);
1623 write_memory (addr
, contents_raw ().data (), type ()->length ());
1624 m_lval
= lval_memory
;
1625 m_location
.address
= addr
;
1629 value::set_component_location (const struct value
*whole
)
1633 gdb_assert (whole
->m_lval
!= lval_xcallable
);
1635 if (whole
->m_lval
== lval_internalvar
)
1636 m_lval
= lval_internalvar_component
;
1638 m_lval
= whole
->m_lval
;
1640 m_location
= whole
->m_location
;
1641 if (whole
->m_lval
== lval_computed
)
1643 const struct lval_funcs
*funcs
= whole
->m_location
.computed
.funcs
;
1645 if (funcs
->copy_closure
)
1646 m_location
.computed
.closure
= funcs
->copy_closure (whole
);
1649 /* If the WHOLE value has a dynamically resolved location property then
1650 update the address of the COMPONENT. */
1651 type
= whole
->type ();
1652 if (NULL
!= TYPE_DATA_LOCATION (type
)
1653 && TYPE_DATA_LOCATION (type
)->is_constant ())
1654 set_address (TYPE_DATA_LOCATION_ADDR (type
));
1656 /* Similarly, if the COMPONENT value has a dynamically resolved location
1657 property then update its address. */
1658 type
= this->type ();
1659 if (NULL
!= TYPE_DATA_LOCATION (type
)
1660 && TYPE_DATA_LOCATION (type
)->is_constant ())
1662 /* If the COMPONENT has a dynamic location, and is an
1663 lval_internalvar_component, then we change it to a lval_memory.
1665 Usually a component of an internalvar is created non-lazy, and has
1666 its content immediately copied from the parent internalvar.
1667 However, for components with a dynamic location, the content of
1668 the component is not contained within the parent, but is instead
1669 accessed indirectly. Further, the component will be created as a
1672 By changing the type of the component to lval_memory we ensure
1673 that value_fetch_lazy can successfully load the component.
1675 This solution isn't ideal, but a real fix would require values to
1676 carry around both the parent value contents, and the contents of
1677 any dynamic fields within the parent. This is a substantial
1678 change to how values work in GDB. */
1679 if (this->lval () == lval_internalvar_component
)
1681 gdb_assert (lazy ());
1682 m_lval
= lval_memory
;
1685 gdb_assert (this->lval () == lval_memory
);
1686 set_address (TYPE_DATA_LOCATION_ADDR (type
));
1690 /* Access to the value history. */
1692 /* Record a new value in the value history.
1693 Returns the absolute history index of the entry. */
1696 value::record_latest ()
1698 /* We don't want this value to have anything to do with the inferior anymore.
1699 In particular, "set $1 = 50" should not affect the variable from which
1700 the value was taken, and fast watchpoints should be able to assume that
1701 a value on the value history never changes. */
1704 /* We know that this is a _huge_ array, any attempt to fetch this
1705 is going to cause GDB to throw an error. However, to allow
1706 the array to still be displayed we fetch its contents up to
1707 `max_value_size' and mark anything beyond "unavailable" in
1709 if (m_type
->code () == TYPE_CODE_ARRAY
1710 && m_type
->length () > max_value_size
1711 && array_length_limiting_element_count
.has_value ()
1712 && m_enclosing_type
== m_type
1713 && calculate_limited_array_length (m_type
) <= max_value_size
)
1714 m_limited_length
= max_value_size
;
1719 ULONGEST limit
= m_limited_length
;
1721 mark_bytes_unavailable (limit
, m_enclosing_type
->length () - limit
);
1723 /* Mark the value as recorded in the history for the availability check. */
1724 m_in_history
= true;
1726 /* We preserve VALUE_LVAL so that the user can find out where it was fetched
1727 from. This is a bit dubious, because then *&$1 does not just return $1
1728 but the current contents of that location. c'est la vie... */
1729 set_modifiable (false);
1731 value_history
.push_back (release_value (this));
1733 return value_history
.size ();
1736 /* Return a copy of the value in the history with sequence number NUM. */
1739 access_value_history (int num
)
1744 absnum
+= value_history
.size ();
1749 error (_("The history is empty."));
1751 error (_("There is only one value in the history."));
1753 error (_("History does not go back to $$%d."), -num
);
1755 if (absnum
> value_history
.size ())
1756 error (_("History has not yet reached $%d."), absnum
);
1760 return value_history
[absnum
]->copy ();
1766 value_history_count ()
1768 return value_history
.size ();
1772 show_values (const char *num_exp
, int from_tty
)
1780 /* "show values +" should print from the stored position.
1781 "show values <exp>" should print around value number <exp>. */
1782 if (num_exp
[0] != '+' || num_exp
[1] != '\0')
1783 num
= parse_and_eval_long (num_exp
) - 5;
1787 /* "show values" means print the last 10 values. */
1788 num
= value_history
.size () - 9;
1794 for (i
= num
; i
< num
+ 10 && i
<= value_history
.size (); i
++)
1796 struct value_print_options opts
;
1798 val
= access_value_history (i
);
1799 gdb_printf (("$%d = "), i
);
1800 get_user_print_options (&opts
);
1801 value_print (val
, gdb_stdout
, &opts
);
1802 gdb_printf (("\n"));
1805 /* The next "show values +" should start after what we just printed. */
1808 /* Hitting just return after this command should do the same thing as
1809 "show values +". If num_exp is null, this is unnecessary, since
1810 "show values +" is not useful after "show values". */
1811 if (from_tty
&& num_exp
)
1812 set_repeat_arguments ("+");
1815 enum internalvar_kind
1817 /* The internal variable is empty. */
1820 /* The value of the internal variable is provided directly as
1821 a GDB value object. */
1824 /* A fresh value is computed via a call-back routine on every
1825 access to the internal variable. */
1826 INTERNALVAR_MAKE_VALUE
,
1828 /* The internal variable holds a GDB internal convenience function. */
1829 INTERNALVAR_FUNCTION
,
1831 /* The variable holds an integer value. */
1832 INTERNALVAR_INTEGER
,
1834 /* The variable holds a GDB-provided string. */
1838 union internalvar_data
1840 /* A value object used with INTERNALVAR_VALUE. */
1841 struct value
*value
;
1843 /* The call-back routine used with INTERNALVAR_MAKE_VALUE. */
1846 /* The functions to call. */
1847 const struct internalvar_funcs
*functions
;
1849 /* The function's user-data. */
1853 /* The internal function used with INTERNALVAR_FUNCTION. */
1856 struct internal_function
*function
;
1857 /* True if this is the canonical name for the function. */
1861 /* An integer value used with INTERNALVAR_INTEGER. */
1864 /* If type is non-NULL, it will be used as the type to generate
1865 a value for this internal variable. If type is NULL, a default
1866 integer type for the architecture is used. */
1871 /* A string value used with INTERNALVAR_STRING. */
1875 /* Internal variables. These are variables within the debugger
1876 that hold values assigned by debugger commands.
1877 The user refers to them with a '$' prefix
1878 that does not appear in the variable names stored internally. */
1882 internalvar (std::string name
)
1883 : name (std::move (name
))
1888 /* We support various different kinds of content of an internal variable.
1889 enum internalvar_kind specifies the kind, and union internalvar_data
1890 provides the data associated with this particular kind. */
1892 enum internalvar_kind kind
= INTERNALVAR_VOID
;
1894 union internalvar_data u
{};
1897 /* Use std::map, a sorted container, to make the order of iteration (and
1898 therefore the output of "show convenience") stable. */
1900 static std::map
<std::string
, internalvar
> internalvars
;
1902 /* If the variable does not already exist create it and give it the
1903 value given. If no value is given then the default is zero. */
1905 init_if_undefined_command (const char* args
, int from_tty
)
1907 struct internalvar
*intvar
= nullptr;
1909 /* Parse the expression - this is taken from set_command(). */
1910 expression_up expr
= parse_expression (args
);
1912 /* Validate the expression.
1913 Was the expression an assignment?
1914 Or even an expression at all? */
1915 if (expr
->first_opcode () != BINOP_ASSIGN
)
1916 error (_("Init-if-undefined requires an assignment expression."));
1918 /* Extract the variable from the parsed expression. */
1919 expr::assign_operation
*assign
1920 = dynamic_cast<expr::assign_operation
*> (expr
->op
.get ());
1921 if (assign
!= nullptr)
1923 expr::operation
*lhs
= assign
->get_lhs ();
1924 expr::internalvar_operation
*ivarop
1925 = dynamic_cast<expr::internalvar_operation
*> (lhs
);
1926 if (ivarop
!= nullptr)
1927 intvar
= ivarop
->get_internalvar ();
1930 if (intvar
== nullptr)
1931 error (_("The first parameter to init-if-undefined "
1932 "should be a GDB variable."));
1934 /* Only evaluate the expression if the lvalue is void.
1935 This may still fail if the expression is invalid. */
1936 if (intvar
->kind
== INTERNALVAR_VOID
)
1941 /* Look up an internal variable with name NAME. NAME should not
1942 normally include a dollar sign.
1944 If the specified internal variable does not exist,
1945 the return value is NULL. */
1947 struct internalvar
*
1948 lookup_only_internalvar (const char *name
)
1950 auto it
= internalvars
.find (name
);
1951 if (it
== internalvars
.end ())
1957 /* Complete NAME by comparing it to the names of internal
1961 complete_internalvar (completion_tracker
&tracker
, const char *name
)
1963 int len
= strlen (name
);
1965 for (auto &pair
: internalvars
)
1967 const internalvar
&var
= pair
.second
;
1969 if (var
.name
.compare (0, len
, name
) == 0)
1970 tracker
.add_completion (make_unique_xstrdup (var
.name
.c_str ()));
1974 /* Create an internal variable with name NAME and with a void value.
1975 NAME should not normally include a dollar sign.
1977 An internal variable with that name must not exist already. */
1979 struct internalvar
*
1980 create_internalvar (const char *name
)
1982 auto pair
= internalvars
.emplace (std::make_pair (name
, internalvar (name
)));
1983 gdb_assert (pair
.second
);
1985 return &pair
.first
->second
;
1988 /* Create an internal variable with name NAME and register FUN as the
1989 function that value_of_internalvar uses to create a value whenever
1990 this variable is referenced. NAME should not normally include a
1991 dollar sign. DATA is passed uninterpreted to FUN when it is
1992 called. CLEANUP, if not NULL, is called when the internal variable
1993 is destroyed. It is passed DATA as its only argument. */
1995 struct internalvar
*
1996 create_internalvar_type_lazy (const char *name
,
1997 const struct internalvar_funcs
*funcs
,
2000 struct internalvar
*var
= create_internalvar (name
);
2002 var
->kind
= INTERNALVAR_MAKE_VALUE
;
2003 var
->u
.make_value
.functions
= funcs
;
2004 var
->u
.make_value
.data
= data
;
2008 /* See documentation in value.h. */
2011 compile_internalvar_to_ax (struct internalvar
*var
,
2012 struct agent_expr
*expr
,
2013 struct axs_value
*value
)
2015 if (var
->kind
!= INTERNALVAR_MAKE_VALUE
2016 || var
->u
.make_value
.functions
->compile_to_ax
== NULL
)
2019 var
->u
.make_value
.functions
->compile_to_ax (var
, expr
, value
,
2020 var
->u
.make_value
.data
);
2024 /* Look up an internal variable with name NAME. NAME should not
2025 normally include a dollar sign.
2027 If the specified internal variable does not exist,
2028 one is created, with a void value. */
2030 struct internalvar
*
2031 lookup_internalvar (const char *name
)
2033 struct internalvar
*var
;
2035 var
= lookup_only_internalvar (name
);
2039 return create_internalvar (name
);
2042 /* Return current value of internal variable VAR. For variables that
2043 are not inherently typed, use a value type appropriate for GDBARCH. */
2046 value_of_internalvar (struct gdbarch
*gdbarch
, struct internalvar
*var
)
2049 struct trace_state_variable
*tsv
;
2051 /* If there is a trace state variable of the same name, assume that
2052 is what we really want to see. */
2053 tsv
= find_trace_state_variable (var
->name
.c_str ());
2056 tsv
->value_known
= target_get_trace_state_variable_value (tsv
->number
,
2058 if (tsv
->value_known
)
2059 val
= value_from_longest (builtin_type (gdbarch
)->builtin_int64
,
2062 val
= value::allocate (builtin_type (gdbarch
)->builtin_void
);
2068 case INTERNALVAR_VOID
:
2069 val
= value::allocate (builtin_type (gdbarch
)->builtin_void
);
2072 case INTERNALVAR_FUNCTION
:
2073 val
= value::allocate (builtin_type (gdbarch
)->internal_fn
);
2076 case INTERNALVAR_INTEGER
:
2077 if (!var
->u
.integer
.type
)
2078 val
= value_from_longest (builtin_type (gdbarch
)->builtin_int
,
2079 var
->u
.integer
.val
);
2081 val
= value_from_longest (var
->u
.integer
.type
, var
->u
.integer
.val
);
2084 case INTERNALVAR_STRING
:
2085 val
= current_language
->value_string (gdbarch
,
2087 strlen (var
->u
.string
));
2090 case INTERNALVAR_VALUE
:
2091 val
= var
->u
.value
->copy ();
2096 case INTERNALVAR_MAKE_VALUE
:
2097 val
= (*var
->u
.make_value
.functions
->make_value
) (gdbarch
, var
,
2098 var
->u
.make_value
.data
);
2102 internal_error (_("bad kind"));
2105 /* Change the VALUE_LVAL to lval_internalvar so that future operations
2106 on this value go back to affect the original internal variable.
2108 Do not do this for INTERNALVAR_MAKE_VALUE variables, as those have
2109 no underlying modifiable state in the internal variable.
2111 Likewise, if the variable's value is a computed lvalue, we want
2112 references to it to produce another computed lvalue, where
2113 references and assignments actually operate through the
2114 computed value's functions.
2116 This means that internal variables with computed values
2117 behave a little differently from other internal variables:
2118 assignments to them don't just replace the previous value
2119 altogether. At the moment, this seems like the behavior we
2122 if (var
->kind
!= INTERNALVAR_MAKE_VALUE
2123 && val
->lval () != lval_computed
)
2125 val
->set_lval (lval_internalvar
);
2126 VALUE_INTERNALVAR (val
) = var
;
2133 get_internalvar_integer (struct internalvar
*var
, LONGEST
*result
)
2135 if (var
->kind
== INTERNALVAR_INTEGER
)
2137 *result
= var
->u
.integer
.val
;
2141 if (var
->kind
== INTERNALVAR_VALUE
)
2143 struct type
*type
= check_typedef (var
->u
.value
->type ());
2145 if (type
->code () == TYPE_CODE_INT
)
2147 *result
= value_as_long (var
->u
.value
);
2152 if (var
->kind
== INTERNALVAR_MAKE_VALUE
)
2154 struct gdbarch
*gdbarch
= get_current_arch ();
2156 = (*var
->u
.make_value
.functions
->make_value
) (gdbarch
, var
,
2157 var
->u
.make_value
.data
);
2158 struct type
*type
= check_typedef (val
->type ());
2160 if (type
->code () == TYPE_CODE_INT
)
2162 *result
= value_as_long (val
);
2171 get_internalvar_function (struct internalvar
*var
,
2172 struct internal_function
**result
)
2176 case INTERNALVAR_FUNCTION
:
2177 *result
= var
->u
.fn
.function
;
2186 set_internalvar_component (struct internalvar
*var
,
2187 LONGEST offset
, LONGEST bitpos
,
2188 LONGEST bitsize
, struct value
*newval
)
2191 struct gdbarch
*gdbarch
;
2196 case INTERNALVAR_VALUE
:
2197 addr
= var
->u
.value
->contents_writeable ().data ();
2198 gdbarch
= var
->u
.value
->arch ();
2199 unit_size
= gdbarch_addressable_memory_unit_size (gdbarch
);
2202 modify_field (var
->u
.value
->type (), addr
+ offset
,
2203 value_as_long (newval
), bitpos
, bitsize
);
2205 memcpy (addr
+ offset
* unit_size
, newval
->contents ().data (),
2206 newval
->type ()->length ());
2210 /* We can never get a component of any other kind. */
2211 internal_error (_("set_internalvar_component"));
2216 set_internalvar (struct internalvar
*var
, struct value
*val
)
2218 enum internalvar_kind new_kind
;
2219 union internalvar_data new_data
= { 0 };
2221 if (var
->kind
== INTERNALVAR_FUNCTION
&& var
->u
.fn
.canonical
)
2222 error (_("Cannot overwrite convenience function %s"), var
->name
.c_str ());
2224 /* Prepare new contents. */
2225 switch (check_typedef (val
->type ())->code ())
2227 case TYPE_CODE_VOID
:
2228 new_kind
= INTERNALVAR_VOID
;
2231 case TYPE_CODE_INTERNAL_FUNCTION
:
2232 gdb_assert (val
->lval () == lval_internalvar
);
2233 new_kind
= INTERNALVAR_FUNCTION
;
2234 get_internalvar_function (VALUE_INTERNALVAR (val
),
2235 &new_data
.fn
.function
);
2236 /* Copies created here are never canonical. */
2240 new_kind
= INTERNALVAR_VALUE
;
2241 struct value
*copy
= val
->copy ();
2242 copy
->set_modifiable (true);
2244 /* Force the value to be fetched from the target now, to avoid problems
2245 later when this internalvar is referenced and the target is gone or
2248 copy
->fetch_lazy ();
2250 /* Release the value from the value chain to prevent it from being
2251 deleted by free_all_values. From here on this function should not
2252 call error () until new_data is installed into the var->u to avoid
2254 new_data
.value
= release_value (copy
).release ();
2256 /* Internal variables which are created from values with a dynamic
2257 location don't need the location property of the origin anymore.
2258 The resolved dynamic location is used prior then any other address
2259 when accessing the value.
2260 If we keep it, we would still refer to the origin value.
2261 Remove the location property in case it exist. */
2262 new_data
.value
->type ()->remove_dyn_prop (DYN_PROP_DATA_LOCATION
);
2267 /* Clean up old contents. */
2268 clear_internalvar (var
);
2271 var
->kind
= new_kind
;
2273 /* End code which must not call error(). */
2277 set_internalvar_integer (struct internalvar
*var
, LONGEST l
)
2279 /* Clean up old contents. */
2280 clear_internalvar (var
);
2282 var
->kind
= INTERNALVAR_INTEGER
;
2283 var
->u
.integer
.type
= NULL
;
2284 var
->u
.integer
.val
= l
;
2288 set_internalvar_string (struct internalvar
*var
, const char *string
)
2290 /* Clean up old contents. */
2291 clear_internalvar (var
);
2293 var
->kind
= INTERNALVAR_STRING
;
2294 var
->u
.string
= xstrdup (string
);
2298 set_internalvar_function (struct internalvar
*var
, struct internal_function
*f
)
2300 /* Clean up old contents. */
2301 clear_internalvar (var
);
2303 var
->kind
= INTERNALVAR_FUNCTION
;
2304 var
->u
.fn
.function
= f
;
2305 var
->u
.fn
.canonical
= 1;
2306 /* Variables installed here are always the canonical version. */
2310 clear_internalvar (struct internalvar
*var
)
2312 /* Clean up old contents. */
2315 case INTERNALVAR_VALUE
:
2316 var
->u
.value
->decref ();
2319 case INTERNALVAR_STRING
:
2320 xfree (var
->u
.string
);
2327 /* Reset to void kind. */
2328 var
->kind
= INTERNALVAR_VOID
;
2332 internalvar_name (const struct internalvar
*var
)
2334 return var
->name
.c_str ();
2337 static struct internal_function
*
2338 create_internal_function (const char *name
,
2339 internal_function_fn handler
, void *cookie
)
2341 struct internal_function
*ifn
= XNEW (struct internal_function
);
2343 ifn
->name
= xstrdup (name
);
2344 ifn
->handler
= handler
;
2345 ifn
->cookie
= cookie
;
2350 value_internal_function_name (struct value
*val
)
2352 struct internal_function
*ifn
;
2355 gdb_assert (val
->lval () == lval_internalvar
);
2356 result
= get_internalvar_function (VALUE_INTERNALVAR (val
), &ifn
);
2357 gdb_assert (result
);
2363 call_internal_function (struct gdbarch
*gdbarch
,
2364 const struct language_defn
*language
,
2365 struct value
*func
, int argc
, struct value
**argv
)
2367 struct internal_function
*ifn
;
2370 gdb_assert (func
->lval () == lval_internalvar
);
2371 result
= get_internalvar_function (VALUE_INTERNALVAR (func
), &ifn
);
2372 gdb_assert (result
);
2374 return (*ifn
->handler
) (gdbarch
, language
, ifn
->cookie
, argc
, argv
);
/* The 'function' command.  This does nothing -- it is just a
   placeholder to let "help function NAME" work.  This is also used as
   the implementation of the sub-command that is created when
   registering an internal function.  */

static void
function_command (const char *command, int from_tty)
{
  /* Do nothing.  */
}
2387 /* Helper function that does the work for add_internal_function. */
2389 static struct cmd_list_element
*
2390 do_add_internal_function (const char *name
, const char *doc
,
2391 internal_function_fn handler
, void *cookie
)
2393 struct internal_function
*ifn
;
2394 struct internalvar
*var
= lookup_internalvar (name
);
2396 ifn
= create_internal_function (name
, handler
, cookie
);
2397 set_internalvar_function (var
, ifn
);
2399 return add_cmd (name
, no_class
, function_command
, doc
, &functionlist
);
2405 add_internal_function (const char *name
, const char *doc
,
2406 internal_function_fn handler
, void *cookie
)
2408 do_add_internal_function (name
, doc
, handler
, cookie
);
2414 add_internal_function (gdb::unique_xmalloc_ptr
<char> &&name
,
2415 gdb::unique_xmalloc_ptr
<char> &&doc
,
2416 internal_function_fn handler
, void *cookie
)
2418 struct cmd_list_element
*cmd
2419 = do_add_internal_function (name
.get (), doc
.get (), handler
, cookie
);
2421 /* Manually transfer the ownership of the doc and name strings to CMD by
2422 setting the appropriate flags. */
2423 (void) doc
.release ();
2424 cmd
->doc_allocated
= 1;
2425 (void) name
.release ();
2426 cmd
->name_allocated
= 1;
2430 value::preserve (struct objfile
*objfile
, htab_t copied_types
)
2432 if (m_type
->objfile_owner () == objfile
)
2433 m_type
= copy_type_recursive (m_type
, copied_types
);
2435 if (m_enclosing_type
->objfile_owner () == objfile
)
2436 m_enclosing_type
= copy_type_recursive (m_enclosing_type
, copied_types
);
2439 /* Likewise for internal variable VAR. */
2442 preserve_one_internalvar (struct internalvar
*var
, struct objfile
*objfile
,
2443 htab_t copied_types
)
2447 case INTERNALVAR_INTEGER
:
2448 if (var
->u
.integer
.type
2449 && var
->u
.integer
.type
->objfile_owner () == objfile
)
2451 = copy_type_recursive (var
->u
.integer
.type
, copied_types
);
2454 case INTERNALVAR_VALUE
:
2455 var
->u
.value
->preserve (objfile
, copied_types
);
2460 /* Make sure that all types and values referenced by VAROBJ are updated before
2461 OBJFILE is discarded. COPIED_TYPES is used to prevent cycles and
2465 preserve_one_varobj (struct varobj
*varobj
, struct objfile
*objfile
,
2466 htab_t copied_types
)
2468 if (varobj
->type
->is_objfile_owned ()
2469 && varobj
->type
->objfile_owner () == objfile
)
2472 = copy_type_recursive (varobj
->type
, copied_types
);
2475 if (varobj
->value
!= nullptr)
2476 varobj
->value
->preserve (objfile
, copied_types
);
2479 /* Update the internal variables and value history when OBJFILE is
2480 discarded; we must copy the types out of the objfile. New global types
2481 will be created for every convenience variable which currently points to
2482 this objfile's types, and the convenience variables will be adjusted to
2483 use the new global types. */
2486 preserve_values (struct objfile
*objfile
)
2488 /* Create the hash table. We allocate on the objfile's obstack, since
2489 it is soon to be deleted. */
2490 htab_up copied_types
= create_copied_types_hash ();
2492 for (const value_ref_ptr
&item
: value_history
)
2493 item
->preserve (objfile
, copied_types
.get ());
2495 for (auto &pair
: internalvars
)
2496 preserve_one_internalvar (&pair
.second
, objfile
, copied_types
.get ());
2498 /* For the remaining varobj, check that none has type owned by OBJFILE. */
2499 all_root_varobjs ([&copied_types
, objfile
] (struct varobj
*varobj
)
2501 preserve_one_varobj (varobj
, objfile
,
2502 copied_types
.get ());
2505 preserve_ext_lang_values (objfile
, copied_types
.get ());
2509 show_convenience (const char *ignore
, int from_tty
)
2511 struct gdbarch
*gdbarch
= get_current_arch ();
2513 struct value_print_options opts
;
2515 get_user_print_options (&opts
);
2516 for (auto &pair
: internalvars
)
2518 internalvar
&var
= pair
.second
;
2524 gdb_printf (("$%s = "), var
.name
.c_str ());
2530 val
= value_of_internalvar (gdbarch
, &var
);
2531 value_print (val
, gdb_stdout
, &opts
);
2533 catch (const gdb_exception_error
&ex
)
2535 fprintf_styled (gdb_stdout
, metadata_style
.style (),
2536 _("<error: %s>"), ex
.what ());
2539 gdb_printf (("\n"));
2543 /* This text does not mention convenience functions on purpose.
2544 The user can't create them except via Python, and if Python support
2545 is installed this message will never be printed ($_streq will
2547 gdb_printf (_("No debugger convenience variables now defined.\n"
2548 "Convenience variables have "
2549 "names starting with \"$\";\n"
2550 "use \"set\" as in \"set "
2551 "$foo = 5\" to define them.\n"));
2559 value::from_xmethod (xmethod_worker_up
&&worker
)
2563 v
= value::allocate (builtin_type (current_inferior ()->arch ())->xmethod
);
2564 v
->m_lval
= lval_xcallable
;
2565 v
->m_location
.xm_worker
= worker
.release ();
2566 v
->m_modifiable
= false;
2574 value::result_type_of_xmethod (gdb::array_view
<value
*> argv
)
2576 gdb_assert (type ()->code () == TYPE_CODE_XMETHOD
2577 && m_lval
== lval_xcallable
&& !argv
.empty ());
2579 return m_location
.xm_worker
->get_result_type (argv
[0], argv
.slice (1));
2585 value::call_xmethod (gdb::array_view
<value
*> argv
)
2587 gdb_assert (type ()->code () == TYPE_CODE_XMETHOD
2588 && m_lval
== lval_xcallable
&& !argv
.empty ());
2590 return m_location
.xm_worker
->invoke (argv
[0], argv
.slice (1));
2593 /* Extract a value as a C number (either long or double).
2594 Knows how to convert fixed values to double, or
2595 floating values to long.
2596 Does not deallocate the value. */
2599 value_as_long (struct value
*val
)
2601 /* This coerces arrays and functions, which is necessary (e.g.
2602 in disassemble_command). It also dereferences references, which
2603 I suspect is the most logical thing to do. */
2604 val
= coerce_array (val
);
2605 return unpack_long (val
->type (), val
->contents ().data ());
2611 value_as_mpz (struct value
*val
)
2613 val
= coerce_array (val
);
2614 struct type
*type
= check_typedef (val
->type ());
2616 switch (type
->code ())
2618 case TYPE_CODE_ENUM
:
2619 case TYPE_CODE_BOOL
:
2621 case TYPE_CODE_CHAR
:
2622 case TYPE_CODE_RANGE
:
2626 return gdb_mpz (value_as_long (val
));
2631 gdb::array_view
<const gdb_byte
> valbytes
= val
->contents ();
2632 enum bfd_endian byte_order
= type_byte_order (type
);
2634 /* Handle integers that are either not a multiple of the word size,
2635 or that are stored at some bit offset. */
2636 unsigned bit_off
= 0, bit_size
= 0;
2637 if (type
->bit_size_differs_p ())
2639 bit_size
= type
->bit_size ();
2642 /* We can just handle this immediately. */
2646 bit_off
= type
->bit_offset ();
2648 unsigned n_bytes
= ((bit_off
% 8) + bit_size
+ 7) / 8;
2649 valbytes
= valbytes
.slice (bit_off
/ 8, n_bytes
);
2651 if (byte_order
== BFD_ENDIAN_BIG
)
2652 bit_off
= (n_bytes
* 8 - bit_off
% 8 - bit_size
);
2657 result
.read (val
->contents (), byte_order
, type
->is_unsigned ());
2659 /* Shift off any low bits, if needed. */
2663 /* Mask off any high bits, if needed. */
2665 result
.mask (bit_size
);
2667 /* Now handle any range bias. */
2668 if (type
->code () == TYPE_CODE_RANGE
&& type
->bounds ()->bias
!= 0)
2670 /* Unfortunately we have to box here, because LONGEST is
2671 probably wider than long. */
2672 result
+= gdb_mpz (type
->bounds ()->bias
);
/* Extract a value as a C pointer.  */

CORE_ADDR
value_as_address (struct value *val)
{
  struct gdbarch *gdbarch = val->type ()->arch ();

  /* Assume a CORE_ADDR can fit in a LONGEST (for now).  Not sure
     whether we want this to be true eventually.  */
#if 0
  /* gdbarch_addr_bits_remove is wrong if we are being called for a
     non-address (e.g. argument to "signal", "info break", etc.), or
     for pointers to char, in which the low bits *are* significant.  */
  return gdbarch_addr_bits_remove (gdbarch, value_as_long (val));
#else

  /* There are several targets (IA-64, PowerPC, and others) which
     don't represent pointers to functions as simply the address of
     the function's entry point.  For example, on the IA-64, a
     function pointer points to a two-word descriptor, generated by
     the linker, which contains the function's entry point, and the
     value the IA-64 "global pointer" register should have --- to
     support position-independent code.  The linker generates
     descriptors only for those functions whose addresses are taken.

     On such targets, it's difficult for GDB to convert an arbitrary
     function address into a function pointer; it has to either find
     an existing descriptor for that function, or call malloc and
     build its own.  On some targets, it is impossible for GDB to
     build a descriptor at all: the descriptor must contain a jump
     instruction; data memory cannot be executed; and code memory
     cannot be modified.

     Upon entry to this function, if VAL is a value of type `function'
     (that is, TYPE_CODE (val->type ()) == TYPE_CODE_FUNC), then
     val->address () is the address of the function.  This is what
     you'll get if you evaluate an expression like `main'.  The call
     to COERCE_ARRAY below actually does all the usual unary
     conversions, which includes converting values of type `function'
     to `pointer to function'.  This is the challenging conversion
     discussed above.  Then, `unpack_pointer' will convert that pointer
     back into an address.

     So, suppose the user types `disassemble foo' on an architecture
     with a strange function pointer representation, on which GDB
     cannot build its own descriptors, and suppose further that `foo'
     has no linker-built descriptor.  The address->pointer conversion
     will signal an error and prevent the command from running, even
     though the next step would have been to convert the pointer
     directly back into the same address.

     The following shortcut avoids this whole mess.  If VAL is a
     function, just return its address directly.  */
  if (val->type ()->code () == TYPE_CODE_FUNC
      || val->type ()->code () == TYPE_CODE_METHOD)
    return val->address ();

  val = coerce_array (val);

  /* Some architectures (e.g. Harvard), map instruction and data
     addresses onto a single large unified address space.  For
     instance: An architecture may consider a large integer in the
     range 0x10000000 .. 0x1000ffff to already represent a data
     addresses (hence not need a pointer to address conversion) while
     a small integer would still need to be converted integer to
     pointer to address.  Just assume such architectures handle all
     integer conversions in a single function.  */

  /* JimB writes:

     I think INTEGER_TO_ADDRESS is a good idea as proposed --- but we
     must admonish GDB hackers to make sure its behavior matches the
     compiler's, whenever possible.

     In general, I think GDB should evaluate expressions the same way
     the compiler does.  When the user copies an expression out of
     their source code and hands it to a `print' command, they should
     get the same value the compiler would have computed.  Any
     deviation from this rule can cause major confusion and annoyance,
     and needs to be justified carefully.  In other words, GDB doesn't
     really have the freedom to do these conversions in clever and
     useful ways.

     AndrewC pointed out that users aren't complaining about how GDB
     casts integers to pointers; they are complaining that they can't
     take an address from a disassembly listing and give it to `x/i'.
     This is certainly important.

     Adding an architecture method like integer_to_address() certainly
     makes it possible for GDB to "get it right" in all circumstances
     --- the target has complete control over how things get done, so
     people can Do The Right Thing for their target without breaking
     anyone else.  The standard doesn't specify how integers get
     converted to pointers; usually, the ABI doesn't either, but
     ABI-specific code is a more reasonable place to handle it.  */

  if (!val->type ()->is_pointer_or_reference ()
      && gdbarch_integer_to_address_p (gdbarch))
    return gdbarch_integer_to_address (gdbarch, val->type (),
				       val->contents ().data ());

  return unpack_pointer (val->type (), val->contents ().data ());
#endif
}
/* Unpack raw data (copied from debugee, target byte order) at VALADDR
   as a long, or as a double, assuming the raw data is described
   by type TYPE.  Knows how to convert different sizes of values
   and can convert between fixed and floating point.  We don't assume
   any alignment for the raw data.  Return value is in host byte order.

   If you want functions and arrays to be coerced to pointers, and
   references to be dereferenced, call value_as_long() instead.

   C++: It is assumed that the front-end has taken care of
   all matters concerning pointers to members.  A pointer
   to member which reaches here is considered to be equivalent
   to an INT (or some size).  After all, it is only an offset.  */

LONGEST
unpack_long (struct type *type, const gdb_byte *valaddr)
{
  /* A fixed-point value is stored as its underlying integer base
     type; unpack that representation instead.  */
  if (is_fixed_point_type (type))
    type = type->fixed_point_type_base_type ();

  enum bfd_endian byte_order = type_byte_order (type);
  enum type_code code = type->code ();
  int len = type->length ();
  int nosign = type->is_unsigned ();

  switch (code)
    {
    case TYPE_CODE_TYPEDEF:
      /* Strip one typedef level and retry.  */
      return unpack_long (check_typedef (type), valaddr);
    case TYPE_CODE_ENUM:
    case TYPE_CODE_FLAGS:
    case TYPE_CODE_BOOL:
    case TYPE_CODE_INT:
    case TYPE_CODE_CHAR:
    case TYPE_CODE_RANGE:
    case TYPE_CODE_MEMBERPTR:
      {
	LONGEST result;

	if (type->bit_size_differs_p ())
	  {
	    /* Sub-word integer stored at some bit offset.  */
	    unsigned bit_off = type->bit_offset ();
	    unsigned bit_size = type->bit_size ();
	    if (bit_size == 0)
	      {
		/* unpack_bits_as_long doesn't handle this case the
		   way we'd like, so handle it here.  */
		result = 0;
	      }
	    else
	      result = unpack_bits_as_long (type, valaddr, bit_off, bit_size);
	  }
	else
	  {
	    if (nosign)
	      result = extract_unsigned_integer (valaddr, len, byte_order);
	    else
	      result = extract_signed_integer (valaddr, len, byte_order);
	  }
	/* Range types may store values biased by a constant; undo
	   the bias to recover the logical value.  */
	if (code == TYPE_CODE_RANGE)
	  result += type->bounds ()->bias;
	return result;
      }

    case TYPE_CODE_FLT:
    case TYPE_CODE_DECFLOAT:
      return target_float_to_longest (valaddr, type);

    case TYPE_CODE_FIXED_POINT:
      {
	gdb_mpq vq;
	vq.read_fixed_point (gdb::make_array_view (valaddr, len),
			     byte_order, nosign,
			     type->fixed_point_scaling_factor ());

	gdb_mpz vz = vq.as_integer ();
	return vz.as_integer<LONGEST> ();
      }

    case TYPE_CODE_PTR:
    case TYPE_CODE_REF:
    case TYPE_CODE_RVALUE_REF:
      /* Assume a CORE_ADDR can fit in a LONGEST (for now).  Not sure
	 whether we want this to be true eventually.  */
      return extract_typed_address (valaddr, type);

    default:
      error (_("Value can't be converted to integer."));
    }
}
/* Unpack raw data (copied from debugee, target byte order) at VALADDR
   as a CORE_ADDR, assuming the raw data is described by type TYPE.
   We don't assume any alignment for the raw data.  Return value is in
   host byte order.

   If you want functions and arrays to be coerced to pointers, and
   references to be dereferenced, call value_as_address() instead.

   C++: It is assumed that the front-end has taken care of
   all matters concerning pointers to members.  A pointer
   to member which reaches here is considered to be equivalent
   to an INT (or some size).  After all, it is only an offset.  */

CORE_ADDR
unpack_pointer (struct type *type, const gdb_byte *valaddr)
{
  /* Assume a CORE_ADDR can fit in a LONGEST (for now).  Not sure
     whether we want this to be true eventually.  */
  return unpack_long (type, valaddr);
}
2896 is_floating_value (struct value
*val
)
2898 struct type
*type
= check_typedef (val
->type ());
2900 if (is_floating_type (type
))
2902 if (!target_float_is_valid (val
->contents ().data (), type
))
2903 error (_("Invalid floating value found in program."));
/* Get the value of the FIELDNO'th field (which must be static) of
   TYPE.  */

struct value *
value_static_field (struct type *type, int fieldno)
{
  struct value *retval;

  switch (type->field (fieldno).loc_kind ())
    {
    case FIELD_LOC_KIND_PHYSADDR:
      /* The field's address is known directly; read it lazily.  */
      retval = value_at_lazy (type->field (fieldno).type (),
			      type->field (fieldno).loc_physaddr ());
      break;
    case FIELD_LOC_KIND_PHYSNAME:
    {
      /* The field is located via its mangled name; look the symbol
	 up.  */
      const char *phys_name = type->field (fieldno).loc_physname ();
      /* type->field (fieldno).name (); */
      struct block_symbol sym = lookup_symbol (phys_name, nullptr,
					       SEARCH_VAR_DOMAIN, nullptr);

      if (sym.symbol == NULL)
	{
	  /* With some compilers, e.g. HP aCC, static data members are
	     reported as non-debuggable symbols.  */
	  struct bound_minimal_symbol msym
	    = lookup_minimal_symbol (phys_name, NULL, NULL);
	  struct type *field_type = type->field (fieldno).type ();

	  if (!msym.minsym)
	    /* No symbol at all: the member was optimized away.  */
	    retval = value::allocate_optimized_out (field_type);
	  else
	    retval = value_at_lazy (field_type, msym.value_address ());
	}
      else
	retval = value_of_variable (sym.symbol, sym.block);
      break;
    }
    default:
      gdb_assert_not_reached ("unexpected field location kind");
    }

  return retval;
}
/* Change the enclosing type of a value object VAL to NEW_ENCL_TYPE.
   You have to be careful here, since the size of the data area for the value
   is set by the length of the enclosing type.  So if NEW_ENCL_TYPE is bigger
   than the old enclosing type, you have to allocate more space for the
   data.  */

void
value::set_enclosing_type (struct type *new_encl_type)
{
  if (new_encl_type->length () > enclosing_type ()->length ())
    {
      check_type_length_before_alloc (new_encl_type);
      /* Grow the contents buffer in place; release/reset keeps the
	 unique_ptr consistent across the xrealloc.  */
      m_contents.reset ((gdb_byte *) xrealloc (m_contents.release (),
					       new_encl_type->length ()));
    }

  m_enclosing_type = new_encl_type;
}
/* See value.h.  Extract field FIELDNO of ARG_TYPE from this value, at
   byte offset OFFSET into the enclosing object.  Handles bitfields,
   base-class subobjects, dynamic data members, and plain members.  */

struct value *
value::primitive_field (LONGEST offset, int fieldno, struct type *arg_type)
{
  struct value *v;
  struct type *type;
  int unit_size = gdbarch_addressable_memory_unit_size (arch ());

  arg_type = check_typedef (arg_type);
  type = arg_type->field (fieldno).type ();

  /* Call check_typedef on our type to make sure that, if TYPE
     is a TYPE_CODE_TYPEDEF, its length is set to the length
     of the target type instead of zero.  However, we do not
     replace the typedef type by the target type, because we want
     to keep the typedef in order to be able to print the type
     description correctly.  */
  check_typedef (type);

  if (arg_type->field (fieldno).bitsize ())
    {
      /* Handle packed fields.

	 Create a new value for the bitfield, with bitpos and bitsize
	 set.  If possible, arrange offset and bitpos so that we can
	 do a single aligned read of the size of the containing type.
	 Otherwise, adjust offset to the byte containing the first
	 bit.  Assume that the address, offset, and embedded offset
	 are sufficiently aligned.  */

      LONGEST bitpos = arg_type->field (fieldno).loc_bitpos ();
      LONGEST container_bitsize = type->length () * 8;

      v = value::allocate_lazy (type);
      v->set_bitsize (arg_type->field (fieldno).bitsize ());
      /* Prefer an aligned container-sized read when the whole
	 bitfield fits in it and a LONGEST can hold it.  */
      if ((bitpos % container_bitsize) + v->bitsize () <= container_bitsize
	  && type->length () <= (int) sizeof (LONGEST))
	v->set_bitpos (bitpos % container_bitsize);
      else
	v->set_bitpos (bitpos % 8);
      v->set_offset ((embedded_offset ()
		      + offset
		      + (bitpos - v->bitpos ()) / 8));
      v->set_parent (this);
      if (!lazy ())
	v->fetch_lazy ();
    }
  else if (fieldno < TYPE_N_BASECLASSES (arg_type))
    {
      /* This field is actually a base subobject, so preserve the
	 entire object's contents for later references to virtual
	 bases, etc.  */
      LONGEST boffset;

      /* Lazy register values with offsets are not supported.  */
      if (this->lval () == lval_register && lazy ())
	fetch_lazy ();

      /* We special case virtual inheritance here because this
	 requires access to the contents, which we would rather avoid
	 for references to ordinary fields of unavailable values.  */
      if (BASETYPE_VIA_VIRTUAL (arg_type, fieldno))
	boffset = baseclass_offset (arg_type, fieldno,
				    contents ().data (),
				    embedded_offset (),
				    address (),
				    this);
      else
	boffset = arg_type->field (fieldno).loc_bitpos () / 8;

      if (lazy ())
	v = value::allocate_lazy (enclosing_type ());
      else
	{
	  v = value::allocate (enclosing_type ());
	  contents_copy_raw (v, 0, 0, enclosing_type ()->length ());
	}
      v->deprecated_set_type (type);
      v->set_offset (this->offset ());
      v->set_embedded_offset (offset + embedded_offset () + boffset);
    }
  else if (NULL != TYPE_DATA_LOCATION (type))
    {
      /* Field is a dynamic data member.  */

      gdb_assert (0 == offset);
      /* We expect an already resolved data location.  */
      gdb_assert (TYPE_DATA_LOCATION (type)->is_constant ());
      /* For dynamic data types defer memory allocation
	 until we actual access the value.  */
      v = value::allocate_lazy (type);
      v->set_address (address () + TYPE_DATA_LOCATION_ADDR (type));
    }
  else
    {
      /* Plain old data member */
      offset += (arg_type->field (fieldno).loc_bitpos ()
		 / (HOST_CHAR_BIT * unit_size));

      /* Lazy register values with offsets are not supported.  */
      if (this->lval () == lval_register && lazy ())
	fetch_lazy ();

      if (lazy ())
	v = value::allocate_lazy (type);
      else
	{
	  v = value::allocate (type);
	  contents_copy_raw (v, v->embedded_offset (),
			     embedded_offset () + offset,
			     type_length_units (type));
	}
      v->set_offset (this->offset () + offset + embedded_offset ());
    }
  v->set_component_location (this);
  return v;
}
3093 /* Given a value ARG1 of a struct or union type,
3094 extract and return the value of one of its (non-static) fields.
3095 FIELDNO says which field. */
3098 value_field (struct value
*arg1
, int fieldno
)
3100 return arg1
->primitive_field (0, fieldno
, arg1
->type ());
/* Return a non-virtual function as a value.
   F is the list of member functions which contains the desired method.
   J is an index into F which provides the desired method.

   We only use the symbol for its address, so be happy with either a
   full symbol or a minimal symbol.  */

struct value *
value_fn_field (struct value **arg1p, struct fn_field *f,
		int j, struct type *type,
		LONGEST offset)
{
  struct value *v;
  struct type *ftype = TYPE_FN_FIELD_TYPE (f, j);
  const char *physname = TYPE_FN_FIELD_PHYSNAME (f, j);
  struct symbol *sym;
  struct bound_minimal_symbol msym;

  sym = lookup_symbol (physname, nullptr, SEARCH_FUNCTION_DOMAIN,
		       nullptr).symbol;
  if (sym == nullptr)
    {
      /* Fall back to a minimal symbol when there is no debug info.  */
      msym = lookup_bound_minimal_symbol (physname);
      if (msym.minsym == NULL)
	return NULL;
    }

  v = value::allocate (ftype);
  v->set_lval (lval_memory);
  if (sym)
    {
      v->set_address (sym->value_block ()->entry_pc ());
    }
  else
    {
      /* The minimal symbol might point to a function descriptor;
	 resolve it to the actual code address instead.  */
      struct objfile *objfile = msym.objfile;
      struct gdbarch *gdbarch = objfile->arch ();

      v->set_address (gdbarch_convert_from_func_ptr_addr
		      (gdbarch, msym.value_address (),
		       current_inferior ()->top_target ()));
    }

  if (arg1p)
    {
      /* Re-cast `this' to the type that declares the method, if it
	 differs.  */
      if (type != (*arg1p)->type ())
	*arg1p = value_ind (value_cast (lookup_pointer_type (type),
					value_addr (*arg1p)));

      /* Move the `this' pointer according to the offset.
	 (*arg1p)->offset () += offset;  */
    }

  return v;
}
/* Unpack a bitfield of BITSIZE bits starting BITPOS bits into the
   data at VALADDR, interpreting it via FIELD_TYPE (for byte order and
   signedness).  A BITSIZE of zero means "use the full length of
   FIELD_TYPE".  The result is sign-extended when FIELD_TYPE is
   signed.  */

static LONGEST
unpack_bits_as_long (struct type *field_type, const gdb_byte *valaddr,
		     LONGEST bitpos, LONGEST bitsize)
{
  enum bfd_endian byte_order = type_byte_order (field_type);
  ULONGEST val;
  ULONGEST valmask;
  int lsbcount;
  LONGEST bytes_read;
  LONGEST read_offset;

  /* Read the minimum number of bytes required; there may not be
     enough bytes to read an entire ULONGEST.  */
  field_type = check_typedef (field_type);
  if (bitsize)
    bytes_read = ((bitpos % 8) + bitsize + 7) / 8;
  else
    {
      /* No explicit bit size: read the whole underlying type.  */
      bytes_read = field_type->length ();
      bitsize = 8 * bytes_read;
    }

  read_offset = bitpos / 8;

  val = extract_unsigned_integer (valaddr + read_offset,
				  bytes_read, byte_order);

  /* Extract bits.  See comment above.  */

  if (byte_order == BFD_ENDIAN_BIG)
    lsbcount = (bytes_read * 8 - bitpos % 8 - bitsize);
  else
    lsbcount = (bitpos % 8);
  val >>= lsbcount;

  /* If the field does not entirely fill a LONGEST, then zero the sign bits.
     If the field is signed, and is negative, then sign extend.  */

  if (bitsize < 8 * (int) sizeof (val))
    {
      valmask = (((ULONGEST) 1) << bitsize) - 1;
      val &= valmask;
      if (!field_type->is_unsigned ())
	{
	  /* Top bit set: sign-extend by ORing in all higher bits.  */
	  if (val & (valmask ^ (valmask >> 1)))
	    {
	      val |= ~valmask;
	    }
	}
    }

  return val;
}
/* Unpack a field FIELDNO of the specified TYPE, from the object at
   VALADDR + EMBEDDED_OFFSET.  VALADDR points to the contents of
   ORIGINAL_VALUE, which must not be NULL.  See
   unpack_value_bits_as_long for more details.  Returns 0 (leaving
   *RESULT untouched) when any of the field's bits are optimized out
   or unavailable, 1 on success.  */

int
unpack_value_field_as_long (struct type *type, const gdb_byte *valaddr,
			    LONGEST embedded_offset, int fieldno,
			    const struct value *val, LONGEST *result)
{
  int bitpos = type->field (fieldno).loc_bitpos ();
  int bitsize = type->field (fieldno).bitsize ();
  struct type *field_type = type->field (fieldno).type ();
  int bit_offset;

  gdb_assert (val != NULL);

  /* Refuse to unpack if any bit of the field is invalid.  */
  bit_offset = embedded_offset * TARGET_CHAR_BIT + bitpos;
  if (val->bits_any_optimized_out (bit_offset, bitsize)
      || !val->bits_available (bit_offset, bitsize))
    return 0;

  *result = unpack_bits_as_long (field_type, valaddr + embedded_offset,
				 bitpos, bitsize);
  return 1;
}
3246 /* Unpack a field FIELDNO of the specified TYPE, from the anonymous
3247 object at VALADDR. See unpack_bits_as_long for more details. */
3250 unpack_field_as_long (struct type
*type
, const gdb_byte
*valaddr
, int fieldno
)
3252 int bitpos
= type
->field (fieldno
).loc_bitpos ();
3253 int bitsize
= type
->field (fieldno
).bitsize ();
3254 struct type
*field_type
= type
->field (fieldno
).type ();
3256 return unpack_bits_as_long (field_type
, valaddr
, bitpos
, bitsize
);
/* See value.h.  Unpack a bitfield of this value into DEST_VAL,
   propagating optimized-out / unavailable bit ranges.  */

void
value::unpack_bitfield (struct value *dest_val,
			LONGEST bitpos, LONGEST bitsize,
			const gdb_byte *valaddr, LONGEST embedded_offset)
  const
{
  enum bfd_endian byte_order;
  LONGEST num;
  int bit_offset, src_bit_offset, dst_bit_offset;
  struct type *field_type = dest_val->type ();

  byte_order = type_byte_order (field_type);

  /* First, unpack and sign extend the bitfield as if it was wholly
     valid.  Optimized out/unavailable bits are read as zero, but
     that's OK, as they'll end up marked below.  If the VAL is
     wholly-invalid we may have skipped allocating its contents,
     though.  See value::allocate_optimized_out.  */
  if (valaddr != NULL)
    {
      num = unpack_bits_as_long (field_type, valaddr + embedded_offset,
				 bitpos, bitsize);
      store_signed_integer (dest_val->contents_raw ().data (),
			    field_type->length (), byte_order, num);
    }

  /* Now copy the optimized out / unavailability ranges to the right
     bits.  */
  src_bit_offset = embedded_offset * TARGET_CHAR_BIT + bitpos;
  /* On big-endian targets the field occupies the high-order end of
     the destination.  */
  if (byte_order == BFD_ENDIAN_BIG)
    dst_bit_offset = field_type->length () * TARGET_CHAR_BIT - bitsize;
  else
    dst_bit_offset = 0;
  ranges_copy_adjusted (dest_val, dst_bit_offset, src_bit_offset, bitsize);
}
/* Return a new value with type TYPE, which is FIELDNO field of the
   object at VALADDR + EMBEDDEDOFFSET.  VALADDR points to the contents
   of VAL.  If the VAL's contents required to extract the bitfield
   from are unavailable/optimized out, the new value is
   correspondingly marked unavailable/optimized out.  */

struct value *
value_field_bitfield (struct type *type, int fieldno,
		      const gdb_byte *valaddr,
		      LONGEST embedded_offset, const struct value *val)
{
  int bitpos = type->field (fieldno).loc_bitpos ();
  int bitsize = type->field (fieldno).bitsize ();
  struct value *res_val = value::allocate (type->field (fieldno).type ());

  /* unpack_bitfield also propagates optimized-out/unavailable
     ranges into RES_VAL.  */
  val->unpack_bitfield (res_val, bitpos, bitsize, valaddr, embedded_offset);

  return res_val;
}
/* Modify the value of a bitfield.  ADDR points to a block of memory in
   target byte order; the bitfield starts in the byte pointed to.  FIELDVAL
   is the desired value of the field, in host byte order.  BITPOS and BITSIZE
   indicate which bits (in target bit order) comprise the bitfield.
   Requires 0 < BITSIZE <= lbits, 0 <= BITPOS % 8 + BITSIZE <= lbits, and
   0 <= BITPOS, where lbits is the size of a LONGEST in bits.  */

void
modify_field (struct type *type, gdb_byte *addr,
	      LONGEST fieldval, LONGEST bitpos, LONGEST bitsize)
{
  enum bfd_endian byte_order = type_byte_order (type);
  ULONGEST oword;
  ULONGEST mask = (ULONGEST) -1 >> (8 * sizeof (ULONGEST) - bitsize);
  LONGEST bytesize;

  /* Normalize BITPOS.  */
  addr += bitpos / 8;
  bitpos %= 8;

  /* If a negative fieldval fits in the field in question, chop
     off the sign extension bits.  */
  if ((~fieldval & ~(mask >> 1)) == 0)
    fieldval &= mask;

  /* Warn if value is too big to fit in the field in question.  */
  if (0 != (fieldval & ~mask))
    {
      /* FIXME: would like to include fieldval in the message, but
	 we don't have a sprintf_longest.  */
      warning (_("Value does not fit in %s bits."), plongest (bitsize));

      /* Truncate it, otherwise adjoining fields may be corrupted.  */
      fieldval &= mask;
    }

  /* Ensure no bytes outside of the modified ones get accessed as it may cause
     false valgrind reports.  */

  bytesize = (bitpos + bitsize + 7) / 8;
  oword = extract_unsigned_integer (addr, bytesize, byte_order);

  /* Shifting for bit field depends on endianness of the target machine.  */
  if (byte_order == BFD_ENDIAN_BIG)
    bitpos = bytesize * 8 - bitpos - bitsize;

  /* Clear the field's old bits, then merge in the new value.  */
  oword &= ~(mask << bitpos);
  oword |= fieldval << bitpos;

  store_unsigned_integer (addr, bytesize, byte_order, oword);
}
/* Pack NUM into BUF using a target format of TYPE.  */

void
pack_long (gdb_byte *buf, struct type *type, LONGEST num)
{
  LONGEST len;
  enum bfd_endian byte_order = type_byte_order (type);

  type = check_typedef (type);
  len = type->length ();

  switch (type->code ())
    {
    case TYPE_CODE_RANGE:
      /* Range types store values biased; apply the bias before
	 storing, then fall through to the integer case.  */
      num -= type->bounds ()->bias;
      [[fallthrough]];
    case TYPE_CODE_INT:
    case TYPE_CODE_CHAR:
    case TYPE_CODE_ENUM:
    case TYPE_CODE_FLAGS:
    case TYPE_CODE_BOOL:
    case TYPE_CODE_MEMBERPTR:
      if (type->bit_size_differs_p ())
	{
	  /* Sub-word integer: keep only BIT_SIZE bits and place them
	     at the type's bit offset.  */
	  unsigned bit_off = type->bit_offset ();
	  unsigned bit_size = type->bit_size ();
	  num &= ((ULONGEST) 1 << bit_size) - 1;
	  num <<= bit_off;
	}
      store_signed_integer (buf, len, byte_order, num);
      break;

    case TYPE_CODE_REF:
    case TYPE_CODE_RVALUE_REF:
    case TYPE_CODE_PTR:
      store_typed_address (buf, type, (CORE_ADDR) num);
      break;

    case TYPE_CODE_FLT:
    case TYPE_CODE_DECFLOAT:
      target_float_from_longest (buf, type, num);
      break;

    default:
      error (_("Unexpected type (%d) encountered for integer constant."),
	     type->code ());
    }
}
/* Pack NUM into BUF using a target format of TYPE.  Unsigned
   counterpart of pack_long.  */

void
pack_unsigned_long (gdb_byte *buf, struct type *type, ULONGEST num)
{
  LONGEST len;
  enum bfd_endian byte_order;

  type = check_typedef (type);
  len = type->length ();
  byte_order = type_byte_order (type);

  switch (type->code ())
    {
    case TYPE_CODE_INT:
    case TYPE_CODE_CHAR:
    case TYPE_CODE_ENUM:
    case TYPE_CODE_FLAGS:
    case TYPE_CODE_BOOL:
    case TYPE_CODE_RANGE:
    case TYPE_CODE_MEMBERPTR:
      if (type->bit_size_differs_p ())
	{
	  /* Sub-word integer: keep only BIT_SIZE bits and place them
	     at the type's bit offset.  */
	  unsigned bit_off = type->bit_offset ();
	  unsigned bit_size = type->bit_size ();
	  num &= ((ULONGEST) 1 << bit_size) - 1;
	  num <<= bit_off;
	}
      store_unsigned_integer (buf, len, byte_order, num);
      break;

    case TYPE_CODE_REF:
    case TYPE_CODE_RVALUE_REF:
    case TYPE_CODE_PTR:
      store_typed_address (buf, type, (CORE_ADDR) num);
      break;

    case TYPE_CODE_FLT:
    case TYPE_CODE_DECFLOAT:
      target_float_from_ulongest (buf, type, num);
      break;

    default:
      error (_("Unexpected type (%d) encountered "
	       "for unsigned integer constant."),
	     type->code ());
    }
}
3473 value::zero (struct type
*type
, enum lval_type lv
)
3475 struct value
*val
= value::allocate_lazy (type
);
3477 val
->set_lval (lv
== lval_computed
? not_lval
: lv
);
3478 val
->m_is_zero
= true;
3482 /* Convert C numbers into newly allocated values. */
3485 value_from_longest (struct type
*type
, LONGEST num
)
3487 struct value
*val
= value::allocate (type
);
3489 pack_long (val
->contents_raw ().data (), type
, num
);
3494 /* Convert C unsigned numbers into newly allocated values. */
3497 value_from_ulongest (struct type
*type
, ULONGEST num
)
3499 struct value
*val
= value::allocate (type
);
3501 pack_unsigned_long (val
->contents_raw ().data (), type
, num
);
/* See value.h.  Convert the arbitrary-precision integer V into a
   newly allocated value of TYPE, honoring range bias and sub-word bit
   size/offset.  */

struct value *
value_from_mpz (struct type *type, const gdb_mpz &v)
{
  struct type *real_type = check_typedef (type);

  /* VAL aliases either the caller's V or our local STORAGE copy; we
     only copy into STORAGE when V must be adjusted.  */
  const gdb_mpz *val = &v;
  gdb_mpz storage;
  if (real_type->code () == TYPE_CODE_RANGE && type->bounds ()->bias != 0)
    {
      storage = *val;
      val = &storage;
      storage -= type->bounds ()->bias;
    }

  if (type->bit_size_differs_p ())
    {
      unsigned bit_off = type->bit_offset ();
      unsigned bit_size = type->bit_size ();
      if (val != &storage)
	{
	  /* Not yet copied into STORAGE; do so now before mutating.  */
	  storage = *val;
	  val = &storage;
	}
      /* Keep only BIT_SIZE bits and shift them to the stored
	 position.  */
      storage.mask (bit_size);
      storage <<= bit_off;
    }

  struct value *result = value::allocate (type);
  val->truncate (result->contents_raw (), type_byte_order (type),
		 type->is_unsigned ());
  return result;
}
3543 /* Create a value representing a pointer of type TYPE to the address
3547 value_from_pointer (struct type
*type
, CORE_ADDR addr
)
3549 struct value
*val
= value::allocate (type
);
3551 store_typed_address (val
->contents_raw ().data (),
3552 check_typedef (type
), addr
);
3556 /* Create and return a value object of TYPE containing the value D. The
3557 TYPE must be of TYPE_CODE_FLT, and must be large enough to hold D once
3558 it is converted to target format. */
3561 value_from_host_double (struct type
*type
, double d
)
3563 struct value
*value
= value::allocate (type
);
3564 gdb_assert (type
->code () == TYPE_CODE_FLT
);
3565 target_float_from_host_double (value
->contents_raw ().data (),
3570 /* Create a value of type TYPE whose contents come from VALADDR, if it
3571 is non-null, and whose memory address (in the inferior) is
3572 ADDRESS. The type of the created value may differ from the passed
3573 type TYPE. Make sure to retrieve values new type after this call.
3574 Note that TYPE is not passed through resolve_dynamic_type; this is
3575 a special API intended for use only by Ada. */
3578 value_from_contents_and_address_unresolved (struct type
*type
,
3579 const gdb_byte
*valaddr
,
3584 if (valaddr
== NULL
)
3585 v
= value::allocate_lazy (type
);
3587 v
= value_from_contents (type
, valaddr
);
3588 v
->set_lval (lval_memory
);
3589 v
->set_address (address
);
/* Create a value of type TYPE whose contents come from VALADDR, if it
   is non-null, and whose memory address (in the inferior) is
   ADDRESS.  The type of the created value may differ from the passed
   type TYPE.  Make sure to retrieve values new type after this call.  */

struct value *
value_from_contents_and_address (struct type *type,
				 const gdb_byte *valaddr,
				 CORE_ADDR address,
				 const frame_info_ptr &frame)
{
  gdb::array_view<const gdb_byte> view;
  if (valaddr != nullptr)
    view = gdb::make_array_view (valaddr, type->length ());
  /* Resolve any dynamic properties of TYPE against the actual
     contents/address before creating the value.  */
  struct type *resolved_type = resolve_dynamic_type (type, view, address,
						     &frame);
  struct type *resolved_type_no_typedef = check_typedef (resolved_type);

  struct value *v;

  if (resolved_type_no_typedef->code () == TYPE_CODE_ARRAY
      && resolved_type_no_typedef->bound_optimized_out ())
    {
      /* Resolution found that the bounds are optimized out.  In this
	 case, mark the array itself as optimized-out.  */
      v = value::allocate_optimized_out (resolved_type);
    }
  else if (valaddr == nullptr)
    v = value::allocate_lazy (resolved_type);
  else
    v = value_from_contents (resolved_type, valaddr);
  /* A constant data location resolved from TYPE's dynamic properties
     overrides the caller-supplied ADDRESS.  */
  if (TYPE_DATA_LOCATION (resolved_type_no_typedef) != NULL
      && TYPE_DATA_LOCATION (resolved_type_no_typedef)->is_constant ())
    address = TYPE_DATA_LOCATION_ADDR (resolved_type_no_typedef);
  v->set_lval (lval_memory);
  v->set_address (address);
  return v;
}
3631 /* Create a value of type TYPE holding the contents CONTENTS.
3632 The new value is `not_lval'. */
3635 value_from_contents (struct type
*type
, const gdb_byte
*contents
)
3637 struct value
*result
;
3639 result
= value::allocate (type
);
3640 memcpy (result
->contents_raw ().data (), contents
, type
->length ());
/* Extract a value from the history file.  Input will be of the form
   $digits or $$digits.  See block comment above 'write_dollar_variable'
   for details.  Returns NULL if H does not denote a history
   reference; otherwise advances *ENDP past the parsed text and
   returns the referenced history value.  */

struct value *
value_from_history_ref (const char *h, const char **endp)
{
  int index, len;

  if (h[0] == '$')
    len = 1;
  else
    return NULL;

  if (h[1] == '$')
    len = 2;

  /* Find length of numeral string.  */
  for (; isdigit (h[len]); len++)
    ;

  /* Make sure numeral string is not part of an identifier.  */
  if (h[len] == '_' || isalpha (h[len]))
    return NULL;

  /* Now collect the index value.  */
  if (h[1] == '$')
    {
      if (len == 2)
	{
	  /* For some bizarre reason, "$$" is equivalent to "$$1",
	     rather than to "$$0" as it ought to be!  */
	  index = -1;
	  *endp += len;
	}
      else
	{
	  char *local_end;

	  /* "$$N" is N values back from the end, hence negative.  */
	  index = -strtol (&h[2], &local_end, 10);
	  *endp = local_end;
	}
    }
  else
    {
      if (len == 1)
	{
	  /* "$" is equivalent to "$0".  */
	  index = 0;
	  *endp += len;
	}
      else
	{
	  char *local_end;

	  index = strtol (&h[1], &local_end, 10);
	  *endp = local_end;
	}
    }

  return access_value_history (index);
}
/* Get the component value (offset by OFFSET bytes) of a struct or
   union WHOLE.  Component's type is TYPE.  */

struct value *
value_from_component (struct value *whole, struct type *type, LONGEST offset)
{
  struct value *v;

  if (whole->lval () == lval_memory && whole->lazy ())
    /* WHOLE hasn't been fetched yet; make the component lazy too so
       no contents need copying.  */
    v = value::allocate_lazy (type);
  else
    {
      v = value::allocate (type);
      whole->contents_copy (v, v->embedded_offset (),
			    whole->embedded_offset () + offset,
			    type_length_units (type));
    }
  v->set_offset (whole->offset () + offset + whole->embedded_offset ());
  v->set_component_location (whole);

  return v;
}
/* See value.h.  Like value_from_component, but the component is
   described by a bit offset and bit length rather than bytes.  */

struct value *
value::from_component_bitsize (struct type *type,
			       LONGEST bit_offset, LONGEST bit_length)
{
  gdb_assert (!lazy ());

  /* Preserve lvalue-ness if possible.  This is needed to avoid
     array-printing failures (including crashes) when printing Ada
     arrays in programs compiled with -fgnat-encodings=all.  */
  if ((bit_offset % TARGET_CHAR_BIT) == 0
      && (bit_length % TARGET_CHAR_BIT) == 0
      && bit_length == TARGET_CHAR_BIT * type->length ())
    return value_from_component (this, type, bit_offset / TARGET_CHAR_BIT);

  struct value *v = value::allocate (type);

  LONGEST dst_offset = TARGET_CHAR_BIT * v->embedded_offset ();
  /* Scalars on big-endian targets occupy the high-order end of the
     destination.  */
  if (is_scalar_type (type) && type_byte_order (type) == BFD_ENDIAN_BIG)
    dst_offset += TARGET_CHAR_BIT * type->length () - bit_length;

  contents_copy_raw_bitwise (v, dst_offset,
			     bit_offset
			     + TARGET_CHAR_BIT * embedded_offset (),
			     bit_length);
  return v;
}
3761 coerce_ref_if_computed (const struct value
*arg
)
3763 const struct lval_funcs
*funcs
;
3765 if (!TYPE_IS_REFERENCE (check_typedef (arg
->type ())))
3768 if (arg
->lval () != lval_computed
)
3771 funcs
= arg
->computed_funcs ();
3772 if (funcs
->coerce_ref
== NULL
)
3775 return funcs
->coerce_ref (arg
);
3778 /* Look at value.h for description. */
3781 readjust_indirect_value_type (struct value
*value
, struct type
*enc_type
,
3782 const struct type
*original_type
,
3783 struct value
*original_value
,
3784 CORE_ADDR original_value_address
)
3786 gdb_assert (original_type
->is_pointer_or_reference ());
3788 struct type
*original_target_type
= original_type
->target_type ();
3789 gdb::array_view
<const gdb_byte
> view
;
3790 struct type
*resolved_original_target_type
3791 = resolve_dynamic_type (original_target_type
, view
,
3792 original_value_address
);
3794 /* Re-adjust type. */
3795 value
->deprecated_set_type (resolved_original_target_type
);
3797 /* Add embedding info. */
3798 value
->set_enclosing_type (enc_type
);
3799 value
->set_embedded_offset (original_value
->pointed_to_offset ());
3801 /* We may be pointing to an object of some derived type. */
3802 return value_full_object (value
, NULL
, 0, 0, 0);
3806 coerce_ref (struct value
*arg
)
3808 struct type
*value_type_arg_tmp
= check_typedef (arg
->type ());
3809 struct value
*retval
;
3810 struct type
*enc_type
;
3812 retval
= coerce_ref_if_computed (arg
);
3816 if (!TYPE_IS_REFERENCE (value_type_arg_tmp
))
3819 enc_type
= check_typedef (arg
->enclosing_type ());
3820 enc_type
= enc_type
->target_type ();
3822 CORE_ADDR addr
= unpack_pointer (arg
->type (), arg
->contents ().data ());
3823 retval
= value_at_lazy (enc_type
, addr
);
3824 enc_type
= retval
->type ();
3825 return readjust_indirect_value_type (retval
, enc_type
, value_type_arg_tmp
,
3830 coerce_array (struct value
*arg
)
3834 arg
= coerce_ref (arg
);
3835 type
= check_typedef (arg
->type ());
3837 switch (type
->code ())
3839 case TYPE_CODE_ARRAY
:
3840 if (!type
->is_vector () && current_language
->c_style_arrays_p ())
3841 arg
= value_coerce_array (arg
);
3843 case TYPE_CODE_FUNC
:
3844 arg
= value_coerce_function (arg
);
3851 /* Return the return value convention that will be used for the
3854 enum return_value_convention
3855 struct_return_convention (struct gdbarch
*gdbarch
,
3856 struct value
*function
, struct type
*value_type
)
3858 enum type_code code
= value_type
->code ();
3860 if (code
== TYPE_CODE_ERROR
)
3861 error (_("Function return type unknown."));
3863 /* Probe the architecture for the return-value convention. */
3864 return gdbarch_return_value_as_value (gdbarch
, function
, value_type
,
3868 /* Return true if the function returning the specified type is using
3869 the convention of returning structures in memory (passing in the
3870 address as a hidden first parameter). */
3873 using_struct_return (struct gdbarch
*gdbarch
,
3874 struct value
*function
, struct type
*value_type
)
3876 if (value_type
->code () == TYPE_CODE_VOID
)
3877 /* A void return value is never in memory. See also corresponding
3878 code in "print_return_value". */
3881 return (struct_return_convention (gdbarch
, function
, value_type
)
3882 != RETURN_VALUE_REGISTER_CONVENTION
);
3888 value::fetch_lazy_bitfield ()
3890 gdb_assert (bitsize () != 0);
3892 /* To read a lazy bitfield, read the entire enclosing value. This
3893 prevents reading the same block of (possibly volatile) memory once
3894 per bitfield. It would be even better to read only the containing
3895 word, but we have no way to record that just specific bits of a
3896 value have been fetched. */
3897 struct value
*parent
= this->parent ();
3899 if (parent
->lazy ())
3900 parent
->fetch_lazy ();
3902 parent
->unpack_bitfield (this, bitpos (), bitsize (),
3903 parent
->contents_for_printing ().data (),
3910 value::fetch_lazy_memory ()
3912 gdb_assert (m_lval
== lval_memory
);
3914 CORE_ADDR addr
= address ();
3915 struct type
*type
= check_typedef (enclosing_type ());
3917 /* Figure out how much we should copy from memory. Usually, this is just
3918 the size of the type, but, for arrays, we might only be loading a
3919 small part of the array (this is only done for very large arrays). */
3921 if (m_limited_length
> 0)
3923 gdb_assert (this->type ()->code () == TYPE_CODE_ARRAY
);
3924 len
= m_limited_length
;
3926 else if (type
->length () > 0)
3927 len
= type_length_units (type
);
3929 gdb_assert (len
>= 0);
3932 read_value_memory (this, 0, stack (), addr
,
3933 contents_all_raw ().data (), len
);
3939 value::fetch_lazy_register ()
3941 struct type
*type
= check_typedef (this->type ());
3942 struct value
*new_val
= this;
3944 scoped_value_mark mark
;
3946 /* Offsets are not supported here; lazy register values must
3947 refer to the entire register. */
3948 gdb_assert (offset () == 0);
3950 while (new_val
->lval () == lval_register
&& new_val
->lazy ())
3952 frame_id next_frame_id
= new_val
->next_frame_id ();
3953 frame_info_ptr next_frame
= frame_find_by_id (next_frame_id
);
3954 gdb_assert (next_frame
!= NULL
);
3956 int regnum
= new_val
->regnum ();
3958 /* Convertible register routines are used for multi-register
3959 values and for interpretation in different types
3960 (e.g. float or int from a double register). Lazy
3961 register values should have the register's natural type,
3962 so they do not apply. */
3963 gdb_assert (!gdbarch_convert_register_p (get_frame_arch (next_frame
),
3966 new_val
= frame_unwind_register_value (next_frame
, regnum
);
3968 /* If we get another lazy lval_register value, it means the
3969 register is found by reading it from NEXT_FRAME's next frame.
3970 frame_unwind_register_value should never return a value with
3971 the frame id pointing to NEXT_FRAME. If it does, it means we
3972 either have two consecutive frames with the same frame id
3973 in the frame chain, or some code is trying to unwind
3974 behind get_prev_frame's back (e.g., a frame unwind
3975 sniffer trying to unwind), bypassing its validations. In
3976 any case, it should always be an internal error to end up
3977 in this situation. */
3978 if (new_val
->lval () == lval_register
3980 && new_val
->next_frame_id () == next_frame_id
)
3981 internal_error (_("infinite loop while fetching a register"));
3984 /* If it's still lazy (for instance, a saved register on the
3985 stack), fetch it. */
3986 if (new_val
->lazy ())
3987 new_val
->fetch_lazy ();
3989 /* Copy the contents and the unavailability/optimized-out
3990 meta-data from NEW_VAL to VAL. */
3992 new_val
->contents_copy (this, embedded_offset (),
3993 new_val
->embedded_offset (),
3994 type_length_units (type
));
3998 frame_info_ptr frame
= frame_find_by_id (this->next_frame_id ());
3999 frame
= get_prev_frame_always (frame
);
4000 int regnum
= this->regnum ();
4001 gdbarch
*gdbarch
= get_frame_arch (frame
);
4003 string_file debug_file
;
4004 gdb_printf (&debug_file
,
4005 "(frame=%d, regnum=%d(%s), ...) ",
4006 frame_relative_level (frame
), regnum
,
4007 user_reg_map_regnum_to_name (gdbarch
, regnum
));
4009 gdb_printf (&debug_file
, "->");
4010 if (new_val
->optimized_out ())
4012 gdb_printf (&debug_file
, " ");
4013 val_print_optimized_out (new_val
, &debug_file
);
4017 if (new_val
->lval () == lval_register
)
4018 gdb_printf (&debug_file
, " register=%d", new_val
->regnum ());
4019 else if (new_val
->lval () == lval_memory
)
4020 gdb_printf (&debug_file
, " address=%s",
4022 new_val
->address ()));
4024 gdb_printf (&debug_file
, " computed");
4026 if (new_val
->entirely_available ())
4029 gdb::array_view
<const gdb_byte
> buf
= new_val
->contents ();
4031 gdb_printf (&debug_file
, " bytes=");
4032 gdb_printf (&debug_file
, "[");
4033 for (i
= 0; i
< register_size (gdbarch
, regnum
); i
++)
4034 gdb_printf (&debug_file
, "%02x", buf
[i
]);
4035 gdb_printf (&debug_file
, "]");
4037 else if (new_val
->entirely_unavailable ())
4038 gdb_printf (&debug_file
, " unavailable");
4040 gdb_printf (&debug_file
, " partly unavailable");
4043 frame_debug_printf ("%s", debug_file
.c_str ());
4050 value::fetch_lazy ()
4052 gdb_assert (lazy ());
4053 allocate_contents (true);
4054 /* A value is either lazy, or fully fetched. The
4055 availability/validity is only established as we try to fetch a
4057 gdb_assert (m_optimized_out
.empty ());
4058 gdb_assert (m_unavailable
.empty ());
4063 else if (bitsize ())
4064 fetch_lazy_bitfield ();
4065 else if (this->lval () == lval_memory
)
4066 fetch_lazy_memory ();
4067 else if (this->lval () == lval_register
)
4068 fetch_lazy_register ();
4069 else if (this->lval () == lval_computed
4070 && computed_funcs ()->read
!= NULL
)
4071 computed_funcs ()->read (this);
4073 internal_error (_("Unexpected lazy value type."));
4081 pseudo_from_raw_part (const frame_info_ptr
&next_frame
, int pseudo_reg_num
,
4082 int raw_reg_num
, int raw_offset
)
4084 value
*pseudo_reg_val
4085 = value::allocate_register (next_frame
, pseudo_reg_num
);
4086 value
*raw_reg_val
= value_of_register (raw_reg_num
, next_frame
);
4087 raw_reg_val
->contents_copy (pseudo_reg_val
, 0, raw_offset
,
4088 pseudo_reg_val
->type ()->length ());
4089 return pseudo_reg_val
;
4095 pseudo_to_raw_part (const frame_info_ptr
&next_frame
,
4096 gdb::array_view
<const gdb_byte
> pseudo_buf
,
4097 int raw_reg_num
, int raw_offset
)
4100 = register_size (frame_unwind_arch (next_frame
), raw_reg_num
);
4102 /* When overflowing a register, put_frame_register_bytes writes to the
4103 subsequent registers. We don't want that behavior here, so make sure
4104 the write is wholly within register RAW_REG_NUM. */
4105 gdb_assert (raw_offset
+ pseudo_buf
.size () <= raw_reg_size
);
4106 put_frame_register_bytes (next_frame
, raw_reg_num
, raw_offset
, pseudo_buf
);
4112 pseudo_from_concat_raw (const frame_info_ptr
&next_frame
, int pseudo_reg_num
,
4113 int raw_reg_1_num
, int raw_reg_2_num
)
4115 value
*pseudo_reg_val
4116 = value::allocate_register (next_frame
, pseudo_reg_num
);
4119 value
*raw_reg_1_val
= value_of_register (raw_reg_1_num
, next_frame
);
4120 raw_reg_1_val
->contents_copy (pseudo_reg_val
, dst_offset
, 0,
4121 raw_reg_1_val
->type ()->length ());
4122 dst_offset
+= raw_reg_1_val
->type ()->length ();
4124 value
*raw_reg_2_val
= value_of_register (raw_reg_2_num
, next_frame
);
4125 raw_reg_2_val
->contents_copy (pseudo_reg_val
, dst_offset
, 0,
4126 raw_reg_2_val
->type ()->length ());
4127 dst_offset
+= raw_reg_2_val
->type ()->length ();
4129 gdb_assert (dst_offset
== pseudo_reg_val
->type ()->length ());
4131 return pseudo_reg_val
;
4137 pseudo_to_concat_raw (const frame_info_ptr
&next_frame
,
4138 gdb::array_view
<const gdb_byte
> pseudo_buf
,
4139 int raw_reg_1_num
, int raw_reg_2_num
)
4142 gdbarch
*arch
= frame_unwind_arch (next_frame
);
4144 int raw_reg_1_size
= register_size (arch
, raw_reg_1_num
);
4145 put_frame_register (next_frame
, raw_reg_1_num
,
4146 pseudo_buf
.slice (src_offset
, raw_reg_1_size
));
4147 src_offset
+= raw_reg_1_size
;
4149 int raw_reg_2_size
= register_size (arch
, raw_reg_2_num
);
4150 put_frame_register (next_frame
, raw_reg_2_num
,
4151 pseudo_buf
.slice (src_offset
, raw_reg_2_size
));
4152 src_offset
+= raw_reg_2_size
;
4154 gdb_assert (src_offset
== pseudo_buf
.size ());
4160 pseudo_from_concat_raw (const frame_info_ptr
&next_frame
, int pseudo_reg_num
,
4161 int raw_reg_1_num
, int raw_reg_2_num
,
4164 value
*pseudo_reg_val
4165 = value::allocate_register (next_frame
, pseudo_reg_num
);
4168 value
*raw_reg_1_val
= value_of_register (raw_reg_1_num
, next_frame
);
4169 raw_reg_1_val
->contents_copy (pseudo_reg_val
, dst_offset
, 0,
4170 raw_reg_1_val
->type ()->length ());
4171 dst_offset
+= raw_reg_1_val
->type ()->length ();
4173 value
*raw_reg_2_val
= value_of_register (raw_reg_2_num
, next_frame
);
4174 raw_reg_2_val
->contents_copy (pseudo_reg_val
, dst_offset
, 0,
4175 raw_reg_2_val
->type ()->length ());
4176 dst_offset
+= raw_reg_2_val
->type ()->length ();
4178 value
*raw_reg_3_val
= value_of_register (raw_reg_3_num
, next_frame
);
4179 raw_reg_3_val
->contents_copy (pseudo_reg_val
, dst_offset
, 0,
4180 raw_reg_3_val
->type ()->length ());
4181 dst_offset
+= raw_reg_3_val
->type ()->length ();
4183 gdb_assert (dst_offset
== pseudo_reg_val
->type ()->length ());
4185 return pseudo_reg_val
;
4191 pseudo_to_concat_raw (const frame_info_ptr
&next_frame
,
4192 gdb::array_view
<const gdb_byte
> pseudo_buf
,
4193 int raw_reg_1_num
, int raw_reg_2_num
, int raw_reg_3_num
)
4196 gdbarch
*arch
= frame_unwind_arch (next_frame
);
4198 int raw_reg_1_size
= register_size (arch
, raw_reg_1_num
);
4199 put_frame_register (next_frame
, raw_reg_1_num
,
4200 pseudo_buf
.slice (src_offset
, raw_reg_1_size
));
4201 src_offset
+= raw_reg_1_size
;
4203 int raw_reg_2_size
= register_size (arch
, raw_reg_2_num
);
4204 put_frame_register (next_frame
, raw_reg_2_num
,
4205 pseudo_buf
.slice (src_offset
, raw_reg_2_size
));
4206 src_offset
+= raw_reg_2_size
;
4208 int raw_reg_3_size
= register_size (arch
, raw_reg_3_num
);
4209 put_frame_register (next_frame
, raw_reg_3_num
,
4210 pseudo_buf
.slice (src_offset
, raw_reg_3_size
));
4211 src_offset
+= raw_reg_3_size
;
4213 gdb_assert (src_offset
== pseudo_buf
.size ());
4216 /* Implementation of the convenience function $_isvoid. */
4218 static struct value
*
4219 isvoid_internal_fn (struct gdbarch
*gdbarch
,
4220 const struct language_defn
*language
,
4221 void *cookie
, int argc
, struct value
**argv
)
4226 error (_("You must provide one argument for $_isvoid."));
4228 ret
= argv
[0]->type ()->code () == TYPE_CODE_VOID
;
4230 return value_from_longest (builtin_type (gdbarch
)->builtin_int
, ret
);
4233 /* Implementation of the convenience function $_creal. Extracts the
4234 real part from a complex number. */
4236 static struct value
*
4237 creal_internal_fn (struct gdbarch
*gdbarch
,
4238 const struct language_defn
*language
,
4239 void *cookie
, int argc
, struct value
**argv
)
4242 error (_("You must provide one argument for $_creal."));
4244 value
*cval
= argv
[0];
4245 type
*ctype
= check_typedef (cval
->type ());
4246 if (ctype
->code () != TYPE_CODE_COMPLEX
)
4247 error (_("expected a complex number"));
4248 return value_real_part (cval
);
4251 /* Implementation of the convenience function $_cimag. Extracts the
4252 imaginary part from a complex number. */
4254 static struct value
*
4255 cimag_internal_fn (struct gdbarch
*gdbarch
,
4256 const struct language_defn
*language
,
4257 void *cookie
, int argc
,
4258 struct value
**argv
)
4261 error (_("You must provide one argument for $_cimag."));
4263 value
*cval
= argv
[0];
4264 type
*ctype
= check_typedef (cval
->type ());
4265 if (ctype
->code () != TYPE_CODE_COMPLEX
)
4266 error (_("expected a complex number"));
4267 return value_imaginary_part (cval
);
4274 /* Test the ranges_contain function. */
4277 test_ranges_contain ()
4279 std::vector
<range
> ranges
;
4285 ranges
.push_back (r
);
4290 ranges
.push_back (r
);
4293 SELF_CHECK (!ranges_contain (ranges
, 2, 5));
4295 SELF_CHECK (ranges_contain (ranges
, 9, 5));
4297 SELF_CHECK (ranges_contain (ranges
, 10, 2));
4299 SELF_CHECK (ranges_contain (ranges
, 10, 5));
4301 SELF_CHECK (ranges_contain (ranges
, 13, 6));
4303 SELF_CHECK (ranges_contain (ranges
, 14, 5));
4305 SELF_CHECK (!ranges_contain (ranges
, 15, 4));
4307 SELF_CHECK (!ranges_contain (ranges
, 16, 4));
4309 SELF_CHECK (ranges_contain (ranges
, 16, 6));
4311 SELF_CHECK (ranges_contain (ranges
, 21, 1));
4313 SELF_CHECK (ranges_contain (ranges
, 21, 5));
4315 SELF_CHECK (!ranges_contain (ranges
, 26, 3));
4318 /* Check that RANGES contains the same ranges as EXPECTED. */
4321 check_ranges_vector (gdb::array_view
<const range
> ranges
,
4322 gdb::array_view
<const range
> expected
)
4324 return ranges
== expected
;
4327 /* Test the insert_into_bit_range_vector function. */
4330 test_insert_into_bit_range_vector ()
4332 std::vector
<range
> ranges
;
4336 insert_into_bit_range_vector (&ranges
, 10, 5);
4337 static const range expected
[] = {
4340 SELF_CHECK (check_ranges_vector (ranges
, expected
));
4345 insert_into_bit_range_vector (&ranges
, 11, 4);
4346 static const range expected
= {10, 5};
4347 SELF_CHECK (check_ranges_vector (ranges
, expected
));
4350 /* [10, 14] [20, 24] */
4352 insert_into_bit_range_vector (&ranges
, 20, 5);
4353 static const range expected
[] = {
4357 SELF_CHECK (check_ranges_vector (ranges
, expected
));
4360 /* [10, 14] [17, 24] */
4362 insert_into_bit_range_vector (&ranges
, 17, 5);
4363 static const range expected
[] = {
4367 SELF_CHECK (check_ranges_vector (ranges
, expected
));
4370 /* [2, 8] [10, 14] [17, 24] */
4372 insert_into_bit_range_vector (&ranges
, 2, 7);
4373 static const range expected
[] = {
4378 SELF_CHECK (check_ranges_vector (ranges
, expected
));
4381 /* [2, 14] [17, 24] */
4383 insert_into_bit_range_vector (&ranges
, 9, 1);
4384 static const range expected
[] = {
4388 SELF_CHECK (check_ranges_vector (ranges
, expected
));
4391 /* [2, 14] [17, 24] */
4393 insert_into_bit_range_vector (&ranges
, 9, 1);
4394 static const range expected
[] = {
4398 SELF_CHECK (check_ranges_vector (ranges
, expected
));
4403 insert_into_bit_range_vector (&ranges
, 4, 30);
4404 static const range expected
= {2, 32};
4405 SELF_CHECK (check_ranges_vector (ranges
, expected
));
4412 type
*type
= builtin_type (current_inferior ()->arch ())->builtin_int
;
4414 /* Verify that we can copy an entirely optimized out value, that may not have
4415 its contents allocated. */
4416 value_ref_ptr val
= release_value (value::allocate_optimized_out (type
));
4417 value_ref_ptr copy
= release_value (val
->copy ());
4419 SELF_CHECK (val
->entirely_optimized_out ());
4420 SELF_CHECK (copy
->entirely_optimized_out ());
4423 } /* namespace selftests */
4424 #endif /* GDB_SELF_TEST */
4426 void _initialize_values ();
4428 _initialize_values ()
4430 cmd_list_element
*show_convenience_cmd
4431 = add_cmd ("convenience", no_class
, show_convenience
, _("\
4432 Debugger convenience (\"$foo\") variables and functions.\n\
4433 Convenience variables are created when you assign them values;\n\
4434 thus, \"set $foo=1\" gives \"$foo\" the value 1. Values may be any type.\n\
4436 A few convenience variables are given values automatically:\n\
4437 \"$_\"holds the last address examined with \"x\" or \"info lines\",\n\
4438 \"$__\" holds the contents of the last address examined with \"x\"."
4441 Convenience functions are defined via the Python API."
4444 add_alias_cmd ("conv", show_convenience_cmd
, no_class
, 1, &showlist
);
4446 add_cmd ("values", no_set_class
, show_values
, _("\
4447 Elements of value history around item number IDX (or last ten)."),
4450 add_com ("init-if-undefined", class_vars
, init_if_undefined_command
, _("\
4451 Initialize a convenience variable if necessary.\n\
4452 init-if-undefined VARIABLE = EXPRESSION\n\
4453 Set an internal VARIABLE to the result of the EXPRESSION if it does not\n\
4454 exist or does not contain a value. The EXPRESSION is not evaluated if the\n\
4455 VARIABLE is already initialized."));
4457 add_prefix_cmd ("function", no_class
, function_command
, _("\
4458 Placeholder command for showing help on convenience functions."),
4459 &functionlist
, 0, &cmdlist
);
4461 add_internal_function ("_isvoid", _("\
4462 Check whether an expression is void.\n\
4463 Usage: $_isvoid (expression)\n\
4464 Return 1 if the expression is void, zero otherwise."),
4465 isvoid_internal_fn
, NULL
);
4467 add_internal_function ("_creal", _("\
4468 Extract the real part of a complex number.\n\
4469 Usage: $_creal (expression)\n\
4470 Return the real part of a complex number, the type depends on the\n\
4471 type of a complex number."),
4472 creal_internal_fn
, NULL
);
4474 add_internal_function ("_cimag", _("\
4475 Extract the imaginary part of a complex number.\n\
4476 Usage: $_cimag (expression)\n\
4477 Return the imaginary part of a complex number, the type depends on the\n\
4478 type of a complex number."),
4479 cimag_internal_fn
, NULL
);
4481 add_setshow_zuinteger_unlimited_cmd ("max-value-size",
4482 class_support
, &max_value_size
, _("\
4483 Set maximum sized value gdb will load from the inferior."), _("\
4484 Show maximum sized value gdb will load from the inferior."), _("\
4485 Use this to control the maximum size, in bytes, of a value that gdb\n\
4486 will load from the inferior. Setting this value to 'unlimited'\n\
4487 disables checking.\n\
4488 Setting this does not invalidate already allocated values, it only\n\
4489 prevents future values, larger than this size, from being allocated."),
4491 show_max_value_size
,
4492 &setlist
, &showlist
);
4493 set_show_commands vsize_limit
4494 = add_setshow_zuinteger_unlimited_cmd ("varsize-limit", class_support
,
4495 &max_value_size
, _("\
4496 Set the maximum number of bytes allowed in a variable-size object."), _("\
4497 Show the maximum number of bytes allowed in a variable-size object."), _("\
4498 Attempts to access an object whose size is not a compile-time constant\n\
4499 and exceeds this limit will cause an error."),
4500 NULL
, NULL
, &setlist
, &showlist
);
4501 deprecate_cmd (vsize_limit
.set
, "set max-value-size");
4504 selftests::register_test ("ranges_contain", selftests::test_ranges_contain
);
4505 selftests::register_test ("insert_into_bit_range_vector",
4506 selftests::test_insert_into_bit_range_vector
);
4507 selftests::register_test ("value_copy", selftests::test_value_copy
);
4510 /* Destroy any values currently allocated in a final cleanup instead
4511 of leaving it to global destructors, because that may be too
4512 late. For example, the destructors of xmethod values call into
4513 the Python runtime. */
4514 add_final_cleanup ([] ()
4516 all_values
.clear ();