1 /* Low level packing and unpacking of values for GDB, the GNU Debugger.
3 Copyright (C) 1986-2024 Free Software Foundation, Inc.
5 This file is part of GDB.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20 #include "arch-utils.h"
32 #include "target-float.h"
35 #include "cli/cli-decode.h"
36 #include "extension.h"
38 #include "tracepoint.h"
40 #include "user-regs.h"
46 #include "completer.h"
47 #include "gdbsupport/selftest.h"
48 #include "gdbsupport/array-view.h"
49 #include "cli/cli-style.h"
54 /* Definition of a user function. */
55 struct internal_function
57 /* The name of the function. It is a bit odd to have this in the
58 function itself -- the user might use a differently-named
59 convenience variable to hold the function. */
63 internal_function_fn handler
;
65 /* User data for the handler. */
69 /* Returns true if the ranges defined by [offset1, offset1+len1) and
70 [offset2, offset2+len2) overlap. */
73 ranges_overlap (LONGEST offset1
, ULONGEST len1
,
74 LONGEST offset2
, ULONGEST len2
)
78 l
= std::max (offset1
, offset2
);
79 h
= std::min (offset1
+ len1
, offset2
+ len2
);
83 /* Returns true if RANGES contains any range that overlaps [OFFSET,
87 ranges_contain (const std::vector
<range
> &ranges
, LONGEST offset
,
95 /* We keep ranges sorted by offset and coalesce overlapping and
96 contiguous ranges, so to check if a range list contains a given
97 range, we can do a binary search for the position the given range
98 would be inserted if we only considered the starting OFFSET of
99 ranges. We call that position I. Since we also have LENGTH to
100 care for (this is a range afterall), we need to check if the
101 _previous_ range overlaps the I range. E.g.,
105 |---| |---| |------| ... |--|
110 In the case above, the binary search would return `I=1', meaning,
111 this OFFSET should be inserted at position 1, and the current
112 position 1 should be pushed further (and before 2). But, `0'
115 Then we need to check if the I range overlaps the I range itself.
120 |---| |---| |-------| ... |--|
127 auto i
= std::lower_bound (ranges
.begin (), ranges
.end (), what
);
129 if (i
> ranges
.begin ())
131 const struct range
&bef
= *(i
- 1);
133 if (ranges_overlap (bef
.offset
, bef
.length
, offset
, length
))
137 if (i
< ranges
.end ())
139 const struct range
&r
= *i
;
141 if (ranges_overlap (r
.offset
, r
.length
, offset
, length
))
148 static struct cmd_list_element
*functionlist
;
152 if (this->lval () == lval_computed
)
154 const struct lval_funcs
*funcs
= m_location
.computed
.funcs
;
156 if (funcs
->free_closure
)
157 funcs
->free_closure (this);
159 else if (this->lval () == lval_xcallable
)
160 delete m_location
.xm_worker
;
168 return type ()->arch ();
172 value::bits_available (LONGEST offset
, ULONGEST length
) const
174 gdb_assert (!m_lazy
);
176 /* Don't pretend we have anything available there in the history beyond
177 the boundaries of the value recorded. It's not like inferior memory
178 where there is actual stuff underneath. */
179 ULONGEST val_len
= TARGET_CHAR_BIT
* enclosing_type ()->length ();
180 return !((m_in_history
181 && (offset
< 0 || offset
+ length
> val_len
))
182 || ranges_contain (m_unavailable
, offset
, length
));
186 value::bytes_available (LONGEST offset
, ULONGEST length
) const
188 ULONGEST sign
= (1ULL << (sizeof (ULONGEST
) * 8 - 1)) / TARGET_CHAR_BIT
;
189 ULONGEST mask
= (sign
<< 1) - 1;
191 if (offset
!= ((offset
& mask
) ^ sign
) - sign
192 || length
!= ((length
& mask
) ^ sign
) - sign
193 || (length
> 0 && (~offset
& (offset
+ length
- 1) & sign
) != 0))
194 error (_("Integer overflow in data location calculation"));
196 return bits_available (offset
* TARGET_CHAR_BIT
, length
* TARGET_CHAR_BIT
);
200 value::bits_any_optimized_out (int bit_offset
, int bit_length
) const
202 gdb_assert (!m_lazy
);
204 return ranges_contain (m_optimized_out
, bit_offset
, bit_length
);
208 value::entirely_available ()
210 /* We can only tell whether the whole value is available when we try
215 if (m_unavailable
.empty ())
223 value::entirely_covered_by_range_vector (const std::vector
<range
> &ranges
)
225 /* We can only tell whether the whole value is optimized out /
226 unavailable when we try to read it. */
230 if (ranges
.size () == 1)
232 const struct range
&t
= ranges
[0];
235 && t
.length
== TARGET_CHAR_BIT
* enclosing_type ()->length ())
242 /* Insert into the vector pointed to by VECTORP the bit range starting of
243 OFFSET bits, and extending for the next LENGTH bits. */
246 insert_into_bit_range_vector (std::vector
<range
> *vectorp
,
247 LONGEST offset
, ULONGEST length
)
251 /* Insert the range sorted. If there's overlap or the new range
252 would be contiguous with an existing range, merge. */
254 newr
.offset
= offset
;
255 newr
.length
= length
;
257 /* Do a binary search for the position the given range would be
258 inserted if we only considered the starting OFFSET of ranges.
259 Call that position I. Since we also have LENGTH to care for
260 (this is a range afterall), we need to check if the _previous_
261 range overlaps the I range. E.g., calling R the new range:
263 #1 - overlaps with previous
267 |---| |---| |------| ... |--|
272 In the case #1 above, the binary search would return `I=1',
273 meaning, this OFFSET should be inserted at position 1, and the
274 current position 1 should be pushed further (and become 2). But,
275 note that `0' overlaps with R, so we want to merge them.
277 A similar consideration needs to be taken if the new range would
278 be contiguous with the previous range:
280 #2 - contiguous with previous
284 |--| |---| |------| ... |--|
289 If there's no overlap with the previous range, as in:
291 #3 - not overlapping and not contiguous
295 |--| |---| |------| ... |--|
302 #4 - R is the range with lowest offset
306 |--| |---| |------| ... |--|
311 ... we just push the new range to I.
313 All the 4 cases above need to consider that the new range may
314 also overlap several of the ranges that follow, or that R may be
315 contiguous with the following range, and merge. E.g.,
317 #5 - overlapping following ranges
320 |------------------------|
321 |--| |---| |------| ... |--|
330 |--| |---| |------| ... |--|
337 auto i
= std::lower_bound (vectorp
->begin (), vectorp
->end (), newr
);
338 if (i
> vectorp
->begin ())
340 struct range
&bef
= *(i
- 1);
342 if (ranges_overlap (bef
.offset
, bef
.length
, offset
, length
))
345 LONGEST l
= std::min (bef
.offset
, offset
);
346 LONGEST h
= std::max (bef
.offset
+ bef
.length
, offset
+ length
);
352 else if (offset
== bef
.offset
+ bef
.length
)
355 bef
.length
+= length
;
361 i
= vectorp
->insert (i
, newr
);
367 i
= vectorp
->insert (i
, newr
);
370 /* Check whether the ranges following the one we've just added or
371 touched can be folded in (#5 above). */
372 if (i
!= vectorp
->end () && i
+ 1 < vectorp
->end ())
377 /* Get the range we just touched. */
378 struct range
&t
= *i
;
382 for (; i
< vectorp
->end (); i
++)
384 struct range
&r
= *i
;
385 if (r
.offset
<= t
.offset
+ t
.length
)
389 l
= std::min (t
.offset
, r
.offset
);
390 h
= std::max (t
.offset
+ t
.length
, r
.offset
+ r
.length
);
399 /* If we couldn't merge this one, we won't be able to
400 merge following ones either, since the ranges are
401 always sorted by OFFSET. */
407 vectorp
->erase (next
, next
+ removed
);
412 value::mark_bits_unavailable (LONGEST offset
, ULONGEST length
)
414 insert_into_bit_range_vector (&m_unavailable
, offset
, length
);
418 value::mark_bytes_unavailable (LONGEST offset
, ULONGEST length
)
420 mark_bits_unavailable (offset
* TARGET_CHAR_BIT
,
421 length
* TARGET_CHAR_BIT
);
424 /* Find the first range in RANGES that overlaps the range defined by
425 OFFSET and LENGTH, starting at element POS in the RANGES vector,
426 Returns the index into RANGES where such overlapping range was
427 found, or -1 if none was found. */
430 find_first_range_overlap (const std::vector
<range
> *ranges
, int pos
,
431 LONGEST offset
, LONGEST length
)
435 for (i
= pos
; i
< ranges
->size (); i
++)
437 const range
&r
= (*ranges
)[i
];
438 if (ranges_overlap (r
.offset
, r
.length
, offset
, length
))
445 /* Compare LENGTH_BITS of memory at PTR1 + OFFSET1_BITS with the memory at
446 PTR2 + OFFSET2_BITS. Return 0 if the memory is the same, otherwise
449 It must always be the case that:
450 OFFSET1_BITS % TARGET_CHAR_BIT == OFFSET2_BITS % TARGET_CHAR_BIT
452 It is assumed that memory can be accessed from:
453 PTR + (OFFSET_BITS / TARGET_CHAR_BIT)
455 PTR + ((OFFSET_BITS + LENGTH_BITS + TARGET_CHAR_BIT - 1)
456 / TARGET_CHAR_BIT) */
458 memcmp_with_bit_offsets (const gdb_byte
*ptr1
, size_t offset1_bits
,
459 const gdb_byte
*ptr2
, size_t offset2_bits
,
462 gdb_assert (offset1_bits
% TARGET_CHAR_BIT
463 == offset2_bits
% TARGET_CHAR_BIT
);
465 if (offset1_bits
% TARGET_CHAR_BIT
!= 0)
468 gdb_byte mask
, b1
, b2
;
470 /* The offset from the base pointers PTR1 and PTR2 is not a complete
471 number of bytes. A number of bits up to either the next exact
472 byte boundary, or LENGTH_BITS (which ever is sooner) will be
474 bits
= TARGET_CHAR_BIT
- offset1_bits
% TARGET_CHAR_BIT
;
475 gdb_assert (bits
< sizeof (mask
) * TARGET_CHAR_BIT
);
476 mask
= (1 << bits
) - 1;
478 if (length_bits
< bits
)
480 mask
&= ~(gdb_byte
) ((1 << (bits
- length_bits
)) - 1);
484 /* Now load the two bytes and mask off the bits we care about. */
485 b1
= *(ptr1
+ offset1_bits
/ TARGET_CHAR_BIT
) & mask
;
486 b2
= *(ptr2
+ offset2_bits
/ TARGET_CHAR_BIT
) & mask
;
491 /* Now update the length and offsets to take account of the bits
492 we've just compared. */
494 offset1_bits
+= bits
;
495 offset2_bits
+= bits
;
498 if (length_bits
% TARGET_CHAR_BIT
!= 0)
502 gdb_byte mask
, b1
, b2
;
504 /* The length is not an exact number of bytes. After the previous
505 IF.. block then the offsets are byte aligned, or the
506 length is zero (in which case this code is not reached). Compare
507 a number of bits at the end of the region, starting from an exact
509 bits
= length_bits
% TARGET_CHAR_BIT
;
510 o1
= offset1_bits
+ length_bits
- bits
;
511 o2
= offset2_bits
+ length_bits
- bits
;
513 gdb_assert (bits
< sizeof (mask
) * TARGET_CHAR_BIT
);
514 mask
= ((1 << bits
) - 1) << (TARGET_CHAR_BIT
- bits
);
516 gdb_assert (o1
% TARGET_CHAR_BIT
== 0);
517 gdb_assert (o2
% TARGET_CHAR_BIT
== 0);
519 b1
= *(ptr1
+ o1
/ TARGET_CHAR_BIT
) & mask
;
520 b2
= *(ptr2
+ o2
/ TARGET_CHAR_BIT
) & mask
;
530 /* We've now taken care of any stray "bits" at the start, or end of
531 the region to compare, the remainder can be covered with a simple
533 gdb_assert (offset1_bits
% TARGET_CHAR_BIT
== 0);
534 gdb_assert (offset2_bits
% TARGET_CHAR_BIT
== 0);
535 gdb_assert (length_bits
% TARGET_CHAR_BIT
== 0);
537 return memcmp (ptr1
+ offset1_bits
/ TARGET_CHAR_BIT
,
538 ptr2
+ offset2_bits
/ TARGET_CHAR_BIT
,
539 length_bits
/ TARGET_CHAR_BIT
);
542 /* Length is zero, regions match. */
546 /* Helper struct for find_first_range_overlap_and_match and
547 value_contents_bits_eq. Keep track of which slot of a given ranges
548 vector have we last looked at. */
550 struct ranges_and_idx
553 const std::vector
<range
> *ranges
;
555 /* The range we've last found in RANGES. Given ranges are sorted,
556 we can start the next lookup here. */
560 /* Helper function for value_contents_bits_eq. Compare LENGTH bits of
561 RP1's ranges starting at OFFSET1 bits with LENGTH bits of RP2's
562 ranges starting at OFFSET2 bits. Return true if the ranges match
563 and fill in *L and *H with the overlapping window relative to
564 (both) OFFSET1 or OFFSET2. */
567 find_first_range_overlap_and_match (struct ranges_and_idx
*rp1
,
568 struct ranges_and_idx
*rp2
,
569 LONGEST offset1
, LONGEST offset2
,
570 ULONGEST length
, ULONGEST
*l
, ULONGEST
*h
)
572 rp1
->idx
= find_first_range_overlap (rp1
->ranges
, rp1
->idx
,
574 rp2
->idx
= find_first_range_overlap (rp2
->ranges
, rp2
->idx
,
577 if (rp1
->idx
== -1 && rp2
->idx
== -1)
583 else if (rp1
->idx
== -1 || rp2
->idx
== -1)
587 const range
*r1
, *r2
;
591 r1
= &(*rp1
->ranges
)[rp1
->idx
];
592 r2
= &(*rp2
->ranges
)[rp2
->idx
];
594 /* Get the unavailable windows intersected by the incoming
595 ranges. The first and last ranges that overlap the argument
596 range may be wider than said incoming arguments ranges. */
597 l1
= std::max (offset1
, r1
->offset
);
598 h1
= std::min (offset1
+ length
, r1
->offset
+ r1
->length
);
600 l2
= std::max (offset2
, r2
->offset
);
601 h2
= std::min (offset2
+ length
, offset2
+ r2
->length
);
603 /* Make them relative to the respective start offsets, so we can
604 compare them for equality. */
611 /* Different ranges, no match. */
612 if (l1
!= l2
|| h1
!= h2
)
621 /* Helper function for value_contents_eq. The only difference is that
622 this function is bit rather than byte based.
624 Compare LENGTH bits of VAL1's contents starting at OFFSET1 bits
625 with LENGTH bits of VAL2's contents starting at OFFSET2 bits.
626 Return true if the available bits match. */
629 value::contents_bits_eq (int offset1
, const struct value
*val2
, int offset2
,
632 /* Each array element corresponds to a ranges source (unavailable,
633 optimized out). '1' is for VAL1, '2' for VAL2. */
634 struct ranges_and_idx rp1
[2], rp2
[2];
636 /* See function description in value.h. */
637 gdb_assert (!m_lazy
&& !val2
->m_lazy
);
639 /* We shouldn't be trying to compare past the end of the values. */
640 gdb_assert (offset1
+ length
641 <= m_enclosing_type
->length () * TARGET_CHAR_BIT
);
642 gdb_assert (offset2
+ length
643 <= val2
->m_enclosing_type
->length () * TARGET_CHAR_BIT
);
645 memset (&rp1
, 0, sizeof (rp1
));
646 memset (&rp2
, 0, sizeof (rp2
));
647 rp1
[0].ranges
= &m_unavailable
;
648 rp2
[0].ranges
= &val2
->m_unavailable
;
649 rp1
[1].ranges
= &m_optimized_out
;
650 rp2
[1].ranges
= &val2
->m_optimized_out
;
654 ULONGEST l
= 0, h
= 0; /* init for gcc -Wall */
657 for (i
= 0; i
< 2; i
++)
659 ULONGEST l_tmp
, h_tmp
;
661 /* The contents only match equal if the invalid/unavailable
662 contents ranges match as well. */
663 if (!find_first_range_overlap_and_match (&rp1
[i
], &rp2
[i
],
664 offset1
, offset2
, length
,
668 /* We're interested in the lowest/first range found. */
669 if (i
== 0 || l_tmp
< l
)
676 /* Compare the available/valid contents. */
677 if (memcmp_with_bit_offsets (m_contents
.get (), offset1
,
678 val2
->m_contents
.get (), offset2
, l
) != 0)
692 value::contents_eq (LONGEST offset1
,
693 const struct value
*val2
, LONGEST offset2
,
694 LONGEST length
) const
696 return contents_bits_eq (offset1
* TARGET_CHAR_BIT
,
697 val2
, offset2
* TARGET_CHAR_BIT
,
698 length
* TARGET_CHAR_BIT
);
704 value::contents_eq (const struct value
*val2
) const
706 ULONGEST len1
= check_typedef (enclosing_type ())->length ();
707 ULONGEST len2
= check_typedef (val2
->enclosing_type ())->length ();
710 return contents_eq (0, val2
, 0, len1
);
713 /* The value-history records all the values printed by print commands
714 during this session. */
716 static std::vector
<value_ref_ptr
> value_history
;
719 /* List of all value objects currently allocated
720 (except for those released by calls to release_value)
721 This is so they can be freed after each command. */
723 static std::vector
<value_ref_ptr
> all_values
;
728 value::allocate_lazy (struct type
*type
)
732 /* Call check_typedef on our type to make sure that, if TYPE
733 is a TYPE_CODE_TYPEDEF, its length is set to the length
734 of the target type instead of zero. However, we do not
735 replace the typedef type by the target type, because we want
736 to keep the typedef in order to be able to set the VAL's type
737 description correctly. */
738 check_typedef (type
);
740 val
= new struct value (type
);
742 /* Values start out on the all_values chain. */
743 all_values
.emplace_back (val
);
748 /* The maximum size, in bytes, that GDB will try to allocate for a value.
749 The initial value of 64k was not selected for any specific reason, it is
750 just a reasonable starting point. */
752 static int max_value_size
= 65536; /* 64k bytes */
754 /* It is critical that the MAX_VALUE_SIZE is at least as big as the size of
755 LONGEST, otherwise GDB will not be able to parse integer values from the
756 CLI; for example if the MAX_VALUE_SIZE could be set to 1 then GDB would
757 be unable to parse "set max-value-size 2".
759 As we want a consistent GDB experience across hosts with different sizes
760 of LONGEST, this arbitrary minimum value was selected, so long as this
761 is bigger than LONGEST on all GDB supported hosts we're fine. */
763 #define MIN_VALUE_FOR_MAX_VALUE_SIZE 16
764 static_assert (sizeof (LONGEST
) <= MIN_VALUE_FOR_MAX_VALUE_SIZE
);
766 /* Implement the "set max-value-size" command. */
769 set_max_value_size (const char *args
, int from_tty
,
770 struct cmd_list_element
*c
)
772 gdb_assert (max_value_size
== -1 || max_value_size
>= 0);
774 if (max_value_size
> -1 && max_value_size
< MIN_VALUE_FOR_MAX_VALUE_SIZE
)
776 max_value_size
= MIN_VALUE_FOR_MAX_VALUE_SIZE
;
777 error (_("max-value-size set too low, increasing to %d bytes"),
782 /* Implement the "show max-value-size" command. */
785 show_max_value_size (struct ui_file
*file
, int from_tty
,
786 struct cmd_list_element
*c
, const char *value
)
788 if (max_value_size
== -1)
789 gdb_printf (file
, _("Maximum value size is unlimited.\n"));
791 gdb_printf (file
, _("Maximum value size is %d bytes.\n"),
795 /* Called before we attempt to allocate or reallocate a buffer for the
796 contents of a value. TYPE is the type of the value for which we are
797 allocating the buffer. If the buffer is too large (based on the user
798 controllable setting) then throw an error. If this function returns
799 then we should attempt to allocate the buffer. */
802 check_type_length_before_alloc (const struct type
*type
)
804 ULONGEST length
= type
->length ();
806 if (exceeds_max_value_size (length
))
808 if (type
->name () != NULL
)
809 error (_("value of type `%s' requires %s bytes, which is more "
810 "than max-value-size"), type
->name (), pulongest (length
));
812 error (_("value requires %s bytes, which is more than "
813 "max-value-size"), pulongest (length
));
820 exceeds_max_value_size (ULONGEST length
)
822 return max_value_size
> -1 && length
> max_value_size
;
825 /* When this has a value, it is used to limit the number of array elements
826 of an array that are loaded into memory when an array value is made
828 static std::optional
<int> array_length_limiting_element_count
;
831 scoped_array_length_limiting::scoped_array_length_limiting (int elements
)
833 m_old_value
= array_length_limiting_element_count
;
834 array_length_limiting_element_count
.emplace (elements
);
838 scoped_array_length_limiting::~scoped_array_length_limiting ()
840 array_length_limiting_element_count
= m_old_value
;
843 /* Find the inner element type for ARRAY_TYPE. */
846 find_array_element_type (struct type
*array_type
)
848 array_type
= check_typedef (array_type
);
849 gdb_assert (array_type
->code () == TYPE_CODE_ARRAY
);
851 if (current_language
->la_language
== language_fortran
)
852 while (array_type
->code () == TYPE_CODE_ARRAY
)
854 array_type
= array_type
->target_type ();
855 array_type
= check_typedef (array_type
);
859 array_type
= array_type
->target_type ();
860 array_type
= check_typedef (array_type
);
866 /* Return the limited length of ARRAY_TYPE, which must be of
867 TYPE_CODE_ARRAY. This function can only be called when the global
868 ARRAY_LENGTH_LIMITING_ELEMENT_COUNT has a value.
870 The limited length of an array is the smallest of either (1) the total
871 size of the array type, or (2) the array target type multiplies by the
872 array_length_limiting_element_count. */
875 calculate_limited_array_length (struct type
*array_type
)
877 gdb_assert (array_length_limiting_element_count
.has_value ());
879 array_type
= check_typedef (array_type
);
880 gdb_assert (array_type
->code () == TYPE_CODE_ARRAY
);
882 struct type
*elm_type
= find_array_element_type (array_type
);
883 ULONGEST len
= (elm_type
->length ()
884 * (*array_length_limiting_element_count
));
885 len
= std::min (len
, array_type
->length ());
893 value::set_limited_array_length ()
895 ULONGEST limit
= m_limited_length
;
896 ULONGEST len
= type ()->length ();
898 if (array_length_limiting_element_count
.has_value ())
899 len
= calculate_limited_array_length (type ());
901 if (limit
!= 0 && len
> limit
)
903 if (len
> max_value_size
)
906 m_limited_length
= max_value_size
;
913 value::allocate_contents (bool check_size
)
917 struct type
*enc_type
= enclosing_type ();
918 ULONGEST len
= enc_type
->length ();
922 /* If we are allocating the contents of an array, which
923 is greater in size than max_value_size, and there is
924 an element limit in effect, then we can possibly try
925 to load only a sub-set of the array contents into
927 if (type () == enc_type
928 && type ()->code () == TYPE_CODE_ARRAY
929 && len
> max_value_size
930 && set_limited_array_length ())
931 len
= m_limited_length
;
933 check_type_length_before_alloc (enc_type
);
936 m_contents
.reset ((gdb_byte
*) xzalloc (len
));
940 /* Allocate a value and its contents for type TYPE. If CHECK_SIZE is true,
941 then apply the usual max-value-size checks. */
944 value::allocate (struct type
*type
, bool check_size
)
946 struct value
*val
= value::allocate_lazy (type
);
948 val
->allocate_contents (check_size
);
953 /* Allocate a value and its contents for type TYPE. */
956 value::allocate (struct type
*type
)
958 return allocate (type
, true);
964 value::allocate_register_lazy (const frame_info_ptr
&initial_next_frame
,
965 int regnum
, struct type
*type
)
968 type
= register_type (frame_unwind_arch (initial_next_frame
), regnum
);
970 value
*result
= value::allocate_lazy (type
);
972 result
->set_lval (lval_register
);
973 result
->m_location
.reg
.regnum
= regnum
;
975 /* If this register value is created during unwind (while computing a frame
976 id), and NEXT_FRAME is a frame inlined in the frame being unwound, then
977 NEXT_FRAME will not have a valid frame id yet. Find the next non-inline
978 frame (possibly the sentinel frame). This is where registers are unwound
980 frame_info_ptr next_frame
= initial_next_frame
;
981 while (get_frame_type (next_frame
) == INLINE_FRAME
)
982 next_frame
= get_next_frame_sentinel_okay (next_frame
);
984 result
->m_location
.reg
.next_frame_id
= get_frame_id (next_frame
);
986 /* We should have a next frame with a valid id. */
987 gdb_assert (frame_id_p (result
->m_location
.reg
.next_frame_id
));
995 value::allocate_register (const frame_info_ptr
&next_frame
, int regnum
,
998 value
*result
= value::allocate_register_lazy (next_frame
, regnum
, type
);
999 result
->set_lazy (false);
1003 /* Allocate a value that has the correct length
1004 for COUNT repetitions of type TYPE. */
1007 allocate_repeat_value (struct type
*type
, int count
)
1009 /* Despite the fact that we are really creating an array of TYPE here, we
1010 use the string lower bound as the array lower bound. This seems to
1011 work fine for now. */
1012 int low_bound
= current_language
->string_lower_bound ();
1013 /* FIXME-type-allocation: need a way to free this type when we are
1015 struct type
*array_type
1016 = lookup_array_range_type (type
, low_bound
, count
+ low_bound
- 1);
1018 return value::allocate (array_type
);
1022 value::allocate_computed (struct type
*type
,
1023 const struct lval_funcs
*funcs
,
1026 struct value
*v
= value::allocate_lazy (type
);
1028 v
->set_lval (lval_computed
);
1029 v
->m_location
.computed
.funcs
= funcs
;
1030 v
->m_location
.computed
.closure
= closure
;
1038 value::allocate_optimized_out (struct type
*type
)
1040 struct value
*retval
= value::allocate_lazy (type
);
1042 retval
->mark_bytes_optimized_out (0, type
->length ());
1043 retval
->set_lazy (false);
1047 /* Accessor methods. */
1049 gdb::array_view
<gdb_byte
>
1050 value::contents_raw ()
1052 int unit_size
= gdbarch_addressable_memory_unit_size (arch ());
1054 allocate_contents (true);
1056 ULONGEST length
= type ()->length ();
1057 return gdb::make_array_view
1058 (m_contents
.get () + m_embedded_offset
* unit_size
, length
);
1061 gdb::array_view
<gdb_byte
>
1062 value::contents_all_raw ()
1064 allocate_contents (true);
1066 ULONGEST length
= enclosing_type ()->length ();
1067 return gdb::make_array_view (m_contents
.get (), length
);
1070 /* Look at value.h for description. */
1073 value_actual_type (struct value
*value
, int resolve_simple_types
,
1074 int *real_type_found
)
1076 struct value_print_options opts
;
1077 struct type
*result
;
1079 get_user_print_options (&opts
);
1081 if (real_type_found
)
1082 *real_type_found
= 0;
1083 result
= value
->type ();
1084 if (opts
.objectprint
)
1086 /* If result's target type is TYPE_CODE_STRUCT, proceed to
1087 fetch its rtti type. */
1088 if (result
->is_pointer_or_reference ()
1089 && (check_typedef (result
->target_type ())->code ()
1090 == TYPE_CODE_STRUCT
)
1091 && !value
->optimized_out ())
1093 struct type
*real_type
;
1095 real_type
= value_rtti_indirect_type (value
, NULL
, NULL
, NULL
);
1098 if (real_type_found
)
1099 *real_type_found
= 1;
1103 else if (resolve_simple_types
)
1105 if (real_type_found
)
1106 *real_type_found
= 1;
1107 result
= value
->enclosing_type ();
1115 error_value_optimized_out (void)
1117 throw_error (OPTIMIZED_OUT_ERROR
, _("value has been optimized out"));
1121 value::require_not_optimized_out () const
1123 if (!m_optimized_out
.empty ())
1125 if (m_lval
== lval_register
)
1126 throw_error (OPTIMIZED_OUT_ERROR
,
1127 _("register has not been saved in frame"));
1129 error_value_optimized_out ();
1134 value::require_available () const
1136 if (!m_unavailable
.empty ())
1137 throw_error (NOT_AVAILABLE_ERROR
, _("value is not available"));
1140 gdb::array_view
<const gdb_byte
>
1141 value::contents_for_printing ()
1146 ULONGEST length
= enclosing_type ()->length ();
1147 return gdb::make_array_view (m_contents
.get (), length
);
1150 gdb::array_view
<const gdb_byte
>
1151 value::contents_for_printing () const
1153 gdb_assert (!m_lazy
);
1155 ULONGEST length
= enclosing_type ()->length ();
1156 return gdb::make_array_view (m_contents
.get (), length
);
1159 gdb::array_view
<const gdb_byte
>
1160 value::contents_all ()
1162 gdb::array_view
<const gdb_byte
> result
= contents_for_printing ();
1163 require_not_optimized_out ();
1164 require_available ();
1168 /* Copy ranges in SRC_RANGE that overlap [SRC_BIT_OFFSET,
1169 SRC_BIT_OFFSET+BIT_LENGTH) ranges into *DST_RANGE, adjusted. */
1172 ranges_copy_adjusted (std::vector
<range
> *dst_range
, int dst_bit_offset
,
1173 const std::vector
<range
> &src_range
, int src_bit_offset
,
1174 unsigned int bit_length
)
1176 for (const range
&r
: src_range
)
1180 l
= std::max (r
.offset
, (LONGEST
) src_bit_offset
);
1181 h
= std::min ((LONGEST
) (r
.offset
+ r
.length
),
1182 (LONGEST
) src_bit_offset
+ bit_length
);
1185 insert_into_bit_range_vector (dst_range
,
1186 dst_bit_offset
+ (l
- src_bit_offset
),
1194 value::ranges_copy_adjusted (struct value
*dst
, int dst_bit_offset
,
1195 int src_bit_offset
, int bit_length
) const
1197 ::ranges_copy_adjusted (&dst
->m_unavailable
, dst_bit_offset
,
1198 m_unavailable
, src_bit_offset
,
1200 ::ranges_copy_adjusted (&dst
->m_optimized_out
, dst_bit_offset
,
1201 m_optimized_out
, src_bit_offset
,
1208 value::contents_copy_raw (struct value
*dst
, LONGEST dst_offset
,
1209 LONGEST src_offset
, LONGEST length
)
1211 LONGEST src_bit_offset
, dst_bit_offset
, bit_length
;
1212 int unit_size
= gdbarch_addressable_memory_unit_size (arch ());
1214 /* A lazy DST would make that this copy operation useless, since as
1215 soon as DST's contents were un-lazied (by a later value_contents
1216 call, say), the contents would be overwritten. A lazy SRC would
1217 mean we'd be copying garbage. */
1218 gdb_assert (!dst
->m_lazy
&& !m_lazy
);
1220 ULONGEST copy_length
= length
;
1221 ULONGEST limit
= m_limited_length
;
1222 if (limit
> 0 && src_offset
+ length
> limit
)
1223 copy_length
= src_offset
> limit
? 0 : limit
- src_offset
;
1225 /* The overwritten DST range gets unavailability ORed in, not
1226 replaced. Make sure to remember to implement replacing if it
1227 turns out actually necessary. */
1228 gdb_assert (dst
->bytes_available (dst_offset
, length
));
1229 gdb_assert (!dst
->bits_any_optimized_out (TARGET_CHAR_BIT
* dst_offset
,
1230 TARGET_CHAR_BIT
* length
));
1232 if ((src_offset
+ copy_length
) * unit_size
> enclosing_type ()-> length ())
1233 error (_("access outside bounds of object"));
1235 /* Copy the data. */
1236 gdb::array_view
<gdb_byte
> dst_contents
1237 = dst
->contents_all_raw ().slice (dst_offset
* unit_size
,
1238 copy_length
* unit_size
);
1239 gdb::array_view
<const gdb_byte
> src_contents
1240 = contents_all_raw ().slice (src_offset
* unit_size
,
1241 copy_length
* unit_size
);
1242 gdb::copy (src_contents
, dst_contents
);
1244 /* Copy the meta-data, adjusted. */
1245 src_bit_offset
= src_offset
* unit_size
* HOST_CHAR_BIT
;
1246 dst_bit_offset
= dst_offset
* unit_size
* HOST_CHAR_BIT
;
1247 bit_length
= length
* unit_size
* HOST_CHAR_BIT
;
1249 ranges_copy_adjusted (dst
, dst_bit_offset
,
1250 src_bit_offset
, bit_length
);
1256 value::contents_copy_raw_bitwise (struct value
*dst
, LONGEST dst_bit_offset
,
1257 LONGEST src_bit_offset
,
1260 /* A lazy DST would make that this copy operation useless, since as
1261 soon as DST's contents were un-lazied (by a later value_contents
1262 call, say), the contents would be overwritten. A lazy SRC would
1263 mean we'd be copying garbage. */
1264 gdb_assert (!dst
->m_lazy
&& !m_lazy
);
1266 ULONGEST copy_bit_length
= bit_length
;
1267 ULONGEST bit_limit
= m_limited_length
* TARGET_CHAR_BIT
;
1268 if (bit_limit
> 0 && src_bit_offset
+ bit_length
> bit_limit
)
1269 copy_bit_length
= (src_bit_offset
> bit_limit
? 0
1270 : bit_limit
- src_bit_offset
);
1272 /* The overwritten DST range gets unavailability ORed in, not
1273 replaced. Make sure to remember to implement replacing if it
1274 turns out actually necessary. */
1275 LONGEST dst_offset
= dst_bit_offset
/ TARGET_CHAR_BIT
;
1276 LONGEST length
= bit_length
/ TARGET_CHAR_BIT
;
1277 gdb_assert (dst
->bytes_available (dst_offset
, length
));
1278 gdb_assert (!dst
->bits_any_optimized_out (dst_bit_offset
,
1281 /* Copy the data. */
1282 gdb::array_view
<gdb_byte
> dst_contents
= dst
->contents_all_raw ();
1283 gdb::array_view
<const gdb_byte
> src_contents
= contents_all_raw ();
1284 copy_bitwise (dst_contents
.data (), dst_bit_offset
,
1285 src_contents
.data (), src_bit_offset
,
1287 type_byte_order (type ()) == BFD_ENDIAN_BIG
);
1289 /* Copy the meta-data. */
1290 ranges_copy_adjusted (dst
, dst_bit_offset
, src_bit_offset
, bit_length
);
1296 value::contents_copy (struct value
*dst
, LONGEST dst_offset
,
1297 LONGEST src_offset
, LONGEST length
)
1302 contents_copy_raw (dst
, dst_offset
, src_offset
, length
);
1305 gdb::array_view
<const gdb_byte
>
1308 gdb::array_view
<const gdb_byte
> result
= contents_writeable ();
1309 require_not_optimized_out ();
1310 require_available ();
1314 gdb::array_view
<gdb_byte
>
1315 value::contents_writeable ()
1319 return contents_raw ();
1323 value::optimized_out ()
1327 /* See if we can compute the result without fetching the
1329 if (this->lval () == lval_memory
)
1331 else if (this->lval () == lval_computed
)
1333 const struct lval_funcs
*funcs
= m_location
.computed
.funcs
;
1335 if (funcs
->is_optimized_out
!= nullptr)
1336 return funcs
->is_optimized_out (this);
1339 /* Fall back to fetching. */
1344 catch (const gdb_exception_error
&ex
)
1349 case OPTIMIZED_OUT_ERROR
:
1350 case NOT_AVAILABLE_ERROR
:
1351 /* These can normally happen when we try to access an
1352 optimized out or unavailable register, either in a
1353 physical register or spilled to memory. */
1361 return !m_optimized_out
.empty ();
1364 /* Mark contents of VALUE as optimized out, starting at OFFSET bytes, and
1365 the following LENGTH bytes. */
1368 value::mark_bytes_optimized_out (int offset
, int length
)
1370 mark_bits_optimized_out (offset
* TARGET_CHAR_BIT
,
1371 length
* TARGET_CHAR_BIT
);
1377 value::mark_bits_optimized_out (LONGEST offset
, LONGEST length
)
1379 insert_into_bit_range_vector (&m_optimized_out
, offset
, length
);
1383 value::bits_synthetic_pointer (LONGEST offset
, LONGEST length
) const
1385 if (m_lval
!= lval_computed
1386 || !m_location
.computed
.funcs
->check_synthetic_pointer
)
1388 return m_location
.computed
.funcs
->check_synthetic_pointer (this, offset
,
1392 const struct lval_funcs
*
1393 value::computed_funcs () const
1395 gdb_assert (m_lval
== lval_computed
);
1397 return m_location
.computed
.funcs
;
1401 value::computed_closure () const
1403 gdb_assert (m_lval
== lval_computed
);
1405 return m_location
.computed
.closure
;
1409 value::address () const
1411 if (m_lval
!= lval_memory
)
1413 if (m_parent
!= NULL
)
1414 return m_parent
->address () + m_offset
;
1415 if (NULL
!= TYPE_DATA_LOCATION (type ()))
1417 gdb_assert (TYPE_DATA_LOCATION (type ())->is_constant ());
1418 return TYPE_DATA_LOCATION_ADDR (type ());
1421 return m_location
.address
+ m_offset
;
1425 value::raw_address () const
1427 if (m_lval
!= lval_memory
)
1429 return m_location
.address
;
1433 value::set_address (CORE_ADDR addr
)
1435 gdb_assert (m_lval
== lval_memory
);
1436 m_location
.address
= addr
;
1439 /* Return a mark in the value chain. All values allocated after the
1440 mark is obtained (except for those released) are subject to being freed
1441 if a subsequent value_free_to_mark is passed the mark. */
1445 if (all_values
.empty ())
1447 return all_values
.back ().get ();
1450 /* Release a reference to VAL, which was acquired with value_incref.
1451 This function is also called to deallocate values from the value
1457 gdb_assert (m_reference_count
> 0);
1458 m_reference_count
--;
1459 if (m_reference_count
== 0)
1463 /* Free all values allocated since MARK was obtained by value_mark
1464 (except for those released). */
1466 value_free_to_mark (const struct value
*mark
)
1468 auto iter
= std::find (all_values
.begin (), all_values
.end (), mark
);
1469 if (iter
== all_values
.end ())
1470 all_values
.clear ();
1472 all_values
.erase (iter
+ 1, all_values
.end ());
1475 /* Remove VAL from the chain all_values
1476 so it will not be freed automatically. */
1479 release_value (struct value
*val
)
1482 return value_ref_ptr ();
1484 std::vector
<value_ref_ptr
>::reverse_iterator iter
;
1485 for (iter
= all_values
.rbegin (); iter
!= all_values
.rend (); ++iter
)
1489 value_ref_ptr result
= *iter
;
1490 all_values
.erase (iter
.base () - 1);
1495 /* We must always return an owned reference. Normally this happens
1496 because we transfer the reference from the value chain, but in
1497 this case the value was not on the chain. */
1498 return value_ref_ptr::new_reference (val
);
1503 std::vector
<value_ref_ptr
>
1504 value_release_to_mark (const struct value
*mark
)
1506 std::vector
<value_ref_ptr
> result
;
1508 auto iter
= std::find (all_values
.begin (), all_values
.end (), mark
);
1509 if (iter
== all_values
.end ())
1510 std::swap (result
, all_values
);
1513 std::move (iter
+ 1, all_values
.end (), std::back_inserter (result
));
1514 all_values
.erase (iter
+ 1, all_values
.end ());
1516 std::reverse (result
.begin (), result
.end ());
1523 value::copy () const
1525 struct type
*encl_type
= enclosing_type ();
1528 val
= value::allocate_lazy (encl_type
);
1529 val
->m_type
= m_type
;
1530 val
->set_lval (m_lval
);
1531 val
->m_location
= m_location
;
1532 val
->m_offset
= m_offset
;
1533 val
->m_bitpos
= m_bitpos
;
1534 val
->m_bitsize
= m_bitsize
;
1535 val
->m_lazy
= m_lazy
;
1536 val
->m_embedded_offset
= embedded_offset ();
1537 val
->m_pointed_to_offset
= m_pointed_to_offset
;
1538 val
->m_modifiable
= m_modifiable
;
1539 val
->m_stack
= m_stack
;
1540 val
->m_is_zero
= m_is_zero
;
1541 val
->m_in_history
= m_in_history
;
1542 val
->m_initialized
= m_initialized
;
1543 val
->m_unavailable
= m_unavailable
;
1544 val
->m_optimized_out
= m_optimized_out
;
1545 val
->m_parent
= m_parent
;
1546 val
->m_limited_length
= m_limited_length
;
1549 && !(val
->entirely_optimized_out ()
1550 || val
->entirely_unavailable ()))
1552 ULONGEST length
= val
->m_limited_length
;
1554 length
= val
->enclosing_type ()->length ();
1556 gdb_assert (m_contents
!= nullptr);
1557 const auto &arg_view
1558 = gdb::make_array_view (m_contents
.get (), length
);
1560 val
->allocate_contents (false);
1561 gdb::array_view
<gdb_byte
> val_contents
1562 = val
->contents_all_raw ().slice (0, length
);
1564 gdb::copy (arg_view
, val_contents
);
1567 if (val
->lval () == lval_computed
)
1569 const struct lval_funcs
*funcs
= val
->m_location
.computed
.funcs
;
1571 if (funcs
->copy_closure
)
1572 val
->m_location
.computed
.closure
= funcs
->copy_closure (val
);
1577 /* Return a "const" and/or "volatile" qualified version of the value V.
1578 If CNST is true, then the returned value will be qualified with
1580 if VOLTL is true, then the returned value will be qualified with
1584 make_cv_value (int cnst
, int voltl
, struct value
*v
)
1586 struct type
*val_type
= v
->type ();
1587 struct type
*m_enclosing_type
= v
->enclosing_type ();
1588 struct value
*cv_val
= v
->copy ();
1590 cv_val
->deprecated_set_type (make_cv_type (cnst
, voltl
, val_type
, NULL
));
1591 cv_val
->set_enclosing_type (make_cv_type (cnst
, voltl
, m_enclosing_type
, NULL
));
1601 if (this->lval () != not_lval
)
1603 struct type
*enc_type
= enclosing_type ();
1604 struct value
*val
= value::allocate (enc_type
);
1606 gdb::copy (contents_all (), val
->contents_all_raw ());
1607 val
->m_type
= m_type
;
1608 val
->set_embedded_offset (embedded_offset ());
1609 val
->set_pointed_to_offset (pointed_to_offset ());
1618 value::force_lval (CORE_ADDR addr
)
1620 gdb_assert (this->lval () == not_lval
);
1622 write_memory (addr
, contents_raw ().data (), type ()->length ());
1623 m_lval
= lval_memory
;
1624 m_location
.address
= addr
;
1628 value::set_component_location (const struct value
*whole
)
1632 gdb_assert (whole
->m_lval
!= lval_xcallable
);
1634 if (whole
->m_lval
== lval_internalvar
)
1635 m_lval
= lval_internalvar_component
;
1637 m_lval
= whole
->m_lval
;
1639 m_location
= whole
->m_location
;
1640 if (whole
->m_lval
== lval_computed
)
1642 const struct lval_funcs
*funcs
= whole
->m_location
.computed
.funcs
;
1644 if (funcs
->copy_closure
)
1645 m_location
.computed
.closure
= funcs
->copy_closure (whole
);
1648 /* If the WHOLE value has a dynamically resolved location property then
1649 update the address of the COMPONENT. */
1650 type
= whole
->type ();
1651 if (NULL
!= TYPE_DATA_LOCATION (type
)
1652 && TYPE_DATA_LOCATION (type
)->is_constant ())
1653 set_address (TYPE_DATA_LOCATION_ADDR (type
));
1655 /* Similarly, if the COMPONENT value has a dynamically resolved location
1656 property then update its address. */
1657 type
= this->type ();
1658 if (NULL
!= TYPE_DATA_LOCATION (type
)
1659 && TYPE_DATA_LOCATION (type
)->is_constant ())
1661 /* If the COMPONENT has a dynamic location, and is an
1662 lval_internalvar_component, then we change it to a lval_memory.
1664 Usually a component of an internalvar is created non-lazy, and has
1665 its content immediately copied from the parent internalvar.
1666 However, for components with a dynamic location, the content of
1667 the component is not contained within the parent, but is instead
1668 accessed indirectly. Further, the component will be created as a
1671 By changing the type of the component to lval_memory we ensure
1672 that value_fetch_lazy can successfully load the component.
1674 This solution isn't ideal, but a real fix would require values to
1675 carry around both the parent value contents, and the contents of
1676 any dynamic fields within the parent. This is a substantial
1677 change to how values work in GDB. */
1678 if (this->lval () == lval_internalvar_component
)
1680 gdb_assert (lazy ());
1681 m_lval
= lval_memory
;
1684 gdb_assert (this->lval () == lval_memory
);
1685 set_address (TYPE_DATA_LOCATION_ADDR (type
));
1689 /* Access to the value history. */
1691 /* Record a new value in the value history.
1692 Returns the absolute history index of the entry. */
1695 value::record_latest ()
1697 /* We don't want this value to have anything to do with the inferior anymore.
1698 In particular, "set $1 = 50" should not affect the variable from which
1699 the value was taken, and fast watchpoints should be able to assume that
1700 a value on the value history never changes. */
1703 /* We know that this is a _huge_ array, any attempt to fetch this
1704 is going to cause GDB to throw an error. However, to allow
1705 the array to still be displayed we fetch its contents up to
1706 `max_value_size' and mark anything beyond "unavailable" in
1708 if (m_type
->code () == TYPE_CODE_ARRAY
1709 && m_type
->length () > max_value_size
1710 && array_length_limiting_element_count
.has_value ()
1711 && m_enclosing_type
== m_type
1712 && calculate_limited_array_length (m_type
) <= max_value_size
)
1713 m_limited_length
= max_value_size
;
1718 ULONGEST limit
= m_limited_length
;
1720 mark_bytes_unavailable (limit
, m_enclosing_type
->length () - limit
);
1722 /* Mark the value as recorded in the history for the availability check. */
1723 m_in_history
= true;
1725 /* We preserve VALUE_LVAL so that the user can find out where it was fetched
1726 from. This is a bit dubious, because then *&$1 does not just return $1
1727 but the current contents of that location. c'est la vie... */
1728 set_modifiable (false);
1730 value_history
.push_back (release_value (this));
1732 return value_history
.size ();
1735 /* Return a copy of the value in the history with sequence number NUM. */
1738 access_value_history (int num
)
1743 absnum
+= value_history
.size ();
1748 error (_("The history is empty."));
1750 error (_("There is only one value in the history."));
1752 error (_("History does not go back to $$%d."), -num
);
1754 if (absnum
> value_history
.size ())
1755 error (_("History has not yet reached $%d."), absnum
);
1759 return value_history
[absnum
]->copy ();
1765 value_history_count ()
1767 return value_history
.size ();
1771 show_values (const char *num_exp
, int from_tty
)
1779 /* "show values +" should print from the stored position.
1780 "show values <exp>" should print around value number <exp>. */
1781 if (num_exp
[0] != '+' || num_exp
[1] != '\0')
1782 num
= parse_and_eval_long (num_exp
) - 5;
1786 /* "show values" means print the last 10 values. */
1787 num
= value_history
.size () - 9;
1793 for (i
= num
; i
< num
+ 10 && i
<= value_history
.size (); i
++)
1795 struct value_print_options opts
;
1797 val
= access_value_history (i
);
1798 gdb_printf (("$%d = "), i
);
1799 get_user_print_options (&opts
);
1800 value_print (val
, gdb_stdout
, &opts
);
1801 gdb_printf (("\n"));
1804 /* The next "show values +" should start after what we just printed. */
1807 /* Hitting just return after this command should do the same thing as
1808 "show values +". If num_exp is null, this is unnecessary, since
1809 "show values +" is not useful after "show values". */
1810 if (from_tty
&& num_exp
)
1811 set_repeat_arguments ("+");
enum internalvar_kind
{
  /* The internal variable is empty.  */
  INTERNALVAR_VOID,

  /* The value of the internal variable is provided directly as
     a GDB value object.  */
  INTERNALVAR_VALUE,

  /* A fresh value is computed via a call-back routine on every
     access to the internal variable.  */
  INTERNALVAR_MAKE_VALUE,

  /* The internal variable holds a GDB internal convenience function.  */
  INTERNALVAR_FUNCTION,

  /* The variable holds an integer value.  */
  INTERNALVAR_INTEGER,

  /* The variable holds a GDB-provided string.  */
  INTERNALVAR_STRING,
};
1837 union internalvar_data
1839 /* A value object used with INTERNALVAR_VALUE. */
1840 struct value
*value
;
1842 /* The call-back routine used with INTERNALVAR_MAKE_VALUE. */
1845 /* The functions to call. */
1846 const struct internalvar_funcs
*functions
;
1848 /* The function's user-data. */
1852 /* The internal function used with INTERNALVAR_FUNCTION. */
1855 struct internal_function
*function
;
1856 /* True if this is the canonical name for the function. */
1860 /* An integer value used with INTERNALVAR_INTEGER. */
1863 /* If type is non-NULL, it will be used as the type to generate
1864 a value for this internal variable. If type is NULL, a default
1865 integer type for the architecture is used. */
1870 /* A string value used with INTERNALVAR_STRING. */
1874 /* Internal variables. These are variables within the debugger
1875 that hold values assigned by debugger commands.
1876 The user refers to them with a '$' prefix
1877 that does not appear in the variable names stored internally. */
1881 internalvar (std::string name
)
1882 : name (std::move (name
))
1887 /* We support various different kinds of content of an internal variable.
1888 enum internalvar_kind specifies the kind, and union internalvar_data
1889 provides the data associated with this particular kind. */
1891 enum internalvar_kind kind
= INTERNALVAR_VOID
;
1893 union internalvar_data u
{};
1896 /* Use std::map, a sorted container, to make the order of iteration (and
1897 therefore the output of "show convenience") stable. */
1899 static std::map
<std::string
, internalvar
> internalvars
;
1901 /* If the variable does not already exist create it and give it the
1902 value given. If no value is given then the default is zero. */
1904 init_if_undefined_command (const char* args
, int from_tty
)
1906 struct internalvar
*intvar
= nullptr;
1908 /* Parse the expression - this is taken from set_command(). */
1909 expression_up expr
= parse_expression (args
);
1911 /* Validate the expression.
1912 Was the expression an assignment?
1913 Or even an expression at all? */
1914 if (expr
->first_opcode () != BINOP_ASSIGN
)
1915 error (_("Init-if-undefined requires an assignment expression."));
1917 /* Extract the variable from the parsed expression. */
1918 expr::assign_operation
*assign
1919 = dynamic_cast<expr::assign_operation
*> (expr
->op
.get ());
1920 if (assign
!= nullptr)
1922 expr::operation
*lhs
= assign
->get_lhs ();
1923 expr::internalvar_operation
*ivarop
1924 = dynamic_cast<expr::internalvar_operation
*> (lhs
);
1925 if (ivarop
!= nullptr)
1926 intvar
= ivarop
->get_internalvar ();
1929 if (intvar
== nullptr)
1930 error (_("The first parameter to init-if-undefined "
1931 "should be a GDB variable."));
1933 /* Only evaluate the expression if the lvalue is void.
1934 This may still fail if the expression is invalid. */
1935 if (intvar
->kind
== INTERNALVAR_VOID
)
1940 /* Look up an internal variable with name NAME. NAME should not
1941 normally include a dollar sign.
1943 If the specified internal variable does not exist,
1944 the return value is NULL. */
1946 struct internalvar
*
1947 lookup_only_internalvar (const char *name
)
1949 auto it
= internalvars
.find (name
);
1950 if (it
== internalvars
.end ())
1956 /* Complete NAME by comparing it to the names of internal
1960 complete_internalvar (completion_tracker
&tracker
, const char *name
)
1962 int len
= strlen (name
);
1964 for (auto &pair
: internalvars
)
1966 const internalvar
&var
= pair
.second
;
1968 if (var
.name
.compare (0, len
, name
) == 0)
1969 tracker
.add_completion (make_unique_xstrdup (var
.name
.c_str ()));
1973 /* Create an internal variable with name NAME and with a void value.
1974 NAME should not normally include a dollar sign.
1976 An internal variable with that name must not exist already. */
1978 struct internalvar
*
1979 create_internalvar (const char *name
)
1981 auto pair
= internalvars
.emplace (std::make_pair (name
, internalvar (name
)));
1982 gdb_assert (pair
.second
);
1984 return &pair
.first
->second
;
1987 /* Create an internal variable with name NAME and register FUN as the
1988 function that value_of_internalvar uses to create a value whenever
1989 this variable is referenced. NAME should not normally include a
1990 dollar sign. DATA is passed uninterpreted to FUN when it is
1991 called. CLEANUP, if not NULL, is called when the internal variable
1992 is destroyed. It is passed DATA as its only argument. */
1994 struct internalvar
*
1995 create_internalvar_type_lazy (const char *name
,
1996 const struct internalvar_funcs
*funcs
,
1999 struct internalvar
*var
= create_internalvar (name
);
2001 var
->kind
= INTERNALVAR_MAKE_VALUE
;
2002 var
->u
.make_value
.functions
= funcs
;
2003 var
->u
.make_value
.data
= data
;
2007 /* See documentation in value.h. */
2010 compile_internalvar_to_ax (struct internalvar
*var
,
2011 struct agent_expr
*expr
,
2012 struct axs_value
*value
)
2014 if (var
->kind
!= INTERNALVAR_MAKE_VALUE
2015 || var
->u
.make_value
.functions
->compile_to_ax
== NULL
)
2018 var
->u
.make_value
.functions
->compile_to_ax (var
, expr
, value
,
2019 var
->u
.make_value
.data
);
2023 /* Look up an internal variable with name NAME. NAME should not
2024 normally include a dollar sign.
2026 If the specified internal variable does not exist,
2027 one is created, with a void value. */
2029 struct internalvar
*
2030 lookup_internalvar (const char *name
)
2032 struct internalvar
*var
;
2034 var
= lookup_only_internalvar (name
);
2038 return create_internalvar (name
);
2041 /* Return current value of internal variable VAR. For variables that
2042 are not inherently typed, use a value type appropriate for GDBARCH. */
2045 value_of_internalvar (struct gdbarch
*gdbarch
, struct internalvar
*var
)
2048 struct trace_state_variable
*tsv
;
2050 /* If there is a trace state variable of the same name, assume that
2051 is what we really want to see. */
2052 tsv
= find_trace_state_variable (var
->name
.c_str ());
2055 tsv
->value_known
= target_get_trace_state_variable_value (tsv
->number
,
2057 if (tsv
->value_known
)
2058 val
= value_from_longest (builtin_type (gdbarch
)->builtin_int64
,
2061 val
= value::allocate (builtin_type (gdbarch
)->builtin_void
);
2067 case INTERNALVAR_VOID
:
2068 val
= value::allocate (builtin_type (gdbarch
)->builtin_void
);
2071 case INTERNALVAR_FUNCTION
:
2072 val
= value::allocate (builtin_type (gdbarch
)->internal_fn
);
2075 case INTERNALVAR_INTEGER
:
2076 if (!var
->u
.integer
.type
)
2077 val
= value_from_longest (builtin_type (gdbarch
)->builtin_int
,
2078 var
->u
.integer
.val
);
2080 val
= value_from_longest (var
->u
.integer
.type
, var
->u
.integer
.val
);
2083 case INTERNALVAR_STRING
:
2084 val
= current_language
->value_string (gdbarch
,
2086 strlen (var
->u
.string
));
2089 case INTERNALVAR_VALUE
:
2090 val
= var
->u
.value
->copy ();
2095 case INTERNALVAR_MAKE_VALUE
:
2096 val
= (*var
->u
.make_value
.functions
->make_value
) (gdbarch
, var
,
2097 var
->u
.make_value
.data
);
2101 internal_error (_("bad kind"));
2104 /* Change the VALUE_LVAL to lval_internalvar so that future operations
2105 on this value go back to affect the original internal variable.
2107 Do not do this for INTERNALVAR_MAKE_VALUE variables, as those have
2108 no underlying modifiable state in the internal variable.
2110 Likewise, if the variable's value is a computed lvalue, we want
2111 references to it to produce another computed lvalue, where
2112 references and assignments actually operate through the
2113 computed value's functions.
2115 This means that internal variables with computed values
2116 behave a little differently from other internal variables:
2117 assignments to them don't just replace the previous value
2118 altogether. At the moment, this seems like the behavior we
2121 if (var
->kind
!= INTERNALVAR_MAKE_VALUE
2122 && val
->lval () != lval_computed
)
2124 val
->set_lval (lval_internalvar
);
2125 VALUE_INTERNALVAR (val
) = var
;
2132 get_internalvar_integer (struct internalvar
*var
, LONGEST
*result
)
2134 if (var
->kind
== INTERNALVAR_INTEGER
)
2136 *result
= var
->u
.integer
.val
;
2140 if (var
->kind
== INTERNALVAR_VALUE
)
2142 struct type
*type
= check_typedef (var
->u
.value
->type ());
2144 if (type
->code () == TYPE_CODE_INT
)
2146 *result
= value_as_long (var
->u
.value
);
2151 if (var
->kind
== INTERNALVAR_MAKE_VALUE
)
2153 struct gdbarch
*gdbarch
= get_current_arch ();
2155 = (*var
->u
.make_value
.functions
->make_value
) (gdbarch
, var
,
2156 var
->u
.make_value
.data
);
2157 struct type
*type
= check_typedef (val
->type ());
2159 if (type
->code () == TYPE_CODE_INT
)
2161 *result
= value_as_long (val
);
2170 get_internalvar_function (struct internalvar
*var
,
2171 struct internal_function
**result
)
2175 case INTERNALVAR_FUNCTION
:
2176 *result
= var
->u
.fn
.function
;
2185 set_internalvar_component (struct internalvar
*var
,
2186 LONGEST offset
, LONGEST bitpos
,
2187 LONGEST bitsize
, struct value
*newval
)
2190 struct gdbarch
*gdbarch
;
2195 case INTERNALVAR_VALUE
:
2196 addr
= var
->u
.value
->contents_writeable ().data ();
2197 gdbarch
= var
->u
.value
->arch ();
2198 unit_size
= gdbarch_addressable_memory_unit_size (gdbarch
);
2201 modify_field (var
->u
.value
->type (), addr
+ offset
,
2202 value_as_long (newval
), bitpos
, bitsize
);
2204 memcpy (addr
+ offset
* unit_size
, newval
->contents ().data (),
2205 newval
->type ()->length ());
2209 /* We can never get a component of any other kind. */
2210 internal_error (_("set_internalvar_component"));
2215 set_internalvar (struct internalvar
*var
, struct value
*val
)
2217 enum internalvar_kind new_kind
;
2218 union internalvar_data new_data
= { 0 };
2220 if (var
->kind
== INTERNALVAR_FUNCTION
&& var
->u
.fn
.canonical
)
2221 error (_("Cannot overwrite convenience function %s"), var
->name
.c_str ());
2223 /* Prepare new contents. */
2224 switch (check_typedef (val
->type ())->code ())
2226 case TYPE_CODE_VOID
:
2227 new_kind
= INTERNALVAR_VOID
;
2230 case TYPE_CODE_INTERNAL_FUNCTION
:
2231 gdb_assert (val
->lval () == lval_internalvar
);
2232 new_kind
= INTERNALVAR_FUNCTION
;
2233 get_internalvar_function (VALUE_INTERNALVAR (val
),
2234 &new_data
.fn
.function
);
2235 /* Copies created here are never canonical. */
2239 new_kind
= INTERNALVAR_VALUE
;
2240 struct value
*copy
= val
->copy ();
2241 copy
->set_modifiable (true);
2243 /* Force the value to be fetched from the target now, to avoid problems
2244 later when this internalvar is referenced and the target is gone or
2247 copy
->fetch_lazy ();
2249 /* Release the value from the value chain to prevent it from being
2250 deleted by free_all_values. From here on this function should not
2251 call error () until new_data is installed into the var->u to avoid
2253 new_data
.value
= release_value (copy
).release ();
2255 /* Internal variables which are created from values with a dynamic
2256 location don't need the location property of the origin anymore.
2257 The resolved dynamic location is used prior then any other address
2258 when accessing the value.
2259 If we keep it, we would still refer to the origin value.
2260 Remove the location property in case it exist. */
2261 new_data
.value
->type ()->remove_dyn_prop (DYN_PROP_DATA_LOCATION
);
2266 /* Clean up old contents. */
2267 clear_internalvar (var
);
2270 var
->kind
= new_kind
;
2272 /* End code which must not call error(). */
2276 set_internalvar_integer (struct internalvar
*var
, LONGEST l
)
2278 /* Clean up old contents. */
2279 clear_internalvar (var
);
2281 var
->kind
= INTERNALVAR_INTEGER
;
2282 var
->u
.integer
.type
= NULL
;
2283 var
->u
.integer
.val
= l
;
2287 set_internalvar_string (struct internalvar
*var
, const char *string
)
2289 /* Clean up old contents. */
2290 clear_internalvar (var
);
2292 var
->kind
= INTERNALVAR_STRING
;
2293 var
->u
.string
= xstrdup (string
);
2297 set_internalvar_function (struct internalvar
*var
, struct internal_function
*f
)
2299 /* Clean up old contents. */
2300 clear_internalvar (var
);
2302 var
->kind
= INTERNALVAR_FUNCTION
;
2303 var
->u
.fn
.function
= f
;
2304 var
->u
.fn
.canonical
= 1;
2305 /* Variables installed here are always the canonical version. */
2309 clear_internalvar (struct internalvar
*var
)
2311 /* Clean up old contents. */
2314 case INTERNALVAR_VALUE
:
2315 var
->u
.value
->decref ();
2318 case INTERNALVAR_STRING
:
2319 xfree (var
->u
.string
);
2326 /* Reset to void kind. */
2327 var
->kind
= INTERNALVAR_VOID
;
2331 internalvar_name (const struct internalvar
*var
)
2333 return var
->name
.c_str ();
2336 static struct internal_function
*
2337 create_internal_function (const char *name
,
2338 internal_function_fn handler
, void *cookie
)
2340 struct internal_function
*ifn
= XNEW (struct internal_function
);
2342 ifn
->name
= xstrdup (name
);
2343 ifn
->handler
= handler
;
2344 ifn
->cookie
= cookie
;
2349 value_internal_function_name (struct value
*val
)
2351 struct internal_function
*ifn
;
2354 gdb_assert (val
->lval () == lval_internalvar
);
2355 result
= get_internalvar_function (VALUE_INTERNALVAR (val
), &ifn
);
2356 gdb_assert (result
);
2362 call_internal_function (struct gdbarch
*gdbarch
,
2363 const struct language_defn
*language
,
2364 struct value
*func
, int argc
, struct value
**argv
)
2366 struct internal_function
*ifn
;
2369 gdb_assert (func
->lval () == lval_internalvar
);
2370 result
= get_internalvar_function (VALUE_INTERNALVAR (func
), &ifn
);
2371 gdb_assert (result
);
2373 return (*ifn
->handler
) (gdbarch
, language
, ifn
->cookie
, argc
, argv
);
/* The 'function' command.  This does nothing -- it is just a
   placeholder to let "help function NAME" work.  This is also used as
   the implementation of the sub-command that is created when
   registering an internal function.  */

static void
function_command (const char *command, int from_tty)
{
  /* Do nothing.  */
}
2386 /* Helper function that does the work for add_internal_function. */
2388 static struct cmd_list_element
*
2389 do_add_internal_function (const char *name
, const char *doc
,
2390 internal_function_fn handler
, void *cookie
)
2392 struct internal_function
*ifn
;
2393 struct internalvar
*var
= lookup_internalvar (name
);
2395 ifn
= create_internal_function (name
, handler
, cookie
);
2396 set_internalvar_function (var
, ifn
);
2398 return add_cmd (name
, no_class
, function_command
, doc
, &functionlist
);
2404 add_internal_function (const char *name
, const char *doc
,
2405 internal_function_fn handler
, void *cookie
)
2407 do_add_internal_function (name
, doc
, handler
, cookie
);
2413 add_internal_function (gdb::unique_xmalloc_ptr
<char> &&name
,
2414 gdb::unique_xmalloc_ptr
<char> &&doc
,
2415 internal_function_fn handler
, void *cookie
)
2417 struct cmd_list_element
*cmd
2418 = do_add_internal_function (name
.get (), doc
.get (), handler
, cookie
);
2420 /* Manually transfer the ownership of the doc and name strings to CMD by
2421 setting the appropriate flags. */
2422 (void) doc
.release ();
2423 cmd
->doc_allocated
= 1;
2424 (void) name
.release ();
2425 cmd
->name_allocated
= 1;
2429 value::preserve (struct objfile
*objfile
, htab_t copied_types
)
2431 if (m_type
->objfile_owner () == objfile
)
2432 m_type
= copy_type_recursive (m_type
, copied_types
);
2434 if (m_enclosing_type
->objfile_owner () == objfile
)
2435 m_enclosing_type
= copy_type_recursive (m_enclosing_type
, copied_types
);
2438 /* Likewise for internal variable VAR. */
2441 preserve_one_internalvar (struct internalvar
*var
, struct objfile
*objfile
,
2442 htab_t copied_types
)
2446 case INTERNALVAR_INTEGER
:
2447 if (var
->u
.integer
.type
2448 && var
->u
.integer
.type
->objfile_owner () == objfile
)
2450 = copy_type_recursive (var
->u
.integer
.type
, copied_types
);
2453 case INTERNALVAR_VALUE
:
2454 var
->u
.value
->preserve (objfile
, copied_types
);
2459 /* Make sure that all types and values referenced by VAROBJ are updated before
2460 OBJFILE is discarded. COPIED_TYPES is used to prevent cycles and
2464 preserve_one_varobj (struct varobj
*varobj
, struct objfile
*objfile
,
2465 htab_t copied_types
)
2467 if (varobj
->type
->is_objfile_owned ()
2468 && varobj
->type
->objfile_owner () == objfile
)
2471 = copy_type_recursive (varobj
->type
, copied_types
);
2474 if (varobj
->value
!= nullptr)
2475 varobj
->value
->preserve (objfile
, copied_types
);
2478 /* Update the internal variables and value history when OBJFILE is
2479 discarded; we must copy the types out of the objfile. New global types
2480 will be created for every convenience variable which currently points to
2481 this objfile's types, and the convenience variables will be adjusted to
2482 use the new global types. */
2485 preserve_values (struct objfile
*objfile
)
2487 /* Create the hash table. We allocate on the objfile's obstack, since
2488 it is soon to be deleted. */
2489 htab_up copied_types
= create_copied_types_hash ();
2491 for (const value_ref_ptr
&item
: value_history
)
2492 item
->preserve (objfile
, copied_types
.get ());
2494 for (auto &pair
: internalvars
)
2495 preserve_one_internalvar (&pair
.second
, objfile
, copied_types
.get ());
2497 /* For the remaining varobj, check that none has type owned by OBJFILE. */
2498 all_root_varobjs ([&copied_types
, objfile
] (struct varobj
*varobj
)
2500 preserve_one_varobj (varobj
, objfile
,
2501 copied_types
.get ());
2504 preserve_ext_lang_values (objfile
, copied_types
.get ());
2508 show_convenience (const char *ignore
, int from_tty
)
2510 struct gdbarch
*gdbarch
= get_current_arch ();
2512 struct value_print_options opts
;
2514 get_user_print_options (&opts
);
2515 for (auto &pair
: internalvars
)
2517 internalvar
&var
= pair
.second
;
2523 gdb_printf (("$%s = "), var
.name
.c_str ());
2529 val
= value_of_internalvar (gdbarch
, &var
);
2530 value_print (val
, gdb_stdout
, &opts
);
2532 catch (const gdb_exception_error
&ex
)
2534 fprintf_styled (gdb_stdout
, metadata_style
.style (),
2535 _("<error: %s>"), ex
.what ());
2538 gdb_printf (("\n"));
2542 /* This text does not mention convenience functions on purpose.
2543 The user can't create them except via Python, and if Python support
2544 is installed this message will never be printed ($_streq will
2546 gdb_printf (_("No debugger convenience variables now defined.\n"
2547 "Convenience variables have "
2548 "names starting with \"$\";\n"
2549 "use \"set\" as in \"set "
2550 "$foo = 5\" to define them.\n"));
2558 value::from_xmethod (xmethod_worker_up
&&worker
)
2562 v
= value::allocate (builtin_type (current_inferior ()->arch ())->xmethod
);
2563 v
->m_lval
= lval_xcallable
;
2564 v
->m_location
.xm_worker
= worker
.release ();
2565 v
->m_modifiable
= false;
2573 value::result_type_of_xmethod (gdb::array_view
<value
*> argv
)
2575 gdb_assert (type ()->code () == TYPE_CODE_XMETHOD
2576 && m_lval
== lval_xcallable
&& !argv
.empty ());
2578 return m_location
.xm_worker
->get_result_type (argv
[0], argv
.slice (1));
2584 value::call_xmethod (gdb::array_view
<value
*> argv
)
2586 gdb_assert (type ()->code () == TYPE_CODE_XMETHOD
2587 && m_lval
== lval_xcallable
&& !argv
.empty ());
2589 return m_location
.xm_worker
->invoke (argv
[0], argv
.slice (1));
2592 /* Extract a value as a C number (either long or double).
2593 Knows how to convert fixed values to double, or
2594 floating values to long.
2595 Does not deallocate the value. */
2598 value_as_long (struct value
*val
)
2600 /* This coerces arrays and functions, which is necessary (e.g.
2601 in disassemble_command). It also dereferences references, which
2602 I suspect is the most logical thing to do. */
2603 val
= coerce_array (val
);
2604 return unpack_long (val
->type (), val
->contents ().data ());
2610 value_as_mpz (struct value
*val
)
2612 val
= coerce_array (val
);
2613 struct type
*type
= check_typedef (val
->type ());
2615 switch (type
->code ())
2617 case TYPE_CODE_ENUM
:
2618 case TYPE_CODE_BOOL
:
2620 case TYPE_CODE_CHAR
:
2621 case TYPE_CODE_RANGE
:
2625 return gdb_mpz (value_as_long (val
));
2630 gdb::array_view
<const gdb_byte
> valbytes
= val
->contents ();
2631 enum bfd_endian byte_order
= type_byte_order (type
);
2633 /* Handle integers that are either not a multiple of the word size,
2634 or that are stored at some bit offset. */
2635 unsigned bit_off
= 0, bit_size
= 0;
2636 if (type
->bit_size_differs_p ())
2638 bit_size
= type
->bit_size ();
2641 /* We can just handle this immediately. */
2645 bit_off
= type
->bit_offset ();
2647 unsigned n_bytes
= ((bit_off
% 8) + bit_size
+ 7) / 8;
2648 valbytes
= valbytes
.slice (bit_off
/ 8, n_bytes
);
2650 if (byte_order
== BFD_ENDIAN_BIG
)
2651 bit_off
= (n_bytes
* 8 - bit_off
% 8 - bit_size
);
2656 result
.read (val
->contents (), byte_order
, type
->is_unsigned ());
2658 /* Shift off any low bits, if needed. */
2662 /* Mask off any high bits, if needed. */
2664 result
.mask (bit_size
);
2666 /* Now handle any range bias. */
2667 if (type
->code () == TYPE_CODE_RANGE
&& type
->bounds ()->bias
!= 0)
2669 /* Unfortunately we have to box here, because LONGEST is
2670 probably wider than long. */
2671 result
+= gdb_mpz (type
->bounds ()->bias
);
/* Extract a value as a C pointer.  */

CORE_ADDR
value_as_address (struct value *val)
{
  struct gdbarch *gdbarch = val->type ()->arch ();

  /* Assume a CORE_ADDR can fit in a LONGEST (for now).  Not sure
     whether we want this to be true eventually.  */
#if 0
  /* gdbarch_addr_bits_remove is wrong if we are being called for a
     non-address (e.g. argument to "signal", "info break", etc.), or
     for pointers to char, in which the low bits *are* significant.  */
  return gdbarch_addr_bits_remove (gdbarch, value_as_long (val));
#endif

  /* There are several targets (IA-64, PowerPC, and others) which
     don't represent pointers to functions as simply the address of
     the function's entry point.  For example, on the IA-64, a
     function pointer points to a two-word descriptor, generated by
     the linker, which contains the function's entry point, and the
     value the IA-64 "global pointer" register should have --- to
     support position-independent code.  The linker generates
     descriptors only for those functions whose addresses are taken.

     On such targets, it's difficult for GDB to convert an arbitrary
     function address into a function pointer; it has to either find
     an existing descriptor for that function, or call malloc and
     build its own.  On some targets, it is impossible for GDB to
     build a descriptor at all: the descriptor must contain a jump
     instruction; data memory cannot be executed; and code memory
     cannot be modified.

     Upon entry to this function, if VAL is a value of type `function'
     (that is, TYPE_CODE (val->type ()) == TYPE_CODE_FUNC), then
     val->address () is the address of the function.  This is what
     you'll get if you evaluate an expression like `main'.  The call
     to COERCE_ARRAY below actually does all the usual unary
     conversions, which includes converting values of type `function'
     to `pointer to function'.  This is the challenging conversion
     discussed above.  Then, `unpack_pointer' will convert that pointer
     back into an address.

     So, suppose the user types `disassemble foo' on an architecture
     with a strange function pointer representation, on which GDB
     cannot build its own descriptors, and suppose further that `foo'
     has no linker-built descriptor.  The address->pointer conversion
     will signal an error and prevent the command from running, even
     though the next step would have been to convert the pointer
     directly back into the same address.

     The following shortcut avoids this whole mess.  If VAL is a
     function, just return its address directly.  */
  if (val->type ()->code () == TYPE_CODE_FUNC
      || val->type ()->code () == TYPE_CODE_METHOD)
    return val->address ();

  val = coerce_array (val);

  /* Some architectures (e.g. Harvard), map instruction and data
     addresses onto a single large unified address space.  For
     instance: An architecture may consider a large integer in the
     range 0x10000000 .. 0x1000ffff to already represent a data
     addresses (hence not need a pointer to address conversion) while
     a small integer would still need to be converted integer to
     pointer to address.  Just assume such architectures handle all
     integer conversions in a single function.  */

  /* JimB writes:

     I think INTEGER_TO_ADDRESS is a good idea as proposed --- but we
     must admonish GDB hackers to make sure its behavior matches the
     compiler's, whenever possible.

     In general, I think GDB should evaluate expressions the same way
     the compiler does.  When the user copies an expression out of
     their source code and hands it to a `print' command, they should
     get the same value the compiler would have computed.  Any
     deviation from this rule can cause major confusion and annoyance,
     and needs to be justified carefully.  In other words, GDB doesn't
     really have the freedom to do these conversions in clever and
     useful ways.

     AndrewC pointed out that users aren't complaining about how GDB
     casts integers to pointers; they are complaining that they can't
     take an address from a disassembly listing and give it to `x/i'.
     This is certainly important.

     Adding an architecture method like integer_to_address() certainly
     makes it possible for GDB to "get it right" in all circumstances
     --- the target has complete control over how things get done, so
     people can Do The Right Thing for their target without breaking
     anyone else.  The standard doesn't specify how integers get
     converted to pointers; usually, the ABI doesn't either, but
     ABI-specific code is a more reasonable place to handle it.  */

  if (!val->type ()->is_pointer_or_reference ()
      && gdbarch_integer_to_address_p (gdbarch))
    return gdbarch_integer_to_address (gdbarch, val->type (),
				       val->contents ().data ());

  return unpack_pointer (val->type (), val->contents ().data ());
}
/* Unpack raw data (copied from debugee, target byte order) at VALADDR
   as a long, or as a double, assuming the raw data is described
   by type TYPE.  Knows how to convert different sizes of values
   and can convert between fixed and floating point.  We don't assume
   any alignment for the raw data.  Return value is in host byte order.

   If you want functions and arrays to be coerced to pointers, and
   references to be dereferenced, call value_as_long() instead.

   C++: It is assumed that the front-end has taken care of
   all matters concerning pointers to members.  A pointer
   to member which reaches here is considered to be equivalent
   to an INT (or some size).  After all, it is only an offset.  */

LONGEST
unpack_long (struct type *type, const gdb_byte *valaddr)
{
  /* Fixed-point values are stored as their underlying integer base
     type; unpack that representation.  */
  if (is_fixed_point_type (type))
    type = type->fixed_point_type_base_type ();

  enum bfd_endian byte_order = type_byte_order (type);
  enum type_code code = type->code ();
  int len = type->length ();
  int nosign = type->is_unsigned ();

  switch (code)
    {
    case TYPE_CODE_TYPEDEF:
      /* Strip the typedef and retry with the target type.  */
      return unpack_long (check_typedef (type), valaddr);
    case TYPE_CODE_ENUM:
    case TYPE_CODE_FLAGS:
    case TYPE_CODE_BOOL:
    case TYPE_CODE_INT:
    case TYPE_CODE_CHAR:
    case TYPE_CODE_RANGE:
    case TYPE_CODE_MEMBERPTR:
      {
	LONGEST result;

	if (type->bit_size_differs_p ())
	  {
	    /* A scalar whose bit size differs from its byte-storage
	       size (e.g. Ada packed types).  */
	    unsigned bit_off = type->bit_offset ();
	    unsigned bit_size = type->bit_size ();
	    if (bit_size == 0)
	      {
		/* unpack_bits_as_long doesn't handle this case the
		   way we'd like, so handle it here.  */
		result = 0;
	      }
	    else
	      result = unpack_bits_as_long (type, valaddr, bit_off, bit_size);
	  }
	else
	  {
	    if (nosign)
	      result = extract_unsigned_integer (valaddr, len, byte_order);
	    else
	      result = extract_signed_integer (valaddr, len, byte_order);
	  }
	/* Range types store values relative to their bias.  */
	if (code == TYPE_CODE_RANGE)
	  result += type->bounds ()->bias;
	return result;
      }

    case TYPE_CODE_FLT:
    case TYPE_CODE_DECFLOAT:
      return target_float_to_longest (valaddr, type);

    case TYPE_CODE_FIXED_POINT:
      {
	gdb_mpq vq;
	vq.read_fixed_point (gdb::make_array_view (valaddr, len),
			     byte_order, nosign,
			     type->fixed_point_scaling_factor ());

	gdb_mpz vz = vq.as_integer ();
	return vz.as_integer<LONGEST> ();
      }

    case TYPE_CODE_PTR:
    case TYPE_CODE_REF:
    case TYPE_CODE_RVALUE_REF:
      /* Assume a CORE_ADDR can fit in a LONGEST (for now).  Not sure
	 whether we want this to be true eventually.  */
      return extract_typed_address (valaddr, type);

    default:
      error (_("Value can't be converted to integer."));
    }
}
2873 /* Unpack raw data (copied from debugee, target byte order) at VALADDR
2874 as a CORE_ADDR, assuming the raw data is described by type TYPE.
2875 We don't assume any alignment for the raw data. Return value is in
2878 If you want functions and arrays to be coerced to pointers, and
2879 references to be dereferenced, call value_as_address() instead.
2881 C++: It is assumed that the front-end has taken care of
2882 all matters concerning pointers to members. A pointer
2883 to member which reaches here is considered to be equivalent
2884 to an INT (or some size). After all, it is only an offset. */
2887 unpack_pointer (struct type
*type
, const gdb_byte
*valaddr
)
2889 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
2890 whether we want this to be true eventually. */
2891 return unpack_long (type
, valaddr
);
/* See value.h.  Return true if VAL is of floating type; in that case,
   also validate the target representation.  */

bool
is_floating_value (struct value *val)
{
  struct type *type = check_typedef (val->type ());

  if (is_floating_type (type))
    {
      /* Reject malformed target bit patterns up front so callers
	 don't operate on garbage.  */
      if (!target_float_is_valid (val->contents ().data (), type))
	error (_("Invalid floating value found in program."));
      return true;
    }

  return false;
}
/* Get the value of the FIELDNO'th field (which must be static) of
   TYPE.  */

struct value *
value_static_field (struct type *type, int fieldno)
{
  struct value *retval;

  switch (type->field (fieldno).loc_kind ())
    {
    case FIELD_LOC_KIND_PHYSADDR:
      /* The debug info gave us the member's address directly.  */
      retval = value_at_lazy (type->field (fieldno).type (),
			      type->field (fieldno).loc_physaddr ());
      break;
    case FIELD_LOC_KIND_PHYSNAME:
    {
      /* Only the mangled name is recorded; look the symbol up.  */
      const char *phys_name = type->field (fieldno).loc_physname ();
      /* type->field (fieldno).name (); */
      struct block_symbol sym = lookup_symbol (phys_name, nullptr,
					       SEARCH_VAR_DOMAIN, nullptr);

      if (sym.symbol == NULL)
	{
	  /* With some compilers, e.g. HP aCC, static data members are
	     reported as non-debuggable symbols.  */
	  struct bound_minimal_symbol msym
	    = lookup_minimal_symbol (phys_name, NULL, NULL);
	  struct type *field_type = type->field (fieldno).type ();

	  if (!msym.minsym)
	    /* Not found anywhere: treat the member as optimized out.  */
	    retval = value::allocate_optimized_out (field_type);
	  else
	    retval = value_at_lazy (field_type, msym.value_address ());
	}
      else
	retval = value_of_variable (sym.symbol, sym.block);
      break;
    }
    default:
      gdb_assert_not_reached ("unexpected field location kind");
    }

  return retval;
}
/* Change the enclosing type of a value object VAL to NEW_ENCL_TYPE.
   You have to be careful here, since the size of the data area for the value
   is set by the length of the enclosing type.  So if NEW_ENCL_TYPE is bigger
   than the old enclosing type, you have to allocate more space for the
   data.  */

void
value::set_enclosing_type (struct type *new_encl_type)
{
  if (new_encl_type->length () > enclosing_type ()->length ())
    {
      check_type_length_before_alloc (new_encl_type);
      /* Grow the contents buffer in place; existing bytes are kept.  */
      m_contents.reset ((gdb_byte *) xrealloc (m_contents.release (),
					       new_encl_type->length ()));
    }

  m_enclosing_type = new_encl_type;
}
/* See value.h.  Extract the value of (non-static) field FIELDNO of
   ARG_TYPE, located OFFSET bytes into this value's contents.  */

struct value *
value::primitive_field (LONGEST offset, int fieldno, struct type *arg_type)
{
  struct value *v;
  struct type *type;
  int unit_size = gdbarch_addressable_memory_unit_size (arch ());

  arg_type = check_typedef (arg_type);
  type = arg_type->field (fieldno).type ();

  /* Call check_typedef on our type to make sure that, if TYPE
     is a TYPE_CODE_TYPEDEF, its length is set to the length
     of the target type instead of zero.  However, we do not
     replace the typedef type by the target type, because we want
     to keep the typedef in order to be able to print the type
     description correctly.  */
  check_typedef (type);

  if (arg_type->field (fieldno).bitsize ())
    {
      /* Handle packed fields.

	 Create a new value for the bitfield, with bitpos and bitsize
	 set.  If possible, arrange offset and bitpos so that we can
	 do a single aligned read of the size of the containing type.
	 Otherwise, adjust offset to the byte containing the first
	 bit.  Assume that the address, offset, and embedded offset
	 are sufficiently aligned.  */

      LONGEST bitpos = arg_type->field (fieldno).loc_bitpos ();
      LONGEST container_bitsize = type->length () * 8;

      v = value::allocate_lazy (type);
      v->set_bitsize (arg_type->field (fieldno).bitsize ());
      if ((bitpos % container_bitsize) + v->bitsize () <= container_bitsize
	  && type->length () <= (int) sizeof (LONGEST))
	v->set_bitpos (bitpos % container_bitsize);
      else
	v->set_bitpos (bitpos % 8);
      v->set_offset ((embedded_offset ()
		      + offset
		      + (bitpos - v->bitpos ()) / 8));
      v->set_parent (this);
      if (!lazy ())
	v->fetch_lazy ();
    }
  else if (fieldno < TYPE_N_BASECLASSES (arg_type))
    {
      /* This field is actually a base subobject, so preserve the
	 entire object's contents for later references to virtual
	 bases, etc.  */
      LONGEST boffset;

      /* Lazy register values with offsets are not supported.  */
      if (this->lval () == lval_register && lazy ())
	fetch_lazy ();

      /* We special case virtual inheritance here because this
	 requires access to the contents, which we would rather avoid
	 for references to ordinary fields of unavailable values.  */
      if (BASETYPE_VIA_VIRTUAL (arg_type, fieldno))
	boffset = baseclass_offset (arg_type, fieldno,
				    contents ().data (),
				    embedded_offset (),
				    address (),
				    this);
      else
	boffset = arg_type->field (fieldno).loc_bitpos () / 8;

      if (lazy ())
	v = value::allocate_lazy (enclosing_type ());
      else
	{
	  v = value::allocate (enclosing_type ());
	  contents_copy_raw (v, 0, 0, enclosing_type ()->length ());
	}
      v->deprecated_set_type (type);
      v->set_offset (this->offset ());
      v->set_embedded_offset (offset + embedded_offset () + boffset);
    }
  else if (NULL != TYPE_DATA_LOCATION (type))
    {
      /* Field is a dynamic data member.  */

      gdb_assert (0 == offset);
      /* We expect an already resolved data location.  */
      gdb_assert (TYPE_DATA_LOCATION (type)->is_constant ());
      /* For dynamic data types defer memory allocation
	 until we actual access the value.  */
      v = value::allocate_lazy (type);
    }
  else
    {
      /* Plain old data member */
      offset += (arg_type->field (fieldno).loc_bitpos ()
		 / (HOST_CHAR_BIT * unit_size));

      /* Lazy register values with offsets are not supported.  */
      if (this->lval () == lval_register && lazy ())
	fetch_lazy ();

      if (lazy ())
	v = value::allocate_lazy (type);
      else
	{
	  v = value::allocate (type);
	  contents_copy_raw (v, v->embedded_offset (),
			     embedded_offset () + offset,
			     type_length_units (type));
	}
      v->set_offset (this->offset () + offset + embedded_offset ());
    }
  v->set_component_location (this);
  return v;
}
3092 /* Given a value ARG1 of a struct or union type,
3093 extract and return the value of one of its (non-static) fields.
3094 FIELDNO says which field. */
3097 value_field (struct value
*arg1
, int fieldno
)
3099 return arg1
->primitive_field (0, fieldno
, arg1
->type ());
/* Return a non-virtual function as a value.
   F is the list of member functions which contains the desired method.
   J is an index into F which provides the desired method.

   We only use the symbol for its address, so be happy with either a
   full symbol or a minimal symbol.  */

struct value *
value_fn_field (struct value **arg1p, struct fn_field *f,
		int j, struct type *type,
		LONGEST offset)
{
  struct value *v;
  struct type *ftype = TYPE_FN_FIELD_TYPE (f, j);
  const char *physname = TYPE_FN_FIELD_PHYSNAME (f, j);
  struct symbol *sym;
  struct bound_minimal_symbol msym;

  sym = lookup_symbol (physname, nullptr, SEARCH_FUNCTION_DOMAIN,
		       nullptr).symbol;
  if (sym == nullptr)
    {
      /* No debug symbol: fall back to the minimal symbol table.  */
      msym = lookup_bound_minimal_symbol (physname);
      if (msym.minsym == NULL)
	return NULL;
    }

  v = value::allocate (ftype);
  v->set_lval (lval_memory);
  if (sym)
    {
      v->set_address (sym->value_block ()->entry_pc ());
    }
  else
    {
      /* The minimal symbol might point to a function descriptor;
	 resolve it to the actual code address instead.  */
      struct objfile *objfile = msym.objfile;
      struct gdbarch *gdbarch = objfile->arch ();

      v->set_address (gdbarch_convert_from_func_ptr_addr
		      (gdbarch, msym.value_address (),
		       current_inferior ()->top_target ()));
    }

  if (arg1p)
    {
      /* If necessary, cast the `this' value to the type that declares
	 the method before the call.  */
      if (type != (*arg1p)->type ())
	*arg1p = value_ind (value_cast (lookup_pointer_type (type),
					value_addr (*arg1p)));

      /* Move the `this' pointer according to the offset.
	 (*arg1p)->offset () += offset;  */
    }

  return v;
}
/* Unpack a bitfield of the specified FIELD_TYPE, from the object at
   VALADDR.  The bitfield starts at BITPOS bits and contains BITSIZE
   bits; if BITSIZE is zero, the entire FIELD_TYPE is extracted.
   Sign extension is applied when FIELD_TYPE is signed.  */

static LONGEST
unpack_bits_as_long (struct type *field_type, const gdb_byte *valaddr,
		     LONGEST bitpos, LONGEST bitsize)
{
  enum bfd_endian byte_order = type_byte_order (field_type);
  ULONGEST val;
  ULONGEST valmask;
  int lsbcount;
  LONGEST bytes_read;
  LONGEST read_offset;

  /* Read the minimum number of bytes required; there may not be
     enough bytes to read an entire ULONGEST.  */
  field_type = check_typedef (field_type);
  if (bitsize)
    bytes_read = ((bitpos % 8) + bitsize + 7) / 8;
  else
    {
      /* BITSIZE == 0: extract the whole type.  */
      bytes_read = field_type->length ();
      bitsize = 8 * bytes_read;
    }

  read_offset = bitpos / 8;

  val = extract_unsigned_integer (valaddr + read_offset,
				  bytes_read, byte_order);

  /* Extract bits.  See comment above.  */

  /* Compute the number of low-order bits to discard: on big-endian
     targets the field sits at the high end of the bytes read, on
     little-endian at the low end.  */
  if (byte_order == BFD_ENDIAN_BIG)
    lsbcount = (bytes_read * 8 - bitpos % 8 - bitsize);
  else
    lsbcount = (bitpos % 8);
  val >>= lsbcount;

  /* If the field does not entirely fill a LONGEST, then zero the sign bits.
     If the field is signed, and is negative, then sign extend.  */

  if (bitsize < 8 * (int) sizeof (val))
    {
      valmask = (((ULONGEST) 1) << bitsize) - 1;
      val &= valmask;
      if (!field_type->is_unsigned ())
	{
	  if (val & (valmask ^ (valmask >> 1)))
	    {
	      /* Sign bit is set: extend it through the high bits.  */
	      val |= ~valmask;
	    }
	}
    }

  return val;
}
/* Unpack a field FIELDNO of the specified TYPE, from the object at
   VALADDR + EMBEDDED_OFFSET.  VALADDR points to the contents of
   ORIGINAL_VALUE, which must not be NULL.  See
   unpack_value_bits_as_long for more details.

   Returns 0 (leaving *RESULT untouched) if any of the requested bits
   are unavailable or optimized out; returns 1 on success.  */

int
unpack_value_field_as_long (struct type *type, const gdb_byte *valaddr,
			    LONGEST embedded_offset, int fieldno,
			    const struct value *val, LONGEST *result)
{
  int bitpos = type->field (fieldno).loc_bitpos ();
  int bitsize = type->field (fieldno).bitsize ();
  struct type *field_type = type->field (fieldno).type ();
  int bit_offset;

  gdb_assert (val != NULL);

  bit_offset = embedded_offset * TARGET_CHAR_BIT + bitpos;
  if (val->bits_any_optimized_out (bit_offset, bitsize)
      || !val->bits_available (bit_offset, bitsize))
    return 0;

  *result = unpack_bits_as_long (field_type, valaddr + embedded_offset,
				 bitpos, bitsize);
  return 1;
}
3245 /* Unpack a field FIELDNO of the specified TYPE, from the anonymous
3246 object at VALADDR. See unpack_bits_as_long for more details. */
3249 unpack_field_as_long (struct type
*type
, const gdb_byte
*valaddr
, int fieldno
)
3251 int bitpos
= type
->field (fieldno
).loc_bitpos ();
3252 int bitsize
= type
->field (fieldno
).bitsize ();
3253 struct type
*field_type
= type
->field (fieldno
).type ();
3255 return unpack_bits_as_long (field_type
, valaddr
, bitpos
, bitsize
);
/* See value.h.  Unpack a bitfield from this value's contents into
   DEST_VAL, propagating optimized-out/unavailable bit ranges.  */

void
value::unpack_bitfield (struct value *dest_val,
			LONGEST bitpos, LONGEST bitsize,
			const gdb_byte *valaddr, LONGEST embedded_offset)
  const
{
  enum bfd_endian byte_order;
  int src_bit_offset;
  int dst_bit_offset;
  struct type *field_type = dest_val->type ();

  byte_order = type_byte_order (field_type);

  /* First, unpack and sign extend the bitfield as if it was wholly
     valid.  Optimized out/unavailable bits are read as zero, but
     that's OK, as they'll end up marked below.  If the VAL is
     wholly-invalid we may have skipped allocating its contents,
     though.  See value::allocate_optimized_out.  */
  if (valaddr != NULL)
    {
      LONGEST num;

      num = unpack_bits_as_long (field_type, valaddr + embedded_offset,
				 bitpos, bitsize);
      store_signed_integer (dest_val->contents_raw ().data (),
			    field_type->length (), byte_order, num);
    }

  /* Now copy the optimized out / unavailability ranges to the right
     place in the destination.  On big-endian targets the field lands
     in the high-order end of DEST_VAL's contents.  */
  src_bit_offset = embedded_offset * TARGET_CHAR_BIT + bitpos;
  if (byte_order == BFD_ENDIAN_BIG)
    dst_bit_offset = field_type->length () * TARGET_CHAR_BIT - bitsize;
  else
    dst_bit_offset = 0;
  ranges_copy_adjusted (dest_val, dst_bit_offset, src_bit_offset, bitsize);
}
3298 /* Return a new value with type TYPE, which is FIELDNO field of the
3299 object at VALADDR + EMBEDDEDOFFSET. VALADDR points to the contents
3300 of VAL. If the VAL's contents required to extract the bitfield
3301 from are unavailable/optimized out, the new value is
3302 correspondingly marked unavailable/optimized out. */
3305 value_field_bitfield (struct type
*type
, int fieldno
,
3306 const gdb_byte
*valaddr
,
3307 LONGEST embedded_offset
, const struct value
*val
)
3309 int bitpos
= type
->field (fieldno
).loc_bitpos ();
3310 int bitsize
= type
->field (fieldno
).bitsize ();
3311 struct value
*res_val
= value::allocate (type
->field (fieldno
).type ());
3313 val
->unpack_bitfield (res_val
, bitpos
, bitsize
, valaddr
, embedded_offset
);
/* Modify the value of a bitfield.  ADDR points to a block of memory in
   target byte order; the bitfield starts in the byte pointed to.  FIELDVAL
   is the desired value of the field, in host byte order.  BITPOS and BITSIZE
   indicate which bits (in target bit order) comprise the bitfield.
   Requires 0 < BITSIZE <= lbits, 0 <= BITPOS % 8 + BITSIZE <= lbits, and
   0 <= BITPOS, where lbits is the size of a LONGEST in bits.  */

void
modify_field (struct type *type, gdb_byte *addr,
	      LONGEST fieldval, LONGEST bitpos, LONGEST bitsize)
{
  enum bfd_endian byte_order = type_byte_order (type);
  ULONGEST oword;
  ULONGEST mask = (ULONGEST) -1 >> (8 * sizeof (ULONGEST) - bitsize);
  LONGEST bytesize;

  /* Normalize BITPOS.  */
  addr += bitpos / 8;
  bitpos %= 8;

  /* If a negative fieldval fits in the field in question, chop
     off the sign extension bits.  */
  if ((~fieldval & ~(mask >> 1)) == 0)
    fieldval &= mask;

  /* Warn if value is too big to fit in the field in question.  */
  if (0 != (fieldval & ~mask))
    {
      /* FIXME: would like to include fieldval in the message, but
	 we don't have a sprintf_longest.  */
      warning (_("Value does not fit in %s bits."), plongest (bitsize));

      /* Truncate it, otherwise adjoining fields may be corrupted.  */
      fieldval &= mask;
    }

  /* Ensure no bytes outside of the modified ones get accessed as it may cause
     false valgrind reports.  */

  bytesize = (bitpos + bitsize + 7) / 8;
  oword = extract_unsigned_integer (addr, bytesize, byte_order);

  /* Shifting for bit field depends on endianness of the target machine.  */
  if (byte_order == BFD_ENDIAN_BIG)
    bitpos = bytesize * 8 - bitpos - bitsize;

  /* Clear the old field bits, then merge in the new value.  */
  oword &= ~(mask << bitpos);
  oword |= fieldval << bitpos;

  store_unsigned_integer (addr, bytesize, byte_order, oword);
}
/* Pack NUM into BUF using a target format of TYPE.  */

void
pack_long (gdb_byte *buf, struct type *type, LONGEST num)
{
  enum bfd_endian byte_order = type_byte_order (type);
  int len;

  type = check_typedef (type);
  len = type->length ();

  switch (type->code ())
    {
    case TYPE_CODE_RANGE:
      /* Range values are stored biased.  */
      num -= type->bounds ()->bias;
      [[fallthrough]];
    case TYPE_CODE_INT:
    case TYPE_CODE_CHAR:
    case TYPE_CODE_ENUM:
    case TYPE_CODE_FLAGS:
    case TYPE_CODE_BOOL:
    case TYPE_CODE_MEMBERPTR:
      if (type->bit_size_differs_p ())
	{
	  /* Scalar whose bit size differs from its storage size: mask
	     to the declared width and shift into position.  */
	  unsigned bit_off = type->bit_offset ();
	  unsigned bit_size = type->bit_size ();
	  num &= ((ULONGEST) 1 << bit_size) - 1;
	  num <<= bit_off;
	}
      store_signed_integer (buf, len, byte_order, num);
      break;

    case TYPE_CODE_REF:
    case TYPE_CODE_RVALUE_REF:
    case TYPE_CODE_PTR:
      store_typed_address (buf, type, (CORE_ADDR) num);
      break;

    case TYPE_CODE_FLT:
    case TYPE_CODE_DECFLOAT:
      target_float_from_longest (buf, type, num);
      break;

    default:
      error (_("Unexpected type (%d) encountered for integer constant."),
	     type->code ());
    }
}
/* Pack NUM into BUF using a target format of TYPE.  */

void
pack_unsigned_long (gdb_byte *buf, struct type *type, ULONGEST num)
{
  int len;
  enum bfd_endian byte_order;

  type = check_typedef (type);
  len = type->length ();
  byte_order = type_byte_order (type);

  switch (type->code ())
    {
    case TYPE_CODE_INT:
    case TYPE_CODE_CHAR:
    case TYPE_CODE_ENUM:
    case TYPE_CODE_FLAGS:
    case TYPE_CODE_BOOL:
    case TYPE_CODE_RANGE:
    case TYPE_CODE_MEMBERPTR:
      if (type->bit_size_differs_p ())
	{
	  /* Scalar whose bit size differs from its storage size: mask
	     to the declared width and shift into position.  */
	  unsigned bit_off = type->bit_offset ();
	  unsigned bit_size = type->bit_size ();
	  num &= ((ULONGEST) 1 << bit_size) - 1;
	  num <<= bit_off;
	}
      store_unsigned_integer (buf, len, byte_order, num);
      break;

    case TYPE_CODE_REF:
    case TYPE_CODE_RVALUE_REF:
    case TYPE_CODE_PTR:
      store_typed_address (buf, type, (CORE_ADDR) num);
      break;

    case TYPE_CODE_FLT:
    case TYPE_CODE_DECFLOAT:
      target_float_from_ulongest (buf, type, num);
      break;

    default:
      error (_("Unexpected type (%d) encountered "
	       "for unsigned integer constant."),
	     type->code ());
    }
}
/* See value.h.  Create a lazy "zero" value of TYPE, used to represent
   a value that is known to be all zeroes without materializing its
   contents.  */

struct value *
value::zero (struct type *type, enum lval_type lv)
{
  struct value *val = value::allocate_lazy (type);

  /* A computed lvalue would need lval_funcs, which a zero value does
     not carry, so degrade it to not_lval.  */
  val->set_lval (lv == lval_computed ? not_lval : lv);
  val->m_is_zero = true;

  return val;
}
3481 /* Convert C numbers into newly allocated values. */
3484 value_from_longest (struct type
*type
, LONGEST num
)
3486 struct value
*val
= value::allocate (type
);
3488 pack_long (val
->contents_raw ().data (), type
, num
);
3493 /* Convert C unsigned numbers into newly allocated values. */
3496 value_from_ulongest (struct type
*type
, ULONGEST num
)
3498 struct value
*val
= value::allocate (type
);
3500 pack_unsigned_long (val
->contents_raw ().data (), type
, num
);
/* See value.h.  Convert the arbitrary-precision integer V into a
   newly allocated value of TYPE, applying any range bias and packed
   bit layout that TYPE requires.  */

struct value *
value_from_mpz (struct type *type, const gdb_mpz &v)
{
  struct type *real_type = check_typedef (type);

  /* VAL points at the (possibly adjusted) number to write; STORAGE is
     used as a scratch copy so the caller's V is never modified.  */
  const gdb_mpz *val = &v;
  gdb_mpz storage;
  if (real_type->code () == TYPE_CODE_RANGE && type->bounds ()->bias != 0)
    {
      storage = *val;
      val = &storage;
      /* Range values are stored biased.  */
      storage -= type->bounds ()->bias;
    }

  if (type->bit_size_differs_p ())
    {
      /* Packed scalar: mask to the declared bit width and shift into
	 position within the storage unit.  */
      unsigned bit_off = type->bit_offset ();
      unsigned bit_size = type->bit_size ();

      if (val != &storage)
	{
	  storage = *val;
	  val = &storage;
	}

      storage.mask (bit_size);
      storage <<= bit_off;
    }

  struct value *result = value::allocate (type);
  val->truncate (result->contents_raw (), type_byte_order (type),
		 type->is_unsigned ());
  return result;
}
3542 /* Create a value representing a pointer of type TYPE to the address
3546 value_from_pointer (struct type
*type
, CORE_ADDR addr
)
3548 struct value
*val
= value::allocate (type
);
3550 store_typed_address (val
->contents_raw ().data (),
3551 check_typedef (type
), addr
);
/* Create and return a value object of TYPE containing the value D.  The
   TYPE must be of TYPE_CODE_FLT, and must be large enough to hold D once
   it is converted to target format.  */

struct value *
value_from_host_double (struct type *type, double d)
{
  struct value *value = value::allocate (type);
  gdb_assert (type->code () == TYPE_CODE_FLT);
  /* Convert the host double into TYPE's target floating format.  */
  target_float_from_host_double (value->contents_raw ().data (),
				 value->type (), d);
  return value;
}
3569 /* Create a value of type TYPE whose contents come from VALADDR, if it
3570 is non-null, and whose memory address (in the inferior) is
3571 ADDRESS. The type of the created value may differ from the passed
3572 type TYPE. Make sure to retrieve values new type after this call.
3573 Note that TYPE is not passed through resolve_dynamic_type; this is
3574 a special API intended for use only by Ada. */
3577 value_from_contents_and_address_unresolved (struct type
*type
,
3578 const gdb_byte
*valaddr
,
3583 if (valaddr
== NULL
)
3584 v
= value::allocate_lazy (type
);
3586 v
= value_from_contents (type
, valaddr
);
3587 v
->set_lval (lval_memory
);
3588 v
->set_address (address
);
/* Create a value of type TYPE whose contents come from VALADDR, if it
   is non-null, and whose memory address (in the inferior) is
   ADDRESS.  The type of the created value may differ from the passed
   type TYPE.  Make sure to retrieve values new type after this call.  */

struct value *
value_from_contents_and_address (struct type *type,
				 const gdb_byte *valaddr,
				 CORE_ADDR address,
				 const frame_info_ptr &frame)
{
  gdb::array_view<const gdb_byte> view;
  if (valaddr != nullptr)
    view = gdb::make_array_view (valaddr, type->length ());
  /* Resolve any dynamic properties (array bounds, data location, ...)
     of TYPE against the actual contents/address.  */
  struct type *resolved_type = resolve_dynamic_type (type, view, address,
						     &frame);
  struct type *resolved_type_no_typedef = check_typedef (resolved_type);

  struct value *v;

  if (resolved_type_no_typedef->code () == TYPE_CODE_ARRAY
      && resolved_type_no_typedef->bound_optimized_out ())
    {
      /* Resolution found that the bounds are optimized out.  In this
	 case, mark the array itself as optimized-out.  */
      v = value::allocate_optimized_out (resolved_type);
    }
  else if (valaddr == nullptr)
    v = value::allocate_lazy (resolved_type);
  else
    v = value_from_contents (resolved_type, valaddr);
  /* A constant data location resolved from TYPE overrides the caller's
     ADDRESS.  */
  if (TYPE_DATA_LOCATION (resolved_type_no_typedef) != NULL
      && TYPE_DATA_LOCATION (resolved_type_no_typedef)->is_constant ())
    address = TYPE_DATA_LOCATION_ADDR (resolved_type_no_typedef);
  v->set_lval (lval_memory);
  v->set_address (address);
  return v;
}
3630 /* Create a value of type TYPE holding the contents CONTENTS.
3631 The new value is `not_lval'. */
3634 value_from_contents (struct type
*type
, const gdb_byte
*contents
)
3636 struct value
*result
;
3638 result
= value::allocate (type
);
3639 memcpy (result
->contents_raw ().data (), contents
, type
->length ());
/* Extract a value from the history file.  Input will be of the form
   $digits or $$digits.  See block comment above 'write_dollar_variable'
   for details.

   On success, *ENDP is advanced past the consumed text; on failure
   NULL is returned and *ENDP is left at H.  */

struct value *
value_from_history_ref (const char *h, const char **endp)
{
  int index, len;

  if (h[0] == '$')
    len = 1;
  else
    return NULL;

  if (h[1] == '$')
    len = 2;

  /* Find length of numeral string.  */
  for (; isdigit (h[len]); len++)
    ;

  /* Make sure numeral string is not part of an identifier.  */
  if (h[len] == '_' || isalpha (h[len]))
    return NULL;

  /* Now collect the index value.  */
  if (h[1] == '$')
    {
      if (len == 2)
	{
	  /* For some bizarre reason, "$$" is equivalent to "$$1",
	     rather than to "$$0" as it ought to be!  */
	  index = -1;
	  *endp += len;
	}
      else
	{
	  char *local_end;

	  /* "$$N" means N entries back from the end: a negative
	     history index.  */
	  index = -strtol (&h[2], &local_end, 10);
	  *endp = local_end;
	}
    }
  else
    {
      if (len == 1)
	{
	  /* "$" is equivalent to "$0".  */
	  index = 0;
	  *endp += len;
	}
      else
	{
	  char *local_end;

	  index = strtol (&h[1], &local_end, 10);
	  *endp = local_end;
	}
    }

  return access_value_history (index);
}
/* Get the component value (offset by OFFSET bytes) of a struct or
   union WHOLE.  Component's type is TYPE.  */

struct value *
value_from_component (struct value *whole, struct type *type, LONGEST offset)
{
  struct value *v;

  if (whole->lval () == lval_memory && whole->lazy ())
    /* WHOLE hasn't been fetched yet; keep the component lazy too so
       we only read the bytes actually needed.  */
    v = value::allocate_lazy (type);
  else
    {
      v = value::allocate (type);
      whole->contents_copy (v, v->embedded_offset (),
			    whole->embedded_offset () + offset,
			    type_length_units (type));
    }
  v->set_offset (whole->offset () + offset + whole->embedded_offset ());
  v->set_component_location (whole);

  return v;
}
/* See value.h.  Like value_from_component, but the component is
   described by a bit offset and bit length rather than bytes.  */

struct value *
value::from_component_bitsize (struct type *type,
			       LONGEST bit_offset, LONGEST bit_length)
{
  gdb_assert (!lazy ());

  /* Preserve lvalue-ness if possible.  This is needed to avoid
     array-printing failures (including crashes) when printing Ada
     arrays in programs compiled with -fgnat-encodings=all.  */
  if ((bit_offset % TARGET_CHAR_BIT) == 0
      && (bit_length % TARGET_CHAR_BIT) == 0
      && bit_length == TARGET_CHAR_BIT * type->length ())
    return value_from_component (this, type, bit_offset / TARGET_CHAR_BIT);

  struct value *v = value::allocate (type);

  /* On big-endian targets a scalar narrower than its storage occupies
     the high-order end, so shift the destination bit offset.  */
  LONGEST dst_offset = TARGET_CHAR_BIT * v->embedded_offset ();
  if (is_scalar_type (type) && type_byte_order (type) == BFD_ENDIAN_BIG)
    dst_offset += TARGET_CHAR_BIT * type->length () - bit_length;

  contents_copy_raw_bitwise (v, dst_offset,
			     bit_offset
			     + TARGET_CHAR_BIT
			     * embedded_offset (),
			     bit_length);
  return v;
}
3760 coerce_ref_if_computed (const struct value
*arg
)
3762 const struct lval_funcs
*funcs
;
3764 if (!TYPE_IS_REFERENCE (check_typedef (arg
->type ())))
3767 if (arg
->lval () != lval_computed
)
3770 funcs
= arg
->computed_funcs ();
3771 if (funcs
->coerce_ref
== NULL
)
3774 return funcs
->coerce_ref (arg
);
/* Look at value.h for description.  */

struct value *
readjust_indirect_value_type (struct value *value, struct type *enc_type,
			      const struct type *original_type,
			      struct value *original_value,
			      CORE_ADDR original_value_address)
{
  gdb_assert (original_type->is_pointer_or_reference ());

  /* Resolve any dynamic properties of the pointed-to type using the
     address of the object it designates.  */
  struct type *original_target_type = original_type->target_type ();
  gdb::array_view<const gdb_byte> view;
  struct type *resolved_original_target_type
    = resolve_dynamic_type (original_target_type, view,
			    original_value_address);

  /* Re-adjust type.  */
  value->deprecated_set_type (resolved_original_target_type);

  /* Add embedding info.  */
  value->set_enclosing_type (enc_type);
  value->set_embedded_offset (original_value->pointed_to_offset ());

  /* We may be pointing to an object of some derived type.  */
  return value_full_object (value, NULL, 0, 0, 0);
}
/* See value.h.  Dereference ARG if it has a reference type, returning
   the referenced value; otherwise return ARG unchanged.  */

struct value *
coerce_ref (struct value *arg)
{
  struct type *value_type_arg_tmp = check_typedef (arg->type ());
  struct value *retval;
  struct type *enc_type;

  /* A computed lvalue may implement its own reference coercion.  */
  retval = coerce_ref_if_computed (arg);
  if (retval)
    return retval;

  /* Non-references are returned as-is.  */
  if (!TYPE_IS_REFERENCE (value_type_arg_tmp))
    return arg;

  /* Use the enclosing type so that we pick up any derived-class
     information attached to ARG.  */
  enc_type = check_typedef (arg->enclosing_type ());
  enc_type = enc_type->target_type ();

  CORE_ADDR addr = unpack_pointer (arg->type (), arg->contents ().data ());
  retval = value_at_lazy (enc_type, addr);
  enc_type = retval->type ();
  return readjust_indirect_value_type (retval, enc_type, value_type_arg_tmp,
				       arg, addr);
}
3829 coerce_array (struct value
*arg
)
3833 arg
= coerce_ref (arg
);
3834 type
= check_typedef (arg
->type ());
3836 switch (type
->code ())
3838 case TYPE_CODE_ARRAY
:
3839 if (!type
->is_vector () && current_language
->c_style_arrays_p ())
3840 arg
= value_coerce_array (arg
);
3842 case TYPE_CODE_FUNC
:
3843 arg
= value_coerce_function (arg
);
3850 /* Return the return value convention that will be used for the
3853 enum return_value_convention
3854 struct_return_convention (struct gdbarch
*gdbarch
,
3855 struct value
*function
, struct type
*value_type
)
3857 enum type_code code
= value_type
->code ();
3859 if (code
== TYPE_CODE_ERROR
)
3860 error (_("Function return type unknown."));
3862 /* Probe the architecture for the return-value convention. */
3863 return gdbarch_return_value_as_value (gdbarch
, function
, value_type
,
3867 /* Return true if the function returning the specified type is using
3868 the convention of returning structures in memory (passing in the
3869 address as a hidden first parameter). */
3872 using_struct_return (struct gdbarch
*gdbarch
,
3873 struct value
*function
, struct type
*value_type
)
3875 if (value_type
->code () == TYPE_CODE_VOID
)
3876 /* A void return value is never in memory. See also corresponding
3877 code in "print_return_value". */
3880 return (struct_return_convention (gdbarch
, function
, value_type
)
3881 != RETURN_VALUE_REGISTER_CONVENTION
);
/* Fetch the contents of this lazy value when it is a bitfield.  */

void
value::fetch_lazy_bitfield ()
{
  gdb_assert (bitsize () != 0);

  /* To read a lazy bitfield, read the entire enclosing value.  This
     prevents reading the same block of (possibly volatile) memory once
     per bitfield.  It would be even better to read only the containing
     word, but we have no way to record that just specific bits of a
     value have been fetched.  */
  struct value *parent = this->parent ();

  if (parent->lazy ())
    parent->fetch_lazy ();

  /* Extract this bitfield's bits out of the now-fetched parent.  */
  parent->unpack_bitfield (this, bitpos (), bitsize (),
			   parent->contents_for_printing ().data (),
			   parent->offset ());
}
/* Fetch the contents of this lazy value from target memory.  */

void
value::fetch_lazy_memory ()
{
  gdb_assert (m_lval == lval_memory);

  CORE_ADDR addr = address ();
  struct type *type = check_typedef (enclosing_type ());

  /* Figure out how much we should copy from memory.  Usually, this is just
     the size of the type, but, for arrays, we might only be loading a
     small part of the array (this is only done for very large arrays).  */
  LONGEST len = 0;
  if (m_limited_length > 0)
    {
      /* Length limiting is only ever applied to array values.  */
      gdb_assert (this->type ()->code () == TYPE_CODE_ARRAY);
      len = m_limited_length;
    }
  else if (type->length () > 0)
    len = type_length_units (type);

  gdb_assert (len >= 0);

  /* Zero-length types need no read at all.  */
  if (len > 0)
    read_value_memory (this, 0, stack (), addr,
		       contents_all_raw ().data (), len);
}
/* Fetch the contents of this lazy value when it is an lval_register.
   The register may have to be unwound through several frames before a
   concrete (memory/computed/raw-register) location is found.  */

void
value::fetch_lazy_register ()
{
  struct type *type = check_typedef (this->type ());
  struct value *new_val = this;

  /* Free any intermediate values created while unwinding.  */
  scoped_value_mark mark;

  /* Offsets are not supported here; lazy register values must
     refer to the entire register.  */
  gdb_assert (offset () == 0);

  /* Keep unwinding until the register's value is no longer a lazy
     lval_register, i.e. until we reach a frame that actually knows
     where the register's contents live.  */
  while (new_val->lval () == lval_register && new_val->lazy ())
    {
      frame_id next_frame_id = new_val->next_frame_id ();
      frame_info_ptr next_frame = frame_find_by_id (next_frame_id);
      gdb_assert (next_frame != NULL);

      int regnum = new_val->regnum ();

      /* Convertible register routines are used for multi-register
	 values and for interpretation in different types
	 (e.g. float or int from a double register).  Lazy
	 register values should have the register's natural type,
	 so they do not apply.  */
      gdb_assert (!gdbarch_convert_register_p (get_frame_arch (next_frame),
					       regnum, type));

      new_val = frame_unwind_register_value (next_frame, regnum);

      /* If we get another lazy lval_register value, it means the
	 register is found by reading it from NEXT_FRAME's next frame.
	 frame_unwind_register_value should never return a value with
	 the frame id pointing to NEXT_FRAME.  If it does, it means we
	 either have two consecutive frames with the same frame id
	 in the frame chain, or some code is trying to unwind
	 behind get_prev_frame's back (e.g., a frame unwind
	 sniffer trying to unwind), bypassing its validations.  In
	 any case, it should always be an internal error to end up
	 in this situation.  */
      if (new_val->lval () == lval_register
	  && new_val->lazy ()
	  && new_val->next_frame_id () == next_frame_id)
	internal_error (_("infinite loop while fetching a register"));
    }

  /* If it's still lazy (for instance, a saved register on the
     stack), fetch it.  */
  if (new_val->lazy ())
    new_val->fetch_lazy ();

  /* Copy the contents and the unavailability/optimized-out
     meta-data from NEW_VAL to VAL.  */
  set_lazy (false);
  new_val->contents_copy (this, embedded_offset (),
			  new_val->embedded_offset (),
			  type_length_units (type));

  /* When frame debugging is enabled, describe where the register's
     value was ultimately found.  */
  if (frame_debug)
    {
      frame_info_ptr frame = frame_find_by_id (this->next_frame_id ());
      frame = get_prev_frame_always (frame);
      int regnum = this->regnum ();
      gdbarch *gdbarch = get_frame_arch (frame);

      string_file debug_file;
      gdb_printf (&debug_file,
		  "(frame=%d, regnum=%d(%s), ...) ",
		  frame_relative_level (frame), regnum,
		  user_reg_map_regnum_to_name (gdbarch, regnum));

      gdb_printf (&debug_file, "->");
      if (new_val->optimized_out ())
	{
	  gdb_printf (&debug_file, " ");
	  val_print_optimized_out (new_val, &debug_file);
	}
      else
	{
	  int i;

	  if (new_val->lval () == lval_register)
	    gdb_printf (&debug_file, " register=%d", new_val->regnum ());
	  else if (new_val->lval () == lval_memory)
	    gdb_printf (&debug_file, " address=%s",
			paddress (gdbarch,
				  new_val->address ()));
	  else
	    gdb_printf (&debug_file, " computed");

	  if (new_val->entirely_available ())
	    {
	      gdb::array_view<const gdb_byte> buf = new_val->contents ();

	      gdb_printf (&debug_file, " bytes=");
	      gdb_printf (&debug_file, "[");
	      for (i = 0; i < register_size (gdbarch, regnum); i++)
		gdb_printf (&debug_file, "%02x", buf[i]);
	      gdb_printf (&debug_file, "]");
	    }
	  else if (new_val->entirely_unavailable ())
	    gdb_printf (&debug_file, " unavailable");
	  else
	    gdb_printf (&debug_file, " partly unavailable");
	}

      frame_debug_printf ("%s", debug_file.c_str ());
    }
}
/* Load the actual contents of a lazy value, dispatching on the
   value's location kind.  */

void
value::fetch_lazy ()
{
  gdb_assert (lazy ());
  allocate_contents (true);
  /* A value is either lazy, or fully fetched.  The
     availability/validity is only established as we try to fetch a
     value.  */
  gdb_assert (m_optimized_out.empty ());
  gdb_assert (m_unavailable.empty ());
  if (m_is_zero)
    {
      /* Nothing.  The contents buffer is already zeroed.  */
    }
  else if (bitsize ())
    /* Bitfields must be handled first: they are read via their parent
       value regardless of the lval kind.  */
    fetch_lazy_bitfield ();
  else if (this->lval () == lval_memory)
    fetch_lazy_memory ();
  else if (this->lval () == lval_register)
    fetch_lazy_register ();
  else if (this->lval () == lval_computed
	   && computed_funcs ()->read != NULL)
    computed_funcs ()->read (this);
  else
    internal_error (_("Unexpected lazy value type."));

  set_lazy (false);
}
4080 pseudo_from_raw_part (const frame_info_ptr
&next_frame
, int pseudo_reg_num
,
4081 int raw_reg_num
, int raw_offset
)
4083 value
*pseudo_reg_val
4084 = value::allocate_register (next_frame
, pseudo_reg_num
);
4085 value
*raw_reg_val
= value_of_register (raw_reg_num
, next_frame
);
4086 raw_reg_val
->contents_copy (pseudo_reg_val
, 0, raw_offset
,
4087 pseudo_reg_val
->type ()->length ());
4088 return pseudo_reg_val
;
4094 pseudo_to_raw_part (const frame_info_ptr
&next_frame
,
4095 gdb::array_view
<const gdb_byte
> pseudo_buf
,
4096 int raw_reg_num
, int raw_offset
)
4099 = register_size (frame_unwind_arch (next_frame
), raw_reg_num
);
4101 /* When overflowing a register, put_frame_register_bytes writes to the
4102 subsequent registers. We don't want that behavior here, so make sure
4103 the write is wholly within register RAW_REG_NUM. */
4104 gdb_assert (raw_offset
+ pseudo_buf
.size () <= raw_reg_size
);
4105 put_frame_register_bytes (next_frame
, raw_reg_num
, raw_offset
, pseudo_buf
);
4111 pseudo_from_concat_raw (const frame_info_ptr
&next_frame
, int pseudo_reg_num
,
4112 int raw_reg_1_num
, int raw_reg_2_num
)
4114 value
*pseudo_reg_val
4115 = value::allocate_register (next_frame
, pseudo_reg_num
);
4118 value
*raw_reg_1_val
= value_of_register (raw_reg_1_num
, next_frame
);
4119 raw_reg_1_val
->contents_copy (pseudo_reg_val
, dst_offset
, 0,
4120 raw_reg_1_val
->type ()->length ());
4121 dst_offset
+= raw_reg_1_val
->type ()->length ();
4123 value
*raw_reg_2_val
= value_of_register (raw_reg_2_num
, next_frame
);
4124 raw_reg_2_val
->contents_copy (pseudo_reg_val
, dst_offset
, 0,
4125 raw_reg_2_val
->type ()->length ());
4126 dst_offset
+= raw_reg_2_val
->type ()->length ();
4128 gdb_assert (dst_offset
== pseudo_reg_val
->type ()->length ());
4130 return pseudo_reg_val
;
4136 pseudo_to_concat_raw (const frame_info_ptr
&next_frame
,
4137 gdb::array_view
<const gdb_byte
> pseudo_buf
,
4138 int raw_reg_1_num
, int raw_reg_2_num
)
4141 gdbarch
*arch
= frame_unwind_arch (next_frame
);
4143 int raw_reg_1_size
= register_size (arch
, raw_reg_1_num
);
4144 put_frame_register (next_frame
, raw_reg_1_num
,
4145 pseudo_buf
.slice (src_offset
, raw_reg_1_size
));
4146 src_offset
+= raw_reg_1_size
;
4148 int raw_reg_2_size
= register_size (arch
, raw_reg_2_num
);
4149 put_frame_register (next_frame
, raw_reg_2_num
,
4150 pseudo_buf
.slice (src_offset
, raw_reg_2_size
));
4151 src_offset
+= raw_reg_2_size
;
4153 gdb_assert (src_offset
== pseudo_buf
.size ());
4159 pseudo_from_concat_raw (const frame_info_ptr
&next_frame
, int pseudo_reg_num
,
4160 int raw_reg_1_num
, int raw_reg_2_num
,
4163 value
*pseudo_reg_val
4164 = value::allocate_register (next_frame
, pseudo_reg_num
);
4167 value
*raw_reg_1_val
= value_of_register (raw_reg_1_num
, next_frame
);
4168 raw_reg_1_val
->contents_copy (pseudo_reg_val
, dst_offset
, 0,
4169 raw_reg_1_val
->type ()->length ());
4170 dst_offset
+= raw_reg_1_val
->type ()->length ();
4172 value
*raw_reg_2_val
= value_of_register (raw_reg_2_num
, next_frame
);
4173 raw_reg_2_val
->contents_copy (pseudo_reg_val
, dst_offset
, 0,
4174 raw_reg_2_val
->type ()->length ());
4175 dst_offset
+= raw_reg_2_val
->type ()->length ();
4177 value
*raw_reg_3_val
= value_of_register (raw_reg_3_num
, next_frame
);
4178 raw_reg_3_val
->contents_copy (pseudo_reg_val
, dst_offset
, 0,
4179 raw_reg_3_val
->type ()->length ());
4180 dst_offset
+= raw_reg_3_val
->type ()->length ();
4182 gdb_assert (dst_offset
== pseudo_reg_val
->type ()->length ());
4184 return pseudo_reg_val
;
4190 pseudo_to_concat_raw (const frame_info_ptr
&next_frame
,
4191 gdb::array_view
<const gdb_byte
> pseudo_buf
,
4192 int raw_reg_1_num
, int raw_reg_2_num
, int raw_reg_3_num
)
4195 gdbarch
*arch
= frame_unwind_arch (next_frame
);
4197 int raw_reg_1_size
= register_size (arch
, raw_reg_1_num
);
4198 put_frame_register (next_frame
, raw_reg_1_num
,
4199 pseudo_buf
.slice (src_offset
, raw_reg_1_size
));
4200 src_offset
+= raw_reg_1_size
;
4202 int raw_reg_2_size
= register_size (arch
, raw_reg_2_num
);
4203 put_frame_register (next_frame
, raw_reg_2_num
,
4204 pseudo_buf
.slice (src_offset
, raw_reg_2_size
));
4205 src_offset
+= raw_reg_2_size
;
4207 int raw_reg_3_size
= register_size (arch
, raw_reg_3_num
);
4208 put_frame_register (next_frame
, raw_reg_3_num
,
4209 pseudo_buf
.slice (src_offset
, raw_reg_3_size
));
4210 src_offset
+= raw_reg_3_size
;
4212 gdb_assert (src_offset
== pseudo_buf
.size ());
4215 /* Implementation of the convenience function $_isvoid. */
4217 static struct value
*
4218 isvoid_internal_fn (struct gdbarch
*gdbarch
,
4219 const struct language_defn
*language
,
4220 void *cookie
, int argc
, struct value
**argv
)
4225 error (_("You must provide one argument for $_isvoid."));
4227 ret
= argv
[0]->type ()->code () == TYPE_CODE_VOID
;
4229 return value_from_longest (builtin_type (gdbarch
)->builtin_int
, ret
);
4232 /* Implementation of the convenience function $_creal. Extracts the
4233 real part from a complex number. */
4235 static struct value
*
4236 creal_internal_fn (struct gdbarch
*gdbarch
,
4237 const struct language_defn
*language
,
4238 void *cookie
, int argc
, struct value
**argv
)
4241 error (_("You must provide one argument for $_creal."));
4243 value
*cval
= argv
[0];
4244 type
*ctype
= check_typedef (cval
->type ());
4245 if (ctype
->code () != TYPE_CODE_COMPLEX
)
4246 error (_("expected a complex number"));
4247 return value_real_part (cval
);
4250 /* Implementation of the convenience function $_cimag. Extracts the
4251 imaginary part from a complex number. */
4253 static struct value
*
4254 cimag_internal_fn (struct gdbarch
*gdbarch
,
4255 const struct language_defn
*language
,
4256 void *cookie
, int argc
,
4257 struct value
**argv
)
4260 error (_("You must provide one argument for $_cimag."));
4262 value
*cval
= argv
[0];
4263 type
*ctype
= check_typedef (cval
->type ());
4264 if (ctype
->code () != TYPE_CODE_COMPLEX
)
4265 error (_("expected a complex number"));
4266 return value_imaginary_part (cval
);
/* Test the ranges_contain function.  The vector under test holds the
   two ranges [10, 14] and [20, 24]; each check probes a query range
   against them.  */

static void
test_ranges_contain ()
{
  std::vector<range> ranges;
  range r;

  /* [10, 14] */
  r.offset = 10;
  r.length = 5;
  ranges.push_back (r);

  /* [20, 24] */
  r.offset = 20;
  r.length = 5;
  ranges.push_back (r);

  /* [2, 6] — entirely before the first range.  */
  SELF_CHECK (!ranges_contain (ranges, 2, 5));
  /* [9, 13] — overlaps the start of [10, 14].  */
  SELF_CHECK (ranges_contain (ranges, 9, 5));
  /* [10, 11] */
  SELF_CHECK (ranges_contain (ranges, 10, 2));
  /* [10, 14] */
  SELF_CHECK (ranges_contain (ranges, 10, 5));
  /* [13, 18] — overlaps the end of [10, 14].  */
  SELF_CHECK (ranges_contain (ranges, 13, 6));
  /* [14, 18] */
  SELF_CHECK (ranges_contain (ranges, 14, 5));
  /* [15, 18] — falls in the gap between the two ranges.  */
  SELF_CHECK (!ranges_contain (ranges, 15, 4));
  /* [16, 19] */
  SELF_CHECK (!ranges_contain (ranges, 16, 4));
  /* [16, 21] — reaches into [20, 24].  */
  SELF_CHECK (ranges_contain (ranges, 16, 6));
  /* [21, 21] */
  SELF_CHECK (ranges_contain (ranges, 21, 1));
  /* [21, 25] */
  SELF_CHECK (ranges_contain (ranges, 21, 5));
  /* [26, 28] — entirely after the last range.  */
  SELF_CHECK (!ranges_contain (ranges, 26, 3));
}
4317 /* Check that RANGES contains the same ranges as EXPECTED. */
4320 check_ranges_vector (gdb::array_view
<const range
> ranges
,
4321 gdb::array_view
<const range
> expected
)
4323 return ranges
== expected
;
/* Test the insert_into_bit_range_vector function.  Each step inserts
   one more bit range and checks the resulting coalesced vector; the
   comment before each block shows the expected ranges afterwards.  */

static void
test_insert_into_bit_range_vector ()
{
  std::vector<range> ranges;

  /* [10, 14] */
  {
    insert_into_bit_range_vector (&ranges, 10, 5);
    static const range expected[] = {
      {10, 5}
    };
    SELF_CHECK (check_ranges_vector (ranges, expected));
  }

  /* [10, 14] — inserting a contained sub-range changes nothing.  */
  {
    insert_into_bit_range_vector (&ranges, 11, 4);
    static const range expected = {10, 5};
    SELF_CHECK (check_ranges_vector (ranges, expected));
  }

  /* [10, 14] [20, 24] */
  {
    insert_into_bit_range_vector (&ranges, 20, 5);
    static const range expected[] = {
      {10, 5},
      {20, 5},
    };
    SELF_CHECK (check_ranges_vector (ranges, expected));
  }

  /* [10, 14] [17, 24] — new range coalesces with [20, 24].  */
  {
    insert_into_bit_range_vector (&ranges, 17, 5);
    static const range expected[] = {
      {10, 5},
      {17, 8},
    };
    SELF_CHECK (check_ranges_vector (ranges, expected));
  }

  /* [2, 8] [10, 14] [17, 24] */
  {
    insert_into_bit_range_vector (&ranges, 2, 7);
    static const range expected[] = {
      {2, 7},
      {10, 5},
      {17, 8},
    };
    SELF_CHECK (check_ranges_vector (ranges, expected));
  }

  /* [2, 14] [17, 24] — [9, 9] bridges the first two ranges.  */
  {
    insert_into_bit_range_vector (&ranges, 9, 1);
    static const range expected[] = {
      {2, 13},
      {17, 8},
    };
    SELF_CHECK (check_ranges_vector (ranges, expected));
  }

  /* [2, 14] [17, 24] — inserting the same range again is a no-op.  */
  {
    insert_into_bit_range_vector (&ranges, 9, 1);
    static const range expected[] = {
      {2, 13},
      {17, 8},
    };
    SELF_CHECK (check_ranges_vector (ranges, expected));
  }

  /* [2, 33] — a big range swallowing everything, coalesced to one.  */
  {
    insert_into_bit_range_vector (&ranges, 4, 30);
    static const range expected = {2, 32};
    SELF_CHECK (check_ranges_vector (ranges, expected));
  }
}
4411 type
*type
= builtin_type (current_inferior ()->arch ())->builtin_int
;
4413 /* Verify that we can copy an entirely optimized out value, that may not have
4414 its contents allocated. */
4415 value_ref_ptr val
= release_value (value::allocate_optimized_out (type
));
4416 value_ref_ptr copy
= release_value (val
->copy ());
4418 SELF_CHECK (val
->entirely_optimized_out ());
4419 SELF_CHECK (copy
->entirely_optimized_out ());
4422 } /* namespace selftests */
4423 #endif /* GDB_SELF_TEST */
4425 void _initialize_values ();
4427 _initialize_values ()
4429 cmd_list_element
*show_convenience_cmd
4430 = add_cmd ("convenience", no_class
, show_convenience
, _("\
4431 Debugger convenience (\"$foo\") variables and functions.\n\
4432 Convenience variables are created when you assign them values;\n\
4433 thus, \"set $foo=1\" gives \"$foo\" the value 1. Values may be any type.\n\
4435 A few convenience variables are given values automatically:\n\
4436 \"$_\"holds the last address examined with \"x\" or \"info lines\",\n\
4437 \"$__\" holds the contents of the last address examined with \"x\"."
4440 Convenience functions are defined via the Python API."
4443 add_alias_cmd ("conv", show_convenience_cmd
, no_class
, 1, &showlist
);
4445 add_cmd ("values", no_set_class
, show_values
, _("\
4446 Elements of value history around item number IDX (or last ten)."),
4449 add_com ("init-if-undefined", class_vars
, init_if_undefined_command
, _("\
4450 Initialize a convenience variable if necessary.\n\
4451 init-if-undefined VARIABLE = EXPRESSION\n\
4452 Set an internal VARIABLE to the result of the EXPRESSION if it does not\n\
4453 exist or does not contain a value. The EXPRESSION is not evaluated if the\n\
4454 VARIABLE is already initialized."));
4456 add_prefix_cmd ("function", no_class
, function_command
, _("\
4457 Placeholder command for showing help on convenience functions."),
4458 &functionlist
, 0, &cmdlist
);
4460 add_internal_function ("_isvoid", _("\
4461 Check whether an expression is void.\n\
4462 Usage: $_isvoid (expression)\n\
4463 Return 1 if the expression is void, zero otherwise."),
4464 isvoid_internal_fn
, NULL
);
4466 add_internal_function ("_creal", _("\
4467 Extract the real part of a complex number.\n\
4468 Usage: $_creal (expression)\n\
4469 Return the real part of a complex number, the type depends on the\n\
4470 type of a complex number."),
4471 creal_internal_fn
, NULL
);
4473 add_internal_function ("_cimag", _("\
4474 Extract the imaginary part of a complex number.\n\
4475 Usage: $_cimag (expression)\n\
4476 Return the imaginary part of a complex number, the type depends on the\n\
4477 type of a complex number."),
4478 cimag_internal_fn
, NULL
);
4480 add_setshow_zuinteger_unlimited_cmd ("max-value-size",
4481 class_support
, &max_value_size
, _("\
4482 Set maximum sized value gdb will load from the inferior."), _("\
4483 Show maximum sized value gdb will load from the inferior."), _("\
4484 Use this to control the maximum size, in bytes, of a value that gdb\n\
4485 will load from the inferior. Setting this value to 'unlimited'\n\
4486 disables checking.\n\
4487 Setting this does not invalidate already allocated values, it only\n\
4488 prevents future values, larger than this size, from being allocated."),
4490 show_max_value_size
,
4491 &setlist
, &showlist
);
4492 set_show_commands vsize_limit
4493 = add_setshow_zuinteger_unlimited_cmd ("varsize-limit", class_support
,
4494 &max_value_size
, _("\
4495 Set the maximum number of bytes allowed in a variable-size object."), _("\
4496 Show the maximum number of bytes allowed in a variable-size object."), _("\
4497 Attempts to access an object whose size is not a compile-time constant\n\
4498 and exceeds this limit will cause an error."),
4499 NULL
, NULL
, &setlist
, &showlist
);
4500 deprecate_cmd (vsize_limit
.set
, "set max-value-size");
4503 selftests::register_test ("ranges_contain", selftests::test_ranges_contain
);
4504 selftests::register_test ("insert_into_bit_range_vector",
4505 selftests::test_insert_into_bit_range_vector
);
4506 selftests::register_test ("value_copy", selftests::test_value_copy
);
4509 /* Destroy any values currently allocated in a final cleanup instead
4510 of leaving it to global destructors, because that may be too
4511 late. For example, the destructors of xmethod values call into
4512 the Python runtime. */
4513 add_final_cleanup ([] ()
4515 all_values
.clear ();