Merge mozilla-central to autoland. a=merge CLOSED TREE
[gecko.git] / dom / canvas / TiedFields.h
blob2df225aeee01d80fe1df2d4570ed139bb6da5cbb
1 /* This Source Code Form is subject to the terms of the Mozilla Public
2 * License, v. 2.0. If a copy of the MPL was not distributed with this
3 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
5 #ifndef DOM_CANVAS_TIED_FIELDS_H
6 #define DOM_CANVAS_TIED_FIELDS_H
#include "TupleUtils.h"

#include <cstddef>
#include <tuple>
#include <type_traits>
#include <utility>
10 namespace mozilla {
12 // -
/**
 * TiedFields(T&) -> std::tuple<Fields&...>
 * TiedFields(const T&) -> std::tuple<const Fields&...>
 *
 * You can also overload TiedFields without adding T::MutTiedFields:
 * template<>
 * inline auto TiedFields<gfx::IntSize>(gfx::IntSize& a) {
 *   return std::tie(a.width, a.height);
 * }
 */
24 template <class T>
25 constexpr auto TiedFields(T& t) {
26 const auto fields = t.MutTiedFields();
27 return fields;
29 template <class T, class... Args, class Tup = std::tuple<Args&...>>
30 constexpr auto TiedFields(const T& t) {
31 // Uncast const to get mutable-fields tuple, but reapply const to tuple args.
32 // We should do better than this when C++ gets a solution other than macros.
33 const auto mutFields = TiedFields(const_cast<T&>(t));
34 return ToTupleOfConstRefs(mutFields);
/**
 * Returns true if all bytes in T are accounted for via the sizes of all tied
 * fields.
 * Returns false if there are bytes unaccounted for, which might indicate
 * either unaccounted-for padding or missing fields.
 * The goal is to check that TiedFields returns every field in T, and this
 * returns false if it suspects there are bytes that are not accounted for by
 * TiedFields.
 *
 * `constexpr` effectively cannot do math on pointers, so it's not possible to
 * figure out via `constexpr` whether fields are consecutive or dense.
 * However, we can at least compare `sizeof(T)` to the sum of `sizeof(Args...)`
 * for `TiedFields(T) -> std::tuple<Args...>`.
 *
 * See TiedFieldsExamples.
 */
52 template <class T>
53 constexpr bool AreAllBytesTiedFields() {
54 using fieldsT = decltype(TiedFields(std::declval<T>()));
55 const auto fields_size_sum = SizeofTupleArgs<fieldsT>::value;
56 const auto t_size = sizeof(T);
57 return fields_size_sum == t_size;
60 // It's also possible to determine AreAllBytesRecursiveTiedFields:
61 // https://hackmd.io/@jgilbert/B16qa0Fa9
63 // -
65 template <class StructT, size_t FieldId, size_t PrevFieldBeginOffset,
66 class PrevFieldT, size_t PrevFieldEndOffset, class FieldT,
67 size_t FieldAlignment = alignof(FieldT)>
68 struct FieldDebugInfoT {
69 static constexpr bool IsTightlyPacked() {
70 return PrevFieldEndOffset % FieldAlignment == 0;
74 template <class StructT, class TupleOfFields, size_t FieldId>
75 struct TightlyPackedFieldEndOffsetT {
76 template <size_t I>
77 using FieldTAt = std::remove_reference_t<
78 typename std::tuple_element<I, TupleOfFields>::type>;
80 static constexpr size_t Fn() {
81 constexpr auto num_fields = std::tuple_size_v<TupleOfFields>;
82 static_assert(FieldId < num_fields);
84 using PrevFieldT = FieldTAt<FieldId - 1>;
85 using FieldT = FieldTAt<FieldId>;
86 constexpr auto prev_field_end_offset =
87 TightlyPackedFieldEndOffsetT<StructT, TupleOfFields, FieldId - 1>::Fn();
88 constexpr auto prev_field_begin_offset =
89 prev_field_end_offset - sizeof(PrevFieldT);
91 using FieldDebugInfoT =
92 FieldDebugInfoT<StructT, FieldId, prev_field_begin_offset, PrevFieldT,
93 prev_field_end_offset, FieldT>;
94 static_assert(FieldDebugInfoT::IsTightlyPacked(),
95 "This field was not tightly packed. Is there padding between "
96 "it and its predecessor?");
98 return prev_field_end_offset + sizeof(FieldT);
102 template <class StructT, class TupleOfFields>
103 struct TightlyPackedFieldEndOffsetT<StructT, TupleOfFields, 0> {
104 static constexpr size_t Fn() {
105 using FieldT = typename std::tuple_element<0, TupleOfFields>::type;
106 return sizeof(FieldT);
109 template <class StructT, class TupleOfFields>
110 struct TightlyPackedFieldEndOffsetT<StructT, TupleOfFields, size_t(-1)> {
111 static constexpr size_t Fn() {
112 // -1 means tuple_size_v<TupleOfFields> -> 0.
113 static_assert(sizeof(StructT) == 0);
114 return 0;
118 template <class StructT>
119 constexpr bool AssertTiedFieldsAreExhaustive() {
120 using TupleOfFields = decltype(std::declval<StructT>().MutTiedFields());
121 constexpr auto num_fields = std::tuple_size_v<TupleOfFields>;
122 constexpr auto end_offset_of_last_field =
123 TightlyPackedFieldEndOffsetT<StructT, TupleOfFields,
124 num_fields - 1>::Fn();
125 static_assert(
126 end_offset_of_last_field == sizeof(StructT),
127 "Incorrect field list in MutTiedFields()? (or not tightly-packed?)");
128 return true; // Support `static_assert(AssertTiedFieldsAreExhaustive())`.
131 // -
/**
 * Padding<T> can be used to pad out a struct so that it's not implicitly
 * padded by struct layout rules.
 * You can also just add your padding to TiedFields, but by explicitly typing
 * padding like this, serialization can make a choice whether to copy Padding,
 * or instead to omit the copy.
 *
 * Omitting the copy isn't always faster. Consider:
 * struct Entry {
 *   uint16_t key;
 *   Padding<uint16_t> padding;
 *   uint32_t val;
 *   auto MutTiedFields() { return std::tie(key, padding, val); }
 * };
 * If you serialize Padding, the serialized size is 8, and the compiler will
 * optimize serialization to a single 8-byte memcpy.
 * If your serialization omits Padding, the serialized size of Entry shrinks
 * by 25%. If you have a big list of Entrys, maybe this is a big savings!
 * However, by optimizing for size here you sacrifice speed, because this
 * splits the single memcpy into two: a 2-byte memcpy and a 4-byte memcpy.
 *
 * Explicitly marking padding gives callers the option of choosing.
 */
156 template <class T>
157 struct Padding {
158 T ignored;
160 friend constexpr bool operator==(const Padding&, const Padding&) {
161 return true;
163 friend constexpr bool operator<(const Padding&, const Padding&) {
164 return false;
167 static_assert(sizeof(Padding<bool>) == 1);
168 static_assert(sizeof(Padding<bool[2]>) == 2);
169 static_assert(sizeof(Padding<int>) == 4);
171 // -
173 namespace TiedFieldsExamples {
175 struct Cat {
176 int i;
177 bool b;
179 constexpr auto MutTiedFields() { return std::tie(i, b); }
181 static_assert(sizeof(Cat) == 8);
182 static_assert(!AreAllBytesTiedFields<Cat>());
184 struct Dog {
185 bool b;
186 int i;
188 constexpr auto MutTiedFields() { return std::tie(i, b); }
190 static_assert(sizeof(Dog) == 8);
191 static_assert(!AreAllBytesTiedFields<Dog>());
193 struct Fish {
194 bool b;
195 bool padding[3];
196 int i;
198 constexpr auto MutTiedFields() { return std::tie(i, b, padding); }
200 static_assert(sizeof(Fish) == 8);
201 static_assert(AreAllBytesTiedFields<Fish>());
203 struct Eel { // Like a Fish, but you can skip serializing the padding.
204 bool b;
205 Padding<bool> padding[3];
206 int i;
208 constexpr auto MutTiedFields() { return std::tie(i, b, padding); }
210 static_assert(sizeof(Eel) == 8);
211 static_assert(AreAllBytesTiedFields<Eel>());
213 // -
215 // #define LETS_USE_BIT_FIELDS
216 #ifdef LETS_USE_BIT_FIELDS
217 # undef LETS_USE_BIT_FIELDS
219 struct Platypus {
220 short s : 1;
221 short s2 : 1;
222 int i;
224 constexpr auto MutTiedFields() {
225 return std::tie(s, s2, i); // Error: Can't take reference to bit-field.
229 #endif
231 // -
233 struct FishTank {
234 Fish f;
235 int i2;
237 constexpr auto MutTiedFields() { return std::tie(f, i2); }
239 static_assert(sizeof(FishTank) == 12);
240 static_assert(AreAllBytesTiedFields<FishTank>());
242 struct CatCarrier {
243 Cat c;
244 int i2;
246 constexpr auto MutTiedFields() { return std::tie(c, i2); }
248 static_assert(sizeof(CatCarrier) == 12);
249 static_assert(AreAllBytesTiedFields<CatCarrier>());
250 static_assert(
251 !AreAllBytesTiedFields<decltype(CatCarrier::c)>()); // BUT BEWARE THIS!
252 // For example, if we had AreAllBytesRecursiveTiedFields:
253 // static_assert(!AreAllBytesRecursiveTiedFields<CatCarrier>());
255 } // namespace TiedFieldsExamples
256 } // namespace mozilla
258 #endif // DOM_CANVAS_TIED_FIELDS_H