#include "memoryspace.hpp"
#include "minmax.hpp"
#include "serialization.hpp"

#include <algorithm>
#include <cstring>
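
//Read or write one value of type T at the given address. The linear template parameter
//selects between the mapped address space (lookup) and the linearized address space
//(lookup_linear). Reads outside any region yield 0; writes outside any region or into a
//read-only region fail.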
template<typename T, bool linear> inline T internal_read(memory_space& m, uint64_t addr)
{
	std::pair<memory_space::region*, uint64_t> g;
	if(linear)
		g = m.lookup_linear(addr);
	else
		g = m.lookup(addr);
	if(!g.first || g.second + sizeof(T) > g.first->size)
		return 0;
	if(g.first->direct_map)
		return serialization::read_endian<T>(g.first->direct_map + g.second, g.first->endian);
	else {
		T buf;
		g.first->read(g.second, &buf, sizeof(T));
		return serialization::read_endian<T>(&buf, g.first->endian);
	}
}

template<typename T, bool linear> inline bool internal_write(memory_space& m, uint64_t addr, T value)
{
	std::pair<memory_space::region*, uint64_t> g;
	if(linear)
		g = m.lookup_linear(addr);
	else
		g = m.lookup(addr);
	if(!g.first || g.first->readonly || g.second + sizeof(T) > g.first->size)
		return false;
	if(g.first->direct_map)
		serialization::write_endian(g.first->direct_map + g.second, value, g.first->endian);
	else {
		T buf;
		serialization::write_endian(&buf, value, g.first->endian);
		g.first->write(g.second, &buf, sizeof(T));
	}
	return true;
}
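
//Read/write a range of bytes within a single region, starting at the given offset. For
//directly mapped regions the copy is clipped at the end of the region (reads zero-fill
//the remainder of the buffer); otherwise the region's own read/write method is used.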
void read_range_r(memory_space::region& r, uint64_t offset, void* buffer, size_t bsize)
{
	if(r.direct_map) {
		if(offset >= r.size) {
			memset(buffer, 0, bsize);
			return;
		}
		uint64_t maxcopy = min(static_cast<uint64_t>(bsize), r.size - offset);
		memcpy(buffer, r.direct_map + offset, maxcopy);
		if(maxcopy < bsize)
			memset(reinterpret_cast<char*>(buffer) + maxcopy, 0, bsize - maxcopy);
	} else
		r.read(offset, buffer, bsize);
}

bool write_range_r(memory_space::region& r, uint64_t offset, const void* buffer, size_t bsize)
{
	if(r.readonly)
		return false;
	if(r.direct_map) {
		if(offset >= r.size)
			return false;
		uint64_t maxcopy = min(static_cast<uint64_t>(bsize), r.size - offset);
		memcpy(r.direct_map + offset, buffer, maxcopy);
		return true;
	} else
		return r.write(offset, buffer, bsize);
}

memory_space::region::~region() throw()
{
}
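
//Default region read/write: operate on direct_map when present, zero-filling reads past
//the end of the region and refusing writes that are read-only or out of range. Regions
//without a direct mapping are expected to provide their own read/write.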
void memory_space::region::read(uint64_t offset, void* buffer, size_t tsize)
{
	if(!direct_map || offset >= size) {
		memset(buffer, 0, tsize);
		return;
	}
	uint64_t maxcopy = min(static_cast<uint64_t>(tsize), size - offset);
	memcpy(buffer, direct_map + offset, maxcopy);
	if(maxcopy < tsize)
		memset(reinterpret_cast<char*>(buffer) + maxcopy, 0, tsize - maxcopy);
}

bool memory_space::region::write(uint64_t offset, const void* buffer, size_t tsize)
{
	if(!direct_map || readonly || offset >= size)
		return false;
	uint64_t maxcopy = min(static_cast<uint64_t>(tsize), size - offset);
	memcpy(direct_map + offset, buffer, maxcopy);
	return true;
}
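
//Look up the region containing a mapped address. u_regions is kept sorted by base
//address (see set_regions), so a binary search finds the region; the result is the
//region and the offset within it, or (NULL, 0) if the address is not mapped.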
std::pair<memory_space::region*, uint64_t> memory_space::lookup(uint64_t address)
{
	threads::alock m(mlock);
	size_t lb = 0;
	size_t ub = u_regions.size();
	while(lb < ub) {
		size_t mb = (lb + ub) / 2;
		if(u_regions[mb]->base > address) {
			ub = mb;
			continue;
		}
		if(u_regions[mb]->last_address() < address) {
			lb = mb + 1;
			continue;
		}
		return std::make_pair(u_regions[mb], address - u_regions[mb]->base);
	}
	return std::make_pair(reinterpret_cast<region*>(NULL), 0);
}

std::pair<memory_space::region*, uint64_t> memory_space::lookup_linear(uint64_t linear)
{
	threads::alock m(mlock);
	if(linear >= linear_size)
		return std::make_pair(reinterpret_cast<region*>(NULL), 0);
	size_t lb = 0;
	size_t ub = linear_bases.size() - 1;
	while(lb < ub) {
		size_t mb = (lb + ub) / 2;
		if(linear_bases[mb] > linear) {
			ub = mb;
			continue;
		}
		if(linear_bases[mb + 1] <= linear) {
			lb = mb + 1;
			continue;
		}
		return std::make_pair(u_lregions[mb], linear - linear_bases[mb]);
	}
	return std::make_pair(reinterpret_cast<region*>(NULL), 0);
}
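
//Copy the entire linearized address space into the buffer by walking the linear regions
//in order. The buffer must have room for linear_size bytes.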
void memory_space::read_all_linear_memory(uint8_t* buffer)
{
	auto g = lookup_linear(0);
	uint64_t off = 0;
	while(g.first) {
		read_range_r(*g.first, g.second, buffer + off, g.first->size);
		off += g.first->size;
		g = lookup_linear(off);
	}
}
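
//Shorthand for the read/write specializations below.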
#define MSR memory_space::read
#define MSW memory_space::write
#define MSRL memory_space::read_linear
#define MSWL memory_space::write_linear

template<> int8_t MSR (uint64_t address) { return internal_read<int8_t, false>(*this, address); }
template<> uint8_t MSR (uint64_t address) { return internal_read<uint8_t, false>(*this, address); }
template<> int16_t MSR (uint64_t address) { return internal_read<int16_t, false>(*this, address); }
template<> uint16_t MSR (uint64_t address) { return internal_read<uint16_t, false>(*this, address); }
template<> ss_int24_t MSR (uint64_t address) { return internal_read<ss_int24_t, false>(*this, address); }
template<> ss_uint24_t MSR (uint64_t address) { return internal_read<ss_uint24_t, false>(*this, address); }
template<> int32_t MSR (uint64_t address) { return internal_read<int32_t, false>(*this, address); }
template<> uint32_t MSR (uint64_t address) { return internal_read<uint32_t, false>(*this, address); }
template<> int64_t MSR (uint64_t address) { return internal_read<int64_t, false>(*this, address); }
template<> uint64_t MSR (uint64_t address) { return internal_read<uint64_t, false>(*this, address); }
template<> float MSR (uint64_t address) { return internal_read<float, false>(*this, address); }
template<> double MSR (uint64_t address) { return internal_read<double, false>(*this, address); }
template<> bool MSW (uint64_t a, int8_t v) { return internal_write<int8_t, false>(*this, a, v); }
template<> bool MSW (uint64_t a, uint8_t v) { return internal_write<uint8_t, false>(*this, a, v); }
template<> bool MSW (uint64_t a, int16_t v) { return internal_write<int16_t, false>(*this, a, v); }
template<> bool MSW (uint64_t a, uint16_t v) { return internal_write<uint16_t, false>(*this, a, v); }
template<> bool MSW (uint64_t a, ss_int24_t v) { return internal_write<ss_int24_t, false>(*this, a, v); }
template<> bool MSW (uint64_t a, ss_uint24_t v) { return internal_write<ss_uint24_t, false>(*this, a, v); }
template<> bool MSW (uint64_t a, int32_t v) { return internal_write<int32_t, false>(*this, a, v); }
template<> bool MSW (uint64_t a, uint32_t v) { return internal_write<uint32_t, false>(*this, a, v); }
template<> bool MSW (uint64_t a, int64_t v) { return internal_write<int64_t, false>(*this, a, v); }
template<> bool MSW (uint64_t a, uint64_t v) { return internal_write<uint64_t, false>(*this, a, v); }
template<> bool MSW (uint64_t a, float v) { return internal_write<float, false>(*this, a, v); }
template<> bool MSW (uint64_t a, double v) { return internal_write<double, false>(*this, a, v); }
template<> int8_t MSRL (uint64_t address) { return internal_read<int8_t, true>(*this, address); }
template<> uint8_t MSRL (uint64_t address) { return internal_read<uint8_t, true>(*this, address); }
template<> int16_t MSRL (uint64_t address) { return internal_read<int16_t, true>(*this, address); }
template<> uint16_t MSRL (uint64_t address) { return internal_read<uint16_t, true>(*this, address); }
template<> ss_int24_t MSRL (uint64_t address) { return internal_read<ss_int24_t, true>(*this, address); }
template<> ss_uint24_t MSRL (uint64_t address) { return internal_read<ss_uint24_t, true>(*this, address); }
template<> int32_t MSRL (uint64_t address) { return internal_read<int32_t, true>(*this, address); }
template<> uint32_t MSRL (uint64_t address) { return internal_read<uint32_t, true>(*this, address); }
template<> int64_t MSRL (uint64_t address) { return internal_read<int64_t, true>(*this, address); }
template<> uint64_t MSRL (uint64_t address) { return internal_read<uint64_t, true>(*this, address); }
template<> float MSRL (uint64_t address) { return internal_read<float, true>(*this, address); }
template<> double MSRL (uint64_t address) { return internal_read<double, true>(*this, address); }
template<> bool MSWL (uint64_t a, int8_t v) { return internal_write<int8_t, true>(*this, a, v); }
template<> bool MSWL (uint64_t a, uint8_t v) { return internal_write<uint8_t, true>(*this, a, v); }
template<> bool MSWL (uint64_t a, int16_t v) { return internal_write<int16_t, true>(*this, a, v); }
template<> bool MSWL (uint64_t a, uint16_t v) { return internal_write<uint16_t, true>(*this, a, v); }
template<> bool MSWL (uint64_t a, ss_int24_t v) { return internal_write<ss_int24_t, true>(*this, a, v); }
template<> bool MSWL (uint64_t a, ss_uint24_t v) { return internal_write<ss_uint24_t, true>(*this, a, v); }
template<> bool MSWL (uint64_t a, int32_t v) { return internal_write<int32_t, true>(*this, a, v); }
template<> bool MSWL (uint64_t a, uint32_t v) { return internal_write<uint32_t, true>(*this, a, v); }
template<> bool MSWL (uint64_t a, int64_t v) { return internal_write<int64_t, true>(*this, a, v); }
template<> bool MSWL (uint64_t a, uint64_t v) { return internal_write<uint64_t, true>(*this, a, v); }
template<> bool MSWL (uint64_t a, float v) { return internal_write<float, true>(*this, a, v); }
template<> bool MSWL (uint64_t a, double v) { return internal_write<double, true>(*this, a, v); }
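
//Range read/write entry points: resolve the containing region once, then delegate to
//read_range_r/write_range_r. Reads from unmapped addresses zero-fill the buffer; writes
//to unmapped addresses fail.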
void memory_space::read_range(uint64_t address, void* buffer, size_t bsize)
{
	auto g = lookup(address);
	if(!g.first) {
		memset(buffer, 0, bsize);
		return;
	}
	read_range_r(*g.first, g.second, buffer, bsize);
}

bool memory_space::write_range(uint64_t address, const void* buffer, size_t bsize)
{
	auto g = lookup(address);
	if(!g.first)
		return false;
	return write_range_r(*g.first, g.second, buffer, bsize);
}

void memory_space::read_range_linear(uint64_t address, void* buffer, size_t bsize)
{
	auto g = lookup_linear(address);
	if(!g.first) {
		memset(buffer, 0, bsize);
		return;
	}
	read_range_r(*g.first, g.second, buffer, bsize);
}

bool memory_space::write_range_linear(uint64_t address, const void* buffer, size_t bsize)
{
	auto g = lookup_linear(address);
	if(!g.first)
		return false;
	return write_range_r(*g.first, g.second, buffer, bsize);
}

memory_space::region* memory_space::lookup_n(size_t n)
{
	threads::alock m(mlock);
	if(n >= u_regions.size())
		return NULL;
	return u_regions[n];
}

std::list<memory_space::region*> memory_space::get_regions()
{
	threads::alock m(mlock);
	std::list<region*> r;
	for(auto i : u_regions)
		r.push_back(i);
	return r;
}
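
//Return a host pointer to the backing memory of [base, base + size), or NULL if the
//range wraps around, does not stay within one region, or that region has no direct map.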
char* memory_space::get_physical_mapping(uint64_t base, uint64_t size)
{
	uint64_t last = base + size - 1;
	if(last < base)
		return NULL;	//Warps around.
	auto g1 = lookup(base);
	auto g2 = lookup(last);
	if(g1.first != g2.first)
		return NULL;	//Not the same VMA.
	if(!g1.first || !g1.first->direct_map)
		return NULL;	//Not mapped.
	return reinterpret_cast<char*>(g1.first->direct_map + g1.second);
}
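
//Install a new set of regions: rebuild the main array sorted by base address (lookup
//depends on this) and recompute the linear region/base tables from the regions that are
//writable and not special.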
void memory_space::set_regions(const std::list<memory_space::region*>& regions)
{
	threads::alock m(mlock);
	std::vector<region*> n_regions;
	std::vector<region*> n_lregions;
	std::vector<uint64_t> n_linear_bases;
	//Calculate array sizes.
	n_regions.resize(regions.size());
	size_t linear_c = 0;
	for(auto i : regions)
		if(!i->readonly && !i->special)
			linear_c++;
	n_lregions.resize(linear_c);
	n_linear_bases.resize(linear_c + 1);

	//Fill the main array (it must be sorted!).
	size_t i = 0;
	for(auto j : regions)
		n_regions[i++] = j;
	std::sort(n_regions.begin(), n_regions.end(),
		[](region* a, region* b) -> bool { return a->base < b->base; });

	//Fill linear address arrays from the main array.
	i = 0;
	uint64_t base = 0;
	for(auto j : n_regions) {
		if(j->readonly || j->special)
			continue;
		n_lregions[i] = j;
		n_linear_bases[i] = base;
		base = base + j->size;
		i++;
	}
	n_linear_bases[i] = base;
	linear_size = base;

	std::swap(u_regions, n_regions);
	std::swap(u_lregions, n_lregions);
	std::swap(linear_bases, n_linear_bases);
}
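
//Detect the host endianness: 258 is 0x0102, so the first byte of the uint16_t is 1 on a
//big-endian host (return 1) and 2 on a little-endian one (return -1).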
int memory_space::_get_system_endian()
{
	uint16_t magic = 258;
	return (*reinterpret_cast<uint8_t*>(&magic) == 1) ? 1 : -1;
}

int memory_space::sysendian = 0;

std::string memory_space::address_to_textual(uint64_t addr)
{
	threads::alock m(mlock);
	for(auto i : u_regions) {
		if(addr >= i->base && addr <= i->last_address()) {
			return (stringfmt() << i->name << "+" << std::hex << (addr - i->base)).str();
		}
	}
	return (stringfmt() << std::hex << addr).str();
}

memory_space::region_direct::region_direct(const std::string& _name, uint64_t _base, int _endian,
	unsigned char* _memory, size_t _size, bool _readonly)
{
	name = _name;
	base = _base;
	endian = _endian;
	direct_map = _memory;
	size = _size;
	readonly = _readonly;
	special = false;
}

memory_space::region_direct::~region_direct() throw() {}
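
//Helpers for bounding strided accesses: given a block of size bytes repeated rows times
//with a given stride, compute a conservative [low, high] range of addresses that can be
//touched, treating wraparound pessimistically.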
const static uint64_t p63 = 0x8000000000000000ULL;
const static uint64_t p64m1 = 0xFFFFFFFFFFFFFFFFULL;

void block_bounds(uint64_t base, uint64_t size, uint64_t& low, uint64_t& high)
{
	if(base + size >= base) {
		//Does not warp around.
		low = min(low, base);
		high = max(high, base + size - 1);
	} else if(base + size == 0) {
		//Just barely avoids warparound.
		low = min(low, base);
		high = 0xFFFFFFFFFFFFFFFFULL;
	} else {
		//Fully warps around.
		low = 0;
		high = 0xFFFFFFFFFFFFFFFFULL;
	}
}

//rows and stride are nonzero.
std::pair<uint64_t, uint64_t> base_bounds(uint64_t base, uint64_t rows, uint64_t stride)
{
	uint64_t space = p64m1 - base;
	if(space / stride < rows - 1)
		return std::make_pair(0, p64m1);
	return std::make_pair(base, base + (rows - 1) * stride);
}
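
//Compute the lowest and highest address that may be touched when accessing rows blocks
//of size bytes, spaced stride bytes apart, starting at base.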
std::pair<uint64_t, uint64_t> memoryspace_row_bounds(uint64_t base, uint64_t size, uint64_t rows,
	uint64_t stride)
{
	uint64_t low = p64m1;
	uint64_t high = 0;
	uint64_t lb;
	uint64_t hb;
	if(!stride || rows <= 1) {
		//Case I: Stride is 0.
		//Just one block is accessed.
		lb = base;
		hb = base;
		block_bounds(base, size, low, high);
	} else if(stride == p63) {
		//Case II: Stride is 2^63.
		//If there are multiple blocks, there are 2 accessed blocks, [base, base+size) and
		//[base+X, base+size+X), where X=2^63.
		lb = base;
		hb = (rows > 1) ? (base + p63) : base;
	} else if(stride > p63) {
		//Case III: Stride is negative.
		//Flip the problem around to get stride that is positive.
		auto g = base_bounds(p64m1 - base, rows, ~stride + 1);
		lb = p64m1 - g.first;
		hb = p64m1 - g.second;
	} else {
		//Case IV: Stride is positive.
		auto g = base_bounds(base, rows, stride);
		lb = g.first;
		hb = g.second;
	}
	block_bounds(lb, size, low, high);
	block_bounds(hb, size, low, high);
	return std::make_pair(low, high);
}

bool memoryspace_row_limited(uint64_t base, uint64_t size, uint64_t rows, uint64_t stride, uint64_t limit)
{
	auto g = memoryspace_row_bounds(base, size, rows, stride);
	if(g.first > g.second)
		return true;	//Nothing is accessed.
	return (g.second < limit);
}