Implicitly redirect cmdhelp includes to corresponding JSON files
[lsnes.git] / src / library / memoryspace.cpp
blob293595db80035f4d83af2b08c1e887fef0631e91
#include "memoryspace.hpp"
#include "minmax.hpp"
#include "serialization.hpp"
#include "int24.hpp"
#include "string.hpp"
#include <algorithm>
#include <cstdint>
#include <cstring>
8 namespace
10 template<typename T, bool linear> inline T internal_read(memory_space& m, uint64_t addr)
12 std::pair<memory_space::region*, uint64_t> g;
13 if(linear)
14 g = m.lookup_linear(addr);
15 else
16 g = m.lookup(addr);
17 if(!g.first || g.second + sizeof(T) > g.first->size)
18 return 0;
19 if(g.first->direct_map)
20 return serialization::read_endian<T>(g.first->direct_map + g.second, g.first->endian);
21 else {
22 T buf;
23 g.first->read(g.second, &buf, sizeof(T));
24 return serialization::read_endian<T>(&buf, g.first->endian);
28 template<typename T, bool linear> inline bool internal_write(memory_space& m, uint64_t addr, T value)
30 std::pair<memory_space::region*, uint64_t> g;
31 if(linear)
32 g = m.lookup_linear(addr);
33 else
34 g = m.lookup(addr);
35 if(!g.first || g.first->readonly || g.second + sizeof(T) > g.first->size)
36 return false;
37 if(g.first->direct_map)
38 serialization::write_endian(g.first->direct_map + g.second, value, g.first->endian);
39 else {
40 T buf;
41 serialization::write_endian(&buf, value, g.first->endian);
42 g.first->write(g.second, &buf, sizeof(T));
44 return true;
47 void read_range_r(memory_space::region& r, uint64_t offset, void* buffer, size_t bsize)
49 if(r.direct_map) {
50 if(offset >= r.size) {
51 memset(buffer, 0, bsize);
52 return;
54 uint64_t maxcopy = min(static_cast<uint64_t>(bsize), r.size - offset);
55 memcpy(buffer, r.direct_map + offset, maxcopy);
56 if(maxcopy < bsize)
57 memset(reinterpret_cast<char*>(buffer) + maxcopy, 0, bsize - maxcopy);
58 } else
59 r.read(offset, buffer, bsize);
62 bool write_range_r(memory_space::region& r, uint64_t offset, const void* buffer, size_t bsize)
64 if(r.readonly)
65 return false;
66 if(r.direct_map) {
67 if(offset >= r.size)
68 return false;
69 uint64_t maxcopy = min(static_cast<uint64_t>(bsize), r.size - offset);
70 memcpy(r.direct_map + offset, buffer, maxcopy);
71 return true;
72 } else
73 return r.write(offset, buffer, bsize);
77 memory_space::region::~region() throw()
81 void memory_space::region::read(uint64_t offset, void* buffer, size_t tsize)
83 if(!direct_map || offset >= size) {
84 memset(buffer, 0, tsize);
85 return;
87 uint64_t maxcopy = min(static_cast<uint64_t>(tsize), size - offset);
88 memcpy(buffer, direct_map + offset, maxcopy);
89 if(maxcopy < tsize)
90 memset(reinterpret_cast<char*>(buffer) + maxcopy, 0, tsize - maxcopy);
93 bool memory_space::region::write(uint64_t offset, const void* buffer, size_t tsize)
95 if(!direct_map || readonly || offset >= size)
96 return false;
97 uint64_t maxcopy = min(static_cast<uint64_t>(tsize), size - offset);
98 memcpy(direct_map + offset, buffer, maxcopy);
99 return true;
102 std::pair<memory_space::region*, uint64_t> memory_space::lookup(uint64_t address)
104 threads::alock m(mlock);
105 size_t lb = 0;
106 size_t ub = u_regions.size();
107 while(lb < ub) {
108 size_t mb = (lb + ub) / 2;
109 if(u_regions[mb]->base > address) {
110 ub = mb;
111 continue;
113 if(u_regions[mb]->last_address() < address) {
114 lb = mb + 1;
115 continue;
117 return std::make_pair(u_regions[mb], address - u_regions[mb]->base);
119 return std::make_pair(reinterpret_cast<region*>(NULL), 0);
122 std::pair<memory_space::region*, uint64_t> memory_space::lookup_linear(uint64_t linear)
124 threads::alock m(mlock);
125 if(linear >= linear_size)
126 return std::make_pair(reinterpret_cast<region*>(NULL), 0);
127 size_t lb = 0;
128 size_t ub = linear_bases.size() - 1;
129 while(lb < ub) {
130 size_t mb = (lb + ub) / 2;
131 if(linear_bases[mb] > linear) {
132 ub = mb;
133 continue;
135 if(linear_bases[mb + 1] <= linear) {
136 lb = mb + 1;
137 continue;
139 return std::make_pair(u_lregions[mb], linear - linear_bases[mb]);
141 return std::make_pair(reinterpret_cast<region*>(NULL), 0);
144 void memory_space::read_all_linear_memory(uint8_t* buffer)
146 auto g = lookup_linear(0);
147 size_t off = 0;
148 while(g.first) {
149 read_range_r(*g.first, g.second, buffer + off, g.first->size);
150 off += g.first->size;
151 g = lookup_linear(off);
155 #define MSR memory_space::read
156 #define MSW memory_space::write
157 #define MSRL memory_space::read_linear
158 #define MSWL memory_space::write_linear
160 template<> int8_t MSR (uint64_t address) { return internal_read<int8_t, false>(*this, address); }
161 template<> uint8_t MSR (uint64_t address) { return internal_read<uint8_t, false>(*this, address); }
162 template<> int16_t MSR (uint64_t address) { return internal_read<int16_t, false>(*this, address); }
163 template<> uint16_t MSR (uint64_t address) { return internal_read<uint16_t, false>(*this, address); }
164 template<> ss_int24_t MSR (uint64_t address) { return internal_read<ss_int24_t, false>(*this, address); }
165 template<> ss_uint24_t MSR (uint64_t address) { return internal_read<ss_uint24_t, false>(*this, address); }
166 template<> int32_t MSR (uint64_t address) { return internal_read<int32_t, false>(*this, address); }
167 template<> uint32_t MSR (uint64_t address) { return internal_read<uint32_t, false>(*this, address); }
168 template<> int64_t MSR (uint64_t address) { return internal_read<int64_t, false>(*this, address); }
169 template<> uint64_t MSR (uint64_t address) { return internal_read<uint64_t, false>(*this, address); }
170 template<> float MSR (uint64_t address) { return internal_read<float, false>(*this, address); }
171 template<> double MSR (uint64_t address) { return internal_read<double, false>(*this, address); }
172 template<> bool MSW (uint64_t a, int8_t v) { return internal_write<int8_t, false>(*this, a, v); }
173 template<> bool MSW (uint64_t a, uint8_t v) { return internal_write<uint8_t, false>(*this, a, v); }
174 template<> bool MSW (uint64_t a, int16_t v) { return internal_write<int16_t, false>(*this, a, v); }
175 template<> bool MSW (uint64_t a, uint16_t v) { return internal_write<uint16_t, false>(*this, a, v); }
176 template<> bool MSW (uint64_t a, ss_int24_t v) { return internal_write<ss_int24_t, false>(*this, a, v); }
177 template<> bool MSW (uint64_t a, ss_uint24_t v) { return internal_write<ss_uint24_t, false>(*this, a, v); }
178 template<> bool MSW (uint64_t a, int32_t v) { return internal_write<int32_t, false>(*this, a, v); }
179 template<> bool MSW (uint64_t a, uint32_t v) { return internal_write<uint32_t, false>(*this, a, v); }
180 template<> bool MSW (uint64_t a, int64_t v) { return internal_write<int64_t, false>(*this, a, v); }
181 template<> bool MSW (uint64_t a, uint64_t v) { return internal_write<uint64_t, false>(*this, a, v); }
182 template<> bool MSW (uint64_t a, float v) { return internal_write<float, false>(*this, a, v); }
183 template<> bool MSW (uint64_t a, double v) { return internal_write<double, false>(*this, a, v); }
184 template<> int8_t MSRL (uint64_t address) { return internal_read<int8_t, true>(*this, address); }
185 template<> uint8_t MSRL (uint64_t address) { return internal_read<uint8_t, true>(*this, address); }
186 template<> int16_t MSRL (uint64_t address) { return internal_read<int16_t, true>(*this, address); }
187 template<> uint16_t MSRL (uint64_t address) { return internal_read<uint16_t, true>(*this, address); }
188 template<> ss_int24_t MSRL (uint64_t address) { return internal_read<ss_int24_t, true>(*this, address); }
189 template<> ss_uint24_t MSRL (uint64_t address) { return internal_read<ss_uint24_t, true>(*this, address); }
190 template<> int32_t MSRL (uint64_t address) { return internal_read<int32_t, true>(*this, address); }
191 template<> uint32_t MSRL (uint64_t address) { return internal_read<uint32_t, true>(*this, address); }
192 template<> int64_t MSRL (uint64_t address) { return internal_read<int64_t, true>(*this, address); }
193 template<> uint64_t MSRL (uint64_t address) { return internal_read<uint64_t, true>(*this, address); }
194 template<> float MSRL (uint64_t address) { return internal_read<float, true>(*this, address); }
195 template<> double MSRL (uint64_t address) { return internal_read<double, true>(*this, address); }
196 template<> bool MSWL (uint64_t a, int8_t v) { return internal_write<int8_t, true>(*this, a, v); }
197 template<> bool MSWL (uint64_t a, uint8_t v) { return internal_write<uint8_t, true>(*this, a, v); }
198 template<> bool MSWL (uint64_t a, int16_t v) { return internal_write<int16_t, true>(*this, a, v); }
199 template<> bool MSWL (uint64_t a, uint16_t v) { return internal_write<uint16_t, true>(*this, a, v); }
200 template<> bool MSWL (uint64_t a, ss_int24_t v) { return internal_write<ss_int24_t, true>(*this, a, v); }
201 template<> bool MSWL (uint64_t a, ss_uint24_t v) { return internal_write<ss_uint24_t, true>(*this, a, v); }
202 template<> bool MSWL (uint64_t a, int32_t v) { return internal_write<int32_t, true>(*this, a, v); }
203 template<> bool MSWL (uint64_t a, uint32_t v) { return internal_write<uint32_t, true>(*this, a, v); }
204 template<> bool MSWL (uint64_t a, int64_t v) { return internal_write<int64_t, true>(*this, a, v); }
205 template<> bool MSWL (uint64_t a, uint64_t v) { return internal_write<uint64_t, true>(*this, a, v); }
206 template<> bool MSWL (uint64_t a, float v) { return internal_write<float, true>(*this, a, v); }
207 template<> bool MSWL (uint64_t a, double v) { return internal_write<double, true>(*this, a, v); }
209 void memory_space::read_range(uint64_t address, void* buffer, size_t bsize)
211 auto g = lookup(address);
212 if(!g.first) {
213 memset(buffer, 0, bsize);
214 return;
216 read_range_r(*g.first, g.second, buffer, bsize);
219 bool memory_space::write_range(uint64_t address, const void* buffer, size_t bsize)
221 auto g = lookup(address);
222 if(!g.first)
223 return false;
224 return write_range_r(*g.first, g.second, buffer, bsize);
227 void memory_space::read_range_linear(uint64_t address, void* buffer, size_t bsize)
229 auto g = lookup_linear(address);
230 if(!g.first) {
231 memset(buffer, 0, bsize);
232 return;
234 read_range_r(*g.first, g.second, buffer, bsize);
237 bool memory_space::write_range_linear(uint64_t address, const void* buffer, size_t bsize)
239 auto g = lookup_linear(address);
240 if(!g.first)
241 return false;
242 return write_range_r(*g.first, g.second, buffer, bsize);
245 memory_space::region* memory_space::lookup_n(size_t n)
247 threads::alock m(mlock);
248 if(n >= u_regions.size())
249 return NULL;
250 return u_regions[n];
254 std::list<memory_space::region*> memory_space::get_regions()
256 threads::alock m(mlock);
257 std::list<region*> r;
258 for(auto i : u_regions)
259 r.push_back(i);
260 return r;
263 char* memory_space::get_physical_mapping(uint64_t base, uint64_t size)
265 uint64_t last = base + size - 1;
266 if(last < base)
267 return NULL; //Warps around.
268 auto g1 = lookup(base);
269 auto g2 = lookup(last);
270 if(g1.first != g2.first)
271 return NULL; //Not the same VMA.
272 if(!g1.first || !g1.first->direct_map)
273 return NULL; //Not mapped.
274 //OK.
275 return reinterpret_cast<char*>(g1.first->direct_map + g1.second);
278 void memory_space::set_regions(const std::list<memory_space::region*>& regions)
280 threads::alock m(mlock);
281 std::vector<region*> n_regions;
282 std::vector<region*> n_lregions;
283 std::vector<uint64_t> n_linear_bases;
284 //Calculate array sizes.
285 n_regions.resize(regions.size());
286 size_t linear_c = 0;
287 for(auto i : regions)
288 if(!i->readonly && !i->special)
289 linear_c++;
290 n_lregions.resize(linear_c);
291 n_linear_bases.resize(linear_c + 1);
293 //Fill the main array (it must be sorted!).
294 size_t i = 0;
295 for(auto j : regions)
296 n_regions[i++] = j;
297 std::sort(n_regions.begin(), n_regions.end(),
298 [](region* a, region* b) -> bool { return a->base < b->base; });
300 //Fill linear address arrays from the main array.
301 i = 0;
302 uint64_t base = 0;
303 for(auto j : n_regions) {
304 if(j->readonly || j->special)
305 continue;
306 n_lregions[i] = j;
307 n_linear_bases[i] = base;
308 base = base + j->size;
309 i++;
311 n_linear_bases[i] = base;
313 std::swap(u_regions, n_regions);
314 std::swap(u_lregions, n_lregions);
315 std::swap(linear_bases, n_linear_bases);
316 linear_size = base;
319 int memory_space::_get_system_endian()
321 if(sysendian)
322 return sysendian;
323 uint16_t magic = 258;
324 return (*reinterpret_cast<uint8_t*>(&magic) == 1) ? 1 : -1;
327 int memory_space::sysendian = 0;
329 std::string memory_space::address_to_textual(uint64_t addr)
331 threads::alock m(mlock);
332 for(auto i : u_regions) {
333 if(addr >= i->base && addr <= i->last_address()) {
334 return (stringfmt() << i->name << "+" << std::hex << (addr - i->base)).str();
337 return (stringfmt() << std::hex << addr).str();
340 memory_space::region_direct::region_direct(const std::string& _name, uint64_t _base, int _endian,
341 unsigned char* _memory, size_t _size, bool _readonly)
343 name = _name;
344 base = _base;
345 endian = _endian;
346 direct_map = _memory;
347 size = _size;
348 readonly = _readonly;
349 special = false;
352 memory_space::region_direct::~region_direct() throw() {}
354 namespace
356 const static uint64_t p63 = 0x8000000000000000ULL;
357 const static uint64_t p64m1 = 0xFFFFFFFFFFFFFFFFULL;
359 void block_bounds(uint64_t base, uint64_t size, uint64_t& low, uint64_t& high)
361 if(base + size >= base) {
362 //No warparound.
363 low = min(low, base);
364 high = max(high, base + size - 1);
365 } else if(base + size == 0) {
366 //Just barely avoids warparound.
367 low = min(low, base);
368 high = 0xFFFFFFFFFFFFFFFFULL;
369 } else {
370 //Fully warps around.
371 low = 0;
372 high = 0xFFFFFFFFFFFFFFFFULL;
376 //Stride < 2^63.
377 //rows and stride is nonzero.
378 std::pair<uint64_t, uint64_t> base_bounds(uint64_t base, uint64_t rows, uint64_t stride)
380 uint64_t space = p64m1 - base;
381 if(space / stride < rows - 1)
382 //Approximate a bit.
383 return std::make_pair(0, p64m1);
384 return std::make_pair(base, base + (rows - 1) * stride);
388 std::pair<uint64_t, uint64_t> memoryspace_row_bounds(uint64_t base, uint64_t size, uint64_t rows,
389 uint64_t stride)
391 uint64_t low = p64m1;
392 uint64_t high = 0;
393 if(size && rows) {
394 uint64_t lb, hb;
395 if(stride == 0) {
396 //Case I: Stride is 0.
397 //Just one block is accessed.
398 lb = base;
399 hb = base;
400 block_bounds(base, size, low, high);
401 } else if(stride == p63) {
402 //Case II: Stride is 2^63.
403 //If there are multiple blocks, There are 2 accessed blocks, [base, base+size) and
404 //[base+X, base+size+X), where X=2^63.
405 lb = base;
406 hb = (rows > 1) ? (base + p63) : base;
407 } else if(stride > p63) {
408 //Case III: Stride is negative.
409 //Flip the problem around to get stride that is positive.
410 auto g = base_bounds(p64m1 - base, rows, ~stride + 1);
411 lb = p64m1 - g.first;
412 hb = p64m1 - g.second;
413 } else {
414 //Case IV: Stride is positive.
415 auto g = base_bounds(base, rows, stride);
416 lb = g.first;
417 hb = g.second;
419 block_bounds(lb, size, low, high);
420 block_bounds(hb, size, low, high);
422 return std::make_pair(low, high);
425 bool memoryspace_row_limited(uint64_t base, uint64_t size, uint64_t rows, uint64_t stride, uint64_t limit)
427 auto g = memoryspace_row_bounds(base, size, rows, stride);
428 if(g.first > g.second)
429 return true;
430 return (g.second < limit);