[qemu.git] / xen-mapcache.c
/*
 * Copyright (C) 2011       Citrix Ltd.
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
#include "config.h"

#include <sys/resource.h>

#include "hw/xen_backend.h"
#include "blockdev.h"
#include "bitmap.h"

#include <xen/hvm/params.h>
#include <sys/mman.h>

#include "xen-mapcache.h"
#include "trace.h"
//#define MAPCACHE_DEBUG

#ifdef MAPCACHE_DEBUG
#  define DPRINTF(fmt, ...) do { \
    fprintf(stderr, "xen_mapcache: " fmt, ## __VA_ARGS__); \
} while (0)
#else
#  define DPRINTF(fmt, ...) do { } while (0)
#endif
#if defined(__i386__)
#  define MCACHE_BUCKET_SHIFT 16
#  define MCACHE_MAX_SIZE     (1UL<<31) /* 2GB Cap */
#elif defined(__x86_64__)
#  define MCACHE_BUCKET_SHIFT 20
#  define MCACHE_MAX_SIZE     (1UL<<35) /* 32GB Cap */
#endif
#define MCACHE_BUCKET_SIZE (1UL << MCACHE_BUCKET_SHIFT)
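
/*
 * The map cache is a hash table of buckets: each MapCacheEntry describes one
 * MCACHE_BUCKET_SIZE-aligned chunk of guest physical memory and chains to
 * further entries that hash to the same slot.  A MapCacheRev records a
 * locked mapping so the host pointer can later be translated back or
 * invalidated.  MapCache is the cache itself, including a one-entry cache
 * of the last successful lookup.
 */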
typedef struct MapCacheEntry {
    target_phys_addr_t paddr_index;
    uint8_t *vaddr_base;
    unsigned long *valid_mapping;
    uint8_t lock;
    target_phys_addr_t size;
    struct MapCacheEntry *next;
} MapCacheEntry;
typedef struct MapCacheRev {
    uint8_t *vaddr_req;
    target_phys_addr_t paddr_index;
    target_phys_addr_t size;
    QTAILQ_ENTRY(MapCacheRev) next;
} MapCacheRev;
typedef struct MapCache {
    MapCacheEntry *entry;
    unsigned long nr_buckets;
    QTAILQ_HEAD(map_cache_head, MapCacheRev) locked_entries;

    /* For most cases (>99.9%), the page address is the same. */
    target_phys_addr_t last_address_index;
    uint8_t *last_address_vaddr;
    unsigned long max_mcache_size;
    unsigned int mcache_bucket_shift;
} MapCache;

static MapCache *mapcache;
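
/* Return 1 if bits [nr, nr + size) are all set in the bitmap at addr,
 * 0 otherwise.  Used to check that every page touched by a request was
 * successfully mapped (see valid_mapping). */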
static inline int test_bits(int nr, int size, const unsigned long *addr)
{
    unsigned long res = find_next_zero_bit(addr, size + nr, nr);
    if (res >= nr + size)
        return 1;
    else
        return 0;
}
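
/* Allocate the bucket array.  The soft RLIMIT_AS is set to the hard limit,
 * capped at MCACHE_MAX_SIZE, and the number of buckets is derived from that
 * value. */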
void qemu_map_cache_init(void)
{
    unsigned long size;
    struct rlimit rlimit_as;

    mapcache = qemu_mallocz(sizeof (MapCache));

    QTAILQ_INIT(&mapcache->locked_entries);
    mapcache->last_address_index = -1;

    getrlimit(RLIMIT_AS, &rlimit_as);
    if (rlimit_as.rlim_max < MCACHE_MAX_SIZE) {
        rlimit_as.rlim_cur = rlimit_as.rlim_max;
    } else {
        rlimit_as.rlim_cur = MCACHE_MAX_SIZE;
    }

    setrlimit(RLIMIT_AS, &rlimit_as);
    mapcache->max_mcache_size = rlimit_as.rlim_cur;

    mapcache->nr_buckets =
        (((mapcache->max_mcache_size >> XC_PAGE_SHIFT) +
          (1UL << (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT)) - 1) >>
         (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT));

    size = mapcache->nr_buckets * sizeof (MapCacheEntry);
    size = (size + XC_PAGE_SIZE - 1) & ~(XC_PAGE_SIZE - 1);
    DPRINTF("qemu_map_cache_init, nr_buckets = %lx size %lu\n", mapcache->nr_buckets, size);
    mapcache->entry = qemu_mallocz(size);
}
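
/* (Re)map one bucket of guest memory.  Any previous mapping is unmapped,
 * the guest frames backing bucket address_index are mapped with
 * xc_map_foreign_bulk(), and valid_mapping is rebuilt from the per-page
 * error array so callers can tell which pages really mapped. */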
static void qemu_remap_bucket(MapCacheEntry *entry,
                              target_phys_addr_t size,
                              target_phys_addr_t address_index)
{
    uint8_t *vaddr_base;
    xen_pfn_t *pfns;
    int *err;
    unsigned int i;
    target_phys_addr_t nb_pfn = size >> XC_PAGE_SHIFT;

    trace_qemu_remap_bucket(address_index);

    pfns = qemu_mallocz(nb_pfn * sizeof (xen_pfn_t));
    err = qemu_mallocz(nb_pfn * sizeof (int));

    if (entry->vaddr_base != NULL) {
        if (munmap(entry->vaddr_base, entry->size) != 0) {
            perror("unmap fails");
            exit(-1);
        }
    }
    if (entry->valid_mapping != NULL) {
        qemu_free(entry->valid_mapping);
        entry->valid_mapping = NULL;
    }

    for (i = 0; i < nb_pfn; i++) {
        pfns[i] = (address_index << (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT)) + i;
    }

    vaddr_base = xc_map_foreign_bulk(xen_xc, xen_domid, PROT_READ | PROT_WRITE,
                                     pfns, err, nb_pfn);
    if (vaddr_base == NULL) {
        perror("xc_map_foreign_bulk");
        exit(-1);
    }

    entry->vaddr_base = vaddr_base;
    entry->paddr_index = address_index;
    entry->size = size;
    entry->valid_mapping = (unsigned long *) qemu_mallocz(sizeof(unsigned long) *
            BITS_TO_LONGS(size >> XC_PAGE_SHIFT));

    bitmap_zero(entry->valid_mapping, nb_pfn);
    for (i = 0; i < nb_pfn; i++) {
        if (!err[i]) {
            bitmap_set(entry->valid_mapping, i, 1);
        }
    }

    qemu_free(pfns);
    qemu_free(err);
}
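
/* Map guest physical address phys_addr and return a host pointer to it.
 * An unlocked, zero-size request that hits the same bucket as the previous
 * call is served from the one-entry cache.  Otherwise the request is
 * rounded up to bucket granularity, the hash chain is walked (remapping a
 * free or stale entry when necessary), and NULL is returned if any of the
 * requested pages failed to map.  With lock != 0 the mapping is pinned and
 * recorded in locked_entries until qemu_invalidate_entry() releases it. */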
uint8_t *qemu_map_cache(target_phys_addr_t phys_addr, target_phys_addr_t size, uint8_t lock)
{
    MapCacheEntry *entry, *pentry = NULL;
    target_phys_addr_t address_index = phys_addr >> MCACHE_BUCKET_SHIFT;
    target_phys_addr_t address_offset = phys_addr & (MCACHE_BUCKET_SIZE - 1);
    target_phys_addr_t __size = size;

    trace_qemu_map_cache(phys_addr);

    if (address_index == mapcache->last_address_index && !lock && !__size) {
        trace_qemu_map_cache_return(mapcache->last_address_vaddr + address_offset);
        return mapcache->last_address_vaddr + address_offset;
    }

    /* size is always a multiple of MCACHE_BUCKET_SIZE */
    if ((address_offset + (__size % MCACHE_BUCKET_SIZE)) > MCACHE_BUCKET_SIZE)
        __size += MCACHE_BUCKET_SIZE;
    if (__size % MCACHE_BUCKET_SIZE)
        __size += MCACHE_BUCKET_SIZE - (__size % MCACHE_BUCKET_SIZE);
    if (!__size)
        __size = MCACHE_BUCKET_SIZE;

    entry = &mapcache->entry[address_index % mapcache->nr_buckets];

    while (entry && entry->lock && entry->vaddr_base &&
            (entry->paddr_index != address_index || entry->size != __size ||
             !test_bits(address_offset >> XC_PAGE_SHIFT, size >> XC_PAGE_SHIFT,
                 entry->valid_mapping))) {
        pentry = entry;
        entry = entry->next;
    }
    if (!entry) {
        entry = qemu_mallocz(sizeof (MapCacheEntry));
        pentry->next = entry;
        qemu_remap_bucket(entry, __size, address_index);
    } else if (!entry->lock) {
        if (!entry->vaddr_base || entry->paddr_index != address_index ||
                entry->size != __size ||
                !test_bits(address_offset >> XC_PAGE_SHIFT, size >> XC_PAGE_SHIFT,
                    entry->valid_mapping)) {
            qemu_remap_bucket(entry, __size, address_index);
        }
    }

    if (!test_bits(address_offset >> XC_PAGE_SHIFT, size >> XC_PAGE_SHIFT,
                entry->valid_mapping)) {
        mapcache->last_address_index = -1;
        trace_qemu_map_cache_return(NULL);
        return NULL;
    }

    mapcache->last_address_index = address_index;
    mapcache->last_address_vaddr = entry->vaddr_base;
    if (lock) {
        MapCacheRev *reventry = qemu_mallocz(sizeof(MapCacheRev));
        entry->lock++;
        reventry->vaddr_req = mapcache->last_address_vaddr + address_offset;
        reventry->paddr_index = mapcache->last_address_index;
        reventry->size = entry->size;
        QTAILQ_INSERT_HEAD(&mapcache->locked_entries, reventry, next);
    }

    trace_qemu_map_cache_return(mapcache->last_address_vaddr + address_offset);
    return mapcache->last_address_vaddr + address_offset;
}
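
/* Translate a host pointer obtained from a locked qemu_map_cache() call
 * back into the corresponding guest physical (ram) address.  The pointer
 * must be tracked in locked_entries; otherwise the function aborts. */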
ram_addr_t qemu_ram_addr_from_mapcache(void *ptr)
{
    MapCacheEntry *entry = NULL, *pentry = NULL;
    MapCacheRev *reventry;
    target_phys_addr_t paddr_index;
    target_phys_addr_t size;
    int found = 0;

    QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
        if (reventry->vaddr_req == ptr) {
            paddr_index = reventry->paddr_index;
            size = reventry->size;
            found = 1;
            break;
        }
    }
    if (!found) {
        fprintf(stderr, "qemu_ram_addr_from_mapcache, could not find %p\n", ptr);
        QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
            DPRINTF("   "TARGET_FMT_plx" -> %p is present\n", reventry->paddr_index,
                    reventry->vaddr_req);
        }
        abort();
        return 0;
    }

    entry = &mapcache->entry[paddr_index % mapcache->nr_buckets];
    while (entry && (entry->paddr_index != paddr_index || entry->size != size)) {
        pentry = entry;
        entry = entry->next;
    }
    if (!entry) {
        DPRINTF("Trying to find address %p that is not in the mapcache!\n", ptr);
        return 0;
    }
    return (reventry->paddr_index << MCACHE_BUCKET_SHIFT) +
        ((unsigned long) ptr - (unsigned long) entry->vaddr_base);
}
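
/* Release one locked mapping previously returned by qemu_map_cache().
 * The reverse entry for buffer is removed and the bucket's lock count is
 * dropped; once the bucket is unlocked (and is not the chain head) it is
 * unmapped and freed. */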
void qemu_invalidate_entry(uint8_t *buffer)
{
    MapCacheEntry *entry = NULL, *pentry = NULL;
    MapCacheRev *reventry;
    target_phys_addr_t paddr_index;
    target_phys_addr_t size;
    int found = 0;

    if (mapcache->last_address_vaddr == buffer) {
        mapcache->last_address_index = -1;
    }

    QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
        if (reventry->vaddr_req == buffer) {
            paddr_index = reventry->paddr_index;
            size = reventry->size;
            found = 1;
            break;
        }
    }
    if (!found) {
        DPRINTF("qemu_invalidate_entry, could not find %p\n", buffer);
        QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
            DPRINTF("   "TARGET_FMT_plx" -> %p is present\n",
                    reventry->paddr_index, reventry->vaddr_req);
        }
        return;
    }
    QTAILQ_REMOVE(&mapcache->locked_entries, reventry, next);
    qemu_free(reventry);

    entry = &mapcache->entry[paddr_index % mapcache->nr_buckets];
    while (entry && (entry->paddr_index != paddr_index || entry->size != size)) {
        pentry = entry;
        entry = entry->next;
    }
    if (!entry) {
        DPRINTF("Trying to unmap address %p that is not in the mapcache!\n", buffer);
        return;
    }
    entry->lock--;
    if (entry->lock > 0 || pentry == NULL) {
        return;
    }

    pentry->next = entry->next;
    if (munmap(entry->vaddr_base, entry->size) != 0) {
        perror("unmap fails");
        exit(-1);
    }
    qemu_free(entry->valid_mapping);
    qemu_free(entry);
}
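
/* Drop every mapping in the cache: flush pending AIO first, warn about any
 * mappings still locked, then unmap and reset each bucket and clear the
 * last-lookup cache.  The bucket sweep is done between mapcache_lock() and
 * mapcache_unlock(). */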
void qemu_invalidate_map_cache(void)
{
    unsigned long i;
    MapCacheRev *reventry;

    /* Flush pending AIO before destroying the mapcache */
    qemu_aio_flush();

    QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
        DPRINTF("There should be no locked mappings at this time, "
                "but "TARGET_FMT_plx" -> %p is present\n",
                reventry->paddr_index, reventry->vaddr_req);
    }

    mapcache_lock();

    for (i = 0; i < mapcache->nr_buckets; i++) {
        MapCacheEntry *entry = &mapcache->entry[i];

        if (entry->vaddr_base == NULL) {
            continue;
        }

        if (munmap(entry->vaddr_base, entry->size) != 0) {
            perror("unmap fails");
            exit(-1);
        }

        entry->paddr_index = 0;
        entry->vaddr_base = NULL;
        entry->size = 0;
        qemu_free(entry->valid_mapping);
        entry->valid_mapping = NULL;
    }

    mapcache->last_address_index = -1;
    mapcache->last_address_vaddr = NULL;

    mapcache_unlock();
}