xen-mapcache.c
/*
 * Copyright (C) 2011 Citrix Ltd.
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */
#include "qemu/osdep.h"

#include <sys/resource.h>

#include "hw/xen/xen_backend.h"
#include "sysemu/blockdev.h"
#include "qemu/bitmap.h"

#include <xen/hvm/params.h>

#include "sysemu/xen-mapcache.h"
#include "trace.h"
//#define MAPCACHE_DEBUG

#ifdef MAPCACHE_DEBUG
#  define DPRINTF(fmt, ...) do { \
    fprintf(stderr, "xen_mapcache: " fmt, ## __VA_ARGS__); \
} while (0)
#else
#  define DPRINTF(fmt, ...) do { } while (0)
#endif
#if HOST_LONG_BITS == 32
#  define MCACHE_BUCKET_SHIFT 16
#  define MCACHE_MAX_SIZE     (1UL<<31) /* 2GB Cap */
#else
#  define MCACHE_BUCKET_SHIFT 20
#  define MCACHE_MAX_SIZE     (1UL<<35) /* 32GB Cap */
#endif
#define MCACHE_BUCKET_SIZE (1UL << MCACHE_BUCKET_SHIFT)
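
/*
 * Guest memory is mapped in aligned buckets of MCACHE_BUCKET_SIZE:
 * 64KiB on 32-bit hosts, 1MiB on 64-bit hosts.
 */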
/* This is the size of the virtual address space reserved for QEMU that will
 * not be used by the mapcache.
 * Empirical tests show that QEMU uses roughly 75MB more than max_mcache_size.
 */
#define NON_MCACHE_MEMORY_SIZE (80 * 1024 * 1024)
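
/*
 * A MapCacheEntry describes one mapped bucket.  Entries whose bucket
 * index hashes to the same slot are chained through 'next'; 'lock'
 * counts the callers currently holding a locked mapping of the bucket,
 * and 'valid_mapping' has one bit per page that mapped successfully.
 */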
typedef struct MapCacheEntry {
    hwaddr paddr_index;
    uint8_t *vaddr_base;
    unsigned long *valid_mapping;
    uint8_t lock;
    hwaddr size;
    struct MapCacheEntry *next;
} MapCacheEntry;
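
/*
 * A MapCacheRev records one locked mapping that was handed out, so the
 * host pointer can later be translated back to its guest address and
 * the lock reference dropped again.
 */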
typedef struct MapCacheRev {
    uint8_t *vaddr_req;
    hwaddr paddr_index;
    hwaddr size;
    QTAILQ_ENTRY(MapCacheRev) next;
} MapCacheRev;
typedef struct MapCache {
    MapCacheEntry *entry;
    unsigned long nr_buckets;
    QTAILQ_HEAD(map_cache_head, MapCacheRev) locked_entries;

    /* For most cases (>99.9%), the page address is the same. */
    MapCacheEntry *last_entry;
    unsigned long max_mcache_size;
    unsigned int mcache_bucket_shift;

    phys_offset_to_gaddr_t phys_offset_to_gaddr;
    QemuMutex lock;
    void *opaque;
} MapCache;
static MapCache *mapcache;

static inline void mapcache_lock(void)
{
    qemu_mutex_lock(&mapcache->lock);
}

static inline void mapcache_unlock(void)
{
    qemu_mutex_unlock(&mapcache->lock);
}
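
/*
 * Returns 1 iff every bit in [nr, nr + size) is set in the bitmap,
 * i.e. iff every page of the requested range was successfully mapped.
 */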
static inline int test_bits(int nr, int size, const unsigned long *addr)
{
    unsigned long res = find_next_zero_bit(addr, size + nr, nr);
    if (res >= nr + size) {
        return 1;
    } else {
        return 0;
    }
}
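
/*
 * Size the cache at start of day: as root, the full MCACHE_MAX_SIZE is
 * used; otherwise the address-space rlimit caps it, minus the
 * NON_MCACHE_MEMORY_SIZE headroom QEMU itself needs.
 */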
void xen_map_cache_init(phys_offset_to_gaddr_t f, void *opaque)
{
    unsigned long size;
    struct rlimit rlimit_as;

    mapcache = g_malloc0(sizeof (MapCache));

    mapcache->phys_offset_to_gaddr = f;
    mapcache->opaque = opaque;
    qemu_mutex_init(&mapcache->lock);

    QTAILQ_INIT(&mapcache->locked_entries);

    if (geteuid() == 0) {
        rlimit_as.rlim_cur = RLIM_INFINITY;
        rlimit_as.rlim_max = RLIM_INFINITY;
        mapcache->max_mcache_size = MCACHE_MAX_SIZE;
    } else {
        getrlimit(RLIMIT_AS, &rlimit_as);
        rlimit_as.rlim_cur = rlimit_as.rlim_max;

        if (rlimit_as.rlim_max != RLIM_INFINITY) {
            fprintf(stderr, "Warning: QEMU's maximum size of virtual"
                    " memory is not infinity.\n");
        }
        if (rlimit_as.rlim_max < MCACHE_MAX_SIZE + NON_MCACHE_MEMORY_SIZE) {
            mapcache->max_mcache_size = rlimit_as.rlim_max -
                NON_MCACHE_MEMORY_SIZE;
        } else {
            mapcache->max_mcache_size = MCACHE_MAX_SIZE;
        }
    }

    setrlimit(RLIMIT_AS, &rlimit_as);

    mapcache->nr_buckets =
        (((mapcache->max_mcache_size >> XC_PAGE_SHIFT) +
          (1UL << (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT)) - 1) >>
         (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT));

    size = mapcache->nr_buckets * sizeof (MapCacheEntry);
    size = (size + XC_PAGE_SIZE - 1) & ~(XC_PAGE_SIZE - 1);
    DPRINTF("%s, nr_buckets = %lx size %lu\n", __func__,
            mapcache->nr_buckets, size);
    mapcache->entry = g_malloc0(size);
}
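
/*
 * (Re)map one bucket of guest frames into QEMU's address space with
 * xenforeignmemory_map(), recording per-page success in valid_mapping.
 * Any previous mapping held by the entry is unmapped first.
 */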
static void xen_remap_bucket(MapCacheEntry *entry,
                             hwaddr size,
                             hwaddr address_index)
{
    uint8_t *vaddr_base;
    xen_pfn_t *pfns;
    int *err;
    unsigned int i;
    hwaddr nb_pfn = size >> XC_PAGE_SHIFT;

    trace_xen_remap_bucket(address_index);

    pfns = g_malloc0(nb_pfn * sizeof (xen_pfn_t));
    err = g_malloc0(nb_pfn * sizeof (int));

    if (entry->vaddr_base != NULL) {
        if (munmap(entry->vaddr_base, entry->size) != 0) {
            perror("unmap fails");
            exit(-1);
        }
    }
    g_free(entry->valid_mapping);
    entry->valid_mapping = NULL;

    for (i = 0; i < nb_pfn; i++) {
        pfns[i] = (address_index << (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT)) + i;
    }

    vaddr_base = xenforeignmemory_map(xen_fmem, xen_domid, PROT_READ|PROT_WRITE,
                                      nb_pfn, pfns, err);
    if (vaddr_base == NULL) {
        perror("xenforeignmemory_map");
        exit(-1);
    }

    entry->vaddr_base = vaddr_base;
    entry->paddr_index = address_index;
    entry->size = size;
    entry->valid_mapping = (unsigned long *) g_malloc0(sizeof(unsigned long) *
            BITS_TO_LONGS(size >> XC_PAGE_SHIFT));

    bitmap_zero(entry->valid_mapping, nb_pfn);
    for (i = 0; i < nb_pfn; i++) {
        if (!err[i]) {
            bitmap_set(entry->valid_mapping, i, 1);
        }
    }

    g_free(pfns);
    g_free(err);
}
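
/*
 * Look up (and if necessary create) the mapping for a guest physical
 * range: single-page unlocked lookups first try the one-entry cache in
 * last_entry, then the hash chain of the bucket is walked and the bucket
 * remapped on a miss.  If pages are still invalid after that, retry once
 * with the address translated through phys_offset_to_gaddr().
 */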
static uint8_t *xen_map_cache_unlocked(hwaddr phys_addr, hwaddr size,
                                       uint8_t lock)
{
    MapCacheEntry *entry, *pentry = NULL;
    hwaddr address_index;
    hwaddr address_offset;
    hwaddr cache_size = size;
    hwaddr test_bit_size;
    bool translated = false;

tryagain:
    address_index  = phys_addr >> MCACHE_BUCKET_SHIFT;
    address_offset = phys_addr & (MCACHE_BUCKET_SIZE - 1);

    trace_xen_map_cache(phys_addr);

    /* test_bit_size is always a multiple of XC_PAGE_SIZE */
    if (size) {
        test_bit_size = size + (phys_addr & (XC_PAGE_SIZE - 1));

        if (test_bit_size % XC_PAGE_SIZE) {
            test_bit_size += XC_PAGE_SIZE - (test_bit_size % XC_PAGE_SIZE);
        }
    } else {
        test_bit_size = XC_PAGE_SIZE;
    }

    if (mapcache->last_entry != NULL &&
        mapcache->last_entry->paddr_index == address_index &&
        !lock && !size &&
        test_bits(address_offset >> XC_PAGE_SHIFT,
                  test_bit_size >> XC_PAGE_SHIFT,
                  mapcache->last_entry->valid_mapping)) {
        trace_xen_map_cache_return(mapcache->last_entry->vaddr_base + address_offset);
        return mapcache->last_entry->vaddr_base + address_offset;
    }

    /* size is always a multiple of MCACHE_BUCKET_SIZE */
    if (size) {
        cache_size = size + address_offset;
        if (cache_size % MCACHE_BUCKET_SIZE) {
            cache_size += MCACHE_BUCKET_SIZE - (cache_size % MCACHE_BUCKET_SIZE);
        }
    } else {
        cache_size = MCACHE_BUCKET_SIZE;
    }

    entry = &mapcache->entry[address_index % mapcache->nr_buckets];

    while (entry && entry->lock && entry->vaddr_base &&
            (entry->paddr_index != address_index || entry->size != cache_size ||
             !test_bits(address_offset >> XC_PAGE_SHIFT,
                 test_bit_size >> XC_PAGE_SHIFT,
                 entry->valid_mapping))) {
        pentry = entry;
        entry = entry->next;
    }
    if (!entry) {
        entry = g_malloc0(sizeof (MapCacheEntry));
        pentry->next = entry;
        xen_remap_bucket(entry, cache_size, address_index);
    } else if (!entry->lock) {
        if (!entry->vaddr_base || entry->paddr_index != address_index ||
                entry->size != cache_size ||
                !test_bits(address_offset >> XC_PAGE_SHIFT,
                    test_bit_size >> XC_PAGE_SHIFT,
                    entry->valid_mapping)) {
            xen_remap_bucket(entry, cache_size, address_index);
        }
    }

    if (!test_bits(address_offset >> XC_PAGE_SHIFT,
                   test_bit_size >> XC_PAGE_SHIFT,
                   entry->valid_mapping)) {
        mapcache->last_entry = NULL;
        if (!translated && mapcache->phys_offset_to_gaddr) {
            phys_addr = mapcache->phys_offset_to_gaddr(phys_addr, size, mapcache->opaque);
            translated = true;
            goto tryagain;
        }
        trace_xen_map_cache_return(NULL);
        return NULL;
    }

    mapcache->last_entry = entry;
    if (lock) {
        MapCacheRev *reventry = g_malloc0(sizeof(MapCacheRev));
        entry->lock++;
        reventry->vaddr_req = mapcache->last_entry->vaddr_base + address_offset;
        reventry->paddr_index = mapcache->last_entry->paddr_index;
        reventry->size = entry->size;
        QTAILQ_INSERT_HEAD(&mapcache->locked_entries, reventry, next);
    }

    trace_xen_map_cache_return(mapcache->last_entry->vaddr_base + address_offset);
    return mapcache->last_entry->vaddr_base + address_offset;
}
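
/*
 * A minimal usage sketch (illustrative only, not taken from this file;
 * guest_paddr, len and host_buf are placeholders).  Callers that need
 * the pointer beyond the current access pass lock != 0 and must release
 * the mapping again:
 *
 *     uint8_t *p = xen_map_cache(guest_paddr, len, 1);   // locked mapping
 *     if (p) {
 *         memcpy(host_buf, p, len);                      // use the mapping
 *         xen_invalidate_map_cache_entry(p);             // drop the lock
 *     }
 *
 * Unlocked mappings (lock == 0) may be remapped or discarded at any
 * time and must not be stashed by the caller.
 */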
uint8_t *xen_map_cache(hwaddr phys_addr, hwaddr size,
                       uint8_t lock)
{
    uint8_t *p;

    mapcache_lock();
    p = xen_map_cache_unlocked(phys_addr, size, lock);
    mapcache_unlock();
    return p;
}
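
/*
 * Translate a host pointer previously returned by a locked
 * xen_map_cache() call back into a guest ram address via the
 * reverse-mapping list; aborts if the pointer is unknown.
 */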
ram_addr_t xen_ram_addr_from_mapcache(void *ptr)
{
    MapCacheEntry *entry = NULL;
    MapCacheRev *reventry;
    hwaddr paddr_index;
    hwaddr size;
    ram_addr_t raddr;
    int found = 0;

    mapcache_lock();
    QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
        if (reventry->vaddr_req == ptr) {
            paddr_index = reventry->paddr_index;
            size = reventry->size;
            found = 1;
            break;
        }
    }
    if (!found) {
        fprintf(stderr, "%s, could not find %p\n", __func__, ptr);
        QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
            DPRINTF("   "TARGET_FMT_plx" -> %p is present\n", reventry->paddr_index,
                    reventry->vaddr_req);
        }
        abort();
        return 0;
    }

    entry = &mapcache->entry[paddr_index % mapcache->nr_buckets];
    while (entry && (entry->paddr_index != paddr_index || entry->size != size)) {
        entry = entry->next;
    }
    if (!entry) {
        DPRINTF("Trying to find address %p that is not in the mapcache!\n", ptr);
        raddr = 0;
    } else {
        raddr = (reventry->paddr_index << MCACHE_BUCKET_SHIFT) +
             ((unsigned long) ptr - (unsigned long) entry->vaddr_base);
    }
    mapcache_unlock();
    return raddr;
}
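
/*
 * Drop one lock reference on the mapping that backs 'buffer'.  Once the
 * entry is unreferenced it is unmapped and freed, unless it is the head
 * entry of its bucket, which stays in place for reuse.
 */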
static void xen_invalidate_map_cache_entry_unlocked(uint8_t *buffer)
{
    MapCacheEntry *entry = NULL, *pentry = NULL;
    MapCacheRev *reventry;
    hwaddr paddr_index;
    hwaddr size;
    int found = 0;

    QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
        if (reventry->vaddr_req == buffer) {
            paddr_index = reventry->paddr_index;
            size = reventry->size;
            found = 1;
            break;
        }
    }
    if (!found) {
        DPRINTF("%s, could not find %p\n", __func__, buffer);
        QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
            DPRINTF("   "TARGET_FMT_plx" -> %p is present\n",
                    reventry->paddr_index, reventry->vaddr_req);
        }
        return;
    }
    QTAILQ_REMOVE(&mapcache->locked_entries, reventry, next);
    g_free(reventry);

    if (mapcache->last_entry != NULL &&
        mapcache->last_entry->paddr_index == paddr_index) {
        mapcache->last_entry = NULL;
    }

    entry = &mapcache->entry[paddr_index % mapcache->nr_buckets];
    while (entry && (entry->paddr_index != paddr_index || entry->size != size)) {
        pentry = entry;
        entry = entry->next;
    }
    if (!entry) {
        DPRINTF("Trying to unmap address %p that is not in the mapcache!\n", buffer);
        return;
    }
    entry->lock--;
    if (entry->lock > 0 || pentry == NULL) {
        return;
    }

    pentry->next = entry->next;
    if (munmap(entry->vaddr_base, entry->size) != 0) {
        perror("unmap fails");
        exit(-1);
    }
    g_free(entry->valid_mapping);
    g_free(entry);
}
void xen_invalidate_map_cache_entry(uint8_t *buffer)
{
    mapcache_lock();
    xen_invalidate_map_cache_entry_unlocked(buffer);
    mapcache_unlock();
}
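
/*
 * Throw away every unlocked mapping, e.g. after the guest's physical
 * memory layout has changed.  Locked entries are left in place; any
 * still present at this point are reported in debug builds.
 */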
void xen_invalidate_map_cache(void)
{
    unsigned long i;
    MapCacheRev *reventry;

    /* Flush pending AIO before destroying the mapcache */
    bdrv_drain_all();

    mapcache_lock();

    QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
        DPRINTF("There should be no locked mappings at this time, "
                "but "TARGET_FMT_plx" -> %p is present\n",
                reventry->paddr_index, reventry->vaddr_req);
    }

    for (i = 0; i < mapcache->nr_buckets; i++) {
        MapCacheEntry *entry = &mapcache->entry[i];

        if (entry->vaddr_base == NULL) {
            continue;
        }
        if (entry->lock > 0) {
            continue;
        }

        if (munmap(entry->vaddr_base, entry->size) != 0) {
            perror("unmap fails");
            exit(-1);
        }

        entry->paddr_index = 0;
        entry->vaddr_base = NULL;
        entry->size = 0;
        g_free(entry->valid_mapping);
        entry->valid_mapping = NULL;
    }

    mapcache->last_entry = NULL;

    mapcache_unlock();
}