/*
 * Copyright (C) 2011       Citrix Ltd.
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */
#include "qemu/osdep.h"

#include <sys/resource.h>

#include "hw/xen/xen_backend.h"
#include "sysemu/blockdev.h"
#include "qemu/bitmap.h"

#include <xen/hvm/params.h>
#include <sys/mman.h>

#include "sysemu/xen-mapcache.h"
#include "trace.h"
//#define MAPCACHE_DEBUG

#ifdef MAPCACHE_DEBUG
#  define DPRINTF(fmt, ...) do { \
    fprintf(stderr, "xen_mapcache: " fmt, ## __VA_ARGS__); \
} while (0)
#else
#  define DPRINTF(fmt, ...) do { } while (0)
#endif
#if HOST_LONG_BITS == 32
#  define MCACHE_BUCKET_SHIFT 16
#  define MCACHE_MAX_SIZE     (1UL<<31) /* 2GB Cap */
#else
#  define MCACHE_BUCKET_SHIFT 20
#  define MCACHE_MAX_SIZE     (1UL<<35) /* 32GB Cap */
#endif
#define MCACHE_BUCKET_SIZE (1UL << MCACHE_BUCKET_SHIFT)
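
/*
 * A worked example of the geometry above (assuming the usual 4 KiB Xen
 * page, i.e. XC_PAGE_SHIFT == 12): on a 64-bit host each bucket covers
 * 1UL << 20 = 1 MiB of guest-physical space, or 256 Xen pages; on a
 * 32-bit host a bucket is 64 KiB, or 16 pages.  MCACHE_MAX_SIZE caps the
 * total virtual address space the cache may consume.
 */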
/* This is the size of the virtual address space reserved for QEMU that will
 * not be used by the mapcache.
 * Empirically, QEMU was observed to use about 75MB more than max_mcache_size,
 * so reserve 80MB to be safe.
 */
#define NON_MCACHE_MEMORY_SIZE (80 * 1024 * 1024)
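
/*
 * One mapping of a bucket-sized region of guest memory (or several
 * contiguous buckets, for large locked requests).  Entries whose
 * paddr_index hashes to the same slot are chained via 'next';
 * valid_mapping has one bit per XC_PAGE-sized page that was actually
 * mapped successfully.
 */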
typedef struct MapCacheEntry {
    hwaddr paddr_index;
    uint8_t *vaddr_base;
    unsigned long *valid_mapping;
    uint8_t lock;
    hwaddr size;
    struct MapCacheEntry *next;
} MapCacheEntry;
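
/*
 * Reverse-map record for a locked mapping: remembers which virtual
 * address was handed out for which (paddr_index, size), so that
 * xen_ram_addr_from_mapcache() and the invalidate path can find the
 * owning entry again from just the pointer.
 */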
typedef struct MapCacheRev {
    uint8_t *vaddr_req;
    hwaddr paddr_index;
    hwaddr size;
    QTAILQ_ENTRY(MapCacheRev) next;
} MapCacheRev;
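
/*
 * The cache itself: a fixed array of hash slots ('entry', indexed by
 * paddr_index % nr_buckets, each the head of a chain), the list of
 * currently locked mappings, and a one-entry lookup cache (last_entry).
 * All of this state is protected by 'lock'.
 */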
typedef struct MapCache {
    MapCacheEntry *entry;
    unsigned long nr_buckets;
    QTAILQ_HEAD(map_cache_head, MapCacheRev) locked_entries;

    /* For most cases (>99.9%), the page address is the same. */
    MapCacheEntry *last_entry;
    unsigned long max_mcache_size;
    unsigned int mcache_bucket_shift;

    phys_offset_to_gaddr_t phys_offset_to_gaddr;
    QemuMutex lock;
    void *opaque;
} MapCache;
static MapCache *mapcache;

static inline void mapcache_lock(void)
{
    qemu_mutex_lock(&mapcache->lock);
}

static inline void mapcache_unlock(void)
{
    qemu_mutex_unlock(&mapcache->lock);
}
/* Return 1 if all bits in [nr, nr + size) of 'addr' are set, 0 otherwise. */
static inline int test_bits(int nr, int size, const unsigned long *addr)
{
    unsigned long res = find_next_zero_bit(addr, size + nr, nr);
    if (res >= nr + size) {
        return 1;
    } else {
        return 0;
    }
}
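
/*
 * Allocate and size the cache.  When running as root the address-space
 * rlimit is raised to infinity and the full MCACHE_MAX_SIZE is used;
 * otherwise the cache is sized to fit under rlim_max minus the
 * NON_MCACHE_MEMORY_SIZE headroom.  nr_buckets then comes out as
 * ceil(max_mcache_size / MCACHE_BUCKET_SIZE), i.e. one hash slot per
 * bucket of guest-physical space.
 */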
void xen_map_cache_init(phys_offset_to_gaddr_t f, void *opaque)
{
    unsigned long size;
    struct rlimit rlimit_as;

    mapcache = g_malloc0(sizeof (MapCache));

    mapcache->phys_offset_to_gaddr = f;
    mapcache->opaque = opaque;
    qemu_mutex_init(&mapcache->lock);

    QTAILQ_INIT(&mapcache->locked_entries);

    if (geteuid() == 0) {
        rlimit_as.rlim_cur = RLIM_INFINITY;
        rlimit_as.rlim_max = RLIM_INFINITY;
        mapcache->max_mcache_size = MCACHE_MAX_SIZE;
    } else {
        getrlimit(RLIMIT_AS, &rlimit_as);
        rlimit_as.rlim_cur = rlimit_as.rlim_max;

        if (rlimit_as.rlim_max != RLIM_INFINITY) {
            fprintf(stderr, "Warning: QEMU's maximum size of virtual"
                    " memory is not infinity.\n");
        }
        if (rlimit_as.rlim_max < MCACHE_MAX_SIZE + NON_MCACHE_MEMORY_SIZE) {
            mapcache->max_mcache_size = rlimit_as.rlim_max -
                NON_MCACHE_MEMORY_SIZE;
        } else {
            mapcache->max_mcache_size = MCACHE_MAX_SIZE;
        }
    }

    setrlimit(RLIMIT_AS, &rlimit_as);

    mapcache->nr_buckets =
        (((mapcache->max_mcache_size >> XC_PAGE_SHIFT) +
          (1UL << (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT)) - 1) >>
         (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT));

    size = mapcache->nr_buckets * sizeof (MapCacheEntry);
    size = (size + XC_PAGE_SIZE - 1) & ~(XC_PAGE_SIZE - 1);
    DPRINTF("%s, nr_buckets = %lx size %lu\n", __func__,
            mapcache->nr_buckets, size);
    mapcache->entry = g_malloc0(size);
}
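
/*
 * (Re)map one cache entry: tear down any previous mapping, then map the
 * nb_pfn guest frames belonging to address_index's bucket with
 * xenforeignmemory_map().  Pages that fail to map (err[i] != 0) are simply
 * left out of valid_mapping, so callers can detect holes with test_bits().
 */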
static void xen_remap_bucket(MapCacheEntry *entry,
                             hwaddr size,
                             hwaddr address_index)
{
    uint8_t *vaddr_base;
    xen_pfn_t *pfns;
    int *err;
    unsigned int i;
    hwaddr nb_pfn = size >> XC_PAGE_SHIFT;

    trace_xen_remap_bucket(address_index);

    pfns = g_malloc0(nb_pfn * sizeof (xen_pfn_t));
    err = g_malloc0(nb_pfn * sizeof (int));

    if (entry->vaddr_base != NULL) {
        if (munmap(entry->vaddr_base, entry->size) != 0) {
            perror("unmap fails");
            exit(-1);
        }
    }
    g_free(entry->valid_mapping);
    entry->valid_mapping = NULL;

    for (i = 0; i < nb_pfn; i++) {
        pfns[i] = (address_index << (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT)) + i;
    }

    vaddr_base = xenforeignmemory_map(xen_fmem, xen_domid, PROT_READ|PROT_WRITE,
                                      nb_pfn, pfns, err);
    if (vaddr_base == NULL) {
        perror("xenforeignmemory_map");
        exit(-1);
    }

    entry->vaddr_base = vaddr_base;
    entry->paddr_index = address_index;
    entry->size = size;
    entry->valid_mapping = (unsigned long *) g_malloc0(sizeof(unsigned long) *
            BITS_TO_LONGS(size >> XC_PAGE_SHIFT));

    bitmap_zero(entry->valid_mapping, nb_pfn);
    for (i = 0; i < nb_pfn; i++) {
        if (!err[i]) {
            bitmap_set(entry->valid_mapping, i, 1);
        }
    }

    g_free(pfns);
    g_free(err);
}
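
/*
 * Core lookup.  Fast path: an unlocked, zero-size request for the same
 * bucket as last time is served straight from last_entry.  Otherwise hash
 * phys_addr into a slot, walk the chain for an entry matching index and
 * size whose pages are all valid, and (re)map one if none is found.  If
 * the needed pages still are not valid, the physmap callback gets one
 * chance to translate phys_addr before we return NULL.  lock != 0
 * additionally bumps the entry's refcount and records a MapCacheRev.
 */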
static uint8_t *xen_map_cache_unlocked(hwaddr phys_addr, hwaddr size,
                                       uint8_t lock)
{
    MapCacheEntry *entry, *pentry = NULL;
    hwaddr address_index;
    hwaddr address_offset;
    hwaddr cache_size = size;
    hwaddr test_bit_size;
    bool translated = false;

tryagain:
    address_index  = phys_addr >> MCACHE_BUCKET_SHIFT;
    address_offset = phys_addr & (MCACHE_BUCKET_SIZE - 1);

    trace_xen_map_cache(phys_addr);

    /* test_bit_size is always a multiple of XC_PAGE_SIZE */
    if (size) {
        test_bit_size = size + (phys_addr & (XC_PAGE_SIZE - 1));

        if (test_bit_size % XC_PAGE_SIZE) {
            test_bit_size += XC_PAGE_SIZE - (test_bit_size % XC_PAGE_SIZE);
        }
    } else {
        test_bit_size = XC_PAGE_SIZE;
    }

    if (mapcache->last_entry != NULL &&
        mapcache->last_entry->paddr_index == address_index &&
        !lock && !size &&
        test_bits(address_offset >> XC_PAGE_SHIFT,
                  test_bit_size >> XC_PAGE_SHIFT,
                  mapcache->last_entry->valid_mapping)) {
        trace_xen_map_cache_return(mapcache->last_entry->vaddr_base + address_offset);
        return mapcache->last_entry->vaddr_base + address_offset;
    }

    /* size is always a multiple of MCACHE_BUCKET_SIZE */
    if (size) {
        cache_size = size + address_offset;
        if (cache_size % MCACHE_BUCKET_SIZE) {
            cache_size += MCACHE_BUCKET_SIZE - (cache_size % MCACHE_BUCKET_SIZE);
        }
    } else {
        cache_size = MCACHE_BUCKET_SIZE;
    }

    entry = &mapcache->entry[address_index % mapcache->nr_buckets];

    while (entry && entry->lock && entry->vaddr_base &&
            (entry->paddr_index != address_index || entry->size != cache_size ||
             !test_bits(address_offset >> XC_PAGE_SHIFT,
                 test_bit_size >> XC_PAGE_SHIFT,
                 entry->valid_mapping))) {
        pentry = entry;
        entry = entry->next;
    }
    if (!entry) {
        entry = g_malloc0(sizeof (MapCacheEntry));
        pentry->next = entry;
        xen_remap_bucket(entry, cache_size, address_index);
    } else if (!entry->lock) {
        if (!entry->vaddr_base || entry->paddr_index != address_index ||
                entry->size != cache_size ||
                !test_bits(address_offset >> XC_PAGE_SHIFT,
                    test_bit_size >> XC_PAGE_SHIFT,
                    entry->valid_mapping)) {
            xen_remap_bucket(entry, cache_size, address_index);
        }
    }

    if (!test_bits(address_offset >> XC_PAGE_SHIFT,
                   test_bit_size >> XC_PAGE_SHIFT,
                   entry->valid_mapping)) {
        mapcache->last_entry = NULL;
        if (!translated && mapcache->phys_offset_to_gaddr) {
            phys_addr = mapcache->phys_offset_to_gaddr(phys_addr, size, mapcache->opaque);
            translated = true;
            goto tryagain;
        }
        trace_xen_map_cache_return(NULL);
        return NULL;
    }

    mapcache->last_entry = entry;
    if (lock) {
        MapCacheRev *reventry = g_malloc0(sizeof(MapCacheRev));
        entry->lock++;
        reventry->vaddr_req = mapcache->last_entry->vaddr_base + address_offset;
        reventry->paddr_index = mapcache->last_entry->paddr_index;
        reventry->size = entry->size;
        QTAILQ_INSERT_HEAD(&mapcache->locked_entries, reventry, next);
    }

    trace_xen_map_cache_return(mapcache->last_entry->vaddr_base + address_offset);
    return mapcache->last_entry->vaddr_base + address_offset;
}
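
/*
 * Public entry point: same contract as xen_map_cache_unlocked(), just
 * wrapped in the mapcache lock.  A mapping requested with lock != 0 stays
 * valid until xen_invalidate_map_cache_entry() is called on the returned
 * pointer.
 */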
uint8_t *xen_map_cache(hwaddr phys_addr, hwaddr size,
                       uint8_t lock)
{
    uint8_t *p;

    mapcache_lock();
    p = xen_map_cache_unlocked(phys_addr, size, lock);
    mapcache_unlock();
    return p;
}
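
/*
 * Translate a host pointer previously returned by a locked xen_map_cache()
 * call back into a guest ram_addr_t, by looking the pointer up in the
 * locked_entries reverse map.  Aborts if the pointer is unknown, i.e. was
 * never handed out with lock != 0.
 */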
ram_addr_t xen_ram_addr_from_mapcache(void *ptr)
{
    MapCacheEntry *entry = NULL;
    MapCacheRev *reventry;
    hwaddr paddr_index;
    hwaddr size;
    ram_addr_t raddr;
    int found = 0;

    mapcache_lock();
    QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
        if (reventry->vaddr_req == ptr) {
            paddr_index = reventry->paddr_index;
            size = reventry->size;
            found = 1;
            break;
        }
    }
    if (!found) {
        fprintf(stderr, "%s, could not find %p\n", __func__, ptr);
        QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
            DPRINTF("   "TARGET_FMT_plx" -> %p is present\n", reventry->paddr_index,
                    reventry->vaddr_req);
        }
        abort();
        return 0;
    }

    entry = &mapcache->entry[paddr_index % mapcache->nr_buckets];
    while (entry && (entry->paddr_index != paddr_index || entry->size != size)) {
        entry = entry->next;
    }
    if (!entry) {
        DPRINTF("Trying to find address %p that is not in the mapcache!\n", ptr);
        raddr = 0;
    } else {
        raddr = (reventry->paddr_index << MCACHE_BUCKET_SHIFT) +
            ((unsigned long) ptr - (unsigned long) entry->vaddr_base);
    }
    mapcache_unlock();
    return raddr;
}
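
/*
 * Drop one lock reference on the mapping that backs 'buffer'.  The
 * reverse-map record is always freed; the MapCacheEntry itself is only
 * unmapped and freed once its refcount reaches zero, and never when it is
 * the bucket-head entry (pentry == NULL), since that one lives in the
 * hash array rather than on the heap.
 */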
static void xen_invalidate_map_cache_entry_unlocked(uint8_t *buffer)
{
    MapCacheEntry *entry = NULL, *pentry = NULL;
    MapCacheRev *reventry;
    hwaddr paddr_index;
    hwaddr size;
    int found = 0;

    QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
        if (reventry->vaddr_req == buffer) {
            paddr_index = reventry->paddr_index;
            size = reventry->size;
            found = 1;
            break;
        }
    }
    if (!found) {
        DPRINTF("%s, could not find %p\n", __func__, buffer);
        QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
            DPRINTF("   "TARGET_FMT_plx" -> %p is present\n",
                    reventry->paddr_index, reventry->vaddr_req);
        }
        return;
    }
    QTAILQ_REMOVE(&mapcache->locked_entries, reventry, next);
    g_free(reventry);

    if (mapcache->last_entry != NULL &&
        mapcache->last_entry->paddr_index == paddr_index) {
        mapcache->last_entry = NULL;
    }

    entry = &mapcache->entry[paddr_index % mapcache->nr_buckets];
    while (entry && (entry->paddr_index != paddr_index || entry->size != size)) {
        pentry = entry;
        entry = entry->next;
    }
    if (!entry) {
        DPRINTF("Trying to unmap address %p that is not in the mapcache!\n", buffer);
        return;
    }
    entry->lock--;
    if (entry->lock > 0 || pentry == NULL) {
        return;
    }

    pentry->next = entry->next;
    if (munmap(entry->vaddr_base, entry->size) != 0) {
        perror("unmap fails");
        exit(-1);
    }
    g_free(entry->valid_mapping);
    g_free(entry);
}
void xen_invalidate_map_cache_entry(uint8_t *buffer)
{
    mapcache_lock();
    xen_invalidate_map_cache_entry_unlocked(buffer);
    mapcache_unlock();
}
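
/*
 * Throw away every mapping that is not currently locked.  Pending block
 * I/O is drained first so that no in-flight AIO still references a
 * mapping we are about to unmap.  Locked entries are skipped (and warned
 * about via DPRINTF, since callers expect none to remain at this point).
 */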
void xen_invalidate_map_cache(void)
{
    unsigned long i;
    MapCacheRev *reventry;

    /* Flush pending AIO before destroying the mapcache */
    bdrv_drain_all();

    mapcache_lock();

    QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
        DPRINTF("There should be no locked mappings at this time, "
                "but "TARGET_FMT_plx" -> %p is present\n",
                reventry->paddr_index, reventry->vaddr_req);
    }

    for (i = 0; i < mapcache->nr_buckets; i++) {
        MapCacheEntry *entry = &mapcache->entry[i];

        if (entry->vaddr_base == NULL) {
            continue;
        }
        if (entry->lock > 0) {
            continue;
        }

        if (munmap(entry->vaddr_base, entry->size) != 0) {
            perror("unmap fails");
            exit(-1);
        }

        entry->paddr_index = 0;
        entry->vaddr_base = NULL;
        entry->size = 0;
        g_free(entry->valid_mapping);
        entry->valid_mapping = NULL;
    }

    mapcache->last_entry = NULL;

    mapcache_unlock();
}