/*
 * Copyright (C) 2011 Citrix Ltd.
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"

#include <sys/resource.h>

#include "hw/xen/xen_backend.h"
#include "sysemu/blockdev.h"
#include "qemu/bitmap.h"

#include <xen/hvm/params.h>

#include "sysemu/xen-mapcache.h"
#include "trace.h"

//#define MAPCACHE_DEBUG

#ifdef MAPCACHE_DEBUG
#  define DPRINTF(fmt, ...) do { \
    fprintf(stderr, "xen_mapcache: " fmt, ## __VA_ARGS__); \
} while (0)
#else
#  define DPRINTF(fmt, ...) do { } while (0)
#endif

#if HOST_LONG_BITS == 32
#  define MCACHE_BUCKET_SHIFT 16
#  define MCACHE_MAX_SIZE     (1UL << 31) /* 2GB Cap */
#else
#  define MCACHE_BUCKET_SHIFT 20
#  define MCACHE_MAX_SIZE     (1UL << 35) /* 32GB Cap */
#endif
#define MCACHE_BUCKET_SIZE (1UL << MCACHE_BUCKET_SHIFT)

/* This is the size of the virtual address space reserved for QEMU that will
 * not be used by the mapcache.
 * Empirical tests showed that QEMU uses about 75MB more than
 * max_mcache_size.
 */
#define NON_MCACHE_MEMORY_SIZE (80 * 1024 * 1024)

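/*
 * The mapcache keeps an array of buckets, one per MCACHE_BUCKET_SIZE-aligned
 * range of guest physical address space, with hash collisions resolved by
 * chaining through MapCacheEntry::next.  Locked mappings are additionally
 * recorded in a reverse list (MapCacheRev) so a host pointer can later be
 * translated back to its guest physical address and unmapped.
 */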
typedef struct MapCacheEntry {
    hwaddr paddr_index;
    uint8_t *vaddr_base;
    unsigned long *valid_mapping;
    uint8_t lock;
    hwaddr size;
    struct MapCacheEntry *next;
} MapCacheEntry;

typedef struct MapCacheRev {
    uint8_t *vaddr_req;
    hwaddr paddr_index;
    hwaddr size;
    QTAILQ_ENTRY(MapCacheRev) next;
    bool dma;
} MapCacheRev;

typedef struct MapCache {
    MapCacheEntry *entry;
    unsigned long nr_buckets;
    QTAILQ_HEAD(map_cache_head, MapCacheRev) locked_entries;

    /* For most cases (>99.9%), the page address is the same. */
    MapCacheEntry *last_entry;
    unsigned long max_mcache_size;
    unsigned int mcache_bucket_shift;

    phys_offset_to_gaddr_t phys_offset_to_gaddr;
    QemuMutex lock;
    void *opaque;
} MapCache;

static MapCache *mapcache;

static inline void mapcache_lock(void)
{
    qemu_mutex_lock(&mapcache->lock);
}

static inline void mapcache_unlock(void)
{
    qemu_mutex_unlock(&mapcache->lock);
}

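/*
 * Returns 1 iff bits [nr, nr + size) are all set in the bitmap, i.e. every
 * page of the requested range was successfully mapped.
 */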
static inline int test_bits(int nr, int size, const unsigned long *addr)
{
    unsigned long res = find_next_zero_bit(addr, size + nr, nr);
    if (res >= nr + size) {
        return 1;
    } else {
        return 0;
    }
}

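/*
 * One-time setup: size the cache against the process RLIMIT_AS so that the
 * mapcache plus QEMU's other allocations stay under the address-space limit,
 * then allocate the bucket array.
 */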
void xen_map_cache_init(phys_offset_to_gaddr_t f, void *opaque)
{
    unsigned long size;
    struct rlimit rlimit_as;

    mapcache = g_malloc0(sizeof (MapCache));

    mapcache->phys_offset_to_gaddr = f;
    mapcache->opaque = opaque;
    qemu_mutex_init(&mapcache->lock);

    QTAILQ_INIT(&mapcache->locked_entries);

    if (geteuid() == 0) {
        rlimit_as.rlim_cur = RLIM_INFINITY;
        rlimit_as.rlim_max = RLIM_INFINITY;
        mapcache->max_mcache_size = MCACHE_MAX_SIZE;
    } else {
        getrlimit(RLIMIT_AS, &rlimit_as);
        rlimit_as.rlim_cur = rlimit_as.rlim_max;

        if (rlimit_as.rlim_max != RLIM_INFINITY) {
            fprintf(stderr, "Warning: QEMU's maximum size of virtual"
                    " memory is not infinity.\n");
        }
        if (rlimit_as.rlim_max < MCACHE_MAX_SIZE + NON_MCACHE_MEMORY_SIZE) {
            mapcache->max_mcache_size = rlimit_as.rlim_max -
                NON_MCACHE_MEMORY_SIZE;
        } else {
            mapcache->max_mcache_size = MCACHE_MAX_SIZE;
        }
    }

    setrlimit(RLIMIT_AS, &rlimit_as);

    mapcache->nr_buckets =
        (((mapcache->max_mcache_size >> XC_PAGE_SHIFT) +
          (1UL << (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT)) - 1) >>
         (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT));

    size = mapcache->nr_buckets * sizeof (MapCacheEntry);
    size = (size + XC_PAGE_SIZE - 1) & ~(XC_PAGE_SIZE - 1);
    DPRINTF("%s, nr_buckets = %lx size %lu\n", __func__,
            mapcache->nr_buckets, size);
    mapcache->entry = g_malloc0(size);
}

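/*
 * (Re)build the host mapping backing one cache entry: tear down any previous
 * mapping, map the bucket's guest frames with xenforeignmemory_map(), and
 * record per-page success in entry->valid_mapping.
 */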
static void xen_remap_bucket(MapCacheEntry *entry,
                             hwaddr size,
                             hwaddr address_index)
{
    uint8_t *vaddr_base;
    xen_pfn_t *pfns;
    int *err;
    unsigned int i;
    hwaddr nb_pfn = size >> XC_PAGE_SHIFT;

    trace_xen_remap_bucket(address_index);

    pfns = g_malloc0(nb_pfn * sizeof (xen_pfn_t));
    err = g_malloc0(nb_pfn * sizeof (int));

    if (entry->vaddr_base != NULL) {
        ram_block_notify_remove(entry->vaddr_base, entry->size);
        if (munmap(entry->vaddr_base, entry->size) != 0) {
            perror("unmap fails");
            exit(-1);
        }
    }
    g_free(entry->valid_mapping);
    entry->valid_mapping = NULL;

    for (i = 0; i < nb_pfn; i++) {
        pfns[i] = (address_index << (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT)) + i;
    }

    vaddr_base = xenforeignmemory_map(xen_fmem, xen_domid, PROT_READ|PROT_WRITE,
                                      nb_pfn, pfns, err);
    if (vaddr_base == NULL) {
        perror("xenforeignmemory_map");
        exit(-1);
    }

    entry->vaddr_base = vaddr_base;
    entry->paddr_index = address_index;
    entry->size = size;
    entry->valid_mapping = (unsigned long *) g_malloc0(sizeof(unsigned long) *
            BITS_TO_LONGS(size >> XC_PAGE_SHIFT));

    ram_block_notify_add(entry->vaddr_base, entry->size);
    bitmap_zero(entry->valid_mapping, nb_pfn);
    for (i = 0; i < nb_pfn; i++) {
        if (!err[i]) {
            bitmap_set(entry->valid_mapping, i, 1);
        }
    }

    g_free(pfns);
    g_free(err);
}

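/*
 * Core lookup: hash the guest physical address to a bucket, walk the
 * collision chain for a usable entry, and (re)map the bucket on a miss.  If
 * the mapping is still invalid afterwards, retry once through the
 * phys_offset_to_gaddr() translation callback before giving up.
 */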
static uint8_t *xen_map_cache_unlocked(hwaddr phys_addr, hwaddr size,
                                       uint8_t lock, bool dma)
{
    MapCacheEntry *entry, *pentry = NULL;
    hwaddr address_index;
    hwaddr address_offset;
    hwaddr cache_size = size;
    hwaddr test_bit_size;
    bool translated = false;

tryagain:
    address_index  = phys_addr >> MCACHE_BUCKET_SHIFT;
    address_offset = phys_addr & (MCACHE_BUCKET_SIZE - 1);

    trace_xen_map_cache(phys_addr);

    /* test_bit_size is always a multiple of XC_PAGE_SIZE */
    if (size) {
        test_bit_size = size + (phys_addr & (XC_PAGE_SIZE - 1));

        if (test_bit_size % XC_PAGE_SIZE) {
            test_bit_size += XC_PAGE_SIZE - (test_bit_size % XC_PAGE_SIZE);
        }
    } else {
        test_bit_size = XC_PAGE_SIZE;
    }

    if (mapcache->last_entry != NULL &&
        mapcache->last_entry->paddr_index == address_index &&
        !lock && !size &&
        test_bits(address_offset >> XC_PAGE_SHIFT,
                  test_bit_size >> XC_PAGE_SHIFT,
                  mapcache->last_entry->valid_mapping)) {
        trace_xen_map_cache_return(mapcache->last_entry->vaddr_base + address_offset);
        return mapcache->last_entry->vaddr_base + address_offset;
    }

    /* size is always a multiple of MCACHE_BUCKET_SIZE */
    if (size) {
        cache_size = size + address_offset;
        if (cache_size % MCACHE_BUCKET_SIZE) {
            cache_size += MCACHE_BUCKET_SIZE - (cache_size % MCACHE_BUCKET_SIZE);
        }
    } else {
        cache_size = MCACHE_BUCKET_SIZE;
    }

    entry = &mapcache->entry[address_index % mapcache->nr_buckets];

    while (entry && entry->lock && entry->vaddr_base &&
            (entry->paddr_index != address_index || entry->size != cache_size ||
             !test_bits(address_offset >> XC_PAGE_SHIFT,
                 test_bit_size >> XC_PAGE_SHIFT,
                 entry->valid_mapping))) {
        pentry = entry;
        entry = entry->next;
    }
    if (!entry) {
        entry = g_malloc0(sizeof (MapCacheEntry));
        pentry->next = entry;
        xen_remap_bucket(entry, cache_size, address_index);
    } else if (!entry->lock) {
        if (!entry->vaddr_base || entry->paddr_index != address_index ||
                entry->size != cache_size ||
                !test_bits(address_offset >> XC_PAGE_SHIFT,
                    test_bit_size >> XC_PAGE_SHIFT,
                    entry->valid_mapping)) {
            xen_remap_bucket(entry, cache_size, address_index);
        }
    }

    if (!test_bits(address_offset >> XC_PAGE_SHIFT,
                   test_bit_size >> XC_PAGE_SHIFT,
                   entry->valid_mapping)) {
        mapcache->last_entry = NULL;
        if (!translated && mapcache->phys_offset_to_gaddr) {
            phys_addr = mapcache->phys_offset_to_gaddr(phys_addr, size, mapcache->opaque);
            translated = true;
            goto tryagain;
        }
        trace_xen_map_cache_return(NULL);
        return NULL;
    }

    mapcache->last_entry = entry;
    if (lock) {
        MapCacheRev *reventry = g_malloc0(sizeof(MapCacheRev));
        entry->lock++;
        reventry->dma = dma;
        reventry->vaddr_req = mapcache->last_entry->vaddr_base + address_offset;
        reventry->paddr_index = mapcache->last_entry->paddr_index;
        reventry->size = entry->size;
        QTAILQ_INSERT_HEAD(&mapcache->locked_entries, reventry, next);
    }

    trace_xen_map_cache_return(mapcache->last_entry->vaddr_base + address_offset);
    return mapcache->last_entry->vaddr_base + address_offset;
}

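/* Public entry point: same as xen_map_cache_unlocked(), but takes the
 * mapcache mutex around the lookup. */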
uint8_t *xen_map_cache(hwaddr phys_addr, hwaddr size,
                       uint8_t lock, bool dma)
{
    uint8_t *p;

    mapcache_lock();
    p = xen_map_cache_unlocked(phys_addr, size, lock, dma);
    mapcache_unlock();
    return p;
}

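/*
 * Reverse translation: given a host pointer returned by xen_map_cache(),
 * search the locked-entries list to recover the guest ram_addr_t.
 */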
ram_addr_t xen_ram_addr_from_mapcache(void *ptr)
{
    MapCacheEntry *entry = NULL;
    MapCacheRev *reventry;
    hwaddr paddr_index;
    hwaddr size;
    ram_addr_t raddr;
    int found = 0;

    mapcache_lock();
    QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
        if (reventry->vaddr_req == ptr) {
            paddr_index = reventry->paddr_index;
            size = reventry->size;
            found = 1;
            break;
        }
    }
    if (!found) {
        fprintf(stderr, "%s, could not find %p\n", __func__, ptr);
        QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
            DPRINTF("   "TARGET_FMT_plx" -> %p is present\n", reventry->paddr_index,
                    reventry->vaddr_req);
        }
        abort();
        return 0;
    }

    entry = &mapcache->entry[paddr_index % mapcache->nr_buckets];
    while (entry && (entry->paddr_index != paddr_index || entry->size != size)) {
        entry = entry->next;
    }
    if (!entry) {
        DPRINTF("Trying to find address %p that is not in the mapcache!\n", ptr);
        raddr = 0;
    } else {
        raddr = (reventry->paddr_index << MCACHE_BUCKET_SHIFT) +
             ((unsigned long) ptr - (unsigned long) entry->vaddr_base);
    }
    mapcache_unlock();
    return raddr;
}

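/*
 * Unlock one mapping: drop its reverse-map record, decrement the entry's
 * lock count, and unmap the bucket once it is unlocked and unchained.
 */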
static void xen_invalidate_map_cache_entry_unlocked(uint8_t *buffer)
{
    MapCacheEntry *entry = NULL, *pentry = NULL;
    MapCacheRev *reventry;
    hwaddr paddr_index;
    hwaddr size;
    int found = 0;

    QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
        if (reventry->vaddr_req == buffer) {
            paddr_index = reventry->paddr_index;
            size = reventry->size;
            found = 1;
            break;
        }
    }
    if (!found) {
        DPRINTF("%s, could not find %p\n", __func__, buffer);
        QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
            DPRINTF("   "TARGET_FMT_plx" -> %p is present\n", reventry->paddr_index, reventry->vaddr_req);
        }
        return;
    }
    QTAILQ_REMOVE(&mapcache->locked_entries, reventry, next);
    g_free(reventry);

    if (mapcache->last_entry != NULL &&
        mapcache->last_entry->paddr_index == paddr_index) {
        mapcache->last_entry = NULL;
    }

    entry = &mapcache->entry[paddr_index % mapcache->nr_buckets];
    while (entry && (entry->paddr_index != paddr_index || entry->size != size)) {
        pentry = entry;
        entry = entry->next;
    }
    if (!entry) {
        DPRINTF("Trying to unmap address %p that is not in the mapcache!\n", buffer);
        return;
    }
    entry->lock--;
    if (entry->lock > 0 || pentry == NULL) {
        return;
    }

    pentry->next = entry->next;
    ram_block_notify_remove(entry->vaddr_base, entry->size);
    if (munmap(entry->vaddr_base, entry->size) != 0) {
        perror("unmap fails");
        exit(-1);
    }
    g_free(entry->valid_mapping);
    g_free(entry);
}

void xen_invalidate_map_cache_entry(uint8_t *buffer)
{
    mapcache_lock();
    xen_invalidate_map_cache_entry_unlocked(buffer);
    mapcache_unlock();
}

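/*
 * Flush the whole cache: drain pending AIO first (an in-flight request could
 * still hold a pointer into a bucket), warn about still-locked DMA mappings,
 * and unmap every unlocked bucket.
 */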
void xen_invalidate_map_cache(void)
{
    unsigned long i;
    MapCacheRev *reventry;

    /* Flush pending AIO before destroying the mapcache */
    bdrv_drain_all();

    mapcache_lock();

    QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
        if (!reventry->dma) {
            continue;
        }
        fprintf(stderr, "Locked DMA mapping while invalidating mapcache!"
                " "TARGET_FMT_plx" -> %p is present\n",
                reventry->paddr_index, reventry->vaddr_req);
    }

    for (i = 0; i < mapcache->nr_buckets; i++) {
        MapCacheEntry *entry = &mapcache->entry[i];

        if (entry->vaddr_base == NULL) {
            continue;
        }
        if (entry->lock > 0) {
            continue;
        }

        if (munmap(entry->vaddr_base, entry->size) != 0) {
            perror("unmap fails");
            exit(-1);
        }

        entry->paddr_index = 0;
        entry->vaddr_base = NULL;
        entry->size = 0;
        g_free(entry->valid_mapping);
        entry->valid_mapping = NULL;
    }

    mapcache->last_entry = NULL;

    mapcache_unlock();
}