/*
 * Copyright (C) 2011       Citrix Ltd.
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */
#include "config.h"

#include <sys/resource.h>

#include "hw/xen_backend.h"
#include "blockdev.h"
#include "bitmap.h"

#include <xen/hvm/params.h>
#include <sys/mman.h>

#include "xen-mapcache.h"
#include "trace.h"
//#define MAPCACHE_DEBUG

#ifdef MAPCACHE_DEBUG
#  define DPRINTF(fmt, ...) do { \
    fprintf(stderr, "xen_mapcache: " fmt, ## __VA_ARGS__); \
} while (0)
#else
#  define DPRINTF(fmt, ...) do { } while (0)
#endif
#if defined(__i386__)
#  define MCACHE_BUCKET_SHIFT 16
#  define MCACHE_MAX_SIZE     (1UL<<31) /* 2GB Cap */
#elif defined(__x86_64__)
#  define MCACHE_BUCKET_SHIFT 20
#  define MCACHE_MAX_SIZE     (1UL<<35) /* 32GB Cap */
#endif
#define MCACHE_BUCKET_SIZE (1UL << MCACHE_BUCKET_SHIFT)
/* This is the size of the virtual address space reserved for QEMU that will
 * not be used by the mapcache.
 * Empirical tests showed that QEMU uses about 75MB more than
 * max_mcache_size, hence the 80MB figure below.
 */
#define NON_MCACHE_MEMORY_SIZE (80 * 1024 * 1024)
#define mapcache_lock()   ((void)0)
#define mapcache_unlock() ((void)0)
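/*
 * Layout: the mapcache is a hash table of buckets, each covering
 * MCACHE_BUCKET_SIZE bytes of guest physical memory mapped via
 * xc_map_foreign_bulk().  Colliding buckets are chained through
 * MapCacheEntry.next, and mappings handed out with lock != 0 are also
 * tracked in a reverse list of MapCacheRev entries so they can be
 * translated back and released later.  mapcache_lock()/mapcache_unlock()
 * above are no-ops, presumably because callers serialize access
 * externally.
 */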
typedef struct MapCacheEntry {
    target_phys_addr_t paddr_index;
    uint8_t *vaddr_base;
    unsigned long *valid_mapping;
    uint8_t lock;
    target_phys_addr_t size;
    struct MapCacheEntry *next;
} MapCacheEntry;
typedef struct MapCacheRev {
    uint8_t *vaddr_req;
    target_phys_addr_t paddr_index;
    target_phys_addr_t size;
    QTAILQ_ENTRY(MapCacheRev) next;
} MapCacheRev;
typedef struct MapCache {
    MapCacheEntry *entry;
    unsigned long nr_buckets;
    QTAILQ_HEAD(map_cache_head, MapCacheRev) locked_entries;

    /* For most cases (>99.9%), the page address is the same. */
    target_phys_addr_t last_address_index;
    uint8_t *last_address_vaddr;
    unsigned long max_mcache_size;
    unsigned int mcache_bucket_shift;
} MapCache;
static MapCache *mapcache;
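/* Returns 1 if all 'size' bits starting at bit 'nr' are set in 'addr',
 * 0 otherwise: find_next_zero_bit() only reaches nr + size when there is
 * no zero bit anywhere in that range. */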
static inline int test_bits(int nr, int size, const unsigned long *addr)
{
    unsigned long res = find_next_zero_bit(addr, size + nr, nr);
    if (res >= nr + size) {
        return 1;
    } else {
        return 0;
    }
}
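/* Size the cache against the address-space limit: root gets the full
 * MCACHE_MAX_SIZE, other users get RLIMIT_AS minus the
 * NON_MCACHE_MEMORY_SIZE headroom kept for QEMU itself. */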
void xen_map_cache_init(void)
{
    unsigned long size;
    struct rlimit rlimit_as;

    mapcache = g_malloc0(sizeof (MapCache));

    QTAILQ_INIT(&mapcache->locked_entries);
    mapcache->last_address_index = -1;

    if (geteuid() == 0) {
        rlimit_as.rlim_cur = RLIM_INFINITY;
        rlimit_as.rlim_max = RLIM_INFINITY;
        mapcache->max_mcache_size = MCACHE_MAX_SIZE;
    } else {
        getrlimit(RLIMIT_AS, &rlimit_as);
        rlimit_as.rlim_cur = rlimit_as.rlim_max;

        if (rlimit_as.rlim_max != RLIM_INFINITY) {
            fprintf(stderr, "Warning: QEMU's maximum size of virtual"
                    " memory is not infinity.\n");
        }
        if (rlimit_as.rlim_max < MCACHE_MAX_SIZE + NON_MCACHE_MEMORY_SIZE) {
            mapcache->max_mcache_size = rlimit_as.rlim_max -
                NON_MCACHE_MEMORY_SIZE;
        } else {
            mapcache->max_mcache_size = MCACHE_MAX_SIZE;
        }
    }

    setrlimit(RLIMIT_AS, &rlimit_as);

    mapcache->nr_buckets =
        (((mapcache->max_mcache_size >> XC_PAGE_SHIFT) +
          (1UL << (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT)) - 1) >>
         (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT));

    size = mapcache->nr_buckets * sizeof (MapCacheEntry);
    size = (size + XC_PAGE_SIZE - 1) & ~(XC_PAGE_SIZE - 1);
    DPRINTF("%s, nr_buckets = %lx size %lu\n", __func__,
            mapcache->nr_buckets, size);
    mapcache->entry = g_malloc0(size);
}
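/* (Re)map 'size' bytes of guest memory starting at bucket 'address_index'
 * into 'entry', dropping any previous mapping first.  Pages that fail to
 * map are recorded as holes in entry->valid_mapping rather than failing
 * the whole bucket. */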
static void xen_remap_bucket(MapCacheEntry *entry,
                             target_phys_addr_t size,
                             target_phys_addr_t address_index)
{
    uint8_t *vaddr_base;
    xen_pfn_t *pfns;
    int *err;
    unsigned int i;
    target_phys_addr_t nb_pfn = size >> XC_PAGE_SHIFT;

    trace_xen_remap_bucket(address_index);

    pfns = g_malloc0(nb_pfn * sizeof (xen_pfn_t));
    err = g_malloc0(nb_pfn * sizeof (int));

    if (entry->vaddr_base != NULL) {
        if (munmap(entry->vaddr_base, entry->size) != 0) {
            perror("unmap fails");
            exit(-1);
        }
    }
    if (entry->valid_mapping != NULL) {
        g_free(entry->valid_mapping);
        entry->valid_mapping = NULL;
    }

    for (i = 0; i < nb_pfn; i++) {
        pfns[i] = (address_index << (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT)) + i;
    }

    vaddr_base = xc_map_foreign_bulk(xen_xc, xen_domid, PROT_READ|PROT_WRITE,
                                     pfns, err, nb_pfn);
    if (vaddr_base == NULL) {
        perror("xc_map_foreign_bulk");
        exit(-1);
    }

    entry->vaddr_base = vaddr_base;
    entry->paddr_index = address_index;
    entry->size = size;
    entry->valid_mapping = (unsigned long *) g_malloc0(sizeof(unsigned long) *
            BITS_TO_LONGS(size >> XC_PAGE_SHIFT));

    bitmap_zero(entry->valid_mapping, nb_pfn);
    for (i = 0; i < nb_pfn; i++) {
        if (!err[i]) {
            bitmap_set(entry->valid_mapping, i, 1);
        }
    }

    g_free(pfns);
    g_free(err);
}
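/* Map guest physical address 'phys_addr' and return the corresponding
 * virtual address.  The fast path reuses the last translation; otherwise
 * the bucket chain is searched for a compatible mapping, remapping an
 * unlocked entry or appending a new one as needed.  With 'lock' set the
 * mapping is pinned and remembered in locked_entries until
 * xen_invalidate_map_cache_entry() releases it. */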
uint8_t *xen_map_cache(target_phys_addr_t phys_addr, target_phys_addr_t size,
                       uint8_t lock)
{
    MapCacheEntry *entry, *pentry = NULL;
    target_phys_addr_t address_index  = phys_addr >> MCACHE_BUCKET_SHIFT;
    target_phys_addr_t address_offset = phys_addr & (MCACHE_BUCKET_SIZE - 1);
    target_phys_addr_t __size = size;

    trace_xen_map_cache(phys_addr);

    if (address_index == mapcache->last_address_index && !lock && !__size) {
        trace_xen_map_cache_return(mapcache->last_address_vaddr + address_offset);
        return mapcache->last_address_vaddr + address_offset;
    }

    /* size is always a multiple of MCACHE_BUCKET_SIZE */
    if ((address_offset + (__size % MCACHE_BUCKET_SIZE)) > MCACHE_BUCKET_SIZE) {
        __size += MCACHE_BUCKET_SIZE;
    }
    if (__size % MCACHE_BUCKET_SIZE) {
        __size += MCACHE_BUCKET_SIZE - (__size % MCACHE_BUCKET_SIZE);
    }
    if (!__size) {
        __size = MCACHE_BUCKET_SIZE;
    }

    entry = &mapcache->entry[address_index % mapcache->nr_buckets];

    while (entry && entry->lock && entry->vaddr_base &&
            (entry->paddr_index != address_index || entry->size != __size ||
             !test_bits(address_offset >> XC_PAGE_SHIFT, size >> XC_PAGE_SHIFT,
                 entry->valid_mapping))) {
        pentry = entry;
        entry = entry->next;
    }
    if (!entry) {
        entry = g_malloc0(sizeof (MapCacheEntry));
        pentry->next = entry;
        xen_remap_bucket(entry, __size, address_index);
    } else if (!entry->lock) {
        if (!entry->vaddr_base || entry->paddr_index != address_index ||
                entry->size != __size ||
                !test_bits(address_offset >> XC_PAGE_SHIFT, size >> XC_PAGE_SHIFT,
                    entry->valid_mapping)) {
            xen_remap_bucket(entry, __size, address_index);
        }
    }

    if (!test_bits(address_offset >> XC_PAGE_SHIFT, size >> XC_PAGE_SHIFT,
                entry->valid_mapping)) {
        mapcache->last_address_index = -1;
        trace_xen_map_cache_return(NULL);
        return NULL;
    }

    mapcache->last_address_index = address_index;
    mapcache->last_address_vaddr = entry->vaddr_base;
    if (lock) {
        MapCacheRev *reventry = g_malloc0(sizeof(MapCacheRev));
        entry->lock++;
        reventry->vaddr_req = mapcache->last_address_vaddr + address_offset;
        reventry->paddr_index = mapcache->last_address_index;
        reventry->size = entry->size;
        QTAILQ_INSERT_HEAD(&mapcache->locked_entries, reventry, next);
    }

    trace_xen_map_cache_return(mapcache->last_address_vaddr + address_offset);
    return mapcache->last_address_vaddr + address_offset;
}
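/* Translate a pointer previously returned by a locked xen_map_cache()
 * call back into a guest ram_addr_t, using the reverse map; aborts if the
 * pointer was never handed out. */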
ram_addr_t xen_ram_addr_from_mapcache(void *ptr)
{
    MapCacheEntry *entry = NULL;
    MapCacheRev *reventry;
    target_phys_addr_t paddr_index;
    target_phys_addr_t size;
    int found = 0;

    QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
        if (reventry->vaddr_req == ptr) {
            paddr_index = reventry->paddr_index;
            size = reventry->size;
            found = 1;
            break;
        }
    }
    if (!found) {
        fprintf(stderr, "%s, could not find %p\n", __func__, ptr);
        QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
            DPRINTF("   "TARGET_FMT_plx" -> %p is present\n", reventry->paddr_index,
                    reventry->vaddr_req);
        }
        abort();
        return 0;
    }

    entry = &mapcache->entry[paddr_index % mapcache->nr_buckets];
    while (entry && (entry->paddr_index != paddr_index || entry->size != size)) {
        entry = entry->next;
    }
    if (!entry) {
        DPRINTF("Trying to find address %p that is not in the mapcache!\n", ptr);
        return 0;
    }
    return (reventry->paddr_index << MCACHE_BUCKET_SHIFT) +
        ((unsigned long) ptr - (unsigned long) entry->vaddr_base);
}
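/* Drop one lock reference on 'buffer'.  The bucket is only unmapped and
 * freed once its lock count reaches zero and it is not the head of its
 * chain (the head entry lives inside the bucket array and cannot be
 * freed). */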
void xen_invalidate_map_cache_entry(uint8_t *buffer)
{
    MapCacheEntry *entry = NULL, *pentry = NULL;
    MapCacheRev *reventry;
    target_phys_addr_t paddr_index;
    target_phys_addr_t size;
    int found = 0;

    if (mapcache->last_address_vaddr == buffer) {
        mapcache->last_address_index = -1;
    }

    QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
        if (reventry->vaddr_req == buffer) {
            paddr_index = reventry->paddr_index;
            size = reventry->size;
            found = 1;
            break;
        }
    }
    if (!found) {
        DPRINTF("%s, could not find %p\n", __func__, buffer);
        QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
            DPRINTF("   "TARGET_FMT_plx" -> %p is present\n", reventry->paddr_index, reventry->vaddr_req);
        }
        return;
    }
    QTAILQ_REMOVE(&mapcache->locked_entries, reventry, next);
    g_free(reventry);

    entry = &mapcache->entry[paddr_index % mapcache->nr_buckets];
    while (entry && (entry->paddr_index != paddr_index || entry->size != size)) {
        pentry = entry;
        entry = entry->next;
    }
    if (!entry) {
        DPRINTF("Trying to unmap address %p that is not in the mapcache!\n", buffer);
        return;
    }
    entry->lock--;
    if (entry->lock > 0 || pentry == NULL) {
        return;
    }

    pentry->next = entry->next;
    if (munmap(entry->vaddr_base, entry->size) != 0) {
        perror("unmap fails");
        exit(-1);
    }
    g_free(entry->valid_mapping);
    g_free(entry);
}
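/* Unmap every bucket in the cache and reset the fast-path state; locked
 * entries are expected to have been released by this point (see the
 * DPRINTF below). */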
void xen_invalidate_map_cache(void)
{
    unsigned long i;
    MapCacheRev *reventry;

    /* Flush pending AIO before destroying the mapcache */
    qemu_aio_flush();

    QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
        DPRINTF("There should be no locked mappings at this time, "
                "but "TARGET_FMT_plx" -> %p is present\n",
                reventry->paddr_index, reventry->vaddr_req);
    }

    mapcache_lock();

    for (i = 0; i < mapcache->nr_buckets; i++) {
        MapCacheEntry *entry = &mapcache->entry[i];

        if (entry->vaddr_base == NULL) {
            continue;
        }

        if (munmap(entry->vaddr_base, entry->size) != 0) {
            perror("unmap fails");
            exit(-1);
        }

        entry->paddr_index = 0;
        entry->vaddr_base = NULL;
        entry->size = 0;
        g_free(entry->valid_mapping);
        entry->valid_mapping = NULL;
    }

    mapcache->last_address_index = -1;
    mapcache->last_address_vaddr = NULL;

    mapcache_unlock();
}