/*
 * Copyright (C) 2011       Citrix Ltd.
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
#include <sys/resource.h>

#include "hw/xen_backend.h"
#include "bitmap.h"

#include <xen/hvm/params.h>
#include <sys/mman.h>

#include "xen-mapcache.h"
#include "trace.h"


//#define MAPCACHE_DEBUG

#ifdef MAPCACHE_DEBUG
#  define DPRINTF(fmt, ...) do { \
    fprintf(stderr, "xen_mapcache: " fmt, ## __VA_ARGS__); \
} while (0)
#else
#  define DPRINTF(fmt, ...) do { } while (0)
#endif

#if defined(__i386__)
#  define MCACHE_BUCKET_SHIFT 16
#  define MCACHE_MAX_SIZE     (1UL<<31) /* 2GB Cap */
#elif defined(__x86_64__)
#  define MCACHE_BUCKET_SHIFT 20
#  define MCACHE_MAX_SIZE     (1UL<<35) /* 32GB Cap */
#endif
#define MCACHE_BUCKET_SIZE (1UL << MCACHE_BUCKET_SHIFT)
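
/*
 * A MapCacheEntry describes one mapped bucket of guest physical memory;
 * entries that collide on the same bucket index are chained through ->next.
 * A MapCacheRev records a locked mapping so it can later be looked up again
 * by the virtual address that was handed out.
 */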
typedef struct MapCacheEntry {
    target_phys_addr_t paddr_index;
    uint8_t *vaddr_base;
    unsigned long *valid_mapping;
    uint8_t lock;
    target_phys_addr_t size;
    struct MapCacheEntry *next;
} MapCacheEntry;

typedef struct MapCacheRev {
    uint8_t *vaddr_req;
    target_phys_addr_t paddr_index;
    target_phys_addr_t size;
    QTAILQ_ENTRY(MapCacheRev) next;
} MapCacheRev;

typedef struct MapCache {
    MapCacheEntry *entry;
    unsigned long nr_buckets;
    QTAILQ_HEAD(map_cache_head, MapCacheRev) locked_entries;

    /* For most cases (>99.9%), the page address is the same. */
    target_phys_addr_t last_address_index;
    uint8_t *last_address_vaddr;
    unsigned long max_mcache_size;
    unsigned int mcache_bucket_shift;
} MapCache;

static MapCache *mapcache;
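
/* Return 1 if all bits in [nr, nr + size) of addr are set, 0 otherwise. */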
static inline int test_bits(int nr, int size, const unsigned long *addr)
{
    unsigned long res = find_next_zero_bit(addr, size + nr, nr);
    if (res >= nr + size)
        return 1;
    else
        return 0;
}
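
/*
 * Size the bucket array from the process address-space rlimit (capped at
 * MCACHE_MAX_SIZE) and allocate the initially empty entries.
 */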
void qemu_map_cache_init(void)
{
    unsigned long size;
    struct rlimit rlimit_as;

    mapcache = qemu_mallocz(sizeof (MapCache));

    QTAILQ_INIT(&mapcache->locked_entries);
    mapcache->last_address_index = -1;

    getrlimit(RLIMIT_AS, &rlimit_as);
    if (rlimit_as.rlim_max < MCACHE_MAX_SIZE) {
        rlimit_as.rlim_cur = rlimit_as.rlim_max;
    } else {
        rlimit_as.rlim_cur = MCACHE_MAX_SIZE;
    }

    setrlimit(RLIMIT_AS, &rlimit_as);
    mapcache->max_mcache_size = rlimit_as.rlim_cur;

    mapcache->nr_buckets =
        (((mapcache->max_mcache_size >> XC_PAGE_SHIFT) +
          (1UL << (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT)) - 1) >>
         (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT));

    size = mapcache->nr_buckets * sizeof (MapCacheEntry);
    size = (size + XC_PAGE_SIZE - 1) & ~(XC_PAGE_SIZE - 1);
    DPRINTF("qemu_map_cache_init, nr_buckets = %lx size %lu\n", mapcache->nr_buckets, size);
    mapcache->entry = qemu_mallocz(size);
}
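
/*
 * Replace the mapping backing a cache entry: unmap any previous range, map
 * the bucket's pfns with xc_map_foreign_bulk, and record which pages were
 * mapped successfully in entry->valid_mapping.
 */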
static void qemu_remap_bucket(MapCacheEntry *entry,
                              target_phys_addr_t size,
                              target_phys_addr_t address_index)
{
    uint8_t *vaddr_base;
    xen_pfn_t *pfns;
    int *err;
    unsigned int i;
    target_phys_addr_t nb_pfn = size >> XC_PAGE_SHIFT;

    trace_qemu_remap_bucket(address_index);

    pfns = qemu_mallocz(nb_pfn * sizeof (xen_pfn_t));
    err = qemu_mallocz(nb_pfn * sizeof (int));

    if (entry->vaddr_base != NULL) {
        if (munmap(entry->vaddr_base, entry->size) != 0) {
            perror("unmap fails");
            exit(-1);
        }
    }
    if (entry->valid_mapping != NULL) {
        qemu_free(entry->valid_mapping);
        entry->valid_mapping = NULL;
    }

    for (i = 0; i < nb_pfn; i++) {
        pfns[i] = (address_index << (MCACHE_BUCKET_SHIFT-XC_PAGE_SHIFT)) + i;
    }

    vaddr_base = xc_map_foreign_bulk(xen_xc, xen_domid, PROT_READ|PROT_WRITE,
                                     pfns, err, nb_pfn);
    if (vaddr_base == NULL) {
        perror("xc_map_foreign_bulk");
        exit(-1);
    }

    entry->vaddr_base = vaddr_base;
    entry->paddr_index = address_index;
    entry->size = size;
    entry->valid_mapping = (unsigned long *) qemu_mallocz(sizeof(unsigned long) *
            BITS_TO_LONGS(size >> XC_PAGE_SHIFT));

    bitmap_zero(entry->valid_mapping, nb_pfn);
    for (i = 0; i < nb_pfn; i++) {
        if (!err[i]) {
            bitmap_set(entry->valid_mapping, i, 1);
        }
    }

    qemu_free(pfns);
    qemu_free(err);
}
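
/*
 * Map guest physical address phys_addr and return a pointer into the
 * corresponding bucket.  With lock != 0 the mapping is recorded in
 * locked_entries and stays pinned until qemu_invalidate_entry() releases it.
 */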
uint8_t *qemu_map_cache(target_phys_addr_t phys_addr, target_phys_addr_t size, uint8_t lock)
{
    MapCacheEntry *entry, *pentry = NULL;
    target_phys_addr_t address_index  = phys_addr >> MCACHE_BUCKET_SHIFT;
    target_phys_addr_t address_offset = phys_addr & (MCACHE_BUCKET_SIZE - 1);
    target_phys_addr_t __size = size;

    trace_qemu_map_cache(phys_addr);

    if (address_index == mapcache->last_address_index && !lock && !__size) {
        trace_qemu_map_cache_return(mapcache->last_address_vaddr + address_offset);
        return mapcache->last_address_vaddr + address_offset;
    }

    /* size is always a multiple of MCACHE_BUCKET_SIZE */
    if ((address_offset + (__size % MCACHE_BUCKET_SIZE)) > MCACHE_BUCKET_SIZE)
        __size += MCACHE_BUCKET_SIZE;
    if (__size % MCACHE_BUCKET_SIZE)
        __size += MCACHE_BUCKET_SIZE - (__size % MCACHE_BUCKET_SIZE);
    if (!__size)
        __size = MCACHE_BUCKET_SIZE;

    entry = &mapcache->entry[address_index % mapcache->nr_buckets];

    while (entry && entry->lock && entry->vaddr_base &&
            (entry->paddr_index != address_index || entry->size != __size ||
             !test_bits(address_offset >> XC_PAGE_SHIFT, size >> XC_PAGE_SHIFT,
                 entry->valid_mapping))) {
        pentry = entry;
        entry = entry->next;
    }
    if (!entry) {
        entry = qemu_mallocz(sizeof (MapCacheEntry));
        pentry->next = entry;
        qemu_remap_bucket(entry, __size, address_index);
    } else if (!entry->lock) {
        if (!entry->vaddr_base || entry->paddr_index != address_index ||
                entry->size != __size ||
                !test_bits(address_offset >> XC_PAGE_SHIFT, size >> XC_PAGE_SHIFT,
                    entry->valid_mapping)) {
            qemu_remap_bucket(entry, __size, address_index);
        }
    }

    if (!test_bits(address_offset >> XC_PAGE_SHIFT, size >> XC_PAGE_SHIFT,
                entry->valid_mapping)) {
        mapcache->last_address_index = -1;
        trace_qemu_map_cache_return(NULL);
        return NULL;
    }

    mapcache->last_address_index = address_index;
    mapcache->last_address_vaddr = entry->vaddr_base;
    if (lock) {
        MapCacheRev *reventry = qemu_mallocz(sizeof(MapCacheRev));
        entry->lock++;
        reventry->vaddr_req = mapcache->last_address_vaddr + address_offset;
        reventry->paddr_index = mapcache->last_address_index;
        reventry->size = entry->size;
        QTAILQ_INSERT_HEAD(&mapcache->locked_entries, reventry, next);
    }

    trace_qemu_map_cache_return(mapcache->last_address_vaddr + address_offset);
    return mapcache->last_address_vaddr + address_offset;
}
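
/*
 * Translate a pointer obtained from a locked qemu_map_cache() call back
 * into a guest RAM address by looking it up in locked_entries.
 */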
ram_addr_t qemu_ram_addr_from_mapcache(void *ptr)
{
    MapCacheEntry *entry = NULL, *pentry = NULL;
    MapCacheRev *reventry;
    target_phys_addr_t paddr_index;
    target_phys_addr_t size;
    int found = 0;

    QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
        if (reventry->vaddr_req == ptr) {
            paddr_index = reventry->paddr_index;
            size = reventry->size;
            found = 1;
            break;
        }
    }
    if (!found) {
        fprintf(stderr, "qemu_ram_addr_from_mapcache, could not find %p\n", ptr);
        QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
            DPRINTF("   "TARGET_FMT_plx" -> %p is present\n", reventry->paddr_index,
                    reventry->vaddr_req);
        }
        abort();
    }

    entry = &mapcache->entry[paddr_index % mapcache->nr_buckets];
    while (entry && (entry->paddr_index != paddr_index || entry->size != size)) {
        pentry = entry;
        entry = entry->next;
    }
    if (!entry) {
        DPRINTF("Trying to find address %p that is not in the mapcache!\n", ptr);
        return 0;
    }
    return (reventry->paddr_index << MCACHE_BUCKET_SHIFT) +
        ((unsigned long) ptr - (unsigned long) entry->vaddr_base);
}
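
/*
 * Release one locked mapping.  The bucket itself is only unmapped once its
 * lock count drops to zero and it is not the head of its hash chain.
 */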
void qemu_invalidate_entry(uint8_t *buffer)
{
    MapCacheEntry *entry = NULL, *pentry = NULL;
    MapCacheRev *reventry;
    target_phys_addr_t paddr_index;
    target_phys_addr_t size;
    int found = 0;

    if (mapcache->last_address_vaddr == buffer) {
        mapcache->last_address_index = -1;
    }

    QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
        if (reventry->vaddr_req == buffer) {
            paddr_index = reventry->paddr_index;
            size = reventry->size;
            found = 1;
            break;
        }
    }
    if (!found) {
        DPRINTF("qemu_invalidate_entry, could not find %p\n", buffer);
        QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
            DPRINTF("   "TARGET_FMT_plx" -> %p is present\n", reventry->paddr_index, reventry->vaddr_req);
        }
        return;
    }
    QTAILQ_REMOVE(&mapcache->locked_entries, reventry, next);
    qemu_free(reventry);

    entry = &mapcache->entry[paddr_index % mapcache->nr_buckets];
    while (entry && (entry->paddr_index != paddr_index || entry->size != size)) {
        pentry = entry;
        entry = entry->next;
    }
    if (!entry) {
        DPRINTF("Trying to unmap address %p that is not in the mapcache!\n", buffer);
        return;
    }
    entry->lock--;
    if (entry->lock > 0 || pentry == NULL) {
        return;
    }

    pentry->next = entry->next;
    if (munmap(entry->vaddr_base, entry->size) != 0) {
        perror("unmap fails");
        exit(-1);
    }
    qemu_free(entry->valid_mapping);
    qemu_free(entry);
}
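
/*
 * Unmap every bucket and reset the cache to empty.  Any mappings still in
 * locked_entries at this point are reported via DPRINTF.
 */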
void qemu_invalidate_map_cache(void)
{
    unsigned long i;
    MapCacheRev *reventry;

    /* Flush pending AIO before destroying the mapcache */
    qemu_aio_flush();

    QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
        DPRINTF("There should be no locked mappings at this time, "
                "but "TARGET_FMT_plx" -> %p is present\n",
                reventry->paddr_index, reventry->vaddr_req);
    }

    for (i = 0; i < mapcache->nr_buckets; i++) {
        MapCacheEntry *entry = &mapcache->entry[i];

        if (entry->vaddr_base == NULL) {
            continue;
        }

        if (munmap(entry->vaddr_base, entry->size) != 0) {
            perror("unmap fails");
            exit(-1);
        }

        entry->paddr_index = 0;
        entry->vaddr_base = NULL;
        entry->size = 0;
        qemu_free(entry->valid_mapping);
        entry->valid_mapping = NULL;
    }

    mapcache->last_address_index = -1;
    mapcache->last_address_vaddr = NULL;
}