/*
 * Copyright (C) 2011 Citrix Ltd.
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
#include <sys/resource.h>
#include <sys/mman.h>

#include "hw/xen_backend.h"
#include "bitmap.h"

#include <xen/hvm/params.h>

#include "xen-mapcache.h"
#include "trace.h"
//#define MAPCACHE_DEBUG

#ifdef MAPCACHE_DEBUG
#  define DPRINTF(fmt, ...) do { \
    fprintf(stderr, "xen_mapcache: " fmt, ## __VA_ARGS__); \
} while (0)
#else
#  define DPRINTF(fmt, ...) do { } while (0)
#endif
#if defined(__i386__)
#  define MCACHE_BUCKET_SHIFT 16
#  define MCACHE_MAX_SIZE     (1UL << 31) /* 2GB Cap */
#elif defined(__x86_64__)
#  define MCACHE_BUCKET_SHIFT 20
#  define MCACHE_MAX_SIZE     (1UL << 35) /* 32GB Cap */
#endif
#define MCACHE_BUCKET_SIZE (1UL << MCACHE_BUCKET_SHIFT)
typedef struct MapCacheEntry {
    target_phys_addr_t paddr_index;
    uint8_t *vaddr_base;
    DECLARE_BITMAP(valid_mapping, MCACHE_BUCKET_SIZE >> XC_PAGE_SHIFT);
    uint8_t lock;
    struct MapCacheEntry *next;
} MapCacheEntry;
typedef struct MapCacheRev {
    uint8_t *vaddr_req;
    target_phys_addr_t paddr_index;
    QTAILQ_ENTRY(MapCacheRev) next;
} MapCacheRev;
typedef struct MapCache {
    MapCacheEntry *entry;
    unsigned long nr_buckets;
    QTAILQ_HEAD(map_cache_head, MapCacheRev) locked_entries;

    /* For most cases (>99.9%), the page address is the same. */
    target_phys_addr_t last_address_index;
    uint8_t *last_address_vaddr;
    unsigned long max_mcache_size;
    unsigned int mcache_bucket_shift;
} MapCache;
static MapCache *mapcache;
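
/*
 * Size the cache from the process address-space limit: the entry array
 * gets one slot per MCACHE_BUCKET_SIZE bucket of mappable guest memory.
 */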
void qemu_map_cache_init(void)
{
    unsigned long size;
    struct rlimit rlimit_as;

    mapcache = qemu_mallocz(sizeof (MapCache));

    QTAILQ_INIT(&mapcache->locked_entries);
    mapcache->last_address_index = -1;

    getrlimit(RLIMIT_AS, &rlimit_as);
    if (rlimit_as.rlim_max < MCACHE_MAX_SIZE) {
        rlimit_as.rlim_cur = rlimit_as.rlim_max;
    } else {
        rlimit_as.rlim_cur = MCACHE_MAX_SIZE;
    }

    setrlimit(RLIMIT_AS, &rlimit_as);
    mapcache->max_mcache_size = rlimit_as.rlim_cur;

    mapcache->nr_buckets =
        (((mapcache->max_mcache_size >> XC_PAGE_SHIFT) +
          (1UL << (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT)) - 1) >>
         (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT));

    size = mapcache->nr_buckets * sizeof (MapCacheEntry);
    size = (size + XC_PAGE_SIZE - 1) & ~(XC_PAGE_SIZE - 1);
    DPRINTF("qemu_map_cache_init, nr_buckets = %lx size %lu\n",
            mapcache->nr_buckets, size);
    mapcache->entry = qemu_mallocz(size);
}
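
/*
 * (Re)map one bucket of guest memory: tear down any previous mapping,
 * then map the bucket's pfns with xc_map_foreign_bulk() and record which
 * pages were successfully mapped in the valid_mapping bitmap.
 */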
static void qemu_remap_bucket(MapCacheEntry *entry,
                              target_phys_addr_t size,
                              target_phys_addr_t address_index)
{
    uint8_t *vaddr_base;
    xen_pfn_t *pfns;
    int *err;
    unsigned int i;
    target_phys_addr_t nb_pfn = size >> XC_PAGE_SHIFT;

    trace_qemu_remap_bucket(address_index);

    pfns = qemu_mallocz(nb_pfn * sizeof (xen_pfn_t));
    err = qemu_mallocz(nb_pfn * sizeof (int));

    if (entry->vaddr_base != NULL) {
        if (munmap(entry->vaddr_base, size) != 0) {
            perror("unmap fails");
            exit(-1);
        }
    }

    for (i = 0; i < nb_pfn; i++) {
        pfns[i] = (address_index << (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT)) + i;
    }

    vaddr_base = xc_map_foreign_bulk(xen_xc, xen_domid, PROT_READ|PROT_WRITE,
                                     pfns, err, nb_pfn);
    if (vaddr_base == NULL) {
        perror("xc_map_foreign_bulk");
        exit(-1);
    }

    entry->vaddr_base = vaddr_base;
    entry->paddr_index = address_index;

    bitmap_zero(entry->valid_mapping, nb_pfn);
    for (i = 0; i < nb_pfn; i++) {
        if (!err[i]) {
            bitmap_set(entry->valid_mapping, i, 1);
        }
    }

    qemu_free(pfns);
    qemu_free(err);
}
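
/*
 * Translate a guest physical address into a mapcache virtual address.
 * A single-entry cache (last_address_*) short-circuits the common case;
 * otherwise the bucket chain is walked and remapped on a miss.  With
 * lock != 0 the mapping is pinned and recorded in locked_entries so it
 * can be reverse-looked-up and released later.
 */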
uint8_t *qemu_map_cache(target_phys_addr_t phys_addr, target_phys_addr_t size,
                        uint8_t lock)
{
    MapCacheEntry *entry, *pentry = NULL;
    target_phys_addr_t address_index  = phys_addr >> MCACHE_BUCKET_SHIFT;
    target_phys_addr_t address_offset = phys_addr & (MCACHE_BUCKET_SIZE - 1);

    trace_qemu_map_cache(phys_addr);

    if (address_index == mapcache->last_address_index && !lock) {
        trace_qemu_map_cache_return(mapcache->last_address_vaddr + address_offset);
        return mapcache->last_address_vaddr + address_offset;
    }

    entry = &mapcache->entry[address_index % mapcache->nr_buckets];

    while (entry && entry->lock && entry->paddr_index != address_index &&
           entry->vaddr_base) {
        pentry = entry;
        entry = entry->next;
    }
    if (!entry) {
        entry = qemu_mallocz(sizeof (MapCacheEntry));
        pentry->next = entry;
        qemu_remap_bucket(entry, size ? : MCACHE_BUCKET_SIZE, address_index);
    } else if (!entry->lock) {
        if (!entry->vaddr_base || entry->paddr_index != address_index ||
            !test_bit(address_offset >> XC_PAGE_SHIFT, entry->valid_mapping)) {
            qemu_remap_bucket(entry, size ? : MCACHE_BUCKET_SIZE, address_index);
        }
    }

    if (!test_bit(address_offset >> XC_PAGE_SHIFT, entry->valid_mapping)) {
        mapcache->last_address_index = -1;
        trace_qemu_map_cache_return(NULL);
        return NULL;
    }

    mapcache->last_address_index = address_index;
    mapcache->last_address_vaddr = entry->vaddr_base;
    if (lock) {
        MapCacheRev *reventry = qemu_mallocz(sizeof(MapCacheRev));
        entry->lock++;
        reventry->vaddr_req = mapcache->last_address_vaddr + address_offset;
        reventry->paddr_index = mapcache->last_address_index;
        QTAILQ_INSERT_HEAD(&mapcache->locked_entries, reventry, next);
    }

    trace_qemu_map_cache_return(mapcache->last_address_vaddr + address_offset);
    return mapcache->last_address_vaddr + address_offset;
}
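
/* Drop one lock reference taken by qemu_map_cache(..., lock != 0). */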
void qemu_map_cache_unlock(void *buffer)
{
    MapCacheEntry *entry = NULL, *pentry = NULL;
    MapCacheRev *reventry;
    target_phys_addr_t paddr_index;
    int found = 0;

    QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
        if (reventry->vaddr_req == buffer) {
            paddr_index = reventry->paddr_index;
            found = 1;
            break;
        }
    }
    if (!found) {
        return;
    }
    QTAILQ_REMOVE(&mapcache->locked_entries, reventry, next);
    qemu_free(reventry);

    entry = &mapcache->entry[paddr_index % mapcache->nr_buckets];
    while (entry && entry->paddr_index != paddr_index) {
        pentry = entry;
        entry = entry->next;
    }
    if (!entry) {
        return;
    }
    if (entry->lock > 0) {
        entry->lock--;
    }
}
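
/* Reverse-map a locked mapcache pointer back to its guest physical address. */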
ram_addr_t qemu_ram_addr_from_mapcache(void *ptr)
{
    MapCacheRev *reventry;
    target_phys_addr_t paddr_index;
    int found = 0;

    QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
        if (reventry->vaddr_req == ptr) {
            paddr_index = reventry->paddr_index;
            found = 1;
            break;
        }
    }
    if (!found) {
        fprintf(stderr, "qemu_ram_addr_from_mapcache, could not find %p\n", ptr);
        QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
            DPRINTF("   "TARGET_FMT_plx" -> %p is present\n", reventry->paddr_index,
                    reventry->vaddr_req);
        }
        abort();
        return 0;
    }

    return paddr_index << MCACHE_BUCKET_SHIFT;
}
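
/*
 * Invalidate the mapping that backs buffer; a chained entry is unmapped
 * and freed once its last lock reference is gone.
 */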
void qemu_invalidate_entry(uint8_t *buffer)
{
    MapCacheEntry *entry = NULL, *pentry = NULL;
    MapCacheRev *reventry;
    target_phys_addr_t paddr_index;
    int found = 0;

    if (mapcache->last_address_vaddr == buffer) {
        mapcache->last_address_index = -1;
    }

    QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
        if (reventry->vaddr_req == buffer) {
            paddr_index = reventry->paddr_index;
            found = 1;
            break;
        }
    }
    if (!found) {
        DPRINTF("qemu_invalidate_entry, could not find %p\n", buffer);
        QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
            DPRINTF("   "TARGET_FMT_plx" -> %p is present\n",
                    reventry->paddr_index, reventry->vaddr_req);
        }
        return;
    }
    QTAILQ_REMOVE(&mapcache->locked_entries, reventry, next);
    qemu_free(reventry);

    entry = &mapcache->entry[paddr_index % mapcache->nr_buckets];
    while (entry && entry->paddr_index != paddr_index) {
        pentry = entry;
        entry = entry->next;
    }
    if (!entry) {
        DPRINTF("Trying to unmap address %p that is not in the mapcache!\n",
                buffer);
        return;
    }
    entry->lock--;
    if (entry->lock > 0 || pentry == NULL) {
        return;
    }

    pentry->next = entry->next;
    if (munmap(entry->vaddr_base, MCACHE_BUCKET_SIZE) != 0) {
        perror("unmap fails");
        exit(-1);
    }
    qemu_free(entry);
}
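
/*
 * Throw away every mapping in the cache.  No locked mappings should
 * exist at this point; any that do are reported.
 */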
void qemu_invalidate_map_cache(void)
{
    unsigned long i;
    MapCacheRev *reventry;

    /* Flush pending AIO before destroying the mapcache */
    qemu_aio_flush();

    QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
        DPRINTF("There should be no locked mappings at this time, "
                "but "TARGET_FMT_plx" -> %p is present\n",
                reventry->paddr_index, reventry->vaddr_req);
    }

    for (i = 0; i < mapcache->nr_buckets; i++) {
        MapCacheEntry *entry = &mapcache->entry[i];

        if (entry->vaddr_base == NULL) {
            continue;
        }

        if (munmap(entry->vaddr_base, MCACHE_BUCKET_SIZE) != 0) {
            perror("unmap fails");
            exit(-1);
        }

        entry->paddr_index = 0;
        entry->vaddr_base = NULL;
    }

    mapcache->last_address_index = -1;
    mapcache->last_address_vaddr = NULL;
}
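
/*
 * Map an arbitrarily sized guest physical range outside the cache;
 * the caller owns the returned mapping.
 */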
uint8_t *xen_map_block(target_phys_addr_t phys_addr, target_phys_addr_t size)
{
    uint8_t *vaddr_base;
    xen_pfn_t *pfns;
    int *err;
    unsigned int i;
    target_phys_addr_t nb_pfn = size >> XC_PAGE_SHIFT;

    trace_xen_map_block(phys_addr, size);
    phys_addr >>= XC_PAGE_SHIFT;

    pfns = qemu_mallocz(nb_pfn * sizeof (xen_pfn_t));
    err = qemu_mallocz(nb_pfn * sizeof (int));

    for (i = 0; i < nb_pfn; i++) {
        pfns[i] = phys_addr + i;
    }

    vaddr_base = xc_map_foreign_bulk(xen_xc, xen_domid, PROT_READ|PROT_WRITE,
                                     pfns, err, nb_pfn);
    if (vaddr_base == NULL) {
        perror("xc_map_foreign_bulk");
        exit(-1);
    }

    qemu_free(pfns);
    qemu_free(err);

    return vaddr_base;
}