/*
 * Copyright (C) 2011 Citrix Ltd.
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include "config.h"

#include <sys/resource.h>

#include "hw/xen_backend.h"
#include "blockdev.h"
#include "bitmap.h"

#include <xen/hvm/params.h>
#include <sys/mman.h>

#include "xen-mapcache.h"
#include "trace.h"

//#define MAPCACHE_DEBUG

#ifdef MAPCACHE_DEBUG
#  define DPRINTF(fmt, ...) do { \
    fprintf(stderr, "xen_mapcache: " fmt, ## __VA_ARGS__); \
} while (0)
#else
#  define DPRINTF(fmt, ...) do { } while (0)
#endif

#if defined(__i386__)
#  define MCACHE_BUCKET_SHIFT 16
#  define MCACHE_MAX_SIZE     (1UL<<31) /* 2GB Cap */
#elif defined(__x86_64__)
#  define MCACHE_BUCKET_SHIFT 20
#  define MCACHE_MAX_SIZE     (1UL<<35) /* 32GB Cap */
#endif
#define MCACHE_BUCKET_SIZE (1UL << MCACHE_BUCKET_SHIFT)
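
/* Placeholder locking: the mapcache is assumed to be accessed from a
 * single thread, so these expand to no-ops for now. */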
#define mapcache_lock()   ((void)0)
#define mapcache_unlock() ((void)0)
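
/*
 * The cache is an array of buckets indexed by paddr_index modulo
 * nr_buckets; entries that collide chain through 'next'.  Locked
 * mappings are additionally remembered on a reverse list (MapCacheRev)
 * so that a vaddr handed out to a caller can later be translated back
 * and unlocked.
 */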
typedef struct MapCacheEntry {
    target_phys_addr_t paddr_index;
    uint8_t *vaddr_base;
    unsigned long *valid_mapping;
    uint8_t lock;
    target_phys_addr_t size;
    struct MapCacheEntry *next;
} MapCacheEntry;

typedef struct MapCacheRev {
    uint8_t *vaddr_req;
    target_phys_addr_t paddr_index;
    target_phys_addr_t size;
    QTAILQ_ENTRY(MapCacheRev) next;
} MapCacheRev;

typedef struct MapCache {
    MapCacheEntry *entry;
    unsigned long nr_buckets;
    QTAILQ_HEAD(map_cache_head, MapCacheRev) locked_entries;

    /* For most cases (>99.9%), the page address is the same. */
    target_phys_addr_t last_address_index;
    uint8_t *last_address_vaddr;
    unsigned long max_mcache_size;
    unsigned int mcache_bucket_shift;
} MapCache;

static MapCache *mapcache;
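
/* Return 1 if all bits in [nr, nr + size) of the addr bitmap are set,
 * 0 otherwise.  Used to verify that every page of a cached mapping was
 * actually mapped successfully. */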
static inline int test_bits(int nr, int size, const unsigned long *addr)
{
    unsigned long res = find_next_zero_bit(addr, size + nr, nr);
    if (res >= nr + size) {
        return 1;
    } else {
        return 0;
    }
}
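
/*
 * Size the cache against the address-space limit of the process: the
 * mapcache must never map more than RLIMIT_AS (capped at MCACHE_MAX_SIZE)
 * at once.  For example, with the x86_64 settings above (1MB buckets),
 * a 32GB cap works out to 32768 buckets.
 */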
void xen_map_cache_init(void)
{
    unsigned long size;
    struct rlimit rlimit_as;

    mapcache = qemu_mallocz(sizeof (MapCache));

    QTAILQ_INIT(&mapcache->locked_entries);
    mapcache->last_address_index = -1;

    getrlimit(RLIMIT_AS, &rlimit_as);
    if (rlimit_as.rlim_max < MCACHE_MAX_SIZE) {
        rlimit_as.rlim_cur = rlimit_as.rlim_max;
    } else {
        rlimit_as.rlim_cur = MCACHE_MAX_SIZE;
    }

    setrlimit(RLIMIT_AS, &rlimit_as);
    mapcache->max_mcache_size = rlimit_as.rlim_cur;

    /* Round the limit up to whole buckets, then count the buckets. */
    mapcache->nr_buckets =
        (((mapcache->max_mcache_size >> XC_PAGE_SHIFT) +
          (1UL << (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT)) - 1) >>
         (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT));

    size = mapcache->nr_buckets * sizeof (MapCacheEntry);
    size = (size + XC_PAGE_SIZE - 1) & ~(XC_PAGE_SIZE - 1);
    DPRINTF("%s, nr_buckets = %lx size %lu\n", __func__,
            mapcache->nr_buckets, size);
    mapcache->entry = qemu_mallocz(size);
}
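
/*
 * (Re)map one bucket of guest memory into our address space.  Any mapping
 * previously held by the entry is torn down first.  Pages that fail to map
 * are left cleared in entry->valid_mapping so that callers can detect
 * partially valid buckets.
 */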
static void xen_remap_bucket(MapCacheEntry *entry,
                             target_phys_addr_t size,
                             target_phys_addr_t address_index)
{
    uint8_t *vaddr_base;
    xen_pfn_t *pfns;
    int *err;
    unsigned int i;
    target_phys_addr_t nb_pfn = size >> XC_PAGE_SHIFT;

    trace_xen_remap_bucket(address_index);

    pfns = qemu_mallocz(nb_pfn * sizeof (xen_pfn_t));
    err = qemu_mallocz(nb_pfn * sizeof (int));

    if (entry->vaddr_base != NULL) {
        if (munmap(entry->vaddr_base, entry->size) != 0) {
            perror("unmap fails");
            exit(-1);
        }
    }
    if (entry->valid_mapping != NULL) {
        qemu_free(entry->valid_mapping);
        entry->valid_mapping = NULL;
    }

    for (i = 0; i < nb_pfn; i++) {
        pfns[i] = (address_index << (MCACHE_BUCKET_SHIFT-XC_PAGE_SHIFT)) + i;
    }

    vaddr_base = xc_map_foreign_bulk(xen_xc, xen_domid, PROT_READ|PROT_WRITE,
                                     pfns, err, nb_pfn);
    if (vaddr_base == NULL) {
        perror("xc_map_foreign_bulk");
        exit(-1);
    }

    entry->vaddr_base = vaddr_base;
    entry->paddr_index = address_index;
    entry->size = size;
    entry->valid_mapping = (unsigned long *) qemu_mallocz(sizeof(unsigned long) *
            BITS_TO_LONGS(size >> XC_PAGE_SHIFT));

    /* Mark only the pages that mapped without error as valid. */
    bitmap_zero(entry->valid_mapping, nb_pfn);
    for (i = 0; i < nb_pfn; i++) {
        if (!err[i]) {
            bitmap_set(entry->valid_mapping, i, 1);
        }
    }

    qemu_free(pfns);
    qemu_free(err);
}
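
/*
 * Map guest physical memory at phys_addr for at least size bytes and
 * return a pointer usable by the emulator.  A minimal usage sketch, with
 * hypothetical addresses:
 *
 *     uint8_t *p = xen_map_cache(0x100000, XC_PAGE_SIZE, 1);  // locked
 *     // ... access the guest page through p ...
 *     xen_invalidate_map_cache_entry(p);                      // unlock
 *
 * With lock == 0 the returned mapping is transient and may be recycled by
 * a later call; with lock != 0 it stays valid until explicitly
 * invalidated.
 */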
uint8_t *xen_map_cache(target_phys_addr_t phys_addr, target_phys_addr_t size,
                       uint8_t lock)
{
    MapCacheEntry *entry, *pentry = NULL;
    target_phys_addr_t address_index  = phys_addr >> MCACHE_BUCKET_SHIFT;
    target_phys_addr_t address_offset = phys_addr & (MCACHE_BUCKET_SIZE - 1);
    target_phys_addr_t __size = size;

    trace_xen_map_cache(phys_addr);

    if (address_index == mapcache->last_address_index && !lock && !__size) {
        trace_xen_map_cache_return(mapcache->last_address_vaddr + address_offset);
        return mapcache->last_address_vaddr + address_offset;
    }

    /* size is always a multiple of MCACHE_BUCKET_SIZE */
    if ((address_offset + (__size % MCACHE_BUCKET_SIZE)) > MCACHE_BUCKET_SIZE) {
        __size += MCACHE_BUCKET_SIZE;
    }
    if (__size % MCACHE_BUCKET_SIZE) {
        __size += MCACHE_BUCKET_SIZE - (__size % MCACHE_BUCKET_SIZE);
    }
    if (!__size) {
        __size = MCACHE_BUCKET_SIZE;
    }

    entry = &mapcache->entry[address_index % mapcache->nr_buckets];

    while (entry && entry->lock && entry->vaddr_base &&
            (entry->paddr_index != address_index || entry->size != __size ||
             !test_bits(address_offset >> XC_PAGE_SHIFT, size >> XC_PAGE_SHIFT,
                 entry->valid_mapping))) {
        pentry = entry;
        entry = entry->next;
    }
    if (!entry) {
        entry = qemu_mallocz(sizeof (MapCacheEntry));
        pentry->next = entry;
        xen_remap_bucket(entry, __size, address_index);
    } else if (!entry->lock) {
        if (!entry->vaddr_base || entry->paddr_index != address_index ||
                entry->size != __size ||
                !test_bits(address_offset >> XC_PAGE_SHIFT, size >> XC_PAGE_SHIFT,
                    entry->valid_mapping)) {
            xen_remap_bucket(entry, __size, address_index);
        }
    }

    if (!test_bits(address_offset >> XC_PAGE_SHIFT, size >> XC_PAGE_SHIFT,
                entry->valid_mapping)) {
        mapcache->last_address_index = -1;
        trace_xen_map_cache_return(NULL);
        return NULL;
    }

    mapcache->last_address_index = address_index;
    mapcache->last_address_vaddr = entry->vaddr_base;
    if (lock) {
        MapCacheRev *reventry = qemu_mallocz(sizeof(MapCacheRev));
        entry->lock++;
        reventry->vaddr_req = mapcache->last_address_vaddr + address_offset;
        reventry->paddr_index = mapcache->last_address_index;
        reventry->size = entry->size;
        QTAILQ_INSERT_HEAD(&mapcache->locked_entries, reventry, next);
    }

    trace_xen_map_cache_return(mapcache->last_address_vaddr + address_offset);
    return mapcache->last_address_vaddr + address_offset;
}
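
/*
 * Translate a pointer previously returned by a locked xen_map_cache()
 * call back into a guest physical address, using the reverse list of
 * locked entries.  Aborts if the pointer is unknown.
 */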
ram_addr_t xen_ram_addr_from_mapcache(void *ptr)
{
    MapCacheEntry *entry = NULL, *pentry = NULL;
    MapCacheRev *reventry;
    target_phys_addr_t paddr_index;
    target_phys_addr_t size;
    int found = 0;

    QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
        if (reventry->vaddr_req == ptr) {
            paddr_index = reventry->paddr_index;
            size = reventry->size;
            found = 1;
            break;
        }
    }
    if (!found) {
        fprintf(stderr, "%s, could not find %p\n", __func__, ptr);
        QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
            DPRINTF("   "TARGET_FMT_plx" -> %p is present\n", reventry->paddr_index,
                    reventry->vaddr_req);
        }
        abort();
        return 0;
    }

    entry = &mapcache->entry[paddr_index % mapcache->nr_buckets];
    while (entry && (entry->paddr_index != paddr_index || entry->size != size)) {
        pentry = entry;
        entry = entry->next;
    }
    if (!entry) {
        DPRINTF("Trying to find address %p that is not in the mapcache!\n", ptr);
        return 0;
    }
    return (reventry->paddr_index << MCACHE_BUCKET_SHIFT) +
           ((unsigned long) ptr - (unsigned long) entry->vaddr_base);
}
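
/*
 * Drop one lock reference on the mapping behind buffer.  The bucket is
 * only unmapped once its lock count reaches zero, and never if it is the
 * first entry of its hash chain (pentry == NULL).
 */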
void xen_invalidate_map_cache_entry(uint8_t *buffer)
{
    MapCacheEntry *entry = NULL, *pentry = NULL;
    MapCacheRev *reventry;
    target_phys_addr_t paddr_index;
    target_phys_addr_t size;
    int found = 0;

    if (mapcache->last_address_vaddr == buffer) {
        mapcache->last_address_index = -1;
    }

    QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
        if (reventry->vaddr_req == buffer) {
            paddr_index = reventry->paddr_index;
            size = reventry->size;
            found = 1;
            break;
        }
    }
    if (!found) {
        DPRINTF("%s, could not find %p\n", __func__, buffer);
        QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
            DPRINTF("   "TARGET_FMT_plx" -> %p is present\n",
                    reventry->paddr_index, reventry->vaddr_req);
        }
        return;
    }
    QTAILQ_REMOVE(&mapcache->locked_entries, reventry, next);
    qemu_free(reventry);

    entry = &mapcache->entry[paddr_index % mapcache->nr_buckets];
    while (entry && (entry->paddr_index != paddr_index || entry->size != size)) {
        pentry = entry;
        entry = entry->next;
    }
    if (!entry) {
        DPRINTF("Trying to unmap address %p that is not in the mapcache!\n", buffer);
        return;
    }
    entry->lock--;
    if (entry->lock > 0 || pentry == NULL) {
        return;
    }

    pentry->next = entry->next;
    if (munmap(entry->vaddr_base, entry->size) != 0) {
        perror("unmap fails");
        exit(-1);
    }
    qemu_free(entry->valid_mapping);
    qemu_free(entry);
}
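
/*
 * Unmap every bucket in the cache, e.g. when the guest's physical memory
 * layout changes.  There should be no locked mappings left at this point;
 * any that remain indicate a bug and are reported via DPRINTF.
 */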
void xen_invalidate_map_cache(void)
{
    unsigned long i;
    MapCacheRev *reventry;

    /* Flush pending AIO before destroying the mapcache */
    qemu_aio_flush();

    QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
        DPRINTF("There should be no locked mappings at this time, "
                "but "TARGET_FMT_plx" -> %p is present\n",
                reventry->paddr_index, reventry->vaddr_req);
    }

    mapcache_lock();

    for (i = 0; i < mapcache->nr_buckets; i++) {
        MapCacheEntry *entry = &mapcache->entry[i];

        if (entry->vaddr_base == NULL) {
            continue;
        }

        if (munmap(entry->vaddr_base, entry->size) != 0) {
            perror("unmap fails");
            exit(-1);
        }

        entry->paddr_index = 0;
        entry->vaddr_base = NULL;
        entry->size = 0;
        qemu_free(entry->valid_mapping);
        entry->valid_mapping = NULL;
    }

    mapcache->last_address_index = -1;
    mapcache->last_address_vaddr = NULL;

    mapcache_unlock();
}