migration/page_cache.c

/*
 * Page cache for QEMU
 * The cache is based on a hash of the page address
 *
 * Copyright 2012 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Orit Wasserman  <owasserm@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"

#include "qapi/qmp/qerror.h"
#include "qapi/error.h"
#include "qemu/host-utils.h"
#include "page_cache.h"
#include "trace.h"

/* a cached page is not replaced within two cycles of its last use */
#define CACHED_PAGE_LIFETIME 2

typedef struct CacheItem CacheItem;

struct CacheItem {
    uint64_t it_addr;
    uint64_t it_age;
    uint8_t *it_data;
};

struct PageCache {
    CacheItem *page_cache;
    size_t page_size;
    size_t max_num_items;
    size_t num_items;
};
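
/*
 * Create a page cache of new_size bytes, holding new_size / page_size
 * entries.  The entry count must be a power of two, and new_size must be
 * at least one page.  Returns NULL and sets errp on invalid parameters or
 * allocation failure.
 */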
PageCache *cache_init(uint64_t new_size, size_t page_size, Error **errp)
{
    int64_t i;
    size_t num_pages = new_size / page_size;
    PageCache *cache;

    if (new_size < page_size) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
                   "is smaller than one target page size");
        return NULL;
    }

    /* slot lookup masks with (max_num_items - 1), so the number of pages
     * must be a power of two */
    if (!is_power_of_2(num_pages)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
                   "is not a power of two number of pages");
        return NULL;
    }

    /* We prefer not to abort if there is no memory */
    cache = g_try_malloc(sizeof(*cache));
    if (!cache) {
        error_setg(errp, "Failed to allocate cache");
        return NULL;
    }
    cache->page_size = page_size;
    cache->num_items = 0;
    cache->max_num_items = num_pages;

    trace_migration_pagecache_init(cache->max_num_items);

    /* We prefer not to abort if there is no memory */
    cache->page_cache = g_try_malloc((cache->max_num_items) *
                                     sizeof(*cache->page_cache));
    if (!cache->page_cache) {
        error_setg(errp, "Failed to allocate page cache");
        g_free(cache);
        return NULL;
    }

    for (i = 0; i < cache->max_num_items; i++) {
        cache->page_cache[i].it_data = NULL;
        cache->page_cache[i].it_age = 0;
        cache->page_cache[i].it_addr = -1;
    }

    return cache;
}

void cache_fini(PageCache *cache)
{
    int64_t i;

    g_assert(cache);
    g_assert(cache->page_cache);

    for (i = 0; i < cache->max_num_items; i++) {
        g_free(cache->page_cache[i].it_data);
    }

    g_free(cache->page_cache);
    cache->page_cache = NULL;
    g_free(cache);
}
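
/*
 * The cache is direct mapped: each page address maps to exactly one slot,
 * selected by masking the page number with (max_num_items - 1).  This is
 * why max_num_items must be a power of two.
 */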
static size_t cache_get_cache_pos(const PageCache *cache,
                                  uint64_t address)
{
    g_assert(cache->max_num_items);
    return (address / cache->page_size) & (cache->max_num_items - 1);
}

static CacheItem *cache_get_by_addr(const PageCache *cache, uint64_t addr)
{
    size_t pos;

    g_assert(cache);
    g_assert(cache->page_cache);

    pos = cache_get_cache_pos(cache, addr);

    return &cache->page_cache[pos];
}

uint8_t *get_cached_data(const PageCache *cache, uint64_t addr)
{
    return cache_get_by_addr(cache, addr)->it_data;
}

bool cache_is_cached(const PageCache *cache, uint64_t addr,
                     uint64_t current_age)
{
    CacheItem *it;

    it = cache_get_by_addr(cache, addr);

    if (it->it_addr == addr) {
        /* refresh it_age on a cache hit */
        it->it_age = current_age;
        return true;
    }
    return false;
}
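
/*
 * Store a copy of pdata in the slot for addr.  If that slot already holds
 * a different page that was used within the last CACHED_PAGE_LIFETIME
 * cycles, or the page buffer cannot be allocated, the page is not cached
 * and -1 is returned; otherwise the copy is made and 0 is returned.
 */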
int cache_insert(PageCache *cache, uint64_t addr, const uint8_t *pdata,
                 uint64_t current_age)
{
    CacheItem *it;

    /* actual update of entry */
    it = cache_get_by_addr(cache, addr);

    if (it->it_data && it->it_addr != addr &&
        it->it_age + CACHED_PAGE_LIFETIME > current_age) {
        /* the cached page is still fresh, don't replace it */
        return -1;
    }

    /* allocate the page buffer the first time this slot is used */
    if (!it->it_data) {
        it->it_data = g_try_malloc(cache->page_size);
        if (!it->it_data) {
            trace_migration_pagecache_insert();
            return -1;
        }
        cache->num_items++;
    }

    memcpy(it->it_data, pdata, cache->page_size);

    it->it_age = current_age;
    it->it_addr = addr;

    return 0;
}
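
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * assuming a 4 KiB target page size, a page address "addr", a page-sized
 * buffer "data" filled by the caller, and a monotonically increasing "age"
 * counter, one round might look roughly like this:
 *
 *     Error *err = NULL;
 *     PageCache *cache = cache_init(64 * 1024 * 1024, 4096, &err);
 *
 *     if (cache) {
 *         if (cache_is_cached(cache, addr, age)) {
 *             uint8_t *old = get_cached_data(cache, addr);
 *             // e.g. diff "data" against "old", then refresh the cached copy
 *             cache_insert(cache, addr, data, age);
 *         } else if (cache_insert(cache, addr, data, age) < 0) {
 *             // resident page still fresh or no memory: page stays uncached
 *         }
 *         cache_fini(cache);
 *     }
 */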