# Copyright (C) 2009 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

"""A manager of caches."""

import atexit
import os
import shutil
import tempfile
import weakref

from bzrlib import lru_cache, trace
from bzrlib.plugins.fastimport import (
    branch_mapper,
    )
from fastimport.helpers import (
    single_plural,
    )
from fastimport.reftracker import (
    RefTracker,
    )

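
# A minimal usage sketch, added for illustration only (this comment block is
# not part of the original source); it just exercises the public API defined
# below:
#
#   manager = CacheManager(info=None, verbose=False)
#   manager.store_blob('sha1-or-mark', 'blob contents')
#   manager.add_mark('1', 'bzr-revision-id')
#   data = manager.fetch_blob('sha1-or-mark')
#   manager.lookup_committish(':1')     # -> 'bzr-revision-id'
#   manager.dump_stats()
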
class _Cleanup(object):
    """This class makes sure we clean up when CacheManager goes away.

    We use a helper class to ensure that we are never in a refcycle.
    """

    def __init__(self, disk_blobs):
        self.disk_blobs = disk_blobs
        self.tempdir = None
        self.small_blobs = None

    def __del__(self):
        self.finalize()

    def finalize(self):
        if self.disk_blobs is not None:
            for info in self.disk_blobs.itervalues():
                if info[-1] is not None:
                    os.unlink(info[-1])
            self.disk_blobs = None
        if self.small_blobs is not None:
            self.small_blobs.close()
            self.small_blobs = None
        if self.tempdir is not None:
            shutil.rmtree(self.tempdir)


class CacheManager(object):

    _small_blob_threshold = 25*1024
    _sticky_cache_size = 300*1024*1024
    _sticky_flushed_size = 100*1024*1024
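
    # How these constants interact (explanatory comment, not in the original
    # source): once the sticky cache holds more than _sticky_cache_size
    # (300MB) of blob data, _flush_blobs_to_disk() evicts blobs until usage
    # falls below _sticky_flushed_size (100MB); blobs smaller than
    # _small_blob_threshold (25KB) are appended to one shared temporary file
    # rather than each getting a file of their own.
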
    def __init__(self, info=None, verbose=False, inventory_cache_size=10):
        """Create a manager of caches.

        :param info: a ConfigObj holding the output from
            the --info processor, or None if no hints are available
        """
        self.verbose = verbose

        # dataref -> data. dataref is either :mark or the sha-1.
        # Sticky blobs are referenced more than once, and are saved until
        # their refcount drops to 0.
        self._blobs = {}
        self._sticky_blobs = {}
        self._sticky_memory_bytes = 0
        # if we overflow our memory cache, then we will dump large blobs to
        # disk in this directory
        self._tempdir = None
        # id => (offset, n_bytes, fname)
        #   if fname is None, then the content is stored in the small file
        self._disk_blobs = {}
        self._cleanup = _Cleanup(self._disk_blobs)

        # revision-id -> Inventory cache
        # these are large and we probably don't need too many as
        # most parents are recent in history
        self.inventories = lru_cache.LRUCache(inventory_cache_size)

        # import commit-ids -> revision-id lookup table
        # we need to keep all of these but they are small
        self.marks = {}

        # (path, branch_ref) -> file-ids - as generated.
        # (Use store_file_id/fetch_fileid methods rather than direct access.)

        # Work out the blobs to make sticky - None means all
        self._blob_ref_counts = {}
        if info is not None:
            try:
                blobs_by_counts = info['Blob reference counts']
                # The parser hands values back as lists, already parsed
                for count, blob_list in blobs_by_counts.items():
                    n = int(count)
                    for b in blob_list:
                        self._blob_ref_counts[b] = n
            except KeyError:
                # info not in file - possible when no blobs used
                pass

        # BranchMapper has no state (for now?), but we keep it around rather
        # than reinstantiate on every usage
        self.branch_mapper = branch_mapper.BranchMapper()

        self.reftracker = RefTracker()

    def add_mark(self, mark, commit_id):
        assert mark[0] != ':'
        self.marks[mark] = commit_id

    def lookup_committish(self, committish):
        """Resolve a 'committish' to a revision id.

        :param committish: A "committish" string
        :return: Bazaar revision id
        """
        assert committish[0] == ':'
        return self.marks[committish.lstrip(':')]
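
    # Illustrative note (comment not in the original source): marks are keyed
    # without the leading ':', so add_mark('42', 'rev-id') is later resolved
    # by lookup_committish(':42').
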
    def dump_stats(self, note=trace.note):
        """Dump some statistics about what we cached."""
        # TODO: add in inventory statistics
        note("Cache statistics:")
        self._show_stats_for(self._sticky_blobs, "sticky blobs", note=note)
        self._show_stats_for(self.marks, "revision-ids", note=note)
        # These aren't interesting so omit from the output, at least for now
        #self._show_stats_for(self._blobs, "other blobs", note=note)
        #self.reftracker.dump_stats(note=note)

    def _show_stats_for(self, dict, label, note=trace.note, tuple_key=False):
        """Dump statistics about a given dictionary.

        Both the key and value need to support len().
        """
        count = len(dict)
        if tuple_key:
            size = sum(map(len, (''.join(k) for k in dict.keys())))
        else:
            size = sum(map(len, dict.keys()))
        size += sum(map(len, dict.values()))
        # Scale the total to the most readable unit
        size = size * 1.0 / 1024
        unit = 'K'
        if size > 1024:
            size = size / 1024
            unit = 'M'
            if size > 1024:
                size = size / 1024
                unit = 'G'
        note("    %-12s: %8.1f %s (%d %s)" % (label, size, unit, count,
            single_plural(count, "item", "items")))
170 """Free up any memory used by the caches."""
172 self
._sticky
_blobs
.clear()
174 self
.reftracker
.clear()
175 self
.inventories
.clear()
    def _flush_blobs_to_disk(self):
        blobs = self._sticky_blobs.keys()
        sticky_blobs = self._sticky_blobs
        total_blobs = len(sticky_blobs)
        blobs.sort(key=lambda k: len(sticky_blobs[k]))
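        # Note (comment added for clarity; not in the original source):
        # sorting ascending by size means blobs.pop() below flushes the
        # largest blobs first, freeing memory in the fewest writes.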
        if self._tempdir is None:
            tempdir = tempfile.mkdtemp(prefix='fastimport_blobs-')
            self._tempdir = tempdir
            self._cleanup.tempdir = self._tempdir
            self._cleanup.small_blobs = tempfile.TemporaryFile(
                prefix='small-blobs-', dir=self._tempdir)
            small_blob_ref = weakref.ref(self._cleanup.small_blobs)
            # Even though we add it to _Cleanup it seems that the object can
            # be destroyed 'too late' for cleanup to actually occur. Probably
            # a combination of bzr's "die directly, don't clean up" and how
            # exceptions close the running stack.
            def exit_cleanup():
                small_blob = small_blob_ref()
                if small_blob is not None:
                    small_blob.close()
                shutil.rmtree(tempdir, ignore_errors=True)
            atexit.register(exit_cleanup)
        count = 0
        bytes = 0
        n_small_bytes = 0
        while self._sticky_memory_bytes > self._sticky_flushed_size:
            id = blobs.pop()
            blob = self._sticky_blobs.pop(id)
            n_bytes = len(blob)
            self._sticky_memory_bytes -= n_bytes
            if n_bytes < self._small_blob_threshold:
                # Small blobs are appended to the shared small-blobs file
                f = self._cleanup.small_blobs
                f.seek(0, os.SEEK_END)
                self._disk_blobs[id] = (f.tell(), n_bytes, None)
                f.write(blob)
                n_small_bytes += n_bytes
            else:
                # Large blobs each get their own temporary file
                fd, name = tempfile.mkstemp(prefix='blob-', dir=self._tempdir)
                os.write(fd, blob)
                os.close(fd)
                self._disk_blobs[id] = (0, n_bytes, name)
            bytes += n_bytes
            del blob
            count += 1
        trace.note('flushed %d/%d blobs w/ %.1fMB (%.1fMB small) to disk'
                   % (count, total_blobs, bytes / 1024. / 1024,
                      n_small_bytes / 1024. / 1024))

    def store_blob(self, id, data):
        """Store a blob of data."""
        # Note: If we're not reference counting, everything has to be sticky
        if not self._blob_ref_counts or id in self._blob_ref_counts:
            self._sticky_blobs[id] = data
            self._sticky_memory_bytes += len(data)
            if self._sticky_memory_bytes > self._sticky_cache_size:
                self._flush_blobs_to_disk()
        elif data == '':
            # Empty data is always sticky
            self._sticky_blobs[id] = data
        else:
            self._blobs[id] = data
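
    # Illustrative behaviour (comment not in the original source): with
    # _blob_ref_counts == {'b1': 3}, store_blob('b1', ...) goes into the
    # sticky cache and survives the first two fetch_blob('b1') calls; an
    # uncounted blob stored as 'b2' is popped by the first fetch_blob('b2').
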
    def _decref(self, id, cache, fn):
        if not self._blob_ref_counts:
            return False
        count = self._blob_ref_counts.get(id, None)
        if count is not None:
            count -= 1
            if count <= 0:
                del cache[id]
                if fn is not None:
                    os.unlink(fn)
                del self._blob_ref_counts[id]
                return True
            else:
                self._blob_ref_counts[id] = count
        return False

    def fetch_blob(self, id):
        """Fetch a blob of data."""
        if id in self._blobs:
            return self._blobs.pop(id)
        if id in self._disk_blobs:
            (offset, n_bytes, fn) = self._disk_blobs[id]
            if fn is None:
                # Stored in the shared small-blobs file
                f = self._cleanup.small_blobs
                f.seek(offset)
                content = f.read(n_bytes)
            else:
                # Stored in its own temporary file
                fp = open(fn, 'rb')
                try:
                    content = fp.read()
                finally:
                    fp.close()
            self._decref(id, self._disk_blobs, fn)
            return content
        content = self._sticky_blobs[id]
        if self._decref(id, self._sticky_blobs, None):
            self._sticky_memory_bytes -= len(content)
        return content