Commit from One Laptop Per Child: Translation System by user HoboPrimate. 31 of 31...
[journal-activity.git] / query.py
blob85f626ef659aec48f3b4d447c66fc5b1ced4ef1f
1 # Copyright (C) 2007, One Laptop Per Child
3 # This program is free software; you can redistribute it and/or modify
4 # it under the terms of the GNU General Public License as published by
5 # the Free Software Foundation; either version 2 of the License, or
6 # (at your option) any later version.
8 # This program is distributed in the hope that it will be useful,
9 # but WITHOUT ANY WARRANTY; without even the implied warranty of
10 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 # GNU General Public License for more details.
13 # You should have received a copy of the GNU General Public License
14 # along with this program; if not, write to the Free Software
15 # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 import logging
19 from sugar.datastore import datastore
# Metadata properties the journal UI needs for every entry; passed to
# datastore.find() so the datastore only returns these fields.
PROPERTIES = [
    'uid',
    'title',
    'mtime',
    'timestamp',
    'keep',
    'buddies',
    'icon-color',
    'mime_type',
    'progress',
    'activity',
    'mountpoint',
    'activity_id',
]
26 class _Cache(object):
28 __gtype_name__ = 'query_Cache'
30 def __init__(self, jobjects=None):
31 self._array = []
32 self._dict = {}
33 if jobjects is not None:
34 self.append_all(jobjects)
36 def prepend_all(self, jobjects):
37 for jobject in jobjects[::-1]:
38 self._array.insert(0, jobject)
39 self._dict[jobject.object_id] = jobject
41 def append_all(self, jobjects):
42 for jobject in jobjects:
43 self._array.append(jobject)
44 self._dict[jobject.object_id] = jobject
46 def remove_all(self, jobjects):
47 jobjects = jobjects[:]
48 for jobject in jobjects:
49 obj = self._dict[jobject.object_id]
50 self._array.remove(obj)
51 del self._dict[obj.object_id]
52 obj.destroy()
54 def __len__(self):
55 return len(self._array)
57 def __getitem__(self, key):
58 if isinstance(key, basestring):
59 return self._dict[key]
60 else:
61 return self._array[key]
63 def destroy(self):
64 self._destroy_jobjects(self._array)
65 self._array = []
66 self._dict = {}
68 def _destroy_jobjects(self, jobjects):
69 for jobject in jobjects:
70 jobject.destroy()
class ResultSet(object):
    """Lazily-fetched, windowed view over a datastore query.

    Keeps a sliding cache of up to _CACHE_LIMIT entries around the
    current read position so that scrolling (seek + read) only hits
    the datastore when the window runs out of headroom.
    """

    # Maximum number of entries kept in the in-memory cache.
    _CACHE_LIMIT = 80

    def __init__(self, query, sorting):
        # -1 means "not known yet"; filled in by the first datastore.find().
        self._total_count = -1
        # Absolute position the next read() starts from; -1 until seek().
        self._position = -1
        self._query = query
        self._sorting = sorting

        # Absolute index (into the full result set) of the first cached entry.
        self._offset = 0
        self._cache = _Cache()

    def destroy(self):
        """Release all cached journal objects."""
        self._cache.destroy()

    def get_length(self):
        """Return the total result count, priming the cache on first call."""
        if self._total_count == -1:
            jobjects, self._total_count = datastore.find(self._query,
                    sorting=self._sorting,
                    limit=ResultSet._CACHE_LIMIT,
                    properties=PROPERTIES)
            self._cache.append_all(jobjects)
            self._offset = 0
        return self._total_count

    length = property(get_length)

    def seek(self, position):
        """Set the absolute position the next read() starts from."""
        self._position = position

    def read(self, max_count):
        """Return up to max_count entries starting at the current position.

        Refreshes the cache in one of three ways depending on where the
        position falls relative to the cached window: a full rebuild on a
        total miss, or an append/prepend of one page when the position is
        close to either edge of the window.
        """
        logging.debug('ResultSet.read position: %r' % self._position)

        # The window-growing logic below assumes several pages fit in the
        # cache, so reject page sizes over a fifth of the limit up front.
        if max_count * 5 > ResultSet._CACHE_LIMIT:
            raise RuntimeError('max_count (%i) too big for ResultSet._CACHE_LIMIT'
                    ' (%i).' % (max_count, ResultSet._CACHE_LIMIT))

        # First read without an explicit seek() starts at the top.
        if self._position == -1:
            self.seek(0)

        # Entries available in the cache from the position forward.
        if self._position < self._offset:
            remaining_forward_entries = 0
        else:
            remaining_forward_entries = self._offset + len(self._cache) - \
                                        self._position

        # Entries available in the cache behind the position.
        if self._position > self._offset + len(self._cache):
            remaining_backwards_entries = 0
        else:
            remaining_backwards_entries = self._position - self._offset

        # Absolute index just past the end of the cached window.
        last_cached_entry = self._offset + len(self._cache)

        if (remaining_forward_entries <= 0 and remaining_backwards_entries <= 0) or \
                max_count > ResultSet._CACHE_LIMIT:

            # Total cache miss: remake it
            # Center-ish the new window by starting one page before position.
            offset = max(0, self._position - max_count)
            logging.debug('remaking cache, offset: %r limit: %r' % (offset, max_count * 2))
            jobjects, self._total_count = datastore.find(self._query,
                    sorting=self._sorting,
                    offset=offset,
                    limit=ResultSet._CACHE_LIMIT,
                    properties=PROPERTIES)

            # Destroy the old window before installing the fresh one.
            self._cache.remove_all(self._cache)
            self._cache.append_all(jobjects)
            self._offset = offset

        elif remaining_forward_entries < 2 * max_count and \
                last_cached_entry < self._total_count:

            # Add one page to the end of cache
            logging.debug('appending one more page, offset: %r' % last_cached_entry)
            jobjects, self._total_count = datastore.find(self._query,
                    sorting=self._sorting,
                    offset=last_cached_entry,
                    limit=max_count,
                    properties=PROPERTIES)
            # update cache
            self._cache.append_all(jobjects)

            # apply the cache limit: drop the same amount from the front
            # and advance the window's offset accordingly.
            objects_excess = len(self._cache) - ResultSet._CACHE_LIMIT
            if objects_excess > 0:
                self._offset += objects_excess
                self._cache.remove_all(self._cache[:objects_excess])

        elif remaining_backwards_entries < 2 * max_count and self._offset > 0:

            # Add one page to the beginning of cache
            # Clamp so we never fetch past the start of the result set.
            limit = min(self._offset, max_count)
            self._offset = max(0, self._offset - max_count)

            logging.debug('prepending one more page, offset: %r limit: %r' %
                          (self._offset, limit))
            jobjects, self._total_count = datastore.find(self._query,
                    sorting=self._sorting,
                    offset=self._offset,
                    limit=limit,
                    properties=PROPERTIES)

            # update cache
            self._cache.prepend_all(jobjects)

            # apply the cache limit: drop the overflow from the tail.
            objects_excess = len(self._cache) - ResultSet._CACHE_LIMIT
            if objects_excess > 0:
                self._cache.remove_all(self._cache[-objects_excess:])
        else:
            logging.debug('cache hit and no need to grow the cache')

        # Translate the absolute position into cache-relative indices.
        first_pos = self._position - self._offset
        last_pos = self._position - self._offset + max_count
        return self._cache[first_pos:last_pos]
def find(query, sorting=None):
    """Return a ResultSet for *query*.

    sorting defaults to newest-first (['-mtime']). Uses a None
    sentinel instead of a mutable list default so the default is not
    shared across calls.
    """
    if sorting is None:
        sorting = ['-mtime']
    result_set = ResultSet(query, sorting)
    return result_set
193 if __name__ == "__main__":
194 TOTAL_ITEMS = 1000
195 SCREEN_SIZE = 10
197 def mock_debug(string):
198 print "\tDEBUG: %s" % string
199 logging.debug = mock_debug
201 def mock_find(query, sorting=None, limit=None, offset=None, properties=[]):
202 print "mock_find %r %r" % (offset, (offset + limit))
204 if limit is None or offset is None:
205 raise RuntimeError("Unimplemented test.")
207 result = []
208 for index in range(offset, offset + limit):
209 obj = datastore.DSObject(index, datastore.DSMetadata({}), '')
210 result.append(obj)
212 return result, TOTAL_ITEMS
213 datastore.find = mock_find
215 result_set = find({})
217 print "Get first page"
218 objects = result_set.read(SCREEN_SIZE)
219 print [obj.object_id for obj in objects]
220 assert range(0, SCREEN_SIZE) == [obj.object_id for obj in objects]
221 print ""
223 print "Scroll to 5th item"
224 result_set.seek(5)
225 objects = result_set.read(SCREEN_SIZE)
226 print [obj.object_id for obj in objects]
227 assert range(5, SCREEN_SIZE + 5) == [obj.object_id for obj in objects]
228 print ""
230 print "Scroll back to beginning"
231 result_set.seek(0)
232 objects = result_set.read(SCREEN_SIZE)
233 print [obj.object_id for obj in objects]
234 assert range(0, SCREEN_SIZE) == [obj.object_id for obj in objects]
235 print ""
237 print "Hit PgDn five times"
238 for i in range(0, 5):
239 result_set.seek((i + 1) * SCREEN_SIZE)
240 objects = result_set.read(SCREEN_SIZE)
241 print [obj.object_id for obj in objects]
242 assert range((i + 1) * SCREEN_SIZE, (i + 2) * SCREEN_SIZE) == [obj.object_id for obj in objects]
243 print ""
245 print "Hit PgUp five times"
246 for i in range(0, 5)[::-1]:
247 result_set.seek(i * SCREEN_SIZE)
248 objects = result_set.read(SCREEN_SIZE)
249 print [obj.object_id for obj in objects]
250 assert range(i * SCREEN_SIZE, (i + 1) * SCREEN_SIZE) == [obj.object_id for obj in objects]
251 print ""