App Engine Python SDK version 1.9.2
[gae.git] / python / google / appengine / api / memcache / __init__.py
blobe20515d6f5a9428cc30bf2c73ebc87cd044edfdf
1 #!/usr/bin/env python
3 # Copyright 2007 Google Inc.
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
9 # http://www.apache.org/licenses/LICENSE-2.0
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
21 """Memcache API.
23 Provides memcached-alike API to application developers to store
24 data in memory when reliable storage via the DataStore API isn't
25 required and higher performance is desired.
26 """
34 import cPickle
35 import cStringIO
36 import hashlib
37 import math
38 import types
40 from google.appengine.api import api_base_pb
41 from google.appengine.api import apiproxy_stub_map
42 from google.appengine.api import capabilities
43 from google.appengine.api import namespace_manager
44 from google.appengine.api.memcache import memcache_service_pb
45 from google.appengine.runtime import apiproxy_errors
# Shortcut aliases for the memcache service protocol buffer classes.
MemcacheSetResponse = memcache_service_pb.MemcacheSetResponse
MemcacheSetRequest = memcache_service_pb.MemcacheSetRequest
MemcacheGetResponse = memcache_service_pb.MemcacheGetResponse
MemcacheGetRequest = memcache_service_pb.MemcacheGetRequest
MemcacheDeleteResponse = memcache_service_pb.MemcacheDeleteResponse
MemcacheDeleteRequest = memcache_service_pb.MemcacheDeleteRequest
MemcacheIncrementResponse = memcache_service_pb.MemcacheIncrementResponse
MemcacheIncrementRequest = memcache_service_pb.MemcacheIncrementRequest
MemcacheBatchIncrementResponse = memcache_service_pb.MemcacheBatchIncrementResponse
MemcacheBatchIncrementRequest = memcache_service_pb.MemcacheBatchIncrementRequest
MemcacheFlushResponse = memcache_service_pb.MemcacheFlushResponse
MemcacheFlushRequest = memcache_service_pb.MemcacheFlushRequest
MemcacheStatsRequest = memcache_service_pb.MemcacheStatsRequest
MemcacheStatsResponse = memcache_service_pb.MemcacheStatsResponse

# Return values of Client.delete() / delete_multi(); ordered so that a
# network failure (0) is falsy and both server outcomes are truthy.
DELETE_NETWORK_FAILURE = 0
DELETE_ITEM_MISSING = 1
DELETE_SUCCESSFUL = 2

# Per-item set statuses, re-exported from the Set RPC response protobuf.
STORED = MemcacheSetResponse.STORED
NOT_STORED = MemcacheSetResponse.NOT_STORED
ERROR = MemcacheSetResponse.ERROR
EXISTS = MemcacheSetResponse.EXISTS

# Server-enforced size limits; over-long keys are sha1-hashed by
# _key_string(), over-long values raise ValueError.
MAX_KEY_SIZE = 250
MAX_VALUE_SIZE = 10 ** 6

# Keys of the dict returned by Client.get_stats().
STAT_HITS = 'hits'
STAT_MISSES = 'misses'
STAT_BYTE_HITS = 'byte_hits'
STAT_ITEMS = 'items'
STAT_BYTES = 'bytes'
STAT_OLDEST_ITEM_AGES = 'oldest_item_age'

# The low bits of an item's flags hold one of the TYPE_* codes below;
# FLAG_COMPRESSED is a separate bit above the type mask.
FLAG_TYPE_MASK = 7
FLAG_COMPRESSED = 1 << 3

# Value-type codes stored in the flags field so _decode_value() can
# reconstruct the original Python type on retrieval.
TYPE_STR = 0
TYPE_UNICODE = 1
TYPE_PICKLED = 2
TYPE_INT = 3
TYPE_LONG = 4
TYPE_BOOL = 5

# Capability set used to query memcache service availability.
CAPABILITY = capabilities.CapabilitySet('memcache')
112 def _is_pair(obj):
113 """Helper to test if something is a pair (2-tuple)."""
114 return isinstance(obj, tuple) and len(obj) == 2
117 def _add_name_space(message, namespace=None):
118 """Populate the name_space field in a messagecol buffer.
120 Args:
121 message: A messagecol buffer supporting the set_name_space() operation.
122 namespace: The name of the namespace part. If None, use the
123 default namespace. The empty namespace (i.e. '') will clear
124 the name_space field.
126 if namespace is None:
127 namespace = namespace_manager.get_namespace()
128 if not namespace:
129 message.clear_name_space()
130 else:
131 message.set_name_space(namespace)
def _key_string(key, key_prefix='', server_to_user_dict=None):
  """Utility function to handle different ways of requesting keys.

  Args:
    key: Either a string or tuple of (shard_number, string). In Google App
      Engine the sharding is automatic so the shard number is ignored.
      To memcache, the key is just bytes (no defined encoding).
    key_prefix: Optional string prefix to prepend to key.
    server_to_user_dict: Optional dictionary to populate with a mapping of
      server-side key (which includes the key_prefix) to user-supplied key
      (which does not have the prefix).

  Returns:
    The key as a non-unicode string prepended with key_prefix. This is
    the key sent to and stored by the server. If the resulting key is
    longer than MAX_KEY_SIZE, it will be hashed with sha1 and will be
    replaced with the hex representation of the said hash.

  Raises:
    TypeError: If the key isn't a string (or (int, string) tuple), or if
      key_prefix isn't a string.
  """
  if _is_pair(key):
    key = key[1]
  if not isinstance(key, basestring):
    raise TypeError('Key must be a string instance, received %r' % key)
  if not isinstance(key_prefix, basestring):
    raise TypeError('key_prefix must be a string instance, received %r' %
                    key_prefix)

  # Concatenate first so a unicode prefix or key promotes the whole key to
  # unicode, then encode once to get the raw bytes sent to the server.
  server_key = key_prefix + key
  if isinstance(server_key, unicode):
    server_key = server_key.encode('utf-8')

  # Over-long keys are replaced by their sha1 hex digest (40 chars), which
  # always fits within MAX_KEY_SIZE.
  if len(server_key) > MAX_KEY_SIZE:
    server_key = hashlib.sha1(server_key).hexdigest()

  # Record the server key -> original (un-prefixed, possibly unicode) key
  # mapping so response hooks can translate keys back for the caller.
  if server_to_user_dict is not None:
    assert isinstance(server_to_user_dict, dict)
    server_to_user_dict[server_key] = key

  return server_key
def _validate_encode_value(value, do_pickle):
  """Utility function to validate and encode server keys and values.

  Args:
    value: Value to store in memcache. If it's a string, it will get passed
      along as-is. If it's a unicode string, it will be marked appropriately,
      such that retrievals will yield a unicode value. If it's any other data
      type, this function will attempt to pickle the data and then store the
      serialized result, unpickling it upon retrieval.
    do_pickle: Callable that takes an object and returns a non-unicode
      string containing the pickled object.

  Returns:
    Tuple (stored_value, flags) where:
      stored_value: The value as a non-unicode string that should be stored
        in memcache.
      flags: An integer with bits set from the FLAG_* constants in this file
        to indicate the encoding of the key and value.

  Raises:
    ValueError: If the encoded value is too large.
    pickle.PicklingError: If the value is not a string and could not be
      pickled.
    RuntimeError: If a complicated data structure could not be pickled due to
      too many levels of recursion in its composition.
  """
  flags = 0
  stored_value = value

  # NOTE: the order of these isinstance checks is behavior, not style.
  # bool is a subclass of int, so TYPE_BOOL must be tested before TYPE_INT;
  # plain str must be tested before unicode-specific handling.
  if isinstance(value, str):
    pass
  elif isinstance(value, unicode):
    # Encode to UTF-8 bytes and remember the type so retrieval can decode.
    stored_value = value.encode('utf-8')
    flags |= TYPE_UNICODE
  elif isinstance(value, bool):
    # Stored as '0'/'1'; decoded via bool(int(...)).
    stored_value = str(int(value))
    flags |= TYPE_BOOL
  elif isinstance(value, int):
    stored_value = str(value)
    flags |= TYPE_INT
  elif isinstance(value, long):
    stored_value = str(value)
    flags |= TYPE_LONG
  else:
    # Anything else is pickled (may raise PicklingError/RuntimeError).
    stored_value = do_pickle(value)
    flags |= TYPE_PICKLED

  # The size limit applies to the encoded bytes, not the original object.
  if len(stored_value) > MAX_VALUE_SIZE:
    raise ValueError('Values may not be more than %d bytes in length; '
                     'received %d bytes' % (MAX_VALUE_SIZE, len(stored_value)))

  return (stored_value, flags)
def _decode_value(stored_value, flags, do_unpickle):
  """Utility function for decoding values retrieved from memcache.

  Inverse of _validate_encode_value(): uses the type code stored in the low
  bits of `flags` to reconstruct the original Python object.

  Args:
    stored_value: The value as a non-unicode string that was stored.
    flags: An integer with bits set from the FLAG_* constants in this file
      that indicate the encoding of the key and value.
    do_unpickle: Callable that takes a non-unicode string object that contains
      a pickled object and returns the pickled object.

  Returns:
    The original object that was stored, be it a normal string, a unicode
    string, int, long, or a Python object that was pickled.

  Raises:
    pickle.UnpicklingError: If the value could not be unpickled.
  """
  assert isinstance(stored_value, str)
  assert isinstance(flags, (int, long))

  # Only the low FLAG_TYPE_MASK bits carry the type code; higher bits
  # (e.g. FLAG_COMPRESSED) are ignored here.
  type_number = flags & FLAG_TYPE_MASK
  value = stored_value

  if type_number == TYPE_STR:
    return value
  elif type_number == TYPE_UNICODE:
    return unicode(value, 'utf-8')
  elif type_number == TYPE_PICKLED:
    return do_unpickle(value)
  elif type_number == TYPE_BOOL:
    # Stored as '0'/'1' by the encoder.
    return bool(int(value))
  elif type_number == TYPE_INT:
    return int(value)
  elif type_number == TYPE_LONG:
    return long(value)
  else:
    assert False, "Unknown stored type"
  assert False, "Shouldn't get here."
def create_rpc(deadline=None, callback=None):
  """Creates an RPC object for use with the memcache API.

  Args:
    deadline: Optional deadline in seconds for the operation; the default
      is a system-specific deadline (typically 5 seconds).
    callback: Optional callable to invoke on completion.

  Returns:
    An apiproxy_stub_map.UserRPC object specialized for this service.
  """
  service_name = 'memcache'
  return apiproxy_stub_map.UserRPC(service_name, deadline, callback)
class Client(object):
  """Memcache client object, through which one invokes all memcache operations.

  Several methods are no-ops to retain source-level compatibility
  with the existing popular Python memcache library.

  Any method that takes a 'key' argument will accept that key as a string
  (unicode or not) or a tuple of (hash_value, string) where the hash_value,
  normally used for sharding onto a memcache instance, is instead ignored, as
  Google App Engine deals with the sharding transparently. Keys in memcache
  are just bytes, without a specified encoding. All such methods may raise
  TypeError if provided a bogus key value and a ValueError if the key is too
  large.

  Any method that takes a 'value' argument will accept as that value any
  string (unicode or not), int, long, or pickle-able Python object, including
  all native types. You'll get back from the cache the same type that you
  originally put in.

  The Client class is not thread-safe with respect to the gets(), cas() and
  cas_multi() methods (and other compare-and-set-related methods). Therefore,
  Client objects should not be used by more than one thread for CAS purposes.
  Note that the global Client for the module-level functions is okay because
  it does not expose any of the CAS methods.
  """
  def __init__(self, servers=None, debug=0,
               pickleProtocol=cPickle.HIGHEST_PROTOCOL,
               pickler=cPickle.Pickler,
               unpickler=cPickle.Unpickler,
               pload=None,
               pid=None,
               make_sync_call=None,
               _app_id=None):
    """Create a new Client object.

    No parameters are required.

    Arguments:
      servers: Ignored; only for compatibility.
      debug: Ignored; only for compatibility.
      pickleProtocol: Pickle protocol to use for pickling the object.
      pickler: pickle.Pickler sub-class to use for pickling.
      unpickler: pickle.Unpickler sub-class to use for unpickling.
      pload: Callable to use for retrieving objects by persistent id.
      pid: Callable to use for determine the persistent id for objects, if
        any.
      make_sync_call: Ignored; only for compatibility with an earlier version.
      _app_id: Internal-use only; when set, requests carry an app override so
        they target that application's memcache instead of the current one.
    """
    self._pickler_factory = pickler
    self._unpickler_factory = unpickler
    self._pickle_protocol = pickleProtocol
    self._persistent_id = pid
    self._persistent_load = pload
    self._app_id = _app_id
    # Server-side key -> CAS id from the most recent gets()/get(for_cas=True).
    # Consumed by cas(); this per-instance state is why the class is not
    # thread-safe for CAS use.
    self._cas_ids = {}
  def cas_reset(self):
    """Clear the remembered CAS ids.

    Frees the memory accumulated by gets()/get(for_cas=True); after this,
    cas() calls will fail (no matching CAS id) until keys are fetched again.
    """
    # Cleared in place (not rebound) so any shared references stay valid.
    self._cas_ids.clear()
  def _make_async_call(self, rpc, method, request, response,
                       get_result_hook, user_data):
    """Internal helper to schedule an asynchronous RPC.

    Args:
      rpc: None or a UserRPC object.
      method: Method name, e.g. 'Get'.
      request: Request protobuf.
      response: Response protobuf.
      get_result_hook: None or hook function used to process results
        (See UserRPC.make_call() for more info).
      user_data: None or user data for hook function.

    Returns:
      A UserRPC object; either the one passed in as the first argument,
      or a new one (if the first argument was None).
    """
    if rpc is None:
      rpc = create_rpc()
    # Caller-supplied RPCs must have been created for this service.
    assert rpc.service == 'memcache', repr(rpc.service)
    rpc.make_call(method, request, response, get_result_hook, user_data)
    return rpc
  def _do_pickle(self, value):
    """Pickles a provided value.

    Uses the pickler factory, protocol, and optional persistent_id callable
    configured in __init__.

    Args:
      value: Any picklable Python object.

    Returns:
      A non-unicode str containing the pickled representation of value.
    """
    pickle_data = cStringIO.StringIO()
    pickler = self._pickler_factory(pickle_data,
                                    protocol=self._pickle_protocol)
    if self._persistent_id is not None:
      pickler.persistent_id = self._persistent_id
    pickler.dump(value)
    return pickle_data.getvalue()
  def _do_unpickle(self, value):
    """Unpickles a provided value.

    Uses the unpickler factory and optional persistent_load callable
    configured in __init__.

    Args:
      value: A non-unicode str containing a pickled object.

    Returns:
      The unpickled Python object.
    """
    pickle_data = cStringIO.StringIO(value)
    unpickler = self._unpickler_factory(pickle_data)
    if self._persistent_load is not None:
      unpickler.persistent_load = self._persistent_load
    return unpickler.load()
  def _add_app_id(self, message):
    """Populates override field in message if accessing another app's memcache.

    No-op when this client was constructed without _app_id.

    Args:
      message: A protocol buffer supporting the mutable_override() operation.
    """
    if self._app_id:
      app_override = message.mutable_override()
      app_override.set_app_id(self._app_id)
413 def set_servers(self, servers):
414 """Sets the pool of memcache servers used by the client.
416 This is purely a compatibility method. In Google App Engine, it's a no-op.
418 pass
420 def disconnect_all(self):
421 """Closes all connections to memcache servers.
423 This is purely a compatibility method. In Google App Engine, it's a no-op.
425 pass
427 def forget_dead_hosts(self):
428 """Resets all servers to the alive status.
430 This is purely a compatibility method. In Google App Engine, it's a no-op.
432 pass
434 def debuglog(self):
435 """Logging function for debugging information.
437 This is purely a compatibility method. In Google App Engine, it's a no-op.
439 pass
441 def get_stats(self):
442 """Gets memcache statistics for this application.
444 All of these statistics may reset due to various transient conditions. They
445 provide the best information available at the time of being called.
447 Returns:
448 Dictionary mapping statistic names to associated values. Statistics and
449 their associated meanings:
451 hits: Number of cache get requests resulting in a cache hit.
452 misses: Number of cache get requests resulting in a cache miss.
453 byte_hits: Sum of bytes transferred on get requests. Rolls over to
454 zero on overflow.
455 items: Number of key/value pairs in the cache.
456 bytes: Total size of all items in the cache.
457 oldest_item_age: How long in seconds since the oldest item in the
458 cache was accessed. Effectively, this indicates how long a new
459 item will survive in the cache without being accessed. This is
460 _not_ the amount of time that has elapsed since the item was
461 created.
463 On error, returns None.
465 rpc = self.get_stats_async()
466 return rpc.get_result()
  def get_stats_async(self, rpc=None):
    """Async version of get_stats().

    Args:
      rpc: Optional UserRPC object to use for this call.

    Returns:
      A UserRPC instance whose get_result() method returns None if
      there was a network error, otherwise a dict just like
      get_stats() returns.
    """
    request = MemcacheStatsRequest()
    self._add_app_id(request)
    response = MemcacheStatsResponse()
    return self._make_async_call(rpc, 'Stats', request, response,
                                 self.__get_stats_hook, None)
  def __get_stats_hook(self, rpc):
    """Result hook for get_stats_async(): response protobuf -> stats dict.

    Returns None on RPC failure, a dict of zeros when the server returned
    no stats block, otherwise the stats translated to the STAT_* keys.
    """
    try:
      rpc.check_success()
    except apiproxy_errors.Error:
      return None
    response = rpc.response
    if not response.has_stats():
      # Successful RPC but no stats recorded yet: report explicit zeros
      # rather than None so callers can distinguish this from an error.
      return {
        STAT_HITS: 0,
        STAT_MISSES: 0,
        STAT_BYTE_HITS: 0,
        STAT_ITEMS: 0,
        STAT_BYTES: 0,
        STAT_OLDEST_ITEM_AGES: 0,
      }
    stats = response.stats()
    return {
      STAT_HITS: stats.hits(),
      STAT_MISSES: stats.misses(),
      STAT_BYTE_HITS: stats.byte_hits(),
      STAT_ITEMS: stats.items(),
      STAT_BYTES: stats.bytes(),
      STAT_OLDEST_ITEM_AGES: stats.oldest_item_age(),
    }
508 def flush_all(self):
509 """Deletes everything in memcache.
511 Returns:
512 True on success, False on RPC or server error.
514 rpc = self.flush_all_async()
515 return rpc.get_result()
  def flush_all_async(self, rpc=None):
    """Async version of flush_all().

    Args:
      rpc: Optional UserRPC object to use for this call.

    Returns:
      A UserRPC instance whose get_result() method returns True on
      success, False on RPC or server error.
    """
    request = MemcacheFlushRequest()
    self._add_app_id(request)
    response = MemcacheFlushResponse()
    return self._make_async_call(rpc, 'FlushAll', request, response,
                                 self.__flush_all_hook, None)
530 def __flush_all_hook(self, rpc):
531 try:
532 rpc.check_success()
533 except apiproxy_errors.Error:
534 return False
535 return True
  def get(self, key, namespace=None, for_cas=False):
    """Looks up a single key in memcache.

    If you have multiple items to load, though, it's much more efficient
    to use get_multi() instead, which loads them in one bulk operation,
    reducing the networking latency that'd otherwise be required to do
    many serialized get() operations.

    Args:
      key: The key in memcache to look up.  See docs on Client
        for details of format.
      namespace: a string specifying an optional namespace to use in
        the request.
      for_cas: If True, request and store CAS ids on the client (see
        cas() operation below).

    Returns:
      The value of the key, if found in memcache, else None.
    """
    # Strip the ignored shard number here so the result lookup below uses
    # the same (bare) key that get_multi_async keys its dict with.
    if _is_pair(key):
      key = key[1]
    rpc = self.get_multi_async([key], namespace=namespace, for_cas=for_cas)
    results = rpc.get_result()
    return results.get(key)
562 def gets(self, key, namespace=None):
563 """An alias for get(..., for_cas=True)."""
564 return self.get(key, namespace=namespace, for_cas=True)
566 def get_multi(self, keys, key_prefix='', namespace=None, for_cas=False):
567 """Looks up multiple keys from memcache in one operation.
569 This is the recommended way to do bulk loads.
571 Args:
572 keys: List of keys to look up. Keys may be strings or
573 tuples of (hash_value, string). Google App Engine
574 does the sharding and hashing automatically, though, so the hash
575 value is ignored. To memcache, keys are just series of bytes,
576 and not in any particular encoding.
577 key_prefix: Prefix to prepend to all keys when talking to the server;
578 not included in the returned dictionary.
579 namespace: a string specifying an optional namespace to use in
580 the request.
581 for_cas: If True, request and store CAS ids on the client.
583 Returns:
584 A dictionary of the keys and values that were present in memcache.
585 Even if the key_prefix was specified, that key_prefix won't be on
586 the keys in the returned dictionary.
588 rpc = self.get_multi_async(keys, key_prefix, namespace, for_cas)
589 return rpc.get_result()
  def get_multi_async(self, keys, key_prefix='', namespace=None,
                      for_cas=False, rpc=None):
    """Async version of get_multi().

    Args:
      keys: List of keys to look up; see get_multi().
      key_prefix: Prefix to prepend to all keys when talking to the server.
      namespace: a string specifying an optional namespace to use in
        the request.
      for_cas: If True, request and store CAS ids on the client.
      rpc: Optional UserRPC object to use for this call.

    Returns:
      A UserRPC instance whose get_result() method returns {} if
      there was a network error, otherwise a dict just like
      get_multi() returns.
    """
    request = MemcacheGetRequest()
    self._add_app_id(request)
    _add_name_space(request, namespace)
    if for_cas:
      request.set_for_cas(True)
    response = MemcacheGetResponse()
    # user_key maps each server-side (prefixed/hashed) key back to the
    # caller's original key; __get_hook uses it to build the result dict.
    user_key = {}
    for key in keys:
      request.add_key(_key_string(key, key_prefix, user_key))

    return self._make_async_call(rpc, 'Get', request, response,
                                 self.__get_hook, user_key)
  def __get_hook(self, rpc):
    """Result hook for get_multi_async().

    Decodes each returned item, records CAS ids when the request asked for
    them, and maps server-side keys back to the caller's keys.

    Returns:
      {} on RPC failure, otherwise {user_key: decoded_value}.
    """
    try:
      rpc.check_success()
    except apiproxy_errors.Error:
      return {}
    for_cas = rpc.request.for_cas()
    response = rpc.response
    user_key = rpc.user_data
    return_value = {}
    for returned_item in response.item_list():
      value = _decode_value(returned_item.value(), returned_item.flags(),
                            self._do_unpickle)
      raw_key = returned_item.key()
      if for_cas:
        # Remember the CAS id under the raw server key so a later cas()
        # on the same key can find it.
        self._cas_ids[raw_key] = returned_item.cas_id()
      return_value[user_key[raw_key]] = value
    return return_value
  def delete(self, key, seconds=0, namespace=None):
    """Deletes a key from memcache.

    Args:
      key: Key to delete.  See docs on Client for details.
      seconds: Optional number of seconds to make deleted items 'locked'
        for 'add' operations. Value can be a delta from current time (up to
        1 month), or an absolute Unix epoch time. Defaults to 0, which means
        items can be immediately added. With or without this option,
        a 'set' operation will always work. Float values will be rounded up
        to the nearest whole second.
      namespace: a string specifying an optional namespace to use in
        the request.

    Returns:
      DELETE_NETWORK_FAILURE (0) on network failure,
      DELETE_ITEM_MISSING (1) if the server tried to delete the item but
      didn't have it, or
      DELETE_SUCCESSFUL (2) if the item was actually deleted.
      This can be used as a boolean value, where a network failure is the
      only bad condition.
    """
    rpc = self.delete_multi_async([key], seconds, namespace=namespace)
    results = rpc.get_result()
    # delete_multi_async returns None on network error; translate that to
    # the falsy DELETE_NETWORK_FAILURE code for this single-key API.
    if not results:
      return DELETE_NETWORK_FAILURE
    return results[0]
  def delete_multi(self, keys, seconds=0, key_prefix='', namespace=None):
    """Delete multiple keys at once.

    Args:
      keys: List of keys to delete.
      seconds: Optional number of seconds to make deleted items 'locked'
        for 'add' operations. Value can be a delta from current time (up to
        1 month), or an absolute Unix epoch time. Defaults to 0, which means
        items can be immediately added. With or without this option,
        a 'set' operation will always work. Float values will be rounded up
        to the nearest whole second.
      key_prefix: Prefix to put on all keys when sending specified
        keys to memcache.  See docs for get_multi() and set_multi().
      namespace: a string specifying an optional namespace to use in
        the request.

    Returns:
      True if all operations completed successfully.  False if one
      or more failed to complete.
    """
    rpc = self.delete_multi_async(keys, seconds, key_prefix, namespace)
    results = rpc.get_result()
    # NOTE(review): this is truthy whenever the RPC itself succeeded with a
    # non-empty status list, regardless of the per-item statuses — confirm
    # against the docstring's stronger "all operations" claim.
    return bool(results)
  def delete_multi_async(self, keys, seconds=0, key_prefix='',
                         namespace=None, rpc=None):
    """Async version of delete_multi() -- note different return value.

    Args:
      keys: List of keys to delete; see delete_multi().
      seconds: Optional lock-out period for subsequent 'add' operations.
      key_prefix: Prefix to put on all keys sent to memcache.
      namespace: a string specifying an optional namespace to use in
        the request.
      rpc: Optional UserRPC object to use for this call.

    Returns:
      A UserRPC instance whose get_result() method returns None if
      there was a network error, or a list of status values otherwise,
      where each status corresponds to a key and is either
      DELETE_SUCCESSFUL, DELETE_ITEM_MISSING, or DELETE_NETWORK_FAILURE
      (see delete() docstring for details).

    Raises:
      TypeError: If seconds is not a number.
      ValueError: If seconds is negative.
    """
    if not isinstance(seconds, (int, long, float)):
      raise TypeError('Delete timeout must be a number.')
    if seconds < 0:
      raise ValueError('Delete timeout must not be negative.')

    request = MemcacheDeleteRequest()
    self._add_app_id(request)
    _add_name_space(request, namespace)
    response = MemcacheDeleteResponse()

    for key in keys:
      delete_item = request.add_item()
      delete_item.set_key(_key_string(key, key_prefix=key_prefix))
      # Float lock-out periods round UP to the next whole second.
      delete_item.set_delete_time(int(math.ceil(seconds)))

    return self._make_async_call(rpc, 'Delete', request, response,
                                 self.__delete_hook, None)
717 def __delete_hook(self, rpc):
718 try:
719 rpc.check_success()
720 except apiproxy_errors.Error:
721 return None
722 result = []
723 for status in rpc.response.delete_status_list():
724 if status == MemcacheDeleteResponse.DELETED:
725 result.append(DELETE_SUCCESSFUL)
726 elif status == MemcacheDeleteResponse.NOT_FOUND:
727 result.append(DELETE_ITEM_MISSING)
728 else:
729 result.append(DELETE_NETWORK_FAILURE)
730 return result
740 def set(self, key, value, time=0, min_compress_len=0, namespace=None):
741 """Sets a key's value, regardless of previous contents in cache.
743 Unlike add() and replace(), this method always sets (or
744 overwrites) the value in memcache, regardless of previous
745 contents.
747 Args:
748 key: Key to set. See docs on Client for details.
749 value: Value to set. Any type. If complex, will be pickled.
750 time: Optional expiration time, either relative number of seconds
751 from current time (up to 1 month), or an absolute Unix epoch time.
752 By default, items never expire, though items may be evicted due to
753 memory pressure. Float values will be rounded up to the nearest
754 whole second.
755 min_compress_len: Ignored option for compatibility.
756 namespace: a string specifying an optional namespace to use in
757 the request.
759 Returns:
760 True if set. False on error.
762 return self._set_with_policy(MemcacheSetRequest.SET, key, value, time=time,
763 namespace=namespace)
765 def add(self, key, value, time=0, min_compress_len=0, namespace=None):
766 """Sets a key's value, iff item is not already in memcache.
768 Args:
769 key: Key to set. See docs on Client for details.
770 value: Value to set. Any type. If complex, will be pickled.
771 time: Optional expiration time, either relative number of seconds
772 from current time (up to 1 month), or an absolute Unix epoch time.
773 By default, items never expire, though items may be evicted due to
774 memory pressure. Float values will be rounded up to the nearest
775 whole second.
776 min_compress_len: Ignored option for compatibility.
777 namespace: a string specifying an optional namespace to use in
778 the request.
780 Returns:
781 True if added. False on error.
783 return self._set_with_policy(MemcacheSetRequest.ADD, key, value, time=time,
784 namespace=namespace)
786 def replace(self, key, value, time=0, min_compress_len=0, namespace=None):
787 """Replaces a key's value, failing if item isn't already in memcache.
789 Args:
790 key: Key to set. See docs on Client for details.
791 value: Value to set. Any type. If complex, will be pickled.
792 time: Optional expiration time, either relative number of seconds
793 from current time (up to 1 month), or an absolute Unix epoch time.
794 By default, items never expire, though items may be evicted due to
795 memory pressure. Float values will be rounded up to the nearest
796 whole second.
797 min_compress_len: Ignored option for compatibility.
798 namespace: a string specifying an optional namespace to use in
799 the request.
801 Returns:
802 True if replaced. False on RPC error or cache miss.
804 return self._set_with_policy(MemcacheSetRequest.REPLACE,
805 key, value, time=time, namespace=namespace)
  def cas(self, key, value, time=0, min_compress_len=0, namespace=None):
    """Compare-And-Set update.

    This requires that the key has previously been successfully
    fetched with gets() or get(..., for_cas=True), and that no changes
    have been made to the key since that fetch.  Typical usage is:

      key = ...
      client = memcache.Client()
      value = client.gets(key)  # OR client.get(key, for_cas=True)
      <updated value>
      ok = client.cas(key, value)

    If two processes run similar code, the first one calling cas()
    will succeed (ok == True), while the second one will fail (ok ==
    False).  This can be used to detect race conditions.

    NOTE: some state (the CAS id) is stored on the Client object for
    each key ever used with gets().  To prevent ever-increasing memory
    usage, you must use a Client object when using cas(), and the
    lifetime of your Client object should be limited to that of one
    incoming HTTP request.  You cannot use the global-function-based
    API.

    Args:
      key: Key to set.  See docs on Client for details.
      value: The new value.
      time: Optional expiration time, either relative number of seconds
        from current time (up to 1 month), or an absolute Unix epoch time.
        By default, items never expire, though items may be evicted due to
        memory pressure.  Float values will be rounded up to the nearest
        whole second.
      min_compress_len: Ignored option for compatibility.
      namespace: a string specifying an optional namespace to use in
        the request.

    Returns:
      True if updated.  False on RPC error or if the CAS id didn't match.
    """
    return self._set_with_policy(MemcacheSetRequest.CAS, key, value,
                                 time, namespace)
  def _set_with_policy(self, policy, key, value, time=0, namespace=None):
    """Sets a single key with a specified policy.

    Helper function for set(), add(), replace() and cas().

    Args:
      policy:  One of MemcacheSetRequest.SET, .ADD, .REPLACE or .CAS.
      key: Key to add, set, or replace.  See docs on Client for details.
      value: Value to set.
      time: Expiration time, defaulting to 0 (never expiring).
      namespace: a string specifying an optional namespace to use in
        the request.

    Returns:
      True if stored, False on RPC error or policy error, e.g. a replace
      that failed due to the item not already existing, or an add
      failing due to the item already existing.
    """
    rpc = self._set_multi_async_with_policy(policy, {key: value},
                                            time, '', namespace)
    status_dict = rpc.get_result()
    # status_dict is None on network error; .get(key) tolerates a missing
    # entry, so both cases fall through to False.
    if not status_dict:
      return False
    return status_dict.get(key) == MemcacheSetResponse.STORED
  def _set_multi_with_policy(self, policy, mapping, time=0, key_prefix='',
                             namespace=None):
    """Set multiple keys with a specified policy.

    Helper function for set_multi(), add_multi(), and replace_multi().  This
    reduces the network latency of doing many requests in serial.

    Args:
      policy:  One of MemcacheSetRequest.SET, .ADD, .REPLACE or .CAS.
      mapping: Dictionary of keys to values.  If policy == CAS, the
        values must be (value, cas_id) tuples.
      time: Optional expiration time, either relative number of seconds
        from current time (up to 1 month), or an absolute Unix epoch time.
        By default, items never expire, though items may be evicted due to
        memory pressure.  Float values will be rounded up to the nearest
        whole second.
      key_prefix: Prefix to prepend to all keys.
      namespace: a string specifying an optional namespace to use in
        the request.

    Returns:
      A list of keys whose values were NOT set.  On total success,
      this list should be empty.  On network/RPC/server errors,
      a list of all input keys is returned; in this case the keys
      may or may not have been updated.
    """
    rpc = self._set_multi_async_with_policy(policy, mapping, time,
                                            key_prefix, namespace)
    status_dict = rpc.get_result()
    server_keys, user_key = rpc.user_data

    # None (network error) or empty: report every input key as unset.
    if not status_dict:
      return user_key.values()

    unset_list = []
    for server_key in server_keys:
      key = user_key[server_key]
      set_status = status_dict[key]
      if set_status != MemcacheSetResponse.STORED:
        unset_list.append(key)

    return unset_list
  def _set_multi_async_with_policy(self, policy, mapping, time=0,
                                   key_prefix='', namespace=None, rpc=None):
    """Async version of _set_multi_with_policy() -- note different return.

    Args:
      policy: One of MemcacheSetRequest.SET, .ADD, .REPLACE or .CAS.
      mapping: Dictionary of keys to values; see _set_multi_with_policy().
      time: Optional expiration time; floats round up to whole seconds.
      key_prefix: Prefix to prepend to all keys.
      namespace: a string specifying an optional namespace to use in
        the request.
      rpc: Optional UserRPC object to use for this call.

    Returns:
      A UserRPC instance whose get_result() method returns None if
      there was a network error, or a dict mapping (user) keys to
      status values otherwise, where each status is one of STORED,
      NOT_STORED, ERROR, or EXISTS.

    Raises:
      TypeError: If time is not a number.
      ValueError: If time is negative.
    """
    if not isinstance(time, (int, long, float)):
      raise TypeError('Expiration must be a number.')
    if time < 0.0:
      raise ValueError('Expiration must not be negative.')

    request = MemcacheSetRequest()
    self._add_app_id(request)
    _add_name_space(request, namespace)
    # user_key: server key -> caller key, for translating the response.
    # server_keys: preserves request order for zipping with the response.
    user_key = {}
    server_keys = []
    set_cas_id = (policy == MemcacheSetRequest.CAS)
    for key, value in mapping.iteritems():
      server_key = _key_string(key, key_prefix, user_key)
      stored_value, flags = _validate_encode_value(value, self._do_pickle)
      server_keys.append(server_key)

      item = request.add_item()
      item.set_key(server_key)
      item.set_value(stored_value)
      item.set_flags(flags)
      item.set_set_policy(policy)
      item.set_expiration_time(int(math.ceil(time)))
      if set_cas_id:
        # Attach the CAS id remembered by a prior gets() for this server
        # key, if any; without one the server treats the item normally.
        cas_id = self._cas_ids.get(server_key)
        if cas_id is not None:
          item.set_cas_id(cas_id)
        item.set_for_cas(True)

    response = MemcacheSetResponse()

    return self._make_async_call(rpc, 'Set', request, response,
                                 self.__set_with_policy_hook,
                                 (server_keys, user_key))
972 def __set_with_policy_hook(self, rpc):
973 try:
974 rpc.check_success()
975 except apiproxy_errors.Error:
976 return None
978 response = rpc.response
979 server_keys, user_key = rpc.user_data
980 assert response.set_status_size() == len(server_keys)
981 status_dict = {}
982 for server_key, status in zip(server_keys, response.set_status_list()):
983 status_dict[user_key[server_key]] = status
984 return status_dict
986 def set_multi(self, mapping, time=0, key_prefix='', min_compress_len=0,
987 namespace=None):
988 """Set multiple keys' values at once, regardless of previous contents.
990 Args:
991 mapping: Dictionary of keys to values.
992 time: Optional expiration time, either relative number of seconds
993 from current time (up to 1 month), or an absolute Unix epoch time.
994 By default, items never expire, though items may be evicted due to
995 memory pressure. Float values will be rounded up to the nearest
996 whole second.
997 key_prefix: Prefix for to prepend to all keys.
998 min_compress_len: Unimplemented compatibility option.
999 namespace: a string specifying an optional namespace to use in
1000 the request.
1002 Returns:
1003 A list of keys whose values were NOT set. On total success,
1004 this list should be empty.
1006 return self._set_multi_with_policy(MemcacheSetRequest.SET, mapping,
1007 time=time, key_prefix=key_prefix,
1008 namespace=namespace)
1010 def set_multi_async(self, mapping, time=0, key_prefix='',
1011 min_compress_len=0, namespace=None, rpc=None):
1012 """Async version of set_multi() -- note different return value.
1014 Returns:
1015 See _set_multi_async_with_policy().
1017 return self._set_multi_async_with_policy(MemcacheSetRequest.SET, mapping,
1018 time=time, key_prefix=key_prefix,
1019 namespace=namespace, rpc=rpc)
1021 def add_multi(self, mapping, time=0, key_prefix='', min_compress_len=0,
1022 namespace=None):
1023 """Set multiple keys' values iff items are not already in memcache.
1025 Args:
1026 mapping: Dictionary of keys to values.
1027 time: Optional expiration time, either relative number of seconds
1028 from current time (up to 1 month), or an absolute Unix epoch time.
1029 By default, items never expire, though items may be evicted due to
1030 memory pressure. Float values will be rounded up to the nearest
1031 whole second.
1032 key_prefix: Prefix for to prepend to all keys.
1033 min_compress_len: Unimplemented compatibility option.
1034 namespace: a string specifying an optional namespace to use in
1035 the request.
1037 Returns:
1038 A list of keys whose values were NOT set because they did not already
1039 exist in memcache. On total success, this list should be empty.
1041 return self._set_multi_with_policy(MemcacheSetRequest.ADD, mapping,
1042 time=time, key_prefix=key_prefix,
1043 namespace=namespace)
1045 def add_multi_async(self, mapping, time=0, key_prefix='',
1046 min_compress_len=0, namespace=None, rpc=None):
1047 """Async version of add_multi() -- note different return value.
1049 Returns:
1050 See _set_multi_async_with_policy().
1052 return self._set_multi_async_with_policy(MemcacheSetRequest.ADD, mapping,
1053 time=time, key_prefix=key_prefix,
1054 namespace=namespace, rpc=rpc)
1056 def replace_multi(self, mapping, time=0, key_prefix='', min_compress_len=0,
1057 namespace=None):
1058 """Replace multiple keys' values, failing if the items aren't in memcache.
1060 Args:
1061 mapping: Dictionary of keys to values.
1062 time: Optional expiration time, either relative number of seconds
1063 from current time (up to 1 month), or an absolute Unix epoch time.
1064 By default, items never expire, though items may be evicted due to
1065 memory pressure. Float values will be rounded up to the nearest
1066 whole second.
1067 key_prefix: Prefix for to prepend to all keys.
1068 min_compress_len: Unimplemented compatibility option.
1069 namespace: a string specifying an optional namespace to use in
1070 the request.
1072 Returns:
1073 A list of keys whose values were NOT set because they already existed
1074 in memcache. On total success, this list should be empty.
1076 return self._set_multi_with_policy(MemcacheSetRequest.REPLACE, mapping,
1077 time=time, key_prefix=key_prefix,
1078 namespace=namespace)
1080 def replace_multi_async(self, mapping, time=0, key_prefix='',
1081 min_compress_len=0, namespace=None, rpc=None):
1082 """Async version of replace_multi() -- note different return value.
1084 Returns:
1085 See _set_multi_async_with_policy().
1087 return self._set_multi_async_with_policy(MemcacheSetRequest.REPLACE,
1088 mapping,
1089 time=time, key_prefix=key_prefix,
1090 namespace=namespace, rpc=rpc)
1092 def cas_multi(self, mapping, time=0, key_prefix='', min_compress_len=0,
1093 namespace=None):
1094 """Compare-And-Set update for multiple keys.
1096 See cas() docstring for an explanation.
1098 Args:
1099 mapping: Dictionary of keys to values.
1100 time: Optional expiration time, either relative number of seconds
1101 from current time (up to 1 month), or an absolute Unix epoch time.
1102 By default, items never expire, though items may be evicted due to
1103 memory pressure. Float values will be rounded up to the nearest
1104 whole second.
1105 key_prefix: Prefix for to prepend to all keys.
1106 min_compress_len: Unimplemented compatibility option.
1107 namespace: a string specifying an optional namespace to use in
1108 the request.
1110 Returns:
1111 A list of keys whose values were NOT set because the compare
1112 failed. On total success, this list should be empty.
1114 return self._set_multi_with_policy(MemcacheSetRequest.CAS, mapping,
1115 time=time, key_prefix=key_prefix,
1116 namespace=namespace)
1118 def cas_multi_async(self, mapping, time=0, key_prefix='',
1119 min_compress_len=0, namespace=None, rpc=None):
1120 """Async version of cas_multi() -- note different return value.
1122 Returns:
1123 See _set_multi_async_with_policy().
1125 return self._set_multi_async_with_policy(MemcacheSetRequest.CAS, mapping,
1126 time=time, key_prefix=key_prefix,
1127 namespace=namespace, rpc=rpc)
1129 def incr(self, key, delta=1, namespace=None, initial_value=None):
1130 """Atomically increments a key's value.
1132 Internally, the value is a unsigned 64-bit integer. Memcache
1133 doesn't check 64-bit overflows. The value, if too large, will
1134 wrap around.
1136 Unless an initial_value is specified, the key must already exist
1137 in the cache to be incremented. To initialize a counter, either
1138 specify initial_value or set() it to the initial value, as an
1139 ASCII decimal integer. Future get()s of the key, post-increment,
1140 will still be an ASCII decimal value.
1142 Args:
1143 key: Key to increment. If an iterable collection, each one of the keys
1144 will be offset. See Client's docstring for details.
1145 delta: Non-negative integer value (int or long) to increment key by,
1146 defaulting to 1.
1147 namespace: a string specifying an optional namespace to use in
1148 the request.
1149 initial_value: initial value to put in the cache, if it doesn't
1150 already exist. The default value, None, will not create a cache
1151 entry if it doesn't already exist.
1153 Returns:
1154 If key was a single value, the new long integer value, or None if key
1155 was not in the cache, could not be incremented for any other reason, or
1156 a network/RPC/server error occurred.
1158 If key was an iterable collection, a dictionary will be returned
1159 mapping supplied keys to values, with the values having the same meaning
1160 as the singular return value of this method.
1162 Raises:
1163 ValueError: If number is negative.
1164 TypeError: If delta isn't an int or long.
1166 return self._incrdecr(key, False, delta, namespace=namespace,
1167 initial_value=initial_value)
1169 def incr_async(self, key, delta=1, namespace=None, initial_value=None,
1170 rpc=None):
1171 """Async version of incr().
1173 Returns:
1174 A UserRPC instance whose get_result() method returns the same
1175 kind of value as incr() returns.
1177 return self._incrdecr_async(key, False, delta, namespace=namespace,
1178 initial_value=initial_value, rpc=rpc)
1180 def decr(self, key, delta=1, namespace=None, initial_value=None):
1181 """Atomically decrements a key's value.
1183 Internally, the value is a unsigned 64-bit integer. Memcache
1184 caps decrementing below zero to zero.
1186 The key must already exist in the cache to be decremented. See
1187 docs on incr() for details.
1189 Args:
1190 key: Key to decrement. If an iterable collection, each one of the keys
1191 will be offset. See Client's docstring for details.
1192 delta: Non-negative integer value (int or long) to decrement key by,
1193 defaulting to 1.
1194 namespace: a string specifying an optional namespace to use in
1195 the request.
1196 initial_value: initial value to put in the cache, if it doesn't
1197 already exist. The default value, None, will not create a cache
1198 entry if it doesn't already exist.
1200 Returns:
1201 If key was a single value, the new long integer value, or None if key
1202 was not in the cache, could not be decremented for any other reason, or
1203 a network/RPC/server error occurred.
1205 If key was an iterable collection, a dictionary will be returned
1206 mapping supplied keys to values, with the values having the same meaning
1207 as the singular return value of this method.
1209 Raises:
1210 ValueError: If number is negative.
1211 TypeError: If delta isn't an int or long.
1213 return self._incrdecr(key, True, delta, namespace=namespace,
1214 initial_value=initial_value)
1216 def decr_async(self, key, delta=1, namespace=None, initial_value=None,
1217 rpc=None):
1218 """Async version of decr().
1220 Returns:
1221 A UserRPC instance whose get_result() method returns the same
1222 kind of value as decr() returns.
1224 return self._incrdecr_async(key, True, delta, namespace=namespace,
1225 initial_value=initial_value, rpc=rpc)
1227 def _incrdecr(self, key, is_negative, delta, namespace=None,
1228 initial_value=None):
1229 """Increment or decrement a key by a provided delta.
1231 Args:
1232 key: Key to increment or decrement. If an iterable collection, each
1233 one of the keys will be offset.
1234 is_negative: Boolean, if this is a decrement.
1235 delta: Non-negative integer amount (int or long) to increment
1236 or decrement by.
1237 namespace: a string specifying an optional namespace to use in
1238 the request.
1239 initial_value: initial value to put in the cache, if it doesn't
1240 already exist. The default value, None, will not create a cache
1241 entry if it doesn't already exist.
1243 Returns:
1244 New long integer value, or None on cache miss or network/RPC/server
1245 error.
1247 Raises:
1248 ValueError: If delta is negative.
1249 TypeError: If delta isn't an int or long.
1251 rpc = self._incrdecr_async(key, is_negative, delta, namespace,
1252 initial_value)
1253 return rpc.get_result()
  def _incrdecr_async(self, key, is_negative, delta, namespace=None,
                      initial_value=None, rpc=None):
    """Async version of _incrdecr().

    Args:
      key: Key to offset, or an iterable collection of keys. Any non-string
        iterable is treated as a batch and delegated to offset_multi_async().
      is_negative: Boolean, True when this is a decrement.
      delta: Non-negative integer amount (int or long) to offset by.
      namespace: a string specifying an optional namespace to use in
        the request.
      initial_value: initial value to put in the cache, if it doesn't
        already exist. The default value, None, will not create a cache
        entry if it doesn't already exist.
      rpc: Optional UserRPC to use for the call.

    Returns:
      A UserRPC instance whose get_result() method returns the same
      kind of value as _incrdecr() returns.

    Raises:
      TypeError: If delta isn't an int or long.
      ValueError: If delta is negative.
    """
    if not isinstance(delta, (int, long)):
      raise TypeError('Delta must be an integer or long, received %r' % delta)
    if delta < 0:
      raise ValueError('Delta must not be negative.')

    # Strings are iterable too, so exclude them explicitly: a single string
    # key must take the single-key Increment path below.
    if not isinstance(key, basestring):
      try:
        it = iter(key)
      except TypeError:
        # Not iterable -- fall through and treat it as a single key.
        pass
      else:
        # Batch path: offset_multi_async() has no direction flag, so the
        # decrement direction is encoded by negating delta here.
        if is_negative:
          delta = -delta
        return self.offset_multi_async(dict((k, delta) for k in it),
                                       namespace=namespace,
                                       initial_value=initial_value,
                                       rpc=rpc)

    # Single-key path: build an Increment RPC.
    request = MemcacheIncrementRequest()
    self._add_app_id(request)
    _add_name_space(request, namespace)
    response = MemcacheIncrementResponse()
    request.set_key(_key_string(key))
    request.set_delta(delta)
    if is_negative:
      request.set_direction(MemcacheIncrementRequest.DECREMENT)
    else:
      request.set_direction(MemcacheIncrementRequest.INCREMENT)
    if initial_value is not None:
      # The backend stores the counter as a long; the flags record whether
      # the caller supplied an int or a long (presumably so later reads can
      # reproduce the original type -- note int is checked first since every
      # int also satisfies the long branch ordering concern in Python 2).
      request.set_initial_value(long(initial_value))
      initial_flags = None
      if isinstance(initial_value, int):
        initial_flags = TYPE_INT
      elif isinstance(initial_value, long):
        initial_flags = TYPE_LONG
      if initial_flags is not None:
        request.set_initial_flags(initial_flags)

    return self._make_async_call(rpc, 'Increment', request, response,
                                 self.__incrdecr_hook, None)
1309 def __incrdecr_hook(self, rpc):
1310 try:
1311 rpc.check_success()
1312 except apiproxy_errors.Error:
1313 return None
1315 response = rpc.response
1316 if response.has_new_value():
1317 return response.new_value()
1318 return None
1320 def offset_multi(self, mapping, key_prefix='',
1321 namespace=None, initial_value=None):
1322 """Offsets multiple keys by a delta, incrementing and decrementing in batch.
1324 Args:
1325 mapping: Dictionary mapping keys to deltas (positive or negative integers)
1326 to apply to each corresponding key.
1327 key_prefix: Prefix for to prepend to all keys.
1328 initial_value: Initial value to put in the cache, if it doesn't
1329 already exist. The default value, None, will not create a cache
1330 entry if it doesn't already exist.
1331 namespace: A string specifying an optional namespace to use in
1332 the request.
1334 Returns:
1335 Dictionary mapping input keys to new integer values. The new value will
1336 be None if an error occurs, the key does not already exist, or the value
1337 was not an integer type. The values will wrap-around at unsigned 64-bit
1338 integer-maximum and underflow will be floored at zero.
1340 rpc = self.offset_multi_async(mapping, key_prefix,
1341 namespace, initial_value)
1342 return rpc.get_result()
1344 def offset_multi_async(self, mapping, key_prefix='',
1345 namespace=None, initial_value=None, rpc=None):
1346 """Async version of offset_multi().
1348 Returns:
1349 A UserRPC instance whose get_result() method returns a dict just
1350 like offset_multi() returns.
1352 initial_flags = None
1353 if initial_value is not None:
1354 if not isinstance(initial_value, (int, long)):
1355 raise TypeError('initial_value must be an integer')
1356 if initial_value < 0:
1357 raise ValueError('initial_value must be >= 0')
1358 if isinstance(initial_value, int):
1359 initial_flags = TYPE_INT
1360 else:
1361 initial_flags = TYPE_LONG
1363 request = MemcacheBatchIncrementRequest()
1364 self._add_app_id(request)
1365 response = MemcacheBatchIncrementResponse()
1366 _add_name_space(request, namespace)
1368 for key, delta in mapping.iteritems():
1369 if not isinstance(delta, (int, long)):
1370 raise TypeError('Delta must be an integer or long, received %r' % delta)
1371 if delta >= 0:
1372 direction = MemcacheIncrementRequest.INCREMENT
1373 else:
1374 delta = -delta
1375 direction = MemcacheIncrementRequest.DECREMENT
1377 server_key = _key_string(key, key_prefix)
1379 item = request.add_item()
1380 item.set_key(server_key)
1381 item.set_delta(delta)
1382 item.set_direction(direction)
1383 if initial_value is not None:
1384 item.set_initial_value(initial_value)
1385 item.set_initial_flags(initial_flags)
1388 return self._make_async_call(rpc, 'BatchIncrement', request, response,
1389 self.__offset_hook, mapping)
1391 def __offset_hook(self, rpc):
1392 mapping = rpc.user_data
1393 try:
1394 rpc.check_success()
1395 except apiproxy_errors.Error:
1396 return dict((k, None) for k in mapping.iterkeys())
1398 response = rpc.response
1399 assert response.item_size() == len(mapping)
1401 result_dict = {}
1402 for key, resp_item in zip(mapping.iterkeys(), response.item_list()):
1403 if (resp_item.increment_status() == MemcacheIncrementResponse.OK and
1404 resp_item.has_new_value()):
1405 result_dict[key] = resp_item.new_value()
1406 else:
1407 result_dict[key] = None
1409 return result_dict
1415 _CLIENT = None
def setup_client(client_obj):
  """Sets the Client object instance to use for all module-level methods.

  Use this method if you want to have custom persistent_id() or
  persistent_load() functions associated with your client.

  NOTE: We don't expose the _async methods as functions; they're too
  obscure; and we don't expose gets(), cas() and cas_multi() because
  they maintain state on the client object.

  Args:
    client_obj: Instance of the memcache.Client object.
  """
  global _CLIENT
  _CLIENT = client_obj

  # Re-point each module-level convenience function at the new client's
  # bound method of the same name.
  module_globals = globals()
  for method_name in ('set_servers', 'disconnect_all', 'forget_dead_hosts',
                      'debuglog', 'get', 'get_multi', 'set', 'set_multi',
                      'add', 'add_multi', 'replace', 'replace_multi',
                      'delete', 'delete_multi', 'incr', 'decr', 'flush_all',
                      'get_stats', 'offset_multi'):
    module_globals[method_name] = getattr(_CLIENT, method_name)
1456 setup_client(Client())