3 # Copyright 2007 Google Inc.
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
9 # http://www.apache.org/licenses/LICENSE-2.0
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
23 Provides memcached-alike API to application developers to store
24 data in memory when reliable storage via the DataStore API isn't
25 required and higher performance is desired.
40 from google
.appengine
.api
import api_base_pb
41 from google
.appengine
.api
import apiproxy_stub_map
42 from google
.appengine
.api
import capabilities
43 from google
.appengine
.api
import namespace_manager
44 from google
.appengine
.api
.memcache
import memcache_service_pb
45 from google
.appengine
.runtime
import apiproxy_errors
# Convenience aliases for the memcache service protocol buffer classes.
MemcacheSetResponse = memcache_service_pb.MemcacheSetResponse
MemcacheSetRequest = memcache_service_pb.MemcacheSetRequest

MemcacheGetResponse = memcache_service_pb.MemcacheGetResponse
MemcacheGetRequest = memcache_service_pb.MemcacheGetRequest

MemcacheDeleteResponse = memcache_service_pb.MemcacheDeleteResponse
MemcacheDeleteRequest = memcache_service_pb.MemcacheDeleteRequest

MemcacheIncrementResponse = memcache_service_pb.MemcacheIncrementResponse
MemcacheIncrementRequest = memcache_service_pb.MemcacheIncrementRequest

MemcacheBatchIncrementResponse = memcache_service_pb.MemcacheBatchIncrementResponse
MemcacheBatchIncrementRequest = memcache_service_pb.MemcacheBatchIncrementRequest

MemcacheFlushResponse = memcache_service_pb.MemcacheFlushResponse
MemcacheFlushRequest = memcache_service_pb.MemcacheFlushRequest

MemcacheStatsRequest = memcache_service_pb.MemcacheStatsRequest
MemcacheStatsResponse = memcache_service_pb.MemcacheStatsResponse

# Status codes returned by delete()/delete_multi(); a network failure is
# falsy so the result can also be treated as a boolean.
DELETE_NETWORK_FAILURE = 0
DELETE_ITEM_MISSING = 1

# Set-operation result codes mirrored from the MemcacheSetResponse protobuf.
STORED = MemcacheSetResponse.STORED
NOT_STORED = MemcacheSetResponse.NOT_STORED
ERROR = MemcacheSetResponse.ERROR
EXISTS = MemcacheSetResponse.EXISTS

# Maximum size in bytes of an encoded value accepted by the service (1 MB).
MAX_VALUE_SIZE = 10 ** 6

# Keys of the dictionary returned by get_stats().
STAT_MISSES = 'misses'
STAT_BYTE_HITS = 'byte_hits'

STAT_OLDEST_ITEM_AGES = 'oldest_item_age'

# Flag bit marking a stored value as compressed.
# NOTE(review): no compression is applied in the visible code; presumably
# reserved for wire compatibility -- confirm before relying on it.
FLAG_COMPRESSED = 1 << 3

# CapabilitySet used to query availability of the memcache service.
CAPABILITY = capabilities.CapabilitySet('memcache')
113 """Helper to test if something is a pair (2-tuple)."""
114 return isinstance(obj
, tuple) and len(obj
) == 2
117 def _add_name_space(message
, namespace
=None):
118 """Populate the name_space field in a messagecol buffer.
121 message: A messagecol buffer supporting the set_name_space() operation.
122 namespace: The name of the namespace part. If None, use the
123 default namespace. The empty namespace (i.e. '') will clear
124 the name_space field.
126 if namespace
is None:
127 namespace
= namespace_manager
.get_namespace()
129 message
.clear_name_space()
131 message
.set_name_space(namespace
)
134 def _key_string(key
, key_prefix
='', server_to_user_dict
=None):
135 """Utility function to handle different ways of requesting keys.
138 key: Either a string or tuple of (shard_number, string). In Google App
139 Engine the sharding is automatic so the shard number is ignored.
140 To memcache, the key is just bytes (no defined encoding).
141 key_prefix: Optional string prefix to prepend to key.
142 server_to_user_dict: Optional dictionary to populate with a mapping of
143 server-side key (which includes the key_prefix) to user-supplied key
144 (which does not have the prefix).
147 The key as a non-unicode string prepended with key_prefix. This is
148 the key sent to and stored by the server. If the resulting key is
149 longer then MAX_KEY_SIZE, it will be hashed with sha1 and will be
150 replaced with the hex representation of the said hash.
153 TypeError: If provided key isn't a string or tuple of (int, string)
158 if not isinstance(key
, basestring
):
159 raise TypeError('Key must be a string instance, received %r' % key
)
160 if not isinstance(key_prefix
, basestring
):
161 raise TypeError('key_prefix must be a string instance, received %r' %
168 server_key
= key_prefix
+ key
169 if isinstance(server_key
, unicode):
170 server_key
= server_key
.encode('utf-8')
172 if len(server_key
) > MAX_KEY_SIZE
:
173 server_key
= hashlib
.sha1(server_key
).hexdigest()
175 if server_to_user_dict
is not None:
176 assert isinstance(server_to_user_dict
, dict)
177 server_to_user_dict
[server_key
] = key
182 def _validate_encode_value(value
, do_pickle
):
183 """Utility function to validate and encode server keys and values.
186 value: Value to store in memcache. If it's a string, it will get passed
187 along as-is. If it's a unicode string, it will be marked appropriately,
188 such that retrievals will yield a unicode value. If it's any other data
189 type, this function will attempt to pickle the data and then store the
190 serialized result, unpickling it upon retrieval.
191 do_pickle: Callable that takes an object and returns a non-unicode
192 string containing the pickled object.
195 Tuple (stored_value, flags) where:
196 stored_value: The value as a non-unicode string that should be stored
198 flags: An integer with bits set from the FLAG_* constants in this file
199 to indicate the encoding of the key and value.
202 ValueError: If the encoded value is too large.
203 pickle.PicklingError: If the value is not a string and could not be pickled.
204 RuntimeError: If a complicated data structure could not be pickled due to
205 too many levels of recursion in its composition.
210 if isinstance(value
, str):
212 elif isinstance(value
, unicode):
213 stored_value
= value
.encode('utf-8')
214 flags |
= TYPE_UNICODE
215 elif isinstance(value
, bool):
218 stored_value
= str(int(value
))
220 elif isinstance(value
, int):
221 stored_value
= str(value
)
223 elif isinstance(value
, long):
224 stored_value
= str(value
)
227 stored_value
= do_pickle(value
)
228 flags |
= TYPE_PICKLED
234 if len(stored_value
) > MAX_VALUE_SIZE
:
235 raise ValueError('Values may not be more than %d bytes in length; '
236 'received %d bytes' % (MAX_VALUE_SIZE
, len(stored_value
)))
238 return (stored_value
, flags
)
241 def _decode_value(stored_value
, flags
, do_unpickle
):
242 """Utility function for decoding values retrieved from memcache.
245 stored_value: The value as a non-unicode string that was stored.
246 flags: An integer with bits set from the FLAG_* constants in this file
247 that indicate the encoding of the key and value.
248 do_unpickle: Callable that takes a non-unicode string object that contains
249 a pickled object and returns the pickled object.
252 The original object that was stored, be it a normal string, a unicode
253 string, int, long, or a Python object that was pickled.
256 pickle.UnpicklingError: If the value could not be unpickled.
258 assert isinstance(stored_value
, str)
259 assert isinstance(flags
, (int, long))
261 type_number
= flags
& FLAG_TYPE_MASK
266 if type_number
== TYPE_STR
:
268 elif type_number
== TYPE_UNICODE
:
269 return unicode(value
, 'utf-8')
270 elif type_number
== TYPE_PICKLED
:
271 return do_unpickle(value
)
272 elif type_number
== TYPE_BOOL
:
273 return bool(int(value
))
274 elif type_number
== TYPE_INT
:
276 elif type_number
== TYPE_LONG
:
279 assert False, "Unknown stored type"
280 assert False, "Shouldn't get here."
def create_rpc(deadline=None, callback=None):
  """Builds a UserRPC object bound to the memcache service.

  Args:
    deadline: Optional number of seconds before the operation times out;
      when omitted, a system-specific default (typically 5 seconds) applies.
    callback: Optional callable invoked when the RPC completes.

  Returns:
    An apiproxy_stub_map.UserRPC instance targeting the 'memcache' service.
  """
  memcache_rpc = apiproxy_stub_map.UserRPC('memcache', deadline, callback)
  return memcache_rpc
297 class Client(object):
298 """Memcache client object, through which one invokes all memcache operations.
300 Several methods are no-ops to retain source-level compatibility
301 with the existing popular Python memcache library.
303 Any method that takes a 'key' argument will accept that key as a string
304 (unicode or not) or a tuple of (hash_value, string) where the hash_value,
305 normally used for sharding onto a memcache instance, is instead ignored, as
306 Google App Engine deals with the sharding transparently. Keys in memcache are
307 just bytes, without a specified encoding. All such methods may raise TypeError
308 if provided a bogus key value and a ValueError if the key is too large.
310 Any method that takes a 'value' argument will accept as that value any
311 string (unicode or not), int, long, or pickle-able Python object, including
312 all native types. You'll get back from the cache the same type that you
315 The Client class is not thread-safe with respect to the gets(), cas() and
316 cas_multi() methods (and other compare-and-set-related methods). Therefore,
317 Client objects should not be used by more than one thread for CAS purposes.
318 Note that the global Client for the module-level functions is okay because it
319 does not expose any of the CAS methods.
322 def __init__(self
, servers
=None, debug
=0,
323 pickleProtocol
=cPickle
.HIGHEST_PROTOCOL
,
324 pickler
=cPickle
.Pickler
,
325 unpickler
=cPickle
.Unpickler
,
330 """Create a new Client object.
332 No parameters are required.
335 servers: Ignored; only for compatibility.
336 debug: Ignored; only for compatibility.
337 pickleProtocol: Pickle protocol to use for pickling the object.
338 pickler: pickle.Pickler sub-class to use for pickling.
339 unpickler: pickle.Unpickler sub-class to use for unpickling.
340 pload: Callable to use for retrieving objects by persistent id.
341 pid: Callable to use for determine the persistent id for objects, if any.
342 make_sync_call: Ignored; only for compatibility with an earlier version.
349 self
._pickler
_factory
= pickler
350 self
._unpickler
_factory
= unpickler
351 self
._pickle
_protocol
= pickleProtocol
352 self
._persistent
_id
= pid
353 self
._persistent
_load
= pload
354 self
._app
_id
= _app_id
358 """Clear the remembered CAS ids."""
359 self
._cas
_ids
.clear()
361 def _make_async_call(self
, rpc
, method
, request
, response
,
362 get_result_hook
, user_data
):
363 """Internal helper to schedule an asynchronous RPC.
366 rpc: None or a UserRPC object.
367 method: Method name, e.g. 'Get'.
368 request: Request protobuf.
369 response: Response protobuf.
370 get_result_hook: None or hook function used to process results
371 (See UserRPC.make_call() for more info).
372 user_data: None or user data for hook function.
375 A UserRPC object; either the one passed in as the first argument,
376 or a new one (if the first argument was None).
381 assert rpc
.service
== 'memcache', repr(rpc
.service
)
382 rpc
.make_call(method
, request
, response
, get_result_hook
, user_data
)
385 def _do_pickle(self
, value
):
386 """Pickles a provided value."""
387 pickle_data
= cStringIO
.StringIO()
388 pickler
= self
._pickler
_factory
(pickle_data
,
389 protocol
=self
._pickle
_protocol
)
390 if self
._persistent
_id
is not None:
391 pickler
.persistent_id
= self
._persistent
_id
393 return pickle_data
.getvalue()
395 def _do_unpickle(self
, value
):
396 """Unpickles a provided value."""
397 pickle_data
= cStringIO
.StringIO(value
)
398 unpickler
= self
._unpickler
_factory
(pickle_data
)
399 if self
._persistent
_load
is not None:
400 unpickler
.persistent_load
= self
._persistent
_load
401 return unpickler
.load()
403 def _add_app_id(self
, message
):
404 """Populates override field in message if accessing another app's memcache.
407 message: A protocol buffer supporting the mutable_override() operation.
410 app_override
= message
.mutable_override()
411 app_override
.set_app_id(self
._app
_id
)
413 def set_servers(self
, servers
):
414 """Sets the pool of memcache servers used by the client.
416 This is purely a compatibility method. In Google App Engine, it's a no-op.
420 def disconnect_all(self
):
421 """Closes all connections to memcache servers.
423 This is purely a compatibility method. In Google App Engine, it's a no-op.
427 def forget_dead_hosts(self
):
428 """Resets all servers to the alive status.
430 This is purely a compatibility method. In Google App Engine, it's a no-op.
435 """Logging function for debugging information.
437 This is purely a compatibility method. In Google App Engine, it's a no-op.
442 """Gets memcache statistics for this application.
444 All of these statistics may reset due to various transient conditions. They
445 provide the best information available at the time of being called.
448 Dictionary mapping statistic names to associated values. Statistics and
449 their associated meanings:
451 hits: Number of cache get requests resulting in a cache hit.
452 misses: Number of cache get requests resulting in a cache miss.
453 byte_hits: Sum of bytes transferred on get requests. Rolls over to
455 items: Number of key/value pairs in the cache.
456 bytes: Total size of all items in the cache.
457 oldest_item_age: How long in seconds since the oldest item in the
458 cache was accessed. Effectively, this indicates how long a new
459 item will survive in the cache without being accessed. This is
460 _not_ the amount of time that has elapsed since the item was
463 On error, returns None.
465 rpc
= self
.get_stats_async()
466 return rpc
.get_result()
468 def get_stats_async(self
, rpc
=None):
469 """Async version of get_stats().
472 A UserRPC instance whose get_result() method returns None if
473 there was a network error, otherwise a dict just like
476 request
= MemcacheStatsRequest()
477 self
._add
_app
_id
(request
)
478 response
= MemcacheStatsResponse()
479 return self
._make
_async
_call
(rpc
, 'Stats', request
, response
,
480 self
.__get
_stats
_hook
, None)
482 def __get_stats_hook(self
, rpc
):
485 except apiproxy_errors
.Error
:
487 response
= rpc
.response
488 if not response
.has_stats():
495 STAT_OLDEST_ITEM_AGES
: 0,
498 stats
= response
.stats()
500 STAT_HITS
: stats
.hits(),
501 STAT_MISSES
: stats
.misses(),
502 STAT_BYTE_HITS
: stats
.byte_hits(),
503 STAT_ITEMS
: stats
.items(),
504 STAT_BYTES
: stats
.bytes(),
505 STAT_OLDEST_ITEM_AGES
: stats
.oldest_item_age(),
509 """Deletes everything in memcache.
512 True on success, False on RPC or server error.
514 rpc
= self
.flush_all_async()
515 return rpc
.get_result()
517 def flush_all_async(self
, rpc
=None):
518 """Async version of flush_all().
521 A UserRPC instance whose get_result() method returns True on
522 success, False on RPC or server error.
524 request
= MemcacheFlushRequest()
525 self
._add
_app
_id
(request
)
526 response
= MemcacheFlushResponse()
527 return self
._make
_async
_call
(rpc
, 'FlushAll', request
, response
,
528 self
.__flush
_all
_hook
, None)
530 def __flush_all_hook(self
, rpc
):
533 except apiproxy_errors
.Error
:
537 def get(self
, key
, namespace
=None, for_cas
=False):
538 """Looks up a single key in memcache.
540 If you have multiple items to load, though, it's much more efficient
541 to use get_multi() instead, which loads them in one bulk operation,
542 reducing the networking latency that'd otherwise be required to do
543 many serialized get() operations.
546 key: The key in memcache to look up. See docs on Client
547 for details of format.
548 namespace: a string specifying an optional namespace to use in
550 for_cas: If True, request and store CAS ids on the client (see
551 cas() operation below).
554 The value of the key, if found in memcache, else None.
558 rpc
= self
.get_multi_async([key
], namespace
=namespace
, for_cas
=for_cas
)
559 results
= rpc
.get_result()
560 return results
.get(key
)
562 def gets(self
, key
, namespace
=None):
563 """An alias for get(..., for_cas=True)."""
564 return self
.get(key
, namespace
=namespace
, for_cas
=True)
566 def get_multi(self
, keys
, key_prefix
='', namespace
=None, for_cas
=False):
567 """Looks up multiple keys from memcache in one operation.
569 This is the recommended way to do bulk loads.
572 keys: List of keys to look up. Keys may be strings or
573 tuples of (hash_value, string). Google App Engine
574 does the sharding and hashing automatically, though, so the hash
575 value is ignored. To memcache, keys are just series of bytes,
576 and not in any particular encoding.
577 key_prefix: Prefix to prepend to all keys when talking to the server;
578 not included in the returned dictionary.
579 namespace: a string specifying an optional namespace to use in
581 for_cas: If True, request and store CAS ids on the client.
584 A dictionary of the keys and values that were present in memcache.
585 Even if the key_prefix was specified, that key_prefix won't be on
586 the keys in the returned dictionary.
588 rpc
= self
.get_multi_async(keys
, key_prefix
, namespace
, for_cas
)
589 return rpc
.get_result()
591 def get_multi_async(self
, keys
, key_prefix
='', namespace
=None,
592 for_cas
=False, rpc
=None):
593 """Async version of get_multi().
596 A UserRPC instance whose get_result() method returns {} if
597 there was a network error, otherwise a dict just like
600 request
= MemcacheGetRequest()
601 self
._add
_app
_id
(request
)
602 _add_name_space(request
, namespace
)
604 request
.set_for_cas(True)
605 response
= MemcacheGetResponse()
608 request
.add_key(_key_string(key
, key_prefix
, user_key
))
610 return self
._make
_async
_call
(rpc
, 'Get', request
, response
,
611 self
.__get
_hook
, user_key
)
613 def __get_hook(self
, rpc
):
616 except apiproxy_errors
.Error
:
618 for_cas
= rpc
.request
.for_cas()
619 response
= rpc
.response
620 user_key
= rpc
.user_data
622 for returned_item
in response
.item_list():
623 value
= _decode_value(returned_item
.value(), returned_item
.flags(),
625 raw_key
= returned_item
.key()
627 self
._cas
_ids
[raw_key
] = returned_item
.cas_id()
628 return_value
[user_key
[raw_key
]] = value
636 def delete(self
, key
, seconds
=0, namespace
=None):
637 """Deletes a key from memcache.
640 key: Key to delete. See docs on Client for detils.
641 seconds: Optional number of seconds to make deleted items 'locked'
642 for 'add' operations. Value can be a delta from current time (up to
643 1 month), or an absolute Unix epoch time. Defaults to 0, which means
644 items can be immediately added. With or without this option,
645 a 'set' operation will always work. Float values will be rounded up to
646 the nearest whole second.
647 namespace: a string specifying an optional namespace to use in
651 DELETE_NETWORK_FAILURE (0) on network failure,
652 DELETE_ITEM_MISSING (1) if the server tried to delete the item but
654 DELETE_SUCCESSFUL (2) if the item was actually deleted.
655 This can be used as a boolean value, where a network failure is the
658 rpc
= self
.delete_multi_async([key
], seconds
, namespace
=namespace
)
659 results
= rpc
.get_result()
661 return DELETE_NETWORK_FAILURE
664 def delete_multi(self
, keys
, seconds
=0, key_prefix
='', namespace
=None):
665 """Delete multiple keys at once.
668 keys: List of keys to delete.
669 seconds: Optional number of seconds to make deleted items 'locked'
670 for 'add' operations. Value can be a delta from current time (up to
671 1 month), or an absolute Unix epoch time. Defaults to 0, which means
672 items can be immediately added. With or without this option,
673 a 'set' operation will always work. Float values will be rounded up to
674 the nearest whole second.
675 key_prefix: Prefix to put on all keys when sending specified
676 keys to memcache. See docs for get_multi() and set_multi().
677 namespace: a string specifying an optional namespace to use in
681 True if all operations completed successfully. False if one
682 or more failed to complete.
684 rpc
= self
.delete_multi_async(keys
, seconds
, key_prefix
, namespace
)
685 results
= rpc
.get_result()
688 def delete_multi_async(self
, keys
, seconds
=0, key_prefix
='',
689 namespace
=None, rpc
=None):
690 """Async version of delete_multi() -- note different return value.
693 A UserRPC instance whose get_result() method returns None if
694 there was a network error, or a list of status values otherwise,
695 where each status corresponds to a key and is either
696 DELETE_SUCCESSFUL, DELETE_ITEM_MISSING, or DELETE_NETWORK_FAILURE
697 (see delete() docstring for details).
699 if not isinstance(seconds
, (int, long, float)):
700 raise TypeError('Delete timeout must be a number.')
702 raise ValueError('Delete timeout must not be negative.')
704 request
= MemcacheDeleteRequest()
705 self
._add
_app
_id
(request
)
706 _add_name_space(request
, namespace
)
707 response
= MemcacheDeleteResponse()
710 delete_item
= request
.add_item()
711 delete_item
.set_key(_key_string(key
, key_prefix
=key_prefix
))
712 delete_item
.set_delete_time(int(math
.ceil(seconds
)))
714 return self
._make
_async
_call
(rpc
, 'Delete', request
, response
,
715 self
.__delete
_hook
, None)
717 def __delete_hook(self
, rpc
):
720 except apiproxy_errors
.Error
:
723 for status
in rpc
.response
.delete_status_list():
724 if status
== MemcacheDeleteResponse
.DELETED
:
725 result
.append(DELETE_SUCCESSFUL
)
726 elif status
== MemcacheDeleteResponse
.NOT_FOUND
:
727 result
.append(DELETE_ITEM_MISSING
)
729 result
.append(DELETE_NETWORK_FAILURE
)
740 def set(self
, key
, value
, time
=0, min_compress_len
=0, namespace
=None):
741 """Sets a key's value, regardless of previous contents in cache.
743 Unlike add() and replace(), this method always sets (or
744 overwrites) the value in memcache, regardless of previous
748 key: Key to set. See docs on Client for details.
749 value: Value to set. Any type. If complex, will be pickled.
750 time: Optional expiration time, either relative number of seconds
751 from current time (up to 1 month), or an absolute Unix epoch time.
752 By default, items never expire, though items may be evicted due to
753 memory pressure. Float values will be rounded up to the nearest
755 min_compress_len: Ignored option for compatibility.
756 namespace: a string specifying an optional namespace to use in
760 True if set. False on error.
762 return self
._set
_with
_policy
(MemcacheSetRequest
.SET
, key
, value
, time
=time
,
765 def add(self
, key
, value
, time
=0, min_compress_len
=0, namespace
=None):
766 """Sets a key's value, iff item is not already in memcache.
769 key: Key to set. See docs on Client for details.
770 value: Value to set. Any type. If complex, will be pickled.
771 time: Optional expiration time, either relative number of seconds
772 from current time (up to 1 month), or an absolute Unix epoch time.
773 By default, items never expire, though items may be evicted due to
774 memory pressure. Float values will be rounded up to the nearest
776 min_compress_len: Ignored option for compatibility.
777 namespace: a string specifying an optional namespace to use in
781 True if added. False on error.
783 return self
._set
_with
_policy
(MemcacheSetRequest
.ADD
, key
, value
, time
=time
,
786 def replace(self
, key
, value
, time
=0, min_compress_len
=0, namespace
=None):
787 """Replaces a key's value, failing if item isn't already in memcache.
790 key: Key to set. See docs on Client for details.
791 value: Value to set. Any type. If complex, will be pickled.
792 time: Optional expiration time, either relative number of seconds
793 from current time (up to 1 month), or an absolute Unix epoch time.
794 By default, items never expire, though items may be evicted due to
795 memory pressure. Float values will be rounded up to the nearest
797 min_compress_len: Ignored option for compatibility.
798 namespace: a string specifying an optional namespace to use in
802 True if replaced. False on RPC error or cache miss.
804 return self
._set
_with
_policy
(MemcacheSetRequest
.REPLACE
,
805 key
, value
, time
=time
, namespace
=namespace
)
807 def cas(self
, key
, value
, time
=0, min_compress_len
=0, namespace
=None):
808 """Compare-And-Set update.
810 This requires that the key has previously been successfully
811 fetched with gets() or get(..., for_cas=True), and that no changes
812 have been made to the key since that fetch. Typical usage is:
815 client = memcache.Client()
816 value = client.gets(key) # OR client.get(key, for_cas=True)
818 ok = client.cas(key, value)
820 If two processes run similar code, the first one calling cas()
821 will succeed (ok == True), while the second one will fail (ok ==
822 False). This can be used to detect race conditions.
824 NOTE: some state (the CAS id) is stored on the Client object for
825 each key ever used with gets(). To prevent ever-increasing memory
826 usage, you must use a Client object when using cas(), and the
827 lifetime of your Client object should be limited to that of one
828 incoming HTTP request. You cannot use the global-function-based
832 key: Key to set. See docs on Client for details.
833 value: The new value.
834 time: Optional expiration time, either relative number of seconds
835 from current time (up to 1 month), or an absolute Unix epoch time.
836 By default, items never expire, though items may be evicted due to
837 memory pressure. Float values will be rounded up to the nearest
839 min_compress_len: Ignored option for compatibility.
840 namespace: a string specifying an optional namespace to use in
844 True if updated. False on RPC error or if the CAS id didn't match.
846 return self
._set
_with
_policy
(MemcacheSetRequest
.CAS
, key
, value
,
849 def _set_with_policy(self
, policy
, key
, value
, time
=0, namespace
=None):
850 """Sets a single key with a specified policy.
852 Helper function for set(), add(), and replace().
855 policy: One of MemcacheSetRequest.SET, .ADD, .REPLACE or .CAS.
856 key: Key to add, set, or replace. See docs on Client for details.
858 time: Expiration time, defaulting to 0 (never expiring).
859 namespace: a string specifying an optional namespace to use in
863 True if stored, False on RPC error or policy error, e.g. a replace
864 that failed due to the item not already existing, or an add
865 failing due to the item not already existing.
867 rpc
= self
._set
_multi
_async
_with
_policy
(policy
, {key
: value
},
869 status_dict
= rpc
.get_result()
872 return status_dict
.get(key
) == MemcacheSetResponse
.STORED
879 def _set_multi_with_policy(self
, policy
, mapping
, time
=0, key_prefix
='',
881 """Set multiple keys with a specified policy.
883 Helper function for set_multi(), add_multi(), and replace_multi(). This
884 reduces the network latency of doing many requests in serial.
887 policy: One of MemcacheSetRequest.SET, .ADD, .REPLACE or .CAS.
888 mapping: Dictionary of keys to values. If policy == CAS, the
889 values must be (value, cas_id) tuples.
890 time: Optional expiration time, either relative number of seconds
891 from current time (up to 1 month), or an absolute Unix epoch time.
892 By default, items never expire, though items may be evicted due to
893 memory pressure. Float values will be rounded up to the nearest
895 key_prefix: Prefix for to prepend to all keys.
896 namespace: a string specifying an optional namespace to use in
900 A list of keys whose values were NOT set. On total success,
901 this list should be empty. On network/RPC/server errors,
902 a list of all input keys is returned; in this case the keys
903 may or may not have been updated.
905 rpc
= self
._set
_multi
_async
_with
_policy
(policy
, mapping
, time
,
906 key_prefix
, namespace
)
907 status_dict
= rpc
.get_result()
908 server_keys
, user_key
= rpc
.user_data
911 return user_key
.values()
915 for server_key
in server_keys
:
916 key
= user_key
[server_key
]
917 set_status
= status_dict
[key
]
918 if set_status
!= MemcacheSetResponse
.STORED
:
919 unset_list
.append(key
)
924 def _set_multi_async_with_policy(self
, policy
, mapping
, time
=0,
925 key_prefix
='', namespace
=None, rpc
=None):
926 """Async version of _set_multi_with_policy() -- note different return.
929 A UserRPC instance whose get_result() method returns None if
930 there was a network error, or a dict mapping (user) keys to
931 status values otherwise, where each status is one of STORED,
932 NOT_STORED, ERROR, or EXISTS.
934 if not isinstance(time
, (int, long, float)):
935 raise TypeError('Expiration must be a number.')
937 raise ValueError('Expiration must not be negative.')
939 request
= MemcacheSetRequest()
940 self
._add
_app
_id
(request
)
941 _add_name_space(request
, namespace
)
944 set_cas_id
= (policy
== MemcacheSetRequest
.CAS
)
945 for key
, value
in mapping
.iteritems():
946 server_key
= _key_string(key
, key_prefix
, user_key
)
947 stored_value
, flags
= _validate_encode_value(value
, self
._do
_pickle
)
948 server_keys
.append(server_key
)
950 item
= request
.add_item()
951 item
.set_key(server_key
)
952 item
.set_value(stored_value
)
953 item
.set_flags(flags
)
954 item
.set_set_policy(policy
)
955 item
.set_expiration_time(int(math
.ceil(time
)))
957 cas_id
= self
._cas
_ids
.get(server_key
)
959 if cas_id
is not None:
960 item
.set_cas_id(cas_id
)
963 item
.set_for_cas(True)
965 response
= MemcacheSetResponse()
968 return self
._make
_async
_call
(rpc
, 'Set', request
, response
,
969 self
.__set
_with
_policy
_hook
,
970 (server_keys
, user_key
))
972 def __set_with_policy_hook(self
, rpc
):
975 except apiproxy_errors
.Error
:
978 response
= rpc
.response
979 server_keys
, user_key
= rpc
.user_data
980 assert response
.set_status_size() == len(server_keys
)
982 for server_key
, status
in zip(server_keys
, response
.set_status_list()):
983 status_dict
[user_key
[server_key
]] = status
986 def set_multi(self
, mapping
, time
=0, key_prefix
='', min_compress_len
=0,
988 """Set multiple keys' values at once, regardless of previous contents.
991 mapping: Dictionary of keys to values.
992 time: Optional expiration time, either relative number of seconds
993 from current time (up to 1 month), or an absolute Unix epoch time.
994 By default, items never expire, though items may be evicted due to
995 memory pressure. Float values will be rounded up to the nearest
997 key_prefix: Prefix for to prepend to all keys.
998 min_compress_len: Unimplemented compatibility option.
999 namespace: a string specifying an optional namespace to use in
1003 A list of keys whose values were NOT set. On total success,
1004 this list should be empty.
1006 return self
._set
_multi
_with
_policy
(MemcacheSetRequest
.SET
, mapping
,
1007 time
=time
, key_prefix
=key_prefix
,
1008 namespace
=namespace
)
1010 def set_multi_async(self
, mapping
, time
=0, key_prefix
='',
1011 min_compress_len
=0, namespace
=None, rpc
=None):
1012 """Async version of set_multi() -- note different return value.
1015 See _set_multi_async_with_policy().
1017 return self
._set
_multi
_async
_with
_policy
(MemcacheSetRequest
.SET
, mapping
,
1018 time
=time
, key_prefix
=key_prefix
,
1019 namespace
=namespace
, rpc
=rpc
)
def add_multi(self, mapping, time=0, key_prefix='', min_compress_len=0,
              namespace=None):
  """Set multiple keys' values iff items are not already in memcache.

  Args:
    mapping: Dictionary of keys to values.
    time: Optional expiration time, either relative number of seconds
      from current time (up to 1 month), or an absolute Unix epoch time.
      By default, items never expire, though items may be evicted due to
      memory pressure. Float values will be rounded up to the nearest
      whole second.
    key_prefix: Prefix to prepend to all keys.
    min_compress_len: Unimplemented compatibility option.
    namespace: a string specifying an optional namespace to use in
      the request.

  Returns:
    A list of keys whose values were NOT set (for ADD this means the
    keys were already present in memcache). On total success, this
    list should be empty.
  """
  return self._set_multi_with_policy(MemcacheSetRequest.ADD, mapping,
                                     time=time, key_prefix=key_prefix,
                                     namespace=namespace)
def add_multi_async(self, mapping, time=0, key_prefix='',
                    min_compress_len=0, namespace=None, rpc=None):
  """Async version of add_multi() -- note different return value.

  Returns:
    See _set_multi_async_with_policy().
  """
  # ADD policy: store each key only if it is not already present.
  return self._set_multi_async_with_policy(
      MemcacheSetRequest.ADD, mapping,
      time=time, key_prefix=key_prefix,
      namespace=namespace, rpc=rpc)
def replace_multi(self, mapping, time=0, key_prefix='', min_compress_len=0,
                  namespace=None):
  """Replace multiple keys' values, failing if the items aren't in memcache.

  Args:
    mapping: Dictionary of keys to values.
    time: Optional expiration time, either relative number of seconds
      from current time (up to 1 month), or an absolute Unix epoch time.
      By default, items never expire, though items may be evicted due to
      memory pressure. Float values will be rounded up to the nearest
      whole second.
    key_prefix: Prefix to prepend to all keys.
    min_compress_len: Unimplemented compatibility option.
    namespace: a string specifying an optional namespace to use in
      the request.

  Returns:
    A list of keys whose values were NOT set (for REPLACE this means
    the keys were not already present in memcache). On total success,
    this list should be empty.
  """
  return self._set_multi_with_policy(MemcacheSetRequest.REPLACE, mapping,
                                     time=time, key_prefix=key_prefix,
                                     namespace=namespace)
def replace_multi_async(self, mapping, time=0, key_prefix='',
                        min_compress_len=0, namespace=None, rpc=None):
  """Async version of replace_multi() -- note different return value.

  Returns:
    See _set_multi_async_with_policy().
  """
  # BUG FIX: the call was missing the `mapping` positional argument that
  # every sibling (set/add/cas)_multi_async method passes; without it the
  # policy helper has no items to send.
  return self._set_multi_async_with_policy(MemcacheSetRequest.REPLACE,
                                           mapping,
                                           time=time, key_prefix=key_prefix,
                                           namespace=namespace, rpc=rpc)
def cas_multi(self, mapping, time=0, key_prefix='', min_compress_len=0,
              namespace=None):
  """Compare-And-Set update for multiple keys.

  See cas() docstring for an explanation.

  Args:
    mapping: Dictionary of keys to values.
    time: Optional expiration time, either relative number of seconds
      from current time (up to 1 month), or an absolute Unix epoch time.
      By default, items never expire, though items may be evicted due to
      memory pressure. Float values will be rounded up to the nearest
      whole second.
    key_prefix: Prefix to prepend to all keys.
    min_compress_len: Unimplemented compatibility option.
    namespace: a string specifying an optional namespace to use in
      the request.

  Returns:
    A list of keys whose values were NOT set because the compare
    failed. On total success, this list should be empty.
  """
  return self._set_multi_with_policy(MemcacheSetRequest.CAS, mapping,
                                     time=time, key_prefix=key_prefix,
                                     namespace=namespace)
def cas_multi_async(self, mapping, time=0, key_prefix='',
                    min_compress_len=0, namespace=None, rpc=None):
  """Async version of cas_multi() -- note different return value.

  Returns:
    See _set_multi_async_with_policy().
  """
  # CAS policy: store each key only if its value is unchanged since the
  # corresponding gets() call.
  return self._set_multi_async_with_policy(
      MemcacheSetRequest.CAS, mapping,
      time=time, key_prefix=key_prefix,
      namespace=namespace, rpc=rpc)
def incr(self, key, delta=1, namespace=None, initial_value=None):
  """Atomically increments a key's value.

  The stored value is treated as an unsigned 64-bit integer; memcache
  performs no overflow checking, so an oversized value wraps around.

  Unless an initial_value is specified, the key must already exist
  in the cache to be incremented. To initialize a counter, either
  specify initial_value or set() it to the initial value, as an
  ASCII decimal integer. Future get()s of the key, post-increment,
  will still be an ASCII decimal value.

  Args:
    key: Key to increment. If an iterable collection, each one of the keys
      will be offset. See Client's docstring for details.
    delta: Non-negative integer value (int or long) to increment key by,
      defaulting to 1.
    namespace: a string specifying an optional namespace to use in
      the request.
    initial_value: initial value to put in the cache, if it doesn't
      already exist. The default value, None, will not create a cache
      entry if it doesn't already exist.

  Returns:
    If key was a single value, the new long integer value, or None if key
    was not in the cache, could not be incremented for any other reason, or
    a network/RPC/server error occurred.

    If key was an iterable collection, a dictionary will be returned
    mapping supplied keys to values, with the values having the same meaning
    as the singular return value of this method.

  Raises:
    ValueError: If number is negative.
    TypeError: If delta isn't an int or long.
  """
  # is_negative=False selects the increment direction.
  return self._incrdecr(key, False, delta, namespace, initial_value)
def incr_async(self, key, delta=1, namespace=None, initial_value=None,
               rpc=None):
  """Async version of incr().

  Args:
    key: Key to increment; see incr().
    delta: Non-negative integer value (int or long) to increment key by,
      defaulting to 1.
    namespace: a string specifying an optional namespace to use in
      the request.
    initial_value: initial value to put in the cache, if it doesn't
      already exist; None (the default) creates no entry.
    rpc: Optional UserRPC to use for the call.

  Returns:
    A UserRPC instance whose get_result() method returns the same
    kind of value as incr() returns.
  """
  return self._incrdecr_async(key, False, delta, namespace=namespace,
                              initial_value=initial_value, rpc=rpc)
def decr(self, key, delta=1, namespace=None, initial_value=None):
  """Atomically decrements a key's value.

  The stored value is treated as an unsigned 64-bit integer; memcache
  caps decrementing below zero to zero.

  The key must already exist in the cache to be decremented. See
  docs on incr() for details.

  Args:
    key: Key to decrement. If an iterable collection, each one of the keys
      will be offset. See Client's docstring for details.
    delta: Non-negative integer value (int or long) to decrement key by,
      defaulting to 1.
    namespace: a string specifying an optional namespace to use in
      the request.
    initial_value: initial value to put in the cache, if it doesn't
      already exist. The default value, None, will not create a cache
      entry if it doesn't already exist.

  Returns:
    If key was a single value, the new long integer value, or None if key
    was not in the cache, could not be decremented for any other reason, or
    a network/RPC/server error occurred.

    If key was an iterable collection, a dictionary will be returned
    mapping supplied keys to values, with the values having the same meaning
    as the singular return value of this method.

  Raises:
    ValueError: If number is negative.
    TypeError: If delta isn't an int or long.
  """
  # is_negative=True selects the decrement direction.
  return self._incrdecr(key, True, delta, namespace, initial_value)
def decr_async(self, key, delta=1, namespace=None, initial_value=None,
               rpc=None):
  """Async version of decr().

  Args:
    key: Key to decrement; see decr().
    delta: Non-negative integer value (int or long) to decrement key by,
      defaulting to 1.
    namespace: a string specifying an optional namespace to use in
      the request.
    initial_value: initial value to put in the cache, if it doesn't
      already exist; None (the default) creates no entry.
    rpc: Optional UserRPC to use for the call.

  Returns:
    A UserRPC instance whose get_result() method returns the same
    kind of value as decr() returns.
  """
  return self._incrdecr_async(key, True, delta, namespace=namespace,
                              initial_value=initial_value, rpc=rpc)
def _incrdecr(self, key, is_negative, delta, namespace=None,
              initial_value=None):
  """Increment or decrement a key by a provided delta.

  Args:
    key: Key to increment or decrement. If an iterable collection, each
      one of the keys will be offset.
    is_negative: Boolean, if this is a decrement.
    delta: Non-negative integer amount (int or long) to increment
      or decrement by.
    namespace: a string specifying an optional namespace to use in
      the request.
    initial_value: initial value to put in the cache, if it doesn't
      already exist. The default value, None, will not create a cache
      entry if it doesn't already exist.

  Returns:
    New long integer value, or None on cache miss or network/RPC/server
    error.

  Raises:
    ValueError: If delta is negative.
    TypeError: If delta isn't an int or long.
  """
  # Synchronous wrapper: issue the async RPC and block on its result.
  rpc = self._incrdecr_async(key, is_negative, delta, namespace,
                             initial_value)
  return rpc.get_result()
def _incrdecr_async(self, key, is_negative, delta, namespace=None,
                    initial_value=None, rpc=None):
  """Async version of _incrdecr().

  Args:
    key: Key (or iterable of keys) to offset; see _incrdecr().
    is_negative: Boolean, if this is a decrement.
    delta: Non-negative integer amount (int or long) to offset by.
    namespace: a string specifying an optional namespace to use in
      the request.
    initial_value: initial value to put in the cache, if it doesn't
      already exist; None (the default) creates no entry.
    rpc: Optional UserRPC to use for the call.

  Returns:
    A UserRPC instance whose get_result() method returns the same
    kind of value as _incrdecr() returns.

  Raises:
    ValueError: If delta is negative.
    TypeError: If delta isn't an int or long.
  """
  if not isinstance(delta, (int, long)):
    raise TypeError('Delta must be an integer or long, received %r' % delta)
  if delta < 0:
    raise ValueError('Delta must not be negative.')

  # A non-string key is treated as a collection and dispatched as a batch
  # offset.  offset_multi_async encodes direction in the sign of the delta,
  # so negate it here for decrements.
  if not isinstance(key, basestring):
    try:
      it = iter(key)
    except TypeError:
      pass
    else:
      if is_negative:
        delta = -delta
      return self.offset_multi_async(dict((k, delta) for k in it),
                                     namespace=namespace,
                                     initial_value=initial_value,
                                     rpc=rpc)

  request = MemcacheIncrementRequest()
  self._add_app_id(request)
  _add_name_space(request, namespace)
  response = MemcacheIncrementResponse()
  request.set_key(_key_string(key))
  request.set_delta(delta)
  if is_negative:
    request.set_direction(MemcacheIncrementRequest.DECREMENT)
  else:
    request.set_direction(MemcacheIncrementRequest.INCREMENT)
  if initial_value is not None:
    # Flag the initial value with its Python integer type so a later
    # get() decodes it consistently.
    request.set_initial_value(long(initial_value))
    initial_flags = None
    if isinstance(initial_value, int):
      initial_flags = TYPE_INT
    elif isinstance(initial_value, long):
      initial_flags = TYPE_LONG
    if initial_flags is not None:
      request.set_initial_flags(initial_flags)

  return self._make_async_call(rpc, 'Increment', request, response,
                               self.__incrdecr_hook, None)
def __incrdecr_hook(self, rpc):
  """Returns the new value from an Increment RPC, or None on failure."""
  try:
    rpc.check_success()
  except apiproxy_errors.Error:
    # Network/RPC/server error: report None, matching _incrdecr()'s contract.
    return None

  response = rpc.response
  if response.has_new_value():
    return response.new_value()
  # No new value in the response: the increment did not take effect
  # (e.g. cache miss with no initial value).
  return None
def offset_multi(self, mapping, key_prefix='',
                 namespace=None, initial_value=None):
  """Offsets multiple keys by a delta, incrementing and decrementing in batch.

  Args:
    mapping: Dictionary mapping keys to deltas (positive or negative
      integers) to apply to each corresponding key.
    key_prefix: Prefix to prepend to all keys.
    initial_value: Initial value to put in the cache, if it doesn't
      already exist. The default value, None, will not create a cache
      entry if it doesn't already exist.
    namespace: A string specifying an optional namespace to use in
      the request.

  Returns:
    Dictionary mapping input keys to new integer values. The new value will
    be None if an error occurs, the key does not already exist, or the value
    was not an integer type. The values will wrap-around at unsigned 64-bit
    integer-maximum and underflow will be floored at zero.
  """
  # Synchronous wrapper: fire the async batch RPC and wait for its result.
  return self.offset_multi_async(mapping, key_prefix,
                                 namespace, initial_value).get_result()
def offset_multi_async(self, mapping, key_prefix='',
                       namespace=None, initial_value=None, rpc=None):
  """Async version of offset_multi().

  Args:
    mapping: Dictionary mapping keys to deltas (positive or negative
      integers); see offset_multi().
    key_prefix: Prefix to prepend to all keys.
    namespace: A string specifying an optional namespace to use in
      the request.
    initial_value: Initial value to put in the cache, if it doesn't
      already exist; None (the default) creates no entry.
    rpc: Optional UserRPC to use for the call.

  Returns:
    A UserRPC instance whose get_result() method returns a dict just
    like offset_multi() returns.

  Raises:
    ValueError: If initial_value is negative.
    TypeError: If initial_value or any delta isn't an int or long.
  """
  initial_flags = None
  if initial_value is not None:
    if not isinstance(initial_value, (int, long)):
      raise TypeError('initial_value must be an integer')
    if initial_value < 0:
      raise ValueError('initial_value must be >= 0')
    # Record the Python integer type so later reads decode consistently.
    if isinstance(initial_value, int):
      initial_flags = TYPE_INT
    else:
      initial_flags = TYPE_LONG

  request = MemcacheBatchIncrementRequest()
  self._add_app_id(request)
  response = MemcacheBatchIncrementResponse()
  _add_name_space(request, namespace)

  for key, delta in mapping.iteritems():
    if not isinstance(delta, (int, long)):
      raise TypeError('Delta must be an integer or long, received %r' % delta)
    # The wire format carries a non-negative delta plus a direction flag,
    # so a negative delta becomes a positive decrement.
    if delta >= 0:
      direction = MemcacheIncrementRequest.INCREMENT
    else:
      delta = -delta
      direction = MemcacheIncrementRequest.DECREMENT

    server_key = _key_string(key, key_prefix)

    item = request.add_item()
    item.set_key(server_key)
    item.set_delta(delta)
    item.set_direction(direction)
    if initial_value is not None:
      item.set_initial_value(initial_value)
      item.set_initial_flags(initial_flags)

  return self._make_async_call(rpc, 'BatchIncrement', request, response,
                               self.__offset_hook, mapping)
def __offset_hook(self, rpc):
  """Maps a BatchIncrement response back onto the caller's keys."""
  mapping = rpc.user_data
  try:
    rpc.check_success()
  except apiproxy_errors.Error:
    # RPC-level failure: report None for every requested key.
    return dict((k, None) for k in mapping.iterkeys())

  response = rpc.response
  assert response.item_size() == len(mapping)

  result_dict = {}
  # Response items come back in request order, which matches the
  # iteration order of the mapping used to build the request.
  for key, resp_item in zip(mapping.iterkeys(), response.item_list()):
    if (resp_item.increment_status() == MemcacheIncrementResponse.OK and
        resp_item.has_new_value()):
      result_dict[key] = resp_item.new_value()
    else:
      result_dict[key] = None

  return result_dict
def setup_client(client_obj):
  """Sets the Client object instance to use for all module-level methods.

  Use this method if you want to have custom persistent_id() or
  persistent_load() functions associated with your client.

  NOTE: We don't expose the _async methods as functions; they're too
  obscure; and we don't expose gets(), cas() and cas_multi() because
  they maintain state on the client object.

  Args:
    client_obj: Instance of the memcache.Client object.
  """
  # BUG FIX: `global` declaration was missing, so the module-level _CLIENT
  # binding (read by other module functions) was never updated.
  global _CLIENT
  var_dict = globals()

  _CLIENT = client_obj
  # Rebind each module-level convenience function to the new client's
  # bound method of the same name.
  for name in ('set_servers', 'disconnect_all', 'forget_dead_hosts',
               'debuglog', 'get', 'get_multi', 'set', 'set_multi',
               'add', 'add_multi', 'replace', 'replace_multi',
               'delete', 'delete_multi', 'incr', 'decr',
               'flush_all', 'get_stats', 'offset_multi'):
    var_dict[name] = getattr(_CLIENT, name)