3 # Copyright 2007 Google Inc.
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
9 # http://www.apache.org/licenses/LICENSE-2.0
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
21 """The Python datastore API used by app developers.
23 Defines Entity, Query, and Iterator classes, as well as methods for all of the
24 datastore's calls. Also defines conversions between the Python classes and
25 their PB counterparts.
27 The datastore errors are defined in the datastore_errors module. That module is
28 only required to avoid circular imports. datastore imports datastore_types,
29 which needs BadValueError, so it can't be defined in datastore.
55 from xml
.sax
import saxutils
57 from google
.appengine
.api
import apiproxy_stub_map
58 from google
.appengine
.api
import capabilities
59 from google
.appengine
.api
import datastore_errors
60 from google
.appengine
.api
import datastore_types
61 from google
.appengine
.datastore
import datastore_pb
62 from google
.appengine
.datastore
import datastore_query
63 from google
.appengine
.datastore
import datastore_rpc
64 from google
.appengine
.datastore
import entity_pb
# NOTE(review): this region is a damaged extraction -- the original
# explanatory comments between these constants are missing (numbering gaps),
# and at least one continuation line inside the WRITE_CAPABILITY call
# (original line 82) is absent. Verify values against the upstream
# google.appengine.api.datastore module before relying on them.
68 MAX_ALLOWABLE_QUERIES
= 30
71 MAXIMUM_RESULTS
= 1000
77 DEFAULT_TRANSACTION_RETRIES
= 3
# Capability sets used to probe datastore read/write availability.
80 READ_CAPABILITY
= capabilities
.CapabilitySet('datastore_v3')
81 WRITE_CAPABILITY
= capabilities
.CapabilitySet(
83 capabilities
=['write'])
91 _MAX_INDEXED_PROPERTIES
= 20000
# Re-exported from datastore_rpc.
94 _MAX_ID_BATCH_SIZE
= datastore_rpc
._MAX
_ID
_BATCH
_SIZE
# Convenience aliases re-exported from datastore_types.
96 Key
= datastore_types
.Key
97 typename
= datastore_types
.typename
# Read-policy constants re-exported from datastore_rpc.Configuration.
100 STRONG_CONSISTENCY
= datastore_rpc
.Configuration
.STRONG_CONSISTENCY
101 EVENTUAL_CONSISTENCY
= datastore_rpc
.Configuration
.EVENTUAL_CONSISTENCY
# Largest value representable in a signed 32-bit integer.
105 _MAX_INT_32
= 2**31-1
# NOTE(review): several original lines are missing from this extraction
# (126-127 presumably wrap a bare type into a tuple; 131-135 the
# single-instance return path; 140-145 the list() conversion guarded by
# try/except; 149-151 the per-element loop header that defines `val` and
# builds `arg_list`; 156). Confirm against upstream before editing.
108 def NormalizeAndTypeCheck(arg
, types
):
109 """Normalizes and type checks the given argument.
112 arg: an instance or iterable of the given type(s)
113 types: allowed type or tuple of types
116 A (list, bool) tuple. The list is a normalized, shallow copy of the
117 argument. The boolean is True if the argument was a sequence, False
118 if it was a single object.
121 AssertionError: types includes list or tuple.
122 BadArgumentError: arg is not an instance or sequence of one of the given
125 if not isinstance(types
, (list, tuple)):
# list/tuple are container types here, never allowed as element types.
128 assert list not in types
and tuple not in types
130 if isinstance(arg
, types
):
# Strings are iterable but must be rejected as sequences of types.
136 if isinstance(arg
, basestring
):
137 raise datastore_errors
.BadArgumentError(
138 'Expected an instance or iterable of %s; received %s (a %s).' %
139 (types
, arg
, typename(arg
)))
146 raise datastore_errors
.BadArgumentError(
147 'Expected an instance or iterable of %s; received %s (a %s).' %
148 (types
, arg
, typename(arg
)))
# Every element of the normalized list must itself be one of `types`.
152 if not isinstance(val
, types
):
153 raise datastore_errors
.BadArgumentError(
154 'Expected one of %s; received %s (a %s).' %
155 (types
, val
, typename(val
)))
157 return arg_list
, True
def NormalizeAndTypeCheckKeys(keys):
  """Normalizes and type checks that the given argument is a valid key or keys.

  A wrapper around NormalizeAndTypeCheck() that accepts strings, Keys, and
  Entities, and normalizes to Keys.

  Args:
    keys: a Key or sequence of Keys

  Returns:
    A (list of Keys, bool) tuple. See NormalizeAndTypeCheck.

  Raises:
    BadArgumentError: arg is not an instance or sequence of one of the given
      types.
  """
  # Accept raw strings, Entities, and Keys; every element is then coerced
  # to a complete Key (or an error is raised).
  normalized, is_sequence = NormalizeAndTypeCheck(
      keys, (basestring, Entity, Key))
  complete_keys = [_GetCompleteKeyOrError(item) for item in normalized]
  return (complete_keys, is_sequence)
# NOTE(review): this extraction is missing original lines 200-204, 206,
# 213-214, 217-219 and 224 -- by the numbering gaps these presumably carry
# the `if rpc is not None:` guard before the isinstance check, the
# "unexpected kwargs" condition before line 215, and the convert_rpc
# branching around lines 220-224. Confirm against upstream before editing.
183 def _GetConfigFromKwargs(kwargs
, convert_rpc
=False,
184 config_class
=datastore_rpc
.Configuration
):
185 """Get a Configuration object from the keyword arguments.
187 This is purely an internal helper for the various public APIs below
191 kwargs: A dict containing the keyword arguments passed to a public API.
192 convert_rpc: If the an rpc should be converted or passed on directly.
193 config_class: The config class that should be generated.
196 A UserRPC instance, or a Configuration instance, or None.
199 TypeError if unexpected keyword arguments are present.
205 rpc
= kwargs
.pop('rpc', None)
207 if not isinstance(rpc
, apiproxy_stub_map
.UserRPC
):
208 raise datastore_errors
.BadArgumentError(
209 'rpc= argument should be None or a UserRPC instance')
# rpc= and config= are mutually exclusive.
210 if 'config' in kwargs
:
211 raise datastore_errors
.BadArgumentError(
212 'Expected rpc= or config= argument but not both')
215 raise datastore_errors
.BadArgumentError(
216 'Unexpected keyword arguments: %s' % ', '.join(kwargs
))
# Convert a legacy UserRPC into a Configuration, carrying over its
# deadline and (optional) read_policy on top of the current connection's
# configuration.
220 read_policy
= getattr(rpc
, 'read_policy', None)
221 kwargs
['config'] = datastore_rpc
.Configuration(
222 deadline
=rpc
.deadline
, read_policy
=read_policy
,
223 config
=_GetConnection().config
)
225 return config_class(**kwargs
)
228 class _BaseIndex(object):
231 BUILDING
, SERVING
, DELETING
, ERROR
= range(4)
234 ASCENDING
= datastore_query
.PropertyOrder
.ASCENDING
235 DESCENDING
= datastore_query
.PropertyOrder
.DESCENDING
237 def __init__(self
, index_id
, kind
, has_ancestor
, properties
):
238 """Construct a datastore index instance.
241 index_id: Required long; Uniquely identifies the index
242 kind: Required string; Specifies the kind of the entities to index
243 has_ancestor: Required boolean; indicates if the index supports a query
244 that filters entities by the entity group parent
245 properties: Required list of (string, int) tuples; The entity properties
246 to index. First item in a tuple is the property name and the second
247 item is the sorting direction (ASCENDING|DESCENDING).
248 The order of the properties is based on the order in the index.
250 argument_error
= datastore_errors
.BadArgumentError
251 datastore_types
.ValidateInteger(index_id
, 'index_id', argument_error
,
253 datastore_types
.ValidateString(kind
, 'kind', argument_error
, empty_ok
=True)
254 if not isinstance(properties
, (list, tuple)):
255 raise argument_error('properties must be a list or a tuple')
256 for idx
, index_property
in enumerate(properties
):
257 if not isinstance(index_property
, (list, tuple)):
258 raise argument_error('property[%d] must be a list or a tuple' % idx
)
259 if len(index_property
) != 2:
260 raise argument_error('property[%d] length should be 2 but was %d' %
261 (idx
, len(index_property
)))
262 datastore_types
.ValidateString(index_property
[0], 'property name',
264 _BaseIndex
.__ValidateEnum
(index_property
[1],
265 (self
.ASCENDING
, self
.DESCENDING
),
267 self
.__id
= long(index_id
)
269 self
.__has
_ancestor
= bool(has_ancestor
)
270 self
.__properties
= properties
273 def __ValidateEnum(value
, accepted_values
, name
='value',
274 exception
=datastore_errors
.BadArgumentError
):
275 datastore_types
.ValidateInteger(value
, name
, exception
)
276 if not value
in accepted_values
:
277 raise exception('%s should be one of %s but was %d' %
278 (name
, str(accepted_values
), value
))
281 """Returns the index id, a long."""
285 """Returns the index kind, a string. Empty string ('') if none."""
288 def _HasAncestor(self
):
289 """Indicates if this is an ancestor index, a boolean."""
290 return self
.__has
_ancestor
292 def _Properties(self
):
293 """Returns the index properties. a tuple of
294 (index name as a string, [ASCENDING|DESCENDING]) tuples.
296 return self
.__properties
298 def __eq__(self
, other
):
299 return self
.__id
== other
.__id
301 def __ne__(self
, other
):
302 return self
.__id
!= other
.__id
305 return hash(self
.__id
)
# Public facade over _BaseIndex: re-exposes the protected accessors under
# their public names.
# NOTE(review): original lines 310-311 are missing from this extraction --
# presumably the `Id` alias for _BaseIndex._Id; confirm against upstream.
308 class Index(_BaseIndex
):
309 """A datastore index."""
312 Kind
= _BaseIndex
._Kind
313 HasAncestor
= _BaseIndex
._HasAncestor
314 Properties
= _BaseIndex
._Properties
317 class DatastoreAdapter(datastore_rpc
.AbstractAdapter
):
318 """Adapter between datatypes defined here (Entity etc.) and protobufs.
320 See the base class in datastore_rpc.py for more docs.
324 index_state_mappings
= {
325 entity_pb
.CompositeIndex
.ERROR
: Index
.ERROR
,
326 entity_pb
.CompositeIndex
.DELETED
: Index
.DELETING
,
327 entity_pb
.CompositeIndex
.READ_WRITE
: Index
.SERVING
,
328 entity_pb
.CompositeIndex
.WRITE_ONLY
: Index
.BUILDING
332 index_direction_mappings
= {
333 entity_pb
.Index_Property
.ASCENDING
: Index
.ASCENDING
,
334 entity_pb
.Index_Property
.DESCENDING
: Index
.DESCENDING
337 def key_to_pb(self
, key
):
338 return key
._Key
__reference
340 def pb_to_key(self
, pb
):
341 return Key
._FromPb
(pb
)
343 def entity_to_pb(self
, entity
):
344 return entity
._ToPb
()
346 def pb_to_entity(self
, pb
):
347 return Entity
._FromPb
(pb
)
349 def pb_to_index(self
, pb
):
350 index_def
= pb
.definition()
351 properties
= [(property.name().decode('utf-8'),
352 DatastoreAdapter
.index_direction_mappings
.get(property.direction()))
353 for property in index_def
.property_list()]
354 index
= Index(pb
.id(), index_def
.entity_type().decode('utf-8'),
355 index_def
.ancestor(), properties
)
356 state
= DatastoreAdapter
.index_state_mappings
.get(pb
.state())
# Module-level connection state: one shared adapter, plus a per-thread
# stack of datastore connections (see _Push/_PopConnection below).
# NOTE(review): `threading` and `os` are used here and below but their
# import lines are not visible in this extraction -- confirm they are
# imported at the top of the file.
360 _adapter
= DatastoreAdapter()
361 _thread_local
= threading
.local()
# Environment flag marking that the connection stack has been initialized
# for the current request (checked in __InitConnection).
364 _ENV_KEY
= '__DATASTORE_CONNECTION_INITIALIZED__'
# NOTE(review): original lines 369-380, 382 and 384 are missing from this
# extraction. As shown, the guard at line 381 looks inverted -- line 382
# was presumably an early `return` when the stack is already initialized,
# with lines 383/385 (re)creating the stack otherwise. Confirm against
# upstream before editing.
367 def __InitConnection():
368 """Internal method to make sure the connection state has been initialized."""
381 if os
.getenv(_ENV_KEY
) and hasattr(_thread_local
, 'connection_stack'):
# (Re)create the per-thread stack with a single default connection.
383 _thread_local
.connection_stack
= [datastore_rpc
.Connection(adapter
=_adapter
)]
# Mark initialization in the environment so later calls can detect it.
385 os
.environ
[_ENV_KEY
] = '1'
# NOTE(review): original line 390 is missing from this extraction --
# presumably a call to __InitConnection() before the stack is read.
388 def _GetConnection():
389 """Internal method to retrieve a datastore connection local to the thread."""
# The top of the per-thread stack is the active connection.
391 return _thread_local
.connection_stack
[-1]
# NOTE(review): original line 396 is missing from this extraction --
# presumably a call to __InitConnection() before the stack is written.
394 def _SetConnection(connection
):
395 """Internal method to replace the current thread local connection."""
# Overwrites the top of the per-thread stack in place.
397 _thread_local
.connection_stack
[-1] = connection
# NOTE(review): original line 406 is missing from this extraction --
# presumably a call to __InitConnection() before the stack is appended to.
400 def _PushConnection(new_connection
):
401 """Internal method to save the current connection and sets a new one.
404 new_connection: The connection to set.
407 _thread_local
.connection_stack
.append(new_connection
)
# NOTE(review): original line 416 is missing from this extraction.
410 def _PopConnection():
411 """Internal method to restores the previous connection.
414 The current connection.
# Popping must never empty the stack: the bottom entry is the default
# connection created by __InitConnection.
417 assert len(_thread_local
.connection_stack
) >= 2
418 return _thread_local
.connection_stack
.pop()
# NOTE(review): original line 447 is missing from this extraction --
# presumably `return rpc.response` (the docstring promises a returned
# response protocol buffer). Confirm against upstream before editing.
424 def _MakeSyncCall(service
, call
, request
, response
, config
=None):
425 """The APIProxy entry point for a synchronous API call.
428 service: For backwards compatibility, must be 'datastore_v3'.
429 call: String representing which function to call.
430 request: Protocol buffer for the request.
431 response: Protocol buffer for the response.
432 config: Optional Configuration to use for this request.
435 Response protocol buffer. Caller should always use returned value
436 which may or may not be same as passed in 'response'.
439 apiproxy_errors.Error or a subclass.
441 conn
= _GetConnection()
# Queries additionally carry a read policy on the request itself.
442 if isinstance(request
, datastore_pb
.Query
):
443 conn
._set
_request
_read
_policy
(request
, config
)
# Attach the current transaction (if any) to the outgoing request.
444 conn
._set
_request
_transaction
(request
)
445 rpc
= conn
._make
_rpc
_call
(config
, call
, request
, response
)
# Blocks until the RPC completes and raises on failure.
446 conn
.check_rpc_success(rpc
)
# NOTE(review): original lines 476 and 483-484 are missing from this
# extraction -- presumably `config = None` before the deadline branch and
# the final `return rpc`. Confirm against upstream before editing.
450 def CreateRPC(service
='datastore_v3',
451 deadline
=None, callback
=None, read_policy
=None):
452 """Create an rpc for use in configuring datastore calls.
454 NOTE: This functions exists for backwards compatibility. Please use
455 CreateConfig() instead. NOTE: the latter uses 'on_completion',
456 which is a function taking an argument, wherease CreateRPC uses
457 'callback' which is a function without arguments.
460 service: Optional string; for backwards compatibility, must be
462 deadline: Optional int or float, deadline for calls in seconds.
463 callback: Optional callable, a callback triggered when this rpc
464 completes; takes no arguments.
465 read_policy: Optional read policy; set to EVENTUAL_CONSISTENCY to
466 enable eventually consistent reads (i.e. reads that may be
467 satisfied from an older version of the datastore in some cases).
468 The default read policy may have to wait until in-flight
469 transactions are committed.
474 assert service
== 'datastore_v3'
475 conn
= _GetConnection()
# Only build a Configuration when a deadline was actually supplied.
477 if deadline
is not None:
478 config
= datastore_rpc
.Configuration(deadline
=deadline
)
479 rpc
= conn
._create
_rpc
(config
)
480 rpc
.callback
= callback
481 if read_policy
is not None:
482 rpc
.read_policy
= read_policy
def CreateConfig(**kwds):
  """Create a Configuration object for use in configuring datastore calls.

  This configuration can be passed to most datastore calls using the
  'config=...' argument.

  Args:
    deadline: Optional deadline; default None (which means the
      system default deadline will be used, typically 5 seconds).
    on_completion: Optional callback function; default None. If
      specified, it will be called with a UserRPC object as argument
      when an RPC completes.
    read_policy: Optional read policy; set to EVENTUAL_CONSISTENCY to
      enable eventually consistent reads (i.e. reads that may be
      satisfied from an older version of the datastore in some cases).
      The default read policy may have to wait until in-flight
      transactions are committed.
    **kwds: Other keyword arguments as long as they are supported by
      datastore_rpc.Configuration().

  Returns:
    A datastore_rpc.Configuration instance.
  """
  # All keyword validation is delegated to the Configuration constructor.
  config = datastore_rpc.Configuration(**kwds)
  return config
def CreateTransactionOptions(**kwds):
  """Create a configuration object for use in configuring transactions.

  This configuration can be passed as run_in_transaction_option's first
  argument.

  Args:
    deadline: Optional deadline; default None (which means the
      system default deadline will be used, typically 5 seconds).
    on_completion: Optional callback function; default None. If
      specified, it will be called with a UserRPC object as argument
      when an RPC completes.
    xg: set to true to allow cross-group transactions (high replication
      datastore only)
    retries: set the number of retries for a transaction
    **kwds: Other keyword arguments as long as they are supported by
      datastore_rpc.TransactionOptions().

  Returns:
    A datastore_rpc.TransactionOptions instance.
  """
  # All keyword validation is delegated to TransactionOptions itself.
  options = datastore_rpc.TransactionOptions(**kwds)
  return options
536 def PutAsync(entities
, **kwargs
):
537 """Asynchronously store one or more entities in the datastore.
539 Identical to datastore.Put() except returns an asynchronous object. Call
540 get_result() on the return value to block on the call and get the results.
542 extra_hook
= kwargs
.pop('extra_hook', None)
543 config
= _GetConfigFromKwargs(kwargs
)
544 if getattr(config
, 'read_policy', None) == EVENTUAL_CONSISTENCY
:
545 raise datastore_errors
.BadRequestError(
546 'read_policy is only supported on read operations.')
547 entities
, multiple
= NormalizeAndTypeCheck(entities
, Entity
)
549 for entity
in entities
:
550 if entity
.is_projection():
551 raise datastore_errors
.BadRequestError(
552 'Cannot put a partial entity: %s' % entity
)
553 if not entity
.kind() or not entity
.app():
554 raise datastore_errors
.BadRequestError(
555 'App and kind must not be empty, in entity: %s' % entity
)
557 def local_extra_hook(keys
):
559 num_entities
= len(entities
)
560 if num_keys
!= num_entities
:
561 raise datastore_errors
.InternalError(
562 'Put accepted %d entities but returned %d keys.' %
563 (num_entities
, num_keys
))
565 for entity
, key
in zip(entities
, keys
):
566 if entity
._Entity
__key
._Key
__reference
!= key
._Key
__reference
:
567 assert not entity
._Entity
__key
.has_id_or_name()
568 entity
._Entity
__key
._Key
__reference
.CopyFrom(key
._Key
__reference
)
576 return extra_hook(result
)
579 return _GetConnection().async_put(config
, entities
, local_extra_hook
)
def Put(entities, **kwargs):
  """Store one or more entities in the datastore.

  The entities may be new or previously existing. For new entities, Put() will
  fill in the app id and key assigned by the datastore.

  If the argument is a single Entity, a single Key will be returned. If the
  argument is a list of Entity, a list of Keys will be returned.

  Args:
    entities: Entity or list of Entities
    config: Optional Configuration to use for this request, must be specified
      as a keyword argument.

  Raises:
    TransactionFailedError, if the Put could not be committed.
  """
  # Synchronous facade: issue the async call, then block on its result.
  async_result = PutAsync(entities, **kwargs)
  return async_result.get_result()
# NOTE(review): original lines 614, 616-618 and 621-625 are missing from
# this extraction -- by the numbering gaps these presumably branch on
# `multiple` inside local_extra_hook (returning the whole list for
# sequence input, unwrapping entities[0] otherwise) and guard the
# extra_hook call. Confirm against upstream before editing.
605 def GetAsync(keys
, **kwargs
):
606 """Asynchronously retrieves one or more entities from the datastore.
608 Identical to datastore.Get() except returns an asynchronous object. Call
609 get_result() on the return value to block on the call and get the results.
611 extra_hook
= kwargs
.pop('extra_hook', None)
612 config
= _GetConfigFromKwargs(kwargs
)
613 keys
, multiple
= NormalizeAndTypeCheckKeys(keys
)
615 def local_extra_hook(entities
):
# Single-key lookups raise when the entity does not exist.
619 if entities
[0] is None:
620 raise datastore_errors
.EntityNotFoundError()
623 return extra_hook(result
)
626 return _GetConnection().async_get(config
, keys
, local_extra_hook
)
def Get(keys, **kwargs):
  """Retrieves one or more entities from the datastore.

  Retrieves the entity or entities with the given key(s) from the datastore
  and returns them as fully populated Entity objects, as defined below. If
  there is an error, raises a subclass of datastore_errors.Error.

  If keys is a single key or string, an Entity will be returned, or
  EntityNotFoundError will be raised if no existing entity matches the key.

  However, if keys is a list or tuple, a list of entities will be returned
  that corresponds to the sequence of keys. It will include entities for keys
  that were found and None placeholders for keys that were not found.

  Args:
    keys: Key or string or list of Keys or strings
    config: Optional Configuration to use for this request, must be specified
      as a keyword argument.

  Returns:
    Entity or list of Entity objects
  """
  # Synchronous facade: issue the async call, then block on its result.
  async_result = GetAsync(keys, **kwargs)
  return async_result.get_result()
# NOTE(review): original lines 663 and 665-666 are missing from this
# extraction -- presumably an `if extra_hook:` guard and the plain return
# path inside local_extra_hook. Confirm against upstream before editing.
653 def GetIndexesAsync(**kwargs
):
654 """Asynchronously retrieves the application indexes and their states.
656 Identical to GetIndexes() except returns an asynchronous object. Call
657 get_result() on the return value to block on the call and get the results.
659 extra_hook
= kwargs
.pop('extra_hook', None)
660 config
= _GetConfigFromKwargs(kwargs
)
662 def local_extra_hook(result
):
664 return extra_hook(result
)
667 return _GetConnection().async_get_indexes(config
, local_extra_hook
)
def GetIndexes(**kwargs):
  """Retrieves the application indexes and their states.

  Args:
    config: Optional Configuration to use for this request, must be specified
      as a keyword argument.

  Returns:
    A list of (Index, Index.[BUILDING|SERVING|DELETING|ERROR]) tuples.
    An index can be in the following states:
      Index.BUILDING: Index is being built and therefore can not serve queries
      Index.SERVING: Index is ready to service queries
      Index.DELETING: Index is being deleted
      Index.ERROR: Index encountered an error in the BUILDING state
  """
  # Synchronous facade: issue the async call, then block on its result.
  async_result = GetIndexesAsync(**kwargs)
  return async_result.get_result()
def DeleteAsync(keys, **kwargs):
  """Asynchronously deletes one or more entities from the datastore.

  Identical to datastore.Delete() except returns an asynchronous object. Call
  get_result() on the return value to block on the call.
  """
  config = _GetConfigFromKwargs(kwargs)
  # Deletes are writes, so an eventually-consistent read policy is invalid.
  read_policy = getattr(config, 'read_policy', None)
  if read_policy == EVENTUAL_CONSISTENCY:
    raise datastore_errors.BadRequestError(
        'read_policy is only supported on read operations.')
  normalized_keys, _ = NormalizeAndTypeCheckKeys(keys)
  conn = _GetConnection()
  return conn.async_delete(config, normalized_keys)
def Delete(keys, **kwargs):
  """Deletes one or more entities from the datastore. Use with care!

  Deletes the given entity(ies) from the datastore. You can only delete
  entities from your app. If there is an error, raises a subclass of
  datastore_errors.Error.

  Args:
    # the primary key(s) of the entity(ies) to delete
    keys: Key or string or list of Keys or strings
    config: Optional Configuration to use for this request, must be specified
      as a keyword argument.

  Raises:
    TransactionFailedError, if the Delete could not be committed.
  """
  # Synchronous facade: issue the async call, then block on its result.
  async_result = DeleteAsync(keys, **kwargs)
  return async_result.get_result()
722 """A datastore entity.
724 Includes read-only accessors for app id, kind, and primary key. Also
725 provides dictionary-style access to properties.
731 def __init__(self
, kind
, parent
=None, _app
=None, name
=None, id=None,
732 unindexed_properties
=[], namespace
=None, **kwds
):
733 """Constructor. Takes the kind and transaction root, which cannot be
734 changed after the entity is constructed, and an optional parent. Raises
735 BadArgumentError or BadKeyError if kind is invalid or parent is not an
736 existing Entity or Key in the datastore.
741 # if provided, this entity's parent. Its key must be complete.
742 parent: Entity or Key
743 # if provided, this entity's name.
745 # if provided, this entity's id.
747 # if provided, a sequence of property names that should not be indexed
748 # by the built-in single property indices.
749 unindexed_properties: list or tuple of strings
751 # if provided, overrides the default namespace_manager setting.
758 ref
= entity_pb
.Reference()
759 _app
= datastore_types
.ResolveAppId(_app
)
762 _namespace
= kwds
.pop('_namespace', None)
765 raise datastore_errors
.BadArgumentError(
766 'Excess keyword arguments ' + repr(kwds
))
771 if namespace
is None:
772 namespace
= _namespace
773 elif _namespace
is not None:
774 raise datastore_errors
.BadArgumentError(
775 "Must not set both _namespace and namespace parameters.")
777 datastore_types
.ValidateString(kind
, 'kind',
778 datastore_errors
.BadArgumentError
)
780 if parent
is not None:
781 parent
= _GetCompleteKeyOrError(parent
)
782 if _app
!= parent
.app():
783 raise datastore_errors
.BadArgumentError(
784 " %s doesn't match parent's app %s" %
785 (_app
, parent
.app()))
788 if namespace
is None:
789 namespace
= parent
.namespace()
790 elif namespace
!= parent
.namespace():
791 raise datastore_errors
.BadArgumentError(
792 " %s doesn't match parent's namespace %s" %
793 (namespace
, parent
.namespace()))
794 ref
.CopyFrom(parent
._Key
__reference
)
796 namespace
= datastore_types
.ResolveNamespace(namespace
)
797 datastore_types
.SetNamespace(ref
, namespace
)
799 last_path
= ref
.mutable_path().add_element()
800 last_path
.set_type(kind
.encode('utf-8'))
802 if name
is not None and id is not None:
803 raise datastore_errors
.BadArgumentError(
804 "Cannot set both name and id on an Entity")
808 datastore_types
.ValidateString(name
, 'name')
809 last_path
.set_name(name
.encode('utf-8'))
812 datastore_types
.ValidateInteger(id, 'id')
815 self
.set_unindexed_properties(unindexed_properties
)
817 self
.__key
= Key
._FromPb
(ref
)
820 """Returns the name of the application that created this entity, a
821 string or None if not set.
823 return self
.__key
.app()
826 """Returns the namespace of this entity, a string or None."""
827 return self
.__key
.namespace()
830 """Returns this entity's kind, a string."""
831 return self
.__key
.kind()
834 """Returns if this entity has been saved to the datastore."""
835 last_path
= self
.__key
._Key
__reference
.path().element_list()[-1]
836 return ((last_path
.has_name() ^ last_path
.has_id()) and
837 self
.__key
.has_id_or_name())
839 def is_projection(self
):
840 """Returns if this entity is a projection from full entity.
843 - may not contain all properties from the original entity;
844 - only contain single values for lists;
845 - may not contain values with the same type as the original entity.
847 return self
.__projection
850 """Returns this entity's primary key, a Key instance."""
854 """Returns this entity's parent, as a Key. If this entity has no parent,
857 return self
.key().parent()
859 def entity_group(self
):
860 """Returns this entity's entity group as a Key.
862 Note that the returned Key will be incomplete if this is a a root entity
863 and its key is incomplete.
865 return self
.key().entity_group()
867 def unindexed_properties(self
):
868 """Returns this entity's unindexed properties, as a frozenset of strings."""
870 return getattr(self
, '_Entity__unindexed_properties', [])
872 def set_unindexed_properties(self
, unindexed_properties
):
874 unindexed_properties
, multiple
= NormalizeAndTypeCheck(unindexed_properties
, basestring
)
876 raise datastore_errors
.BadArgumentError(
877 'unindexed_properties must be a sequence; received %s (a %s).' %
878 (unindexed_properties
, typename(unindexed_properties
)))
879 for prop
in unindexed_properties
:
880 datastore_types
.ValidateProperty(prop
, None)
881 self
.__unindexed
_properties
= frozenset(unindexed_properties
)
883 def __setitem__(self
, name
, value
):
884 """Implements the [] operator. Used to set property value(s).
886 If the property name is the empty string or not a string, raises
887 BadPropertyError. If the value is not a supported type, raises
891 datastore_types
.ValidateProperty(name
, value
)
892 dict.__setitem
__(self
, name
, value
)
894 def setdefault(self
, name
, value
):
895 """If the property exists, returns its value. Otherwise sets it to value.
897 If the property name is the empty string or not a string, raises
898 BadPropertyError. If the value is not a supported type, raises
902 datastore_types
.ValidateProperty(name
, value
)
903 return dict.setdefault(self
, name
, value
)
905 def update(self
, other
):
906 """Updates this entity's properties from the values in other.
908 If any property name is the empty string or not a string, raises
909 BadPropertyError. If any value is not a supported type, raises
912 for name
, value
in other
.items():
913 self
.__setitem
__(name
, value
)
916 """The copy method is not supported.
918 raise NotImplementedError('Entity does not support the copy() method.')
921 """Returns an XML representation of this entity. Atom and gd:namespace
922 properties are converted to XML according to their respective schemas. For
923 more information, see:
925 http://www.atomenabled.org/developers/syndication/
926 http://code.google.com/apis/gdata/common-elements.html
928 This is *not* optimized. It shouldn't be used anywhere near code that's
929 performance-critical.
932 xml
= u
'<entity kind=%s' % saxutils
.quoteattr(self
.kind())
933 if self
.__key
.has_id_or_name():
934 xml
+= ' key=%s' % saxutils
.quoteattr(str(self
.__key
))
936 if self
.__key
.has_id_or_name():
937 xml
+= '\n <key>%s</key>' % self
.__key
.ToTagUri()
942 properties
= self
.keys()
945 xml
+= '\n ' + '\n '.join(self
._PropertiesToXml
(properties
))
948 xml
+= '\n</entity>\n'
951 def _PropertiesToXml(self
, properties
):
952 """ Returns a list of the XML representations of each of the given
953 properties. Ignores properties that don't exist in this entity.
956 properties: string or list of strings
963 for propname
in properties
:
964 if not self
.has_key(propname
):
967 propname_xml
= saxutils
.quoteattr(propname
)
969 values
= self
[propname
]
970 if not isinstance(values
, list):
973 proptype
= datastore_types
.PropertyTypeName(values
[0])
974 proptype_xml
= saxutils
.quoteattr(proptype
)
976 escaped_values
= self
._XmlEscapeValues
(propname
)
977 open_tag
= u
'<property name=%s type=%s>' % (propname_xml
, proptype_xml
)
978 close_tag
= u
'</property>'
979 xml_properties
+= [open_tag
+ val
+ close_tag
for val
in escaped_values
]
981 return xml_properties
983 def _XmlEscapeValues(self
, property):
984 """ Returns a list of the XML-escaped string values for the given property.
985 Raises an AssertionError if the property doesn't exist.
993 assert self
.has_key(property)
996 values
= self
[property]
997 if not isinstance(values
, list):
1001 if hasattr(val
, 'ToXml'):
1002 xml
.append(val
.ToXml())
1007 xml
.append(saxutils
.escape(unicode(val
)))
1012 """Converts this Entity to its protocol buffer representation.
1017 return self
._ToPb
(False)
1019 def _ToPb(self
, mark_key_as_saved
=True):
1020 """Converts this Entity to its protocol buffer representation. Not
1021 intended to be used by application developers.
1031 pb
= entity_pb
.EntityProto()
1032 pb
.mutable_key().CopyFrom(self
.key()._ToPb
())
1033 last_path
= pb
.key().path().element_list()[-1]
1035 if mark_key_as_saved
and last_path
.has_name() and last_path
.has_id():
1036 last_path
.clear_id()
1039 group
= pb
.mutable_entity_group()
1040 if self
.__key
.has_id_or_name():
1041 root
= pb
.key().path().element(0)
1042 group
.add_element().CopyFrom(root
)
1045 properties
= self
.items()
1047 for (name
, values
) in properties
:
1048 properties
= datastore_types
.ToPropertyPb(name
, values
)
1049 if not isinstance(properties
, list):
1050 properties
= [properties
]
1052 for prop
in properties
:
1053 if ((prop
.has_meaning() and
1054 prop
.meaning() in datastore_types
._RAW
_PROPERTY
_MEANINGS
) or
1055 name
in self
.unindexed_properties()):
1056 pb
.raw_property_list().append(prop
)
1058 pb
.property_list().append(prop
)
1061 if pb
.property_size() > _MAX_INDEXED_PROPERTIES
:
1062 raise datastore_errors
.BadRequestError(
1063 'Too many indexed properties for entity %r.' % self
.key())
1068 def FromPb(pb
, validate_reserved_properties
=True,
1069 default_kind
='<not specified>'):
1070 """Static factory method. Returns the Entity representation of the
1071 given protocol buffer (datastore_pb.Entity).
1074 pb: datastore_pb.Entity or str encoding of a datastore_pb.Entity
1075 validate_reserved_properties: deprecated
1076 default_kind: str, the kind to use if the pb has no key.
1079 Entity: the Entity representation of pb
1082 if isinstance(pb
, str):
1083 real_pb
= entity_pb
.EntityProto()
1084 real_pb
.ParsePartialFromString(pb
)
1087 return Entity
._FromPb
(
1088 pb
, require_valid_key
=False, default_kind
=default_kind
)
1091 def _FromPb(pb
, require_valid_key
=True, default_kind
='<not specified>'):
1092 """Static factory method. Returns the Entity representation of the
1093 given protocol buffer (datastore_pb.Entity). Not intended to be used by
1094 application developers.
1096 The Entity PB's key must be complete. If it isn't, an AssertionError is
1100 # a protocol buffer Entity
1101 pb: datastore_pb.Entity
1102 default_kind: str, the kind to use if the pb has no key.
1105 # the Entity representation of the argument
1109 if not pb
.key().path().element_size():
1110 pb
.mutable_key().CopyFrom(Key
.from_path(default_kind
, 0)._ToPb
())
1112 last_path
= pb
.key().path().element_list()[-1]
1113 if require_valid_key
:
1114 assert last_path
.has_id() ^ last_path
.has_name()
1115 if last_path
.has_id():
1116 assert last_path
.id() != 0
1118 assert last_path
.has_name()
1119 assert last_path
.name()
1122 unindexed_properties
= [unicode(p
.name(), 'utf-8')
1123 for p
in pb
.raw_property_list()]
1126 if pb
.key().has_name_space():
1127 namespace
= pb
.key().name_space()
1130 e
= Entity(unicode(last_path
.type(), 'utf-8'),
1131 unindexed_properties
=unindexed_properties
,
1132 _app
=pb
.key().app(), namespace
=namespace
)
1133 ref
= e
.__key
._Key
__reference
1134 ref
.CopyFrom(pb
.key())
1138 temporary_values
= {}
1140 for prop_list
in (pb
.property_list(), pb
.raw_property_list()):
1141 for prop
in prop_list
:
1142 if prop
.meaning() == entity_pb
.Property
.INDEX_VALUE
:
1143 e
.__projection
= True
1145 value
= datastore_types
.FromPropertyPb(prop
)
1146 except (AssertionError, AttributeError, TypeError, ValueError), e
:
1147 raise datastore_errors
.Error(
1148 'Property %s is corrupt in the datastore:\n%s' %
1149 (prop
.name(), traceback
.format_exc()))
1151 multiple
= prop
.multiple()
1156 cur_value
= temporary_values
.get(name
)
1157 if cur_value
is None:
1158 temporary_values
[name
] = value
1159 elif not multiple
or not isinstance(cur_value
, list):
1160 raise datastore_errors
.Error(
1161 'Property %s is corrupt in the datastore; it has multiple '
1162 'values, but is not marked as multiply valued.' % name
)
1164 cur_value
.extend(value
)
1168 for name
, value
in temporary_values
.iteritems():
1169 decoded_name
= unicode(name
, 'utf-8')
1174 datastore_types
.ValidateReadProperty(decoded_name
, value
)
1176 dict.__setitem
__(e
, decoded_name
, value
)
1182 """A datastore query.
1184 (Instead of this, consider using appengine.ext.gql.Query! It provides a
1185 query language interface on top of the same functionality.)
1187 Queries are used to retrieve entities that match certain criteria, including
1188 app id, kind, and property filters. Results may also be sorted by properties.
1190 App id and kind are required. Only entities from the given app, of the given
1191 type, are returned. If an ancestor is set, with Ancestor(), only entities
1192 with that ancestor are returned.
1194 Property filters are used to provide criteria based on individual property
1195 values. A filter compares a specific property in each entity to a given
1196 value or list of possible values.
1198 An entity is returned if its property values match *all* of the query's
1199 filters. In other words, filters are combined with AND, not OR. If an
1200 entity does not have a value for a property used in a filter, it is not
1203 Property filters map filter strings of the form '<property name> <operator>'
1204 to filter values. Use dictionary accessors to set property filters, like so:
1206 > query = Query('Person')
1207 > query['name ='] = 'Ryan'
1208 > query['age >='] = 21
1210 This query returns all Person entities where the name property is 'Ryan'
1211 and the age property is at least 21.
1213 Another way to build this query is:
1215 > query = Query('Person')
1216 > query.update({'name =': 'Ryan', 'age >=': 21})
1218 The supported operators are =, >, <, >=, and <=. Only one inequality
1219 filter may be used per query. Any number of equals filters may be used in
1222 A filter value may be a list or tuple of values. This is interpreted as
1223 multiple filters with the same filter string and different values, all ANDed
1224 together. For example, this query returns everyone with the tags "google"
1227 > Query('Person', {'tag =': ('google', 'app engine')})
1229 Result entities can be returned in different orders. Use the Order()
1230 method to specify properties that results will be sorted by, and in which
1233 Note that filters and orderings may be provided at any time before the query
1234 is run. When the query is fully specified, Run() runs the query and returns
1235 an iterator. The query results can be accessed through the iterator.
1237 A query object may be reused after it's been run. Its filters and
1238 orderings can be changed to create a modified query.
1240 If you know how many result entities you need, use Get() to fetch them:
1242 > query = Query('Person', {'age >': 21})
1243 > for person in query.Get(4):
1244 > print 'I have four pints left. Have one on me, %s!' % person['name']
1246 If you don't know how many results you need, or if you need them all, you
1247 can get an iterator over the results by calling Run():
1249 > for person in Query('Person', {'age >': 21}).Run():
1250 > print 'Have a pint on me, %s!' % person['name']
1252 Get() is more efficient than Run(), so use Get() whenever possible.
1254 Finally, the Count() method returns the number of result entities matched by
1255 the query. The returned count is cached; successive Count() calls will not
1256 re-scan the datastore unless the query is changed.
1259 ASCENDING
= datastore_query
.PropertyOrder
.ASCENDING
1260 DESCENDING
= datastore_query
.PropertyOrder
.DESCENDING
1263 ORDER_FIRST
= datastore_query
.QueryOptions
.ORDER_FIRST
1264 ANCESTOR_FIRST
= datastore_query
.QueryOptions
.ANCESTOR_FIRST
1265 FILTER_FIRST
= datastore_query
.QueryOptions
.FILTER_FIRST
1268 OPERATORS
= {'==': datastore_query
.PropertyFilter
._OPERATORS
['=']}
1269 OPERATORS
.update(datastore_query
.PropertyFilter
._OPERATORS
)
1271 INEQUALITY_OPERATORS
= datastore_query
.PropertyFilter
._INEQUALITY
_OPERATORS
1273 UPPERBOUND_INEQUALITY_OPERATORS
= frozenset(['<', '<='])
1274 FILTER_REGEX
= re
.compile(
1275 '^\s*([^\s]+)(\s+(%s)\s*)?$' % '|'.join(OPERATORS
),
1276 re
.IGNORECASE | re
.UNICODE
)
1282 __ancestor_pb
= None
1286 __index_list_source
= None
1287 __cursor_source
= None
1288 __compiled_query_source
= None
1293 __filter_order
= None
1294 __filter_counter
= 0
1297 __inequality_prop
= None
1298 __inequality_count
= 0
1300 def __init__(self
, kind
=None, filters
={}, _app
=None, keys_only
=False,
1301 compile=True, cursor
=None, namespace
=None, end_cursor
=None,
1302 projection
=None, distinct
=None, _namespace
=None):
1305 Raises BadArgumentError if kind is not a string. Raises BadValueError or
1306 BadFilterError if filters is not a dictionary of valid filters.
1309 namespace: string, the namespace to query.
1310 kind: string, the kind of entities to query, or None.
1311 filters: dict, initial set of filters.
1312 keys_only: boolean, if keys should be returned instead of entities.
1313 projection: iterable of property names to project.
1314 distinct: boolean, if projection should be distinct.
1315 compile: boolean, if the query should generate cursors.
1316 cursor: datastore_query.Cursor, the start cursor to use.
1317 end_cursor: datastore_query.Cursor, the end cursor to use.
1318 _namespace: deprecated, use namespace instead.
1328 if namespace
is None:
1329 namespace
= _namespace
1330 elif _namespace
is not None:
1331 raise datastore_errors
.BadArgumentError(
1332 "Must not set both _namespace and namespace parameters.")
1334 if kind
is not None:
1335 datastore_types
.ValidateString(kind
, 'kind',
1336 datastore_errors
.BadArgumentError
)
1339 self
.__orderings
= []
1340 self
.__filter
_order
= {}
1341 self
.update(filters
)
1343 self
.__app
= datastore_types
.ResolveAppId(_app
)
1344 self
.__namespace
= datastore_types
.ResolveNamespace(namespace
)
1347 self
.__query
_options
= datastore_query
.QueryOptions(
1348 keys_only
=keys_only
,
1349 produce_cursors
=compile,
1350 start_cursor
=cursor
,
1351 end_cursor
=end_cursor
,
1352 projection
=projection
)
1355 if not self
.__query
_options
.projection
:
1356 raise datastore_errors
.BadQueryError(
1357 'cannot specify distinct without a projection')
1358 self
.__distinct
= True
1359 self
.__group
_by
= self
.__query
_options
.projection
1361 def Order(self
, *orderings
):
1362 """Specify how the query results should be sorted.
1364 Result entities will be sorted by the first property argument, then by the
1365 second, and so on. For example, this:
1367 > query = Query('Person')
1368 > query.Order('bday', ('age', Query.DESCENDING))
1370 sorts everyone in order of their birthday, starting with January 1.
1371 People with the same birthday are sorted by age, oldest to youngest.
1373 The direction for each sort property may be provided; if omitted, it
1374 defaults to ascending.
1376 Order() may be called multiple times. Each call resets the sort order
1379 If an inequality filter exists in this Query it must be the first property
1380 passed to Order. Any number of sort orders may be used after the
1381 inequality filter property. Without inequality filters, any number of
1382 filters with different orders may be specified.
1384 Entities with multiple values for an order property are sorted by their
1387 Note that a sort order implies an existence filter! In other words,
1388 Entities without the sort order property are filtered out, and *not*
1389 included in the query results.
1391 If the sort order property has different types in different entities - ie,
1392 if bob['id'] is an int and fred['id'] is a string - the entities will be
1393 grouped first by the property type, then sorted within type. No attempt is
1394 made to compare property values across types.
1396 Raises BadArgumentError if any argument is of the wrong format.
1399 # the properties to sort by, in sort order. each argument may be either a
1400 # string or (string, direction) 2-tuple.
1406 orderings
= list(orderings
)
1409 for (order
, i
) in zip(orderings
, range(len(orderings
))):
1410 if not (isinstance(order
, basestring
) or
1411 (isinstance(order
, tuple) and len(order
) in [2, 3])):
1412 raise datastore_errors
.BadArgumentError(
1413 'Order() expects strings or 2- or 3-tuples; received %s (a %s). ' %
1414 (order
, typename(order
)))
1417 if isinstance(order
, basestring
):
1420 datastore_types
.ValidateString(order
[0], 'sort order property',
1421 datastore_errors
.BadArgumentError
)
1425 direction
= order
[-1]
1426 if direction
not in (Query
.ASCENDING
, Query
.DESCENDING
):
1428 raise datastore_errors
.BadArgumentError(
1429 'Order() expects Query.ASCENDING or DESCENDING; received %s' %
1432 direction
= Query
.ASCENDING
1434 if (self
.__kind
is None and
1435 (property != datastore_types
.KEY_SPECIAL_PROPERTY
or
1436 direction
!= Query
.ASCENDING
)):
1437 raise datastore_errors
.BadArgumentError(
1438 'Only %s ascending orders are supported on kindless queries' %
1439 datastore_types
.KEY_SPECIAL_PROPERTY
)
1441 orderings
[i
] = (property, direction
)
1444 if (orderings
and self
.__inequality
_prop
and
1445 orderings
[0][0] != self
.__inequality
_prop
):
1446 raise datastore_errors
.BadArgumentError(
1447 'First ordering property must be the same as inequality filter '
1448 'property, if specified for this query; received %s, expected %s' %
1449 (orderings
[0][0], self
.__inequality
_prop
))
1451 self
.__orderings
= orderings
1454 def Hint(self
, hint
):
1455 """Sets a hint for how this query should run.
1457 The query hint gives us information about how best to execute your query.
1458 Currently, we can only do one index scan, so the query hint should be used
1459 to indicates which index we should scan against.
1461 Use FILTER_FIRST if your first filter will only match a few results. In
1462 this case, it will be most efficient to scan against the index for this
1463 property, load the results into memory, and apply the remaining filters
1464 and sort orders there.
1466 Similarly, use ANCESTOR_FIRST if the query's ancestor only has a few
1467 descendants. In this case, it will be most efficient to scan all entities
1468 below the ancestor and load them into memory first.
1470 Use ORDER_FIRST if the query has a sort order and the result set is large
1471 or you only plan to fetch the first few results. In that case, we
1472 shouldn't try to load all of the results into memory; instead, we should
1473 scan the index for this property, which is in sorted order.
1475 Note that hints are currently ignored in the v3 datastore!
1478 one of datastore.Query.[ORDER_FIRST, ANCESTOR_FIRST, FILTER_FIRST]
1484 if hint
is not self
.__query
_options
.hint
:
1485 self
.__query
_options
= datastore_query
.QueryOptions(
1486 hint
=hint
, config
=self
.__query
_options
)
1489 def Ancestor(self
, ancestor
):
1490 """Sets an ancestor for this query.
1492 This restricts the query to only return result entities that are descended
1493 from a given entity. In other words, all of the results will have the
1494 ancestor as their parent, or parent's parent, or etc.
1496 Raises BadArgumentError or BadKeyError if parent is not an existing Entity
1497 or Key in the datastore.
1500 # the key must be complete
1501 ancestor: Entity or Key
1507 self
.__ancestor
_pb
= _GetCompleteKeyOrError(ancestor
)._ToPb
()
1510 def IsKeysOnly(self
):
1511 """Returns True if this query is keys only, false otherwise."""
1512 return self
.__query
_options
.keys_only
1514 def GetQueryOptions(self
):
1515 """Returns a datastore_query.QueryOptions for the current instance."""
1516 return self
.__query
_options
1519 """Returns a datastore_query.Query for the current instance."""
1520 return datastore_query
.Query(app
=self
.__app
,
1521 namespace
=self
.__namespace
,
1523 ancestor
=self
.__ancestor
_pb
,
1524 filter_predicate
=self
.GetFilterPredicate(),
1525 order
=self
.GetOrder(),
1526 group_by
=self
.__group
_by
)
1529 """Gets a datastore_query.Order for the current instance.
1532 datastore_query.Order or None if there are no sort orders set on the
1537 orders
= [datastore_query
.PropertyOrder(property, direction
)
1538 for property, direction
in self
.__orderings
]
1540 return datastore_query
.CompositeOrder(orders
)
1543 def GetFilterPredicate(self
):
1544 """Returns a datastore_query.FilterPredicate for the current instance.
1547 datastore_query.FilterPredicate or None if no filters are set on the
1551 ordered_filters
= [(i
, f
) for f
, i
in self
.__filter
_order
.iteritems()]
1552 ordered_filters
.sort()
1554 property_filters
= []
1555 for _
, filter_str
in ordered_filters
:
1556 if filter_str
not in self
:
1560 values
= self
[filter_str
]
1561 match
= self
._CheckFilter
(filter_str
, values
)
1562 name
= match
.group(1)
1565 if op
is None or op
== '==':
1569 property_filters
.append(datastore_query
.make_filter(name
, op
, values
))
1571 if property_filters
:
1572 return datastore_query
.CompositeFilter(
1573 datastore_query
.CompositeFilter
.AND
,
1577 def GetDistinct(self
):
1578 """Returns True if the current instance is distinct.
1581 A boolean indicating if the distinct flag is set.
1583 return self
.__distinct
1585 def GetIndexList(self
):
1586 """Get the index list from the last run of this query.
1589 A list of indexes used by the last run of this query.
1592 AssertionError: The query has not yet been run.
1594 index_list_function
= self
.__index
_list
_source
1595 if index_list_function
:
1596 return index_list_function()
1597 raise AssertionError('No index list available because this query has not '
1600 def GetCursor(self
):
1601 """Get the cursor from the last run of this query.
1603 The source of this cursor varies depending on what the last call was:
1604 - Run: A cursor that points immediately after the last result pulled off
1605 the returned iterator.
1606 - Get: A cursor that points immediately after the last result in the
1608 - Count: A cursor that points immediately after the last result counted.
1611 A datastore_query.Cursor object that can be used in subsequent query
1615 AssertionError: The query has not yet been run or cannot be compiled.
1619 cursor_function
= self
.__cursor
_source
1621 cursor
= cursor_function()
1624 raise AssertionError('No cursor available, either this query has not '
1625 'been executed or there is no compilation '
1626 'available for this kind of query')
1628 def GetBatcher(self
, config
=None):
1629 """Runs this query and returns a datastore_query.Batcher.
1631 This is not intended to be used by application developers. Use Get()
1635 config: Optional Configuration to use for this request.
1638 # an iterator that provides access to the query results
1644 query_options
= self
.GetQueryOptions().merge(config
)
1645 if self
.__distinct
and query_options
.projection
!= self
.__group
_by
:
1650 raise datastore_errors
.BadArgumentError(
1651 'cannot override projection when distinct is set')
1652 return self
.GetQuery().run(_GetConnection(), query_options
)
1654 def Run(self
, **kwargs
):
1657 If a filter string is invalid, raises BadFilterError. If a filter value is
1658 invalid, raises BadValueError. If an IN filter is provided, and a sort
1659 order on another property is provided, raises BadQueryError.
1661 If you know in advance how many results you want, use limit=#. It's
1665 kwargs: Any keyword arguments accepted by datastore_query.QueryOptions().
1668 # an iterator that provides access to the query results
1671 config
= _GetConfigFromKwargs(kwargs
, convert_rpc
=True,
1672 config_class
=datastore_query
.QueryOptions
)
1673 itr
= Iterator(self
.GetBatcher(config
=config
))
1675 self
.__index
_list
_source
= itr
.GetIndexList
1677 self
.__cursor
_source
= itr
.cursor
1679 self
.__compiled
_query
_source
= itr
._compiled
_query
1682 def Get(self
, limit
, offset
=0, **kwargs
):
1683 """Deprecated, use list(Run(...)) instead.
1686 limit: int or long representing the maximum number of entities to return.
1687 offset: int or long representing the number of entities to skip
1688 kwargs: Any keyword arguments accepted by datastore_query.QueryOptions().
1691 # a list of entities
1695 kwargs
.setdefault('batch_size', _MAX_INT_32
)
1697 return list(self
.Run(limit
=limit
, offset
=offset
, **kwargs
))
1699 def Count(self
, limit
=1000, **kwargs
):
1700 """Returns the number of entities that this query matches.
1703 limit, a number or None. If there are more results than this, stop short
1704 and just return this number. Providing this argument makes the count
1705 operation more efficient.
1706 config: Optional Configuration to use for this request.
1709 The number of results.
1711 original_offset
= kwargs
.pop('offset', 0)
1713 offset
= _MAX_INT_32
1715 offset
= min(limit
+ original_offset
, _MAX_INT_32
)
1717 kwargs
['offset'] = offset
1718 config
= _GetConfigFromKwargs(kwargs
, convert_rpc
=True,
1719 config_class
=datastore_query
.QueryOptions
)
1721 batch
= self
.GetBatcher(config
=config
).next()
1722 self
.__index
_list
_source
= (
1723 lambda: [index
for index
, state
in batch
.index_list
])
1724 self
.__cursor
_source
= lambda: batch
.cursor(0)
1725 self
.__compiled
_query
_source
= lambda: batch
._compiled
_query
1726 return max(0, batch
.skipped_results
- original_offset
)
1729 raise NotImplementedError(
1730 'Query objects should not be used as iterators. Call Run() first.')
1732 def __getstate__(self
):
1733 state
= self
.__dict
__.copy()
1734 state
['_Query__index_list_source'] = None
1735 state
['_Query__cursor_source'] = None
1736 state
['_Query__compiled_query_source'] = None
1739 def __setstate__(self
, state
):
1741 if '_Query__query_options' not in state
:
1742 state
['_Query__query_options'] = datastore_query
.QueryOptions(
1743 keys_only
=state
.pop('_Query__keys_only'),
1744 produce_cursors
=state
.pop('_Query__compile'),
1745 start_cursor
=state
.pop('_Query__cursor'),
1746 end_cursor
=state
.pop('_Query__end_cursor'))
1747 self
.__dict
__ = state
1749 def __setitem__(self
, filter, value
):
1750 """Implements the [] operator. Used to set filters.
1752 If the filter string is empty or not a string, raises BadFilterError. If
1753 the value is not a supported type, raises BadValueError.
1755 if isinstance(value
, tuple):
1758 datastore_types
.ValidateProperty(' ', value
)
1759 match
= self
._CheckFilter
(filter, value
)
1760 property = match
.group(1)
1761 operator
= match
.group(3)
1763 dict.__setitem
__(self
, filter, value
)
1765 if (operator
in self
.INEQUALITY_OPERATORS
and
1766 property != datastore_types
._UNAPPLIED
_LOG
_TIMESTAMP
_SPECIAL
_PROPERTY
):
1768 if self
.__inequality
_prop
is None:
1769 self
.__inequality
_prop
= property
1771 assert self
.__inequality
_prop
== property
1772 self
.__inequality
_count
+= 1
1775 if filter not in self
.__filter
_order
:
1776 self
.__filter
_order
[filter] = self
.__filter
_counter
1777 self
.__filter
_counter
+= 1
1779 def setdefault(self
, filter, value
):
1780 """If the filter exists, returns its value. Otherwise sets it to value.
1782 If the property name is the empty string or not a string, raises
1783 BadPropertyError. If the value is not a supported type, raises
1786 datastore_types
.ValidateProperty(' ', value
)
1787 self
._CheckFilter
(filter, value
)
1788 return dict.setdefault(self
, filter, value
)
1790 def __delitem__(self
, filter):
1791 """Implements the del [] operator. Used to remove filters.
1793 dict.__delitem
__(self
, filter)
1794 del self
.__filter
_order
[filter]
1797 match
= Query
.FILTER_REGEX
.match(filter)
1798 property = match
.group(1)
1799 operator
= match
.group(3)
1801 if operator
in self
.INEQUALITY_OPERATORS
:
1802 assert self
.__inequality
_count
>= 1
1803 assert property == self
.__inequality
_prop
1804 self
.__inequality
_count
-= 1
1805 if self
.__inequality
_count
== 0:
1806 self
.__inequality
_prop
= None
1808 def update(self
, other
):
1809 """Updates this query's filters from the ones in other.
1811 If any filter string is invalid, raises BadFilterError. If any value is
1812 not a supported type, raises BadValueError.
1814 for filter, value
in other
.items():
1815 self
.__setitem
__(filter, value
)
1818 """The copy method is not supported.
1820 raise NotImplementedError('Query does not support the copy() method.')
1822 def _CheckFilter(self
, filter, values
):
1823 """Type check a filter string and list of values.
1825 Raises BadFilterError if the filter string is empty, not a string, or
1826 invalid. Raises BadValueError if the value type is not supported.
1829 filter: String containing the filter text.
1830 values: List of associated filter values.
1833 re.MatchObject (never None) that matches the 'filter'. Group 1 is the
1834 property name, group 3 is the operator. (Group 2 is unused.)
1837 match
= Query
.FILTER_REGEX
.match(filter)
1839 raise datastore_errors
.BadFilterError(
1840 'Could not parse filter string: %s' % str(filter))
1842 raise datastore_errors
.BadFilterError(
1843 'Could not parse filter string: %s' % str(filter))
1845 property = match
.group(1)
1846 operator
= match
.group(3)
1847 if operator
is None:
1850 if isinstance(values
, tuple):
1851 values
= list(values
)
1852 elif not isinstance(values
, list):
1854 if isinstance(values
[0], datastore_types
._RAW
_PROPERTY
_TYPES
):
1855 raise datastore_errors
.BadValueError(
1856 'Filtering on %s properties is not supported.' % typename(values
[0]))
1858 if (operator
in self
.INEQUALITY_OPERATORS
and
1859 property != datastore_types
._UNAPPLIED
_LOG
_TIMESTAMP
_SPECIAL
_PROPERTY
):
1860 if self
.__inequality
_prop
and property != self
.__inequality
_prop
:
1861 raise datastore_errors
.BadFilterError(
1862 'Only one property per query may have inequality filters (%s).' %
1863 ', '.join(self
.INEQUALITY_OPERATORS
))
1864 elif len(self
.__orderings
) >= 1 and self
.__orderings
[0][0] != property:
1865 raise datastore_errors
.BadFilterError(
1866 'Inequality operators (%s) must be on the same property as the '
1867 'first sort order, if any sort orders are supplied' %
1868 ', '.join(self
.INEQUALITY_OPERATORS
))
1870 if (self
.__kind
is None and
1871 property != datastore_types
.KEY_SPECIAL_PROPERTY
and
1872 property != datastore_types
._UNAPPLIED
_LOG
_TIMESTAMP
_SPECIAL
_PROPERTY
):
1873 raise datastore_errors
.BadFilterError(
1874 'Only %s filters are allowed on kindless queries.' %
1875 datastore_types
.KEY_SPECIAL_PROPERTY
)
1877 if property == datastore_types
._UNAPPLIED
_LOG
_TIMESTAMP
_SPECIAL
_PROPERTY
:
1879 raise datastore_errors
.BadFilterError(
1880 'Only kindless queries can have %s filters.' %
1881 datastore_types
._UNAPPLIED
_LOG
_TIMESTAMP
_SPECIAL
_PROPERTY
)
1882 if not operator
in self
.UPPERBOUND_INEQUALITY_OPERATORS
:
1883 raise datastore_errors
.BadFilterError(
1884 'Only %s operators are supported with %s filters.' % (
1885 self
.UPPERBOUND_INEQUALITY_OPERATORS
,
1886 datastore_types
._UNAPPLIED
_LOG
_TIMESTAMP
_SPECIAL
_PROPERTY
))
1888 if property in datastore_types
._SPECIAL
_PROPERTIES
:
1893 if property == datastore_types
.KEY_SPECIAL_PROPERTY
:
1894 for value
in values
:
1895 if not isinstance(value
, Key
):
1896 raise datastore_errors
.BadFilterError(
1897 '%s filter value must be a Key; received %s (a %s)' %
1898 (datastore_types
.KEY_SPECIAL_PROPERTY
, value
, typename(value
)))
1902 def _Run(self
, limit
=None, offset
=None,
1903 prefetch_count
=None, next_count
=None, **kwargs
):
1904 """Deprecated, use Run() instead."""
1905 return self
.Run(limit
=limit
, offset
=offset
,
1906 prefetch_size
=prefetch_count
, batch_size
=next_count
,
1909 def _ToPb(self
, limit
=None, offset
=None, count
=None):
1911 query_options
= datastore_query
.QueryOptions(
1912 config
=self
.GetQueryOptions(),
1916 return self
.GetQuery()._to
_pb
(_GetConnection(), query_options
)
1918 def _GetCompiledQuery(self
):
1919 """Returns the internal-only pb representation of the last query run.
1924 AssertionError: Query not compiled or not yet executed.
1926 compiled_query_function
= self
.__compiled
_query
_source
1927 if compiled_query_function
:
1928 compiled_query
= compiled_query_function()
1930 return compiled_query
1931 raise AssertionError('No compiled query available, either this query has '
1932 'not been executed or there is no compilation '
1933 'available for this kind of query')
1935 GetCompiledQuery
= _GetCompiledQuery
1936 GetCompiledCursor
= GetCursor
def AllocateIdsAsync(model_key, size=None, **kwargs):
  """Asynchronously allocates a range of IDs.

  Identical to datastore.AllocateIds() except returns an asynchronous
  object. Call get_result() on the return value to block on the call and
  get the results.
  """
  max = kwargs.pop('max', None)
  config = _GetConfigFromKwargs(kwargs)
  if getattr(config, 'read_policy', None) == EVENTUAL_CONSISTENCY:
    raise datastore_errors.BadRequestError(
        'read_policy is only supported on read operations.')
  keys, _ = NormalizeAndTypeCheckKeys(model_key)

  if len(keys) > 1:
    raise datastore_errors.BadArgumentError(
        'Cannot allocate IDs for more than one model key at a time')

  rpc = _GetConnection().async_allocate_ids(config, keys[0], size, max)
  return rpc
def AllocateIds(model_key, size=None, **kwargs):
  """Allocates a range of IDs of size or with max for the given key.

  Allocates a range of IDs in the datastore such that those IDs will not be
  automatically assigned to new entities. You can only allocate IDs for
  model keys from your app. If there is an error, raises a subclass of
  datastore_errors.Error.

  Either size or max must be provided but not both. If size is provided
  then a range of the given size is returned. If max is provided then the
  largest range of ids that are safe to use with an upper bound of max is
  returned (can be an empty range). Max should only be provided if you have
  an existing numeric id range that you want to reserve.

  Args:
    model_key: Key or string to serve as a model specifying the ID sequence
      in which to allocate IDs.
    size: integer, number of IDs to allocate.
    max: integer, upper bound of the range of IDs to allocate.
    config: Optional Configuration to use for this request.

  Returns:
    (start, end) of the allocated range, inclusive.
  """
  return AllocateIdsAsync(model_key, size, **kwargs).get_result()
1993 class MultiQuery(Query
):
1994 """Class representing a query which requires multiple datastore queries.
1996 This class is actually a subclass of datastore.Query as it is intended to act
1997 like a normal Query object (supporting the same interface).
1999 Does not support keys only queries, since it needs whole entities in order
2000 to merge sort them. (That's not true if there are no sort orders, or if the
2001 sort order is on __key__, but allowing keys only queries in those cases, but
2002 not in others, would be confusing.)
2005 def __init__(self
, bound_queries
, orderings
):
2006 if len(bound_queries
) > MAX_ALLOWABLE_QUERIES
:
2007 raise datastore_errors
.BadArgumentError(
2008 'Cannot satisfy query -- too many subqueries (max: %d, got %d).'
2009 ' Probable cause: too many IN/!= filters in query.' %
2010 (MAX_ALLOWABLE_QUERIES
, len(bound_queries
)))
2012 projection
= (bound_queries
and
2013 bound_queries
[0].GetQueryOptions().projection
)
2015 for query
in bound_queries
:
2016 if projection
!= query
.GetQueryOptions().projection
:
2017 raise datastore_errors
.BadQueryError(
2018 'All queries must have the same projection.')
2019 if query
.IsKeysOnly():
2020 raise datastore_errors
.BadQueryError(
2021 'MultiQuery does not support keys_only.')
2023 self
.__projection
= projection
2024 self
.__bound
_queries
= bound_queries
2025 self
.__orderings
= orderings
2026 self
.__compile
= False
2029 res
= 'MultiQuery: '
2030 for query
in self
.__bound
_queries
:
2031 res
= '%s %s' % (res
, str(query
))
2034 def Get(self
, limit
, offset
=0, **kwargs
):
2035 """Deprecated, use list(Run(...)) instead.
2038 limit: int or long representing the maximum number of entities to return.
2039 offset: int or long representing the number of entities to skip
2040 kwargs: Any keyword arguments accepted by datastore_query.QueryOptions().
2043 A list of entities with at most "limit" entries (less if the query
2044 completes before reading limit values).
2047 kwargs
.setdefault('batch_size', _MAX_INT_32
)
2048 return list(self
.Run(limit
=limit
, offset
=offset
, **kwargs
))
2050 class SortOrderEntity(object):
2051 """Allow entity comparisons using provided orderings.
2053 The iterator passed to the constructor is eventually consumed via
2054 calls to GetNext(), which generate new SortOrderEntity s with the
2058 def __init__(self
, entity_iterator
, orderings
):
2062 entity_iterator: an iterator of entities which will be wrapped.
2063 orderings: an iterable of (identifier, order) pairs. order
2064 should be either Query.ASCENDING or Query.DESCENDING.
2066 self
.__entity
_iterator
= entity_iterator
2067 self
.__entity
= None
2068 self
.__min
_max
_value
_cache
= {}
2070 self
.__entity
= entity_iterator
.next()
2071 except StopIteration:
2074 self
.__orderings
= orderings
2077 return str(self
.__entity
)
2079 def GetEntity(self
):
2080 """Gets the wrapped entity."""
2081 return self
.__entity
2084 """Wrap and return the next entity.
2086 The entity is retrieved from the iterator given at construction time.
2088 return MultiQuery
.SortOrderEntity(self
.__entity
_iterator
,
2091 def CmpProperties(self
, that
):
2092 """Compare two entities and return their relative order.
2094 Compares self to that based on the current sort orderings and the
2095 key orders between them. Returns negative, 0, or positive depending on
2096 whether self is less, equal to, or greater than that. This
2097 comparison returns as if all values were to be placed in ascending order
2098 (highest value last). Only uses the sort orderings to compare (ignores
2102 that: SortOrderEntity
2105 Negative if self < that
2106 Zero if self == that
2107 Positive if self > that
2110 if not self
.__entity
:
2111 return cmp(self
.__entity
, that
.__entity
)
2114 for (identifier
, order
) in self
.__orderings
:
2116 value1
= self
.__GetValueForId
(self
, identifier
, order
)
2117 value2
= self
.__GetValueForId
(that
, identifier
, order
)
2119 result
= cmp(value1
, value2
)
2120 if order
== Query
.DESCENDING
:
2126 def __GetValueForId(self
, sort_order_entity
, identifier
, sort_order
):
2130 value
= _GetPropertyValue(sort_order_entity
.__entity
, identifier
)
2131 if isinstance(value
, list):
2132 entity_key
= sort_order_entity
.__entity
.key()
2133 if (entity_key
, identifier
) in self
.__min
_max
_value
_cache
:
2134 value
= self
.__min
_max
_value
_cache
[(entity_key
, identifier
)]
2135 elif sort_order
== Query
.DESCENDING
:
2139 self
.__min
_max
_value
_cache
[(entity_key
, identifier
)] = value
2143 def __cmp__(self
, that
):
2144 """Compare self to that w.r.t. values defined in the sort order.
2146 Compare an entity with another, using sort-order first, then the key
2147 order to break ties. This can be used in a heap to have faster min-value
2151 that: other entity to compare to
2153 negative: if self is less than that in sort order
2154 zero: if self is equal to that in sort order
2155 positive: if self is greater than that in sort order
2157 property_compare
= self
.CmpProperties(that
)
2158 if property_compare
:
2159 return property_compare
2162 return cmp(self
.__entity
.key(), that
.__entity
.key())
def _ExtractBounds(self, config):
  """This function extracts the range of results to consider.

  Since MultiQuery dedupes in memory, we must apply the offset and limit in
  memory. The results that should be considered are
  results[lower_bound:upper_bound].

  We also pass the offset=0 and limit=upper_bound to the base queries to
  optimize performance.

  Args:
    config: The base datastore_query.QueryOptions.

  Returns:
    a tuple consisting of the lower_bound and upper_bound to impose in memory
    and the config to use with each bound query. The upper_bound may be None.
  """
  if config is None:
    return 0, None, None

  lower_bound = config.offset or 0
  upper_bound = config.limit
  if upper_bound is not None:
    # Each base query must return enough results to survive the in-memory
    # offset; clamp to 32 bits for the wire format.
    upper_bound = min(lower_bound + upper_bound, _MAX_INT_32)
  # NOTE(review): the guard around this reassignment was elided in the
  # extraction; offset must be zeroed for the base queries (it is applied in
  # memory) — confirm exact structure against the original file.
  config = datastore_query.QueryOptions(offset=0,
                                        limit=upper_bound,
                                        config=config)
  return lower_bound, upper_bound, config
def __GetProjectionOverride(self, config):
  """Returns a tuple of (original projection, projection override).

  If projection is None, there is no projection. If override is None,
  projection is sufficient for this query.
  """
  projection = datastore_query.QueryOptions.projection(config)
  if projection is None:
    projection = self.__projection
  if not projection:
    return None, None

  # The in-memory merge sort needs every ordered property present on each
  # result, so sort properties missing from the projection must also be
  # fetched (the override); they are stripped again before yielding.
  # NOTE(review): portions of this body were elided in the extraction and
  # are reconstructed — verify against the original file.
  override = set()
  for prop, _ in self.__orderings:
    if prop not in projection:
      override.add(prop)
  if not override:
    return projection, None

  return projection, projection + tuple(override)
def Run(self, **kwargs):
  """Return an iterable output with all results in order.

  Merge sort the results. First create a list of iterators, then walk
  though them and yield results in order.

  Args:
    kwargs: Any keyword arguments accepted by datastore_query.QueryOptions().

  Returns:
    An iterator for the result set.
  """
  # NOTE(review): several lines of this method were elided in the extraction
  # and are reconstructed; verify against the original file.
  config = _GetConfigFromKwargs(kwargs, convert_rpc=True,
                                config_class=datastore_query.QueryOptions)
  if config and config.keys_only:
    raise datastore_errors.BadRequestError(
        'keys only queries are not supported by multi-query.')

  # Offset/limit are applied in memory after dedup; base queries run with
  # offset=0 and an inflated limit (see _ExtractBounds).
  lower_bound, upper_bound, config = self._ExtractBounds(config)

  projection, override = self.__GetProjectionOverride(config)
  if override:
    config = datastore_query.QueryOptions(projection=override, config=config)

  results = []
  count = 1
  log_level = logging.DEBUG - 1
  for bound_query in self.__bound_queries:
    logging.log(log_level, 'Running query #%i' % count)
    results.append(bound_query.Run(config=config))
    count += 1

  def GetDedupeKey(sort_order_entity):
    # Projection results are only duplicates if their projected values match
    # too, so the values join the key in that case.
    if projection:
      return (sort_order_entity.GetEntity().key(),
              frozenset(sort_order_entity.GetEntity().iteritems()))
    return sort_order_entity.GetEntity().key()

  def IterateResults(results):
    """Iterator function to return all results in sorted order.

    Iterate over the array of results, yielding the next element, in
    sorted order. This function is destructive (results will be empty
    when the operation is complete).

    Args:
      results: list of result iterators to merge and iterate through

    Yields:
      The next result in sorted order.
    """
    # Min-heap of SortOrderEntity wrappers, one live entry per subquery.
    result_heap = []
    for result in results:
      heap_value = MultiQuery.SortOrderEntity(result, self.__orderings)
      if heap_value.GetEntity():
        heapq.heappush(result_heap, heap_value)

    used_keys = set()

    while result_heap:
      if upper_bound is not None and len(used_keys) >= upper_bound:
        break

      top_result = heapq.heappop(result_heap)
      dedupe_key = GetDedupeKey(top_result)
      if dedupe_key not in used_keys:
        result = top_result.GetEntity()
        if override:
          # Strip properties that were fetched only to make the in-memory
          # sort possible.
          for key in result.keys():
            if key not in projection:
              del result[key]
        yield result
      used_keys.add(dedupe_key)

      # Pop every duplicate of the entity just yielded, advance each
      # duplicate's iterator, then push the refreshed entries back on.
      results_to_push = []
      while result_heap:
        next = heapq.heappop(result_heap)
        if dedupe_key != GetDedupeKey(next):
          # Not a duplicate: return it to the heap untouched.
          results_to_push.append(next)
          break
        else:
          results_to_push.append(next.GetNext())
      results_to_push.append(top_result.GetNext())

      for popped_result in results_to_push:
        # GetNext() may have exhausted a subquery; only live entries return.
        if popped_result.GetEntity():
          heapq.heappush(result_heap, popped_result)

  it = IterateResults(results)

  # Consume the in-memory offset before handing the iterator to the caller.
  try:
    for _ in xrange(lower_bound):
      it.next()
  except StopIteration:
    pass

  return it
def Count(self, limit=1000, **kwargs):
  """Return the number of matched entities for this query.

  Will return the de-duplicated count of results. Will call the more
  efficient Get() function if a limit is given.

  Args:
    limit: maximum number of entries to count (for any result > limit, return
      limit).
    config: Optional Configuration to use for this request.

  Returns:
    count of the number of entries returned.
  """
  # NOTE(review): parts of this method were elided in the extraction and are
  # reconstructed; verify against the original file.
  kwargs['limit'] = limit
  config = _GetConfigFromKwargs(kwargs, convert_rpc=True,
                                config_class=datastore_query.QueryOptions)

  projection, override = self.__GetProjectionOverride(config)

  if not projection:
    # No projection: keys are enough to count distinct results.
    config = datastore_query.QueryOptions(keys_only=True, config=config)
  elif override:
    config = datastore_query.QueryOptions(projection=override, config=config)

  lower_bound, upper_bound, config = self._ExtractBounds(config)

  used_keys = set()
  for bound_query in self.__bound_queries:
    for result in bound_query.Run(config=config):
      if projection:
        # Projection results dedupe on (key, projected values).
        dedupe_key = (result.key(),
                      tuple(result.iteritems()))
      else:
        # keys_only run: each result is already a Key.
        dedupe_key = result
      used_keys.add(dedupe_key)
      if upper_bound and len(used_keys) >= upper_bound:
        return upper_bound - lower_bound

  return max(0, len(used_keys) - lower_bound)
def GetIndexList(self):
  """Unsupported: a MultiQuery fans out to several queries, so no single
  index list exists. Always raises AssertionError."""
  message = ('No index_list available for a MultiQuery (queries '
             'using "IN" or "!=" operators)')
  raise AssertionError(message)
def GetCursor(self):
  """Unsupported: a MultiQuery merges several result streams in memory, so
  no single cursor exists. Always raises AssertionError."""
  message = ('No cursor available for a MultiQuery (queries '
             'using "IN" or "!=" operators)')
  raise AssertionError(message)
2396 def _GetCompiledQuery(self
):
2397 """Internal only, do not use."""
2398 raise AssertionError('No compilation available for a MultiQuery (queries '
2399 'using "IN" or "!=" operators)')
def __setitem__(self, query_filter, value):
  """Add a new filter by setting it on all subqueries.

  If any of the setting operations raise an exception, the ones
  that succeeded are undone and the exception is propagated
  upward.

  Args:
    query_filter: a string of the form "property operand".
    value: the value that the given property is compared against.
  """
  # NOTE(review): the rollback branch was partially elided in the extraction
  # and is reconstructed; verify against the original file.
  saved_items = []
  for index, query in enumerate(self.__bound_queries):
    # Remember each subquery's previous value so a failure can be undone.
    saved_items.append(query.get(query_filter, None))
    try:
      query[query_filter] = value
    except:
      # Restore every subquery modified before the failure, then re-raise.
      for q, old_value in itertools.izip(self.__bound_queries[:index],
                                         saved_items):
        if old_value is not None:
          q[query_filter] = old_value
        else:
          del q[query_filter]
      raise
def __delitem__(self, query_filter):
  """Delete a filter by deleting it from all subqueries.

  If a KeyError is raised during the attempt, it is ignored, unless
  every subquery raised a KeyError. If any other exception is
  raised, any deletes will be rolled back.

  Args:
    query_filter: the filter to delete.

  Raises:
    KeyError: No subquery had an entry containing query_filter.
  """
  # NOTE(review): the try/except structure was partially elided in the
  # extraction and is reconstructed; verify against the original file.
  subquery_count = len(self.__bound_queries)
  keyerror_count = 0
  saved_items = []
  for index, query in enumerate(self.__bound_queries):
    try:
      saved_items.append(query.get(query_filter, None))
      del query[query_filter]
    except KeyError:
      # A single missing filter is fine; only fail if all subqueries miss.
      keyerror_count += 1
    except:
      # Unexpected failure: restore the filters already deleted, re-raise.
      for q, old_value in itertools.izip(self.__bound_queries[:index],
                                         saved_items):
        if old_value is not None:
          q[query_filter] = old_value
      raise

  if keyerror_count == subquery_count:
    raise KeyError(query_filter)
def __iter__(self):
  # Iterating a MultiQuery yields its bound subqueries, mirroring the list
  # interface the pre-split query API exposed.
  # NOTE(review): the def line was elided in the extraction; the visible body
  # implies this is __iter__ — confirm against the original file.
  return iter(self.__bound_queries)
# Legacy aliases kept for callers of the pre-datastore_query API; both
# targets unconditionally raise AssertionError for a MultiQuery.
GetCompiledCursor = GetCursor
GetCompiledQuery = _GetCompiledQuery
def RunInTransaction(function, *args, **kwargs):
  """Runs a function inside a datastore transaction.

  Runs the user-provided function inside transaction, with the default
  number of retries. Thin wrapper over RunInTransactionOptions with no
  explicit TransactionOptions.

  Args:
    function: a function to be run inside the transaction on all remaining
      arguments
    *args: positional arguments for function.
    **kwargs: keyword arguments for function.

  Returns:
    the function's return value, if any

  Raises:
    TransactionFailedError, if the transaction could not be committed.
  """
  return RunInTransactionOptions(None, function, *args, **kwargs)
def RunInTransactionCustomRetries(retries, function, *args, **kwargs):
  """Runs a function inside a datastore transaction.

  Runs the user-provided function inside transaction, with a specified
  number of retries.

  Args:
    retries: number of retries (not counting the initial try)
    function: a function to be run inside the transaction on all remaining
      arguments
    *args: positional arguments for function.
    **kwargs: keyword arguments for function.

  Returns:
    the function's return value, if any

  Raises:
    TransactionFailedError, if the transaction could not be committed.
  """
  options = datastore_rpc.TransactionOptions(retries=retries)
  return RunInTransactionOptions(options, function, *args, **kwargs)
def RunInTransactionOptions(options, function, *args, **kwargs):
  """Runs a function inside a datastore transaction.

  Runs the user-provided function inside a full-featured, ACID datastore
  transaction. Every Put, Get, and Delete call in the function is made within
  the transaction. All entities involved in these calls must belong to the
  same entity group. Queries are supported as long as they specify an
  ancestor belonging to the same entity group.

  The trailing arguments are passed to the function as positional arguments.
  If the function returns a value, that value will be returned by
  RunInTransaction. Otherwise, it will return None.

  The function may raise any exception to roll back the transaction instead of
  committing it. If this happens, the transaction will be rolled back and the
  exception will be re-raised up to RunInTransaction's caller.

  If you want to roll back intentionally, but don't have an appropriate
  exception to raise, you can raise an instance of datastore_errors.Rollback.
  It will cause a rollback, but will *not* be re-raised up to the caller.

  The function may be run more than once, so it should be idempotent. It
  should avoid side effects, and it shouldn't have *any* side effects that
  aren't safe to occur multiple times. This includes modifying the arguments,
  since they persist across invocations of the function. However, this doesn't
  include Put, Get, and Delete calls, of course.

  Example usage:

  > def decrement(key, amount=1):
  >   counter = datastore.Get(key)
  >   counter['count'] -= amount
  >   if counter['count'] < 0:  # don't let the counter go negative
  >     raise datastore_errors.Rollback()
  >   datastore.Put(counter)
  >
  > counter = datastore.Query('Counter', {'name': 'foo'})
  > datastore.RunInTransaction(decrement, counter.key(), amount=5)

  Transactions satisfy the traditional ACID properties. They are:

  - Atomic. All of a transaction's operations are executed or none of them are.

  - Consistent. The datastore's state is consistent before and after a
  transaction, whether it committed or rolled back. Invariants such as
  "every entity has a primary key" are preserved.

  - Isolated. Transactions operate on a snapshot of the datastore. Other
  datastore operations do not see intermediated effects of the transaction;
  they only see its effects after it has committed.

  - Durable. On commit, all writes are persisted to the datastore.

  Nested transactions are not supported.

  Args:
    options: TransactionOptions specifying options (number of retries, etc) for
      this transaction
    function: a function to be run inside the transaction on all remaining
      arguments
    *args: positional arguments for function.
    **kwargs: keyword arguments for function.

  Returns:
    the function's return value, if any

  Raises:
    TransactionFailedError, if the transaction could not be committed.
  """
  # NOTE(review): the try/finally structure below was partially elided in the
  # extraction and is reconstructed; verify against the original file.
  options = datastore_rpc.TransactionOptions(options)
  if IsInTransaction():
    if options.propagation in (None, datastore_rpc.TransactionOptions.NESTED):
      # Default and NESTED propagation: refuse, nesting is unsupported.
      raise datastore_errors.BadRequestError(
          'Nested transactions are not supported.')
    elif options.propagation is datastore_rpc.TransactionOptions.INDEPENDENT:
      # Suspend the current transaction, run a fresh one, then restore.
      txn_connection = _PopConnection()
      try:
        return RunInTransactionOptions(options, function, *args, **kwargs)
      finally:
        _PushConnection(txn_connection)
    # ALLOWED propagation: reuse the transaction already in progress.
    return function(*args, **kwargs)

  if options.propagation is datastore_rpc.TransactionOptions.MANDATORY:
    raise datastore_errors.BadRequestError('Requires an existing transaction.')

  retries = options.retries
  if retries is None:
    retries = DEFAULT_TRANSACTION_RETRIES

  conn = _GetConnection()
  _PushConnection(None)
  try:
    # One initial attempt plus `retries` retries on commit collision.
    for _ in range(0, retries + 1):
      _SetConnection(conn.new_transaction(options))
      ok, result = _DoOneTry(function, args, kwargs)
      if ok:
        return result
  finally:
    _PopConnection()

  raise datastore_errors.TransactionFailedError(
      'The transaction could not be committed. Please try again.')
def _DoOneTry(function, args, kwargs):
  """Helper to call a function in a transaction, once.

  Args:
    function: The function to call.
    *args: Tuple of positional arguments.
    **kwargs: Dict of keyword arguments.

  Returns:
    (ok, result) — presumably ok is True when the attempt committed (or was
    deliberately rolled back) and False when the caller should retry;
    TODO(review): confirm, parts of this body were elided in the extraction.
  """
  try:
    result = function(*args, **kwargs)
  except:
    # Any exception rolls the transaction back; keep the original exc_info
    # so a rollback failure doesn't mask it.
    original_exception = sys.exc_info()
    try:
      _GetConnection().rollback()
    except Exception:
      # Best-effort rollback: log and fall through to the original error.
      logging.exception('Exception sending Rollback:')
    type, value, trace = original_exception
    if isinstance(value, datastore_errors.Rollback):
      # Deliberate rollback: swallow the exception, report success/no result.
      return True, None
    else:
      raise type, value, trace
  else:
    if _GetConnection().commit():
      return True, result
    else:
      # Commit failed (concurrent modification): signal the caller to retry.
      logging.warning('Transaction collision. Retrying... %s', '')
      return False, None
def _MaybeSetupTransaction(request, keys):
  """Begin a transaction, if necessary, and populate it in the request.

  This API exists for internal backwards compatibility, primarily with
  api/taskqueue/taskqueue.py.

  Args:
    request: A protobuf with a mutable_transaction() method.
    keys: unused here; kept for backwards-compatible signature.

  Returns:
    A transaction if we're inside a transaction, otherwise None
  """
  return _GetConnection()._set_request_transaction(request)
def IsInTransaction():
  """Determine whether already running in transaction.

  Returns:
    True if already running in transaction, else False.
  """
  # The connection stack holds a TransactionalConnection exactly while a
  # transaction is open.
  current_connection = _GetConnection()
  return isinstance(current_connection, datastore_rpc.TransactionalConnection)
def Transactional(_func=None, **kwargs):
  """A decorator that makes sure a function is run in a transaction.

  Defaults propagation to datastore_rpc.TransactionOptions.ALLOWED, which means
  any existing transaction will be used in place of creating a new one.

  WARNING: Reading from the datastore while in a transaction will not see any
  changes made in the same transaction. If the function being decorated relies
  on seeing all changes made in the calling scoope, set
  propagation=datastore_rpc.TransactionOptions.NESTED.

  Args:
    _func: do not use; placeholder for bare @Transactional application.
    **kwargs: TransactionOptions configuration options.

  Returns:
    A wrapper for the given function that creates a new transaction if needed.
  """
  # Bare use (@Transactional without parentheses) lands the function here.
  if _func is not None:
    return Transactional()(_func)

  # Legacy 'require_new' flag suppresses the ALLOWED default.
  if not kwargs.pop('require_new', None):
    kwargs.setdefault('propagation', datastore_rpc.TransactionOptions.ALLOWED)

  options = datastore_rpc.TransactionOptions(**kwargs)

  def outer_wrapper(func):
    def inner_wrapper(*args, **kwds):
      return RunInTransactionOptions(options, func, *args, **kwds)
    return inner_wrapper
  return outer_wrapper
@datastore_rpc._positional(1)
def NonTransactional(_func=None, allow_existing=True):
  """A decorator that insures a function is run outside a transaction.

  If there is an existing transaction (and allow_existing=True), the existing
  transaction is paused while the function is executed.

  Args:
    _func: do not use; placeholder for bare @NonTransactional application.
    allow_existing: If false, throw an exception if called from within a
      transaction.

  Returns:
    A wrapper for the decorated function that ensures it runs outside a
    transaction.
  """
  # Bare use (@NonTransactional without parentheses) lands the function here.
  if _func is not None:
    return NonTransactional()(_func)

  def outer_wrapper(func):
    def inner_wrapper(*args, **kwds):
      if not IsInTransaction():
        return func(*args, **kwds)

      if not allow_existing:
        raise datastore_errors.BadRequestError(
            'Function cannot be called from within a transaction.')

      # Pause the current transaction: pop its connection off the stack, run
      # the function untransacted, then restore it.
      txn_connection = _PopConnection()
      try:
        return func(*args, **kwds)
      finally:
        _PushConnection(txn_connection)
    return inner_wrapper
  return outer_wrapper
def _GetCompleteKeyOrError(arg):
  """Expects an Entity or a Key, and returns the corresponding Key. Raises
  BadArgumentError or BadKeyError if arg is a different type or is incomplete.

  Args:
    arg: Entity, Key, or string-encoded Key.

  Returns:
    datastore_types.Key with both kind and id/name populated.
  """
  # NOTE(review): the branch bodies were elided in the extraction and are
  # reconstructed; verify against the original file.
  if isinstance(arg, Key):
    key = arg
  elif isinstance(arg, basestring):
    # Accept a string-encoded Key for convenience.
    key = Key(arg)
  elif isinstance(arg, Entity):
    key = arg.key()
  elif not isinstance(arg, Key):
    raise datastore_errors.BadArgumentError(
        'Expects argument to be an Entity or Key; received %s (a %s).' %
        (arg, typename(arg)))
  assert isinstance(key, Key)

  # A key is "complete" only once it carries an id or name.
  if not key.has_id_or_name():
    raise datastore_errors.BadKeyError('Key %r is not complete.' % key)

  return key
def _GetPropertyValue(entity, property):
  """Returns an entity's value for a given property name.

  Handles special properties like __key__ as well as normal properties.

  Args:
    entity: datastore.Entity
    property: str; the property name

  Returns:
    property value. For __key__, a datastore_types.Key.

  Raises:
    KeyError, if the entity does not have the given property.
  """
  if property in datastore_types._SPECIAL_PROPERTIES:
    if property == datastore_types._UNAPPLIED_LOG_TIMESTAMP_SPECIAL_PROPERTY:
      # The unapplied-log timestamp is never exposed as a readable value.
      raise KeyError(property)

    assert property == datastore_types.KEY_SPECIAL_PROPERTY
    return entity.key()
  else:
    return entity[property]
2823 def _AddOrAppend(dictionary
, key
, value
):
2824 """Adds the value to the existing values in the dictionary, if any.
2826 If dictionary[key] doesn't exist, sets dictionary[key] to value.
2828 If dictionary[key] is not a list, sets dictionary[key] to [old_value, value].
2830 If dictionary[key] is a list, appends value to that list.
2834 key, value: anything
2836 if key
in dictionary
:
2837 existing_value
= dictionary
[key
]
2838 if isinstance(existing_value
, list):
2839 existing_value
.append(value
)
2841 dictionary
[key
] = [existing_value
, value
]
2843 dictionary
[key
] = value
class Iterator(datastore_query.ResultsIterator):
  """Thin wrapper of datastore_query.ResultsIterator.

  Deprecated, do not use, only for backwards compatability.
  """

  def _Next(self, count=None):
    # Deprecated batching helper: drain up to `count` results from the
    # underlying iterator into a list.
    # NOTE(review): most of this body was elided in the extraction and is
    # reconstructed (default batch assumed 20); verify against the original.
    if count is None:
      count = 20
    result = []
    for r in self:
      if len(result) >= count:
        break
      result.append(r)
    return result

  def GetCompiledCursor(self, query):
    # `query` is ignored; kept only for the legacy call signature.
    return self.cursor()

  def GetIndexList(self):
    """Returns the list of indexes used to perform the query."""
    tuple_index_list = super(Iterator, self).index_list()
    # index_list() yields (index, state) pairs; legacy callers expect bare
    # index objects.
    return [index for index, state in tuple_index_list]

  # Legacy alias for the pre-datastore_query accessor name.
  index_list = GetIndexList
# Backwards-compatible aliases for names that predate the datastore_rpc
# split; external code may still import these from this module.
DatastoreRPC = apiproxy_stub_map.UserRPC
GetRpcFromKwargs = _GetConfigFromKwargs
_CurrentTransactionKey = IsInTransaction
_ToDatastoreError = datastore_rpc._ToDatastoreError
_DatastoreExceptionFromErrorCodeAndDetail = datastore_rpc._DatastoreExceptionFromErrorCodeAndDetail