3 # Copyright 2007 Google Inc.
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
9 # http://www.apache.org/licenses/LICENSE-2.0
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
21 """The Python datastore API used by app developers.
23 Defines Entity, Query, and Iterator classes, as well as methods for all of the
24 datastore's calls. Also defines conversions between the Python classes and
25 their PB counterparts.
27 The datastore errors are defined in the datastore_errors module. That module is
28 only required to avoid circular imports. datastore imports datastore_types,
29 which needs BadValueError, so it can't be defined in datastore.
55 from xml
.sax
import saxutils
57 from google
.appengine
.api
import apiproxy_stub_map
58 from google
.appengine
.api
import capabilities
59 from google
.appengine
.api
import datastore_errors
60 from google
.appengine
.api
import datastore_types
61 from google
.appengine
.datastore
import datastore_pb
62 from google
.appengine
.datastore
import datastore_query
63 from google
.appengine
.datastore
import datastore_rpc
64 from google
.appengine
.datastore
import entity_pb
68 MAX_ALLOWABLE_QUERIES
= 30
71 MAXIMUM_RESULTS
= 1000
77 DEFAULT_TRANSACTION_RETRIES
= 3
80 READ_CAPABILITY
= capabilities
.CapabilitySet('datastore_v3')
81 WRITE_CAPABILITY
= capabilities
.CapabilitySet(
83 capabilities
=['write'])
91 _MAX_INDEXED_PROPERTIES
= 20000
94 _MAX_ID_BATCH_SIZE
= datastore_rpc
._MAX
_ID
_BATCH
_SIZE
96 Key
= datastore_types
.Key
97 typename
= datastore_types
.typename
100 STRONG_CONSISTENCY
= datastore_rpc
.Configuration
.STRONG_CONSISTENCY
101 EVENTUAL_CONSISTENCY
= datastore_rpc
.Configuration
.EVENTUAL_CONSISTENCY
105 _MAX_INT_32
= 2**31-1
108 def NormalizeAndTypeCheck(arg
, types
):
109 """Normalizes and type checks the given argument.
112 arg: an instance or iterable of the given type(s)
113 types: allowed type or tuple of types
116 A (list, bool) tuple. The list is a normalized, shallow copy of the
117 argument. The boolean is True if the argument was a sequence, False
118 if it was a single object.
121 AssertionError: types includes list or tuple.
122 BadArgumentError: arg is not an instance or sequence of one of the given
125 if not isinstance(types
, (list, tuple)):
128 assert list not in types
and tuple not in types
130 if isinstance(arg
, types
):
136 if isinstance(arg
, basestring
):
137 raise datastore_errors
.BadArgumentError(
138 'Expected an instance or iterable of %s; received %s (a %s).' %
139 (types
, arg
, typename(arg
)))
146 raise datastore_errors
.BadArgumentError(
147 'Expected an instance or iterable of %s; received %s (a %s).' %
148 (types
, arg
, typename(arg
)))
152 if not isinstance(val
, types
):
153 raise datastore_errors
.BadArgumentError(
154 'Expected one of %s; received %s (a %s).' %
155 (types
, val
, typename(val
)))
157 return arg_list
, True
def NormalizeAndTypeCheckKeys(keys):
  """Normalizes and type checks that the given argument is a valid key or keys.

  A wrapper around NormalizeAndTypeCheck() that accepts strings, Keys, and
  Entities, and normalizes to Keys.

  Args:
    keys: a Key or sequence of Keys

  Returns:
    A (list of Keys, bool) tuple. See NormalizeAndTypeCheck.

  Raises:
    BadArgumentError: arg is not an instance or sequence of one of the given
      types.
  """
  normalized, was_sequence = NormalizeAndTypeCheck(
      keys, (basestring, Entity, Key))

  # Elements may still be strings or Entities; coerce each to a complete Key.
  complete_keys = [_GetCompleteKeyOrError(item) for item in normalized]

  return (complete_keys, was_sequence)
183 def _GetConfigFromKwargs(kwargs
, convert_rpc
=False,
184 config_class
=datastore_rpc
.Configuration
):
185 """Get a Configuration object from the keyword arguments.
187 This is purely an internal helper for the various public APIs below
191 kwargs: A dict containing the keyword arguments passed to a public API.
192 convert_rpc: If the an rpc should be converted or passed on directly.
193 config_class: The config class that should be generated.
196 A UserRPC instance, or a Configuration instance, or None.
199 TypeError if unexpected keyword arguments are present.
205 rpc
= kwargs
.pop('rpc', None)
207 if not isinstance(rpc
, apiproxy_stub_map
.UserRPC
):
208 raise datastore_errors
.BadArgumentError(
209 'rpc= argument should be None or a UserRPC instance')
210 if 'config' in kwargs
:
211 raise datastore_errors
.BadArgumentError(
212 'Expected rpc= or config= argument but not both')
215 raise datastore_errors
.BadArgumentError(
216 'Unexpected keyword arguments: %s' % ', '.join(kwargs
))
220 read_policy
= getattr(rpc
, 'read_policy', None)
221 kwargs
['config'] = datastore_rpc
.Configuration(
222 deadline
=rpc
.deadline
, read_policy
=read_policy
,
223 config
=_GetConnection().config
)
225 return config_class(**kwargs
)
228 class _BaseIndex(object):
231 BUILDING
, SERVING
, DELETING
, ERROR
= range(4)
236 ASCENDING
= datastore_query
.PropertyOrder
.ASCENDING
237 DESCENDING
= datastore_query
.PropertyOrder
.DESCENDING
239 def __init__(self
, index_id
, kind
, has_ancestor
, properties
):
240 """Construct a datastore index instance.
243 index_id: Required long; Uniquely identifies the index
244 kind: Required string; Specifies the kind of the entities to index
245 has_ancestor: Required boolean; indicates if the index supports a query
246 that filters entities by the entity group parent
247 properties: Required list of (string, int) tuples; The entity properties
248 to index. First item in a tuple is the property name and the second
249 item is the sorting direction (ASCENDING|DESCENDING).
250 The order of the properties is based on the order in the index.
252 argument_error
= datastore_errors
.BadArgumentError
253 datastore_types
.ValidateInteger(index_id
, 'index_id', argument_error
,
255 datastore_types
.ValidateString(kind
, 'kind', argument_error
, empty_ok
=True)
256 if not isinstance(properties
, (list, tuple)):
257 raise argument_error('properties must be a list or a tuple')
258 for idx
, index_property
in enumerate(properties
):
259 if not isinstance(index_property
, (list, tuple)):
260 raise argument_error('property[%d] must be a list or a tuple' % idx
)
261 if len(index_property
) != 2:
262 raise argument_error('property[%d] length should be 2 but was %d' %
263 (idx
, len(index_property
)))
264 datastore_types
.ValidateString(index_property
[0], 'property name',
266 _BaseIndex
.__ValidateEnum
(index_property
[1],
267 (self
.ASCENDING
, self
.DESCENDING
),
269 self
.__id
= long(index_id
)
271 self
.__has
_ancestor
= bool(has_ancestor
)
272 self
.__properties
= properties
275 def __ValidateEnum(value
, accepted_values
, name
='value',
276 exception
=datastore_errors
.BadArgumentError
):
277 datastore_types
.ValidateInteger(value
, name
, exception
)
278 if not value
in accepted_values
:
279 raise exception('%s should be one of %s but was %d' %
280 (name
, str(accepted_values
), value
))
283 """Returns the index id, a long."""
287 """Returns the index kind, a string. Empty string ('') if none."""
  def _HasAncestor(self):
    """Indicates if this is an ancestor index, a boolean."""
    return self.__has_ancestor
  def _Properties(self):
    """Returns the index properties, a sequence of
    (property name as a string, ASCENDING|DESCENDING) tuples.
    """
    # Returned as stored by the constructor (list or tuple), without copying.
    return self.__properties
  def __eq__(self, other):
    # Index identity is the id alone; kind, ancestor and properties are not
    # compared. Name mangling means a non-_BaseIndex `other` raises
    # AttributeError rather than returning NotImplemented.
    return self.__id == other.__id
  def __ne__(self, other):
    # Mirror of __eq__: compares only the mangled id attribute, so a
    # non-_BaseIndex `other` raises AttributeError.
    return self.__id != other.__id
307 return hash(self
.__id
)
310 class Index(_BaseIndex
):
311 """A datastore index."""
314 Kind
= _BaseIndex
._Kind
315 HasAncestor
= _BaseIndex
._HasAncestor
316 Properties
= _BaseIndex
._Properties
319 class DatastoreAdapter(datastore_rpc
.AbstractAdapter
):
320 """Adapter between datatypes defined here (Entity etc.) and protobufs.
322 See the base class in datastore_rpc.py for more docs.
326 index_state_mappings
= {
327 entity_pb
.CompositeIndex
.ERROR
: Index
.ERROR
,
328 entity_pb
.CompositeIndex
.DELETED
: Index
.DELETING
,
329 entity_pb
.CompositeIndex
.READ_WRITE
: Index
.SERVING
,
330 entity_pb
.CompositeIndex
.WRITE_ONLY
: Index
.BUILDING
334 index_direction_mappings
= {
335 entity_pb
.Index_Property
.ASCENDING
: Index
.ASCENDING
,
336 entity_pb
.Index_Property
.DESCENDING
: Index
.DESCENDING
  def key_to_pb(self, key):
    """Returns the entity_pb.Reference underlying the given Key.

    Accesses the Key's private name-mangled attribute directly, so the
    returned reference is not a copy; mutating it mutates the Key.
    """
    return key._Key__reference
  def pb_to_key(self, pb):
    """Converts an entity_pb.Reference protobuf into a datastore Key."""
    return Key._FromPb(pb)
  def entity_to_pb(self, entity):
    """Converts an Entity into its entity_pb.EntityProto representation."""
    return entity._ToPb()
  def pb_to_entity(self, pb):
    """Converts an entity_pb.EntityProto protobuf into an Entity."""
    return Entity._FromPb(pb)
351 def pb_to_index(self
, pb
):
352 index_def
= pb
.definition()
353 properties
= [(property.name().decode('utf-8'),
354 DatastoreAdapter
.index_direction_mappings
.get(property.direction()))
355 for property in index_def
.property_list()]
356 index
= Index(pb
.id(), index_def
.entity_type().decode('utf-8'),
357 index_def
.ancestor(), properties
)
358 state
= DatastoreAdapter
.index_state_mappings
.get(pb
.state())
362 _adapter
= DatastoreAdapter()
363 _thread_local
= threading
.local()
366 _ENV_KEY
= '__DATASTORE_CONNECTION_INITIALIZED__'
369 def __InitConnection():
370 """Internal method to make sure the connection state has been initialized."""
383 if os
.getenv(_ENV_KEY
) and hasattr(_thread_local
, 'connection_stack'):
385 _thread_local
.connection_stack
= [datastore_rpc
.Connection(adapter
=_adapter
)]
387 os
.environ
[_ENV_KEY
] = '1'
def _GetConnection():
  """Internal method to retrieve a datastore connection local to the thread."""
  # NOTE(review): assumes the thread-local connection stack has already been
  # initialized (see __InitConnection) -- confirm that every caller reaches
  # this only after initialization, else this raises AttributeError.
  return _thread_local.connection_stack[-1]
def _SetConnection(connection):
  """Internal method to replace the current thread local connection."""
  # Overwrites the top of the stack in place; the previous top is discarded,
  # not pushed, so _PopConnection() will not restore it.
  _thread_local.connection_stack[-1] = connection
def _PushConnection(new_connection):
  """Internal method to save the current connection and sets a new one.

  Args:
    new_connection: The connection to set.
  """
  # The previous connection stays beneath the new one on the stack and can be
  # restored later via _PopConnection().
  _thread_local.connection_stack.append(new_connection)
def _PopConnection():
  """Internal method to restore the previous connection.

  Returns:
    The current connection.
  """
  # At least one connection must remain on the stack, so every pop must have
  # been preceded by a matching _PushConnection().
  assert len(_thread_local.connection_stack) >= 2
  return _thread_local.connection_stack.pop()
426 def _MakeSyncCall(service
, call
, request
, response
, config
=None):
427 """The APIProxy entry point for a synchronous API call.
430 service: For backwards compatibility, must be 'datastore_v3'.
431 call: String representing which function to call.
432 request: Protocol buffer for the request.
433 response: Protocol buffer for the response.
434 config: Optional Configuration to use for this request.
437 Response protocol buffer. Caller should always use returned value
438 which may or may not be same as passed in 'response'.
441 apiproxy_errors.Error or a subclass.
443 conn
= _GetConnection()
444 if isinstance(request
, datastore_pb
.Query
):
445 conn
._set
_request
_read
_policy
(request
, config
)
446 conn
._set
_request
_transaction
(request
)
447 rpc
= conn
._make
_rpc
_call
(config
, call
, request
, response
)
448 conn
.check_rpc_success(rpc
)
452 def CreateRPC(service
='datastore_v3',
453 deadline
=None, callback
=None, read_policy
=None):
454 """Create an rpc for use in configuring datastore calls.
456 NOTE: This functions exists for backwards compatibility. Please use
457 CreateConfig() instead. NOTE: the latter uses 'on_completion',
458 which is a function taking an argument, wherease CreateRPC uses
459 'callback' which is a function without arguments.
462 service: Optional string; for backwards compatibility, must be
464 deadline: Optional int or float, deadline for calls in seconds.
465 callback: Optional callable, a callback triggered when this rpc
466 completes; takes no arguments.
467 read_policy: Optional read policy; set to EVENTUAL_CONSISTENCY to
468 enable eventually consistent reads (i.e. reads that may be
469 satisfied from an older version of the datastore in some cases).
470 The default read policy may have to wait until in-flight
471 transactions are committed.
476 assert service
== 'datastore_v3'
477 conn
= _GetConnection()
479 if deadline
is not None:
480 config
= datastore_rpc
.Configuration(deadline
=deadline
)
481 rpc
= conn
._create
_rpc
(config
)
482 rpc
.callback
= callback
483 if read_policy
is not None:
484 rpc
.read_policy
= read_policy
def CreateConfig(**kwds):
  """Create a Configuration object for use in configuring datastore calls.

  This configuration can be passed to most datastore calls using the
  'config=...' argument.

  Args:
    deadline: Optional deadline; default None (which means the
      system default deadline will be used, typically 5 seconds).
    on_completion: Optional callback function; default None. If
      specified, it will be called with a UserRPC object as argument
      when an RPC completes.
    read_policy: Optional read policy; set to EVENTUAL_CONSISTENCY to
      enable eventually consistent reads (i.e. reads that may be
      satisfied from an older version of the datastore in some cases).
      The default read policy may have to wait until in-flight
      transactions are committed.
    **kwds: Other keyword arguments as long as they are supported by
      datastore_rpc.Configuration().

  Returns:
    A datastore_rpc.Configuration instance.
  """
  # All keywords are forwarded untouched; Configuration validates them itself.
  config_factory = datastore_rpc.Configuration
  return config_factory(**kwds)
def CreateTransactionOptions(**kwds):
  """Create a configuration object for use in configuring transactions.

  This configuration can be passed as run_in_transaction_option's first
  argument.

  Args:
    deadline: Optional deadline; default None (which means the
      system default deadline will be used, typically 5 seconds).
    on_completion: Optional callback function; default None. If
      specified, it will be called with a UserRPC object as argument
      when an RPC completes.
    xg: set to true to allow cross-group transactions (high replication
      datastore only)
    retries: set the number of retries for a transaction
    **kwds: Other keyword arguments as long as they are supported by
      datastore_rpc.TransactionOptions().

  Returns:
    A datastore_rpc.TransactionOptions instance.
  """
  # All keywords are forwarded untouched; TransactionOptions validates them.
  options_factory = datastore_rpc.TransactionOptions
  return options_factory(**kwds)
538 def PutAsync(entities
, **kwargs
):
539 """Asynchronously store one or more entities in the datastore.
541 Identical to datastore.Put() except returns an asynchronous object. Call
542 get_result() on the return value to block on the call and get the results.
544 extra_hook
= kwargs
.pop('extra_hook', None)
545 config
= _GetConfigFromKwargs(kwargs
)
546 if getattr(config
, 'read_policy', None) == EVENTUAL_CONSISTENCY
:
547 raise datastore_errors
.BadRequestError(
548 'read_policy is only supported on read operations.')
549 entities
, multiple
= NormalizeAndTypeCheck(entities
, Entity
)
551 for entity
in entities
:
552 if entity
.is_projection():
553 raise datastore_errors
.BadRequestError(
554 'Cannot put a partial entity: %s' % entity
)
555 if not entity
.kind() or not entity
.app():
556 raise datastore_errors
.BadRequestError(
557 'App and kind must not be empty, in entity: %s' % entity
)
559 def local_extra_hook(keys
):
561 num_entities
= len(entities
)
562 if num_keys
!= num_entities
:
563 raise datastore_errors
.InternalError(
564 'Put accepted %d entities but returned %d keys.' %
565 (num_entities
, num_keys
))
567 for entity
, key
in zip(entities
, keys
):
568 if entity
._Entity
__key
._Key
__reference
!= key
._Key
__reference
:
569 assert not entity
._Entity
__key
.has_id_or_name()
570 entity
._Entity
__key
._Key
__reference
.CopyFrom(key
._Key
__reference
)
578 return extra_hook(result
)
581 return _GetConnection().async_put(config
, entities
, local_extra_hook
)
def Put(entities, **kwargs):
  """Store one or more entities in the datastore.

  The entities may be new or previously existing. For new entities, Put() will
  fill in the app id and key assigned by the datastore.

  If the argument is a single Entity, a single Key will be returned. If the
  argument is a list of Entity, a list of Keys will be returned.

  Args:
    entities: Entity or list of Entities
    config: Optional Configuration to use for this request, must be specified
      as a keyword argument.

  Raises:
    TransactionFailedError, if the Put could not be committed.
  """
  # Synchronous facade: delegate to the async variant and block on its result.
  async_result = PutAsync(entities, **kwargs)
  return async_result.get_result()
607 def GetAsync(keys
, **kwargs
):
608 """Asynchronously retrieves one or more entities from the datastore.
610 Identical to datastore.Get() except returns an asynchronous object. Call
611 get_result() on the return value to block on the call and get the results.
613 extra_hook
= kwargs
.pop('extra_hook', None)
614 config
= _GetConfigFromKwargs(kwargs
)
615 keys
, multiple
= NormalizeAndTypeCheckKeys(keys
)
617 def local_extra_hook(entities
):
621 if entities
[0] is None:
622 raise datastore_errors
.EntityNotFoundError()
625 return extra_hook(result
)
628 return _GetConnection().async_get(config
, keys
, local_extra_hook
)
def Get(keys, **kwargs):
  """Retrieves one or more entities from the datastore.

  Retrieves the entity or entities with the given key(s) from the datastore
  and returns them as fully populated Entity objects, as defined below. If
  there is an error, raises a subclass of datastore_errors.Error.

  If keys is a single key or string, an Entity will be returned, or
  EntityNotFoundError will be raised if no existing entity matches the key.

  However, if keys is a list or tuple, a list of entities will be returned
  that corresponds to the sequence of keys. It will include entities for keys
  that were found and None placeholders for keys that were not found.

  Args:
    keys: Key or string or list of Keys or strings
    config: Optional Configuration to use for this request, must be specified
      as a keyword argument.

  Returns:
    Entity or list of Entity objects
  """
  # Synchronous facade: delegate to the async variant and block on its result.
  async_result = GetAsync(keys, **kwargs)
  return async_result.get_result()
655 def GetIndexesAsync(**kwargs
):
656 """Asynchronously retrieves the application indexes and their states.
658 Identical to GetIndexes() except returns an asynchronous object. Call
659 get_result() on the return value to block on the call and get the results.
661 extra_hook
= kwargs
.pop('extra_hook', None)
662 config
= _GetConfigFromKwargs(kwargs
)
664 def local_extra_hook(result
):
666 return extra_hook(result
)
669 return _GetConnection().async_get_indexes(config
, local_extra_hook
)
def GetIndexes(**kwargs):
  """Retrieves the application indexes and their states.

  Args:
    config: Optional Configuration to use for this request, must be specified
      as a keyword argument.

  Returns:
    A list of (Index, Index.[BUILDING|SERVING|DELETING|ERROR]) tuples.
    An index can be in the following states:
      Index.BUILDING: Index is being built and therefore can not serve queries
      Index.SERVING: Index is ready to service queries
      Index.DELETING: Index is being deleted
      Index.ERROR: Index encounted an error in the BUILDING state
  """
  # Synchronous facade: delegate to the async variant and block on its result.
  async_result = GetIndexesAsync(**kwargs)
  return async_result.get_result()
def DeleteAsync(keys, **kwargs):
  """Asynchronously deletes one or more entities from the datastore.

  Identical to datastore.Delete() except returns an asynchronous object. Call
  get_result() on the return value to block on the call.
  """
  config = _GetConfigFromKwargs(kwargs)

  # Deletes are writes, so an eventual-consistency read policy is invalid.
  if getattr(config, 'read_policy', None) == EVENTUAL_CONSISTENCY:
    raise datastore_errors.BadRequestError(
        'read_policy is only supported on read operations.')

  # The "multiple" flag is irrelevant here; only the normalized keys matter.
  normalized_keys, _ = NormalizeAndTypeCheckKeys(keys)

  return _GetConnection().async_delete(config, normalized_keys)
def Delete(keys, **kwargs):
  """Deletes one or more entities from the datastore. Use with care!

  Deletes the given entity(ies) from the datastore. You can only delete
  entities from your app. If there is an error, raises a subclass of
  datastore_errors.Error.

  Args:
    # the primary key(s) of the entity(ies) to delete
    keys: Key or string or list of Keys or strings
    config: Optional Configuration to use for this request, must be specified
      as a keyword argument.

  Raises:
    TransactionFailedError, if the Delete could not be committed.
  """
  # Synchronous facade: delegate to the async variant and block on its result.
  async_result = DeleteAsync(keys, **kwargs)
  return async_result.get_result()
724 """A datastore entity.
726 Includes read-only accessors for app id, kind, and primary key. Also
727 provides dictionary-style access to properties.
733 def __init__(self
, kind
, parent
=None, _app
=None, name
=None, id=None,
734 unindexed_properties
=[], namespace
=None, **kwds
):
735 """Constructor. Takes the kind and transaction root, which cannot be
736 changed after the entity is constructed, and an optional parent. Raises
737 BadArgumentError or BadKeyError if kind is invalid or parent is not an
738 existing Entity or Key in the datastore.
743 # if provided, this entity's parent. Its key must be complete.
744 parent: Entity or Key
745 # if provided, this entity's name.
747 # if provided, this entity's id.
749 # if provided, a sequence of property names that should not be indexed
750 # by the built-in single property indices.
751 unindexed_properties: list or tuple of strings
753 # if provided, overrides the default namespace_manager setting.
760 ref
= entity_pb
.Reference()
761 _app
= datastore_types
.ResolveAppId(_app
)
764 _namespace
= kwds
.pop('_namespace', None)
767 raise datastore_errors
.BadArgumentError(
768 'Excess keyword arguments ' + repr(kwds
))
773 if namespace
is None:
774 namespace
= _namespace
775 elif _namespace
is not None:
776 raise datastore_errors
.BadArgumentError(
777 "Must not set both _namespace and namespace parameters.")
779 datastore_types
.ValidateString(kind
, 'kind',
780 datastore_errors
.BadArgumentError
)
782 if parent
is not None:
783 parent
= _GetCompleteKeyOrError(parent
)
784 if _app
!= parent
.app():
785 raise datastore_errors
.BadArgumentError(
786 " %s doesn't match parent's app %s" %
787 (_app
, parent
.app()))
790 if namespace
is None:
791 namespace
= parent
.namespace()
792 elif namespace
!= parent
.namespace():
793 raise datastore_errors
.BadArgumentError(
794 " %s doesn't match parent's namespace %s" %
795 (namespace
, parent
.namespace()))
796 ref
.CopyFrom(parent
._Key
__reference
)
798 namespace
= datastore_types
.ResolveNamespace(namespace
)
799 datastore_types
.SetNamespace(ref
, namespace
)
801 last_path
= ref
.mutable_path().add_element()
802 last_path
.set_type(kind
.encode('utf-8'))
804 if name
is not None and id is not None:
805 raise datastore_errors
.BadArgumentError(
806 "Cannot set both name and id on an Entity")
810 datastore_types
.ValidateString(name
, 'name')
811 last_path
.set_name(name
.encode('utf-8'))
814 datastore_types
.ValidateInteger(id, 'id')
817 self
.set_unindexed_properties(unindexed_properties
)
819 self
.__key
= Key
._FromPb
(ref
)
822 """Returns the name of the application that created this entity, a
823 string or None if not set.
825 return self
.__key
.app()
828 """Returns the namespace of this entity, a string or None."""
829 return self
.__key
.namespace()
832 """Returns this entity's kind, a string."""
833 return self
.__key
.kind()
836 """Returns if this entity has been saved to the datastore."""
837 last_path
= self
.__key
._Key
__reference
.path().element_list()[-1]
838 return ((last_path
.has_name() ^ last_path
.has_id()) and
839 self
.__key
.has_id_or_name())
841 def is_projection(self
):
842 """Returns if this entity is a projection from full entity.
845 - may not contain all properties from the original entity;
846 - only contain single values for lists;
847 - may not contain values with the same type as the original entity.
849 return self
.__projection
852 """Returns this entity's primary key, a Key instance."""
856 """Returns this entity's parent, as a Key. If this entity has no parent,
859 return self
.key().parent()
  def entity_group(self):
    """Returns this entity's entity group as a Key.

    Note that the returned Key will be incomplete if this is a root entity
    and its key is incomplete.
    """
    return self.key().entity_group()
  def unindexed_properties(self):
    """Returns this entity's unindexed properties, as a frozenset of strings."""
    # getattr guards entities created without going through __init__.
    # NOTE(review): the [] default's type differs from the documented
    # frozenset -- confirm callers only iterate / test membership.
    return getattr(self, '_Entity__unindexed_properties', [])
874 def set_unindexed_properties(self
, unindexed_properties
):
876 unindexed_properties
, multiple
= NormalizeAndTypeCheck(unindexed_properties
, basestring
)
878 raise datastore_errors
.BadArgumentError(
879 'unindexed_properties must be a sequence; received %s (a %s).' %
880 (unindexed_properties
, typename(unindexed_properties
)))
881 for prop
in unindexed_properties
:
882 datastore_types
.ValidateProperty(prop
, None)
883 self
.__unindexed
_properties
= frozenset(unindexed_properties
)
  def __setitem__(self, name, value):
    """Implements the [] operator. Used to set property value(s).

    If the property name is the empty string or not a string, raises
    BadPropertyError. If the value is not a supported type, raises
    BadValueError.
    """
    # Validate first, then bypass any subclass overrides with the dict
    # implementation so storage stays consistent.
    datastore_types.ValidateProperty(name, value)
    dict.__setitem__(self, name, value)
  def setdefault(self, name, value):
    """If the property exists, returns its value. Otherwise sets it to value.

    If the property name is the empty string or not a string, raises
    BadPropertyError. If the value is not a supported type, raises
    BadValueError.
    """
    # Validation happens unconditionally, even when the property already
    # exists and `value` will be discarded, mirroring __setitem__.
    datastore_types.ValidateProperty(name, value)
    return dict.setdefault(self, name, value)
907 def update(self
, other
):
908 """Updates this entity's properties from the values in other.
910 If any property name is the empty string or not a string, raises
911 BadPropertyError. If any value is not a supported type, raises
914 for name
, value
in other
.items():
915 self
.__setitem
__(name
, value
)
918 """The copy method is not supported.
920 raise NotImplementedError('Entity does not support the copy() method.')
923 """Returns an XML representation of this entity. Atom and gd:namespace
924 properties are converted to XML according to their respective schemas. For
925 more information, see:
927 http://www.atomenabled.org/developers/syndication/
928 http://code.google.com/apis/gdata/common-elements.html
930 This is *not* optimized. It shouldn't be used anywhere near code that's
931 performance-critical.
934 xml
= u
'<entity kind=%s' % saxutils
.quoteattr(self
.kind())
935 if self
.__key
.has_id_or_name():
936 xml
+= ' key=%s' % saxutils
.quoteattr(str(self
.__key
))
938 if self
.__key
.has_id_or_name():
939 xml
+= '\n <key>%s</key>' % self
.__key
.ToTagUri()
944 properties
= self
.keys()
947 xml
+= '\n ' + '\n '.join(self
._PropertiesToXml
(properties
))
950 xml
+= '\n</entity>\n'
953 def _PropertiesToXml(self
, properties
):
954 """ Returns a list of the XML representations of each of the given
955 properties. Ignores properties that don't exist in this entity.
958 properties: string or list of strings
965 for propname
in properties
:
966 if not self
.has_key(propname
):
969 propname_xml
= saxutils
.quoteattr(propname
)
971 values
= self
[propname
]
972 if not isinstance(values
, list):
975 proptype
= datastore_types
.PropertyTypeName(values
[0])
976 proptype_xml
= saxutils
.quoteattr(proptype
)
978 escaped_values
= self
._XmlEscapeValues
(propname
)
979 open_tag
= u
'<property name=%s type=%s>' % (propname_xml
, proptype_xml
)
980 close_tag
= u
'</property>'
981 xml_properties
+= [open_tag
+ val
+ close_tag
for val
in escaped_values
]
983 return xml_properties
985 def _XmlEscapeValues(self
, property):
986 """ Returns a list of the XML-escaped string values for the given property.
987 Raises an AssertionError if the property doesn't exist.
995 assert self
.has_key(property)
998 values
= self
[property]
999 if not isinstance(values
, list):
1003 if hasattr(val
, 'ToXml'):
1004 xml
.append(val
.ToXml())
1009 xml
.append(saxutils
.escape(unicode(val
)))
1014 """Converts this Entity to its protocol buffer representation.
1019 return self
._ToPb
(False)
1021 def _ToPb(self
, mark_key_as_saved
=True):
1022 """Converts this Entity to its protocol buffer representation. Not
1023 intended to be used by application developers.
1033 pb
= entity_pb
.EntityProto()
1034 pb
.mutable_key().CopyFrom(self
.key()._ToPb
())
1035 last_path
= pb
.key().path().element_list()[-1]
1037 if mark_key_as_saved
and last_path
.has_name() and last_path
.has_id():
1038 last_path
.clear_id()
1041 group
= pb
.mutable_entity_group()
1042 if self
.__key
.has_id_or_name():
1043 root
= pb
.key().path().element(0)
1044 group
.add_element().CopyFrom(root
)
1047 properties
= self
.items()
1049 for (name
, values
) in properties
:
1050 properties
= datastore_types
.ToPropertyPb(name
, values
)
1051 if not isinstance(properties
, list):
1052 properties
= [properties
]
1054 for prop
in properties
:
1055 if ((prop
.has_meaning() and
1056 prop
.meaning() in datastore_types
._RAW
_PROPERTY
_MEANINGS
) or
1057 name
in self
.unindexed_properties()):
1058 pb
.raw_property_list().append(prop
)
1060 pb
.property_list().append(prop
)
1063 if pb
.property_size() > _MAX_INDEXED_PROPERTIES
:
1064 raise datastore_errors
.BadRequestError(
1065 'Too many indexed properties for entity %r.' % self
.key())
1070 def FromPb(pb
, validate_reserved_properties
=True,
1071 default_kind
='<not specified>'):
1072 """Static factory method. Returns the Entity representation of the
1073 given protocol buffer (datastore_pb.Entity).
1076 pb: datastore_pb.Entity or str encoding of a datastore_pb.Entity
1077 validate_reserved_properties: deprecated
1078 default_kind: str, the kind to use if the pb has no key.
1081 Entity: the Entity representation of pb
1084 if isinstance(pb
, str):
1085 real_pb
= entity_pb
.EntityProto()
1086 real_pb
.ParsePartialFromString(pb
)
1089 return Entity
._FromPb
(
1090 pb
, require_valid_key
=False, default_kind
=default_kind
)
1093 def _FromPb(pb
, require_valid_key
=True, default_kind
='<not specified>'):
1094 """Static factory method. Returns the Entity representation of the
1095 given protocol buffer (datastore_pb.Entity). Not intended to be used by
1096 application developers.
1098 The Entity PB's key must be complete. If it isn't, an AssertionError is
1102 # a protocol buffer Entity
1103 pb: datastore_pb.Entity
1104 default_kind: str, the kind to use if the pb has no key.
1107 # the Entity representation of the argument
1111 if not pb
.key().path().element_size():
1112 pb
.mutable_key().CopyFrom(Key
.from_path(default_kind
, 0)._ToPb
())
1114 last_path
= pb
.key().path().element_list()[-1]
1115 if require_valid_key
:
1116 assert last_path
.has_id() ^ last_path
.has_name()
1117 if last_path
.has_id():
1118 assert last_path
.id() != 0
1120 assert last_path
.has_name()
1121 assert last_path
.name()
1124 unindexed_properties
= [unicode(p
.name(), 'utf-8')
1125 for p
in pb
.raw_property_list()]
1128 if pb
.key().has_name_space():
1129 namespace
= pb
.key().name_space()
1132 e
= Entity(unicode(last_path
.type(), 'utf-8'),
1133 unindexed_properties
=unindexed_properties
,
1134 _app
=pb
.key().app(), namespace
=namespace
)
1135 ref
= e
.__key
._Key
__reference
1136 ref
.CopyFrom(pb
.key())
1140 temporary_values
= {}
1142 for prop_list
in (pb
.property_list(), pb
.raw_property_list()):
1143 for prop
in prop_list
:
1144 if prop
.meaning() == entity_pb
.Property
.INDEX_VALUE
:
1145 e
.__projection
= True
1147 value
= datastore_types
.FromPropertyPb(prop
)
1148 except (AssertionError, AttributeError, TypeError, ValueError), e
:
1149 raise datastore_errors
.Error(
1150 'Property %s is corrupt in the datastore:\n%s' %
1151 (prop
.name(), traceback
.format_exc()))
1153 multiple
= prop
.multiple()
1158 cur_value
= temporary_values
.get(name
)
1159 if cur_value
is None:
1160 temporary_values
[name
] = value
1161 elif not multiple
or not isinstance(cur_value
, list):
1162 raise datastore_errors
.Error(
1163 'Property %s is corrupt in the datastore; it has multiple '
1164 'values, but is not marked as multiply valued.' % name
)
1166 cur_value
.extend(value
)
1170 for name
, value
in temporary_values
.iteritems():
1171 decoded_name
= unicode(name
, 'utf-8')
1176 datastore_types
.ValidateReadProperty(decoded_name
, value
)
1178 dict.__setitem
__(e
, decoded_name
, value
)
1184 """A datastore query.
1186 (Instead of this, consider using appengine.ext.gql.Query! It provides a
1187 query language interface on top of the same functionality.)
1189 Queries are used to retrieve entities that match certain criteria, including
1190 app id, kind, and property filters. Results may also be sorted by properties.
1192 App id and kind are required. Only entities from the given app, of the given
1193 type, are returned. If an ancestor is set, with Ancestor(), only entities
1194 with that ancestor are returned.
1196 Property filters are used to provide criteria based on individual property
1197 values. A filter compares a specific property in each entity to a given
1198 value or list of possible values.
1200 An entity is returned if its property values match *all* of the query's
1201 filters. In other words, filters are combined with AND, not OR. If an
1202 entity does not have a value for a property used in a filter, it is not
1205 Property filters map filter strings of the form '<property name> <operator>'
1206 to filter values. Use dictionary accessors to set property filters, like so:
1208 > query = Query('Person')
1209 > query['name ='] = 'Ryan'
1210 > query['age >='] = 21
This query returns all Person entities where the name property is 'Ryan'
and the age property is at least 21.
1215 Another way to build this query is:
1217 > query = Query('Person')
1218 > query.update({'name =': 'Ryan', 'age >=': 21})
1220 The supported operators are =, >, <, >=, and <=. Only one inequality
1221 filter may be used per query. Any number of equals filters may be used in
1224 A filter value may be a list or tuple of values. This is interpreted as
1225 multiple filters with the same filter string and different values, all ANDed
1226 together. For example, this query returns everyone with the tags "google"
1229 > Query('Person', {'tag =': ('google', 'app engine')})
1231 Result entities can be returned in different orders. Use the Order()
1232 method to specify properties that results will be sorted by, and in which
1235 Note that filters and orderings may be provided at any time before the query
1236 is run. When the query is fully specified, Run() runs the query and returns
1237 an iterator. The query results can be accessed through the iterator.
1239 A query object may be reused after it's been run. Its filters and
1240 orderings can be changed to create a modified query.
1242 If you know how many result entities you need, use Get() to fetch them:
1244 > query = Query('Person', {'age >': 21})
1245 > for person in query.Get(4):
1246 > print 'I have four pints left. Have one on me, %s!' % person['name']
1248 If you don't know how many results you need, or if you need them all, you
1249 can get an iterator over the results by calling Run():
1251 > for person in Query('Person', {'age >': 21}).Run():
1252 > print 'Have a pint on me, %s!' % person['name']
1254 Get() is more efficient than Run(), so use Get() whenever possible.
1256 Finally, the Count() method returns the number of result entities matched by
1257 the query. The returned count is cached; successive Count() calls will not
1258 re-scan the datastore unless the query is changed.
  # Sort directions; aliases of the datastore_query.PropertyOrder values.
  ASCENDING = datastore_query.PropertyOrder.ASCENDING
  DESCENDING = datastore_query.PropertyOrder.DESCENDING

  # Query plan hints; aliases of the datastore_query.QueryOptions values.
  ORDER_FIRST = datastore_query.QueryOptions.ORDER_FIRST
  ANCESTOR_FIRST = datastore_query.QueryOptions.ANCESTOR_FIRST
  FILTER_FIRST = datastore_query.QueryOptions.FILTER_FIRST

  # Supported filter operators. '==' is accepted as a synonym for '=' by
  # seeding the dict with it before merging in the canonical operator table.
  OPERATORS = {'==': datastore_query.PropertyFilter._OPERATORS['=']}
  OPERATORS.update(datastore_query.PropertyFilter._OPERATORS)
  INEQUALITY_OPERATORS = datastore_query.PropertyFilter._INEQUALITY_OPERATORS
  UPPERBOUND_INEQUALITY_OPERATORS = frozenset(['<', '<='])
  # Parses '<property> <operator>' filter strings: group 1 is the property
  # name, group 3 is the (optional) operator; group 2 is unused.
  FILTER_REGEX = re.compile(
      '^\s*([^\s]+)(\s+(%s)\s*)?$' % '|'.join(OPERATORS),
      re.IGNORECASE | re.UNICODE)

  # Wire-format ancestor key, set by Ancestor(); None means no ancestor.
  __ancestor_pb = None

  # Zero-argument callables captured from the last Run()/Count(); None until
  # the query has actually been executed.
  __index_list_source = None
  __cursor_source = None
  __compiled_query_source = None

  # Maps filter string -> insertion counter so the filter predicate can be
  # rebuilt in a deterministic order.
  __filter_order = None
  __filter_counter = 0

  # The single property allowed to carry inequality filters, plus a count of
  # how many inequality filters currently reference it.
  __inequality_prop = None
  __inequality_count = 0
1302 def __init__(self
, kind
=None, filters
={}, _app
=None, keys_only
=False,
1303 compile=True, cursor
=None, namespace
=None, end_cursor
=None,
1304 projection
=None, distinct
=None, _namespace
=None):
1307 Raises BadArgumentError if kind is not a string. Raises BadValueError or
1308 BadFilterError if filters is not a dictionary of valid filters.
1311 namespace: string, the namespace to query.
1312 kind: string, the kind of entities to query, or None.
1313 filters: dict, initial set of filters.
1314 keys_only: boolean, if keys should be returned instead of entities.
1315 projection: iterable of property names to project.
1316 distinct: boolean, if projection should be distinct.
1317 compile: boolean, if the query should generate cursors.
1318 cursor: datastore_query.Cursor, the start cursor to use.
1319 end_cursor: datastore_query.Cursor, the end cursor to use.
1320 _namespace: deprecated, use namespace instead.
1330 if namespace
is None:
1331 namespace
= _namespace
1332 elif _namespace
is not None:
1333 raise datastore_errors
.BadArgumentError(
1334 "Must not set both _namespace and namespace parameters.")
1336 if kind
is not None:
1337 datastore_types
.ValidateString(kind
, 'kind',
1338 datastore_errors
.BadArgumentError
)
1341 self
.__orderings
= []
1342 self
.__filter
_order
= {}
1343 self
.update(filters
)
1345 self
.__app
= datastore_types
.ResolveAppId(_app
)
1346 self
.__namespace
= datastore_types
.ResolveNamespace(namespace
)
1349 self
.__query
_options
= datastore_query
.QueryOptions(
1350 keys_only
=keys_only
,
1351 produce_cursors
=compile,
1352 start_cursor
=cursor
,
1353 end_cursor
=end_cursor
,
1354 projection
=projection
)
1357 if not self
.__query
_options
.projection
:
1358 raise datastore_errors
.BadQueryError(
1359 'cannot specify distinct without a projection')
1360 self
.__distinct
= True
1361 self
.__group
_by
= self
.__query
_options
.projection
  def Order(self, *orderings):
    """Specify how the query results should be sorted.

    Result entities will be sorted by the first property argument, then by
    the second, and so on; each argument is either a property name string or
    a (property, direction) tuple. Order() may be called multiple times;
    each call resets the sort order. If an inequality filter exists on this
    query, its property must be the first one passed to Order(). A sort
    order implies an existence filter: entities without the property are
    excluded from the results.

    Raises BadArgumentError if any argument is of the wrong format.
    """
    orderings = list(orderings)

    for (order, i) in zip(orderings, range(len(orderings))):
      if not (isinstance(order, basestring) or
              (isinstance(order, tuple) and len(order) in [2, 3])):
        raise datastore_errors.BadArgumentError(
            'Order() expects strings or 2- or 3-tuples; received %s (a %s). ' %
            (order, typename(order)))

      if isinstance(order, basestring):
        # NOTE(review): upstream normalizes a bare string into a 1-tuple
        # here; that statement appears truncated from this copy.
        datastore_types.ValidateString(order[0], 'sort order property',
                                       datastore_errors.BadArgumentError)
      # NOTE(review): the binding of the local 'property' (property =
      # order[0]) appears truncated; as written, 'property' below would
      # resolve to the builtin — confirm against upstream.

      direction = order[-1]
      if direction not in (Query.ASCENDING, Query.DESCENDING):
        # NOTE(review): the raise's argument (str(direction)) and the
        # fallback branch structure appear truncated from this copy.
        raise datastore_errors.BadArgumentError(
            'Order() expects Query.ASCENDING or DESCENDING; received %s' %
        direction = Query.ASCENDING

      if (self.__kind is None and
          (property != datastore_types.KEY_SPECIAL_PROPERTY or
           direction != Query.ASCENDING)):
        raise datastore_errors.BadArgumentError(
            'Only %s ascending orders are supported on kindless queries' %
            datastore_types.KEY_SPECIAL_PROPERTY)

      orderings[i] = (property, direction)

    # An inequality-filtered property, if any, must sort first.
    if (orderings and self.__inequality_prop and
        orderings[0][0] != self.__inequality_prop):
      raise datastore_errors.BadArgumentError(
          'First ordering property must be the same as inequality filter '
          'property, if specified for this query; received %s, expected %s' %
          (orderings[0][0], self.__inequality_prop))

    self.__orderings = orderings
    # NOTE(review): upstream returns self for chaining; the return statement
    # appears truncated from this copy.
  def Hint(self, hint):
    """Sets a hint for how this query should run.

    Use FILTER_FIRST if the first filter matches few results,
    ANCESTOR_FIRST if the ancestor has few descendants, and ORDER_FIRST if
    the query has a sort order and the result set is large or only the
    first few results are needed. Note that hints are currently ignored in
    the v3 datastore.

    Args:
      hint: one of Query.[ORDER_FIRST, ANCESTOR_FIRST, FILTER_FIRST].
    """
    # QueryOptions is immutable, so a changed hint requires rebuilding the
    # options object on top of the current one.
    if hint is not self.__query_options.hint:
      self.__query_options = datastore_query.QueryOptions(
          hint=hint, config=self.__query_options)
    # NOTE(review): upstream returns self for chaining; the return statement
    # appears truncated from this copy.
  def Ancestor(self, ancestor):
    """Sets an ancestor for this query.

    This restricts the query to only return result entities that are
    descended from the given entity: all results will have the ancestor as
    their parent, or parent's parent, etc.

    Raises BadArgumentError or BadKeyError if ancestor is not an existing
    Entity or Key in the datastore.

    Args:
      ancestor: Entity or Key (the key must be complete).
    """
    self.__ancestor_pb = _GetCompleteKeyOrError(ancestor)._ToPb()
    # NOTE(review): upstream returns self for chaining; the return statement
    # appears truncated from this copy.
1512 def IsKeysOnly(self
):
1513 """Returns True if this query is keys only, false otherwise."""
1514 return self
.__query
_options
.keys_only
1516 def GetQueryOptions(self
):
1517 """Returns a datastore_query.QueryOptions for the current instance."""
1518 return self
.__query
_options
1521 """Returns a datastore_query.Query for the current instance."""
1522 return datastore_query
.Query(app
=self
.__app
,
1523 namespace
=self
.__namespace
,
1525 ancestor
=self
.__ancestor
_pb
,
1526 filter_predicate
=self
.GetFilterPredicate(),
1527 order
=self
.GetOrder(),
1528 group_by
=self
.__group
_by
)
1531 """Gets a datastore_query.Order for the current instance.
1534 datastore_query.Order or None if there are no sort orders set on the
1539 orders
= [datastore_query
.PropertyOrder(property, direction
)
1540 for property, direction
in self
.__orderings
]
1542 return datastore_query
.CompositeOrder(orders
)
  def GetFilterPredicate(self):
    """Returns a datastore_query.FilterPredicate for the current instance.

    Returns:
      datastore_query.FilterPredicate or None if no filters are set on the
      current query.
    """
    # Rebuild (insertion_order, filter_string) pairs so filters are applied
    # in the order they were added.
    ordered_filters = [(i, f) for f, i in self.__filter_order.iteritems()]
    ordered_filters.sort()

    property_filters = []
    for _, filter_str in ordered_filters:
      if filter_str not in self:
        # NOTE(review): the loop body here (a 'continue' for filters that
        # were deleted from the dict) appears truncated from this copy.

      values = self[filter_str]
      match = self._CheckFilter(filter_str, values)
      name = match.group(1)
      # NOTE(review): the extraction of the operator (op = match.group(3))
      # appears truncated from this copy.

      if op is None or op == '==':
        # NOTE(review): the '==' -> '=' normalization appears truncated.

      property_filters.append(datastore_query.make_filter(name, op, values))

    if property_filters:
      return datastore_query.CompositeFilter(
          datastore_query.CompositeFilter.AND,
      # NOTE(review): the property_filters argument, closing parenthesis,
      # and the final 'return None' appear truncated from this copy.
1579 def GetDistinct(self
):
1580 """Returns True if the current instance is distinct.
1583 A boolean indicating if the distinct flag is set.
1585 return self
.__distinct
  def GetIndexList(self):
    """Get the index list from the last run of this query.

    Returns:
      A list of indexes used by the last run of this query.

    Raises:
      AssertionError: The query has not yet been run.
    """
    # The callable is captured by Run()/Count(); None before any execution.
    index_list_function = self.__index_list_source
    if index_list_function:
      return index_list_function()
    raise AssertionError('No index list available because this query has not '
    # NOTE(review): the trailing string fragment ("'been executed')") appears
    # truncated from this copy.
  def GetCursor(self):
    """Get the cursor from the last run of this query.

    The source of this cursor varies depending on what the last call was:
      - Run: A cursor that points immediately after the last result pulled
        off the returned iterator.
      - Get: A cursor that points immediately after the last result in the
        returned list.
      - Count: A cursor that points immediately after the last result
        counted.

    Returns:
      A datastore_query.Cursor object that can be used in subsequent query
      requests.

    Raises:
      AssertionError: The query has not yet been run or cannot be compiled.
    """
    cursor_function = self.__cursor_source
    # NOTE(review): upstream guards the call with 'if cursor_function:' and
    # only returns when the produced cursor is truthy; those guard lines
    # appear truncated from this copy.
    cursor = cursor_function()
    raise AssertionError('No cursor available, either this query has not '
                         'been executed or there is no compilation '
                         'available for this kind of query')
1630 def GetBatcher(self
, config
=None):
1631 """Runs this query and returns a datastore_query.Batcher.
1633 This is not intended to be used by application developers. Use Get()
1637 config: Optional Configuration to use for this request.
1640 # an iterator that provides access to the query results
1646 query_options
= self
.GetQueryOptions().merge(config
)
1647 if self
.__distinct
and query_options
.projection
!= self
.__group
_by
:
1652 raise datastore_errors
.BadArgumentError(
1653 'cannot override projection when distinct is set')
1654 return self
.GetQuery().run(_GetConnection(), query_options
)
  def Run(self, **kwargs):
    """Runs this query and returns an iterator over the results.

    If a filter string is invalid, raises BadFilterError. If a filter value
    is invalid, raises BadValueError. If an IN filter is provided, and a
    sort order on another property is provided, raises BadQueryError.

    If you know in advance how many results you want, use limit=#.

    Args:
      kwargs: Any keyword arguments accepted by datastore_query.QueryOptions().

    Returns:
      an iterator that provides access to the query results
    """
    config = _GetConfigFromKwargs(kwargs, convert_rpc=True,
                                  config_class=datastore_query.QueryOptions)
    itr = Iterator(self.GetBatcher(config=config))

    # Capture per-run accessors so GetIndexList/GetCursor/_GetCompiledQuery
    # can report on this execution later.
    self.__index_list_source = itr.GetIndexList
    self.__cursor_source = itr.cursor
    self.__compiled_query_source = itr._compiled_query
    # NOTE(review): 'return itr' appears truncated from this copy.
  def Get(self, limit, offset=0, **kwargs):
    """Deprecated, use list(Run(...)) instead.

    Args:
      limit: int or long representing the maximum number of entities to return.
      offset: int or long representing the number of entities to skip
      kwargs: Any keyword arguments accepted by datastore_query.QueryOptions().

    Returns:
      a list of entities
    """
    # NOTE(review): upstream only defaults batch_size when limit is None;
    # the 'if limit is None:' guard appears truncated from this copy.
    kwargs.setdefault('batch_size', _MAX_INT_32)
    return list(self.Run(limit=limit, offset=offset, **kwargs))
  def Count(self, limit=1000, **kwargs):
    """Returns the number of entities that this query matches.

    Args:
      limit: a number or None. If there are more results than this, stop
        short and just return this number. Providing this argument makes
        the count operation more efficient.
      config: Optional Configuration to use for this request.

    Returns:
      The number of results.
    """
    original_offset = kwargs.pop('offset', 0)
    # NOTE(review): upstream chooses between these two assignments with an
    # 'if limit is None: ... else: ...' pair; those lines appear truncated
    # from this copy, so the second assignment always wins as written.
    offset = _MAX_INT_32
    offset = min(limit + original_offset, _MAX_INT_32)
    kwargs['offset'] = offset
    config = _GetConfigFromKwargs(kwargs, convert_rpc=True,
                                  config_class=datastore_query.QueryOptions)

    # Counting is implemented as skipping: the first batch's skipped-result
    # count (minus the caller's own offset) is the answer.
    batch = self.GetBatcher(config=config).next()
    self.__index_list_source = (
        lambda: [index for index, state in batch.index_list])
    self.__cursor_source = lambda: batch.cursor(0)
    self.__compiled_query_source = lambda: batch._compiled_query
    return max(0, batch.skipped_results - original_offset)
    # Part of __iter__ (its 'def' line is truncated from this copy): Query
    # deliberately refuses direct iteration; callers must use Run() first.
    raise NotImplementedError(
        'Query objects should not be used as iterators. Call Run() first.')
  def __getstate__(self):
    """Pickle support: copy state, dropping the unpicklable per-run
    callables captured by the last Run()/Count()."""
    state = self.__dict__.copy()
    state['_Query__index_list_source'] = None
    state['_Query__cursor_source'] = None
    state['_Query__compiled_query_source'] = None
    # NOTE(review): 'return state' appears truncated from this copy.
  def __setstate__(self, state):
    """Pickle support: restore state, upgrading pickles produced by older
    SDK versions that stored individual option attributes."""
    if '_Query__query_options' not in state:
      # Legacy pickle: rebuild the QueryOptions object from the old
      # per-option attributes, consuming (popping) them so they do not
      # linger in __dict__.
      state['_Query__query_options'] = datastore_query.QueryOptions(
          keys_only=state.pop('_Query__keys_only'),
          produce_cursors=state.pop('_Query__compile'),
          start_cursor=state.pop('_Query__cursor'),
          end_cursor=state.pop('_Query__end_cursor'))
    self.__dict__ = state
  def __setitem__(self, filter, value):
    """Implements the [] operator. Used to set filters.

    If the filter string is empty or not a string, raises BadFilterError. If
    the value is not a supported type, raises BadValueError.
    """
    if isinstance(value, tuple):
      # NOTE(review): the tuple-to-list conversion statement that upstream
      # performs here appears truncated from this copy.

    datastore_types.ValidateProperty(' ', value)
    match = self._CheckFilter(filter, value)
    property = match.group(1)
    operator = match.group(3)

    dict.__setitem__(self, filter, value)

    # Track which single property carries inequality filters so later
    # filters/orders can be validated against it.
    if (operator in self.INEQUALITY_OPERATORS and
        property != datastore_types._UNAPPLIED_LOG_TIMESTAMP_SPECIAL_PROPERTY):
      if self.__inequality_prop is None:
        self.__inequality_prop = property
      # NOTE(review): an 'else:' introducing this assert appears truncated
      # from this copy.
      assert self.__inequality_prop == property
      self.__inequality_count += 1

    # Record first-insertion order for deterministic predicate rebuilding.
    if filter not in self.__filter_order:
      self.__filter_order[filter] = self.__filter_counter
      self.__filter_counter += 1
1781 def setdefault(self
, filter, value
):
1782 """If the filter exists, returns its value. Otherwise sets it to value.
1784 If the property name is the empty string or not a string, raises
1785 BadPropertyError. If the value is not a supported type, raises
1788 datastore_types
.ValidateProperty(' ', value
)
1789 self
._CheckFilter
(filter, value
)
1790 return dict.setdefault(self
, filter, value
)
1792 def __delitem__(self
, filter):
1793 """Implements the del [] operator. Used to remove filters.
1795 dict.__delitem
__(self
, filter)
1796 del self
.__filter
_order
[filter]
1799 match
= Query
.FILTER_REGEX
.match(filter)
1800 property = match
.group(1)
1801 operator
= match
.group(3)
1803 if operator
in self
.INEQUALITY_OPERATORS
:
1804 assert self
.__inequality
_count
>= 1
1805 assert property == self
.__inequality
_prop
1806 self
.__inequality
_count
-= 1
1807 if self
.__inequality
_count
== 0:
1808 self
.__inequality
_prop
= None
1810 def update(self
, other
):
1811 """Updates this query's filters from the ones in other.
1813 If any filter string is invalid, raises BadFilterError. If any value is
1814 not a supported type, raises BadValueError.
1816 for filter, value
in other
.items():
1817 self
.__setitem
__(filter, value
)
1820 """The copy method is not supported.
1822 raise NotImplementedError('Query does not support the copy() method.')
  def _CheckFilter(self, filter, values):
    """Type check a filter string and list of values.

    Raises BadFilterError if the filter string is empty, not a string, or
    invalid. Raises BadValueError if the value type is not supported.

    Args:
      filter: String containing the filter text.
      values: List of associated filter values.

    Returns:
      re.MatchObject (never None) that matches the 'filter'. Group 1 is the
      property name, group 3 is the operator. (Group 2 is unused.)
    """
    # NOTE(review): upstream wraps the regex match in try/except TypeError
    # with an 'if not match:' check, which is why the identical raise
    # appears twice below; those guard lines appear truncated from this copy.
    match = Query.FILTER_REGEX.match(filter)
      raise datastore_errors.BadFilterError(
          'Could not parse filter string: %s' % str(filter))
      raise datastore_errors.BadFilterError(
          'Could not parse filter string: %s' % str(filter))

    property = match.group(1)
    operator = match.group(3)
    if operator is None:
      # NOTE(review): the defaulting of operator to '=' appears truncated.

    # Normalize values to a list.
    if isinstance(values, tuple):
      values = list(values)
    elif not isinstance(values, list):
      # NOTE(review): wrapping the scalar as [values] appears truncated.

    if isinstance(values[0], datastore_types._RAW_PROPERTY_TYPES):
      raise datastore_errors.BadValueError(
          'Filtering on %s properties is not supported.' % typename(values[0]))

    if (operator in self.INEQUALITY_OPERATORS and
        property != datastore_types._UNAPPLIED_LOG_TIMESTAMP_SPECIAL_PROPERTY):
      # Only one property may carry inequality filters, and it must match
      # the first sort order if any orders are set.
      if self.__inequality_prop and property != self.__inequality_prop:
        raise datastore_errors.BadFilterError(
            'Only one property per query may have inequality filters (%s).' %
            ', '.join(self.INEQUALITY_OPERATORS))
      elif len(self.__orderings) >= 1 and self.__orderings[0][0] != property:
        raise datastore_errors.BadFilterError(
            'Inequality operators (%s) must be on the same property as the '
            'first sort order, if any sort orders are supplied' %
            ', '.join(self.INEQUALITY_OPERATORS))

    # Kindless queries may only filter on the key and the unapplied-log
    # timestamp special properties.
    if (self.__kind is None and
        property != datastore_types.KEY_SPECIAL_PROPERTY and
        property != datastore_types._UNAPPLIED_LOG_TIMESTAMP_SPECIAL_PROPERTY):
      raise datastore_errors.BadFilterError(
          'Only %s filters are allowed on kindless queries.' %
          datastore_types.KEY_SPECIAL_PROPERTY)

    if property == datastore_types._UNAPPLIED_LOG_TIMESTAMP_SPECIAL_PROPERTY:
      # NOTE(review): an 'if self.__kind:' guard appears truncated before
      # this raise; as written the raise is unconditional.
      raise datastore_errors.BadFilterError(
          'Only kindless queries can have %s filters.' %
          datastore_types._UNAPPLIED_LOG_TIMESTAMP_SPECIAL_PROPERTY)
      if not operator in self.UPPERBOUND_INEQUALITY_OPERATORS:
        raise datastore_errors.BadFilterError(
            'Only %s operators are supported with %s filters.' % (
                self.UPPERBOUND_INEQUALITY_OPERATORS,
                datastore_types._UNAPPLIED_LOG_TIMESTAMP_SPECIAL_PROPERTY))

    if property in datastore_types._SPECIAL_PROPERTIES:
      # NOTE(review): additional special-property checks appear truncated
      # here before the key-specific validation below.
      if property == datastore_types.KEY_SPECIAL_PROPERTY:
        for value in values:
          if not isinstance(value, Key):
            raise datastore_errors.BadFilterError(
                '%s filter value must be a Key; received %s (a %s)' %
                (datastore_types.KEY_SPECIAL_PROPERTY, value, typename(value)))
    # NOTE(review): 'return match' appears truncated from this copy.
  def _Run(self, limit=None, offset=None,
           prefetch_count=None, next_count=None, **kwargs):
    """Deprecated, use Run() instead."""
    # Maps the legacy prefetch/next counts onto the modern
    # prefetch_size/batch_size QueryOptions names.
    return self.Run(limit=limit, offset=offset,
                    prefetch_size=prefetch_count, batch_size=next_count,
    # NOTE(review): the closing '**kwargs)' line appears truncated from
    # this copy.
  def _ToPb(self, limit=None, offset=None, count=None):
    # Builds the wire-format (protocol buffer) representation of this query,
    # layering the given limit/offset/count over the current options.
    query_options = datastore_query.QueryOptions(
        config=self.GetQueryOptions(),
    # NOTE(review): the limit/offset/batch_size keyword arguments and the
    # closing parenthesis appear truncated from this copy.
    return self.GetQuery()._to_pb(_GetConnection(), query_options)
  def _GetCompiledQuery(self):
    """Returns the internal-only pb representation of the last query run.

    Raises:
      AssertionError: Query not compiled or not yet executed.
    """
    compiled_query_function = self.__compiled_query_source
    if compiled_query_function:
      compiled_query = compiled_query_function()
      # NOTE(review): an 'if compiled_query:' guard appears truncated here,
      # so upstream only returns when the compilation is available.
      return compiled_query
    raise AssertionError('No compiled query available, either this query has '
                         'not been executed or there is no compilation '
                         'available for this kind of query')
  # Deprecated public aliases kept for backwards compatibility.
  GetCompiledQuery = _GetCompiledQuery
  GetCompiledCursor = GetCursor
def AllocateIdsAsync(model_key, size=None, **kwargs):
  """Asynchronously allocates a range of IDs.

  Identical to datastore.AllocateIds() except returns an asynchronous
  object. Call get_result() on the return value to block on the call and
  get the result.
  """
  # 'max' intentionally shadows the builtin to mirror the public kwarg name.
  max = kwargs.pop('max', None)
  config = _GetConfigFromKwargs(kwargs)
  if getattr(config, 'read_policy', None) == EVENTUAL_CONSISTENCY:
    raise datastore_errors.BadRequestError(
        'read_policy is only supported on read operations.')
  keys, _ = NormalizeAndTypeCheckKeys(model_key)

  # NOTE(review): an 'if len(keys) > 1:' guard appears truncated before this
  # raise; as written the raise is unconditional.
  raise datastore_errors.BadArgumentError(
      'Cannot allocate IDs for more than one model key at a time')

  rpc = _GetConnection().async_allocate_ids(config, keys[0], size, max)
  # NOTE(review): 'return rpc' appears truncated from this copy.
def AllocateIds(model_key, size=None, **kwargs):
  """Allocates a range of IDs of size or with max for the given key.

  Allocates a range of IDs in the datastore such that those IDs will not be
  automatically assigned to new entities. You can only allocate IDs for
  model keys from your app. If there is an error, raises a subclass of
  datastore_errors.Error.

  Either size or max must be provided but not both. With size, a range of
  that size is returned; with max, the largest range of IDs that is safe to
  use with an upper bound of max is returned. Use max only to reserve an
  existing numeric id range (e.g. bulk loading entities that already have
  IDs); otherwise use size.

  Args:
    model_key: Key or string to serve as a model specifying the ID sequence
      in which to allocate IDs.
    size: integer, number of IDs to allocate.
    max: integer, upper bound of the range of IDs to allocate.
    config: Optional Configuration to use for this request.

  Returns:
    (start, end) of the allocated range, inclusive.
  """
  # Delegate to the async variant and block on its result.
  rpc = AllocateIdsAsync(model_key, size, **kwargs)
  return rpc.get_result()
1995 class MultiQuery(Query
):
1996 """Class representing a query which requires multiple datastore queries.
1998 This class is actually a subclass of datastore.Query as it is intended to act
1999 like a normal Query object (supporting the same interface).
2001 Does not support keys only queries, since it needs whole entities in order
2002 to merge sort them. (That's not true if there are no sort orders, or if the
sort order is on __key__; however, allowing keys-only queries in those cases
but not in others would be confusing.)
2007 def __init__(self
, bound_queries
, orderings
):
2008 if len(bound_queries
) > MAX_ALLOWABLE_QUERIES
:
2009 raise datastore_errors
.BadArgumentError(
2010 'Cannot satisfy query -- too many subqueries (max: %d, got %d).'
2011 ' Probable cause: too many IN/!= filters in query.' %
2012 (MAX_ALLOWABLE_QUERIES
, len(bound_queries
)))
2014 projection
= (bound_queries
and
2015 bound_queries
[0].GetQueryOptions().projection
)
2017 for query
in bound_queries
:
2018 if projection
!= query
.GetQueryOptions().projection
:
2019 raise datastore_errors
.BadQueryError(
2020 'All queries must have the same projection.')
2021 if query
.IsKeysOnly():
2022 raise datastore_errors
.BadQueryError(
2023 'MultiQuery does not support keys_only.')
2025 self
.__projection
= projection
2026 self
.__bound
_queries
= bound_queries
2027 self
.__orderings
= orderings
2028 self
.__compile
= False
    # Part of __str__ (its 'def' line is truncated from this copy):
    # concatenates the string forms of all bound subqueries.
    res = 'MultiQuery: '
    for query in self.__bound_queries:
      res = '%s %s' % (res, str(query))
    # NOTE(review): 'return res' appears truncated from this copy.
  def Get(self, limit, offset=0, **kwargs):
    """Deprecated, use list(Run(...)) instead.

    Args:
      limit: int or long representing the maximum number of entities to return.
      offset: int or long representing the number of entities to skip
      kwargs: Any keyword arguments accepted by datastore_query.QueryOptions().

    Returns:
      A list of entities with at most "limit" entries (less if the query
      completes before reading limit values).
    """
    # NOTE(review): upstream only defaults batch_size when limit is None;
    # the 'if limit is None:' guard appears truncated from this copy.
    kwargs.setdefault('batch_size', _MAX_INT_32)
    return list(self.Run(limit=limit, offset=offset, **kwargs))
2052 class SortOrderEntity(object):
2053 """Allow entity comparisons using provided orderings.
2055 The iterator passed to the constructor is eventually consumed via
calls to GetNext(), which generate new SortOrderEntity instances with the
    def __init__(self, entity_iterator, orderings):
      """Ctor.

      Args:
        entity_iterator: an iterator of entities which will be wrapped.
        orderings: an iterable of (identifier, order) pairs. order
          should be either Query.ASCENDING or Query.DESCENDING.
      """
      self.__entity_iterator = entity_iterator
      # __entity stays None when the iterator is already exhausted.
      self.__entity = None
      self.__min_max_value_cache = {}
      # NOTE(review): upstream wraps the next() call in a try/except
      # StopIteration with a 'pass' body; those lines appear truncated
      # from this copy.
      self.__entity = entity_iterator.next()
      except StopIteration:

      self.__orderings = orderings
      # Part of __str__ (its 'def' line is truncated from this copy):
      # delegates to the wrapped entity's string form.
      return str(self.__entity)
2081 def GetEntity(self
):
2082 """Gets the wrapped entity."""
2083 return self
.__entity
2086 """Wrap and return the next entity.
2088 The entity is retrieved from the iterator given at construction time.
2090 return MultiQuery
.SortOrderEntity(self
.__entity
_iterator
,
    def CmpProperties(self, that):
      """Compare two entities and return their relative order.

      Compares self to that based on the current sort orderings and the
      key orders between them. Returns negative, 0, or positive depending
      on whether self is less, equal to, or greater than that. This
      comparison returns as if all values were to be placed in ascending
      order (highest value last). Only uses the sort orderings to compare
      (ignores keys).

      Args:
        that: SortOrderEntity

      Returns:
        Negative if self < that
        Zero if self == that
        Positive if self > that
      """
      # An exhausted wrapper (entity None) sorts relative to the other via
      # plain cmp on the (possibly None) entities.
      if not self.__entity:
        return cmp(self.__entity, that.__entity)

      for (identifier, order) in self.__orderings:
        value1 = self.__GetValueForId(self, identifier, order)
        value2 = self.__GetValueForId(that, identifier, order)

        result = cmp(value1, value2)
        if order == Query.DESCENDING:
        # NOTE(review): the branch body (negating result for descending
        # orders), the early return on a non-zero result, and the final
        # 'return 0' appear truncated from this copy.
    def __GetValueForId(self, sort_order_entity, identifier, sort_order):
      # Resolves the value used to order sort_order_entity on identifier,
      # reducing multi-valued properties to a single representative value
      # and caching that choice per (entity key, property).
      value = _GetPropertyValue(sort_order_entity.__entity, identifier)
      if isinstance(value, list):
        entity_key = sort_order_entity.__entity.key()
        if (entity_key, identifier) in self.__min_max_value_cache:
          value = self.__min_max_value_cache[(entity_key, identifier)]
        elif sort_order == Query.DESCENDING:
          # NOTE(review): upstream reduces with max() here (and min() in an
          # else branch) before caching; those lines appear truncated, and
          # upstream the cache write sits one indentation level out.
          self.__min_max_value_cache[(entity_key, identifier)] = value
      # NOTE(review): 'return value' appears truncated from this copy.
    def __cmp__(self, that):
      """Compare self to that w.r.t. values defined in the sort order.

      Compare an entity with another, using sort-order first, then the key
      order to break ties. This can be used in a heap to have faster
      min-value lookup.

      Args:
        that: other entity to compare to

      Returns:
        negative: if self is less than that in sort order
        zero: if self is equal to that in sort order
        positive: if self is greater than that in sort order
      """
      property_compare = self.CmpProperties(that)
      if property_compare:
        return property_compare
      # Tie on every sort-order property: fall back to comparing the keys.
      return cmp(self.__entity.key(), that.__entity.key())
  def _ExtractBounds(self, config):
    """This function extracts the range of results to consider.

    Since MultiQuery dedupes in memory, we must apply the offset and limit
    in memory. The results that should be considered are
    results[lower_bound:upper_bound].

    We also pass the offset=0 and limit=upper_bound to the base queries to
    optimize performance.

    Args:
      config: The base datastore_query.QueryOptions.

    Returns:
      a tuple consisting of the lower_bound and upper_bound to impose in
      memory and the config to use with each bound query. The upper_bound
      may be None.
    """
    # NOTE(review): an 'if config is None:' guard around this early return
    # appears truncated; as written the return is unconditional.
    return 0, None, None

    lower_bound = config.offset or 0
    upper_bound = config.limit

    if upper_bound is not None:
      # Subqueries must each be able to supply everything up to the merged
      # window's end, clamped to the 32-bit protocol maximum.
      upper_bound = min(lower_bound + upper_bound, _MAX_INT_32)
    config = datastore_query.QueryOptions(offset=0,
    # NOTE(review): the limit= and config= keyword arguments and the
    # closing parenthesis appear truncated from this copy.
    return lower_bound, upper_bound, config
def __GetProjectionOverride(self, config):
  """Returns a tuple of (original projection, projection override).

  If projection is None, there is no projection. If override is None,
  projection is sufficient for this query.
  """
  projection = datastore_query.QueryOptions.projection(config)
  if projection is None:
    projection = self.__projection

  if not projection:
    return None, None

  # Properties referenced by an ordering but absent from the projection must
  # still be fetched so results can be merge-sorted in memory.
  # NOTE(review): this override-set construction (and a fragment reading
  # `projection = projection` in the damaged original) was lost in
  # extraction and is reconstructed -- verify against the shipped SDK.
  override = set()
  for prop, _ in self.__orderings:
    if prop not in projection:
      override.add(prop)
  if not override:
    return projection, None

  return projection, projection + tuple(override)
def Run(self, **kwargs):
  """Return an iterable output with all results in order.

  Merge sort the results. First create a list of iterators, then walk
  though them and yield results in order.

  NOTE(review): several lines of this method were lost in extraction; the
  gap fills below are reconstructed -- verify against the shipped SDK.

  Args:
    kwargs: Any keyword arguments accepted by datastore_query.QueryOptions().

  Returns:
    An iterator for the result set.
  """
  config = _GetConfigFromKwargs(kwargs, convert_rpc=True,
                                config_class=datastore_query.QueryOptions)
  if config and config.keys_only:
    raise datastore_errors.BadRequestError(
        'keys only queries are not supported by multi-query.')

  # Offset/limit are applied in memory after deduping; each bound query
  # runs with offset=0 and an inflated limit (see _ExtractBounds).
  lower_bound, upper_bound, config = self._ExtractBounds(config)

  projection, override = self.__GetProjectionOverride(config)
  if override:
    config = datastore_query.QueryOptions(projection=override, config=config)

  results = []
  count = 1
  log_level = logging.DEBUG - 1
  for bound_query in self.__bound_queries:
    logging.log(log_level, 'Running query #%i' % count)
    results.append(bound_query.Run(config=config))
    count += 1

  def GetDedupeKey(sort_order_entity):
    # With a projection, two results are duplicates only if both the key
    # AND all projected values match; otherwise the key alone suffices.
    if projection:
      return (sort_order_entity.GetEntity().key(),
              frozenset(sort_order_entity.GetEntity().iteritems()))
    else:
      return sort_order_entity.GetEntity().key()

  def IterateResults(results):
    """Iterator function to return all results in sorted order.

    Iterate over the array of results, yielding the next element, in
    sorted order. This function is destructive (results will be empty
    when the operation is complete).

    Args:
      results: list of result iterators to merge and iterate through

    Yields:
      The next result in sorted order.
    """
    result_heap = []
    for result in results:
      heap_value = MultiQuery.SortOrderEntity(result, self.__orderings)
      if heap_value.GetEntity():
        heapq.heappush(result_heap, heap_value)

    used_keys = set()

    while result_heap:
      if upper_bound is not None and len(used_keys) >= upper_bound:
        break

      top_result = heapq.heappop(result_heap)
      dedupe_key = GetDedupeKey(top_result)
      if dedupe_key not in used_keys:
        result = top_result.GetEntity()
        if override:
          # Strip properties fetched only to satisfy the sort orderings.
          for key in result.keys():
            if key not in projection:
              del result[key]
        yield result

      used_keys.add(dedupe_key)

      # Pop any duplicates of top_result off the heap, advance their
      # iterators, and push the refreshed entries back on.
      results_to_push = []
      while result_heap:
        next = heapq.heappop(result_heap)
        if dedupe_key != GetDedupeKey(next):
          results_to_push.append(next)
          break
        else:
          results_to_push.append(next.GetNext())
      results_to_push.append(top_result.GetNext())

      for popped_result in results_to_push:
        if popped_result.GetEntity():
          heapq.heappush(result_heap, popped_result)

  it = IterateResults(results)

  # Burn through the in-memory offset.
  try:
    for _ in xrange(lower_bound):
      it.next()
  except StopIteration:
    pass

  return it
def Count(self, limit=1000, **kwargs):
  """Return the number of matched entities for this query.

  Will return the de-duplicated count of results. Will call the more
  efficient Get() function if a limit is given.

  NOTE(review): several lines were lost in extraction; the gap fills below
  are reconstructed -- verify against the shipped SDK.

  Args:
    limit: maximum number of entries to count (for any result > limit, return
      limit).
    config: Optional Configuration to use for this request.

  Returns:
    count of the number of entries returned.
  """
  kwargs['limit'] = limit
  config = _GetConfigFromKwargs(kwargs, convert_rpc=True,
                                config_class=datastore_query.QueryOptions)

  projection, override = self.__GetProjectionOverride(config)

  if not projection:
    # Only keys are needed for counting when there is no projection.
    config = datastore_query.QueryOptions(keys_only=True, config=config)
  elif override:
    config = datastore_query.QueryOptions(projection=override, config=config)

  lower_bound, upper_bound, config = self._ExtractBounds(config)

  used_keys = set()
  for bound_query in self.__bound_queries:
    for result in bound_query.Run(config=config):
      if projection:
        # Projection queries dedupe on key plus projected values.
        dedupe_key = (result.key(),
                      tuple(result.iteritems()))
      else:
        dedupe_key = result
      used_keys.add(dedupe_key)
      if upper_bound and len(used_keys) >= upper_bound:
        return upper_bound - lower_bound

  return max(0, len(used_keys) - lower_bound)
def GetIndexList(self):
  """Always raises: a MultiQuery cannot report the indexes it uses."""
  message = ('No index_list available for a MultiQuery (queries '
             'using "IN" or "!=" operators)')
  raise AssertionError(message)
def GetCursor(self):
  """Always raises: a MultiQuery has no single underlying cursor."""
  message = ('No cursor available for a MultiQuery (queries '
             'using "IN" or "!=" operators)')
  raise AssertionError(message)
2398 def _GetCompiledQuery(self
):
2399 """Internal only, do not use."""
2400 raise AssertionError('No compilation available for a MultiQuery (queries '
2401 'using "IN" or "!=" operators)')
def __setitem__(self, query_filter, value):
  """Add a new filter by setting it on all subqueries.

  If any of the setting operations raise an exception, the ones
  that succeeded are undone and the exception is propagated
  upward.

  Args:
    query_filter: a string of the form "property operand".
    value: the value that the given property is compared against.
  """
  saved_items = []
  for index, query in enumerate(self.__bound_queries):
    # Remember the prior value (None if absent) so we can roll back.
    saved_items.append(query.get(query_filter, None))
    try:
      query[query_filter] = value
    except:
      # Undo every subquery modified so far, then re-raise.
      # NOTE(review): the try/except and rollback tail were partially lost
      # in extraction and are reconstructed -- verify against the SDK.
      for q, old_value in itertools.izip(self.__bound_queries[:index],
                                         saved_items):
        if old_value is not None:
          q[query_filter] = old_value
        else:
          del q[query_filter]
      raise
def __delitem__(self, query_filter):
  """Delete a filter by deleting it from all subqueries.

  If a KeyError is raised during the attempt, it is ignored, unless
  every subquery raised a KeyError. If any other exception is
  raised, any deletes will be rolled back.

  Args:
    query_filter: the filter to delete.

  Raises:
    KeyError: No subquery had an entry containing query_filter.
  """
  subquery_count = len(self.__bound_queries)
  keyerror_count = 0
  saved_items = []
  for index, query in enumerate(self.__bound_queries):
    try:
      # Remember the prior value (None if absent) so we can roll back.
      saved_items.append(query.get(query_filter, None))
      del query[query_filter]
    except KeyError:
      # A single missing filter is fine; only fail if ALL are missing.
      keyerror_count += 1
    except:
      # Restore the filters deleted so far, then re-raise.
      # NOTE(review): the except clauses were partially lost in extraction
      # and are reconstructed -- verify against the shipped SDK.
      for q, old_value in itertools.izip(self.__bound_queries[:index],
                                         saved_items):
        if old_value is not None:
          q[query_filter] = old_value
      raise

  if keyerror_count == subquery_count:
    raise KeyError(query_filter)
def __iter__(self):
  """Iterate over the bound subqueries that make up this MultiQuery."""
  # NOTE(review): the `def` line itself was lost in extraction and is
  # reconstructed from the visible return statement.
  return iter(self.__bound_queries)


# Backwards-compatibility aliases for the deprecated compiled-query API.
GetCompiledCursor = GetCursor
GetCompiledQuery = _GetCompiledQuery
def RunInTransaction(function, *args, **kwargs):
  """Runs a function inside a datastore transaction.

  Runs the user-provided function inside transaction, retries default
  number of times.

  Args:
    function: a function to be run inside the transaction on all remaining
      arguments
    *args: positional arguments for function.
    **kwargs: keyword arguments for function.

  Returns:
    the function's return value, if any

  Raises:
    TransactionFailedError, if the transaction could not be committed.
  """
  # Delegates with options=None, i.e. default TransactionOptions.
  return RunInTransactionOptions(None, function, *args, **kwargs)
def RunInTransactionCustomRetries(retries, function, *args, **kwargs):
  """Runs a function inside a datastore transaction.

  Runs the user-provided function inside transaction, with a specified
  number of retries.

  Args:
    retries: number of retries (not counting the initial try)
    function: a function to be run inside the transaction on all remaining
      arguments
    *args: positional arguments for function.
    **kwargs: keyword arguments for function.

  Returns:
    the function's return value, if any

  Raises:
    TransactionFailedError, if the transaction could not be committed.
  """
  options = datastore_rpc.TransactionOptions(retries=retries)
  return RunInTransactionOptions(options, function, *args, **kwargs)
def RunInTransactionOptions(options, function, *args, **kwargs):
  """Runs a function inside a datastore transaction.

  Runs the user-provided function inside a full-featured, ACID datastore
  transaction. Every Put, Get, and Delete call in the function is made within
  the transaction. All entities involved in these calls must belong to the
  same entity group. Queries are supported as long as they specify an
  ancestor belonging to the same entity group.

  The trailing arguments are passed to the function as positional arguments.
  If the function returns a value, that value will be returned by
  RunInTransaction. Otherwise, it will return None.

  The function may raise any exception to roll back the transaction instead of
  committing it. If this happens, the transaction will be rolled back and the
  exception will be re-raised up to RunInTransaction's caller.

  If you want to roll back intentionally, but don't have an appropriate
  exception to raise, you can raise an instance of datastore_errors.Rollback.
  It will cause a rollback, but will *not* be re-raised up to the caller.

  The function may be run more than once, so it should be idempotent. It
  should avoid side effects, and it shouldn't have *any* side effects that
  aren't safe to occur multiple times. This includes modifying the arguments,
  since they persist across invocations of the function. However, this doesn't
  include Put, Get, and Delete calls, of course.

  Example usage:

  > def decrement(key, amount=1):
  >   counter = datastore.Get(key)
  >   counter['count'] -= amount
  >   if counter['count'] < 0:    # don't let the counter go negative
  >     raise datastore_errors.Rollback()
  >   datastore.Put(counter)
  >
  > counter = datastore.Query('Counter', {'name': 'foo'})
  > datastore.RunInTransaction(decrement, counter.key(), amount=5)

  Transactions satisfy the traditional ACID properties. They are:

  - Atomic. All of a transaction's operations are executed or none of them are.

  - Consistent. The datastore's state is consistent before and after a
  transaction, whether it committed or rolled back. Invariants such as
  "every entity has a primary key" are preserved.

  - Isolated. Transactions operate on a snapshot of the datastore. Other
  datastore operations do not see intermediated effects of the transaction;
  they only see its effects after it has committed.

  - Durable. On commit, all writes are persisted to the datastore.

  Nested transactions are not supported.

  Args:
    options: TransactionOptions specifying options (number of retries, etc) for
      this transaction
    function: a function to be run inside the transaction on all remaining
      arguments
    *args: positional arguments for function.
    **kwargs: keyword arguments for function.

  Returns:
    the function's return value, if any

  Raises:
    TransactionFailedError, if the transaction could not be committed.
  """
  options = datastore_rpc.TransactionOptions(options)
  if IsInTransaction():
    if options.propagation in (None, datastore_rpc.TransactionOptions.NESTED):
      # Default propagation and NESTED both fail inside an existing
      # transaction because nesting is unsupported.
      raise datastore_errors.BadRequestError(
          'Nested transactions are not supported.')
    elif options.propagation is datastore_rpc.TransactionOptions.INDEPENDENT:
      # Pause the current transaction, run independently, then restore it.
      # NOTE(review): try/finally reconstructed around the visible push/pop
      # fragments -- verify against the shipped SDK.
      txn_connection = _PopConnection()
      try:
        return RunInTransactionOptions(options, function, *args, **kwargs)
      finally:
        _PushConnection(txn_connection)
    # ALLOWED propagation: reuse the existing transaction.
    return function(*args, **kwargs)

  if options.propagation is datastore_rpc.TransactionOptions.MANDATORY:
    raise datastore_errors.BadRequestError('Requires an existing transaction.')

  retries = options.retries
  if retries is None:
    retries = DEFAULT_TRANSACTION_RETRIES

  conn = _GetConnection()
  _PushConnection(None)
  try:
    # One initial attempt plus `retries` retries.
    # NOTE(review): the `if ok:` return and try/finally lines were lost in
    # extraction and are reconstructed -- verify against the shipped SDK.
    for _ in range(0, retries + 1):
      _SetConnection(conn.new_transaction(options))
      ok, result = _DoOneTry(function, args, kwargs)
      if ok:
        return result
  finally:
    _PopConnection()

  raise datastore_errors.TransactionFailedError(
      'The transaction could not be committed. Please try again.')
def _DoOneTry(function, args, kwargs):
  """Helper to call a function in a transaction, once.

  Args:
    function: The function to call.
    args: Tuple of positional arguments.
    kwargs: Dict of keyword arguments.
  """
  # NOTE(review): the control-flow skeleton (except/else clauses and return
  # statements) was partially lost in extraction and is reconstructed --
  # verify against the shipped SDK.
  try:
    result = function(*args, **kwargs)
  except:
    original_exception = sys.exc_info()
    try:
      _GetConnection().rollback()
    except Exception:
      # Best effort only: the user's original exception matters more than a
      # failure while sending the rollback.
      logging.exception('Exception sending Rollback:')
    type, value, trace = original_exception
    if isinstance(value, datastore_errors.Rollback):
      # Deliberate rollback: swallow the exception, report success.
      return True, None
    else:
      # Python 2 three-argument raise preserves the original traceback.
      raise type, value, trace
  else:
    if _GetConnection().commit():
      return True, result
    else:
      # Commit collision; the caller will retry with a fresh transaction.
      logging.warning('Transaction collision. Retrying... %s', '')
      return False, None
def _MaybeSetupTransaction(request, keys):
  """Begin a transaction, if necessary, and populate it in the request.

  This API exists for internal backwards compatibility, primarily with
  api/taskqueue/taskqueue.py.

  Args:
    request: A protobuf with a mutable_transaction() method.
    keys: unused here; kept for backwards compatibility with old callers.

  Returns:
    A transaction if we're inside a transaction, otherwise None
  """
  return _GetConnection()._set_request_transaction(request)
def IsInTransaction():
  """Determine whether already running in transaction.

  Returns:
    True if already running in transaction, else False.
  """
  # The connection stack holds a TransactionalConnection iff a transaction
  # is currently active.
  return isinstance(_GetConnection(), datastore_rpc.TransactionalConnection)
def Transactional(_func=None, **kwargs):
  """A decorator that makes sure a function is run in a transaction.

  Defaults propagation to datastore_rpc.TransactionOptions.ALLOWED, which means
  any existing transaction will be used in place of creating a new one.

  WARNING: Reading from the datastore while in a transaction will not see any
  changes made in the same transaction. If the function being decorated relies
  on seeing all changes made in the calling scope, set
  propagation=datastore_rpc.TransactionOptions.NESTED.

  Args:
    _func: do not use.
    **kwargs: TransactionOptions configuration options.

  Returns:
    A wrapper for the given function that creates a new transaction if needed.
  """
  # Bare @Transactional usage (no parentheses).
  if _func is not None:
    return Transactional()(_func)

  # NOTE(review): a decorator line (likely @datastore_rpc._positional(1), as
  # on NonTransactional below) was lost in extraction -- verify upstream.
  if not kwargs.pop('require_new', None):
    # Reuse an existing transaction if one is already in progress.
    kwargs.setdefault('propagation', datastore_rpc.TransactionOptions.ALLOWED)

  options = datastore_rpc.TransactionOptions(**kwargs)

  def outer_wrapper(func):
    def inner_wrapper(*args, **kwds):
      return RunInTransactionOptions(options, func, *args, **kwds)
    return inner_wrapper
  return outer_wrapper
@datastore_rpc._positional(1)
def NonTransactional(_func=None, allow_existing=True):
  """A decorator that insures a function is run outside a transaction.

  If there is an existing transaction (and allow_existing=True), the existing
  transaction is paused while the function is executed.

  Args:
    _func: do not use.
    allow_existing: If false, throw an exception if called from within a
      transaction; if true, pause the transaction while the function runs.

  Returns:
    A wrapper for the decorated function that ensures it runs outside a
    transaction.
  """
  # Bare @NonTransactional usage (no parentheses).
  if _func is not None:
    return NonTransactional()(_func)

  def outer_wrapper(func):
    def inner_wrapper(*args, **kwds):
      if not IsInTransaction():
        return func(*args, **kwds)

      if not allow_existing:
        raise datastore_errors.BadRequestError(
            'Function cannot be called from within a transaction.')

      # Pause the current transaction, run the function, then restore it.
      # NOTE(review): try/finally reconstructed around the visible push/pop
      # fragments -- verify against the shipped SDK.
      txn_connection = _PopConnection()
      try:
        return func(*args, **kwds)
      finally:
        _PushConnection(txn_connection)
    return inner_wrapper
  return outer_wrapper
def _GetCompleteKeyOrError(arg):
  """Expects an Entity or a Key, and returns the corresponding Key. Raises
  BadArgumentError or BadKeyError if arg is a different type or is incomplete.

  Args:
    arg: Entity, Key, or a string-encoded Key.

  Returns:
    Key: the complete key extracted from arg.

  Raises:
    BadArgumentError: arg is not an Entity, Key, or string.
    BadKeyError: the key has neither an id nor a name (is incomplete).
  """
  if isinstance(arg, Key):
    key = arg
  elif isinstance(arg, basestring):
    # Accept a string-encoded key for backwards compatibility.
    key = Key(arg)
  elif isinstance(arg, Entity):
    key = arg.key()
  else:
    # Fix: the original guard here was `elif not isinstance(arg, Key):`,
    # which is always true at this point (Key was consumed by the first
    # branch), so a plain `else` states the intent; behavior is unchanged.
    raise datastore_errors.BadArgumentError(
        'Expects argument to be an Entity or Key; received %s (a %s).' %
        (arg, typename(arg)))
  assert isinstance(key, Key)

  if not key.has_id_or_name():
    raise datastore_errors.BadKeyError('Key %r is not complete.' % key)

  return key
def _GetPropertyValue(entity, property):
  """Returns an entity's value for a given property name.

  Handles special properties like __key__ as well as normal properties.

  Args:
    entity: datastore.Entity
    property: str; the property name

  Returns:
    property value. For __key__, a datastore_types.Key.

  Raises:
    KeyError, if the entity does not have the given property.
  """
  if property in datastore_types._SPECIAL_PROPERTIES:
    if property == datastore_types._UNAPPLIED_LOG_TIMESTAMP_SPECIAL_PROPERTY:
      # The unapplied-log timestamp pseudo-property is never exposed here.
      raise KeyError(property)

    assert property == datastore_types.KEY_SPECIAL_PROPERTY
    # NOTE(review): this return was lost in extraction and is reconstructed:
    # __key__ resolves to the entity's Key.
    return entity.key()
  else:
    return entity[property]
2825 def _AddOrAppend(dictionary
, key
, value
):
2826 """Adds the value to the existing values in the dictionary, if any.
2828 If dictionary[key] doesn't exist, sets dictionary[key] to value.
2830 If dictionary[key] is not a list, sets dictionary[key] to [old_value, value].
2832 If dictionary[key] is a list, appends value to that list.
2836 key, value: anything
2838 if key
in dictionary
:
2839 existing_value
= dictionary
[key
]
2840 if isinstance(existing_value
, list):
2841 existing_value
.append(value
)
2843 dictionary
[key
] = [existing_value
, value
]
2845 dictionary
[key
] = value
class Iterator(datastore_query.ResultsIterator):
  """Thin wrapper of datastore_query.ResultsIterator.

  Deprecated, do not use, only for backwards compatability.
  """

  def _Next(self, count=None):
    # Fetch up to `count` results from the underlying iterator.
    # NOTE(review): most of this body was lost in extraction; reconstructed
    # from the visible `if len(result) >= count` guard -- verify against
    # the shipped SDK.
    if count is None:
      count = 20
    result = []
    for r in self:
      if len(result) >= count:
        break
      result.append(r)
    return result

  def GetCompiledCursor(self, query):
    # `query` is unused; the signature is kept for backwards compatibility.
    return self.cursor()

  def GetIndexList(self):
    """Returns the list of indexes used to perform the query."""
    tuple_index_list = super(Iterator, self).index_list()
    return [index for index, state in tuple_index_list]

  # Deprecated alias kept for old callers.
  # NOTE(review): additional aliases in the original (lines lost in
  # extraction) may be missing here -- verify against the shipped SDK.
  index_list = GetIndexList
# Deprecated backwards-compatibility aliases kept so that old callers keep
# working; do not use in new code.
DatastoreRPC = apiproxy_stub_map.UserRPC
GetRpcFromKwargs = _GetConfigFromKwargs
_CurrentTransactionKey = IsInTransaction
_ToDatastoreError = datastore_rpc._ToDatastoreError
_DatastoreExceptionFromErrorCodeAndDetail = datastore_rpc._DatastoreExceptionFromErrorCodeAndDetail