Fixed #5937 -- When filtering on generic relations, restrict the target objects to...
[django.git] / django / db / models / sql / query.py
blob162838709694d560a099d8e445348c165d2e7e5e
1 """
2 Create SQL statements for QuerySets.
4 The code in here encapsulates all of the SQL construction so that QuerySets
5 themselves do not have to (and could be backed by things other than SQL
6 databases). The abstraction barrier only works one way: this module has to know
7 all about the internals of models in order to get the information it needs.
8 """
10 from copy import deepcopy
12 from django.utils.tree import Node
13 from django.utils.datastructures import SortedDict
14 from django.utils.encoding import force_unicode
15 from django.db import connection
16 from django.db.models import signals
17 from django.db.models.fields import FieldDoesNotExist
18 from django.db.models.query_utils import select_related_descend
19 from django.db.models.sql.where import WhereNode, EverythingNode, AND, OR
20 from django.db.models.sql.datastructures import Count
21 from django.core.exceptions import FieldError
22 from datastructures import EmptyResultSet, Empty, MultiJoin
23 from constants import *
try:
    # The built-in set type only exists from Python 2.4 onwards; probe for
    # it and fall back to the (slower) sets module when it is missing.
    set
except NameError:
    from sets import Set as set         # Python 2.3 fallback

# Only the Query class is part of this module's public API.
__all__ = ['Query']
class Query(object):
    """
    A single SQL query.
    """
    # SQL join types. These are part of the class because their string forms
    # vary from database to database and can be customised by a subclass.
    INNER = 'INNER JOIN'
    LOUTER = 'LEFT OUTER JOIN'

    # Prefix used when generating numbered table aliases (T1, T2, ...).
    alias_prefix = 'T'
    # The set of valid filter lookup names (e.g. "exact", "lt", ...).
    query_terms = QUERY_TERMS
    def __init__(self, model, connection, where=WhereNode):
        """
        Sets up the initial (empty) query state for 'model', using
        'connection' for SQL generation and quoting and 'where' as the class
        used to construct the where-clause tree.
        """
        self.model = model
        self.connection = connection
        self.alias_refcount = {}    # Maps alias -> number of references to it.
        self.alias_map = {}     # Maps alias to join information
        self.table_map = {}     # Maps table names to list of aliases.
        self.join_map = {}      # Maps join tuples to the aliases using them.
        self.rev_join_map = {}  # Reverse of join_map.
        self.quote_cache = {}   # Memoises quote_name_unless_alias().
        self.default_cols = True
        self.default_ordering = True
        self.standard_ordering = True
        self.ordering_aliases = []
        self.start_meta = None
        self.select_fields = []
        self.related_select_fields = []
        self.dupe_avoidance = {}
        self.used_aliases = set()
        self.filter_is_sticky = False

        # SQL-related attributes
        self.select = []
        self.tables = []    # Aliases in the order they are created.
        self.where = where()
        self.where_class = where
        self.group_by = []
        self.having = []
        self.order_by = []
        self.low_mark, self.high_mark = 0, None     # Used for offset/limit
        self.distinct = False
        self.select_related = False
        self.related_select_cols = []

        # Arbitrary maximum limit for select_related. Prevents infinite
        # recursion. Can be changed by the depth parameter to select_related().
        self.max_depth = 5

        # These are for extensions. The contents are more or less appended
        # verbatim to the appropriate clause.
        self.extra_select = SortedDict()    # Maps col_alias -> (col_sql, params).
        self.extra_tables = ()
        self.extra_where = ()
        self.extra_params = ()
        self.extra_order_by = ()
89 def __str__(self):
90 """
91 Returns the query as a string of SQL with the parameter values
92 substituted in.
94 Parameter values won't necessarily be quoted correctly, since that is
95 done by the database interface at execution time.
96 """
97 sql, params = self.as_sql()
98 return sql % params
100 def __deepcopy__(self, memo):
101 result= self.clone()
102 memo[id(self)] = result
103 return result
105 def __getstate__(self):
107 Pickling support.
109 obj_dict = self.__dict__.copy()
110 obj_dict['related_select_fields'] = []
111 obj_dict['related_select_cols'] = []
112 del obj_dict['connection']
113 return obj_dict
115 def __setstate__(self, obj_dict):
117 Unpickling support.
119 self.__dict__.update(obj_dict)
120 # XXX: Need a better solution for this when multi-db stuff is
121 # supported. It's the only class-reference to the module-level
122 # connection variable.
123 self.connection = connection
125 def get_meta(self):
127 Returns the Options instance (the model._meta) from which to start
128 processing. Normally, this is self.model._meta, but it can change.
130 if self.start_meta:
131 return self.start_meta
132 return self.model._meta
134 def quote_name_unless_alias(self, name):
136 A wrapper around connection.ops.quote_name that doesn't quote aliases
137 for table names. This avoids problems with some SQL dialects that treat
138 quoted strings specially (e.g. PostgreSQL).
140 if name in self.quote_cache:
141 return self.quote_cache[name]
142 if ((name in self.alias_map and name not in self.table_map) or
143 name in self.extra_select):
144 self.quote_cache[name] = name
145 return name
146 r = self.connection.ops.quote_name(name)
147 self.quote_cache[name] = r
148 return r
150 def clone(self, klass=None, **kwargs):
152 Creates a copy of the current instance. The 'kwargs' parameter can be
153 used by clients to update attributes after copying has taken place.
155 obj = Empty()
156 obj.__class__ = klass or self.__class__
157 obj.model = self.model
158 obj.connection = self.connection
159 obj.alias_refcount = self.alias_refcount.copy()
160 obj.alias_map = self.alias_map.copy()
161 obj.table_map = self.table_map.copy()
162 obj.join_map = self.join_map.copy()
163 obj.rev_join_map = self.rev_join_map.copy()
164 obj.quote_cache = {}
165 obj.default_cols = self.default_cols
166 obj.default_ordering = self.default_ordering
167 obj.standard_ordering = self.standard_ordering
168 obj.ordering_aliases = []
169 obj.start_meta = self.start_meta
170 obj.select_fields = self.select_fields[:]
171 obj.related_select_fields = self.related_select_fields[:]
172 obj.dupe_avoidance = self.dupe_avoidance.copy()
173 obj.select = self.select[:]
174 obj.tables = self.tables[:]
175 obj.where = deepcopy(self.where)
176 obj.where_class = self.where_class
177 obj.group_by = self.group_by[:]
178 obj.having = self.having[:]
179 obj.order_by = self.order_by[:]
180 obj.low_mark, obj.high_mark = self.low_mark, self.high_mark
181 obj.distinct = self.distinct
182 obj.select_related = self.select_related
183 obj.related_select_cols = []
184 obj.max_depth = self.max_depth
185 obj.extra_select = self.extra_select.copy()
186 obj.extra_tables = self.extra_tables
187 obj.extra_where = self.extra_where
188 obj.extra_params = self.extra_params
189 obj.extra_order_by = self.extra_order_by
190 if self.filter_is_sticky and self.used_aliases:
191 obj.used_aliases = self.used_aliases.copy()
192 else:
193 obj.used_aliases = set()
194 obj.filter_is_sticky = False
195 obj.__dict__.update(kwargs)
196 if hasattr(obj, '_setup_query'):
197 obj._setup_query()
198 return obj
200 def results_iter(self):
202 Returns an iterator over the results from executing this query.
204 resolve_columns = hasattr(self, 'resolve_columns')
205 fields = None
206 for rows in self.execute_sql(MULTI):
207 for row in rows:
208 if resolve_columns:
209 if fields is None:
210 # We only set this up here because
211 # related_select_fields isn't populated until
212 # execute_sql() has been called.
213 if self.select_fields:
214 fields = self.select_fields + self.related_select_fields
215 else:
216 fields = self.model._meta.fields
217 row = self.resolve_columns(row, fields)
218 yield row
220 def get_count(self):
222 Performs a COUNT() query using the current filter constraints.
224 from subqueries import CountQuery
225 obj = self.clone()
226 obj.clear_ordering(True)
227 obj.clear_limits()
228 obj.select_related = False
229 obj.related_select_cols = []
230 obj.related_select_fields = []
231 if len(obj.select) > 1:
232 obj = self.clone(CountQuery, _query=obj, where=self.where_class(),
233 distinct=False)
234 obj.select = []
235 obj.extra_select = SortedDict()
236 obj.add_count_column()
237 data = obj.execute_sql(SINGLE)
238 if not data:
239 return 0
240 number = data[0]
242 # Apply offset and limit constraints manually, since using LIMIT/OFFSET
243 # in SQL (in variants that provide them) doesn't change the COUNT
244 # output.
245 number = max(0, number - self.low_mark)
246 if self.high_mark:
247 number = min(number, self.high_mark - self.low_mark)
249 return number
251 def as_sql(self, with_limits=True, with_col_aliases=False):
253 Creates the SQL for this query. Returns the SQL string and list of
254 parameters.
256 If 'with_limits' is False, any limit/offset information is not included
257 in the query.
259 self.pre_sql_setup()
260 out_cols = self.get_columns(with_col_aliases)
261 ordering = self.get_ordering()
263 # This must come after 'select' and 'ordering' -- see docstring of
264 # get_from_clause() for details.
265 from_, f_params = self.get_from_clause()
267 where, w_params = self.where.as_sql(qn=self.quote_name_unless_alias)
268 params = []
269 for val in self.extra_select.itervalues():
270 params.extend(val[1])
272 result = ['SELECT']
273 if self.distinct:
274 result.append('DISTINCT')
275 result.append(', '.join(out_cols + self.ordering_aliases))
277 result.append('FROM')
278 result.extend(from_)
279 params.extend(f_params)
281 if where:
282 result.append('WHERE %s' % where)
283 params.extend(w_params)
284 if self.extra_where:
285 if not where:
286 result.append('WHERE')
287 else:
288 result.append('AND')
289 result.append(' AND '.join(self.extra_where))
291 if self.group_by:
292 grouping = self.get_grouping()
293 result.append('GROUP BY %s' % ', '.join(grouping))
295 if ordering:
296 result.append('ORDER BY %s' % ', '.join(ordering))
298 if with_limits:
299 if self.high_mark is not None:
300 result.append('LIMIT %d' % (self.high_mark - self.low_mark))
301 if self.low_mark:
302 if self.high_mark is None:
303 val = self.connection.ops.no_limit_value()
304 if val:
305 result.append('LIMIT %d' % val)
306 result.append('OFFSET %d' % self.low_mark)
308 params.extend(self.extra_params)
309 return ' '.join(result), tuple(params)
311 def combine(self, rhs, connector):
313 Merge the 'rhs' query into the current one (with any 'rhs' effects
314 being applied *after* (that is, "to the right of") anything in the
315 current query. 'rhs' is not modified during a call to this function.
317 The 'connector' parameter describes how to connect filters from the
318 'rhs' query.
320 assert self.model == rhs.model, \
321 "Cannot combine queries on two different base models."
322 assert self.can_filter(), \
323 "Cannot combine queries once a slice has been taken."
324 assert self.distinct == rhs.distinct, \
325 "Cannot combine a unique query with a non-unique query."
327 # Work out how to relabel the rhs aliases, if necessary.
328 change_map = {}
329 used = set()
330 conjunction = (connector == AND)
331 first = True
332 for alias in rhs.tables:
333 if not rhs.alias_refcount[alias]:
334 # An unused alias.
335 continue
336 promote = (rhs.alias_map[alias][JOIN_TYPE] == self.LOUTER)
337 new_alias = self.join(rhs.rev_join_map[alias],
338 (conjunction and not first), used, promote, not conjunction)
339 used.add(new_alias)
340 change_map[alias] = new_alias
341 first = False
343 # So that we don't exclude valid results in an "or" query combination,
344 # the first join that is exclusive to the lhs (self) must be converted
345 # to an outer join.
346 if not conjunction:
347 for alias in self.tables[1:]:
348 if self.alias_refcount[alias] == 1:
349 self.promote_alias(alias, True)
350 break
352 # Now relabel a copy of the rhs where-clause and add it to the current
353 # one.
354 if rhs.where:
355 w = deepcopy(rhs.where)
356 w.relabel_aliases(change_map)
357 if not self.where:
358 # Since 'self' matches everything, add an explicit "include
359 # everything" where-constraint so that connections between the
360 # where clauses won't exclude valid results.
361 self.where.add(EverythingNode(), AND)
362 elif self.where:
363 # rhs has an empty where clause.
364 w = self.where_class()
365 w.add(EverythingNode(), AND)
366 else:
367 w = self.where_class()
368 self.where.add(w, connector)
370 # Selection columns and extra extensions are those provided by 'rhs'.
371 self.select = []
372 for col in rhs.select:
373 if isinstance(col, (list, tuple)):
374 self.select.append((change_map.get(col[0], col[0]), col[1]))
375 else:
376 item = deepcopy(col)
377 item.relabel_aliases(change_map)
378 self.select.append(item)
379 self.select_fields = rhs.select_fields[:]
381 if connector == OR:
382 # It would be nice to be able to handle this, but the queries don't
383 # really make sense (or return consistent value sets). Not worth
384 # the extra complexity when you can write a real query instead.
385 if self.extra_select and rhs.extra_select:
386 raise ValueError("When merging querysets using 'or', you "
387 "cannot have extra(select=...) on both sides.")
388 if self.extra_where and rhs.extra_where:
389 raise ValueError("When merging querysets using 'or', you "
390 "cannot have extra(where=...) on both sides.")
391 self.extra_select.update(rhs.extra_select)
392 self.extra_tables += rhs.extra_tables
393 self.extra_where += rhs.extra_where
394 self.extra_params += rhs.extra_params
396 # Ordering uses the 'rhs' ordering, unless it has none, in which case
397 # the current ordering is used.
398 self.order_by = rhs.order_by and rhs.order_by[:] or self.order_by
399 self.extra_order_by = rhs.extra_order_by or self.extra_order_by
401 def pre_sql_setup(self):
403 Does any necessary class setup immediately prior to producing SQL. This
404 is for things that can't necessarily be done in __init__ because we
405 might not have all the pieces in place at that time.
407 if not self.tables:
408 self.join((None, self.model._meta.db_table, None, None))
409 if self.select_related and not self.related_select_cols:
410 self.fill_related_selections()
412 def get_columns(self, with_aliases=False):
414 Return the list of columns to use in the select statement. If no
415 columns have been specified, returns all columns relating to fields in
416 the model.
418 If 'with_aliases' is true, any column names that are duplicated
419 (without the table names) are given unique aliases. This is needed in
420 some cases to avoid ambiguitity with nested queries.
422 qn = self.quote_name_unless_alias
423 qn2 = self.connection.ops.quote_name
424 result = ['(%s) AS %s' % (col[0], qn2(alias)) for alias, col in self.extra_select.iteritems()]
425 aliases = set(self.extra_select.keys())
426 if with_aliases:
427 col_aliases = aliases.copy()
428 else:
429 col_aliases = set()
430 if self.select:
431 for col in self.select:
432 if isinstance(col, (list, tuple)):
433 r = '%s.%s' % (qn(col[0]), qn(col[1]))
434 if with_aliases and col[1] in col_aliases:
435 c_alias = 'Col%d' % len(col_aliases)
436 result.append('%s AS %s' % (r, c_alias))
437 aliases.add(c_alias)
438 col_aliases.add(c_alias)
439 else:
440 result.append(r)
441 aliases.add(r)
442 col_aliases.add(col[1])
443 else:
444 result.append(col.as_sql(quote_func=qn))
445 if hasattr(col, 'alias'):
446 aliases.add(col.alias)
447 col_aliases.add(col.alias)
448 elif self.default_cols:
449 cols, new_aliases = self.get_default_columns(with_aliases,
450 col_aliases)
451 result.extend(cols)
452 aliases.update(new_aliases)
453 for table, col in self.related_select_cols:
454 r = '%s.%s' % (qn(table), qn(col))
455 if with_aliases and col in col_aliases:
456 c_alias = 'Col%d' % len(col_aliases)
457 result.append('%s AS %s' % (r, c_alias))
458 aliases.add(c_alias)
459 col_aliases.add(c_alias)
460 else:
461 result.append(r)
462 aliases.add(r)
463 col_aliases.add(col)
465 self._select_aliases = aliases
466 return result
    def get_default_columns(self, with_aliases=False, col_aliases=None,
            start_alias=None, opts=None, as_pairs=False):
        """
        Computes the default columns for selecting every field in the base
        model.

        Returns a list of strings, quoted appropriately for use in SQL
        directly, as well as a set of aliases used in the select statement (if
        'as_pairs' is True, returns a list of (alias, col_name) pairs instead
        of strings as the first component and None as the second component).
        """
        result = []
        if opts is None:
            opts = self.model._meta
        if start_alias:
            table_alias = start_alias
        else:
            table_alias = self.tables[0]
        root_pk = opts.pk.column
        seen = {None: table_alias}
        qn = self.quote_name_unless_alias
        qn2 = self.connection.ops.quote_name
        aliases = set()
        for field, model in opts.get_fields_with_model():
            try:
                alias = seen[model]
            except KeyError:
                # Fields on a parent model live in a different table, so
                # join from the current table to the parent's.
                alias = self.join((table_alias, model._meta.db_table,
                        root_pk, model._meta.pk.column))
                seen[model] = alias
            if as_pairs:
                result.append((alias, field.column))
                continue
            if with_aliases and field.column in col_aliases:
                # Disambiguate a duplicated column name.
                c_alias = 'Col%d' % len(col_aliases)
                result.append('%s.%s AS %s' % (qn(alias),
                        qn2(field.column), c_alias))
                col_aliases.add(c_alias)
                aliases.add(c_alias)
            else:
                r = '%s.%s' % (qn(alias), qn2(field.column))
                result.append(r)
                aliases.add(r)
                if with_aliases:
                    col_aliases.add(field.column)
        if as_pairs:
            return result, None
        return result, aliases
517 def get_from_clause(self):
519 Returns a list of strings that are joined together to go after the
520 "FROM" part of the query, as well as a list any extra parameters that
521 need to be included. Sub-classes, can override this to create a
522 from-clause via a "select", for example (e.g. CountQuery).
524 This should only be called after any SQL construction methods that
525 might change the tables we need. This means the select columns and
526 ordering must be done first.
528 result = []
529 qn = self.quote_name_unless_alias
530 qn2 = self.connection.ops.quote_name
531 first = True
532 for alias in self.tables:
533 if not self.alias_refcount[alias]:
534 continue
535 try:
536 name, alias, join_type, lhs, lhs_col, col, nullable = self.alias_map[alias]
537 except KeyError:
538 # Extra tables can end up in self.tables, but not in the
539 # alias_map if they aren't in a join. That's OK. We skip them.
540 continue
541 alias_str = (alias != name and ' %s' % alias or '')
542 if join_type and not first:
543 result.append('%s %s%s ON (%s.%s = %s.%s)'
544 % (join_type, qn(name), alias_str, qn(lhs),
545 qn2(lhs_col), qn(alias), qn2(col)))
546 else:
547 connector = not first and ', ' or ''
548 result.append('%s%s%s' % (connector, qn(name), alias_str))
549 first = False
550 for t in self.extra_tables:
551 alias, unused = self.table_alias(t)
552 # Only add the alias if it's not already present (the table_alias()
553 # calls increments the refcount, so an alias refcount of one means
554 # this is the only reference.
555 if alias not in self.alias_map or self.alias_refcount[alias] == 1:
556 connector = not first and ', ' or ''
557 result.append('%s%s' % (connector, qn(alias)))
558 first = False
559 return result, []
561 def get_grouping(self):
563 Returns a tuple representing the SQL elements in the "group by" clause.
565 qn = self.quote_name_unless_alias
566 result = []
567 for col in self.group_by:
568 if isinstance(col, (list, tuple)):
569 result.append('%s.%s' % (qn(col[0]), qn(col[1])))
570 elif hasattr(col, 'as_sql'):
571 result.append(col.as_sql(qn))
572 else:
573 result.append(str(col))
574 return result
576 def get_ordering(self):
578 Returns list representing the SQL elements in the "order by" clause.
579 Also sets the ordering_aliases attribute on this instance to a list of
580 extra aliases needed in the select.
582 Determining the ordering SQL can change the tables we need to include,
583 so this should be run *before* get_from_clause().
585 if self.extra_order_by:
586 ordering = self.extra_order_by
587 elif not self.default_ordering:
588 ordering = []
589 else:
590 ordering = self.order_by or self.model._meta.ordering
591 qn = self.quote_name_unless_alias
592 qn2 = self.connection.ops.quote_name
593 distinct = self.distinct
594 select_aliases = self._select_aliases
595 result = []
596 ordering_aliases = []
597 if self.standard_ordering:
598 asc, desc = ORDER_DIR['ASC']
599 else:
600 asc, desc = ORDER_DIR['DESC']
601 for field in ordering:
602 if field == '?':
603 result.append(self.connection.ops.random_function_sql())
604 continue
605 if isinstance(field, int):
606 if field < 0:
607 order = desc
608 field = -field
609 else:
610 order = asc
611 result.append('%s %s' % (field, order))
612 continue
613 if '.' in field:
614 # This came in through an extra(order_by=...) addition. Pass it
615 # on verbatim.
616 col, order = get_order_dir(field, asc)
617 table, col = col.split('.', 1)
618 elt = '%s.%s' % (qn(table), col)
619 if not distinct or elt in select_aliases:
620 result.append('%s %s' % (elt, order))
621 elif get_order_dir(field)[0] not in self.extra_select:
622 # 'col' is of the form 'field' or 'field1__field2' or
623 # '-field1__field2__field', etc.
624 for table, col, order in self.find_ordering_name(field,
625 self.model._meta, default_order=asc):
626 elt = '%s.%s' % (qn(table), qn2(col))
627 if distinct and elt not in select_aliases:
628 ordering_aliases.append(elt)
629 result.append('%s %s' % (elt, order))
630 else:
631 col, order = get_order_dir(field, asc)
632 elt = qn(col)
633 if distinct and elt not in select_aliases:
634 ordering_aliases.append(elt)
635 result.append('%s %s' % (elt, order))
636 self.ordering_aliases = ordering_aliases
637 return result
    def find_ordering_name(self, name, opts, alias=None, default_order='ASC',
            already_seen=None):
        """
        Returns the table alias (the name might be ambiguous, the alias will
        not be) and column name for ordering by the given 'name' parameter.
        The 'name' is of the form 'field1__field2__...__fieldN'.
        """
        name, order = get_order_dir(name, default_order)
        pieces = name.split(LOOKUP_SEP)
        if not alias:
            alias = self.get_initial_alias()
        field, target, opts, joins, last, extra = self.setup_joins(pieces,
                opts, alias, False)
        alias = joins[-1]
        col = target.column
        if not field.rel:
            # To avoid inadvertent trimming of a necessary alias, use the
            # refcount to show that we are referencing a non-relation field on
            # the model.
            self.ref_alias(alias)

        # Must use left outer joins for nullable fields.
        for join in joins:
            self.promote_alias(join)

        # If we get to this point and the field is a relation to another model,
        # append the default ordering for that model.
        if field.rel and len(joins) > 1 and opts.ordering:
            # Firstly, avoid infinite loops.
            if not already_seen:
                already_seen = set()
            join_tuple = tuple([self.alias_map[j][TABLE_NAME] for j in joins])
            if join_tuple in already_seen:
                raise FieldError('Infinite loop caused by ordering.')
            already_seen.add(join_tuple)

            results = []
            for item in opts.ordering:
                results.extend(self.find_ordering_name(item, opts, alias,
                        order, already_seen))
            return results

        if alias:
            # We have to do the same "final join" optimisation as in
            # add_filter, since the final column might not otherwise be part of
            # the select set (so we can't order on it).
            while 1:
                join = self.alias_map[alias]
                if col != join[RHS_JOIN_COL]:
                    break
                self.unref_alias(alias)
                alias = join[LHS_ALIAS]
                col = join[LHS_JOIN_COL]
        return [(alias, col, order)]
694 def table_alias(self, table_name, create=False):
696 Returns a table alias for the given table_name and whether this is a
697 new alias or not.
699 If 'create' is true, a new alias is always created. Otherwise, the
700 most recently created alias for the table (if one exists) is reused.
702 current = self.table_map.get(table_name)
703 if not create and current:
704 alias = current[0]
705 self.alias_refcount[alias] += 1
706 return alias, False
708 # Create a new alias for this table.
709 if current:
710 alias = '%s%d' % (self.alias_prefix, len(self.alias_map) + 1)
711 current.append(alias)
712 else:
713 # The first occurence of a table uses the table name directly.
714 alias = table_name
715 self.table_map[alias] = [alias]
716 self.alias_refcount[alias] = 1
717 #self.alias_map[alias] = None
718 self.tables.append(alias)
719 return alias, True
721 def ref_alias(self, alias):
722 """ Increases the reference count for this alias. """
723 self.alias_refcount[alias] += 1
725 def unref_alias(self, alias):
726 """ Decreases the reference count for this alias. """
727 self.alias_refcount[alias] -= 1
729 def promote_alias(self, alias, unconditional=False):
731 Promotes the join type of an alias to an outer join if it's possible
732 for the join to contain NULL values on the left. If 'unconditional' is
733 False, the join is only promoted if it is nullable, otherwise it is
734 always promoted.
736 Returns True if the join was promoted.
738 if ((unconditional or self.alias_map[alias][NULLABLE]) and
739 self.alias_map[alias] != self.LOUTER):
740 data = list(self.alias_map[alias])
741 data[JOIN_TYPE] = self.LOUTER
742 self.alias_map[alias] = tuple(data)
743 return True
744 return False
746 def change_aliases(self, change_map):
748 Changes the aliases in change_map (which maps old-alias -> new-alias),
749 relabelling any references to them in select columns and the where
750 clause.
752 assert set(change_map.keys()).intersection(set(change_map.values())) == set()
754 # 1. Update references in "select" and "where".
755 self.where.relabel_aliases(change_map)
756 for pos, col in enumerate(self.select):
757 if isinstance(col, (list, tuple)):
758 self.select[pos] = (change_map.get(old_alias, old_alias), col[1])
759 else:
760 col.relabel_aliases(change_map)
762 # 2. Rename the alias in the internal table/alias datastructures.
763 for old_alias, new_alias in change_map.iteritems():
764 alias_data = list(self.alias_map[old_alias])
765 alias_data[RHS_ALIAS] = new_alias
767 t = self.rev_join_map[old_alias]
768 data = list(self.join_map[t])
769 data[data.index(old_alias)] = new_alias
770 self.join_map[t] = tuple(data)
771 self.rev_join_map[new_alias] = t
772 del self.rev_join_map[old_alias]
773 self.alias_refcount[new_alias] = self.alias_refcount[old_alias]
774 del self.alias_refcount[old_alias]
775 self.alias_map[new_alias] = tuple(alias_data)
776 del self.alias_map[old_alias]
778 table_aliases = self.table_map[alias_data[TABLE_NAME]]
779 for pos, alias in enumerate(table_aliases):
780 if alias == old_alias:
781 table_aliases[pos] = new_alias
782 break
783 for pos, alias in enumerate(self.tables):
784 if alias == old_alias:
785 self.tables[pos] = new_alias
786 break
788 # 3. Update any joins that refer to the old alias.
789 for alias, data in self.alias_map.iteritems():
790 lhs = data[LHS_ALIAS]
791 if lhs in change_map:
792 data = list(data)
793 data[LHS_ALIAS] = change_map[lhs]
794 self.alias_map[alias] = tuple(data)
796 def bump_prefix(self, exceptions=()):
798 Changes the alias prefix to the next letter in the alphabet and
799 relabels all the aliases. Even tables that previously had no alias will
800 get an alias after this call (it's mostly used for nested queries and
801 the outer query will already be using the non-aliased table name).
803 Subclasses who create their own prefix should override this method to
804 produce a similar result (a new prefix and relabelled aliases).
806 The 'exceptions' parameter is a container that holds alias names which
807 should not be changed.
809 assert ord(self.alias_prefix) < ord('Z')
810 self.alias_prefix = chr(ord(self.alias_prefix) + 1)
811 change_map = {}
812 prefix = self.alias_prefix
813 for pos, alias in enumerate(self.tables):
814 if alias in exceptions:
815 continue
816 new_alias = '%s%d' % (prefix, pos)
817 change_map[alias] = new_alias
818 self.tables[pos] = new_alias
819 self.change_aliases(change_map)
821 def get_initial_alias(self):
823 Returns the first alias for this query, after increasing its reference
824 count.
826 if self.tables:
827 alias = self.tables[0]
828 self.ref_alias(alias)
829 else:
830 alias = self.join((None, self.model._meta.db_table, None, None))
831 return alias
833 def count_active_tables(self):
835 Returns the number of tables in this query with a non-zero reference
836 count.
838 return len([1 for count in self.alias_refcount.itervalues() if count])
    def join(self, connection, always_create=False, exclusions=(),
            promote=False, outer_if_first=False, nullable=False, reuse=None):
        """
        Returns an alias for the join in 'connection', either reusing an
        existing alias for that join or creating a new one. 'connection' is a
        tuple (lhs, table, lhs_col, col) where 'lhs' is either an existing
        table alias or a table name. The join corresponds to the SQL
        equivalent of::

            lhs.lhs_col = table.col

        If 'always_create' is True and 'reuse' is None, a new alias is always
        created, regardless of whether one already exists or not. Otherwise
        'reuse' must be a set and a new join is created unless one of the
        aliases in `reuse` can be used.

        If 'exclusions' is specified, it is something satisfying the container
        protocol ("foo in exclusions" must work) and specifies a list of
        aliases that should not be returned, even if they satisfy the join.

        If 'promote' is True, the join type for the alias will be LOUTER (if
        the alias previously existed, the join type will be promoted from INNER
        to LOUTER, if necessary).

        If 'outer_if_first' is True and a new join is created, it will have the
        LOUTER join type. This is used when joining certain types of querysets
        and Q-objects together.

        If 'nullable' is True, the join can potentially involve NULL values and
        is a candidate for promotion (to "left outer") when combining querysets.
        """
        lhs, table, lhs_col, col = connection
        if lhs in self.alias_map:
            lhs_table = self.alias_map[lhs][TABLE_NAME]
        else:
            lhs_table = lhs

        if reuse and always_create and table in self.table_map:
            # Convert the 'reuse' case to be "exclude everything but the
            # reusable set, minus exclusions, for this table".
            exclusions = set(self.table_map[table]).difference(reuse).union(set(exclusions))
            always_create = False
        t_ident = (lhs_table, table, lhs_col, col)
        if not always_create:
            # Look for an existing, non-excluded alias for the same join.
            for alias in self.join_map.get(t_ident, ()):
                if alias not in exclusions:
                    if lhs_table and not self.alias_refcount[self.alias_map[alias][LHS_ALIAS]]:
                        # The LHS of this join tuple is no longer part of the
                        # query, so skip this possibility.
                        continue
                    self.ref_alias(alias)
                    if promote:
                        self.promote_alias(alias)
                    return alias

        # No reuse is possible, so we need a new alias.
        alias, _ = self.table_alias(table, True)
        if not lhs:
            # Not all tables need to be joined to anything. No join type
            # means the later columns are ignored.
            join_type = None
        elif promote or outer_if_first:
            join_type = self.LOUTER
        else:
            join_type = self.INNER
        join = (table, alias, join_type, lhs, lhs_col, col, nullable)
        self.alias_map[alias] = join
        if t_ident in self.join_map:
            self.join_map[t_ident] += (alias,)
        else:
            self.join_map[t_ident] = (alias,)
        self.rev_join_map[alias] = t_ident
        return alias
914 def fill_related_selections(self, opts=None, root_alias=None, cur_depth=1,
915 used=None, requested=None, restricted=None, nullable=None,
916 dupe_set=None, avoid_set=None):
918 Fill in the information needed for a select_related query. The current
919 depth is measured as the number of connections away from the root model
920 (for example, cur_depth=1 means we are looking at models with direct
921 connections to the root model).
923 if not restricted and self.max_depth and cur_depth > self.max_depth:
924 # We've recursed far enough; bail out.
925 return
927 if not opts:
928 opts = self.get_meta()
929 root_alias = self.get_initial_alias()
930 self.related_select_cols = []
931 self.related_select_fields = []
932 if not used:
933 used = set()
934 if dupe_set is None:
935 dupe_set = set()
936 if avoid_set is None:
937 avoid_set = set()
938 orig_dupe_set = dupe_set
940 # Setup for the case when only particular related fields should be
941 # included in the related selection.
942 if requested is None and restricted is not False:
943 if isinstance(self.select_related, dict):
944 requested = self.select_related
945 restricted = True
946 else:
947 restricted = False
949 for f, model in opts.get_fields_with_model():
950 if not select_related_descend(f, restricted, requested):
951 continue
952 # The "avoid" set is aliases we want to avoid just for this
953 # particular branch of the recursion. They aren't permanently
954 # forbidden from reuse in the related selection tables (which is
955 # what "used" specifies).
956 avoid = avoid_set.copy()
957 dupe_set = orig_dupe_set.copy()
958 table = f.rel.to._meta.db_table
959 if nullable or f.null:
960 promote = True
961 else:
962 promote = False
963 if model:
964 int_opts = opts
965 alias = root_alias
966 for int_model in opts.get_base_chain(model):
967 lhs_col = int_opts.parents[int_model].column
968 dedupe = lhs_col in opts.duplicate_targets
969 if dedupe:
970 avoid.update(self.dupe_avoidance.get(id(opts), lhs_col),
972 dupe_set.add((opts, lhs_col))
973 int_opts = int_model._meta
974 alias = self.join((alias, int_opts.db_table, lhs_col,
975 int_opts.pk.column), exclusions=used,
976 promote=promote)
977 for (dupe_opts, dupe_col) in dupe_set:
978 self.update_dupe_avoidance(dupe_opts, dupe_col, alias)
979 else:
980 alias = root_alias
982 dedupe = f.column in opts.duplicate_targets
983 if dupe_set or dedupe:
984 avoid.update(self.dupe_avoidance.get((id(opts), f.column), ()))
985 if dedupe:
986 dupe_set.add((opts, f.column))
988 alias = self.join((alias, table, f.column,
989 f.rel.get_related_field().column),
990 exclusions=used.union(avoid), promote=promote)
991 used.add(alias)
992 self.related_select_cols.extend(self.get_default_columns(
993 start_alias=alias, opts=f.rel.to._meta, as_pairs=True)[0])
994 self.related_select_fields.extend(f.rel.to._meta.fields)
995 if restricted:
996 next = requested.get(f.name, {})
997 else:
998 next = False
999 if f.null is not None:
1000 new_nullable = f.null
1001 else:
1002 new_nullable = None
1003 for dupe_opts, dupe_col in dupe_set:
1004 self.update_dupe_avoidance(dupe_opts, dupe_col, alias)
1005 self.fill_related_selections(f.rel.to._meta, alias, cur_depth + 1,
1006 used, next, restricted, new_nullable, dupe_set, avoid)
    def add_filter(self, filter_expr, connector=AND, negate=False, trim=False,
            can_reuse=None, process_extras=True):
        """
        Add a single filter to the query. The 'filter_expr' is a pair:
        (filter_string, value). E.g. ('name__contains', 'fred')

        If 'negate' is True, this is an exclude() filter. It's important to
        note that this method does not negate anything in the where-clause
        object when inserting the filter constraints. This is because negated
        filters often require multiple calls to add_filter() and the negation
        should only happen once. So the caller is responsible for this (the
        caller will normally be add_q(), so that as an example).

        If 'trim' is True, we automatically trim the final join group (used
        internally when constructing nested queries).

        If 'can_reuse' is a set, we are processing a component of a
        multi-component filter (e.g. filter(Q1, Q2)). In this case, 'can_reuse'
        will be a set of table aliases that can be reused in this filter, even
        if we would otherwise force the creation of new aliases for a join
        (needed for nested Q-filters). The set is updated by this method.

        If 'process_extras' is set, any extra filters returned from the table
        joining process will be processed. This parameter is set to False
        during the processing of extra filters to avoid infinite recursion.
        """
        arg, value = filter_expr
        parts = arg.split(LOOKUP_SEP)
        if not parts:
            raise FieldError("Cannot parse keyword query %r" % arg)

        # Work out the lookup type and remove it from 'parts', if necessary.
        if len(parts) == 1 or parts[-1] not in self.query_terms:
            lookup_type = 'exact'
        else:
            lookup_type = parts.pop()

        # Interpret '__exact=None' as the sql 'is NULL'; otherwise, reject all
        # uses of None as a query value.
        if value is None:
            if lookup_type != 'exact':
                raise ValueError("Cannot use None as a query value")
            lookup_type = 'isnull'
            value = True
        elif callable(value):
            value = value()

        opts = self.get_meta()
        alias = self.get_initial_alias()
        # Many-valued joins are not allowed inside an exclude() unless the
        # final join group is being trimmed off (the nested-query case).
        allow_many = trim or not negate

        try:
            field, target, opts, join_list, last, extra_filters = self.setup_joins(
                    parts, opts, alias, True, allow_many, can_reuse=can_reuse)
        except MultiJoin, e:
            # An exclude() hit an N-to-many relation; rewrite as a subquery.
            self.split_exclude(filter_expr, LOOKUP_SEP.join(parts[:e.level]))
            return
        # 'last' holds the index into join_list where each lookup part's join
        # group starts; 'penultimate' is the start of the second-to-last group.
        final = len(join_list)
        penultimate = last.pop()
        if penultimate == final:
            penultimate = last.pop()
        if trim and len(join_list) > 1:
            # Drop the final join group and compare against the LHS column of
            # its first join instead (nested-query construction).
            extra = join_list[penultimate:]
            join_list = join_list[:penultimate]
            final = penultimate
            penultimate = last.pop()
            col = self.alias_map[extra[0]][LHS_JOIN_COL]
            for alias in extra:
                self.unref_alias(alias)
        else:
            col = target.column
            alias = join_list[-1]

        while final > 1:
            # An optimization: if the final join is against the same column as
            # we are comparing against, we can go back one step in the join
            # chain and compare against the lhs of the join instead (and then
            # repeat the optimization). The result, potentially, involves less
            # table joins.
            join = self.alias_map[alias]
            if col != join[RHS_JOIN_COL]:
                break
            self.unref_alias(alias)
            alias = join[LHS_ALIAS]
            col = join[LHS_JOIN_COL]
            join_list = join_list[:-1]
            final -= 1
            if final == penultimate:
                penultimate = last.pop()

        if (lookup_type == 'isnull' and value is True and not negate and
                final > 1):
            # If the comparison is against NULL, we need to use a left outer
            # join when connecting to the previous model. We make that
            # adjustment here. We don't do this unless needed as it's less
            # efficient at the database level.
            self.promote_alias(join_list[penultimate])

        if connector == OR:
            # Some joins may need to be promoted when adding a new filter to a
            # disjunction. We walk the list of new joins and where it diverges
            # from any previous joins (ref count is 1 in the table list), we
            # make the new additions (and any existing ones not used in the new
            # join list) an outer join.
            join_it = iter(join_list)
            table_it = iter(self.tables)
            join_it.next(), table_it.next()
            table_promote = False
            for join in join_it:
                table = table_it.next()
                if join == table and self.alias_refcount[join] > 1:
                    continue
                join_promote = self.promote_alias(join)
                if table != join:
                    table_promote = self.promote_alias(table)
                break
            for join in join_it:
                if self.promote_alias(join, join_promote):
                    join_promote = True
            for table in table_it:
                # Some of these will have been promoted from the join_list, but
                # that's harmless.
                if self.promote_alias(table, table_promote):
                    table_promote = True

        self.where.add((alias, col, field, lookup_type, value), connector)
        if negate:
            # Promote every join used by the negated filter to LOUTER and,
            # where needed, add an explicit "IS NOT NULL" check so that rows
            # lacking the relation are treated correctly by the exclusion.
            for alias in join_list:
                self.promote_alias(alias)
            if lookup_type != 'isnull':
                if final > 1:
                    for alias in join_list:
                        if self.alias_map[alias][JOIN_TYPE] == self.LOUTER:
                            j_col = self.alias_map[alias][RHS_JOIN_COL]
                            entry = self.where_class()
                            entry.add((alias, j_col, None, 'isnull', True), AND)
                            entry.negate()
                            self.where.add(entry, AND)
                            break
                elif not (lookup_type == 'in' and not value) and field.null:
                    # Leaky abstraction artifact: We have to specifically
                    # exclude the "foo__in=[]" case from this handling, because
                    # it's short-circuited in the Where class.
                    entry = self.where_class()
                    entry.add((alias, col, None, 'isnull', True), AND)
                    entry.negate()
                    self.where.add(entry, AND)

        if can_reuse is not None:
            can_reuse.update(join_list)
        if process_extras:
            for filter in extra_filters:
                self.add_filter(filter, negate=negate, can_reuse=can_reuse,
                        process_extras=False)
1164 def add_q(self, q_object, used_aliases=None):
1166 Adds a Q-object to the current filter.
1168 Can also be used to add anything that has an 'add_to_query()' method.
1170 if used_aliases is None:
1171 used_aliases = self.used_aliases
1172 if hasattr(q_object, 'add_to_query'):
1173 # Complex custom objects are responsible for adding themselves.
1174 q_object.add_to_query(self, used_aliases)
1175 else:
1176 if self.where and q_object.connector != AND and len(q_object) > 1:
1177 self.where.start_subtree(AND)
1178 subtree = True
1179 else:
1180 subtree = False
1181 connector = AND
1182 for child in q_object.children:
1183 if isinstance(child, Node):
1184 self.where.start_subtree(connector)
1185 self.add_q(child, used_aliases)
1186 self.where.end_subtree()
1187 else:
1188 self.add_filter(child, connector, q_object.negated,
1189 can_reuse=used_aliases)
1190 connector = q_object.connector
1191 if q_object.negated:
1192 self.where.negate()
1193 if subtree:
1194 self.where.end_subtree()
1195 if self.filter_is_sticky:
1196 self.used_aliases = used_aliases
    def setup_joins(self, names, opts, alias, dupe_multis, allow_many=True,
            allow_explicit_fk=False, can_reuse=None):
        """
        Compute the necessary table joins for the passage through the fields
        given in 'names'. 'opts' is the Options class for the current model
        (which gives the table we are joining to), 'alias' is the alias for the
        table we are joining to. If dupe_multis is True, any many-to-many or
        many-to-one joins will always create a new alias (necessary for
        disjunctive filters). If can_reuse is not None, it's a list of aliases
        that can be reused in these joins (nothing else can be reused in this
        case).

        Returns the final field involved in the join, the target database
        column (used for any 'where' constraint), the final 'opts' value and the
        list of tables joined.
        """
        joins = [alias]
        # 'last' records, per lookup part, the index into 'joins' where that
        # part's join group starts (consumed later by add_filter).
        last = [0]
        dupe_set = set()
        exclusions = set()
        extra_filters = []
        for pos, name in enumerate(names):
            try:
                # 'int_alias' is only bound after a many-to-many join on a
                # previous iteration; probe for it via NameError.
                exclusions.add(int_alias)
            except NameError:
                pass
            exclusions.add(alias)
            last.append(len(joins))
            if name == 'pk':
                name = opts.pk.name

            try:
                field, model, direct, m2m = opts.get_field_by_name(name)
            except FieldDoesNotExist:
                for f in opts.fields:
                    if allow_explicit_fk and name == f.attname:
                        # XXX: A hack to allow foo_id to work in values() for
                        # backwards compatibility purposes. If we dropped that
                        # feature, this could be removed.
                        field, model, direct, m2m = opts.get_field_by_name(f.name)
                        break
                else:
                    names = opts.get_all_field_names()
                    raise FieldError("Cannot resolve keyword %r into field. "
                            "Choices are: %s" % (name, ", ".join(names)))

            if not allow_many and (m2m or not direct):
                # Multi-valued join where it isn't permitted: release the
                # references taken so far and report how far we got.
                for alias in joins:
                    self.unref_alias(alias)
                raise MultiJoin(pos + 1)
            if model:
                # The field lives on a base class of the current model.
                for int_model in opts.get_base_chain(model):
                    lhs_col = opts.parents[int_model].column
                    dedupe = lhs_col in opts.duplicate_targets
                    if dedupe:
                        exclusions.update(self.dupe_avoidance.get(
                            (id(opts), lhs_col), ()))
                        dupe_set.add((opts, lhs_col))
                    opts = int_model._meta
                    alias = self.join((alias, opts.db_table, lhs_col,
                            opts.pk.column), exclusions=exclusions)
                    joins.append(alias)
                    exclusions.add(alias)
                    for (dupe_opts, dupe_col) in dupe_set:
                        self.update_dupe_avoidance(dupe_opts, dupe_col, alias)
            cached_data = opts._join_cache.get(name)
            orig_opts = opts
            dupe_col = direct and field.column or field.field.column
            dedupe = dupe_col in opts.duplicate_targets
            if dupe_set or dedupe:
                if dedupe:
                    dupe_set.add((opts, dupe_col))
                exclusions.update(self.dupe_avoidance.get((id(opts), dupe_col),
                        ()))

            if hasattr(field, 'extra_filters'):
                # e.g. generic relations contribute extra restricting filters.
                extra_filters.append(field.extra_filters(names, pos))
            if direct:
                if m2m:
                    # Many-to-many field defined on the current model.
                    if cached_data:
                        (table1, from_col1, to_col1, table2, from_col2,
                                to_col2, opts, target) = cached_data
                    else:
                        table1 = field.m2m_db_table()
                        from_col1 = opts.pk.column
                        to_col1 = field.m2m_column_name()
                        opts = field.rel.to._meta
                        table2 = opts.db_table
                        from_col2 = field.m2m_reverse_name()
                        to_col2 = opts.pk.column
                        target = opts.pk
                        orig_opts._join_cache[name] = (table1, from_col1,
                                to_col1, table2, from_col2, to_col2, opts,
                                target)

                    int_alias = self.join((alias, table1, from_col1, to_col1),
                            dupe_multis, exclusions, nullable=True,
                            reuse=can_reuse)
                    alias = self.join((int_alias, table2, from_col2, to_col2),
                            dupe_multis, exclusions, nullable=True,
                            reuse=can_reuse)
                    joins.extend([int_alias, alias])
                elif field.rel:
                    # One-to-one or many-to-one field
                    if cached_data:
                        (table, from_col, to_col, opts, target) = cached_data
                    else:
                        opts = field.rel.to._meta
                        target = field.rel.get_related_field()
                        table = opts.db_table
                        from_col = field.column
                        to_col = target.column
                        orig_opts._join_cache[name] = (table, from_col, to_col,
                                opts, target)

                    alias = self.join((alias, table, from_col, to_col),
                            exclusions=exclusions, nullable=field.null)
                    joins.append(alias)
                else:
                    # Non-relation fields.
                    target = field
                    break
            else:
                orig_field = field
                field = field.field
                if m2m:
                    # Many-to-many field defined on the target model.
                    if cached_data:
                        (table1, from_col1, to_col1, table2, from_col2,
                                to_col2, opts, target) = cached_data
                    else:
                        table1 = field.m2m_db_table()
                        from_col1 = opts.pk.column
                        to_col1 = field.m2m_reverse_name()
                        opts = orig_field.opts
                        table2 = opts.db_table
                        from_col2 = field.m2m_column_name()
                        to_col2 = opts.pk.column
                        target = opts.pk
                        orig_opts._join_cache[name] = (table1, from_col1,
                                to_col1, table2, from_col2, to_col2, opts,
                                target)

                    int_alias = self.join((alias, table1, from_col1, to_col1),
                            dupe_multis, exclusions, nullable=True,
                            reuse=can_reuse)
                    alias = self.join((int_alias, table2, from_col2, to_col2),
                            dupe_multis, exclusions, nullable=True,
                            reuse=can_reuse)
                    joins.extend([int_alias, alias])
                else:
                    # One-to-many field (ForeignKey defined on the target model)
                    if cached_data:
                        (table, from_col, to_col, opts, target) = cached_data
                    else:
                        local_field = opts.get_field_by_name(
                                field.rel.field_name)[0]
                        opts = orig_field.opts
                        table = opts.db_table
                        from_col = local_field.column
                        to_col = field.column
                        target = opts.pk
                        orig_opts._join_cache[name] = (table, from_col, to_col,
                                opts, target)

                    alias = self.join((alias, table, from_col, to_col),
                            dupe_multis, exclusions, nullable=True,
                            reuse=can_reuse)
                    joins.append(alias)
            for (dupe_opts, dupe_col) in dupe_set:
                try:
                    # Prefer the intermediate m2m alias when one was created
                    # this iteration (NameError probe, as above).
                    self.update_dupe_avoidance(dupe_opts, dupe_col, int_alias)
                except NameError:
                    self.update_dupe_avoidance(dupe_opts, dupe_col, alias)

        if pos != len(names) - 1:
            raise FieldError("Join on field %r not permitted." % name)

        return field, target, opts, joins, last, extra_filters
1381 def update_dupe_avoidance(self, opts, col, alias):
1383 For a column that is one of multiple pointing to the same table, update
1384 the internal data structures to note that this alias shouldn't be used
1385 for those other columns.
1387 ident = id(opts)
1388 for name in opts.duplicate_targets[col]:
1389 try:
1390 self.dupe_avoidance[ident, name].add(alias)
1391 except KeyError:
1392 self.dupe_avoidance[ident, name] = set([alias])
1394 def split_exclude(self, filter_expr, prefix):
1396 When doing an exclude against any kind of N-to-many relation, we need
1397 to use a subquery. This method constructs the nested query, given the
1398 original exclude filter (filter_expr) and the portion up to the first
1399 N-to-many relation field.
1401 query = Query(self.model, self.connection)
1402 query.add_filter(filter_expr)
1403 query.set_start(prefix)
1404 query.clear_ordering(True)
1405 self.add_filter(('%s__in' % prefix, query), negate=True, trim=True)
1407 def set_limits(self, low=None, high=None):
1409 Adjusts the limits on the rows retrieved. We use low/high to set these,
1410 as it makes it more Pythonic to read and write. When the SQL query is
1411 created, they are converted to the appropriate offset and limit values.
1413 Any limits passed in here are applied relative to the existing
1414 constraints. So low is added to the current low value and both will be
1415 clamped to any existing high value.
1417 if high is not None:
1418 if self.high_mark:
1419 self.high_mark = min(self.high_mark, self.low_mark + high)
1420 else:
1421 self.high_mark = self.low_mark + high
1422 if low is not None:
1423 if self.high_mark:
1424 self.low_mark = min(self.high_mark, self.low_mark + low)
1425 else:
1426 self.low_mark = self.low_mark + low
1428 def clear_limits(self):
1430 Clears any existing limits.
1432 self.low_mark, self.high_mark = 0, None
1434 def can_filter(self):
1436 Returns True if adding filters to this instance is still possible.
1438 Typically, this means no limits or offsets have been put on the results.
1440 return not (self.low_mark or self.high_mark)
    def add_fields(self, field_names, allow_m2m=True):
        """
        Adds the given (model) fields to the select set. The field names are
        added in the order specified.
        """
        alias = self.get_initial_alias()
        opts = self.get_meta()
        try:
            for name in field_names:
                field, target, u2, joins, u3, u4 = self.setup_joins(
                        name.split(LOOKUP_SEP), opts, alias, False, allow_m2m,
                        True)
                final_alias = joins[-1]
                col = target.column
                if len(joins) > 1:
                    # Same optimization as in add_filter(): if the final join
                    # compares against its own RHS column, step back to the
                    # LHS of the join and drop the final alias.
                    join = self.alias_map[final_alias]
                    if col == join[RHS_JOIN_COL]:
                        self.unref_alias(final_alias)
                        final_alias = join[LHS_ALIAS]
                        col = join[LHS_JOIN_COL]
                        joins = joins[:-1]
                promote = False
                for join in joins[1:]:
                    # Only nullable aliases are promoted, so we don't end up
                    # doing unnecessary left outer joins here.
                    if self.promote_alias(join, promote):
                        promote = True
                self.select.append((final_alias, col))
                self.select_fields.append(field)
        except MultiJoin:
            raise FieldError("Invalid field name: '%s'" % name)
        except FieldError:
            # Re-raise with the full list of valid choices (model fields plus
            # any extra(select=...) aliases).
            names = opts.get_all_field_names() + self.extra_select.keys()
            names.sort()
            raise FieldError("Cannot resolve keyword %r into field. "
                    "Choices are: %s" % (name, ", ".join(names)))
1479 def add_ordering(self, *ordering):
1481 Adds items from the 'ordering' sequence to the query's "order by"
1482 clause. These items are either field names (not column names) --
1483 possibly with a direction prefix ('-' or '?') -- or ordinals,
1484 corresponding to column positions in the 'select' list.
1486 If 'ordering' is empty, all ordering is cleared from the query.
1488 errors = []
1489 for item in ordering:
1490 if not ORDER_PATTERN.match(item):
1491 errors.append(item)
1492 if errors:
1493 raise FieldError('Invalid order_by arguments: %s' % errors)
1494 if ordering:
1495 self.order_by.extend(ordering)
1496 else:
1497 self.default_ordering = False
1499 def clear_ordering(self, force_empty=False):
1501 Removes any ordering settings. If 'force_empty' is True, there will be
1502 no ordering in the resulting query (not even the model's default).
1504 self.order_by = []
1505 self.extra_order_by = ()
1506 if force_empty:
1507 self.default_ordering = False
1509 def add_count_column(self):
1511 Converts the query to do count(...) or count(distinct(pk)) in order to
1512 get its size.
1514 # TODO: When group_by support is added, this needs to be adjusted so
1515 # that it doesn't totally overwrite the select list.
1516 if not self.distinct:
1517 if not self.select:
1518 select = Count()
1519 else:
1520 assert len(self.select) == 1, \
1521 "Cannot add count col with multiple cols in 'select': %r" % self.select
1522 select = Count(self.select[0])
1523 else:
1524 opts = self.model._meta
1525 if not self.select:
1526 select = Count((self.join((None, opts.db_table, None, None)),
1527 opts.pk.column), True)
1528 else:
1529 # Because of SQL portability issues, multi-column, distinct
1530 # counts need a sub-query -- see get_count() for details.
1531 assert len(self.select) == 1, \
1532 "Cannot add count col with multiple cols in 'select'."
1533 select = Count(self.select[0], True)
1535 # Distinct handling is done in Count(), so don't do it at this
1536 # level.
1537 self.distinct = False
1538 self.select = [select]
1539 self.select_fields = [None]
1540 self.extra_select = {}
1542 def add_select_related(self, fields):
1544 Sets up the select_related data structure so that we only select
1545 certain related models (as opposed to all models, when
1546 self.select_related=True).
1548 field_dict = {}
1549 for field in fields:
1550 d = field_dict
1551 for part in field.split(LOOKUP_SEP):
1552 d = d.setdefault(part, {})
1553 self.select_related = field_dict
1554 self.related_select_cols = []
1555 self.related_select_fields = []
1557 def add_extra(self, select, select_params, where, params, tables, order_by):
1559 Adds data to the various extra_* attributes for user-created additions
1560 to the query.
1562 if select:
1563 # We need to pair any placeholder markers in the 'select'
1564 # dictionary with their parameters in 'select_params' so that
1565 # subsequent updates to the select dictionary also adjust the
1566 # parameters appropriately.
1567 select_pairs = SortedDict()
1568 if select_params:
1569 param_iter = iter(select_params)
1570 else:
1571 param_iter = iter([])
1572 for name, entry in select.items():
1573 entry = force_unicode(entry)
1574 entry_params = []
1575 pos = entry.find("%s")
1576 while pos != -1:
1577 entry_params.append(param_iter.next())
1578 pos = entry.find("%s", pos + 2)
1579 select_pairs[name] = (entry, entry_params)
1580 # This is order preserving, since self.extra_select is a SortedDict.
1581 self.extra_select.update(select_pairs)
1582 if where:
1583 self.extra_where += tuple(where)
1584 if params:
1585 self.extra_params += tuple(params)
1586 if tables:
1587 self.extra_tables += tuple(tables)
1588 if order_by:
1589 self.extra_order_by = order_by
1591 def trim_extra_select(self, names):
1593 Removes any aliases in the extra_select dictionary that aren't in
1594 'names'.
1596 This is needed if we are selecting certain values that don't incldue
1597 all of the extra_select names.
1599 for key in set(self.extra_select).difference(set(names)):
1600 del self.extra_select[key]
    def set_start(self, start):
        """
        Sets the table from which to start joining. The start position is
        specified by the related attribute from the base model. This will
        automatically set to the select column to be the column linked from the
        previous table.

        This method is primarily for internal use and the error checking isn't
        as friendly as add_filter(). Mostly useful for querying directly
        against the join table of many-to-many relation in a subquery.
        """
        opts = self.model._meta
        alias = self.get_initial_alias()
        field, col, opts, joins, last, extra = self.setup_joins(
                start.split(LOOKUP_SEP), opts, alias, False)
        # The alias at the start of the final join group becomes the new
        # query root; select the column joined from the previous table.
        alias = joins[last[-1]]
        self.select = [(alias, self.alias_map[alias][RHS_JOIN_COL])]
        self.select_fields = [field]
        self.start_meta = opts

        # The call to setup_joins add an extra reference to everything in
        # joins. So we need to unref everything once, and everything prior to
        # the final join a second time.
        for alias in joins:
            self.unref_alias(alias)
        for alias in joins[:last[-1]]:
            self.unref_alias(alias)
1630 def execute_sql(self, result_type=MULTI):
1632 Run the query against the database and returns the result(s). The
1633 return value is a single data item if result_type is SINGLE, or an
1634 iterator over the results if the result_type is MULTI.
1636 result_type is either MULTI (use fetchmany() to retrieve all rows),
1637 SINGLE (only retrieve a single row), or None (no results expected, but
1638 the cursor is returned, since it's used by subclasses such as
1639 InsertQuery).
1641 try:
1642 sql, params = self.as_sql()
1643 if not sql:
1644 raise EmptyResultSet
1645 except EmptyResultSet:
1646 if result_type == MULTI:
1647 return empty_iter()
1648 else:
1649 return
1651 cursor = self.connection.cursor()
1652 cursor.execute(sql, params)
1654 if not result_type:
1655 return cursor
1656 if result_type == SINGLE:
1657 if self.ordering_aliases:
1658 return cursor.fetchone()[:-len(results.ordering_aliases)]
1659 return cursor.fetchone()
1661 # The MULTI case.
1662 if self.ordering_aliases:
1663 result = order_modified_iter(cursor, len(self.ordering_aliases),
1664 self.connection.features.empty_fetchmany_value)
1665 else:
1666 result = iter((lambda: cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)),
1667 self.connection.features.empty_fetchmany_value)
1668 if not self.connection.features.can_use_chunked_reads:
1669 # If we are using non-chunked reads, we return the same data
1670 # structure as normally, but ensure it is all read into memory
1671 # before going any further.
1672 return list(result)
1673 return result
# Use the backend's custom Query class if it defines one. Otherwise, use the
# default.
if connection.features.uses_custom_query_class:
    # Rebinding the module-level name means everything importing Query from
    # this module picks up the backend's specialised subclass.
    Query = connection.ops.query_class(Query)
def get_order_dir(field, default='ASC'):
    """
    Returns the field name and direction for an order specification. For
    example, '-foo' is returned as ('foo', 'DESC').

    The 'default' param is used to indicate which way no prefix (or a '+'
    prefix) should sort. The '-' prefix always sorts the opposite way.
    """
    dirn = ORDER_DIR[default]
    if field[0] != '-':
        return field, dirn[0]
    # Leading '-': strip it and flip the direction.
    return field[1:], dirn[1]
def empty_iter():
    """
    Returns an iterator containing no results.
    """
    # The previous implementation ('yield iter([]).next()') depended on the
    # StopIteration raised by .next() leaking out of the generator body to
    # terminate it -- a fragile, Python-2-only idiom (PEP 479 turns such a
    # leak into a RuntimeError). An iterator over an empty tuple is the
    # explicit equivalent.
    return iter(())
def order_modified_iter(cursor, trim, sentinel):
    """
    Yields blocks of rows from a cursor. We use this iterator in the special
    case when extra output columns have been added to support ordering
    requirements. We must trim those extra columns before anything else can use
    the results, since they're only needed to make the SQL valid.
    """
    fetch_block = lambda: cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)
    for block in iter(fetch_block, sentinel):
        # Strip the trailing ordering-support columns from every row.
        yield [row[:-trim] for row in block]
def setup_join_cache(sender, **kwargs):
    """
    The information needed to join between model fields is something that is
    invariant over the life of the model, so we cache it in the model's Options
    class, rather than recomputing it all the time.

    This method initialises the (empty) cache when the model is created.
    """
    meta = sender._meta
    meta._join_cache = {}
# Initialise each model's (empty) join cache as the model class is prepared.
signals.class_prepared.connect(setup_join_cache)