--- a/app/django/db/models/sql/query.py Tue Oct 14 12:36:55 2008 +0000
+++ b/app/django/db/models/sql/query.py Tue Oct 14 16:00:59 2008 +0000
@@ -11,12 +11,13 @@
from django.utils.tree import Node
from django.utils.datastructures import SortedDict
-from django.dispatch import dispatcher
+from django.utils.encoding import force_unicode
from django.db import connection
from django.db.models import signals
+from django.db.models.fields import FieldDoesNotExist
+from django.db.models.query_utils import select_related_descend
from django.db.models.sql.where import WhereNode, EverythingNode, AND, OR
from django.db.models.sql.datastructures import Count
-from django.db.models.fields import FieldDoesNotExist
from django.core.exceptions import FieldError
from datastructures import EmptyResultSet, Empty, MultiJoin
from constants import *
@@ -56,6 +57,9 @@
self.start_meta = None
self.select_fields = []
self.related_select_fields = []
+ self.dupe_avoidance = {}
+ self.used_aliases = set()
+ self.filter_is_sticky = False
# SQL-related attributes
self.select = []
@@ -76,8 +80,7 @@
# These are for extensions. The contents are more or less appended
# verbatim to the appropriate clause.
- self.extra_select = {} # Maps col_alias -> col_sql.
- self.extra_select_params = ()
+ self.extra_select = SortedDict() # Maps col_alias -> (col_sql, params).
self.extra_tables = ()
self.extra_where = ()
self.extra_params = ()
@@ -104,6 +107,8 @@
Pickling support.
"""
obj_dict = self.__dict__.copy()
+ obj_dict['related_select_fields'] = []
+ obj_dict['related_select_cols'] = []
del obj_dict['connection']
return obj_dict
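
The __getstate__ change above works because pickle calls __getstate__ to
obtain the state dict: anything dropped there (the unpicklable connection,
the re-derivable related-select caches) is simply rebuilt later. A minimal
sketch of the pattern, with hypothetical names:

    import pickle

    class CachedThing(object):
        def __init__(self):
            self.data = [1, 2, 3]
            self._derived_cache = ['expensive']  # recomputed on demand

        def __getstate__(self):
            # Copy the instance dict and drop state that is either
            # unpicklable or cheap to recompute after unpickling.
            state = self.__dict__.copy()
            state['_derived_cache'] = []
            return state

    restored = pickle.loads(pickle.dumps(CachedThing()))
    assert restored.data == [1, 2, 3] and restored._derived_cache == []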
@@ -164,6 +169,7 @@
obj.start_meta = self.start_meta
obj.select_fields = self.select_fields[:]
obj.related_select_fields = self.related_select_fields[:]
+ obj.dupe_avoidance = self.dupe_avoidance.copy()
obj.select = self.select[:]
obj.tables = self.tables[:]
obj.where = deepcopy(self.where)
@@ -177,11 +183,15 @@
obj.related_select_cols = []
obj.max_depth = self.max_depth
obj.extra_select = self.extra_select.copy()
- obj.extra_select_params = self.extra_select_params
obj.extra_tables = self.extra_tables
obj.extra_where = self.extra_where
obj.extra_params = self.extra_params
obj.extra_order_by = self.extra_order_by
+ if self.filter_is_sticky and self.used_aliases:
+ obj.used_aliases = self.used_aliases.copy()
+ else:
+ obj.used_aliases = set()
+ obj.filter_is_sticky = False
obj.__dict__.update(kwargs)
if hasattr(obj, '_setup_query'):
obj._setup_query()
@@ -192,14 +202,18 @@
Returns an iterator over the results from executing this query.
"""
resolve_columns = hasattr(self, 'resolve_columns')
- if resolve_columns:
- if self.select_fields:
- fields = self.select_fields + self.related_select_fields
- else:
- fields = self.model._meta.fields
+ fields = None
for rows in self.execute_sql(MULTI):
for row in rows:
if resolve_columns:
+ if fields is None:
+ # We only set this up here because
+ # related_select_fields isn't populated until
+ # execute_sql() has been called.
+ if self.select_fields:
+ fields = self.select_fields + self.related_select_fields
+ else:
+ fields = self.model._meta.fields
row = self.resolve_columns(row, fields)
yield row
@@ -214,11 +228,11 @@
obj.select_related = False
obj.related_select_cols = []
obj.related_select_fields = []
- if obj.distinct and len(obj.select) > 1:
+ if len(obj.select) > 1:
obj = self.clone(CountQuery, _query=obj, where=self.where_class(),
distinct=False)
obj.select = []
- obj.extra_select = {}
+ obj.extra_select = SortedDict()
obj.add_count_column()
data = obj.execute_sql(SINGLE)
if not data:
@@ -251,7 +265,9 @@
from_, f_params = self.get_from_clause()
where, w_params = self.where.as_sql(qn=self.quote_name_unless_alias)
- params = list(self.extra_select_params)
+ params = []
+ for val in self.extra_select.itervalues():
+ params.extend(val[1])
result = ['SELECT']
if self.distinct:
@@ -276,15 +292,19 @@
grouping = self.get_grouping()
result.append('GROUP BY %s' % ', '.join(grouping))
+ if self.having:
+ having, h_params = self.get_having()
+ result.append('HAVING %s' % ' AND '.join(having))
+ params.extend(h_params)
+
if ordering:
result.append('ORDER BY %s' % ', '.join(ordering))
- # FIXME: Pull this out to make life easier for Oracle et al.
if with_limits:
- if self.high_mark:
+ if self.high_mark is not None:
result.append('LIMIT %d' % (self.high_mark - self.low_mark))
if self.low_mark:
- if not self.high_mark:
+ if self.high_mark is None:
val = self.connection.ops.no_limit_value()
if val:
result.append('LIMIT %d' % val)
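
The switch to "is not None" matters because a slice such as qs[:0] sets
high_mark to 0, which is falsy but is still a real limit. A standalone
sketch of the clause construction (the backend-specific no_limit_value()
fallback is omitted here):

    def limit_clause(low_mark, high_mark):
        # high_mark may legitimately be 0, so test against None rather
        # than truthiness.
        parts = []
        if high_mark is not None:
            parts.append('LIMIT %d' % (high_mark - low_mark))
        if low_mark:
            parts.append('OFFSET %d' % low_mark)
        return ' '.join(parts)

    assert limit_clause(0, 0) == 'LIMIT 0'   # "if high_mark:" would drop this
    assert limit_clause(5, 15) == 'LIMIT 10 OFFSET 5'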
@@ -362,10 +382,21 @@
item.relabel_aliases(change_map)
self.select.append(item)
self.select_fields = rhs.select_fields[:]
- self.extra_select = rhs.extra_select.copy()
- self.extra_tables = rhs.extra_tables
- self.extra_where = rhs.extra_where
- self.extra_params = rhs.extra_params
+
+ if connector == OR:
+ # It would be nice to be able to handle this, but the queries don't
+ # really make sense (or return consistent value sets). Not worth
+ # the extra complexity when you can write a real query instead.
+ if self.extra_select and rhs.extra_select:
+ raise ValueError("When merging querysets using 'or', you "
+ "cannot have extra(select=...) on both sides.")
+ if self.extra_where and rhs.extra_where:
+ raise ValueError("When merging querysets using 'or', you "
+ "cannot have extra(where=...) on both sides.")
+ self.extra_select.update(rhs.extra_select)
+ self.extra_tables += rhs.extra_tables
+ self.extra_where += rhs.extra_where
+ self.extra_params += rhs.extra_params
# Ordering uses the 'rhs' ordering, unless it has none, in which case
# the current ordering is used.
@@ -395,7 +426,7 @@
"""
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
- result = ['(%s) AS %s' % (col, qn2(alias)) for alias, col in self.extra_select.iteritems()]
+ result = ['(%s) AS %s' % (col[0], qn2(alias)) for alias, col in self.extra_select.iteritems()]
aliases = set(self.extra_select.keys())
if with_aliases:
col_aliases = aliases.copy()
@@ -439,28 +470,39 @@
self._select_aliases = aliases
return result
- def get_default_columns(self, with_aliases=False, col_aliases=None):
+ def get_default_columns(self, with_aliases=False, col_aliases=None,
+ start_alias=None, opts=None, as_pairs=False):
"""
Computes the default columns for selecting every field in the base
model.
Returns a list of strings, quoted appropriately for use in SQL
- directly, as well as a set of aliases used in the select statement.
+ directly, as well as a set of aliases used in the select statement. If
+ 'as_pairs' is True, the first component is instead a list of
+ (alias, col_name) pairs and the second component is None.
"""
result = []
- table_alias = self.tables[0]
- root_pk = self.model._meta.pk.column
+ if opts is None:
+ opts = self.model._meta
+ if start_alias:
+ table_alias = start_alias
+ else:
+ table_alias = self.tables[0]
+ root_pk = opts.pk.column
seen = {None: table_alias}
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
aliases = set()
- for field, model in self.model._meta.get_fields_with_model():
+ for field, model in opts.get_fields_with_model():
try:
alias = seen[model]
except KeyError:
alias = self.join((table_alias, model._meta.db_table,
root_pk, model._meta.pk.column))
seen[model] = alias
+ if as_pairs:
+ result.append((alias, field.column))
+ continue
if with_aliases and field.column in col_aliases:
c_alias = 'Col%d' % len(col_aliases)
result.append('%s.%s AS %s' % (qn(alias),
@@ -473,6 +515,8 @@
aliases.add(r)
if with_aliases:
col_aliases.add(field.column)
+ if as_pairs:
+ return result, None
return result, aliases
def get_from_clause(self):
@@ -510,7 +554,10 @@
first = False
for t in self.extra_tables:
alias, unused = self.table_alias(t)
- if alias not in self.alias_map:
+ # Only add the alias if it's not already present (the table_alias()
+ # call increments the refcount, so a refcount of one means this is
+ # the only reference).
+ if alias not in self.alias_map or self.alias_refcount[alias] == 1:
connector = not first and ', ' or ''
result.append('%s%s' % (connector, qn(alias)))
first = False
@@ -531,6 +578,24 @@
result.append(str(col))
return result
+ def get_having(self):
+ """
+ Returns a tuple representing the SQL elements in the "having" clause.
+ By default, the elements of self.having have their as_sql() method
+ called or are returned unchanged (if they don't have an as_sql()
+ method).
+ """
+ result = []
+ params = []
+ for elt in self.having:
+ if hasattr(elt, 'as_sql'):
+ sql, elt_params = elt.as_sql()
+ result.append(sql)
+ params.extend(elt_params)
+ else:
+ result.append(elt)
+ return result, params
+
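
The contract in get_having() is that each element of self.having is either
a raw SQL string or an object whose as_sql() returns a (sql, params) pair.
A minimal sketch of that accumulation, with a stand-in condition class:

    class Condition(object):
        def __init__(self, sql, params):
            self.sql, self.params = sql, params
        def as_sql(self):
            return self.sql, self.params

    def collect_having(having):
        result, params = [], []
        for elt in having:
            if hasattr(elt, 'as_sql'):
                # Bind the element's params to a fresh name so the
                # accumulator list is not clobbered.
                sql, elt_params = elt.as_sql()
                result.append(sql)
                params.extend(elt_params)
            else:
                result.append(elt)
        return result, params

    assert collect_having([Condition('COUNT(*) > %s', [2]), 'x = 1']) == \
            (['COUNT(*) > %s', 'x = 1'], [2])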
def get_ordering(self):
"""
Returns list representing the SQL elements in the "order by" clause.
@@ -543,7 +608,7 @@
if self.extra_order_by:
ordering = self.extra_order_by
elif not self.default_ordering:
- ordering = []
+ ordering = self.order_by
else:
ordering = self.order_by or self.model._meta.ordering
qn = self.quote_name_unless_alias
@@ -587,8 +652,8 @@
result.append('%s %s' % (elt, order))
else:
col, order = get_order_dir(field, asc)
- elt = qn(col)
- if distinct and elt not in select_aliases:
+ elt = qn2(col)
+ if distinct and col not in select_aliases:
ordering_aliases.append(elt)
result.append('%s %s' % (elt, order))
self.ordering_aliases = ordering_aliases
@@ -605,10 +670,18 @@
pieces = name.split(LOOKUP_SEP)
if not alias:
alias = self.get_initial_alias()
- field, target, opts, joins, last = self.setup_joins(pieces, opts,
- alias, False)
+ field, target, opts, joins, last, extra = self.setup_joins(pieces,
+ opts, alias, False)
alias = joins[-1]
col = target.column
+ if not field.rel:
+ # To avoid inadvertent trimming of a necessary alias, use the
+ # refcount to show that we are referencing a non-relation field on
+ # the model.
+ self.ref_alias(alias)
+
+ # Must use left outer joins for nullable fields.
+ self.promote_alias_chain(joins)
# If we get to this point and the field is a relation to another model,
# append the default ordering for that model.
@@ -631,8 +704,10 @@
# We have to do the same "final join" optimisation as in
# add_filter, since the final column might not otherwise be part of
# the select set (so we can't order on it).
- join = self.alias_map[alias]
- if col == join[RHS_JOIN_COL]:
+ while 1:
+ join = self.alias_map[alias]
+ if col != join[RHS_JOIN_COL]:
+ break
self.unref_alias(alias)
alias = join[LHS_ALIAS]
col = join[LHS_JOIN_COL]
@@ -661,7 +736,6 @@
alias = table_name
self.table_map[alias] = [alias]
self.alias_refcount[alias] = 1
- #self.alias_map[alias] = None
self.tables.append(alias)
return alias, True
@@ -679,12 +753,48 @@
for the join to contain NULL values on the left. If 'unconditional' is
False, the join is only promoted if it is nullable, otherwise it is
always promoted.
+
+ Returns True if the join was promoted.
"""
if ((unconditional or self.alias_map[alias][NULLABLE]) and
- self.alias_map[alias] != self.LOUTER):
+ self.alias_map[alias][JOIN_TYPE] != self.LOUTER):
data = list(self.alias_map[alias])
data[JOIN_TYPE] = self.LOUTER
self.alias_map[alias] = tuple(data)
+ return True
+ return False
+
+ def promote_alias_chain(self, chain, must_promote=False):
+ """
+ Walks along a chain of aliases, promoting the first nullable join and
+ any joins following that. If 'must_promote' is True, all the aliases in
+ the chain are promoted.
+ """
+ for alias in chain:
+ if self.promote_alias(alias, must_promote):
+ must_promote = True
+
+ def promote_unused_aliases(self, initial_refcounts, used_aliases):
+ """
+ Given a "before" copy of the alias_refcounts dictionary (as
+ 'initial_refcounts') and a collection of aliases that may have been
+ changed or created, works out which of those aliases were newly created
+ or went unused by the filter and promotes them all, plus any children
+ of theirs in the alias tree, to outer joins.
+ """
+ # FIXME: There's some (a lot of!) overlap with the similar OR promotion
+ # in add_filter(). It's not quite identical, but is very similar. So
+ # pulling out the common bits is something for later.
+ considered = {}
+ for alias in self.tables:
+ if alias not in used_aliases:
+ continue
+ if (alias not in initial_refcounts or
+ self.alias_refcount[alias] == initial_refcounts[alias]):
+ parent = self.alias_map[alias][LHS_ALIAS]
+ must_promote = considered.get(parent, False)
+ promoted = self.promote_alias(alias, must_promote)
+ considered[alias] = must_promote or promoted
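
The promotion cascade is easiest to see on a toy alias map: once one join
in the chain is promoted (or must_promote is passed in), every later join
is promoted too, because an inner join downstream of a LEFT OUTER JOIN
would discard exactly the NULL rows the promotion was meant to preserve.
A sketch with plain dicts standing in for alias_map entries:

    INNER, LOUTER = 'INNER JOIN', 'LEFT OUTER JOIN'

    def promote_chain(alias_map, chain, must_promote=False):
        # alias_map: alias -> {'type': join type, 'nullable': bool}
        for alias in chain:
            data = alias_map[alias]
            if (must_promote or data['nullable']) and data['type'] != LOUTER:
                data['type'] = LOUTER
                must_promote = True   # everything downstream follows

    aliases = {
        'T1': {'type': INNER, 'nullable': False},
        'T2': {'type': INNER, 'nullable': True},   # first nullable join
        'T3': {'type': INNER, 'nullable': False},  # dragged along by T2
    }
    promote_chain(aliases, ['T1', 'T2', 'T3'])
    assert [aliases[a]['type'] for a in ('T1', 'T2', 'T3')] == \
            [INNER, LOUTER, LOUTER]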
def change_aliases(self, change_map):
"""
@@ -698,6 +808,7 @@
self.where.relabel_aliases(change_map)
for pos, col in enumerate(self.select):
if isinstance(col, (list, tuple)):
+ old_alias = col[0]
self.select[pos] = (change_map.get(old_alias, old_alias), col[1])
else:
col.relabel_aliases(change_map)
@@ -749,10 +860,11 @@
The 'exceptions' parameter is a container that holds alias names which
should not be changed.
"""
- assert ord(self.alias_prefix) < ord('Z')
- self.alias_prefix = chr(ord(self.alias_prefix) + 1)
+ current = ord(self.alias_prefix)
+ assert current < ord('Z')
+ prefix = chr(current + 1)
+ self.alias_prefix = prefix
change_map = {}
- prefix = self.alias_prefix
for pos, alias in enumerate(self.tables):
if alias in exceptions:
continue
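
bump_prefix() exists so that a subquery and the query that embeds it can
never collide on alias names: every alias in the subquery is renamed onto
the next prefix letter. A rough sketch of how the renaming map is built:

    def bump_change_map(tables, old_prefix, exceptions=()):
        # Rename each alias positionally onto the next prefix letter
        # ('T' -> 'U', and so on), leaving any exceptions untouched.
        assert ord(old_prefix) < ord('Z')
        new_prefix = chr(ord(old_prefix) + 1)
        change_map = {}
        for pos, alias in enumerate(tables):
            if alias not in exceptions:
                change_map[alias] = '%s%d' % (new_prefix, pos)
        return change_map

    assert bump_change_map(['some_table', 'T2'], 'T') == \
            {'some_table': 'U0', 'T2': 'U1'}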
@@ -819,13 +931,19 @@
if reuse and always_create and table in self.table_map:
# Convert the 'reuse' case to be "exclude everything but the
- # reusable set for this table".
- exclusions = set(self.table_map[table]).difference(reuse)
+ # reusable set, minus exclusions, for this table".
+ exclusions = set(self.table_map[table]).difference(reuse).union(set(exclusions))
always_create = False
t_ident = (lhs_table, table, lhs_col, col)
if not always_create:
for alias in self.join_map.get(t_ident, ()):
if alias not in exclusions:
+ if lhs_table and not self.alias_refcount[self.alias_map[alias][LHS_ALIAS]]:
+ # The LHS of this join tuple is no longer part of the
+ # query, so skip this possibility.
+ continue
+ if self.alias_map[alias][LHS_ALIAS] != lhs:
+ continue
self.ref_alias(alias)
if promote:
self.promote_alias(alias)
@@ -851,7 +969,8 @@
return alias
def fill_related_selections(self, opts=None, root_alias=None, cur_depth=1,
- used=None, requested=None, restricted=None):
+ used=None, requested=None, restricted=None, nullable=None,
+ dupe_set=None, avoid_set=None):
"""
Fill in the information needed for a select_related query. The current
depth is measured as the number of connections away from the root model
@@ -861,6 +980,7 @@
if not restricted and self.max_depth and cur_depth > self.max_depth:
# We've recursed far enough; bail out.
return
+
if not opts:
opts = self.get_meta()
root_alias = self.get_initial_alias()
@@ -868,6 +988,11 @@
self.related_select_fields = []
if not used:
used = set()
+ if dupe_set is None:
+ dupe_set = set()
+ if avoid_set is None:
+ avoid_set = set()
+ orig_dupe_set = dupe_set
# Setup for the case when only particular related fields should be
# included in the related selection.
@@ -879,37 +1004,66 @@
restricted = False
for f, model in opts.get_fields_with_model():
- if (not f.rel or (restricted and f.name not in requested) or
- (not restricted and f.null) or f.rel.parent_link):
+ if not select_related_descend(f, restricted, requested):
continue
+ # The "avoid" set is aliases we want to avoid just for this
+ # particular branch of the recursion. They aren't permanently
+ # forbidden from reuse in the related selection tables (which is
+ # what "used" specifies).
+ avoid = avoid_set.copy()
+ dupe_set = orig_dupe_set.copy()
table = f.rel.to._meta.db_table
+ if nullable or f.null:
+ promote = True
+ else:
+ promote = False
if model:
int_opts = opts
alias = root_alias
for int_model in opts.get_base_chain(model):
lhs_col = int_opts.parents[int_model].column
+ dedupe = lhs_col in opts.duplicate_targets
+ if dedupe:
+ avoid.update(self.dupe_avoidance.get((id(opts), lhs_col), ()))
+ dupe_set.add((opts, lhs_col))
int_opts = int_model._meta
alias = self.join((alias, int_opts.db_table, lhs_col,
int_opts.pk.column), exclusions=used,
- promote=f.null)
+ promote=promote)
+ for (dupe_opts, dupe_col) in dupe_set:
+ self.update_dupe_avoidance(dupe_opts, dupe_col, alias)
else:
alias = root_alias
+
+ dedupe = f.column in opts.duplicate_targets
+ if dupe_set or dedupe:
+ avoid.update(self.dupe_avoidance.get((id(opts), f.column), ()))
+ if dedupe:
+ dupe_set.add((opts, f.column))
+
alias = self.join((alias, table, f.column,
- f.rel.get_related_field().column), exclusions=used,
- promote=f.null)
+ f.rel.get_related_field().column),
+ exclusions=used.union(avoid), promote=promote)
used.add(alias)
- self.related_select_cols.extend([(alias, f2.column)
- for f2 in f.rel.to._meta.fields])
+ self.related_select_cols.extend(self.get_default_columns(
+ start_alias=alias, opts=f.rel.to._meta, as_pairs=True)[0])
self.related_select_fields.extend(f.rel.to._meta.fields)
if restricted:
next = requested.get(f.name, {})
else:
next = False
+ new_nullable = f.null
+ for dupe_opts, dupe_col in dupe_set:
+ self.update_dupe_avoidance(dupe_opts, dupe_col, alias)
self.fill_related_selections(f.rel.to._meta, alias, cur_depth + 1,
- used, next, restricted)
+ used, next, restricted, new_nullable, dupe_set, avoid)
def add_filter(self, filter_expr, connector=AND, negate=False, trim=False,
- can_reuse=None):
+ can_reuse=None, process_extras=True):
"""
Add a single filter to the query. The 'filter_expr' is a pair:
(filter_string, value). E.g. ('name__contains', 'fred')
@@ -929,6 +1083,10 @@
will be a set of table aliases that can be reused in this filter, even
if we would otherwise force the creation of new aliases for a join
(needed for nested Q-filters). The set is updated by this method.
+
+ If 'process_extras' is set, any extra filters returned from the table
+ joining process will be processed. This parameter is set to False
+ during the processing of extra filters to avoid infinite recursion.
"""
arg, value = filter_expr
parts = arg.split(LOOKUP_SEP)
@@ -948,6 +1106,10 @@
raise ValueError("Cannot use None as a query value")
lookup_type = 'isnull'
value = True
+ elif (value == '' and lookup_type == 'exact' and
+ connection.features.interprets_empty_strings_as_nulls):
+ lookup_type = 'isnull'
+ value = True
elif callable(value):
value = value()
@@ -956,10 +1118,12 @@
allow_many = trim or not negate
try:
- field, target, opts, join_list, last = self.setup_joins(parts, opts,
- alias, True, allow_many, can_reuse=can_reuse)
+ field, target, opts, join_list, last, extra_filters = self.setup_joins(
+ parts, opts, alias, True, allow_many, can_reuse=can_reuse,
+ negate=negate, process_extras=process_extras)
except MultiJoin, e:
- self.split_exclude(filter_expr, LOOKUP_SEP.join(parts[:e.level]))
+ self.split_exclude(filter_expr, LOOKUP_SEP.join(parts[:e.level]),
+ can_reuse)
return
final = len(join_list)
penultimate = last.pop()
@@ -977,20 +1141,22 @@
col = target.column
alias = join_list[-1]
- if final > 1:
+ while final > 1:
# An optimization: if the final join is against the same column as
# we are comparing against, we can go back one step in the join
- # chain and compare against the lhs of the join instead. The result
- # (potentially) involves one less table join.
+ # chain and compare against the lhs of the join instead (and then
+ # repeat the optimization). The result, potentially, involves fewer
+ # table joins.
join = self.alias_map[alias]
- if col == join[RHS_JOIN_COL]:
- self.unref_alias(alias)
- alias = join[LHS_ALIAS]
- col = join[LHS_JOIN_COL]
- join_list = join_list[:-1]
- final -= 1
- if final == penultimate:
- penultimate = last.pop()
+ if col != join[RHS_JOIN_COL]:
+ break
+ self.unref_alias(alias)
+ alias = join[LHS_ALIAS]
+ col = join[LHS_JOIN_COL]
+ join_list = join_list[:-1]
+ final -= 1
+ if final == penultimate:
+ penultimate = last.pop()
if (lookup_type == 'isnull' and value is True and not negate and
final > 1):
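
The trimming loop above is clearer with concrete join data: whenever the
comparison column is the column the final join arrived at, the constraint
can be rewritten against the left-hand side of that join and the join
dropped, and the new "while" form repeats this until it stops applying.
A sketch with (lhs_alias, lhs_join_col, rhs_join_col) triples standing in
for alias_map entries:

    def trim_joins(alias_map, join_list, alias, col):
        while len(join_list) > 1:
            lhs_alias, lhs_col, rhs_col = alias_map[alias]
            if col != rhs_col:
                break
            # Comparing against the join column itself: step back a join.
            alias, col = lhs_alias, lhs_col
            join_list = join_list[:-1]
        return join_list, alias, col

    alias_map = {'T2': ('T1', 'author_id', 'id')}
    # "WHERE T2.id = x" becomes "WHERE T1.author_id = x" and T2 is dropped.
    assert trim_joins(alias_map, ['T1', 'T2'], 'T2', 'id') == \
            (['T1'], 'T1', 'author_id')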
@@ -1009,35 +1175,48 @@
join_it = iter(join_list)
table_it = iter(self.tables)
join_it.next(), table_it.next()
+ table_promote = False
+ join_promote = False
for join in join_it:
table = table_it.next()
if join == table and self.alias_refcount[join] > 1:
continue
- self.promote_alias(join)
+ join_promote = self.promote_alias(join)
if table != join:
- self.promote_alias(table)
+ table_promote = self.promote_alias(table)
break
- for join in join_it:
- self.promote_alias(join)
- for table in table_it:
- # Some of these will have been promoted from the join_list, but
- # that's harmless.
- self.promote_alias(table)
+ self.promote_alias_chain(join_it, join_promote)
+ self.promote_alias_chain(table_it, table_promote)
self.where.add((alias, col, field, lookup_type, value), connector)
+
if negate:
- for alias in join_list:
- self.promote_alias(alias)
- if final > 1 and lookup_type != 'isnull':
- for alias in join_list:
- if self.alias_map[alias] == self.LOUTER:
- j_col = self.alias_map[alias][RHS_JOIN_COL]
- entry = Node([(alias, j_col, None, 'isnull', True)])
- entry.negate()
- self.where.add(entry, AND)
- break
+ self.promote_alias_chain(join_list)
+ if lookup_type != 'isnull':
+ if final > 1:
+ for alias in join_list:
+ if self.alias_map[alias][JOIN_TYPE] == self.LOUTER:
+ j_col = self.alias_map[alias][RHS_JOIN_COL]
+ entry = self.where_class()
+ entry.add((alias, j_col, None, 'isnull', True), AND)
+ entry.negate()
+ self.where.add(entry, AND)
+ break
+ elif not (lookup_type == 'in' and not value) and field.null:
+ # Leaky abstraction artifact: We have to specifically
+ # exclude the "foo__in=[]" case from this handling, because
+ # it's short-circuited in the Where class.
+ entry = self.where_class()
+ entry.add((alias, col, None, 'isnull', True), AND)
+ entry.negate()
+ self.where.add(entry, AND)
+
if can_reuse is not None:
can_reuse.update(join_list)
+ if process_extras:
+ for filter in extra_filters:
+ self.add_filter(filter, negate=negate, can_reuse=can_reuse,
+ process_extras=False)
def add_q(self, q_object, used_aliases=None):
"""
@@ -1046,41 +1225,56 @@
Can also be used to add anything that has an 'add_to_query()' method.
"""
if used_aliases is None:
- used_aliases = set()
+ used_aliases = self.used_aliases
if hasattr(q_object, 'add_to_query'):
# Complex custom objects are responsible for adding themselves.
q_object.add_to_query(self, used_aliases)
- return
-
- if self.where and q_object.connector != AND and len(q_object) > 1:
- self.where.start_subtree(AND)
- subtree = True
else:
- subtree = False
- connector = AND
- for child in q_object.children:
- if isinstance(child, Node):
- self.where.start_subtree(connector)
- self.add_q(child, used_aliases)
- self.where.end_subtree()
+ if self.where and q_object.connector != AND and len(q_object) > 1:
+ self.where.start_subtree(AND)
+ subtree = True
else:
- self.add_filter(child, connector, q_object.negated,
- can_reuse=used_aliases)
- connector = q_object.connector
- if q_object.negated:
- self.where.negate()
- if subtree:
- self.where.end_subtree()
+ subtree = False
+ connector = AND
+ for child in q_object.children:
+ if connector == OR:
+ refcounts_before = self.alias_refcount.copy()
+ if isinstance(child, Node):
+ self.where.start_subtree(connector)
+ self.add_q(child, used_aliases)
+ self.where.end_subtree()
+ else:
+ self.add_filter(child, connector, q_object.negated,
+ can_reuse=used_aliases)
+ if connector == OR:
+ # Aliases that were newly added or not used at all need to
+ # be promoted to outer joins if they are nullable relations.
+ # (they shouldn't turn the whole conditional into the empty
+ # set just because they don't match anything).
+ self.promote_unused_aliases(refcounts_before, used_aliases)
+ connector = q_object.connector
+ if q_object.negated:
+ self.where.negate()
+ if subtree:
+ self.where.end_subtree()
+ if self.filter_is_sticky:
+ self.used_aliases = used_aliases
def setup_joins(self, names, opts, alias, dupe_multis, allow_many=True,
- allow_explicit_fk=False, can_reuse=None):
+ allow_explicit_fk=False, can_reuse=None, negate=False,
+ process_extras=True):
"""
Compute the necessary table joins for the passage through the fields
given in 'names'. 'opts' is the Options class for the current model
(which gives the table we are joining to), 'alias' is the alias for the
table we are joining to. If dupe_multis is True, any many-to-many or
many-to-one joins will always create a new alias (necessary for
- disjunctive filters).
+ disjunctive filters). If 'can_reuse' is not None, it's a set of aliases
+ that can be reused in these joins (nothing else can be reused in this
+ case). Finally, 'negate' is used in the same sense as for add_filter()
+ -- it indicates an exclude() filter, or something similar. It is only
+ passed in here so that it can be passed to a field's extra_filters()
+ for customised behaviour.
Returns the final field involved in the join, the target database
column (used for any 'where' constraint), the final 'opts' value and the
@@ -1088,7 +1282,15 @@
"""
joins = [alias]
last = [0]
+ dupe_set = set()
+ exclusions = set()
+ extra_filters = []
for pos, name in enumerate(names):
+ try:
+ exclusions.add(int_alias)
+ except NameError:
+ pass
+ exclusions.add(alias)
last.append(len(joins))
if name == 'pk':
name = opts.pk.name
@@ -1107,22 +1309,39 @@
names = opts.get_all_field_names()
raise FieldError("Cannot resolve keyword %r into field. "
"Choices are: %s" % (name, ", ".join(names)))
+
if not allow_many and (m2m or not direct):
for alias in joins:
self.unref_alias(alias)
raise MultiJoin(pos + 1)
if model:
# The field lives on a base class of the current model.
- alias_list = []
for int_model in opts.get_base_chain(model):
lhs_col = opts.parents[int_model].column
+ dedupe = lhs_col in opts.duplicate_targets
+ if dedupe:
+ exclusions.update(self.dupe_avoidance.get(
+ (id(opts), lhs_col), ()))
+ dupe_set.add((opts, lhs_col))
opts = int_model._meta
alias = self.join((alias, opts.db_table, lhs_col,
- opts.pk.column), exclusions=joins)
+ opts.pk.column), exclusions=exclusions)
joins.append(alias)
+ exclusions.add(alias)
+ for (dupe_opts, dupe_col) in dupe_set:
+ self.update_dupe_avoidance(dupe_opts, dupe_col, alias)
cached_data = opts._join_cache.get(name)
orig_opts = opts
+ dupe_col = direct and field.column or field.field.column
+ dedupe = dupe_col in opts.duplicate_targets
+ if dupe_set or dedupe:
+ if dedupe:
+ dupe_set.add((opts, dupe_col))
+ exclusions.update(self.dupe_avoidance.get((id(opts), dupe_col),
+ ()))
+ if process_extras and hasattr(field, 'extra_filters'):
+ extra_filters.extend(field.extra_filters(names, pos, negate))
if direct:
if m2m:
# Many-to-many field defined on the current model.
@@ -1143,10 +1362,17 @@
target)
int_alias = self.join((alias, table1, from_col1, to_col1),
- dupe_multis, joins, nullable=True, reuse=can_reuse)
- alias = self.join((int_alias, table2, from_col2, to_col2),
- dupe_multis, joins, nullable=True, reuse=can_reuse)
- joins.extend([int_alias, alias])
+ dupe_multis, exclusions, nullable=True,
+ reuse=can_reuse)
+ if int_alias == table2 and from_col2 == to_col2:
+ joins.append(int_alias)
+ alias = int_alias
+ else:
+ alias = self.join(
+ (int_alias, table2, from_col2, to_col2),
+ dupe_multis, exclusions, nullable=True,
+ reuse=can_reuse)
+ joins.extend([int_alias, alias])
elif field.rel:
# One-to-one or many-to-one field
if cached_data:
@@ -1161,7 +1387,7 @@
opts, target)
alias = self.join((alias, table, from_col, to_col),
- exclusions=joins, nullable=field.null)
+ exclusions=exclusions, nullable=field.null)
joins.append(alias)
else:
# Non-relation fields.
@@ -1189,9 +1415,11 @@
target)
int_alias = self.join((alias, table1, from_col1, to_col1),
- dupe_multis, joins, nullable=True, reuse=can_reuse)
+ dupe_multis, exclusions, nullable=True,
+ reuse=can_reuse)
alias = self.join((int_alias, table2, from_col2, to_col2),
- dupe_multis, joins, nullable=True, reuse=can_reuse)
+ dupe_multis, exclusions, nullable=True,
+ reuse=can_reuse)
joins.extend([int_alias, alias])
else:
# One-to-many field (ForeignKey defined on the target model)
@@ -1209,15 +1437,35 @@
opts, target)
alias = self.join((alias, table, from_col, to_col),
- dupe_multis, joins, nullable=True, reuse=can_reuse)
+ dupe_multis, exclusions, nullable=True,
+ reuse=can_reuse)
joins.append(alias)
+ for (dupe_opts, dupe_col) in dupe_set:
+ try:
+ self.update_dupe_avoidance(dupe_opts, dupe_col, int_alias)
+ except NameError:
+ self.update_dupe_avoidance(dupe_opts, dupe_col, alias)
+
if pos != len(names) - 1:
raise FieldError("Join on field %r not permitted." % name)
- return field, target, opts, joins, last
+ return field, target, opts, joins, last, extra_filters
- def split_exclude(self, filter_expr, prefix):
+ def update_dupe_avoidance(self, opts, col, alias):
+ """
+ For a column that is one of multiple pointing to the same table, update
+ the internal data structures to note that this alias shouldn't be used
+ for those other columns.
+ """
+ ident = id(opts)
+ for name in opts.duplicate_targets[col]:
+ try:
+ self.dupe_avoidance[ident, name].add(alias)
+ except KeyError:
+ self.dupe_avoidance[ident, name] = set([alias])
+
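
The dupe_avoidance structure is a dict-of-sets keyed on
(id(opts), column): for a column that is one of several pointing at the
same table, it records which aliases are already spoken for and so must
not be reused for the sibling columns. The update itself reduces to:

    def note_dupe(dupe_avoidance, opts_ident, sibling_columns, alias):
        # Record that 'alias' is taken, so it must not be reused when
        # joining via any of the sibling columns.
        for name in sibling_columns:
            try:
                dupe_avoidance[opts_ident, name].add(alias)
            except KeyError:
                dupe_avoidance[opts_ident, name] = set([alias])

    avoid = {}
    note_dupe(avoid, 1, ['from_id', 'to_id'], 'T3')
    assert avoid == {(1, 'from_id'): set(['T3']),
                     (1, 'to_id'): set(['T3'])}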
+ def split_exclude(self, filter_expr, prefix, can_reuse):
"""
When doing an exclude against any kind of N-to-many relation, we need
to use a subquery. This method constructs the nested query, given the
@@ -1225,10 +1473,12 @@
N-to-many relation field.
"""
query = Query(self.model, self.connection)
- query.add_filter(filter_expr)
+ query.add_filter(filter_expr, can_reuse=can_reuse)
+ query.bump_prefix()
query.set_start(prefix)
query.clear_ordering(True)
- self.add_filter(('%s__in' % prefix, query), negate=True, trim=True)
+ self.add_filter(('%s__in' % prefix, query), negate=True, trim=True,
+ can_reuse=can_reuse)
def set_limits(self, low=None, high=None):
"""
@@ -1240,13 +1490,13 @@
constraints. So low is added to the current low value and both will be
clamped to any existing high value.
"""
- if high:
- if self.high_mark:
+ if high is not None:
+ if self.high_mark is not None:
self.high_mark = min(self.high_mark, self.low_mark + high)
else:
self.high_mark = self.low_mark + high
- if low:
- if self.high_mark:
+ if low is not None:
+ if self.high_mark is not None:
self.low_mark = min(self.high_mark, self.low_mark + low)
else:
self.low_mark = self.low_mark + low
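
Because the new bounds are interpreted against the already-sliced query,
repeated slicing composes: the incoming marks are offset by the current
low_mark and clamped to the current high_mark. A standalone sketch, again
treating 0 and None as distinct values:

    def combine_limits(low_mark, high_mark, low=None, high=None):
        # Apply a (low, high) slice on top of an existing slice and
        # return the combined absolute marks.
        if high is not None:
            if high_mark is not None:
                high_mark = min(high_mark, low_mark + high)
            else:
                high_mark = low_mark + high
        if low is not None:
            if high_mark is not None:
                low_mark = min(high_mark, low_mark + low)
            else:
                low_mark = low_mark + low
        return low_mark, high_mark

    # qs[5:15][2:4] selects rows 7 and 8 of the unsliced query.
    marks = combine_limits(0, None, 5, 15)
    assert combine_limits(marks[0], marks[1], 2, 4) == (7, 9)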
@@ -1274,7 +1524,7 @@
opts = self.get_meta()
try:
for name in field_names:
- field, target, u2, joins, u3 = self.setup_joins(
+ field, target, u2, joins, u3, u4 = self.setup_joins(
name.split(LOOKUP_SEP), opts, alias, False, allow_m2m,
True)
final_alias = joins[-1]
@@ -1286,10 +1536,7 @@
final_alias = join[LHS_ALIAS]
col = join[LHS_JOIN_COL]
joins = joins[:-1]
- for join in joins[1:]:
- # Only nullable aliases are promoted, so we don't end up
- # doing unnecessary left outer joins here.
- self.promote_alias(join)
+ self.promote_alias_chain(joins[1:])
self.select.append((final_alias, col))
self.select_fields.append(field)
except MultiJoin:
@@ -1362,7 +1609,6 @@
self.select = [select]
self.select_fields = [None]
self.extra_select = {}
- self.extra_select_params = ()
def add_select_related(self, fields):
"""
@@ -1385,14 +1631,25 @@
to the query.
"""
if select:
- # The extra select might be ordered (because it will be accepting
- # parameters).
- if (isinstance(select, SortedDict) and
- not isinstance(self.extra_select, SortedDict)):
- self.extra_select = SortedDict(self.extra_select)
- self.extra_select.update(select)
- if select_params:
- self.extra_select_params += tuple(select_params)
+ # We need to pair any placeholder markers in the 'select'
+ # dictionary with their parameters in 'select_params' so that
+ # subsequent updates to the select dictionary also adjust the
+ # parameters appropriately.
+ select_pairs = SortedDict()
+ if select_params:
+ param_iter = iter(select_params)
+ else:
+ param_iter = iter([])
+ for name, entry in select.items():
+ entry = force_unicode(entry)
+ entry_params = []
+ pos = entry.find("%s")
+ while pos != -1:
+ entry_params.append(param_iter.next())
+ pos = entry.find("%s", pos + 2)
+ select_pairs[name] = (entry, entry_params)
+ # This is order preserving, since self.extra_select is a SortedDict.
+ self.extra_select.update(select_pairs)
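
The pairing loop consumes one parameter from select_params for every "%s"
placeholder in each entry, so each SQL fragment travels with exactly its
own parameters through later copies and merges. A cut-down sketch:

    def pair_select_params(select_items, select_params):
        # select_items: ordered (name, sql) pairs; select_params: flat
        # parameter list, consumed left to right, one per placeholder.
        param_iter = iter(select_params or [])
        pairs = []
        for name, entry in select_items:
            entry_params = []
            pos = entry.find('%s')
            while pos != -1:
                entry_params.append(next(param_iter))
                pos = entry.find('%s', pos + 2)
            pairs.append((name, (entry, entry_params)))
        return pairs

    assert pair_select_params([('a', 'id + %s'), ('b', '%s * %s')],
            [1, 2, 3]) == \
            [('a', ('id + %s', [1])), ('b', ('%s * %s', [2, 3]))]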
if where:
self.extra_where += tuple(where)
if params:
@@ -1426,8 +1683,9 @@
"""
opts = self.model._meta
alias = self.get_initial_alias()
- field, col, opts, joins, last = self.setup_joins(
+ field, col, opts, joins, last, extra = self.setup_joins(
start.split(LOOKUP_SEP), opts, alias, False)
+ self.unref_alias(alias)
alias = joins[last[-1]]
self.select = [(alias, self.alias_map[alias][RHS_JOIN_COL])]
self.select_fields = [field]
@@ -1474,10 +1732,17 @@
# The MULTI case.
if self.ordering_aliases:
- return order_modified_iter(cursor, len(self.ordering_aliases),
+ result = order_modified_iter(cursor, len(self.ordering_aliases),
+ self.connection.features.empty_fetchmany_value)
+ else:
+ result = iter((lambda: cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)),
self.connection.features.empty_fetchmany_value)
- return iter((lambda: cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)),
- self.connection.features.empty_fetchmany_value)
+ if not self.connection.features.can_use_chunked_reads:
+ # If we are using non-chunked reads, we return the same data
+ # structure as normal, but ensure it is all read into memory
+ # before going any further.
+ return list(result)
+ return result
# Use the backend's custom Query class if it defines one. Otherwise, use the
# default.
@@ -1514,7 +1779,7 @@
sentinel):
yield [r[:-trim] for r in rows]
-def setup_join_cache(sender):
+def setup_join_cache(sender, **kwargs):
"""
The information needed to join between model fields is something that is
invariant over the life of the model, so we cache it in the model's Options
@@ -1524,5 +1789,5 @@
"""
sender._meta._join_cache = {}
-dispatcher.connect(setup_join_cache, signal=signals.class_prepared)
+signals.class_prepared.connect(setup_join_cache)